mihirma committed
Commit cd5df33 · verified · 1 Parent(s): 4b315cf

Add files using upload-large-folder tool

Files changed (20)
  1. 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-03e95750-d1d7-4aba-ba4c-b80d732967351767624280770-2026_01_05-15.44.46.878/source.csv +0 -0
  2. 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-12ac3267-b673-44fd-8ea3-37e3e74cb0101755540018956-2025_08_18-20.00.27.475/source.csv +566 -0
  3. 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1a3a8350-ade5-4f14-90d3-a2023f5be9fa1753600712073-2025_07_27-09.18.39.905/source.csv +0 -0
  4. 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3f2b1a99-0d75-466c-970c-4deff62cba851753462933379-2025_07_25-19.02.23.245/source.csv +94 -0
  5. 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6410c04a-5509-42a0-b7ec-8fa2503faf3a1758380010770-2025_09_20-16.53.40.475/source.csv +0 -0
  6. 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-accd586c-9376-4507-a888-197a6c40bdf51757184416102-2025_09_06-20.47.03.130/source.csv +4 -0
  7. 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-dedac322-1282-4d89-8a49-f3a5624493ea1762171752270-2025_11_03-13.09.19.936/source.csv +255 -0
  8. 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f1b4c573-86a5-4c21-a501-9fb3be4a68881763632584824-2025_11_20-10.56.31.891/source.csv +0 -0
  9. 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f508ed97-76c1-4935-95ed-d4393099e6361753128212083-2025_07_21-22.03.39.166/source.csv +0 -0
  10. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-0f5513f7-8bc9-4c5d-856d-79d92f75113d1751284706913-2025_06_30-13.59.01.459/source.csv +52 -0
  11. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-17a23500-007e-4825-8127-4f0062137ef91759750602496-2025_10_06-13.37.19.164/source.csv +0 -0
  12. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-28f4aa5c-0534-40eb-ae05-51501d68e4871752860706222-2025_07_18-19.45.48.539/source.csv +90 -0
  13. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3aab53c4-8c45-4083-87ad-e991570a4f5b1752851966968-2025_07_18-17.20.32.773/source.csv +0 -0
  14. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-73ddfe20-a667-477d-9924-94f7208128f81752186339186-2025_07_11-00.25.58.835/source.csv +12 -0
  15. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-7c1bdcf0-d594-4018-8499-7d2ed33930611752094287328-2025_07_09-22.51.39.315/source.csv +216 -0
  16. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-7d09022e-0451-4d5a-95fd-fe8f629e1b4b1757071522446-2025_09_05-13.26.09.836/source.csv +0 -0
  17. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-825aa81a-f8dc-4fd3-8ed5-69638fcbfc5f1759823186564-2025_10_07-09.46.57.798/source.csv +0 -0
  18. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-9cdba2ed-e3b9-400c-aa61-3ca40652e83b1753717763365-2025_07_28-17.49.33.649/source.csv +400 -0
  19. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-bebf29de-c50f-45f7-b90b-66f518a4cf1c1758196766807-2025_09_18-14.00.11.582/source.csv +71 -0
  20. 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-d4ecca31-879c-4879-b2a7-b7463e4327b91757416440874-2025_09_09-13.15.15.617/source.csv +7 -0
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-03e95750-d1d7-4aba-ba4c-b80d732967351767624280770-2026_01_05-15.44.46.878/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-12ac3267-b673-44fd-8ea3-37e3e74cb0101755540018956-2025_08_18-20.00.27.475/source.csv ADDED
@@ -0,0 +1,566 @@
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+ 2,788,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"8:00:27 PM [info] Activating crowd-code\n8:00:27 PM [info] Recording started\n8:00:27 PM [info] Initializing git provider using file system watchers...\n",Log,tab
+ 3,1099,"TERMINAL",0,0,"python3",,terminal_focus
+ 4,1105,"TERMINAL",0,0,"bash",,terminal_focus
+ 5,2785,"test/test_nan.ipynb",0,0,"# Restore a dynamics checkpoint and enable sowing\nimport os\nfrom typing import Dict\n\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nimport optax\nimport orbax.checkpoint as ocp\nimport grain\n\nfrom utils.dataloader import get_dataloader\nfrom models.lam import LatentActionModel\n\n# Adjust to your checkpoint directory, dataset directory, and dynamics type\nckpt_dir = ""/fast/project/HFMI_SynergyUnit/jafar_ws/checkpoints/coinrun/lam/train_lam_coinrun_reproduction_20067/100000_ckpt""\ndata_dir = ""/fast/project/HFMI_SynergyUnit/jafar_ws/data/coinrun/array_records_10m""\nnum_steps: int = 200_000\nseed: int = 0\nseq_len: int = 16\nimage_channels: int = 3\nimage_height: int = 64\nimage_width: int = 64\nsave_ckpt: bool = False\nrestore_ckpt: bool = False\n# Optimization\nbatch_size: int = 36\nvq_beta: float = 0.25\ninit_lr: float = 0.0\nmax_lr: float = 3e-5\ndecay_end: float = 0.0\nwsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n)\nwarmup_steps: int = 5000\nlr_schedule: str = ""wsd"" # supported options: wsd, cos\nvq_reset_thresh: int = 50\n# LAM\nmodel_dim: int = 512\nffn_dim: int = 2048\nlatent_dim: int = 32\nnum_latents: int = 6\npatch_size: int = 16\nnum_blocks: int = 4\nnum_heads: int = 8\ndropout: float = 0.0\ncodebook_dropout: float = 0.0\nparam_dtype = jnp.float32\ndtype = jnp.bfloat16\n# Logging\nlog_interval: int = 5\nlog_image_interval: int = 250\nuse_flash_attention: bool = True\n\n# Build model graph matching the checkpoint\nrng = jax.random.key(seed)\nrng, _rng = jax.random.split(rng)\nrngs = nnx.Rngs(_rng)\nlam = LatentActionModel(\n in_dim=image_channels,\n model_dim=model_dim,\n ffn_dim=ffn_dim,\n latent_dim=latent_dim,\n num_latents=num_latents,\n patch_size=patch_size,\n num_blocks=num_blocks,\n num_heads=num_heads,\n dropout=dropout,\n codebook_dropout=codebook_dropout,\n param_dtype=param_dtype,\n dtype=dtype,\n use_flash_attention=use_flash_attention,\n rngs=rngs,\n)\n\n# Optimizer (matches training opt hyperparams; lr value is irrelevant for restore)\ntx = optax.adamw(\n learning_rate=max_lr,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=param_dtype,\n)\noptimizer = nnx.Optimizer(lam, tx)\n",python,tab
+ 6,325278,"test/test_nan.ipynb",186,0,"",python,selection_mouse
+ 7,325279,"test/test_nan.ipynb",185,0,"",python,selection_command
+ 8,331804,"test/test_nan.ipynb",1007,8,"schedule",python,selection_command
+ 9,334786,"test/test_nan.ipynb",968,8,"schedule",python,selection_command
+ 10,338868,"test/test_nan.ipynb",1007,8,"schedule",python,selection_command
+ 11,341147,"test/test_nan.ipynb",1014,0,"",python,selection_command
+ 12,341595,"test/test_nan.ipynb",1004,0,"",python,selection_command
+ 13,343378,"test/test_nan.ipynb",0,0,"# Restore latest checkpoint: optimizer and dataloader state, like in training\nfrom typing import cast\n\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n)\nhandler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n)\nhandler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n)\nhandler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n)\n\nckpt_mgr = ocp.CheckpointManager(\n directory=ckpt_dir,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry,\n)\n\n# Recreate dataloader and iterator exactly like training\narray_record_files = [\n os.path.join(data_dir, x)\n for x in os.listdir(data_dir)\n if x.endswith("".array_record"")\n]\ngrain_dataloader = get_dataloader(\n array_record_files,\n seq_len,\n batch_size,\n image_height,\n image_width,\n image_channels,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=seed,\n)\ninitial_state = grain_dataloader._create_initial_state()\nloader_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n# Restore optimizer and dataloader iterator states\nabstract_optimizer = nnx.eval_shape(lambda: optimizer)\nabstract_optimizer_state = nnx.state(abstract_optimizer)\nrestored = ckpt_mgr.restore(\n ckpt_mgr.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(loader_iterator), # type: ignore\n ),\n)\n\nnnx.update(optimizer, restored[""model_state""]) # type: ignore\nloader_iterator = restored[""dataloader_state""]\nstep = ckpt_mgr.latest_step() or 0\nckpt_mgr.close()\n\n# Convenience handle\nlam = optimizer.model\nprint(f""Restored optimizer and dataloader at step {step}."")\n",python,tab
+ 14,347677,"test/test_nan.ipynb",0,0,"",python,tab
+ 15,348788,"test/test_nan.ipynb",2041,0,"",python,selection_command
+ 16,350687,"test/test_nan.ipynb",2059,0,"",python,selection_command
+ 17,350907,"test/test_nan.ipynb",2085,0,"",python,selection_command
+ 18,350927,"test/test_nan.ipynb",2097,0,"",python,selection_command
+ 19,350996,"test/test_nan.ipynb",2109,0,"",python,selection_command
+ 20,351167,"test/test_nan.ipynb",2132,0,"",python,selection_command
+ 21,351315,"test/test_nan.ipynb",2158,0,"",python,selection_command
+ 22,351707,"test/test_nan.ipynb",2160,0,"",python,selection_command
+ 23,352067,"test/test_nan.ipynb",2158,0,"",python,selection_command
+ 24,360027,"test/test_nan.ipynb",2081,0,"",python,selection_mouse
+ 25,361167,"test/test_nan.ipynb",2077,0,"",python,selection_command
+ 26,361336,"test/test_nan.ipynb",2077,6,"",python,content
+ 27,364567,"test/test_nan.ipynb",2076,0,"",python,selection_command
+ 28,366495,"test/test_nan.ipynb",2077,0,"max_lr",python,content
+ 29,366497,"test/test_nan.ipynb",2077,0,"",python,selection_command
+ 30,376216,"test/test_nan.ipynb",0,0,"%pwd",python,tab
+ 31,377207,"test/test_nan.ipynb",0,0,"",python,tab
+ 32,379207,"test/test_nan.ipynb",2057,0,"",python,selection_command
+ 33,380927,"test/test_nan.ipynb",2040,0,"\n",python,content
+ 34,381867,"test/test_nan.ipynb",2040,0,"\n",python,content
+ 35,384135,"test/test_nan.ipynb",2041,0,"l",python,content
+ 36,384136,"test/test_nan.ipynb",2042,0,"",python,selection_keyboard
+ 37,384176,"test/test_nan.ipynb",2042,0,"r",python,content
+ 38,384177,"test/test_nan.ipynb",2043,0,"",python,selection_keyboard
+ 39,384387,"test/test_nan.ipynb",2043,0,"_",python,content
+ 40,384388,"test/test_nan.ipynb",2044,0,"",python,selection_keyboard
+ 41,384707,"test/test_nan.ipynb",2044,0,"f",python,content
+ 42,384709,"test/test_nan.ipynb",2045,0,"",python,selection_keyboard
+ 43,384792,"test/test_nan.ipynb",2045,0,"n",python,content
+ 44,384793,"test/test_nan.ipynb",2046,0,"",python,selection_keyboard
+ 45,384911,"test/test_nan.ipynb",2046,0," ",python,content
+ 46,384912,"test/test_nan.ipynb",2047,0,"",python,selection_keyboard
+ 47,385055,"test/test_nan.ipynb",2047,0,"=",python,content
+ 48,385056,"test/test_nan.ipynb",2048,0,"",python,selection_keyboard
+ 49,385127,"test/test_nan.ipynb",2048,0," ",python,content
+ 50,385128,"test/test_nan.ipynb",2049,0,"",python,selection_keyboard
+ 51,386627,"test/test_nan.ipynb",2049,0,"g",python,content
+ 52,386628,"test/test_nan.ipynb",2050,0,"",python,selection_keyboard
+ 53,386707,"test/test_nan.ipynb",2050,0,"e",python,content
+ 54,386708,"test/test_nan.ipynb",2051,0,"",python,selection_keyboard
+ 55,386787,"test/test_nan.ipynb",2051,0,"t",python,content
+ 56,386788,"test/test_nan.ipynb",2052,0,"",python,selection_keyboard
+ 57,387024,"test/test_nan.ipynb",2052,0,"_",python,content
+ 58,387025,"test/test_nan.ipynb",2053,0,"",python,selection_keyboard
+ 59,387227,"test/test_nan.ipynb",2053,0,"l",python,content
+ 60,387228,"test/test_nan.ipynb",2054,0,"",python,selection_keyboard
+ 61,387302,"test/test_nan.ipynb",2054,0,"r",python,content
+ 62,387303,"test/test_nan.ipynb",2055,0,"",python,selection_keyboard
+ 63,387576,"test/test_nan.ipynb",2055,0,"_",python,content
+ 64,387577,"test/test_nan.ipynb",2056,0,"",python,selection_keyboard
+ 65,387696,"test/test_nan.ipynb",2056,0,"s",python,content
+ 66,387697,"test/test_nan.ipynb",2057,0,"",python,selection_keyboard
+ 67,387791,"test/test_nan.ipynb",2057,0,"c",python,content
+ 68,387792,"test/test_nan.ipynb",2058,0,"",python,selection_keyboard
+ 69,387847,"test/test_nan.ipynb",2058,0,"h",python,content
+ 70,387848,"test/test_nan.ipynb",2059,0,"",python,selection_keyboard
+ 71,388047,"test/test_nan.ipynb",2059,0,"e",python,content
+ 72,388048,"test/test_nan.ipynb",2060,0,"",python,selection_keyboard
+ 73,388049,"test/test_nan.ipynb",2060,0,"d",python,content
+ 74,388049,"test/test_nan.ipynb",2061,0,"",python,selection_keyboard
+ 75,388258,"test/test_nan.ipynb",2061,0,"u",python,content
+ 76,388258,"test/test_nan.ipynb",2062,0,"",python,selection_keyboard
+ 77,388259,"test/test_nan.ipynb",2062,0,"l",python,content
+ 78,388260,"test/test_nan.ipynb",2063,0,"",python,selection_keyboard
+ 79,388260,"test/test_nan.ipynb",2063,0,"e",python,content
+ 80,388261,"test/test_nan.ipynb",2064,0,"",python,selection_keyboard
+ 81,388447,"test/test_nan.ipynb",2064,0,"{}",python,content
+ 82,388448,"test/test_nan.ipynb",2065,0,"",python,selection_keyboard
+ 83,388795,"test/test_nan.ipynb",2064,2,"",python,content
+ 84,389027,"test/test_nan.ipynb",2064,0,"()",python,content
+ 85,389028,"test/test_nan.ipynb",2065,0,"",python,selection_keyboard
+ 86,389975,"test/test_nan.ipynb",2065,0,"\n \n",python,content
+ 87,390187,"test/test_nan.ipynb",2070,0,"l",python,content
+ 88,390188,"test/test_nan.ipynb",2071,0,"",python,selection_keyboard
+ 89,390547,"test/test_nan.ipynb",2071,0,"r",python,content
+ 90,390548,"test/test_nan.ipynb",2072,0,"",python,selection_keyboard
+ 91,391007,"test/test_nan.ipynb",2072,0,"_",python,content
+ 92,391008,"test/test_nan.ipynb",2073,0,"",python,selection_keyboard
+ 93,391190,"test/test_nan.ipynb",2073,0,"s",python,content
+ 94,391191,"test/test_nan.ipynb",2074,0,"",python,selection_keyboard
+ 95,391192,"test/test_nan.ipynb",2074,0,"c",python,content
+ 96,391193,"test/test_nan.ipynb",2075,0,"",python,selection_keyboard
+ 97,391283,"test/test_nan.ipynb",2075,0,"h",python,content
+ 98,391284,"test/test_nan.ipynb",2076,0,"",python,selection_keyboard
+ 99,391387,"test/test_nan.ipynb",2076,0,"e",python,content
+ 100,391388,"test/test_nan.ipynb",2077,0,"",python,selection_keyboard
+ 101,391407,"test/test_nan.ipynb",2077,0,"d",python,content
+ 102,391408,"test/test_nan.ipynb",2078,0,"",python,selection_keyboard
+ 103,391535,"test/test_nan.ipynb",2078,0,"u",python,content
+ 104,391536,"test/test_nan.ipynb",2079,0,"",python,selection_keyboard
+ 105,391608,"test/test_nan.ipynb",2079,0,"l",python,content
+ 106,391609,"test/test_nan.ipynb",2080,0,"",python,selection_keyboard
+ 107,391695,"test/test_nan.ipynb",2080,0,"e",python,content
+ 108,391696,"test/test_nan.ipynb",2081,0,"",python,selection_keyboard
+ 109,391827,"test/test_nan.ipynb",2081,0,",",python,content
+ 110,391828,"test/test_nan.ipynb",2082,0,"",python,selection_keyboard
+ 111,392035,"test/test_nan.ipynb",2082,0,"\n ",python,content
+ 112,394907,"test/test_nan.ipynb",2087,0,"i",python,content
+ 113,394908,"test/test_nan.ipynb",2088,0,"",python,selection_keyboard
+ 114,394947,"test/test_nan.ipynb",2088,0,"n",python,content
+ 115,394948,"test/test_nan.ipynb",2089,0,"",python,selection_keyboard
+ 116,395075,"test/test_nan.ipynb",2089,0,"i",python,content
+ 117,395076,"test/test_nan.ipynb",2090,0,"",python,selection_keyboard
+ 118,395187,"test/test_nan.ipynb",2090,0,"t",python,content
+ 119,395188,"test/test_nan.ipynb",2091,0,"",python,selection_keyboard
+ 120,395415,"test/test_nan.ipynb",2091,0,"_",python,content
+ 121,395416,"test/test_nan.ipynb",2092,0,"",python,selection_keyboard
+ 122,395607,"test/test_nan.ipynb",2092,0,"l",python,content
+ 123,395608,"test/test_nan.ipynb",2093,0,"",python,selection_keyboard
+ 124,395755,"test/test_nan.ipynb",2093,0,"r",python,content
+ 125,395756,"test/test_nan.ipynb",2094,0,"",python,selection_keyboard
+ 126,395835,"test/test_nan.ipynb",2094,0,",",python,content
+ 127,395836,"test/test_nan.ipynb",2095,0,"",python,selection_keyboard
+ 128,396307,"test/test_nan.ipynb",2095,0,"\n ",python,content
+ 129,396576,"test/test_nan.ipynb",2100,0,"m",python,content
+ 130,396577,"test/test_nan.ipynb",2101,0,"",python,selection_keyboard
+ 131,396656,"test/test_nan.ipynb",2101,0,"a",python,content
+ 132,396657,"test/test_nan.ipynb",2102,0,"",python,selection_keyboard
+ 133,396829,"test/test_nan.ipynb",2102,0,"x",python,content
+ 134,396830,"test/test_nan.ipynb",2103,0,"",python,selection_keyboard
+ 135,396987,"test/test_nan.ipynb",2103,0,"_",python,content
+ 136,396988,"test/test_nan.ipynb",2104,0,"",python,selection_keyboard
+ 137,397223,"test/test_nan.ipynb",2104,0,"l",python,content
+ 138,397224,"test/test_nan.ipynb",2105,0,"",python,selection_keyboard
+ 139,397315,"test/test_nan.ipynb",2105,0,"r",python,content
+ 140,397316,"test/test_nan.ipynb",2106,0,"",python,selection_keyboard
+ 141,397415,"test/test_nan.ipynb",2106,0,",",python,content
+ 142,397416,"test/test_nan.ipynb",2107,0,"",python,selection_keyboard
+ 143,398196,"test/test_nan.ipynb",2107,0,"\n ",python,content
+ 144,398675,"test/test_nan.ipynb",2112,0,"d",python,content
+ 145,398676,"test/test_nan.ipynb",2113,0,"",python,selection_keyboard
+ 146,398835,"test/test_nan.ipynb",2113,0,"e",python,content
+ 147,398836,"test/test_nan.ipynb",2114,0,"",python,selection_keyboard
+ 148,398907,"test/test_nan.ipynb",2114,0,"c",python,content
+ 149,398908,"test/test_nan.ipynb",2115,0,"",python,selection_keyboard
+ 150,399015,"test/test_nan.ipynb",2115,0,"a",python,content
+ 151,399016,"test/test_nan.ipynb",2116,0,"",python,selection_keyboard
+ 152,399115,"test/test_nan.ipynb",2116,0,"y",python,content
+ 153,399116,"test/test_nan.ipynb",2117,0,"",python,selection_keyboard
+ 154,399356,"test/test_nan.ipynb",2117,0,"_",python,content
+ 155,399357,"test/test_nan.ipynb",2118,0,"",python,selection_keyboard
+ 156,399542,"test/test_nan.ipynb",2118,0,"e",python,content
+ 157,399543,"test/test_nan.ipynb",2119,0,"",python,selection_keyboard
+ 158,399667,"test/test_nan.ipynb",2119,0,"n",python,content
+ 159,399668,"test/test_nan.ipynb",2120,0,"",python,selection_keyboard
+ 160,399739,"test/test_nan.ipynb",2120,0,"d",python,content
+ 161,399740,"test/test_nan.ipynb",2121,0,"",python,selection_keyboard
+ 162,399867,"test/test_nan.ipynb",2121,0,",",python,content
+ 163,399868,"test/test_nan.ipynb",2122,0,"",python,selection_keyboard
+ 164,400175,"test/test_nan.ipynb",2122,0,"\n ",python,content
+ 165,400447,"test/test_nan.ipynb",2127,0,"n",python,content
+ 166,400448,"test/test_nan.ipynb",2128,0,"",python,selection_keyboard
+ 167,400676,"test/test_nan.ipynb",2128,0,"u",python,content
+ 168,400677,"test/test_nan.ipynb",2129,0,"",python,selection_keyboard
+ 169,400943,"test/test_nan.ipynb",2129,0,"m",python,content
+ 170,400944,"test/test_nan.ipynb",2130,0,"",python,selection_keyboard
+ 171,401175,"test/test_nan.ipynb",2130,0,"_",python,content
+ 172,401175,"test/test_nan.ipynb",2131,0,"",python,selection_keyboard
+ 173,401267,"test/test_nan.ipynb",2131,0,"s",python,content
+ 174,401268,"test/test_nan.ipynb",2132,0,"",python,selection_keyboard
+ 175,401435,"test/test_nan.ipynb",2132,0,"t",python,content
+ 176,401436,"test/test_nan.ipynb",2133,0,"",python,selection_keyboard
+ 177,401507,"test/test_nan.ipynb",2133,0,"p",python,content
+ 178,401508,"test/test_nan.ipynb",2134,0,"",python,selection_keyboard
+ 179,401515,"test/test_nan.ipynb",2134,0,"e",python,content
+ 180,401515,"test/test_nan.ipynb",2135,0,"",python,selection_keyboard
+ 181,401767,"test/test_nan.ipynb",2135,0,"s",python,content
+ 182,401768,"test/test_nan.ipynb",2136,0,"",python,selection_keyboard
+ 183,402067,"test/test_nan.ipynb",2135,0,"",python,selection_command
+ 184,404783,"train_lam.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\njax.config.update(""jax_transfer_guard"", ""allow"")\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n vq_reset_thresh: int = 50\n # LAM\n model_dim: int = 512\n ffn_dim: int = 2048\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 4\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_lam""\n tags: list[str] = field(default_factory=lambda: [""lam""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n wandb_id: str = """"\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(\n model: LatentActionModel, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, jax.Array, dict]]:\n # --- Compute loss ---\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n gt_future_frames = gt[:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@nnx.jit\ndef train_step(\n lam: LatentActionModel,\n optimizer: nnx.Optimizer,\n inputs: dict,\n action_last_active: jax.Array,\n rng: jax.Array,\n) -> tuple[jax.Array, jax.Array, jax.Array, 
dict]:\n def loss_fn(\n model: LatentActionModel,\n ) -> tuple[jax.Array, tuple[jax.Array, jax.Array, dict]]:\n return lam_loss_fn(model, inputs)\n\n # --- Update model ---\n (loss, (recon, idx_counts, metrics)), grads = nnx.value_and_grad(\n loss_fn, has_aux=True\n )(lam)\n optimizer.update(grads)\n\n # --- Reset inactive latent actions ---\n codebook = lam.vq.codebook\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook.value\n )\n lam.vq.codebook.value = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n ffn_dim=args.ffn_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n # Count parameters\n _, params, _ = nnx.split(lam, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.param_dtype, # moments in full precision\n )\n optimizer = nnx.Optimizer(lam, tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = 
nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n action_last_active = jnp.zeros(args.num_latents, dtype=jnp.int32)\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n inputs = dict(videos=videos, rng=_rng)\n rng, _rng = jax.random.split(rng)\n loss, recon, action_last_active, metrics = train_step(\n lam, optimizer, inputs, action_last_active, _rng\n )\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0, 
1:].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
+ 185,409447,"train_lam.py",502,0,"",python,selection_command
+ 186,409727,"train_lam.py",6619,0,"",python,selection_command
+ 187,409907,"train_lam.py",502,0,"",python,selection_command
+ 188,410427,"train_lam.py",6619,0,"",python,selection_command
+ 189,411011,"train_lam.py",6601,34," lr_schedule = get_lr_schedule(",python,selection_command
+ 190,411091,"train_lam.py",6601,60," lr_schedule = get_lr_schedule(\n args.lr_schedule,",python,selection_command
+ 191,411349,"train_lam.py",6601,82," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,",python,selection_command
+ 192,411371,"train_lam.py",6601,103," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,",python,selection_command
+ 193,411412,"train_lam.py",6601,127," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,",python,selection_command
+ 194,411451,"train_lam.py",6601,151," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,",python,selection_command
+ 195,411467,"train_lam.py",6601,178," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,",python,selection_command
+ 196,411491,"train_lam.py",6601,208," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,",python,selection_command
+ 197,411651,"train_lam.py",6601,214," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )",python,selection_command
+ 198,412171,"train_lam.py",6601,208," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,",python,selection_command
+ 199,412387,"train_lam.py",6601,214," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )",python,selection_command
+ 200,412987,"train_lam.py",6601,0,"",python,selection_command
+ 201,413529,"test/test_nan.ipynb",0,0,"",python,tab
+ 202,416723,"test/test_nan.ipynb",2075,0,"",python,selection_mouse
+ 203,416847,"test/test_nan.ipynb",2070,11,"lr_schedule",python,selection_mouse
+ 204,416967,"test/test_nan.ipynb",2066,17," lr_schedule,\n",python,selection_mouse
+ 205,417544,"test/test_nan.ipynb",2048,0,"",python,selection_mouse
+ 206,417727,"test/test_nan.ipynb",2048,1," ",python,selection_mouse
+ 207,417827,"test/test_nan.ipynb",2041,25,"lr_fn = get_lr_schedule(\n",python,selection_mouse
+ 208,417967,"test/test_nan.ipynb",2041,42,"lr_fn = get_lr_schedule(\n lr_schedule,\n",python,selection_mouse
+ 209,418007,"test/test_nan.ipynb",2041,55,"lr_fn = get_lr_schedule(\n lr_schedule,\n init_lr,\n",python,selection_mouse
+ 210,418047,"test/test_nan.ipynb",2041,67,"lr_fn = get_lr_schedule(\n lr_schedule,\n init_lr,\n max_lr,\n",python,selection_mouse
+ 211,418055,"test/test_nan.ipynb",2041,82,"lr_fn = get_lr_schedule(\n lr_schedule,\n init_lr,\n max_lr,\n decay_end,\n",python,selection_mouse
+ 212,418155,"test/test_nan.ipynb",2041,96,"lr_fn = get_lr_schedule(\n lr_schedule,\n init_lr,\n max_lr,\n decay_end,\n num_stpes\n",python,selection_mouse
+ 213,418407,"test/test_nan.ipynb",2041,98,"lr_fn = get_lr_schedule(\n lr_schedule,\n init_lr,\n max_lr,\n decay_end,\n num_stpes\n)\n",python,selection_mouse
+ 214,419235,"test/test_nan.ipynb",2041,97,"\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n",python,content
+ 215,419247,"test/test_nan.ipynb",2046,0,"",python,selection_command
+ 216,420707,"test/test_nan.ipynb",2042,34," lr_schedule = get_lr_schedule(",python,selection_command
+ 217,420827,"test/test_nan.ipynb",2042,60," lr_schedule = get_lr_schedule(\n args.lr_schedule,",python,selection_command
+ 218,421087,"test/test_nan.ipynb",2042,82," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,",python,selection_command
+ 219,421107,"test/test_nan.ipynb",2042,103," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,",python,selection_command
+ 220,421147,"test/test_nan.ipynb",2042,127," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,",python,selection_command
+ 221,421167,"test/test_nan.ipynb",2042,151," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,",python,selection_command
+ 222,421207,"test/test_nan.ipynb",2042,178," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,",python,selection_command
+ 223,421227,"test/test_nan.ipynb",2042,208," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,",python,selection_command
+ 224,421375,"test/test_nan.ipynb",2042,214," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )",python,selection_command
+ 225,421543,"test/test_nan.ipynb",2042,215," lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n",python,selection_command
+ 226,421775,"test/test_nan.ipynb",2251,4,"",python,content
+ 227,421775,"test/test_nan.ipynb",2221,8," ",python,content
+ 228,421775,"test/test_nan.ipynb",2194,8," ",python,content
+ 229,421775,"test/test_nan.ipynb",2170,8," ",python,content
+ 230,421775,"test/test_nan.ipynb",2146,8," ",python,content
+ 231,421775,"test/test_nan.ipynb",2125,8," ",python,content
+ 232,421775,"test/test_nan.ipynb",2103,8," ",python,content
+ 233,421775,"test/test_nan.ipynb",2077,8," ",python,content
+ 234,421775,"test/test_nan.ipynb",2042,4,"",python,content
+ 235,421776,"test/test_nan.ipynb",2042,0,"",python,selection_command
+ 236,422167,"test/test_nan.ipynb",2041,0,"",python,selection_command
+ 237,422475,"test/test_nan.ipynb",2041,1,"",python,content
+ 238,422707,"test/test_nan.ipynb",2072,0,"",python,selection_command
+ 239,422947,"test/test_nan.ipynb",2094,0,"",python,selection_command
+ 240,422987,"test/test_nan.ipynb",2112,0,"",python,selection_command
+ 241,423027,"test/test_nan.ipynb",2129,0,"",python,selection_command
+ 242,423075,"test/test_nan.ipynb",2149,0,"",python,selection_command
+ 243,423087,"test/test_nan.ipynb",2169,0,"",python,selection_command
+ 244,423127,"test/test_nan.ipynb",2192,0,"",python,selection_command
+ 245,423147,"test/test_nan.ipynb",2218,0,"",python,selection_command
+ 246,423267,"test/test_nan.ipynb",2220,0,"",python,selection_command
+ 247,423867,"test/test_nan.ipynb",2220,1,"",python,content
+ 248,424007,"test/test_nan.ipynb",2218,0,"",python,selection_command
+ 249,424248,"test/test_nan.ipynb",2192,0,"",python,selection_command
+ 250,424295,"test/test_nan.ipynb",2169,0,"",python,selection_command
+ 251,424327,"test/test_nan.ipynb",2149,0,"",python,selection_command
+ 252,424355,"test/test_nan.ipynb",2129,0,"",python,selection_command
+ 253,424387,"test/test_nan.ipynb",2112,0,"",python,selection_command
+ 254,424415,"test/test_nan.ipynb",2094,0,"",python,selection_command
+ 255,424447,"test/test_nan.ipynb",2072,0,"",python,selection_command
+ 256,424487,"test/test_nan.ipynb",2041,0,"",python,selection_command
+ 257,424767,"test/test_nan.ipynb",1958,0,"",python,selection_command
+ 258,424927,"test/test_nan.ipynb",1960,0,"",python,selection_command
+ 259,425187,"test/test_nan.ipynb",1970,0,"",python,selection_command
+ 260,425207,"test/test_nan.ipynb",1971,0,"",python,selection_command
+ 261,425247,"test/test_nan.ipynb",1979,0,"",python,selection_command
+ 262,425275,"test/test_nan.ipynb",1988,0,"",python,selection_command
+ 263,425307,"test/test_nan.ipynb",1992,0,"",python,selection_command
+ 264,425335,"test/test_nan.ipynb",2003,0,"",python,selection_command
+ 265,425567,"test/test_nan.ipynb",2005,0,"",python,selection_command
+ 266,425807,"test/test_nan.ipynb",2004,0,"",python,selection_command
+ 267,426127,"test/test_nan.ipynb",2003,0,"",python,selection_command
+ 268,426208,"test/test_nan.ipynb",2003,1,";",python,selection_command
+ 269,426227,"test/test_nan.ipynb",2003,4,"; lr",python,selection_command
+ 270,426387,"test/test_nan.ipynb",2003,10,"; lr value",python,selection_command
+ 271,426575,"test/test_nan.ipynb",2003,13,"; lr value is",python,selection_command
+ 272,426727,"test/test_nan.ipynb",2003,24,"; lr value is irrelevant",python,selection_command
+ 273,427107,"test/test_nan.ipynb",2003,28,"; lr value is irrelevant for",python,selection_command
+ 274,427527,"test/test_nan.ipynb",2003,36,"; lr value is irrelevant for restore",python,selection_command
+ 275,427747,"test/test_nan.ipynb",2003,36,"",python,content
+ 276,428547,"test/test_nan.ipynb",1958,0,"",python,selection_command
+ 277,428907,"test/test_nan.ipynb",2005,0,"",python,selection_command
+ 278,429187,"test/test_nan.ipynb",2017,0,"",python,selection_command
+ 279,430215,"test/test_nan.ipynb",0,0,"",python,selection_command
+ 280,431287,"test/test_nan.ipynb",50,0,"",python,selection_command
+ 281,431527,"test/test_nan.ipynb",60,0,"",python,selection_command
+ 282,431567,"test/test_nan.ipynb",84,0,"",python,selection_command
+ 283,431590,"test/test_nan.ipynb",85,0,"",python,selection_command
+ 284,431607,"test/test_nan.ipynb",96,0,"",python,selection_command
+ 285,431647,"test/test_nan.ipynb",120,0,"",python,selection_command
+ 286,431687,"test/test_nan.ipynb",143,0,"",python,selection_command
+ 287,431727,"test/test_nan.ipynb",156,0,"",python,selection_command
+ 288,431887,"test/test_nan.ipynb",187,0,"",python,selection_command
+ 289,432027,"test/test_nan.ipynb",200,0,"",python,selection_command
+ 290,432167,"test/test_nan.ipynb",201,0,"",python,selection_command
+ 291,433475,"test/test_nan.ipynb",199,0,"\nfrom utils.lr_schedule import get_lr_schedule",python,content
+ 292,434547,"test/test_nan.ipynb",246,0,"",python,selection_command
+ 293,434727,"test/test_nan.ipynb",200,0,"",python,selection_command
+ 294,435147,"test/test_nan.ipynb",246,0,"",python,selection_command
+ 295,435467,"test/test_nan.ipynb",246,1,"",python,content
+ 296,435527,"test/test_nan.ipynb",200,0,"",python,selection_command
+ 297,435667,"test/test_nan.ipynb",187,0,"",python,selection_command
+ 298,435967,"test/test_nan.ipynb",200,0,"",python,selection_command
+ 299,436315,"test/test_nan.ipynb",199,0,"\n",python,content
+ 300,436895,"test/test_nan.ipynb",201,0,"",python,selection_command
+ 301,437067,"test/test_nan.ipynb",206,0,"",python,selection_command
+ 302,437247,"test/test_nan.ipynb",211,0,"",python,selection_command
+ 303,437429,"test/test_nan.ipynb",212,0,"",python,selection_command
+ 304,438267,"test/test_nan.ipynb",211,0,"",python,selection_command
+ 305,438415,"test/test_nan.ipynb",206,0,"",python,selection_command
+ 306,442867,"test/test_nan.ipynb",206,0,"L",python,content
+ 307,442868,"test/test_nan.ipynb",207,0,"",python,selection_keyboard
+ 308,443555,"test/test_nan.ipynb",206,1,"",python,content
+ 309,443739,"test/test_nan.ipynb",206,0,"l",python,content
+ 310,443740,"test/test_nan.ipynb",207,0,"",python,selection_keyboard
+ 311,443863,"test/test_nan.ipynb",207,0,"r",python,content
+ 312,443863,"test/test_nan.ipynb",208,0,"",python,selection_keyboard
+ 313,444355,"test/test_nan.ipynb",208,0,"_",python,content
+ 314,444355,"test/test_nan.ipynb",209,0,"",python,selection_keyboard
+ 315,444884,"test/test_nan.ipynb",208,0,"",python,selection_command
+ 316,445667,"test/test_nan.ipynb",214,0,"",python,selection_command
+ 317,445967,"test/test_nan.ipynb",206,0,"",python,selection_command
+ 318,448816,"utils/lr_utils.py",0,0,"import optax\n\n\ndef get_lr_schedule(\n lr_schedule: str,\n init_lr: float,\n max_lr: float,\n decay_end: float,\n total_steps: int,\n warmup_steps: int,\n wsd_decay_steps: int,\n) -> optax.Schedule:\n supported_schedules = [""wsd"", ""cos""]\n if lr_schedule == ""cos"":\n assert (\n warmup_steps <= total_steps\n ), ""Warmup steps can't be greater than total steps.""\n return optax.warmup_cosine_decay_schedule(\n init_value=init_lr,\n peak_value=max_lr,\n warmup_steps=warmup_steps,\n decay_steps=total_steps, # Note: decay_steps includes the warmup steps, so we need to pass total value\n end_value=decay_end,\n )\n elif lr_schedule == ""wsd"":\n assert (\n warmup_steps + wsd_decay_steps <= total_steps\n ), ""Warmup and decay period is longer than total steps.""\n schedules = [\n optax.linear_schedule(\n init_value=init_lr, end_value=max_lr, transition_steps=warmup_steps\n ),\n optax.constant_schedule(value=max_lr),\n optax.linear_schedule(\n init_value=max_lr, end_value=decay_end, transition_steps=wsd_decay_steps\n ),\n ]\n boundaries = [warmup_steps, total_steps - wsd_decay_steps]\n return optax.join_schedules(schedules, boundaries)\n else:\n raise ValueError(\n f""Learning rate schedule not supported. Please use one of {supported_schedules}""\n )\n",python,tab
+ 319,449457,"test/test_nan.ipynb",0,0,"",python,tab
+ 320,449847,"test/test_nan.ipynb",214,0,"",python,selection_command
+ 321,450007,"test/test_nan.ipynb",215,0,"",python,selection_command
+ 322,450807,"test/test_nan.ipynb",214,0,"",python,selection_command
+ 323,450947,"test/test_nan.ipynb",206,0,"",python,selection_command
+ 324,451295,"test/test_nan.ipynb",206,1,"l",python,selection_command
+ 325,451367,"test/test_nan.ipynb",206,8,"lr_utils",python,selection_command
+ 326,451527,"test/test_nan.ipynb",206,9,"lr_utils.",python,selection_command
+ 327,451947,"test/test_nan.ipynb",206,20,"lr_utils.lr_schedule",python,selection_command
+ 328,452140,"test/test_nan.ipynb",206,20,"",python,content
+ 329,452675,"test/test_nan.ipynb",206,0,"u",python,content
+ 330,452675,"test/test_nan.ipynb",207,0,"",python,selection_keyboard
+ 331,452787,"test/test_nan.ipynb",207,0,"t",python,content
+ 332,452787,"test/test_nan.ipynb",208,0,"",python,selection_keyboard
+ 333,452915,"test/test_nan.ipynb",208,0,"i",python,content
+ 334,452916,"test/test_nan.ipynb",209,0,"",python,selection_keyboard
+ 335,453095,"test/test_nan.ipynb",209,0,"s",python,content
+ 336,453095,"test/test_nan.ipynb",210,0,"",python,selection_keyboard
+ 337,453535,"test/test_nan.ipynb",209,1,"",python,content
+ 338,453707,"test/test_nan.ipynb",209,0,"l",python,content
+ 339,453708,"test/test_nan.ipynb",210,0,"",python,selection_keyboard
+ 340,453807,"test/test_nan.ipynb",210,0,"s",python,content
+ 341,453807,"test/test_nan.ipynb",211,0,"",python,selection_keyboard
+ 342,453907,"test/test_nan.ipynb",211,0,".",python,content
+ 343,453908,"test/test_nan.ipynb",212,0,"",python,selection_keyboard
+ 344,454103,"test/test_nan.ipynb",212,0,"l",python,content
+ 345,454104,"test/test_nan.ipynb",213,0,"",python,selection_keyboard
+ 346,454147,"test/test_nan.ipynb",213,0,"r",python,content
+ 347,454147,"test/test_nan.ipynb",214,0,"",python,selection_keyboard
+ 348,454355,"test/test_nan.ipynb",214,0,"_",python,content
+ 349,454355,"test/test_nan.ipynb",215,0,"",python,selection_keyboard
+ 350,454555,"test/test_nan.ipynb",215,0,"s",python,content
+ 351,454555,"test/test_nan.ipynb",216,0,"",python,selection_keyboard
+ 352,454816,"test/test_nan.ipynb",215,1,"",python,content
+ 353,454995,"test/test_nan.ipynb",215,0,"u",python,content
+ 354,454995,"test/test_nan.ipynb",216,0,"",python,selection_keyboard
+ 355,455043,"test/test_nan.ipynb",216,0,"t",python,content
+ 356,455044,"test/test_nan.ipynb",217,0,"",python,selection_keyboard
+ 357,455147,"test/test_nan.ipynb",217,0,"i",python,content
+ 358,455147,"test/test_nan.ipynb",218,0,"",python,selection_keyboard
+ 359,455251,"test/test_nan.ipynb",218,0,"s",python,content
+ 360,455252,"test/test_nan.ipynb",219,0,"",python,selection_keyboard
+ 361,455252,"test/test_nan.ipynb",219,0,"l",python,content
+ 362,455252,"test/test_nan.ipynb",220,0,"",python,selection_keyboard
+ 363,455811,"test/test_nan.ipynb",219,1,"",python,content
+ 364,455944,"test/test_nan.ipynb",218,1,"",python,content
+ 365,456131,"test/test_nan.ipynb",218,0,"l",python,content
+ 366,456132,"test/test_nan.ipynb",219,0,"",python,selection_keyboard
+ 367,456147,"test/test_nan.ipynb",219,0,"s",python,content
+ 368,456147,"test/test_nan.ipynb",220,0,"",python,selection_keyboard
+ 369,456356,"test/test_nan.ipynb",219,0,"",python,selection_command
+ 370,458754,"test/test_nan.ipynb",221,0,"",python,selection_command
+ 371,458761,"test/test_nan.ipynb",228,0,"",python,selection_command
+ 372,459227,"test/test_nan.ipynb",2062,0,"",python,selection_command
+ 373,460370,"test/test_nan.ipynb",2093,0,"",python,selection_command
+ 374,460617,"test/test_nan.ipynb",2115,0,"",python,selection_command
+ 375,460650,"test/test_nan.ipynb",2133,0,"",python,selection_command
+ 376,460695,"test/test_nan.ipynb",2150,0,"",python,selection_command
+ 377,460803,"test/test_nan.ipynb",2170,0,"",python,selection_command
+ 378,461115,"test/test_nan.ipynb",2150,0,"",python,selection_command
+ 379,461416,"test/test_nan.ipynb",2170,0,"",python,selection_command
+ 380,461533,"test/test_nan.ipynb",2190,0,"",python,selection_command
+ 381,461863,"test/test_nan.ipynb",2213,0,"",python,selection_command
+ 382,462229,"test/test_nan.ipynb",2225,0,"",python,selection_command
+ 383,463971,"test/test_nan.ipynb",2227,0,"",python,selection_command
+ 384,464681,"test/test_nan.ipynb",2225,0,"",python,selection_command
+ 385,464934,"test/test_nan.ipynb",2213,0,"",python,selection_command
+ 386,464959,"test/test_nan.ipynb",2190,0,"",python,selection_command
+ 387,465000,"test/test_nan.ipynb",2170,0,"",python,selection_command
+ 388,465019,"test/test_nan.ipynb",2150,0,"",python,selection_command
+ 389,465189,"test/test_nan.ipynb",2133,0,"",python,selection_command
+ 390,465345,"test/test_nan.ipynb",2115,0,"",python,selection_command
+ 391,465482,"test/test_nan.ipynb",2093,0,"",python,selection_command
+ 392,465661,"test/test_nan.ipynb",2062,0,"",python,selection_command
+ 393,469342,"test/test_nan.ipynb",0,0,"",python,tab
+ 394,470114,"test/test_nan.ipynb",0,0,"",python,tab
+ 395,470720,"test/test_nan.ipynb",0,0,"",python,tab
+ 396,470912,"test/test_nan.ipynb",0,0,"",python,tab
+ 397,477040,"test/test_nan.ipynb",2093,0,"",python,selection_command
+ 398,477041,"test/test_nan.ipynb",2079,0,"",python,selection_command
+ 399,477042,"test/test_nan.ipynb",2083,0,"",python,selection_command
+ 400,477042,"test/test_nan.ipynb",2083,1,"a",python,selection_command
+ 401,477043,"test/test_nan.ipynb",2083,2,"ar",python,selection_command
+ 402,477043,"test/test_nan.ipynb",2083,3,"arg",python,selection_command
+ 403,477044,"test/test_nan.ipynb",2083,4,"args",python,selection_command
+ 404,477044,"test/test_nan.ipynb",2083,4,"args",python,selection_command
+ 405,477045,"test/test_nan.ipynb",2083,5,"args.",python,selection_command
+ 406,477046,"test/test_nan.ipynb",2083,5,"args.",python,selection_command
+ 407,477046,"test/test_nan.ipynb",2083,5,"args.",python,selection_command
+ 408,477046,"test/test_nan.ipynb",2083,5,"args.",python,selection_command
+ 409,477047,"test/test_nan.ipynb",2083,5,"args.",python,selection_command
+ 410,477047,"test/test_nan.ipynb",2083,5,"args.",python,selection_command
+ 411,477047,"test/test_nan.ipynb",2080,4," a",python,selection_command
+ 412,477048,"test/test_nan.ipynb",2083,5,"args.",python,selection_command
+ 413,477122,"test/test_nan.ipynb",2203,5,"",python,content
+ 414,477122,"test/test_nan.ipynb",2180,5,"",python,content
+ 415,477122,"test/test_nan.ipynb",2160,5,"",python,content
+ 416,477122,"test/test_nan.ipynb",2140,5,"",python,content
+ 417,477122,"test/test_nan.ipynb",2123,5,"",python,content
+ 418,477122,"test/test_nan.ipynb",2105,5,"",python,content
+ 419,477122,"test/test_nan.ipynb",2083,5,"",python,content
+ 420,477143,"test/test_nan.ipynb",2083,0,"",python,selection_command
+ 421,485066,"test/test_nan.ipynb",2052,0,"",python,selection_command
+ 422,485112,"test/test_nan.ipynb",2060,0,"",python,selection_command
+ 423,485287,"test/test_nan.ipynb",2062,0,"",python,selection_command
+ 424,485804,"utils/lr_utils.py",0,0,"",python,tab
+ 425,487249,"test/test_nan.ipynb",0,0,"",python,tab
+ 426,488274,"test/test_nan.ipynb",2093,0,"",python,selection_command
+ 427,488285,"test/test_nan.ipynb",2083,0,"",python,selection_command
+ 428,491359,"test/test_nan.ipynb",2048,11,"lr_schedule",python,selection_command
+ 429,491916,"test/test_nan.ipynb",2058,0,"",python,selection_command
+ 430,492057,"test/test_nan.ipynb",2083,0,"",python,selection_command
+ 431,492512,"test/test_nan.ipynb",2052,0,"",python,selection_command
+ 432,494140,"test/test_nan.ipynb",2058,0,"",python,selection_command
+ 433,494543,"test/test_nan.ipynb",2059,0,"",python,selection_command
+ 434,494855,"test/test_nan.ipynb",2059,0,"_",python,content
+ 435,494856,"test/test_nan.ipynb",2060,0,"",python,selection_keyboard
+ 436,495058,"test/test_nan.ipynb",2060,0,"f",python,content
+ 437,495058,"test/test_nan.ipynb",2061,0,"",python,selection_keyboard
+ 438,495198,"test/test_nan.ipynb",2061,0,"n",python,content
+ 439,495198,"test/test_nan.ipynb",2062,0,"",python,selection_keyboard
+ 440,495273,"test/test_nan.ipynb",2061,0,"",python,selection_command
+ 441,495408,"test/test_nan.ipynb",2095,0,"",python,selection_command
+ 442,495657,"test/test_nan.ipynb",2110,0,"",python,selection_command
+ 443,495693,"test/test_nan.ipynb",2122,0,"",python,selection_command
+ 444,495725,"test/test_nan.ipynb",2137,0,"",python,selection_command
445
+ 445,495756,"test/test_nan.ipynb",2152,0,"",python,selection_command
446
+ 446,495814,"test/test_nan.ipynb",2167,0,"",python,selection_command
447
+ 447,495821,"test/test_nan.ipynb",2185,0,"",python,selection_command
448
+ 448,495859,"test/test_nan.ipynb",2193,0,"",python,selection_command
449
+ 449,495907,"test/test_nan.ipynb",2195,0,"",python,selection_command
450
+ 450,495929,"test/test_nan.ipynb",2209,0,"",python,selection_command
451
+ 451,496180,"test/test_nan.ipynb",2227,0,"",python,selection_command
452
+ 452,496339,"test/test_nan.ipynb",2250,0,"",python,selection_command
453
+ 453,496517,"test/test_nan.ipynb",2227,0,"",python,selection_command
454
+ 454,496722,"test/test_nan.ipynb",2209,0,"",python,selection_command
455
+ 455,496987,"test/test_nan.ipynb",2195,0,"",python,selection_command
456
+ 456,497031,"test/test_nan.ipynb",2193,0,"",python,selection_command
457
+ 457,497040,"test/test_nan.ipynb",2185,0,"",python,selection_command
458
+ 458,497071,"test/test_nan.ipynb",2167,0,"",python,selection_command
459
+ 459,497097,"test/test_nan.ipynb",2152,0,"",python,selection_command
460
+ 460,497129,"test/test_nan.ipynb",2137,0,"",python,selection_command
461
+ 461,497166,"test/test_nan.ipynb",2122,0,"",python,selection_command
462
+ 462,497203,"test/test_nan.ipynb",2110,0,"",python,selection_command
463
+ 463,497229,"test/test_nan.ipynb",2095,0,"",python,selection_command
464
+ 464,497264,"test/test_nan.ipynb",2061,0,"",python,selection_command
465
+ 465,497296,"test/test_nan.ipynb",2014,0,"",python,selection_command
466
+ 466,497385,"test/test_nan.ipynb",2061,0,"",python,selection_command
467
+ 467,497669,"test/test_nan.ipynb",2095,0,"",python,selection_command
468
+ 468,497926,"test/test_nan.ipynb",2110,0,"",python,selection_command
469
+ 469,497952,"test/test_nan.ipynb",2122,0,"",python,selection_command
470
+ 470,497979,"test/test_nan.ipynb",2137,0,"",python,selection_command
471
+ 471,498017,"test/test_nan.ipynb",2152,0,"",python,selection_command
472
+ 472,498075,"test/test_nan.ipynb",2167,0,"",python,selection_command
473
+ 473,498098,"test/test_nan.ipynb",2185,0,"",python,selection_command
474
+ 474,498117,"test/test_nan.ipynb",2193,0,"",python,selection_command
475
+ 475,498144,"test/test_nan.ipynb",2195,0,"",python,selection_command
476
+ 476,498182,"test/test_nan.ipynb",2209,0,"",python,selection_command
477
+ 477,498226,"test/test_nan.ipynb",2227,0,"",python,selection_command
478
+ 478,498362,"test/test_nan.ipynb",2250,0,"",python,selection_command
479
+ 479,498820,"test/test_nan.ipynb",2227,0,"",python,selection_command
480
+ 480,498940,"test/test_nan.ipynb",2231,0,"",python,selection_command
481
+ 481,499088,"test/test_nan.ipynb",2232,0,"",python,selection_command
482
+ 482,499440,"test/test_nan.ipynb",2232,6,"",python,content
483
+ 483,499929,"test/test_nan.ipynb",2232,0,"l",python,content
484
+ 484,499929,"test/test_nan.ipynb",2233,0,"",python,selection_keyboard
485
+ 485,500026,"test/test_nan.ipynb",2233,0,"r",python,content
486
+ 486,500026,"test/test_nan.ipynb",2234,0,"",python,selection_keyboard
487
+ 487,500321,"test/test_nan.ipynb",2234,0,"_",python,content
488
+ 488,500322,"test/test_nan.ipynb",2235,0,"",python,selection_keyboard
489
+ 489,500456,"test/test_nan.ipynb",2235,0,"s",python,content
490
+ 490,500457,"test/test_nan.ipynb",2236,0,"",python,selection_keyboard
491
+ 491,500543,"test/test_nan.ipynb",2236,0,"c",python,content
492
+ 492,500544,"test/test_nan.ipynb",2237,0,"",python,selection_keyboard
493
+ 493,500557,"test/test_nan.ipynb",2237,0,"h",python,content
494
+ 494,500557,"test/test_nan.ipynb",2238,0,"",python,selection_keyboard
495
+ 495,500684,"test/test_nan.ipynb",2238,0,"e",python,content
496
+ 496,500685,"test/test_nan.ipynb",2239,0,"",python,selection_keyboard
497
+ 497,500722,"test/test_nan.ipynb",2239,0,"d",python,content
498
+ 498,500723,"test/test_nan.ipynb",2240,0,"",python,selection_keyboard
499
+ 499,500829,"test/test_nan.ipynb",2240,0,"u",python,content
500
+ 500,500830,"test/test_nan.ipynb",2241,0,"",python,selection_keyboard
501
+ 501,500958,"test/test_nan.ipynb",2241,0,"l",python,content
502
+ 502,500959,"test/test_nan.ipynb",2242,0,"",python,selection_keyboard
503
+ 503,501100,"test/test_nan.ipynb",2242,0,"e",python,content
504
+ 504,501101,"test/test_nan.ipynb",2243,0,"",python,selection_keyboard
505
+ 505,501400,"test/test_nan.ipynb",2243,0,"_",python,content
506
+ 506,501401,"test/test_nan.ipynb",2244,0,"",python,selection_keyboard
507
+ 507,501576,"test/test_nan.ipynb",2244,0,"f",python,content
508
+ 508,501577,"test/test_nan.ipynb",2245,0,"",python,selection_keyboard
509
+ 509,501635,"test/test_nan.ipynb",2245,0,"n",python,content
510
+ 510,501636,"test/test_nan.ipynb",2246,0,"",python,selection_keyboard
511
+ 511,501797,"test/test_nan.ipynb",2245,0,"",python,selection_command
512
+ 512,502157,"test/test_nan.ipynb",2214,0,"",python,selection_command
513
+ 513,504365,"utils/lr_utils.py",0,0,"",python,tab
514
+ 514,507250,"test/test_nan.ipynb",0,0,"",python,tab
515
+ 515,509332,"train_lam.py",0,0,"",python,tab
516
+ 516,512271,"test/test_nan.ipynb",0,0,"",python,tab
517
+ 517,512870,"train_lam.py",0,0,"",python,tab
518
+ 518,513510,"test/test_nan.ipynb",0,0,"",python,tab
519
+ 519,2086061,"train_lam.py",0,0,"",python,tab
520
+ 520,2086423,"train_lam.py",0,13598,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\njax.config.update(""jax_transfer_guard"", ""allow"")\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n vq_reset_thresh: int = 50\n # LAM\n model_dim: int = 512\n ffn_dim: int = 2048\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 4\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_lam""\n tags: list[str] = field(default_factory=lambda: [""lam""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n wandb_id: str = """"\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(\n model: LatentActionModel, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, jax.Array, dict]]:\n # --- Compute loss ---\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n gt_future_frames = gt[:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@nnx.jit\ndef train_step(\n lam: LatentActionModel,\n optimizer: nnx.Optimizer,\n inputs: dict,\n action_last_active: jax.Array,\n rng: jax.Array,\n) -> tuple[jax.Array, jax.Array, jax.Array, 
dict]:\n def loss_fn(\n model: LatentActionModel,\n ) -> tuple[jax.Array, tuple[jax.Array, jax.Array, dict]]:\n return lam_loss_fn(model, inputs)\n\n # --- Update model ---\n (loss, (recon, idx_counts, metrics)), grads = nnx.value_and_grad(\n loss_fn, has_aux=True\n )(lam)\n optimizer.update(grads)\n\n # --- Reset inactive latent actions ---\n codebook = lam.vq.codebook\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook.value\n )\n lam.vq.codebook.value = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n ffn_dim=args.ffn_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n # Count parameters\n _, params, _ = nnx.split(lam, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.param_dtype, # moments in full precision\n )\n optimizer = nnx.Optimizer(lam, tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = 
nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n action_last_active = jnp.zeros(args.num_latents, dtype=jnp.int32)\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n inputs = dict(videos=videos, rng=_rng)\n rng, _rng = jax.random.split(rng)\n loss, recon, action_last_active, metrics = train_step(\n lam, optimizer, inputs, action_last_active, _rng\n )\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0, 
1:].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,selection_command
521
+ 521,2086571,"train_lam.py",13598,0,"",python,selection_command
522
+ 522,2248550,"utils/nn.py",0,0,"import math\nfrom typing import Tuple, Callable, List\n\nfrom flax import nnx\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass SpatioTemporalPositionalEncoding(nnx.Module):\n """"""\n Applies separate sinusoidal positional encodings to the temporal and spatial dimensions.\n """"""\n def __init__(self, d_model: int, max_len: int = 5000):\n self.d_model = d_model\n self.max_len = max_len\n\n pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))\n pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))\n self.pe = nnx.Variable(pe)\n\n def __call__(self, x: jax.Array) -> jax.Array:\n """"""\n Args:\n x: The input tensor of shape (Batch, Time, Space, Dimension).\n\n Returns:\n The input tensor with positional encodings added.\n """"""\n assert x.ndim == 4, f""Input must be 4-dimensional, but got shape {x.shape}""\n\n num_timesteps = x.shape[1]\n num_spatial_patches = x.shape[2]\n\n # Temporal positional encoding: (1, T, 1, D)\n temporal_pe = self.pe.value[None, :num_timesteps, None, :]\n x = x + temporal_pe\n\n # Spatial positional encoding: (1, 1, S, D)\n spatial_pe = self.pe.value[None, None, :num_spatial_patches, :]\n x = x + spatial_pe\n\n return x\n\n\nclass STBlock(nnx.Module):\n def __init__(\n self,\n dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n sow_weights: bool,\n sow_activations: bool,\n ):\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = 
self.spatial_norm(x_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM, sow_weights=self.sow_weights)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_norm(x_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM, sow_weights=self.sow_weights)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n if self.sow_activations:\n self.sow(nnx.Intermediate, 'activations', x_BTNM)\n return x_BTNM\n\n\nclass STTransformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n V: vocabulary size\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n sow_weights: bool = False,\n sow_activations: bool = False,\n sow_logits: bool = False,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n self.sow_logits = sow_logits\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n\n self.pos_enc = SpatioTemporalPositionalEncoding(self.model_dim, max_len=max_len)\n\n self.blocks = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n sow_weights=self.sow_weights,\n sow_activations=self.sow_activations,\n )\n )\n\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM)\n\n x_BTNV = self.output_dense(x_BTNM)\n if self.sow_logits:\n self.sow(nnx.Intermediate, 'logits', x_BTNV)\n return x_BTNV\n\nclass TransformerBlock(nnx.Module):\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n sow_weights: bool,\n sow_activations: bool,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n 
self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n # @nnx.remat\n def __call__(self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM, sow_weights=self.sow_weights)\n z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM, sow_weights=self.sow_weights)\n z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n if self.sow_activations:\n self.sow(nnx.Intermediate, 'activations', x_BTNM)\n\n return x_BTNM\n\nclass Transformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n V: vocabulary size\n F: number of frames in batch\n P: number of patch positions in batch\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n sow_logits: bool = False,\n sow_weights: bool = False,\n sow_activations: bool = False,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n 
self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_logits = sow_logits\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n\n self.pos_enc = SpatioTemporalPositionalEncoding(self.model_dim, max_len=max_len)\n\n self.blocks: List[TransformerBlock] = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n model_dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n sow_weights=self.sow_weights,\n sow_activations=self.sow_activations,\n rngs=rngs,\n )\n )\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM, pos_index)\n\n x_BTNV = self.output_dense(x_BTNM)\n if self.sow_logits:\n self.sow(nnx.Intermediate, 'logits', x_BTNV)\n return x_BTNV\n\ndef normalize(x: jax.Array) -> jax.Array:\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nnx.Module):\n """"""\n Dimension keys:\n D: B * T * N\n K: number of latents\n L: latent dimension\n """"""\n def __init__(\n self, latent_dim: int, num_latents: int, dropout: float, rngs: nnx.Rngs\n ):\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.dropout = dropout\n\n self.codebook = nnx.Param(\n normalize(\n nnx.initializers.lecun_uniform()(\n rngs.params(), (self.num_latents, self.latent_dim)\n )\n )\n )\n self.drop = nnx.Dropout(self.dropout, rngs=rngs)\n\n def __call__(\n self, x_DL: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x_DL = normalize(x_DL)\n normalized_codebook_KL = normalize(self.codebook.value)\n distance_DK = -jnp.matmul(x_DL, normalized_codebook_KL.T)\n if training:\n distance_DK = self.drop(distance_DK)\n\n # --- Get indices and embeddings ---\n indices_D = jnp.argmin(distance_DK, axis=-1)\n z_DL = self.codebook[indices_D]\n\n # --- Straight through estimator ---\n z_q_DL = x_DL + jax.lax.stop_gradient(z_DL - x_DL)\n return z_q_DL, z_DL, x_DL, indices_D\n\n def get_codes(self, indices_E: jax.Array) -> jax.Array:\n return self.codebook[indices_E]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool) -> Callable:\n """"""\n Create an attention function that uses flash attention if enabled.\n\n flax.nnx.MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim),\n but jax.nn.dot_product_attention 
expects (batch, length, num_heads, head_dim). We reshape to\n ensure compatibility. cuDNN's flash attention additionally requires a sequence length that\n is a multiple of 4. We pad the sequence length to the nearest multiple of 4 and mask\n accordingly. Note that cuDNN requires the mask to be broadcast before calling the attention\n function due to strict shape checking.\n """"""\n\n def attention_fn(query_BTHD, key_BSHD, value_BSHD, bias=None, mask_B111=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _merge_batch_dims(x):\n return einops.rearrange(x, ""... l h k -> (...) l h k"")\n\n def _pad(x, pad_size):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n original_shape = query_BTHD.shape\n T = query_BTHD.shape[-3]\n S = key_BSHD.shape[-3]\n\n # Pad to nearest multiple of 4\n Q = ((T + 3) // 4) * 4\n pad_size_Q = Q - T\n K = ((S + 3) // 4) * 4\n pad_size_K = K - S\n\n query_BQHD = _pad(_merge_batch_dims(query_BTHD), pad_size_Q)\n key_BKHD = _pad(_merge_batch_dims(key_BSHD), pad_size_K)\n value_BKHD = _pad(_merge_batch_dims(value_BSHD), pad_size_K)\n\n attention_mask = jnp.ones((Q, K), dtype=jnp.bool_)\n attention_mask = attention_mask.at[T:, :].set(False)\n attention_mask = attention_mask.at[:, S:].set(False)\n\n mask_11TS = attention_mask[jnp.newaxis, jnp.newaxis, :, :]\n\n bias_4d = jnp.pad(_merge_batch_dims(bias), ((0, 0), (0, 0), (0, pad_size_Q), (0, pad_size_K))) if bias is not None else None\n\n # NOTE: jax.nn.dot_product_attention does not support dropout\n output_4d = jax.nn.dot_product_attention(\n query=query_BQHD,\n key=key_BKHD,\n value=value_BKHD,\n bias=bias_4d,\n mask=mask_11TS,\n implementation=implementation,\n is_causal=is_causal,\n )\n return output_4d[..., :T, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab
523
+ 523,2252301,"utils/nn.py",7559,0,"",python,selection_command
524
+ 524,2252303,"utils/nn.py",7528,0,"",python,selection_command
525
+ 525,2252303,"utils/nn.py",7462,0,"",python,selection_command
526
+ 526,2252304,"utils/nn.py",7424,0,"",python,selection_command
527
+ 527,2252305,"utils/nn.py",7374,0,"",python,selection_command
528
+ 528,2252306,"utils/nn.py",7332,0,"",python,selection_command
529
+ 529,2252307,"utils/nn.py",7286,0,"",python,selection_command
530
+ 530,2252307,"utils/nn.py",7244,0,"",python,selection_command
531
+ 531,2252308,"utils/nn.py",7204,0,"",python,selection_command
532
+ 532,2252309,"utils/nn.py",7179,0,"",python,selection_command
533
+ 533,2252503,"utils/nn.py",1547,0,"",python,selection_command
534
+ 534,2253323,"utils/nn.py",1574,0,"",python,selection_command
535
+ 535,2253579,"utils/nn.py",1592,0,"",python,selection_command
536
+ 536,2253605,"utils/nn.py",1606,0,"",python,selection_command
537
+ 537,2253631,"utils/nn.py",1624,0,"",python,selection_command
538
+ 538,2253664,"utils/nn.py",1646,0,"",python,selection_command
539
+ 539,2253700,"utils/nn.py",1670,0,"",python,selection_command
540
+ 540,2253735,"utils/nn.py",1694,0,"",python,selection_command
541
+ 541,2253761,"utils/nn.py",1726,0,"",python,selection_command
542
+ 542,2253903,"utils/nn.py",1752,0,"",python,selection_command
543
+ 543,2254063,"utils/nn.py",1787,0,"",python,selection_command
544
+ 544,2254650,"utils/nn.py",1811,0,"",python,selection_command
545
+ 545,2255170,"utils/nn.py",1813,0,"",python,selection_command
546
+ 546,2255577,"utils/nn.py",2124,0,"",python,selection_command
547
+ 547,2270643,"utils/nn.py",2138,0,"",python,selection_command
548
+ 548,2271361,"utils/nn.py",4408,0,"",python,selection_command
549
+ 549,2277137,"utils/nn.py",4406,0,"",python,selection_command
550
+ 550,2277319,"utils/nn.py",4400,0,"",python,selection_command
551
+ 551,2277484,"utils/nn.py",4399,0,"",python,selection_command
552
+ 552,2277755,"utils/nn.py",4382,0,"",python,selection_command
553
+ 553,2280545,"train_lam.py",0,0,"",python,tab
554
+ 554,2282114,"utils/nn.py",0,0,"",python,tab
555
+ 555,2553194,"utils/nn.py",4985,0,"",python,selection_command
556
+ 556,2555180,"utils/nn.py",5018,0,"",python,selection_command
557
+ 557,2556838,"utils/nn.py",4985,0,"",python,selection_command
558
+ 558,2556969,"utils/nn.py",4952,0,"",python,selection_command
559
+ 559,2557302,"utils/nn.py",4949,0,"",python,selection_command
560
+ 560,2557470,"utils/nn.py",4947,0,"",python,selection_command
561
+ 561,2557644,"utils/nn.py",4940,0,"",python,selection_command
562
+ 562,2571436,"utils/nn.py",5757,0,"",python,selection_command
563
+ 563,2572184,"utils/nn.py",6274,0,"",python,selection_command
564
+ 564,2573118,"utils/nn.py",6287,0,"",python,selection_command
565
+ 565,2573318,"utils/nn.py",8258,0,"",python,selection_command
566
+ 566,2613940,"utils/nn.py",8257,0,"",python,selection_command
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-1a3a8350-ade5-4f14-90d3-a2023f5be9fa1753600712073-2025_07_27-09.18.39.905/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3f2b1a99-0d75-466c-970c-4deff62cba851753462933379-2025_07_25-19.02.23.245/source.csv ADDED
@@ -0,0 +1,94 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 1,3,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport grain\n\n\nclass Genie(nnx.Module):\n """"""Genie model""""""\n\n def __init__(\n self,\n in_dim: int,\n tokenizer_dim: int,\n tokenizer_ffn_dim: int,\n latent_patch_dim: int,\n num_patch_latents: int,\n patch_size: int,\n tokenizer_num_blocks: int,\n tokenizer_num_heads: int,\n lam_dim: int,\n lam_ffn_dim: int,\n latent_action_dim: int,\n num_latent_actions: int,\n lam_patch_size: int,\n lam_num_blocks: int,\n lam_num_heads: int,\n lam_co_train: bool,\n dyna_dim: int,\n dyna_ffn_dim: int,\n dyna_num_blocks: int,\n dyna_num_heads: int,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n dropout: float = 0.0,\n mask_limit: float = 0.0,\n ):\n # --- Tokenizer ---\n self.in_dim = in_dim\n self.tokenizer_dim = tokenizer_dim\n self.tokenizer_ffn_dim = tokenizer_ffn_dim\n self.latent_patch_dim = latent_patch_dim\n self.num_patch_latents = num_patch_latents\n self.patch_size = patch_size\n self.tokenizer_num_blocks = tokenizer_num_blocks\n self.tokenizer_num_heads = tokenizer_num_heads\n # --- LAM ---\n self.lam_dim = lam_dim\n self.lam_ffn_dim = lam_ffn_dim\n self.latent_action_dim = latent_action_dim\n self.num_latent_actions = num_latent_actions\n self.lam_patch_size = lam_patch_size\n self.lam_num_blocks = lam_num_blocks\n self.lam_num_heads = lam_num_heads\n self.lam_co_train = lam_co_train\n # --- Dynamics ---\n self.dyna_dim = dyna_dim\n self.dyna_ffn_dim = dyna_ffn_dim\n self.dyna_num_blocks = dyna_num_blocks\n self.dyna_num_heads = dyna_num_heads\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.dropout = dropout\n self.mask_limit = mask_limit\n\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = 
self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""]),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n def sample(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: patches per frame\n S: sequence length\n A: action space\n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # Define the inner MaskGIT loop using nnx.scan\n maskgit_step = MaskGITStep(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def maskgit_scan_fn(module, carry, x):\n new_carry, _ = module(carry, x)\n return new_carry, None\n\n MaskGITLoop = nnx.scan(\n maskgit_scan_fn,\n in_axes=(None, nnx.Carry, 0), # (module, carry, x)\n out_axes=(nnx.Carry, None), # (new_carry, None)\n )\n\n # Define the outer autoregressive loop's body function\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)).astype(bool) # (B, S, N)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = MaskGITLoop(\n maskgit_step, init_carry_maskgit, jnp.arange(steps)\n )\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using jax.lax.scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n 
final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nnx.Module):\n def __init__(\n self,\n dynamics: DynamicsMaskGIT,\n tokenizer: TokenizerVQVAE,\n temperature: float,\n sample_argmax: bool,\n steps: int,\n ):\n self.dynamics = dynamics\n self.tokenizer = tokenizer\n self.temperature = temperature\n self.sample_argmax = sample_argmax\n self.steps = steps\n\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token.value # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1)\n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\n\n# FIXME (f.srambical): add conversion script for old checkpoints\ndef restore_genie_components(\n optimizer: nnx.Optimizer,\n sharding: jax.sharding.NamedSharding,\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rngs = nnx.Rngs(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n 
patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_tokenizer_optimizer = nnx.Optimizer(dummy_tokenizer, dummy_tx)\n dummy_tokenizer_optimizer_state = nnx.state(dummy_tokenizer_optimizer)\n abstract_sharded_tokenizer_optimizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_optimizer_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(\n abstract_sharded_tokenizer_optimizer_state\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_tokenizer_optimizer.model, restored_tokenizer.model)\n optimizer.model.tokenizer = dummy_tokenizer_optimizer.model\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_lam_optimizer = nnx.Optimizer(dummy_lam, dummy_tx)\n dummy_lam_optimizer_state = nnx.state(dummy_lam_optimizer)\n abstract_sharded_lam_optimizer_state = _create_abstract_sharded_pytree(\n dummy_lam_optimizer_state, sharding\n )\n restored_lam_optimizer = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(\n abstract_sharded_lam_optimizer_state\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_lam_optimizer.model, restored_lam_optimizer.model)\n optimizer.model.lam = dummy_lam_optimizer.model\n # Remove the LAM decoder to save memory and avoid unnecessary computation.\n del optimizer.model.lam.decoder\n lam_checkpoint_manager.close()\n\n return optimizer\n\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab
3
+ 2,357,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"7:02:23 PM [info] Activating crowd-code\n7:02:23 PM [info] Recording started\n7:02:23 PM [info] Initializing git provider using file system watchers...\n",Log,tab
4
+ 3,587,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"7:02:23 PM [info] Git repository found\n7:02:23 PM [info] Git provider initialized successfully\n7:02:23 PM [info] Initial git state: [object Object]\n",Log,content
5
+ 4,2898,"genie.py",0,0,"",python,tab
6
+ 5,3378,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
7
+ 6,5988,"genie.py",0,0,"",python,tab
8
+ 7,19218,"genie.py",0,0,"",python,tab
9
+ 8,19298,"genie.py",6670,0,"",python,selection_command
10
+ 9,56549,"/fast/home/franz.srambical/jafar/sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.Optimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state),\n ),\n 
)\n restored_optimizer_state = restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n # @nnx.jit\n # @jax.jit\n def _sampling_fn(model, batch):\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n return model.sample(\n batch,\n args.seq_len,\n args.maskgit_steps,\n args.temperature,\n args.sample_argmax,\n )\n\n\n # --- Define autoregressive sampling loop ---\n def _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = genie.sample(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return generated_vid\n\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n video_batch = next(iter(dataloader))\n # Get latent actions for all videos in the batch\n batch = dict(videos=video_batch)\n action_batch = genie.vq_encode(batch, training=False) # type: ignore[arg-type]\n action_batch = jnp.asarray(action_batch).reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n # --- Sample + evaluate video ---\n vid = _autoreg_sample(rng, video_batch, action_batch)\n gt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\n recon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\n # FIXME (f.srambical): investigate why this is needed\n gt = gt.astype(jnp.float32)\n ssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\n print(f""SSIM: {ssim}"")\n\n # --- Construct video ---\n # true_videos = (video_batch * 255).astype(np.uint8)\n # pred_videos = (vid * 255).astype(np.uint8)\n # video_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\n # video_comparison[0] = true_videos[:, : args.seq_len]\n # video_comparison[1] = pred_videos\n # frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # # --- Save video ---\n # imgs = [Image.fromarray(img) for img in frames]\n # # Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\n # for t, img in enumerate(imgs[1:]):\n # d = ImageDraw.Draw(img)\n # for row in range(action_batch.shape[0]):\n # action = action_batch[row, t, 0]\n # y_offset = row * video_batch.shape[2] + 2\n # d.text((2, y_offset), f""{action}"", fill=255)\n # imgs[0].save(\n # f""generation_{time.time()}.gif"",\n # save_all=True,\n # append_images=imgs[1:],\n # duration=250,\n # loop=0,\n # )\n",python,tab
11
+ 10,56549,"/fast/home/franz.srambical/jafar/sample.py",5611,0,"",python,selection_command
12
+ 11,58027,"/fast/home/franz.srambical/jafar/sample.py",4644,0,"",python,selection_command
13
+ 12,59949,"/fast/home/franz.srambical/jafar/genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport grain\n\n\nclass Genie(nnx.Module):\n """"""Genie model""""""\n\n def __init__(\n self,\n in_dim: int,\n tokenizer_dim: int,\n tokenizer_ffn_dim: int,\n latent_patch_dim: int,\n num_patch_latents: int,\n patch_size: int,\n tokenizer_num_blocks: int,\n tokenizer_num_heads: int,\n lam_dim: int,\n lam_ffn_dim: int,\n latent_action_dim: int,\n num_latent_actions: int,\n lam_patch_size: int,\n lam_num_blocks: int,\n lam_num_heads: int,\n lam_co_train: bool,\n dyna_dim: int,\n dyna_ffn_dim: int,\n dyna_num_blocks: int,\n dyna_num_heads: int,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n dropout: float = 0.0,\n mask_limit: float = 0.0,\n ):\n # --- Tokenizer ---\n self.in_dim = in_dim\n self.tokenizer_dim = tokenizer_dim\n self.tokenizer_ffn_dim = tokenizer_ffn_dim\n self.latent_patch_dim = latent_patch_dim\n self.num_patch_latents = num_patch_latents\n self.patch_size = patch_size\n self.tokenizer_num_blocks = tokenizer_num_blocks\n self.tokenizer_num_heads = tokenizer_num_heads\n # --- LAM ---\n self.lam_dim = lam_dim\n self.lam_ffn_dim = lam_ffn_dim\n self.latent_action_dim = latent_action_dim\n self.num_latent_actions = num_latent_actions\n self.lam_patch_size = lam_patch_size\n self.lam_num_blocks = lam_num_blocks\n self.lam_num_heads = lam_num_heads\n self.lam_co_train = lam_co_train\n # --- Dynamics ---\n self.dyna_dim = dyna_dim\n self.dyna_ffn_dim = dyna_ffn_dim\n self.dyna_num_blocks = dyna_num_blocks\n self.dyna_num_heads = dyna_num_heads\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.dropout = dropout\n self.mask_limit = mask_limit\n\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n 
lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""]),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n def sample(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: patches per frame\n S: sequence length\n A: action space\n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n # Define the inner MaskGIT loop using nnx.scan\n maskgit_step = MaskGITStep(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def maskgit_scan_fn(module, carry, x):\n new_carry, _ = module(carry, x)\n return new_carry, None\n\n MaskGITLoop = nnx.scan(\n maskgit_scan_fn,\n in_axes=(None, nnx.Carry, 0), # (module, carry, x)\n out_axes=(nnx.Carry, None), # (new_carry, None)\n )\n\n # Define the outer autoregressive loop's body function\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)).astype(bool) # (B, S, N)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = MaskGITLoop(\n maskgit_step, init_carry_maskgit, jnp.arange(steps)\n )\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using jax.lax.scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, 
timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nnx.Module):\n def __init__(\n self,\n dynamics: DynamicsMaskGIT,\n tokenizer: TokenizerVQVAE,\n temperature: float,\n sample_argmax: bool,\n steps: int,\n ):\n self.dynamics = dynamics\n self.tokenizer = tokenizer\n self.temperature = temperature\n self.sample_argmax = sample_argmax\n self.steps = steps\n\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token.value # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1)\n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\n\n# FIXME (f.srambical): add conversion script for old checkpoints\ndef restore_genie_components(\n optimizer: nnx.Optimizer,\n sharding: jax.sharding.NamedSharding,\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rngs = nnx.Rngs(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n 
num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_tokenizer_optimizer = nnx.Optimizer(dummy_tokenizer, dummy_tx)\n dummy_tokenizer_optimizer_state = nnx.state(dummy_tokenizer_optimizer)\n abstract_sharded_tokenizer_optimizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_optimizer_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(\n abstract_sharded_tokenizer_optimizer_state\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_tokenizer_optimizer.model, restored_tokenizer.model)\n optimizer.model.tokenizer = dummy_tokenizer_optimizer.model\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_lam_optimizer = nnx.Optimizer(dummy_lam, dummy_tx)\n dummy_lam_optimizer_state = nnx.state(dummy_lam_optimizer)\n abstract_sharded_lam_optimizer_state = _create_abstract_sharded_pytree(\n dummy_lam_optimizer_state, sharding\n )\n restored_lam_optimizer = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(\n abstract_sharded_lam_optimizer_state\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_lam_optimizer.model, restored_lam_optimizer.model)\n optimizer.model.lam = dummy_lam_optimizer.model\n # Remove the LAM decoder to save memory and avoid unnecessary computation.\n del optimizer.model.lam.decoder\n lam_checkpoint_manager.close()\n\n return optimizer\n\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab
14
+ 13,59950,"/fast/home/franz.srambical/jafar/genie.py",7076,0,"",python,selection_command
15
+ 14,77428,"/fast/home/franz.srambical/jafar/sample.py",0,0,"",python,tab
16
+ 15,79472,"genie.py",0,0,"",python,tab
17
+ 16,96262,"genie.py",7213,0,"",python,selection_command
18
+ 17,97679,"genie.py",7201,63," out_axes=(nnx.Carry, 0), # (new_carry, None)\n",python,content
19
+ 18,113555,"/fast/home/franz.srambical/jafar/genie.py",0,0,"",python,tab
20
+ 19,113557,"/fast/home/franz.srambical/jafar/genie.py",7201,0,"",python,selection_command
21
+ 20,116189,"genie.py",0,0,"",python,tab
22
+ 21,117566,"genie.py",7258,0,"",python,selection_mouse
23
+ 22,155903,"/fast/home/franz.srambical/jafar/genie.py",0,0,"",python,tab
24
+ 23,157626,"/fast/home/franz.srambical/jafar/genie.py",7201,62," out_axes=(nnx.Carry, None), # (new_carry, None)",python,content
25
+ 24,159605,"genie.py",0,0,"",python,tab
26
+ 25,179385,"/fast/home/franz.srambical/jafar/genie.py",0,0,"",python,tab
27
+ 26,179385,"/fast/home/franz.srambical/jafar/genie.py",7979,0,"",python,selection_command
28
+ 27,213168,"/fast/home/franz.srambical/jafar/genie.py",7979,117," final_carry_maskgit, _ = jax.lax.scan(\n maskgit_step_fn, init_carry_maskgit, jnp.arange(steps)",python,content
29
+ 28,213169,"/fast/home/franz.srambical/jafar/genie.py",7076,199,"",python,content
30
+ 29,213169,"/fast/home/franz.srambical/jafar/genie.py",6949,90," new_carry = (rng, token_idxs, new_mask, action_tokens)",python,content
31
+ 30,213169,"/fast/home/franz.srambical/jafar/genie.py",6670,277," # Define the inner MaskGIT loop function\n def maskgit_step_fn(carry, step):\n rng, token_idxs, mask, action_tokens = carry\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token.value # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1)\n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))\n step_temp = temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)",python,content
32
+ 31,223860,"/fast/home/franz.srambical/jafar/genie.py",9430,121," final_carry_maskgit, _ = MaskGITLoop(\n maskgit_step, init_carry_maskgit, jnp.arange(steps)",python,content
33
+ 32,223860,"/fast/home/franz.srambical/jafar/genie.py",8726,0," MaskGITLoop = nnx.scan(\n maskgit_scan_fn,\n in_axes=(None, nnx.Carry, 0), # (module, carry, x)\n out_axes=(nnx.Carry, None), # (new_carry, None)\n )\n\n",python,content
34
+ 33,223860,"/fast/home/franz.srambical/jafar/genie.py",8623,66," def maskgit_scan_fn(module, carry, x):\n new_carry, _ = module(carry, x)",python,content
35
+ 34,223860,"/fast/home/franz.srambical/jafar/genie.py",6670,1951," # Define the inner MaskGIT loop using nnx.scan\n maskgit_step = MaskGITStep(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )",python,content
36
+ 35,224039,"/fast/home/franz.srambical/jafar/genie.py",11216,94,"",python,content
37
+ 36,224039,"/fast/home/franz.srambical/jafar/genie.py",9013,2202,"",python,content
38
+ 37,224039,"/fast/home/franz.srambical/jafar/genie.py",7979,117," final_carry_maskgit, _ = jax.lax.scan(\n maskgit_step_fn, init_carry_maskgit, jnp.arange(steps)",python,content
39
+ 38,224039,"/fast/home/franz.srambical/jafar/genie.py",7076,197," # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))\n step_temp = temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None",python,content
40
+ 39,224039,"/fast/home/franz.srambical/jafar/genie.py",6949,125," # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token.value # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1)\n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)",python,content
41
+ 40,224039,"/fast/home/franz.srambical/jafar/genie.py",6670,277," # Define the inner MaskGIT loop function\n def maskgit_step_fn(carry, step):\n rng, token_idxs, mask, action_tokens = carry\n N = token_idxs.shape[2]",python,content
42
+ 41,272905,"/fast/home/franz.srambical/jafar/genie.py",6670,0,"",python,selection_command
43
+ 42,380998,"sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.Optimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state),\n ),\n )\n restored_optimizer_state = 
restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n # @nnx.jit\n # @jax.jit\n def _sampling_fn(model, batch):\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n return model.sample(\n batch,\n args.seq_len,\n args.maskgit_steps,\n args.temperature,\n args.sample_argmax,\n )\n\n\n # --- Define autoregressive sampling loop ---\n def _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = genie.sample(batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax)\n return generated_vid\n\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n video_batch = next(iter(dataloader))\n # Get latent actions for all videos in the batch\n batch = dict(videos=video_batch)\n action_batch = genie.vq_encode(batch, training=False) # type: ignore[arg-type]\n action_batch = jnp.asarray(action_batch).reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n # --- Sample + evaluate video ---\n vid = _autoreg_sample(rng, video_batch, action_batch)\n gt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\n recon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\n # FIXME (f.srambical): investigate why this is needed\n gt = gt.astype(jnp.float32)\n ssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\n print(f""SSIM: {ssim}"")\n\n # --- Construct video ---\n # true_videos = (video_batch * 255).astype(np.uint8)\n # pred_videos = (vid * 255).astype(np.uint8)\n # video_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\n # video_comparison[0] = true_videos[:, : args.seq_len]\n # video_comparison[1] = pred_videos\n # frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # # --- Save video ---\n # imgs = [Image.fromarray(img) for img in frames]\n # # Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\n # for t, img in enumerate(imgs[1:]):\n # d = ImageDraw.Draw(img)\n # for row in range(action_batch.shape[0]):\n # action = action_batch[row, t, 0]\n # y_offset = row * video_batch.shape[2] + 2\n # d.text((2, y_offset), f""{action}"", fill=255)\n # imgs[0].save(\n # f""generation_{time.time()}.gif"",\n # save_all=True,\n # append_images=imgs[1:],\n # duration=250,\n # loop=0,\n # )\n",python,tab
44
+ 43,382134,"sample.py",7041,0,"",python,selection_command
45
+ 44,382137,"sample.py",7023,0,"",python,selection_command
46
+ 45,382152,"sample.py",6999,0,"",python,selection_command
47
+ 46,382225,"sample.py",6965,0,"",python,selection_command
48
+ 47,382231,"sample.py",6940,0,"",python,selection_command
49
+ 48,382323,"sample.py",6897,0,"",python,selection_command
50
+ 49,382340,"sample.py",6877,0,"",python,selection_command
51
+ 50,382345,"sample.py",6818,0,"",python,selection_command
52
+ 51,382381,"sample.py",6762,0,"",python,selection_command
53
+ 52,382416,"sample.py",6715,0,"",python,selection_command
54
+ 53,382427,"sample.py",6664,0,"",python,selection_command
55
+ 54,382469,"sample.py",6630,0,"",python,selection_command
56
+ 55,382583,"sample.py",6589,0,"",python,selection_command
57
+ 56,382584,"sample.py",6489,0,"",python,selection_command
58
+ 57,382658,"sample.py",6435,0,"",python,selection_command
59
+ 58,382658,"sample.py",6408,0,"",python,selection_command
60
+ 59,382659,"sample.py",6407,0,"",python,selection_command
61
+ 60,382692,"sample.py",6323,0,"",python,selection_command
62
+ 61,382707,"sample.py",6283,0,"",python,selection_command
63
+ 62,382729,"sample.py",6224,0,"",python,selection_command
64
+ 63,382778,"sample.py",6157,0,"",python,selection_command
65
+ 64,382822,"sample.py",6108,0,"",python,selection_command
66
+ 65,382836,"sample.py",6051,0,"",python,selection_command
67
+ 66,383110,"sample.py",6021,0,"",python,selection_command
68
+ 67,409372,"sample.py",0,0,"",python,tab
69
+ 68,423163,"sample.py",6020,0,"",python,selection_command
70
+ 69,423280,"sample.py",5993,0,"",python,selection_command
71
+ 70,456144,"genie.py",0,0,"",python,tab
72
+ 71,456145,"genie.py",6670,0,"",python,selection_command
73
+ 72,461257,"sample.py",0,0,"",python,tab
74
+ 73,464425,"sample.py",6969,0,"",python,selection_command
75
+ 74,464665,"sample.py",7049,0,"",python,selection_command
76
+ 75,465095,"sample.py",6228,0,"",python,selection_command
77
+ 76,465860,"sample.py",5207,0,"",python,selection_command
78
+ 77,466191,"sample.py",4424,0,"",python,selection_command
79
+ 78,466192,"sample.py",3753,0,"",python,selection_command
80
+ 79,466193,"sample.py",2998,0,"",python,selection_command
81
+ 80,466194,"sample.py",2219,0,"",python,selection_command
82
+ 81,466211,"sample.py",1453,0,"",python,selection_command
83
+ 82,466211,"sample.py",917,0,"",python,selection_command
84
+ 83,466304,"sample.py",351,0,"",python,selection_command
85
+ 84,466305,"sample.py",917,0,"",python,selection_command
86
+ 85,466306,"sample.py",1453,0,"",python,selection_command
87
+ 86,466306,"sample.py",2219,0,"",python,selection_command
88
+ 87,466542,"sample.py",2998,0,"",python,selection_command
89
+ 88,468353,"/fast/home/franz.srambical/jafar/genie.py",0,0,"",python,tab
90
+ 89,470581,"/fast/home/franz.srambical/jafar/genie.py",7766,0,"",python,selection_command
91
+ 90,470693,"/fast/home/franz.srambical/jafar/genie.py",8930,0,"",python,selection_command
92
+ 91,470894,"/fast/home/franz.srambical/jafar/genie.py",9838,0,"",python,selection_command
93
+ 92,471054,"/fast/home/franz.srambical/jafar/genie.py",10437,0,"",python,selection_command
94
+ 93,475752,"sample.py",0,0,"",python,tab
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6410c04a-5509-42a0-b7ec-8fa2503faf3a1758380010770-2025_09_20-16.53.40.475/source.csv ADDED
The diff for this file is too large to render. See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-accd586c-9376-4507-a888-197a6c40bdf51757184416102-2025_09_06-20.47.03.130/source.csv ADDED
@@ -0,0 +1,4 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 1,3,"utils/nn.py",0,0,"import math\nfrom typing import Tuple, Callable, List\n\nfrom flax import nnx\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass SpatioTemporalPositionalEncoding(nnx.Module):\n """"""\n Applies separate sinusoidal positional encodings to the temporal and spatial dimensions.\n """"""\n\n def __init__(self, d_model: int, max_len: int = 5000):\n self.d_model = d_model\n self.max_len = max_len\n\n pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))\n pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))\n self.pe = nnx.Variable(pe)\n\n def __call__(self, x: jax.Array) -> jax.Array:\n """"""\n Args:\n x: The input tensor of shape (Batch, Time, Space, Dimension).\n\n Returns:\n The input tensor with positional encodings added.\n """"""\n assert x.ndim == 4, f""Input must be 4-dimensional, but got shape {x.shape}""\n\n num_timesteps = x.shape[1]\n num_spatial_patches = x.shape[2]\n\n # Temporal positional encoding: (1, T, 1, D)\n temporal_pe = self.pe.value[None, :num_timesteps, None, :]\n x = x + temporal_pe\n\n # Spatial positional encoding: (1, 1, S, D)\n spatial_pe = self.pe.value[None, None, :num_spatial_patches, :]\n x = x + spatial_pe\n\n return x\n\n\nclass STBlock(nnx.Module):\n def __init__(\n self,\n dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n sow_weights: bool,\n sow_activations: bool,\n ):\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = 
self.spatial_norm(x_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM, sow_weights=self.sow_weights)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_norm(x_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM, sow_weights=self.sow_weights)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n if self.sow_activations:\n self.sow(nnx.Intermediate, ""activations"", x_BTNM)\n return x_BTNM\n\n\nclass STTransformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n V: vocabulary size\n """"""\n\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n sow_weights: bool = False,\n sow_activations: bool = False,\n sow_logits: bool = False,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_logits = sow_logits\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n\n self.pos_enc = SpatioTemporalPositionalEncoding(self.model_dim, max_len=max_len)\n\n self.blocks = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n sow_weights=self.sow_weights,\n sow_activations=self.sow_activations,\n )\n )\n\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM)\n\n x_BTNV = self.output_dense(x_BTNM)\n if self.sow_logits:\n self.sow(nnx.Intermediate, ""logits"", x_BTNV)\n return x_BTNV\n\n\nclass TransformerBlock(nnx.Module):\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n sow_weights: bool,\n sow_activations: bool,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = 
dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(\n self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None\n ) -> jax.Array:\n # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\n z_FNM = self.spatial_norm(z_FNM)\n z_FNM = self.spatial_attention(z_FNM, sow_weights=self.sow_weights)\n z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\n z_PTM = self.temporal_norm(z_PTM)\n z_PTM = self.temporal_attention(z_PTM, sow_weights=self.sow_weights)\n z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n if self.sow_activations:\n self.sow(nnx.Intermediate, ""activations"", x_BTNM)\n\n return x_BTNM\n\n\nclass Transformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n V: vocabulary size\n F: number of frames in batch\n P: number of patch positions in batch\n """"""\n\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n sow_logits: bool = False,\n sow_weights: bool = False,\n sow_activations: bool = False,\n max_len: int = 5000,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = 
out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.sow_logits = sow_logits\n self.sow_weights = sow_weights\n self.sow_activations = sow_activations\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.param_dtype, # layer norm in full precision\n rngs=rngs,\n )\n\n self.pos_enc = SpatioTemporalPositionalEncoding(self.model_dim, max_len=max_len)\n\n self.blocks: List[TransformerBlock] = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n model_dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n sow_weights=self.sow_weights,\n sow_activations=self.sow_activations,\n rngs=rngs,\n )\n )\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(\n self, x_BTNI: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None\n ) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n for block in self.blocks:\n x_BTNM = block(x_BTNM, pos_index)\n\n x_BTNV = self.output_dense(x_BTNM)\n if self.sow_logits:\n self.sow(nnx.Intermediate, ""logits"", x_BTNV)\n return x_BTNV\n\n\ndef normalize(x: jax.Array) -> jax.Array:\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nnx.Module):\n """"""\n Dimension keys:\n D: B * T * N\n K: number of latents\n L: latent dimension\n """"""\n\n def __init__(\n self,\n latent_dim: int,\n num_latents: int,\n dropout: float,\n dtype: jnp.dtype,\n rngs: nnx.Rngs,\n ):\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.dropout = dropout\n self.dtype = dtype\n\n self.codebook = nnx.Param(\n nnx.initializers.lecun_uniform()(\n rngs.params(), (self.num_latents, self.latent_dim)\n )\n )\n self.drop = nnx.Dropout(self.dropout, rngs=rngs)\n\n def __call__(\n self, x_DL: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x_DL = x_DL.astype(self.dtype)\n codebook = self.codebook.value.astype(self.dtype)\n\n x_normalized_DL = normalize(x_DL)\n normalized_codebook_KL = normalize(codebook)\n distance_DK = -jnp.matmul(x_normalized_DL, normalized_codebook_KL.T)\n if training:\n distance_DK = self.drop(distance_DK)\n\n # --- Get indices and embeddings ---\n indices_D = jnp.argmin(distance_DK, axis=-1)\n z_DL = codebook[indices_D]\n\n # --- Straight through estimator ---\n z_q_DL = x_normalized_DL + jax.lax.stop_gradient(z_DL - x_normalized_DL)\n return z_q_DL, z_DL, x_DL, indices_D\n\n def get_codes(self, indices_E: jax.Array) -> jax.Array:\n return self.codebook[indices_E]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool) -> Callable:\n """"""\n Create an attention function 
that uses flash attention if enabled.\n\n flax.nnx.MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim),\n but jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim). We reshape to\n ensure compatibility. cuDNN's flash attention additionally requires a sequence length that\n is a multiple of 4. We pad the sequence length to the nearest multiple of 4 and mask\n accordingly. Note that cuDNN requires the mask to be broadcast before calling the attention\n function due to strict shape checking.\n """"""\n\n def attention_fn(\n query_BTHD, key_BSHD, value_BSHD, bias=None, mask_B111=None, **kwargs\n ):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _merge_batch_dims(x):\n return einops.rearrange(x, ""... l h k -> (...) l h k"")\n\n def _pad(x, pad_size):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n original_shape = query_BTHD.shape\n T = query_BTHD.shape[-3]\n S = key_BSHD.shape[-3]\n\n # Pad to nearest multiple of 4\n Q = ((T + 3) // 4) * 4\n pad_size_Q = Q - T\n K = ((S + 3) // 4) * 4\n pad_size_K = K - S\n\n query_BQHD = _pad(_merge_batch_dims(query_BTHD), pad_size_Q)\n key_BKHD = _pad(_merge_batch_dims(key_BSHD), pad_size_K)\n value_BKHD = _pad(_merge_batch_dims(value_BSHD), pad_size_K)\n\n attention_mask = jnp.ones((Q, K), dtype=jnp.bool_)\n attention_mask = attention_mask.at[T:, :].set(False)\n attention_mask = attention_mask.at[:, S:].set(False)\n\n mask_11TS = attention_mask[jnp.newaxis, jnp.newaxis, :, :]\n\n bias_4d = (\n jnp.pad(\n _merge_batch_dims(bias),\n ((0, 0), (0, 0), (0, pad_size_Q), (0, pad_size_K)),\n )\n if bias is not None\n else None\n )\n\n # NOTE: jax.nn.dot_product_attention does not support dropout\n output_4d = jax.nn.dot_product_attention(\n query=query_BQHD,\n key=key_BKHD,\n value=value_BKHD,\n bias=bias_4d,\n mask=mask_11TS,\n implementation=implementation,\n is_causal=is_causal,\n )\n return output_4d[..., :T, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab
3
+ 2,164,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"8:47:03 PM [info] Activating crowd-code\n8:47:03 PM [info] Recording started\n8:47:03 PM [info] Initializing git provider using file system watchers...\n8:47:03 PM [info] Git repository found\n8:47:03 PM [info] Git provider initialized successfully\n8:47:03 PM [info] Initial git state: [object Object]\n",Log,tab
4
+ 3,3295,"utils/nn.py",0,0,"",python,tab
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-dedac322-1282-4d89-8a49-f3a5624493ea1762171752270-2025_11_03-13.09.19.936/source.csv ADDED
@@ -0,0 +1,255 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 2,366,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:09:19 PM [info] Activating crowd-code\n1:09:19 PM [info] Recording started\n1:09:19 PM [info] Initializing git provider using file system watchers...\n1:09:20 PM [error] Not a git repository: EntryNotFound (FileSystemError): Error: ENOENT: no such file or directory, stat '/home/franz.srambical/jafar/slurm/dev/franz/berlin/crowd-pilot/.git'\n",Log,tab
3
+ 3,4284,"TERMINAL",0,0,"",,terminal_command
4
+ 4,7859,"start_sglang",0,0,"",plaintext,tab
5
+ 5,11243,"TERMINAL",0,0,"",,terminal_command
6
+ 6,12735,"start_sglang_server.py",0,0,"",python,tab
7
+ 7,27073,"start_sglang_server.sh",0,0,"",shellscript,tab
8
+ 8,27890,"start_sglang_server.sh",0,0,"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0\n",shellscript,content
9
+ 9,28380,"start_sglang_server.sh",86,0,"\n",shellscript,content
10
+ 10,29134,"start_sglang_server.sh",87,1,"",shellscript,content
11
+ 11,30095,"start_sglang_server.sh",0,0,"",shellscript,selection_command
12
+ 12,30473,"start_sglang_server.sh",0,0,"\n",shellscript,content
13
+ 13,31314,"start_sglang_server.sh",0,0,"s",shellscript,content
14
+ 14,31314,"start_sglang_server.sh",1,0,"",shellscript,selection_keyboard
15
+ 15,31358,"start_sglang_server.sh",1,0,"o",shellscript,content
16
+ 16,31358,"start_sglang_server.sh",2,0,"",shellscript,selection_keyboard
17
+ 17,31448,"start_sglang_server.sh",2,0,"u",shellscript,content
18
+ 18,31449,"start_sglang_server.sh",3,0,"",shellscript,selection_keyboard
19
+ 19,31476,"start_sglang_server.sh",3,0,"r",shellscript,content
20
+ 20,31476,"start_sglang_server.sh",4,0,"",shellscript,selection_keyboard
21
+ 21,31919,"start_sglang_server.sh",4,0,"c",shellscript,content
22
+ 22,31919,"start_sglang_server.sh",5,0,"",shellscript,selection_keyboard
23
+ 23,32140,"start_sglang_server.sh",5,0,"e",shellscript,content
24
+ 24,32140,"start_sglang_server.sh",6,0,"",shellscript,selection_keyboard
25
+ 25,32304,"start_sglang_server.sh",6,0," ",shellscript,content
26
+ 26,32304,"start_sglang_server.sh",7,0,"",shellscript,selection_keyboard
27
+ 27,32546,"start_sglang_server.sh",7,0,".venv/bin/activate",shellscript,content
28
+ 28,32775,"start_sglang_server.sh",24,0,"",shellscript,selection_command
29
+ 29,33216,"start_sglang_server.sh",25,0,"\n",shellscript,content
30
+ 30,35383,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
31
+ 31,37385,"TERMINAL",0,0,"",,terminal_focus
32
+ 32,37386,"start_sglang_server.sh",0,0,"",shellscript,tab
33
+ 33,43060,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
34
+ 34,49399,"TERMINAL",0,0,"ls",,terminal_command
35
+ 35,52175,"TERMINAL",0,0,"cd",,terminal_command
36
+ 36,56006,"TERMINAL",0,0,"cd crowd-pilot/",,terminal_command
37
+ 37,57470,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
38
+ 38,61752,"TERMINAL",0,0,"uv pip show sglang",,terminal_command
39
+ 39,61798,"TERMINAL",0,0,"]633;C",,terminal_output
40
+ 40,62576,"TERMINAL",0,0,"Name: sglang\r\nVersion: 0.5.4.post1\r\nLocation: /fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages\r\nRequires: aiohttp, anthropic, blobfile, build, compressed-tensors, cuda-python, datasets, decord2, einops, fastapi, flashinfer-python, gguf, grpcio, grpcio-health-checking, grpcio-reflection, grpcio-tools, hf-transfer, huggingface-hub, interegular, ipython, llguidance, modelscope, msgspec, ninja, numpy, nvidia-cutlass-dsl, nvidia-ml-py, openai, openai-harmony, orjson, outlines, packaging, partial-json-parser, pillow, prometheus-client, psutil, py-spy, pybase64, pydantic, python-multipart, pyzmq, requests, scipy, sentencepiece, setproctitle, sgl-kernel, soundfile, tiktoken, timm, torch, torch-memory-saver, torchao, torchaudio, torchvision, tqdm, transformers, uvicorn, uvloop, xgrammar\r\nRequired-by:\r\n]0;franz.srambical@hai-login1:~/crowd-pilot",,terminal_output
41
+ 41,71102,"TERMINAL",0,0,"deactivate",,terminal_command
42
+ 42,77402,"TERMINAL",0,0,"bash /home/franz.srambical/jafar/slurm/dev/franz/berlin/crowd-pilot/start_sglang_server.sh",,terminal_command
43
+ 43,77468,"TERMINAL",0,0,"]633;C",,terminal_output
44
+ 44,85588,"TERMINAL",0,0,"^CTraceback (most recent call last):\r\n File ""<frozen runpy>"", line 198, in _run_module_as_main\r\n File ""<frozen runpy>"", line 88, in _run_code\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/launch_server.py"", line 7, in <module>\r\n from sglang.srt.server_args import prepare_server_args\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/srt/server_args.py"", line 29, in <module>\r\n from sglang.srt.connector import ConnectorType\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/srt/connector/__init__.py"", line 6, in <module>\r\n from sglang.srt.connector.base_connector import (\r\n ...<3 lines>...\r\n )\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/srt/connector/base_connector.py"", line 10, in <module>\r\n import torch\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/torch/__init__.py"", line 2150, in <module>\r\n from torch import _VF as _VF, functional as functional # usort: skip\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/torch/functional.py"", line 8, in <module>\r\n import torch.nn.functional as F\r\n File ""<frozen importlib._bootstrap>"", line 1360, in _find_and_load\r\n File ""<frozen importlib._bootstrap>"", line 1331, in _find_and_load_unlocked\r\n File ""<frozen importlib._bootstrap>"", line 935, in _load_unlocked\r\n File ""<frozen importlib._bootstrap_external>"", line 1022, in exec_module\r\n File ""<frozen importlib._bootstrap_external>"", line 1118, in get_code\r\n File ""<frozen importlib._bootstrap_external>"", line 1217, in get_data\r\nKeyboardInterrupt\r\n",,terminal_output
45
+ 45,85639,"TERMINAL",0,0,"^C\r\n]0;franz.srambical@hai-login1:~/crowd-pilot",,terminal_output
46
+ 46,85825,"TERMINAL",0,0,"^C",,terminal_command
47
+ 47,86834,"start_sglang_server.sh",27,0,"",shellscript,selection_command
48
+ 48,87198,"start_sglang_server.sh",27,86,"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0",shellscript,selection_command
49
+ 49,88964,"start_sglang_server.sh",27,0,"",shellscript,selection_command
50
+ 50,89979,"TERMINAL",0,0,"/home/franz.srambical/jafar/slurm/dev/franz/berlin/crowd-pilot/start_sglang_server.sh",,terminal_command
51
+ 51,93031,"start_sglang_server.sh",27,0,"/home/franz.srambical/jafar/slurm/dev/franz/berlin/crowd-pilot/start_sglang_server.sh",shellscript,content
52
+ 52,93032,"start_sglang_server.sh",112,0,"",shellscript,selection_keyboard
53
+ 53,93769,"start_sglang_server.sh",27,85,"",shellscript,content
54
+ 54,93771,"start_sglang_server.sh",112,0,"",shellscript,selection_command
55
+ 55,94569,"start_sglang_server.sh",114,0,"",shellscript,selection_command
56
+ 56,95993,"start_sglang_server.sh",27,0,"",shellscript,selection_command
57
+ 57,96402,"start_sglang_server.sh",27,86,"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0",shellscript,selection_command
58
+ 58,96799,"start_sglang_server.sh",27,0,"",shellscript,selection_command
59
+ 59,99144,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
60
+ 60,102053,"TERMINAL",0,0,"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0",,terminal_command
61
+ 61,102098,"TERMINAL",0,0,"]633;C",,terminal_output
62
+ 62,118857,"TERMINAL",0,0,"",,terminal_command
63
+ 63,123331,"TERMINAL",0,0,"2025-11-03 13:11:23.179652: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\r\n",,terminal_output
64
+ 64,126934,"TERMINAL",0,0,"2025-11-03 13:11:26.785110: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\r\nTo enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\r\n",,terminal_output
65
+ 65,140724,"TERMINAL",0,0,"2025-11-03 13:11:40.573353: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\r\n",,terminal_output
66
+ 66,151901,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""<frozen runpy>"", line 198, in _run_module_as_main\r\n File ""<frozen runpy>"", line 88, in _run_code\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/launch_server.py"", line 11, in <module>\r\n server_args = prepare_server_args(sys.argv[1:])\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/srt/server_args.py"", line 3850, in prepare_server_args\r\n return ServerArgs.from_cli_args(raw_args)\r\n ~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/srt/server_args.py"", line 3472, in from_cli_args\r\n return cls(**{attr: getattr(args, attr) for attr in attrs})\r\n File ""<string>"", line 268, in __init__\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/srt/server_args.py"", line 538, in __post_init__\r\n self._handle_missing_default_values()\r\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/srt/server_args.py"", line 623, in _handle_missing_default_values\r\n self.device = get_device()\r\n ~~~~~~~~~~^^\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/sglang/srt/utils/common.py"", line 1781, in get_device\r\n raise RuntimeError(""No accelerator (CUDA, XPU, HPU) is available."")\r\nRuntimeError: No accelerator (CUDA, XPU, HPU) is available.\r\n",,terminal_output
+ 67,155163,"TERMINAL",0,0,"]0;franz.srambical@hai-login1:~/crowd-pilot",,terminal_output
+ 68,246640,"TERMINAL",0,0,"id",,terminal_command
+ 69,246640,"TERMINAL",0,0,"]633;Cuid=961800067(franz.srambical) gid=961800067(franz.srambical) groups=961800067(franz.srambical),961800017(helmholtz-member),961800019(helmholtz-all),961800033(hmgu),961900525(hfmi_synergyunit)\r\n]0;franz.srambical@hai-login1:~/crowd-pilot",,terminal_output
+ 70,278403,"TERMINAL",0,0,"squeue",,terminal_command
+ 71,278419,"TERMINAL",0,0,"]633;C JOBID USER PARTITION NODES CPUS ST SUBMIT_TIME START_TIME TIME TIME_LIMIT NODELIST(REASON)\r\n 33317 xiao.liu interacti 1 128 R 2025-11-02T17:43:38 2025-11-02T17:43:38 19:30:20 23:59:00 hai006\r\n 33328 kalyan.nad standard 1 64 R 2025-11-03T11:56:23 2025-11-03T11:56:38 1:17:20 1-00:00:00 hai002\r\n 33320 kalyan.nad standard 1 64 R 2025-11-03T11:36:55 2025-11-03T11:36:55 1:37:03 1-00:00:00 hai001\r\n 33318 xiao.liu standard 1 128 R 2025-11-02T19:29:40 2025-11-02T19:30:38 17:43:20 23:59:00 hai004\r\n]0;franz.srambical@hai-login1:~/crowd-pilot",,terminal_output
+ 72,281131,"TERMINAL",0,0,"salloc --gpus=1 --ntasks-per-node=1 --cpus-per-task=10 --mem=100G",,terminal_command
+ 73,281187,"TERMINAL",0,0,"]633;Csalloc: Granted job allocation 33333\r\n",,terminal_output
+ 74,281282,"TERMINAL",0,0,"salloc: Nodes hai003 are ready for job\r\n",,terminal_output
+ 75,281642,"TERMINAL",0,0,"Running inside SLURM, Job ID 33333.\r\n",,terminal_output
+ 76,281742,"TERMINAL",0,0,"]0;franz.srambical@hai-login1:~/crowd-pilot[?2004h[franz.srambical@hai003.haicore.berlin:~/crowd-pilot] $ ",,terminal_output
+ 77,283584,"TERMINAL",0,0,"l",,terminal_output
+ 78,283694,"TERMINAL",0,0,"s",,terminal_output
+ 79,283773,"TERMINAL",0,0,"\r\n[?2004l\rLICENSE README.md crowd-pilot maxtext pyproject.toml slurm uv.lock\r\n]0;franz.srambical@hai-login1:~/crowd-pilot[?2004h[franz.srambical@hai003.haicore.berlin:~/crowd-pilot] $ ",,terminal_output
+ 80,284758,"TERMINAL",0,0,"\r(reverse-i-search)`': ",,terminal_output
+ 81,284889,"TERMINAL",0,0,"s': lso': . ""/fast/home/franz.srambical/.cursor-server/bin/3ccce8f55d8cca49f6d28b491a844c699b8719a0/out/vs/workbench/contrib/terminal/common/scripts/shellIntegration-bash.sh""",,terminal_output
+ 82,284952,"TERMINAL",0,0,"\ru': source .venv/bin/activate\r\n\r",,terminal_output
+ 83,285009,"TERMINAL",0,0,"[1@r': sour",,terminal_output
+ 84,285651,"TERMINAL",0,0,"\r[30@[franz.srambical@hai003.haicore.berlin:~/crowd-pilot] $ sour\r\n[?2004l\r]0;franz.srambical@hai-login1:~/crowd-pilot[?2004h(crowd-pilot) [franz.srambical@hai003.haicore.berlin:~/crowd-pilot] $ ",,terminal_output
+ 85,287855,"start_sglang_server.sh",27,86,"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0",shellscript,selection_command
+ 86,288194,"start_sglang_server.sh",27,0,"",shellscript,selection_command
+ 87,289101,"TERMINAL",0,0,"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0",,terminal_output
+ 88,289327,"TERMINAL",0,0,"\rpython3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0\r\n[?2004l\r",,terminal_output
+ 89,306946,"TERMINAL",0,0,"2025-11-03 13:14:26.791489: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\r\n",,terminal_output
+ 90,308085,"TERMINAL",0,0,"2025-11-03 13:14:27.923401: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\r\nTo enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\r\n",,terminal_output
+ 91,312755,"TERMINAL",0,0,"2025-11-03 13:14:32.604499: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\r\n",,terminal_output
+ 92,326515,"TERMINAL",0,0,"[2025-11-03 13:14:46] WARNING server_args.py:1104: Attention backend not explicitly specified. Use fa3 backend by default.\r\n[2025-11-03 13:14:46] INFO trace.py:48: opentelemetry package is not installed, tracing disabled\r\n",,terminal_output
+ 93,328112,"TERMINAL",0,0,"[2025-11-03 13:14:47] server_args=ServerArgs(model_path='qwen/qwen2.5-0.5b-instruct', tokenizer_path='qwen/qwen2.5-0.5b-instruct', tokenizer_mode='auto', tokenizer_worker_num=1, skip_tokenizer_init=False, load_format='auto', model_loader_extra_config='{}', trust_remote_code=False, context_length=None, is_embedding=False, enable_multimodal=None, revision=None, model_impl='auto', host='0.0.0.0', port=30000, grpc_mode=False, skip_server_warmup=False, warmups=None, nccl_port=None, checkpoint_engine_wait_weights_before_ready=False, dtype='auto', quantization=None, quantization_param_path=None, kv_cache_dtype='auto', enable_fp32_lm_head=False, modelopt_quant=None, modelopt_checkpoint_restore_path=None, modelopt_checkpoint_save_path=None, modelopt_export_path=None, quantize_and_serve=False, mem_fraction_static=0.835, max_running_requests=None, max_queued_requests=None, max_total_tokens=None, chunked_prefill_size=8192, max_prefill_tokens=16384, schedule_policy='fcfs', enable_priority_scheduling=False, abort_on_priority_when_disabled=False, schedule_low_priority_values_first=False, priority_scheduling_preemption_threshold=10, schedule_conservativeness=1.0, page_size=1, hybrid_kvcache_ratio=None, swa_full_tokens_ratio=0.8, disable_hybrid_swa_memory=False, radix_eviction_policy='lru', device='cuda', tp_size=1, pp_size=1, pp_max_micro_batch_size=None, stream_interval=1, stream_output=False, random_seed=541394942, constrained_json_whitespace_pattern=None, constrained_json_disable_any_whitespace=False, watchdog_timeout=300, dist_timeout=None, download_dir=None, base_gpu_id=0, gpu_id_step=1, sleep_on_idle=False, log_level='info', log_level_http=None, log_requests=False, log_requests_level=2, crash_dump_folder=None, show_time_cost=False, enable_metrics=False, enable_metrics_for_all_schedulers=False, tokenizer_metrics_custom_labels_header='x-custom-labels', tokenizer_metrics_allowed_custom_labels=None, bucket_time_to_first_token=None, bucket_inter_token_latency=None, bucket_e2e_request_latency=None, collect_tokens_histogram=False, prompt_tokens_buckets=None, generation_tokens_buckets=None, gc_warning_threshold_secs=0.0, decode_log_interval=40, enable_request_time_stats_logging=False, kv_events_config=None, enable_trace=False, oltp_traces_endpoint='localhost:4317', api_key=None, served_model_name='qwen/qwen2.5-0.5b-instruct', weight_version='default', chat_template=None, completion_template=None, file_storage_path='sglang_storage', enable_cache_report=False, reasoning_parser=None, tool_call_parser=None, tool_server=None, sampling_defaults='model', dp_size=1, load_balance_method='round_robin', load_watch_interval=0.1, prefill_round_robin_balance=False, dist_init_addr=None, nnodes=1, node_rank=0, json_model_override_args='{}', preferred_sampling_params=None, enable_lora=None, max_lora_rank=None, lora_target_modules=None, lora_paths=None, max_loaded_loras=None, max_loras_per_batch=8, lora_eviction_policy='lru', lora_backend='triton', max_lora_chunk_size=16, attention_backend='fa3', decode_attention_backend=None, prefill_attention_backend=None, sampling_backend='flashinfer', grammar_backend='xgrammar', mm_attention_backend=None, nsa_prefill_backend='flashmla_sparse', nsa_decode_backend='fa3', speculative_algorithm=None, speculative_draft_model_path=None, speculative_draft_model_revision=None, speculative_draft_load_format=None, speculative_num_steps=None, speculative_eagle_topk=None, speculative_num_draft_tokens=None, speculative_accept_threshold_single=1.0, 
speculative_accept_threshold_acc=1.0, speculative_token_map=None, speculative_attention_mode='prefill', speculative_ngram_min_match_window_size=1, speculative_ngram_max_match_window_size=12, speculative_ngram_min_bfs_breadth=1, speculative_ngram_max_bfs_breadth=10, speculative_ngram_match_type='BFS', speculative_ngram_branch_length=18, speculative_ngram_capacity=10000000, ep_size=1, moe_a2a_backend='none', moe_runner_backend='auto', flashinfer_mxfp4_moe_precision='default', enable_flashinfer_allreduce_fusion=False, deepep_mode='auto', ep_num_redundant_experts=0, ep_dispatch_algorithm='static', init_expert_location='trivial', enable_eplb=False, eplb_algorithm='auto', eplb_rebalance_num_iterations=1000, eplb_rebalance_layers_per_chunk=None, eplb_min_rebalancing_utilization_threshold=1.0, expert_distribution_recorder_mode=None, expert_distribution_recorder_buffer_size=1000, enable_expert_distribution_metrics=False, deepep_config=None, moe_dense_tp_size=None, elastic_ep_backend=None, mooncake_ib_device=None, max_mamba_cache_size=None, mamba_ssm_dtype='float32', mamba_full_memory_ratio=0.9, enable_hierarchical_cache=False, hicache_ratio=2.0, hicache_size=0, hicache_write_policy='write_through', hicache_io_backend='kernel', hicache_mem_layout='layer_first', hicache_storage_backend=None, hicache_storage_prefetch_policy='best_effort', hicache_storage_backend_extra_config=None, enable_lmcache=False, kt_amx_weight_path=None, kt_amx_method='AMXINT4', kt_cpuinfer=None, kt_threadpool_count=2, kt_num_gpu_experts=None, enable_double_sparsity=False, ds_channel_config_path=None, ds_heavy_channel_num=32, ds_heavy_token_num=256, ds_heavy_channel_type='qk', ds_sparse_decode_threshold=4096, cpu_offload_gb=0, offload_group_size=-1, offload_num_in_group=1, offload_prefetch_step=1, offload_mode='cpu', multi_item_scoring_delimiter=None, disable_radix_cache=False, cuda_graph_max_bs=256, cuda_graph_bs=[1, 2, 4, 8, 12, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224, 232, 240, 248, 256], disable_cuda_graph=False, disable_cuda_graph_padding=False, enable_profile_cuda_graph=False, enable_cudagraph_gc=False, enable_nccl_nvls=False, enable_symm_mem=False, disable_flashinfer_cutlass_moe_fp4_allgather=False, enable_tokenizer_batch_encode=False, disable_tokenizer_batch_decode=False, disable_outlines_disk_cache=False, disable_custom_all_reduce=False, enable_mscclpp=False, enable_torch_symm_mem=False, disable_overlap_schedule=False, enable_mixed_chunk=False, enable_dp_attention=False, enable_dp_lm_head=False, enable_two_batch_overlap=False, enable_single_batch_overlap=False, tbo_token_distribution_threshold=0.48, enable_torch_compile=False, enable_piecewise_cuda_graph=False, torch_compile_max_bs=32, piecewise_cuda_graph_max_tokens=4096, piecewise_cuda_graph_tokens=[4, 8, 12, 16, 20, 24, 28, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 640, 768, 896, 1024, 1152, 1280, 1408, 1536, 1664, 1792, 1920, 2048, 2176, 2304, 2432, 2560, 2688, 2816, 2944, 3072, 3200, 3328, 3456, 3584, 3712, 3840, 3968, 4096], piecewise_cuda_graph_compiler='eager', torchao_config='', enable_nan_detection=False, enable_p2p_check=False, triton_attention_reduce_in_fp32=False, triton_attention_num_kv_splits=8, triton_attention_split_tile_size=None, num_continuous_decode_steps=1, delete_ckpt_after_loading=False, enable_memory_saver=False, enable_weights_cpu_backup=False, allow_auto_truncate=False, 
enable_custom_logit_processor=False, flashinfer_mla_disable_ragged=False, disable_shared_experts_fusion=False, disable_chunked_prefix_cache=False, disable_fast_image_processor=False, keep_mm_feature_on_device=False, enable_return_hidden_states=False, scheduler_recv_interval=1, numa_node=None, enable_deterministic_inference=False, rl_on_policy_target=None, enable_dynamic_batch_tokenizer=False, dynamic_batch_tokenizer_batch_size=32, dynamic_batch_tokenizer_batch_timeout=0.002, debug_tensor_dump_output_folder=None, debug_tensor_dump_input_file=None, debug_tensor_dump_inject=False, disaggregation_mode='null', disaggregation_transfer_backend='mooncake', disaggregation_bootstrap_port=8998, disaggregation_decode_tp=None, disaggregation_decode_dp=None, disaggregation_prefill_pp=1, disaggregation_ib_device=None, disaggregation_decode_enable_offload_kvcache=False, num_reserved_decode_tokens=512, disaggregation_decode_polling_interval=1, custom_weight_loader=[], weight_loader_disable_mmap=False, remote_instance_weight_loader_seed_instance_ip=None, remote_instance_weight_loader_seed_instance_service_port=None, remote_instance_weight_loader_send_weights_group_ports=None, enable_pdmux=False, pdmux_config_path=None, sm_group_num=8)\r\n",,terminal_output
+ 94,329705,"TERMINAL",0,0,"[2025-11-03 13:14:49] Using default HuggingFace chat template with detected content format: string\r\n",,terminal_output
+ 95,346598,"TERMINAL",0,0,"[2025-11-03 13:15:06] INFO trace.py:48: opentelemetry package is not installed, tracing disabled\r\n",,terminal_output
+ 96,348586,"TERMINAL",0,0,"[2025-11-03 13:15:08] Init torch distributed begin.\r\n",,terminal_output
+ 97,348951,"TERMINAL",0,0,"[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0\r\n",,terminal_output
+ 98,349007,"TERMINAL",0,0,"[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0\r\n[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0\r\n[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0\r\n[2025-11-03 13:15:08] Init torch distributed ends. mem usage=0.00 GB\r\n",,terminal_output
+ 99,349159,"TERMINAL",0,0,"[2025-11-03 13:15:09] MOE_RUNNER_BACKEND is not initialized, the backend will be automatically selected\r\n",,terminal_output
+ 100,349970,"TERMINAL",0,0,"[2025-11-03 13:15:09] INFO trace.py:48: opentelemetry package is not installed, tracing disabled\r\n",,terminal_output
+ 101,354706,"TERMINAL",0,0,"[2025-11-03 13:15:14] Load weight begin. avail mem=78.68 GB\r\n",,terminal_output
+ 102,355343,"TERMINAL",0,0,"[2025-11-03 13:15:15] TensorFlow version 2.20.0 available.\r\n",,terminal_output
+ 103,360584,"TERMINAL",0,0,"[2025-11-03 13:15:20] Using model weights format ['*.safetensors']\r\n",,terminal_output
+ 104,361111,"TERMINAL",0,0,"[2025-11-03 13:15:20] No model.safetensors.index.json found in remote.\r\n\rLoading safetensors checkpoint shards: 0% Completed | 0/1 [00:00<?, ?it/s]\r\n",,terminal_output
+ 105,362166,"TERMINAL",0,0,"\rLoading safetensors checkpoint shards: 100% Completed | 1/1 [00:00<00:00, 1.03it/s]\r\n\rLoading safetensors checkpoint shards: 100% Completed | 1/1 [00:00<00:00, 1.03it/s]\r\n\r\n[2025-11-03 13:15:21] Load weight end. type=Qwen2ForCausalLM, dtype=torch.bfloat16, avail mem=77.61 GB, mem usage=1.07 GB.\r\n[2025-11-03 13:15:21] Using KV cache dtype: torch.bfloat16\r\n[2025-11-03 13:15:22] KV Cache is allocated. #tokens: 5647121, K size: 32.31 GB, V size: 32.31 GB\r\n[2025-11-03 13:15:22] Memory pool end. avail mem=12.31 GB\r\n",,terminal_output
+ 106,362703,"TERMINAL",0,0,"[2025-11-03 13:15:22] Capture cuda graph begin. This can take up to several minutes. avail mem=12.21 GB\r\n[2025-11-03 13:15:22] Capture cuda graph bs [1, 2, 4, 8, 12, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224, 232, 240, 248, 256]\r\n",,terminal_output
+ 107,363169,"TERMINAL",0,0,"\r 0%| | 0/36 [00:00<?, ?it/s]\rCapturing batches (bs=256 avail_mem=12.00 GB): 0%| | 0/36 [00:00<?, ?it/s]",,terminal_output
+ 108,364442,"TERMINAL",0,0,"\rCapturing batches (bs=256 avail_mem=12.00 GB): 3%|██▎ | 1/36 [00:00<00:32, 1.09it/s]\rCapturing batches (bs=248 avail_mem=11.84 GB): 3%|██▎ | 1/36 [00:00<00:32, 1.09it/s]\rCapturing batches (bs=240 avail_mem=11.83 GB): 3%|██▎ | 1/36 [00:00<00:32, 1.09it/s]\rCapturing batches (bs=232 avail_mem=11.83 GB): 3%|██▎ | 1/36 [00:00<00:32, 1.09it/s]\rCapturing batches (bs=232 avail_mem=11.83 GB): 11%|█████████▍ | 4/36 [00:01<00:06, 4.88it/s]\rCapturing batches (bs=224 avail_mem=11.82 GB): 11%|█████████▍ | 4/36 [00:01<00:06, 4.88it/s]\rCapturing batches (bs=216 avail_mem=11.81 GB): 11%|█████████▍ | 4/36 [00:01<00:06, 4.88it/s]\rCapturing batches (bs=208 avail_mem=11.81 GB): 11%|█████████▍ | 4/36 [00:01<00:06, 4.88it/s]\rCapturing batches (bs=208 avail_mem=11.81 GB): 19%|████████████████▌ | 7/36 [00:01<00:03, 8.61it/s]\rCapturing batches (bs=200 avail_mem=11.81 GB): 19%|████████████████▌ | 7/36 [00:01<00:03, 8.61it/s]\rCapturing batches (bs=192 avail_mem=11.80 GB): 19%|████████████████▌ | 7/36 [00:01<00:03, 8.61it/s]\rCapturing batches (bs=184 avail_mem=11.80 GB): 19%|████████████████▌ | 7/36 [00:01<00:03, 8.61it/s]\rCapturing batches (bs=184 avail_mem=11.80 GB): 28%|███████████████████████▎ | 10/36 [00:01<00:02, 12.28it/s]\rCapturing batches (bs=176 avail_mem=11.79 GB): 28%|███████████████████████▎ | 10/36 [00:01<00:02, 12.28it/s]\rCapturing batches (bs=168 avail_mem=11.79 GB): 28%|███████████████████████▎ | 10/36 [00:01<00:02, 12.28it/s]",,terminal_output
+ 109,365219,"TERMINAL",0,0,"\rCapturing batches (bs=160 avail_mem=11.78 GB): 28%|███████████████████████▎ | 10/36 [00:01<00:02, 12.28it/s]\rCapturing batches (bs=160 avail_mem=11.78 GB): 36%|██████████████████████████████▎ | 13/36 [00:01<00:01, 14.47it/s]\rCapturing batches (bs=152 avail_mem=11.78 GB): 36%|██████████████████████████████▎ | 13/36 [00:01<00:01, 14.47it/s]\rCapturing batches (bs=144 avail_mem=11.77 GB): 36%|██████████████████████████████▎ | 13/36 [00:01<00:01, 14.47it/s]\rCapturing batches (bs=136 avail_mem=11.77 GB): 36%|██████████████████████████████▎ | 13/36 [00:01<00:01, 14.47it/s]\rCapturing batches (bs=136 avail_mem=11.77 GB): 44%|█████████████████████████████████████▎ | 16/36 [00:01<00:01, 17.34it/s]\rCapturing batches (bs=128 avail_mem=11.76 GB): 44%|█████████████████████████████████████▎ | 16/36 [00:01<00:01, 17.34it/s]\rCapturing batches (bs=120 avail_mem=11.76 GB): 44%|█████████████████████████████████████▎ | 16/36 [00:01<00:01, 17.34it/s]\rCapturing batches (bs=112 avail_mem=11.75 GB): 44%|█████████████████████████████████████▎ | 16/36 [00:01<00:01, 17.34it/s]\rCapturing batches (bs=112 avail_mem=11.75 GB): 53%|████████████████████████████████████████████▎ | 19/36 [00:01<00:00, 19.06it/s]\rCapturing batches (bs=104 avail_mem=11.75 GB): 53%|████████████████████████████████████████████▎ | 19/36 [00:01<00:00, 19.06it/s]\rCapturing batches (bs=96 avail_mem=11.75 GB): 53%|████████████████████████████████████████████▊ | 19/36 [00:01<00:00, 19.06it/s]\rCapturing batches (bs=88 avail_mem=11.74 GB): 53%|████████████████████████████████████████████▊ | 19/36 [00:01<00:00, 19.06it/s]\rCapturing batches (bs=88 avail_mem=11.74 GB): 61%|███████████████████████████████████████████████████▉ | 22/36 [00:01<00:00, 20.36it/s]\rCapturing batches (bs=80 avail_mem=11.73 GB): 61%|███████████████████████████████████████████████████▉ | 22/36 [00:01<00:00, 20.36it/s]\rCapturing batches (bs=72 avail_mem=11.73 GB): 61%|███████████████████████████████████████████████████▉ | 22/36 [00:01<00:00, 20.36it/s]\rCapturing batches (bs=64 avail_mem=11.72 GB): 61%|███████████████████████████████████████████████████▉ | 22/36 [00:01<00:00, 20.36it/s]\rCapturing batches (bs=64 avail_mem=11.72 GB): 69%|███████████████████████████████████████████████████████████ | 25/36 [00:01<00:00, 21.65it/s]\rCapturing batches (bs=56 avail_mem=11.72 GB): 69%|███████████████████████████████████████████████████████████ | 25/36 [00:01<00:00, 21.65it/s]\rCapturing batches (bs=48 avail_mem=11.72 GB): 69%|████████████████████████████████████���██████████████████████ | 25/36 [00:01<00:00, 21.65it/s]\rCapturing batches (bs=40 avail_mem=11.71 GB): 69%|███████████████████████████████████████████████████████████ | 25/36 [00:01<00:00, 21.65it/s]\rCapturing batches (bs=40 avail_mem=11.71 GB): 78%|██████████████████████████████████████████████████████████████████ | 28/36 [00:02<00:00, 22.85it/s]\rCapturing batches (bs=32 avail_mem=11.71 GB): 78%|██████████████████████████████████████████████████████████████████ | 28/36 [00:02<00:00, 22.85it/s]\rCapturing batches (bs=24 avail_mem=11.70 GB): 78%|██████████████████████████████████████████████████████████████████ | 28/36 [00:02<00:00, 22.85it/s]\rCapturing batches (bs=16 avail_mem=11.70 GB): 78%|██████████████████████████████████████████████████████████████████ | 28/36 [00:02<00:00, 22.85it/s]",,terminal_output
+ 110,365457,"TERMINAL",0,0,"\rCapturing batches (bs=16 avail_mem=11.70 GB): 86%|█████████████████████████████████████████████████████████████████████████▏ | 31/36 [00:02<00:00, 21.01it/s]\rCapturing batches (bs=12 avail_mem=11.69 GB): 86%|█████████████████████████████████████████████████████████████████████████▏ | 31/36 [00:02<00:00, 21.01it/s]\rCapturing batches (bs=8 avail_mem=11.69 GB): 86%|██████████████████████████████████████████████████████████████████████████ | 31/36 [00:02<00:00, 21.01it/s]\rCapturing batches (bs=4 avail_mem=11.68 GB): 86%|██████████████████████████████████████████████████████████████████████████ | 31/36 [00:02<00:00, 21.01it/s]\rCapturing batches (bs=2 avail_mem=11.68 GB): 86%|██████████████████████████████████████████████████████████████████████████ | 31/36 [00:02<00:00, 21.01it/s]\rCapturing batches (bs=2 avail_mem=11.68 GB): 97%|███████████████████████████████████████████████████████████████████████████████████▌ | 35/36 [00:02<00:00, 24.38it/s]\rCapturing batches (bs=1 avail_mem=11.67 GB): 97%|███████████████████████████████████████████████████████████████████████████████████▌ | 35/36 [00:02<00:00, 24.38it/s]\rCapturing batches (bs=1 avail_mem=11.67 GB): 100%|██████████████████████████████████████████████████████████████████████████████████████| 36/36 [00:02<00:00, 15.53it/s]\r\n",,terminal_output
+ 111,365803,"TERMINAL",0,0,"[2025-11-03 13:15:25] Capture cuda graph end. Time elapsed: 3.10 s. mem usage=0.54 GB. avail mem=11.67 GB.\r\n",,terminal_output
+ 112,366523,"TERMINAL",0,0,"[2025-11-03 13:15:26] max_total_num_tokens=5647121, chunked_prefill_size=8192, max_prefill_tokens=16384, max_running_requests=4096, context_len=32768, available_gpu_mem=11.67 GB\r\n",,terminal_output
+ 113,367067,"TERMINAL",0,0,"[2025-11-03 13:15:26] INFO: Started server process [1848778]\r\n[2025-11-03 13:15:26] INFO: Waiting for application startup.\r\n[2025-11-03 13:15:26] Using default chat sampling params from model generation config: {'repetition_penalty': 1.1, 'temperature': 0.7, 'top_k': 20, 'top_p': 0.8}\r\n",,terminal_output
+ 114,367150,"TERMINAL",0,0,"[2025-11-03 13:15:27] Using default chat sampling params from model generation config: {'repetition_penalty': 1.1, 'temperature': 0.7, 'top_k': 20, 'top_p': 0.8}\r\n[2025-11-03 13:15:27] INFO: Application startup complete.\r\n[2025-11-03 13:15:27] INFO: Uvicorn running on http://0.0.0.0:30000 (Press CTRL+C to quit)\r\n",,terminal_output
+ 115,368184,"TERMINAL",0,0,"[2025-11-03 13:15:28] INFO: 127.0.0.1:57018 - ""GET /get_model_info HTTP/1.1"" 200 OK\r\n[2025-11-03 13:15:28] Prefill batch, #new-seq: 1, #new-token: 6, #cached-token: 0, token usage: 0.00, #running-req: 0, #queue-req: 0, \r\n",,terminal_output
+ 116,370619,"TERMINAL",0,0,"[2025-11-03 13:15:30] INFO: 127.0.0.1:57028 - ""POST /generate HTTP/1.1"" 200 OK\r\n[2025-11-03 13:15:30] The server is fired up and ready to roll!\r\n",,terminal_output
+ 117,913264,"TERMINAL",0,0,"",,terminal_focus
+ 118,934101,"TERMINAL",0,0,"cd",,terminal_command
+ 119,936634,"TERMINAL",0,0,"cd crowd-pilot/",,terminal_command
+ 120,938611,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
+ 121,940201,"TERMINAL",0,0,"python",,terminal_command
+ 122,940251,"TERMINAL",0,0,"]633;C",,terminal_output
+ 123,940308,"TERMINAL",0,0,"Python 3.13.5 (main, Jul 1 2025, 18:37:36) [Clang 20.1.4 ] on linux\r\nType ""help"", ""copyright"", ""credits"" or ""license"" for more information.\r\n",,terminal_output
+ 124,940480,"TERMINAL",0,0,"[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 125,953679,"TERMINAL",0,0,"[?25l\n>>> import requests... [?12l[?25h[?25l\n... [?12l[?25h[?25l... url = f""http://localhost:{port}/v1/chat/completions""[?12l[?25h",,terminal_output
+ 126,954821,"TERMINAL",0,0,"",,terminal_output
+ 127,954978,"TERMINAL",0,0,"",,terminal_output
+ 128,955111,"TERMINAL",0,0,"",,terminal_output
+ 129,955280,"TERMINAL",0,0,"",,terminal_output
+ 130,956069,"TERMINAL",0,0,"",,terminal_output
+ 131,956484,"TERMINAL",0,0,"[?25l... url = f""http://localhost:{port/v1/chat/completions""[?12l[?25h",,terminal_output
+ 132,957029,"TERMINAL",0,0,"[?25l... url = f""http://localhost:{por/v1/chat/completions""[?12l[?25h[?25l... url = f""http://localhost:{po/v1/chat/completions""[?12l[?25h[?25l... url = f""http://localhost:{p/v1/chat/completions""[?12l[?25h[?25l... url = f""http://localhost:{/v1/chat/completions""[?12l[?25h[?25l... url = f""http://localhost:/v1/chat/completions""[?12l[?25h[?25l... url = f""http://localhost/v1/chat/completions""[?12l[?25h[?25l... url = f""http://localhos/v1/chat/completions""[?12l[?25h[?25l... url = f""http://localho/v1/chat/completions""[?12l[?25h[?25l... url = f""http://localh/v1/chat/completions""[?12l[?25h[?25l... url = f""http://local/v1/chat/completions""[?12l[?25h",,terminal_output
+ 133,957146,"TERMINAL",0,0,"[?25l... url = f""http://loca/v1/chat/completions""[?12l[?25h",,terminal_output
+ 134,957298,"TERMINAL",0,0,"[?25l... url = f""http://loc/v1/chat/completions""[?12l[?25h",,terminal_output
+ 135,957432,"TERMINAL",0,0,"[?25l... url = f""http://lo/v1/chat/completions""[?12l[?25h",,terminal_output
+ 136,957569,"TERMINAL",0,0,"[?25l... url = f""http://l/v1/chat/completions""[?12l[?25h",,terminal_output
+ 137,958025,"TERMINAL",0,0,"[?25l... url = f""http:///v1/chat/completions""[?12l[?25h",,terminal_output
+ 138,959244,"TERMINAL",0,0,"",,terminal_focus
+ 139,960619,"TERMINAL",0,0,"squeue",,terminal_command
+ 140,960623,"TERMINAL",0,0,"]633;C JOBID USER PARTITION NODES CPUS ST SUBMIT_TIME START_TIME TIME TIME_LIMIT NODELIST(REASON)\r\n 33333 franz.sram interacti 1 20 R 2025-11-03T13:14:01 2025-11-03T13:14:01 11:19 1-00:00:00 hai003\r\n 33317 xiao.liu interacti 1 128 R 2025-11-02T17:43:38 2025-11-02T17:43:38 19:41:42 23:59:00 hai006\r\n 33328 kalyan.nad standard 1 64 R 2025-11-03T11:56:23 2025-11-03T11:56:38 1:28:42 1-00:00:00 hai002\r\n 33318 xiao.liu standard 1 128 R 2025-11-02T19:29:40 2025-11-02T19:30:38 17:54:42 23:59:00 hai004\r\n]0;franz.srambical@hai-login1:~/jafar/slurm/dev/franz/berlin/crowd-pilot",,terminal_output
+ 141,965159,"TERMINAL",0,0,"python",,terminal_focus
+ 142,965738,"TERMINAL",0,0,"srun",,terminal_focus
+ 143,966961,"TERMINAL",0,0,"python",,terminal_focus
+ 144,968671,"TERMINAL",0,0,"[?25l... url = f""http://h/v1/chat/completions""[?12l[?25h",,terminal_output
+ 145,968869,"TERMINAL",0,0,"[?25l... url = f""http://ha/v1/chat/completions""[?12l[?25h[?25l... url = f""http://hai/v1/chat/completions""[?12l[?25h",,terminal_output
+ 146,969460,"TERMINAL",0,0,"[?25l... url = f""http://hai0/v1/chat/completions""[?12l[?25h",,terminal_output
+ 147,969519,"TERMINAL",0,0,"[?25l... url = f""http://hai00/v1/chat/completions""[?12l[?25h",,terminal_output
+ 148,969617,"TERMINAL",0,0,"[?25l... url = f""http://hai003/v1/chat/completions""[?12l[?25h",,terminal_output
+ 149,970391,"TERMINAL",0,0,"[?25l... url = f""http://hai003:/v1/chat/completions""[?12l[?25h",,terminal_output
+ 150,973152,"TERMINAL",0,0,"srun",,terminal_focus
+ 151,975372,"TERMINAL",0,0,"python",,terminal_focus
+ 152,977464,"TERMINAL",0,0,"[?25l... url = f""http://hai003:3/v1/chat/completions""[?12l[?25h",,terminal_output
+ 153,977548,"TERMINAL",0,0,"[?25l... url = f""http://hai003:30/v1/chat/completions""[?12l[?25h",,terminal_output
+ 154,978092,"TERMINAL",0,0,"[?25l... url = f""http://hai003:300/v1/chat/completions""[?12l[?25h",,terminal_output
+ 155,978256,"TERMINAL",0,0,"[?25l... url = f""http://hai003:3000/v1/chat/completions""[?12l[?25h",,terminal_output
+ 156,978414,"TERMINAL",0,0,"[?25l... url = f""http://hai003:30000/v1/chat/completions""[?12l[?25h",,terminal_output
+ 157,978848,"TERMINAL",0,0,"",,terminal_output
+ 158,979078,"TERMINAL",0,0,"[?25l\n... [?12l[?25h",,terminal_output
+ 159,981853,"TERMINAL",0,0,"\n\r[?2004l[?1l>",,terminal_output
+ 160,982027,"TERMINAL",0,0,"[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 161,986222,"TERMINAL",0,0,"[?25l\n>>> data = {... [?12l[?25h[?25l\n...  ""model"": ""qwen/qwen2.5-0.5b-instruct"",... [?12l[?25h[?25l\n...  ""messages"": [{""role"": ""user"", ""content"": ""What is the capital of France?""}],... [?12l[?25h[?25l... }[?12l[?25h",,terminal_output
+ 162,986446,"TERMINAL",0,0,"\n\r[?2004l[?1l>[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 163,986621,"TERMINAL",0,0,"\n\r[?2004l[?1l>[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 164,990025,"TERMINAL",0,0,"[?25l>>> response = requests.post(url, json=data)[?12l[?25h",,terminal_output
+ 165,990225,"TERMINAL",0,0,"\n\r[?2004l[?1l>",,terminal_output
+ 166,990235,"TERMINAL",0,0,"[2025-11-03 13:25:50] Prefill batch, #new-seq: 1, #new-token: 36, #cached-token: 0, token usage: 0.00, #running-req: 0, #queue-req: 0, \r\n",,terminal_output
+ 167,990372,"TERMINAL",0,0,"\r\n[?2004h[?1h=[?25l\n>>> [?12l[?25h\n\r[?2004l[?1l>[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 168,990374,"TERMINAL",0,0,"[2025-11-03 13:25:50] INFO: 10.86.2.251:49732 - ""POST /v1/chat/completions HTTP/1.1"" 200 OK\r\n",,terminal_output
+ 169,992718,"TERMINAL",0,0,"[?25l>>> p[?12l[?25h",,terminal_output
+ 170,992795,"TERMINAL",0,0,"[?25l>>> pr[?12l[?25h",,terminal_output
+ 171,992936,"TERMINAL",0,0,"[?25l>>> pri[?12l[?25h[?25l>>> prin[?12l[?25h",,terminal_output
+ 172,993015,"TERMINAL",0,0,"[?25l>>> print[?12l[?25h",,terminal_output
+ 173,993292,"TERMINAL",0,0,"[?25l>>> print([?12l[?25h",,terminal_output
+ 174,993402,"TERMINAL",0,0,"[?25l>>> print()[?12l[?25h",,terminal_output
+ 175,993786,"TERMINAL",0,0,"",,terminal_output
+ 176,994396,"TERMINAL",0,0,"[?25l>>> print(r)[?12l[?25h",,terminal_output
+ 177,994490,"TERMINAL",0,0,"[?25l>>> print(re)[?12l[?25h",,terminal_output
+ 178,994656,"TERMINAL",0,0,"[?25l>>> print(res)[?12l[?25h",,terminal_output
+ 179,994799,"TERMINAL",0,0,"[?25l>>> print(response)[?12l[?25h",,terminal_output
+ 180,996827,"TERMINAL",0,0,"[?25l>>> print(response.)[?12l[?25h",,terminal_output
+ 181,996908,"TERMINAL",0,0,"[?25l>>> print(response.s)[?12l[?25h",,terminal_output
+ 182,997008,"TERMINAL",0,0,"[?25l>>> print(response.sj)[?12l[?25h",,terminal_output
+ 183,997447,"TERMINAL",0,0,"[?25l>>> print(response.s)[?12l[?25h",,terminal_output
+ 184,997591,"TERMINAL",0,0,"[?25l>>> print(response.)[?12l[?25h",,terminal_output
+ 185,997715,"TERMINAL",0,0,"[?25l>>> print(response.j)[?12l[?25h",,terminal_output
+ 186,997801,"TERMINAL",0,0,"[?25l>>> print(response.js)[?12l[?25h",,terminal_output
+ 187,997933,"TERMINAL",0,0,"[?25l>>> print(response.json()[?12l[?25h",,terminal_output
+ 188,998729,"TERMINAL",0,0,"",,terminal_output
+ 189,998898,"TERMINAL",0,0,"[?25l\n... [?12l[?25h",,terminal_output
+ 190,1000184,"TERMINAL",0,0,"[?25l... _[?12l[?25h",,terminal_output
+ 191,1000759,"TERMINAL",0,0,"[?25l... [?12l[?25h",,terminal_output
+ 192,1001046,"TERMINAL",0,0,"[?25l... )[?12l[?25h",,terminal_output
+ 193,1001598,"TERMINAL",0,0,"\n\r[?2004l[?1l>{'id': 'b76c04917df44d9aaf99082ae9769ded', 'object': 'chat.completion', 'created': 1762172750, 'model': 'qwen/qwen2.5-0.5b-instruct', 'choices': [{'index': 0, 'message': {'role': 'assistant', 'content': 'The capital of France is Paris.', 'reasoning_content': None, 'tool_calls': None}, 'logprobs': None, 'finish_reason': 'stop', 'matched_stop': 151645}], 'usage': {'prompt_tokens': 36, 'total_tokens': 44, 'completion_tokens': 8, 'prompt_tokens_details': None, 'reasoning_tokens': 0}, 'metadata': {'weight_version': 'default'}}\r\n[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 194,1024494,"TERMINAL",0,0,"srun",,terminal_focus
+ 195,1055248,"TERMINAL",0,0,"python",,terminal_focus
+ 196,1241705,"TERMINAL",0,0,"[2025-11-03 13:30:01] SIGTERM received. signum=None frame=None. Draining requests and shutting down...\r\n",,terminal_output
+ 197,1243235,"TERMINAL",0,0,"[2025-11-03 13:30:03] Gracefully exiting... Remaining number of requests 0. Remaining requests remaining_rids=[].\r\n",,terminal_output
+ 198,1243545,"TERMINAL",0,0,"Killed\r\n]0;franz.srambical@hai-login1:~/crowd-pilot[?2004h(crowd-pilot) [franz.srambical@hai003.haicore.berlin:~/crowd-pilot] $ ",,terminal_output
+ 199,1381345,"TERMINAL",0,0,"[?25l\n>>> print(response.json()... )[?12l[?25h",,terminal_output
+ 200,1381698,"TERMINAL",0,0,"[?25l>>> response = requests.post(url, json=data)[?12l[?25h",,terminal_output
+ 201,1382817,"TERMINAL",0,0,"[?25l\n\n\n>>> data = {...  ""model"": ""qwen/qwen2.5-0.5b-instruct"",...  ""messages"": [{""role"": ""user"", ""content"": ""What is the capital of France?""}],... }[?12l[?25h",,terminal_output
+ 202,1383740,"TERMINAL",0,0,"[?25l>>> import requests... ... url = f""http://hai003:30000/v1/chat/completions""[?12l[?25h",,terminal_output
+ 203,1385966,"TERMINAL",0,0,"[?25l\n... [?12l[?25h",,terminal_output
+ 204,1386716,"TERMINAL",0,0,"\n\r[?2004l[?1l>[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 205,1387796,"TERMINAL",0,0,"[?25l\n\n>>> import requests... ... url = f""http://hai003:30000/v1/chat/completions""[?12l[?25h",,terminal_output
+ 206,1387869,"TERMINAL",0,0,"[?25l>>> print(response.json()... )[?12l[?25h",,terminal_output
+ 207,1388047,"TERMINAL",0,0,"[?25l>>> response = requests.post(url, json=data)[?12l[?25h",,terminal_output
+ 208,1388536,"TERMINAL",0,0,"[?25l\n\n\n>>> data = {...  ""model"": ""qwen/qwen2.5-0.5b-instruct"",...  ""messages"": [{""role"": ""user"", ""content"": ""What is the capital of France?""}],... }[?12l[?25h",,terminal_output
+ 209,1390082,"TERMINAL",0,0,"[?25l>>> import requests... ... url = f""http://hai003:30000/v1/chat/completions""[?12l[?25h",,terminal_output
+ 210,1390618,"TERMINAL",0,0,"[?25l\n>>> data = {...  ""model"": ""qwen/qwen2.5-0.5b-instruct"",...  ""messages"": [{""role"": ""user"", ""content"": ""What is the capital of France?""}],... }[?12l[?25h",,terminal_output
+ 211,1391047,"TERMINAL",0,0,"\n\r[?2004l[?1l>[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 212,1391308,"TERMINAL",0,0,"\n\r[?2004l[?1l>[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 213,1391930,"TERMINAL",0,0,"[?25l\n\n\n>>> data = {...  ""model"": ""qwen/qwen2.5-0.5b-instruct"",...  ""messages"": [{""role"": ""user"", ""content"": ""What is the capital of France?""}],... }[?12l[?25h",,terminal_output
+ 214,1392014,"TERMINAL",0,0,"[?25l>>> import requests... ... url = f""http://hai003:30000/v1/chat/completions""[?12l[?25h",,terminal_output
+ 215,1392471,"TERMINAL",0,0,"[?25l>>> print(response.json()... )[?12l[?25h",,terminal_output
+ 216,1392947,"TERMINAL",0,0,"[?25l>>> response = requests.post(url, json=data)[?12l[?25h",,terminal_output
+ 217,1393731,"TERMINAL",0,0,"\n\r[?2004l[?1l>Traceback (most recent call last):\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/connection.py"", line 198, in _new_conn\r\n sock = connection.create_connection(\r\n (self._dns_host, self.port),\r\n ...<2 lines>...\r\n socket_options=self.socket_options,\r\n )\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/util/connection.py"", line 85, in create_connection\r\n raise err\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/util/connection.py"", line 73, in create_connection\r\n sock.connect(sa)\r\n ~~~~~~~~~~~~^^^^\r\nConnectionRefusedError: [Errno 111] Connection refused\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/connectionpool.py"", line 787, in urlopen\r\n response = self._make_request(\r\n conn,\r\n ...<10 lines>...\r\n **response_kw,\r\n )\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/connectionpool.py"", line 493, in _make_request\r\n conn.request(\r\n ~~~~~~~~~~~~^\r\n method,\r\n ^^^^^^^\r\n ...<6 lines>...\r\n enforce_content_length=enforce_content_length,\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n )\r\n ^\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/connection.py"", line 494, in request\r\n self.endheaders()\r\n ~~~~~~~~~~~~~~~^^\r\n File ""/home/franz.srambical/.local/share/uv/python/cpython-3.13.5-linux-x86_64-gnu/lib/python3.13/http/client.py"", line 1333, in endheaders\r\n self._send_output(message_body, encode_chunked=encode_chunked)\r\n ~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File ""/home/franz.srambical/.local/share/uv/python/cpython-3.13.5-linux-x86_64-gnu/lib/python3.13/http/client.py"", line 1093, in _send_output\r\n self.send(msg)\r\n ~~~~~~~~~^^^^^\r\n File ""/home/franz.srambical/.local/share/uv/python/cpython-3.13.5-linux-x86_64-gnu/lib/python3.13/http/client.py"", line 1037, in send\r\n self.connect()\r\n ~~~~~~~~~~~~^^\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/connection.py"", line 325, in connect\r\n self.sock = self._new_conn()\r\n ~~~~~~~~~~~~~~^^\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/connection.py"", line 213, in _new_conn\r\n raise NewConnectionError(\r\n self, f""Failed to establish a new connection: {e}""\r\n ) from e\r\nurllib3.exceptions.NewConnectionError: <urllib3.connection.HTTPConnection object at 0x7f4217d02fd0>: Failed to establish a new connection: [Errno 111] Connection refused\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/requests/adapters.py"", line 644, in send\r\n resp = conn.urlopen(\r\n method=request.method,\r\n ...<9 lines>...\r\n chunked=chunked,\r\n )\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/connectionpool.py"", line 841, in urlopen\r\n retries = retries.increment(\r\n method, url, error=new_e, _pool=self, _stacktrace=sys.exc_info()[2]\r\n )\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/urllib3/util/retry.py"", line 
519, in increment\r\n raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nurllib3.exceptions.MaxRetryError: HTTPConnectionPool(host='hai003', port=30000): Max retries exceeded with url: /v1/chat/completions (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f4217d02fd0>: Failed to establish a new connection: [Errno 111] Connection refused'))\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File ""<python-input-9>"", line 1, in <module>\r\n response = requests.post(url, json=data)\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/requests/api.py"", line 115, in post\r\n return request(""post"", url, data=data, json=json, **kwargs)\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/requests/api.py"", line 59, in request\r\n return session.request(method=method, url=url, **kwargs)\r\n ~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/requests/sessions.py"", line 589, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/requests/sessions.py"", line 703, in send\r\n r = adapter.send(request, **kwargs)\r\n File ""/fast/home/franz.srambical/crowd-pilot/.venv/lib/python3.13/site-packages/requests/adapters.py"", line 677, in send\r\n raise ConnectionError(e, request=request)\r\nrequests.exceptions.ConnectionError: HTTPConnectionPool(host='hai003', port=30000): Max retries exceeded with url: /v1/chat/completions (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f4217d02fd0>: Failed to establish a new connection: [Errno 111] Connection refused'))\r\n[?2004h[?1h=[?25l\n>>> [?12l[?25h",,terminal_output
+ 218,1398331,"TERMINAL",0,0,"srun",,terminal_focus
+ 219,1403264,"TERMINAL",0,0,"python3 -m sglang.launch_server --model-path qwen/qwen2.5-0.5b-instruct --host 0.0.0.0",,terminal_output
+ 220,1403498,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output
+ 221,1413006,"TERMINAL",0,0,"2025-11-03 13:32:52.861020: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\r\n",,terminal_output
+ 222,1413064,"TERMINAL",0,0,"2025-11-03 13:32:52.916612: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\r\nTo enable the following instructions: AVX2 AVX512F AVX512_VNNI AVX512_BF16 AVX512_FP16 AVX_VNNI AMX_TILE AMX_INT8 AMX_BF16 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\r\n",,terminal_output
+ 223,1415760,"TERMINAL",0,0,"2025-11-03 13:32:55.587656: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\r\n",,terminal_output
+ 224,1422741,"TERMINAL",0,0,"[2025-11-03 13:33:02] WARNING server_args.py:1104: Attention backend not explicitly specified. Use fa3 backend by default.\r\n[2025-11-03 13:33:02] INFO trace.py:48: opentelemetry package is not installed, tracing disabled\r\n",,terminal_output
+ 225,1423202,"TERMINAL",0,0,"[2025-11-03 13:33:03] server_args=ServerArgs(model_path='qwen/qwen2.5-0.5b-instruct', tokenizer_path='qwen/qwen2.5-0.5b-instruct', tokenizer_mode='auto', tokenizer_worker_num=1, skip_tokenizer_init=False, load_format='auto', model_loader_extra_config='{}', trust_remote_code=False, context_length=None, is_embedding=False, enable_multimodal=None, revision=None, model_impl='auto', host='0.0.0.0', port=30000, grpc_mode=False, skip_server_warmup=False, warmups=None, nccl_port=None, checkpoint_engine_wait_weights_before_ready=False, dtype='auto', quantization=None, quantization_param_path=None, kv_cache_dtype='auto', enable_fp32_lm_head=False, modelopt_quant=None, modelopt_checkpoint_restore_path=None, modelopt_checkpoint_save_path=None, modelopt_export_path=None, quantize_and_serve=False, mem_fraction_static=0.835, max_running_requests=None, max_queued_requests=None, max_total_tokens=None, chunked_prefill_size=8192, max_prefill_tokens=16384, schedule_policy='fcfs', enable_priority_scheduling=False, abort_on_priority_when_disabled=False, schedule_low_priority_values_first=False, priority_scheduling_preemption_threshold=10, schedule_conservativeness=1.0, page_size=1, hybrid_kvcache_ratio=None, swa_full_tokens_ratio=0.8, disable_hybrid_swa_memory=False, radix_eviction_policy='lru', device='cuda', tp_size=1, pp_size=1, pp_max_micro_batch_size=None, stream_interval=1, stream_output=False, random_seed=417737316, constrained_json_whitespace_pattern=None, constrained_json_disable_any_whitespace=False, watchdog_timeout=300, dist_timeout=None, download_dir=None, base_gpu_id=0, gpu_id_step=1, sleep_on_idle=False, log_level='info', log_level_http=None, log_requests=False, log_requests_level=2, crash_dump_folder=None, show_time_cost=False, enable_metrics=False, enable_metrics_for_all_schedulers=False, tokenizer_metrics_custom_labels_header='x-custom-labels', tokenizer_metrics_allowed_custom_labels=None, bucket_time_to_first_token=None, bucket_inter_token_latency=None, bucket_e2e_request_latency=None, collect_tokens_histogram=False, prompt_tokens_buckets=None, generation_tokens_buckets=None, gc_warning_threshold_secs=0.0, decode_log_interval=40, enable_request_time_stats_logging=False, kv_events_config=None, enable_trace=False, oltp_traces_endpoint='localhost:4317', api_key=None, served_model_name='qwen/qwen2.5-0.5b-instruct', weight_version='default', chat_template=None, completion_template=None, file_storage_path='sglang_storage', enable_cache_report=False, reasoning_parser=None, tool_call_parser=None, tool_server=None, sampling_defaults='model', dp_size=1, load_balance_method='round_robin', load_watch_interval=0.1, prefill_round_robin_balance=False, dist_init_addr=None, nnodes=1, node_rank=0, json_model_override_args='{}', preferred_sampling_params=None, enable_lora=None, max_lora_rank=None, lora_target_modules=None, lora_paths=None, max_loaded_loras=None, max_loras_per_batch=8, lora_eviction_policy='lru', lora_backend='triton', max_lora_chunk_size=16, attention_backend='fa3', decode_attention_backend=None, prefill_attention_backend=None, sampling_backend='flashinfer', grammar_backend='xgrammar', mm_attention_backend=None, nsa_prefill_backend='flashmla_sparse', nsa_decode_backend='fa3', speculative_algorithm=None, speculative_draft_model_path=None, speculative_draft_model_revision=None, speculative_draft_load_format=None, speculative_num_steps=None, speculative_eagle_topk=None, speculative_num_draft_tokens=None, speculative_accept_threshold_single=1.0, 
speculative_accept_threshold_acc=1.0, speculative_token_map=None, speculative_attention_mode='prefill', speculative_ngram_min_match_window_size=1, speculative_ngram_max_match_window_size=12, speculative_ngram_min_bfs_breadth=1, speculative_ngram_max_bfs_breadth=10, speculative_ngram_match_type='BFS', speculative_ngram_branch_length=18, speculative_ngram_capacity=10000000, ep_size=1, moe_a2a_backend='none', moe_runner_backend='auto', flashinfer_mxfp4_moe_precision='default', enable_flashinfer_allreduce_fusion=False, deepep_mode='auto', ep_num_redundant_experts=0, ep_dispatch_algorithm='static', init_expert_location='trivial', enable_eplb=False, eplb_algorithm='auto', eplb_rebalance_num_iterations=1000, eplb_rebalance_layers_per_chunk=None, eplb_min_rebalancing_utilization_threshold=1.0, expert_distribution_recorder_mode=None, expert_distribution_recorder_buffer_size=1000, enable_expert_distribution_metrics=False, deepep_config=None, moe_dense_tp_size=None, elastic_ep_backend=None, mooncake_ib_device=None, max_mamba_cache_size=None, mamba_ssm_dtype='float32', mamba_full_memory_ratio=0.9, enable_hierarchical_cache=False, hicache_ratio=2.0, hicache_size=0, hicache_write_policy='write_through', hicache_io_backend='kernel', hicache_mem_layout='layer_first', hicache_storage_backend=None, hicache_storage_prefetch_policy='best_effort', hicache_storage_backend_extra_config=None, enable_lmcache=False, kt_amx_weight_path=None, kt_amx_method='AMXINT4', kt_cpuinfer=None, kt_threadpool_count=2, kt_num_gpu_experts=None, enable_double_sparsity=False, ds_channel_config_path=None, ds_heavy_channel_num=32, ds_heavy_token_num=256, ds_heavy_channel_type='qk', ds_sparse_decode_threshold=4096, cpu_offload_gb=0, offload_group_size=-1, offload_num_in_group=1, offload_prefetch_step=1, offload_mode='cpu', multi_item_scoring_delimiter=None, disable_radix_cache=False, cuda_graph_max_bs=256, cuda_graph_bs=[1, 2, 4, 8, 12, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224, 232, 240, 248, 256], disable_cuda_graph=False, disable_cuda_graph_padding=False, enable_profile_cuda_graph=False, enable_cudagraph_gc=False, enable_nccl_nvls=False, enable_symm_mem=False, disable_flashinfer_cutlass_moe_fp4_allgather=False, enable_tokenizer_batch_encode=False, disable_tokenizer_batch_decode=False, disable_outlines_disk_cache=False, disable_custom_all_reduce=False, enable_mscclpp=False, enable_torch_symm_mem=False, disable_overlap_schedule=False, enable_mixed_chunk=False, enable_dp_attention=False, enable_dp_lm_head=False, enable_two_batch_overlap=False, enable_single_batch_overlap=False, tbo_token_distribution_threshold=0.48, enable_torch_compile=False, enable_piecewise_cuda_graph=False, torch_compile_max_bs=32, piecewise_cuda_graph_max_tokens=4096, piecewise_cuda_graph_tokens=[4, 8, 12, 16, 20, 24, 28, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 640, 768, 896, 1024, 1152, 1280, 1408, 1536, 1664, 1792, 1920, 2048, 2176, 2304, 2432, 2560, 2688, 2816, 2944, 3072, 3200, 3328, 3456, 3584, 3712, 3840, 3968, 4096], piecewise_cuda_graph_compiler='eager', torchao_config='', enable_nan_detection=False, enable_p2p_check=False, triton_attention_reduce_in_fp32=False, triton_attention_num_kv_splits=8, triton_attention_split_tile_size=None, num_continuous_decode_steps=1, delete_ckpt_after_loading=False, enable_memory_saver=False, enable_weights_cpu_backup=False, allow_auto_truncate=False, 
enable_custom_logit_processor=False, flashinfer_mla_disable_ragged=False, disable_shared_experts_fusion=False, disable_chunked_prefix_cache=False, disable_fast_image_processor=False, keep_mm_feature_on_device=False, enable_return_hidden_states=False, scheduler_recv_interval=1, numa_node=None, enable_deterministic_inference=False, rl_on_policy_target=None, enable_dynamic_batch_tokenizer=False, dynamic_batch_tokenizer_batch_size=32, dynamic_batch_tokenizer_batch_timeout=0.002, debug_tensor_dump_output_folder=None, debug_tensor_dump_input_file=None, debug_tensor_dump_inject=False, disaggregation_mode='null', disaggregation_transfer_backend='mooncake', disaggregation_bootstrap_port=8998, disaggregation_decode_tp=None, disaggregation_decode_dp=None, disaggregation_prefill_pp=1, disaggregation_ib_device=None, disaggregation_decode_enable_offload_kvcache=False, num_reserved_decode_tokens=512, disaggregation_decode_polling_interval=1, custom_weight_loader=[], weight_loader_disable_mmap=False, remote_instance_weight_loader_seed_instance_ip=None, remote_instance_weight_loader_seed_instance_service_port=None, remote_instance_weight_loader_send_weights_group_ports=None, enable_pdmux=False, pdmux_config_path=None, sm_group_num=8)\r\n",,terminal_output
+ 226,1424166,"TERMINAL",0,0,"[2025-11-03 13:33:04] Using default HuggingFace chat template with detected content format: string\r\n",,terminal_output
+ 227,1440312,"TERMINAL",0,0,"[2025-11-03 13:33:20] INFO trace.py:48: opentelemetry package is not installed, tracing disabled\r\n",,terminal_output
+ 228,1445360,"TERMINAL",0,0,"[2025-11-03 13:33:25] INFO trace.py:48: opentelemetry package is not installed, tracing disabled\r\n",,terminal_output
+ 229,1447450,"TERMINAL",0,0,"[2025-11-03 13:33:27] Init torch distributed begin.\r\n",,terminal_output
+ 230,1447715,"TERMINAL",0,0,"[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0\r\n[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0\r\n[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0\r\n[Gloo] Rank 0 is connected to 0 peer ranks. Expected number of connected peer ranks is : 0\r\n[2025-11-03 13:33:27] Init torch distributed ends. mem usage=0.00 GB\r\n",,terminal_output
+ 231,1447771,"TERMINAL",0,0,"[2025-11-03 13:33:27] MOE_RUNNER_BACKEND is not initialized, the backend will be automatically selected\r\n",,terminal_output
+ 232,1448948,"TERMINAL",0,0,"[2025-11-03 13:33:28] Load weight begin. avail mem=78.68 GB\r\n",,terminal_output
+ 233,1449042,"TERMINAL",0,0,"[2025-11-03 13:33:28] TensorFlow version 2.20.0 available.\r\n",,terminal_output
+ 234,1450092,"TERMINAL",0,0,"[2025-11-03 13:33:29] Using model weights format ['*.safetensors']\r\n",,terminal_output
+ 235,1450641,"TERMINAL",0,0,"[2025-11-03 13:33:30] No model.safetensors.index.json found in remote.\r\n\rLoading safetensors checkpoint shards: 0% Completed | 0/1 [00:00<?, ?it/s]\r\n",,terminal_output
+ 236,1450848,"TERMINAL",0,0,"\rLoading safetensors checkpoint shards: 100% Completed | 1/1 [00:00<00:00, 6.08it/s]\r\n\rLoading safetensors checkpoint shards: 100% Completed | 1/1 [00:00<00:00, 6.07it/s]\r\n\r\n[2025-11-03 13:33:30] Load weight end. type=Qwen2ForCausalLM, dtype=torch.bfloat16, avail mem=77.61 GB, mem usage=1.07 GB.\r\n[2025-11-03 13:33:30] Using KV cache dtype: torch.bfloat16\r\n[2025-11-03 13:33:30] KV Cache is allocated. #tokens: 5647121, K size: 32.31 GB, V size: 32.31 GB\r\n[2025-11-03 13:33:30] Memory pool end. avail mem=12.31 GB\r\n",,terminal_output
+ 237,1450942,"TERMINAL",0,0,"[2025-11-03 13:33:30] Capture cuda graph begin. This can take up to several minutes. avail mem=12.21 GB\r\n[2025-11-03 13:33:30] Capture cuda graph bs [1, 2, 4, 8, 12, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200, 208, 216, 224, 232, 240, 248, 256]\r\n",,terminal_output
+ 238,1451331,"TERMINAL",0,0,"\r 0%| | 0/36 [00:00<?, ?it/s]\rCapturing batches (bs=256 avail_mem=12.00 GB): 0%| | 0/36 [00:00<?, ?it/s]",,terminal_output
+ 239,1451596,"TERMINAL",0,0,"\rCapturing batches (bs=256 avail_mem=12.00 GB): 3%|█▊ | 1/36 [00:00<00:08, 4.22it/s]\rCapturing batches (bs=248 avail_mem=11.84 GB): 3%|█▊ | 1/36 [00:00<00:08, 4.22it/s]\rCapturing batches (bs=240 avail_mem=11.83 GB): 3%|█▊ | 1/36 [00:00<00:08, 4.22it/s]",,terminal_output
+ 240,1452423,"TERMINAL",0,0,"\rCapturing batches (bs=232 avail_mem=11.83 GB): 3%|█▊ | 1/36 [00:00<00:08, 4.22it/s]\rCapturing batches (bs=232 avail_mem=11.83 GB): 11%|███████▏ | 4/36 [00:00<00:02, 13.09it/s]\rCapturing batches (bs=224 avail_mem=11.82 GB): 11%|███████▏ | 4/36 [00:00<00:02, 13.09it/s]\rCapturing batches (bs=216 avail_mem=11.81 GB): 11%|███████▏ | 4/36 [00:00<00:02, 13.09it/s]\rCapturing batches (bs=208 avail_mem=11.81 GB): 11%|███████▏ | 4/36 [00:00<00:02, 13.09it/s]\rCapturing batches (bs=208 avail_mem=11.81 GB): 19%|████████████▋ | 7/36 [00:00<00:01, 17.78it/s]\rCapturing batches (bs=200 avail_mem=11.81 GB): 19%|████████████▋ | 7/36 [00:00<00:01, 17.78it/s]\rCapturing batches (bs=192 avail_mem=11.80 GB): 19%|████████████▋ | 7/36 [00:00<00:01, 17.78it/s]\rCapturing batches (bs=184 avail_mem=11.80 GB): 19%|████████████▋ | 7/36 [00:00<00:01, 17.78it/s]\rCapturing batches (bs=184 avail_mem=11.80 GB): 28%|█████████████████▊ | 10/36 [00:00<00:01, 21.11it/s]\rCapturing batches (bs=176 avail_mem=11.79 GB): 28%|█████████████████▊ | 10/36 [00:00<00:01, 21.11it/s]\rCapturing batches (bs=168 avail_mem=11.79 GB): 28%|█████████████████▊ | 10/36 [00:00<00:01, 21.11it/s]\rCapturing batches (bs=160 avail_mem=11.78 GB): 28%|█████████████████▊ | 10/36 [00:00<00:01, 21.11it/s]\rCapturing batches (bs=160 avail_mem=11.78 GB): 36%|███████████████████████ | 13/36 [00:00<00:01, 22.38it/s]\rCapturing batches (bs=152 avail_mem=11.78 GB): 36%|███████████████████████ | 13/36 [00:00<00:01, 22.38it/s]\rCapturing batches (bs=144 avail_mem=11.77 GB): 36%|███████████████████████ | 13/36 [00:00<00:01, 22.38it/s]\rCapturing batches (bs=136 avail_mem=11.77 GB): 36%|███████████████████████ | 13/36 [00:00<00:01, 22.38it/s]\rCapturing batches (bs=136 avail_mem=11.77 GB): 44%|████████████████████████████▍ | 16/36 [00:00<00:00, 23.98it/s]\rCapturing batches (bs=128 avail_mem=11.76 GB): 44%|████████████████████████████▍ | 16/36 [00:00<00:00, 23.98it/s]\rCapturing batches (bs=120 avail_mem=11.76 GB): 44%|████████████████████████████▍ | 16/36 [00:00<00:00, 23.98it/s]\rCapturing batches (bs=112 avail_mem=11.75 GB): 44%|████████████████████████████▍ | 16/36 [00:00<00:00, 23.98it/s]\rCapturing batches (bs=112 avail_mem=11.75 GB): 53%|█████████████████████████████████▊ | 19/36 [00:00<00:00, 24.01it/s]\rCapturing batches (bs=104 avail_mem=11.75 GB): 53%|█████████████████████████████████▊ | 19/36 [00:00<00:00, 24.01it/s]\rCapturing batches (bs=96 avail_mem=11.75 GB): 53%|██████████████████████████████████▎ | 19/36 [00:00<00:00, 24.01it/s]\rCapturing batches (bs=88 avail_mem=11.74 GB): 53%|██████████████████████████████████▎ | 19/36 [00:01<00:00, 24.01it/s]\rCapturing batches (bs=88 avail_mem=11.74 GB): 61%|█████████��█████████████████████████████▋ | 22/36 [00:01<00:00, 24.03it/s]\rCapturing batches (bs=80 avail_mem=11.73 GB): 61%|███████████████████████████████████████▋ | 22/36 [00:01<00:00, 24.03it/s]\rCapturing batches (bs=72 avail_mem=11.73 GB): 61%|███████████████████████████████████████▋ | 22/36 [00:01<00:00, 24.03it/s]",,terminal_output
+ 241,1452687,"TERMINAL",0,0,"\rCapturing batches (bs=64 avail_mem=11.72 GB): 61%|███████████████████████████████████████▋ | 22/36 [00:01<00:00, 24.03it/s]\rCapturing batches (bs=64 avail_mem=11.72 GB): 69%|█████████████████████████████████████████████▏ | 25/36 [00:01<00:00, 24.45it/s]\rCapturing batches (bs=56 avail_mem=11.72 GB): 69%|█████████████████████████████████████████████▏ | 25/36 [00:01<00:00, 24.45it/s]\rCapturing batches (bs=48 avail_mem=11.72 GB): 69%|█████████████████████████████████████████████▏ | 25/36 [00:01<00:00, 24.45it/s]\rCapturing batches (bs=40 avail_mem=11.71 GB): 69%|█████████████████████████████████████████████▏ | 25/36 [00:01<00:00, 24.45it/s]\rCapturing batches (bs=40 avail_mem=11.71 GB): 78%|██████████████████████████████████████████████████▌ | 28/36 [00:01<00:00, 25.01it/s]\rCapturing batches (bs=32 avail_mem=11.71 GB): 78%|██████████████████████████████████████████████████▌ | 28/36 [00:01<00:00, 25.01it/s]\rCapturing batches (bs=24 avail_mem=11.70 GB): 78%|██████████████████████████████████████████████████▌ | 28/36 [00:01<00:00, 25.01it/s]\rCapturing batches (bs=16 avail_mem=11.70 GB): 78%|██████████████████████████████████████████████████▌ | 28/36 [00:01<00:00, 25.01it/s]",,terminal_output
+ 242,1452903,"TERMINAL",0,0,"\rCapturing batches (bs=16 avail_mem=11.70 GB): 86%|███████████████████████████████████████████████████████▉ | 31/36 [00:01<00:00, 23.88it/s]\rCapturing batches (bs=12 avail_mem=11.69 GB): 86%|███████████████████████████████████████████████████████▉ | 31/36 [00:01<00:00, 23.88it/s]\rCapturing batches (bs=8 avail_mem=11.69 GB): 86%|████████████████████████████████████████████████████████▊ | 31/36 [00:01<00:00, 23.88it/s]\rCapturing batches (bs=4 avail_mem=11.68 GB): 86%|████████████████████████████████████████████████████████▊ | 31/36 [00:01<00:00, 23.88it/s]\rCapturing batches (bs=2 avail_mem=11.68 GB): 86%|████████████████████████████████████████████████████████▊ | 31/36 [00:01<00:00, 23.88it/s]\rCapturing batches (bs=2 avail_mem=11.68 GB): 97%|████████████████████████████████████████████████████████████████▏ | 35/36 [00:01<00:00, 26.88it/s]\rCapturing batches (bs=1 avail_mem=11.67 GB): 97%|████████████████████████████████████████████████████████████████▏ | 35/36 [00:01<00:00, 26.88it/s]\rCapturing batches (bs=1 avail_mem=11.67 GB): 100%|██████████████████████████████████████████████████████████████████| 36/36 [00:01<00:00, 22.95it/s]\r\n",,terminal_output
+ 243,1453253,"TERMINAL",0,0,"[2025-11-03 13:33:33] Capture cuda graph end. Time elapsed: 2.32 s. mem usage=0.54 GB. avail mem=11.67 GB.\r\n",,terminal_output
+ 244,1454157,"TERMINAL",0,0,"[2025-11-03 13:33:34] max_total_num_tokens=5647121, chunked_prefill_size=8192, max_prefill_tokens=16384, max_running_requests=4096, context_len=32768, available_gpu_mem=11.67 GB\r\n",,terminal_output
+ 245,1454646,"TERMINAL",0,0,"[2025-11-03 13:33:34] INFO: Started server process [1856857]\r\n[2025-11-03 13:33:34] INFO: Waiting for application startup.\r\n[2025-11-03 13:33:34] Using default chat sampling params from model generation config: {'repetition_penalty': 1.1, 'temperature': 0.7, 'top_k': 20, 'top_p': 0.8}\r\n[2025-11-03 13:33:34] Using default chat sampling params from model generation config: {'repetition_penalty': 1.1, 'temperature': 0.7, 'top_k': 20, 'top_p': 0.8}\r\n[2025-11-03 13:33:34] INFO: Application startup complete.\r\n[2025-11-03 13:33:34] INFO: Uvicorn running on http://0.0.0.0:30000 (Press CTRL+C to quit)\r\n",,terminal_output
+ 246,1455651,"TERMINAL",0,0,"[2025-11-03 13:33:35] INFO: 127.0.0.1:51886 - ""GET /get_model_info HTTP/1.1"" 200 OK\r\n[2025-11-03 13:33:35] Prefill batch, #new-seq: 1, #new-token: 6, #cached-token: 0, token usage: 0.00, #running-req: 0, #queue-req: 0, \r\n",,terminal_output
+ 247,1456318,"TERMINAL",0,0,"[2025-11-03 13:33:36] INFO: 127.0.0.1:51898 - ""POST /generate HTTP/1.1"" 200 OK\r\n[2025-11-03 13:33:36] The server is fired up and ready to roll!\r\n",,terminal_output
+ 248,1459037,"TERMINAL",0,0,"[2025-11-03 13:33:38] Prefill batch, #new-seq: 1, #new-token: 36, #cached-token: 0, token usage: 0.00, #running-req: 0, #queue-req: 0, \r\n[2025-11-03 13:33:38] INFO: 10.86.2.252:38622 - ""POST /v1/chat/completions HTTP/1.1"" 200 OK\r\n",,terminal_output
+ 249,4563086,"TERMINAL",0,0,"[2025-11-03 14:25:22] INFO: 10.86.2.251:48316 - ""POST /v1/chat/completions HTTP/1.1"" 200 OK\r\n[2025-11-03 14:25:22] Prefill batch, #new-seq: 1, #new-token: 8192, #cached-token: 5, token usage: 0.00, #running-req: 0, #queue-req: 0, \r\n[2025-11-03 14:25:22] Prefill batch, #new-seq: 1, #new-token: 1418, #cached-token: 0, token usage: 0.00, #running-req: 0, #queue-req: 0, \r\n",,terminal_output
+ 250,4563164,"TERMINAL",0,0,"[2025-11-03 14:25:22] Decode batch, #running-req: 1, #token: 9640, token usage: 0.00, cuda graph: True, gen throughput (token/s): 0.01, #queue-req: 0, \r\n",,terminal_output
+ 251,4576851,"TERMINAL",0,0,"[2025-11-03 14:25:36] INFO: 10.86.2.251:53276 - ""POST /v1/chat/completions HTTP/1.1"" 200 OK\r\n[2025-11-03 14:25:36] Prefill batch, #new-seq: 1, #new-token: 164, #cached-token: 9654, token usage: 0.00, #running-req: 0, #queue-req: 0, \r\n",,terminal_output
+ 252,4576930,"TERMINAL",0,0,"[2025-11-03 14:25:36] Decode batch, #running-req: 1, #token: 9843, token usage: 0.00, cuda graph: True, gen throughput (token/s): 2.91, #queue-req: 0, \r\n",,terminal_output
+ 253,4603575,"TERMINAL",0,0,"[2025-11-03 14:26:03] INFO: 10.86.2.251:56392 - ""POST /v1/chat/completions HTTP/1.1"" 200 OK\r\n[2025-11-03 14:26:03] Prefill batch, #new-seq: 1, #new-token: 164, #cached-token: 9850, token usage: 0.00, #running-req: 0, #queue-req: 0, \r\n",,terminal_output
+ 254,4603624,"TERMINAL",0,0,"[2025-11-03 14:26:03] Decode batch, #running-req: 1, #token: 10046, token usage: 0.00, cuda graph: True, gen throughput (token/s): 1.50, #queue-req: 0, \r\n",,terminal_output
+ 255,4619746,"TERMINAL",0,0,"[2025-11-03 14:26:19] INFO: 10.86.2.251:33794 - ""POST /v1/chat/completions HTTP/1.1"" 200 OK\r\n[2025-11-03 14:26:19] Prefill batch, #new-seq: 1, #new-token: 167, #cached-token: 10046, token usage: 0.00, #running-req: 0, #queue-req: 0, \r\n",,terminal_output
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f1b4c573-86a5-4c21-a501-9fb3be4a68881763632584824-2025_11_20-10.56.31.891/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f508ed97-76c1-4935-95ed-d4393099e6361753128212083-2025_07_21-22.03.39.166/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-0f5513f7-8bc9-4c5d-856d-79d92f75113d1751284706913-2025_06_30-13.59.01.459/source.csv ADDED
@@ -0,0 +1,52 @@
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+ 2,263,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:59:01 PM [info] Activating crowd-code\n1:59:01 PM [info] Recording started\n1:59:01 PM [info] Initializing git provider using file system watchers...\n1:59:01 PM [info] Git repository found\n1:59:01 PM [info] Git provider initialized successfully\n1:59:01 PM [info] Initial git state: [object Object]\n",Log,tab
+ 3,3941,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
+ 4,3980,"TERMINAL",0,0,"]633;E;2025-06-30 13:59:05 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;ae710212-bbd7-466b-8215-e56dfc4f7a88]633;C",,terminal_output
+ 5,4004,"TERMINAL",0,0,"]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
+ 6,31538,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
+ 7,33235,"TERMINAL",0,0,"bash",,terminal_focus
+ 8,35303,"scripts_horeka/train_tokenizer.sh",0,0,"#!/usr/bin/env bash\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\ntf_records_dir=$ws_dir/knoms_tfrecords_500_shards\nws_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/'\n\njob_name=""debug""\nslurm_job_id=""0000""\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=16 \\n --min_lr=4.24e-4 \\n --max_lr=4.24e-4 \\n --log_image_interval=100 \\n --log \\n --name=test-wandb-tags-$slurm_job_id \\n --tags test tokenizer debug \\n --entity instant-uv \\n --project jafar \\n --data_dir $tf_records_dir",shellscript,tab
+ 9,35920,"TERMINAL",0,0,"bash",,terminal_focus
+ 10,39024,"TERMINAL",0,0,"queue",,terminal_command
+ 11,39105,"TERMINAL",0,0,"]633;E;2025-06-30 13:59:40 queue;79ec3af7-6a10-4dac-bb07-e3b50f56ded4]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Mon Jun 30 13:59:40 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)",,terminal_output
+ 12,39625,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output
+ 13,1168748,"TERMINAL",0,0,"salloc --time=01:00:00 --partition=accelerated --nodes=4 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5 --mem=200G --mail-user=mihir@pdoom.org --mail-type=ALL",,terminal_command
+ 14,1168797,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:30 salloc --time=01:00:00 --partition=accelerated --nodes=4 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=5 --mem=200G --mail-user=mihir@pdoom.org --mail-type=ALL;79ec3af7-6a10-4dac-bb07-e3b50f56ded4]633;C",,terminal_output
+ 15,1168893,"TERMINAL",0,0,"salloc: Pending job allocation 3306136\r\nsalloc: job 3306136 queued and waiting for resources\r\n",,terminal_output
+ 16,1169921,"TERMINAL",0,0,"bash",,terminal_focus
+ 17,1170955,"TERMINAL",0,0,"queue",,terminal_command
+ 18,1171010,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:32 queue;ead59344-49db-4336-9336-47fae706e637]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Mon Jun 30 14:18:32 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3306136 accelerat interact tum_cte0 PD\t0:00\t 4 (Resources)",,terminal_output
+ 19,1172154,"TERMINAL",0,0,"3\t ",,terminal_output
+ 20,1173084,"TERMINAL",0,0,"4\t ",,terminal_output
+ 21,1174201,"TERMINAL",0,0,"5\t ",,terminal_output
+ 22,1174691,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 23,1175459,"TERMINAL",0,0,"idle",,terminal_command
+ 24,1175485,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:36 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 154 nodes idle\r\nPartition dev_accelerated : 0 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 25,1181058,"TERMINAL",0,0,"^C",,terminal_command
+ 26,1181077,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;ead59344-49db-4336-9336-47fae706e637]633;C]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D",,terminal_output
+ 27,1183520,"TERMINAL",0,0,"idle",,terminal_command
+ 28,1183538,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:44 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 154 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 29,1185175,"TERMINAL",0,0,"^C",,terminal_command
+ 30,1185191,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;ead59344-49db-4336-9336-47fae706e637]633;C]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D",,terminal_output
+ 31,1186579,"TERMINAL",0,0,"idle",,terminal_command
+ 32,1186594,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:47 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 154 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 33,1187256,"TERMINAL",0,0,"idle",,terminal_command
+ 34,1187270,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:48 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 154 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 35,1187998,"TERMINAL",0,0,"idle",,terminal_command
+ 36,1188014,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:49 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 154 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 37,1188374,"TERMINAL",0,0,"idle",,terminal_command
+ 38,1188379,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:49 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 154 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 39,1188713,"TERMINAL",0,0,"idle",,terminal_command
+ 40,1188724,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:50 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 154 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 41,1189024,"TERMINAL",0,0,"idle",,terminal_command
+ 42,1189039,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:50 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 154 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 43,1189680,"TERMINAL",0,0,"",,terminal_command
+ 44,1189694,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;;ead59344-49db-4336-9336-47fae706e637]633;C]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D",,terminal_output
+ 45,1190195,"TERMINAL",0,0,"idle",,terminal_command
+ 46,1190210,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:51 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 154 nodes idle\r\nPartition dev_accelerated : 1 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 47,1192901,"TERMINAL",0,0,"idle",,terminal_command
+ 48,1192917,"TERMINAL",0,0,"]633;E;2025-06-30 14:18:54 idle;ead59344-49db-4336-9336-47fae706e637]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 152 nodes idle\r\nPartition dev_accelerated : 1 nodes idle\r\nPartition accelerated : 1 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 8 nodes idle\r\n]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar]633;D;0",,terminal_output
+ 49,1194207,"TERMINAL",0,0,"salloc",,terminal_focus
+ 50,1198907,"TERMINAL",0,0,"^Csalloc: Job allocation 3306136 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;1",,terminal_output
+ 51,1199160,"TERMINAL",0,0,"^C",,terminal_command
+ 52,1199175,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;79ec3af7-6a10-4dac-bb07-e3b50f56ded4]633;C]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D",,terminal_output
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-17a23500-007e-4825-8127-4f0062137ef91759750602496-2025_10_06-13.37.19.164/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-28f4aa5c-0534-40eb-ae05-51501d68e4871752860706222-2025_07_18-19.45.48.539/source.csv ADDED
@@ -0,0 +1,90 @@
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+ 2,667,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"7:45:48 PM [info] Activating crowd-code\n7:45:48 PM [info] Recording started\n7:45:48 PM [info] Initializing git provider using file system watchers...\n7:45:48 PM [info] Git repository found\n7:45:48 PM [info] Git provider initialized successfully\n7:45:48 PM [info] Initial git state: [object Object]\n",Log,tab
+ 3,228069,"TERMINAL",0,0,"queue",,terminal_command
+ 4,228163,"TERMINAL",0,0,"]633;E;2025-07-18 19:49:36 queue;9dd50732-d6d7-4d7e-a22c-51ae92e646cb]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Fri Jul 18 19:49:36 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3357893 accelerat interact tum_cte0 R 3:08:29\t 2 hkn[0436,0708]3357894 accelerat interact tum_cte0 R 3:45:12\t 1 hkn0715\t\t",,terminal_output
+ 5,229211,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output
+ 6,231917,"TERMINAL",0,0,"idling",,terminal_command
+ 7,232013,"TERMINAL",0,0,"]633;E;2025-07-18 19:49:40 idling;9dd50732-d6d7-4d7e-a22c-51ae92e646cb]633;C[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1991.localdomain: Fri Jul 18 19:49:40 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly:\t 7 nodes idle\rPartition dev_accelerated:\t 3 nodes idle\rPartition accelerated:\t 1 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 8 nodes idle",,terminal_output
+ 8,233034,"TERMINAL",0,0,"1\t",,terminal_output
+ 9,233586,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output
+ 10,237086,"TERMINAL",0,0,"queue",,terminal_command
+ 11,237134,"TERMINAL",0,0,"]633;E;2025-07-18 19:49:45 queue;9dd50732-d6d7-4d7e-a22c-51ae92e646cb]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Fri Jul 18 19:49:45 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3357893 accelerat interact tum_cte0 R 3:08:38\t 2 hkn[0436,0708]3357894 accelerat interact tum_cte0 R 3:45:21\t 1 hkn0715\t\t",,terminal_output
+ 12,237968,"TERMINAL",0,0,"692",,terminal_output
+ 13,238857,"TERMINAL",0,0,"7403",,terminal_output
+ 14,239544,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output
+ 15,241735,"TERMINAL",0,0,"scancel 3357893",,terminal_command
+ 16,241795,"TERMINAL",0,0,"]633;E;2025-07-18 19:49:50 scancel 3357893;9dd50732-d6d7-4d7e-a22c-51ae92e646cb]633;C",,terminal_output
+ 17,241927,"TERMINAL",0,0,"]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output
+ 18,243399,"TERMINAL",0,0,"scancel 3357894",,terminal_command
+ 19,243459,"TERMINAL",0,0,"]633;E;2025-07-18 19:49:51 scancel 3357894;9dd50732-d6d7-4d7e-a22c-51ae92e646cb]633;C]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_cte0515/Projects/jafar",,terminal_output
+ 20,245208,"TERMINAL",0,0,"idling",,terminal_command
+ 21,245252,"TERMINAL",0,0,"]633;E;2025-07-18 19:49:53 idling;9dd50732-d6d7-4d7e-a22c-51ae92e646cb]633;C",,terminal_output
+ 22,245311,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: sinfo_t_idlehkn1991.localdomain: Fri Jul 18 19:49:53 2025Partition dev_cpuonly: 12 nodes idle\rPartition cpuonly:\t 7 nodes idle\rPartition dev_accelerated:\t 3 nodes idle\rPartition accelerated:\t 3 nodes idle\rPartition dev_accelerated-h100 :\t 1 nodes idle\rPartition accelerated-h100:\t 0 nodes idle\rPartition large:\t 8 nodes idle",,terminal_output
+ 23,246324,"TERMINAL",0,0,"4\t",,terminal_output
+ 24,246706,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output
+ 25,247830,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport einops\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n spacial_bert=True,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n\n\nclass DynamicsAutoregressive(nn.Module):\n """"""Autoregressive (causal) dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n spacial_bert=False,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n vid_embed = self.patch_embed(batch[""video_tokens""])\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n vid_embed_padded = jnp.pad(vid_embed, ((0, 0), (1, 0), (1, 0), (0, 0)))\n logits = self.dynamics(vid_embed_padded)[:, 1:, 1:]\n mask = jnp.ones(vid_embed.shape[:-1])\n next_tokens = jnp.argmax(logits, axis=-1)\n print(next_tokens.shape)\n jax.debug.breakpoint()\n return dict(token_logits=logits, mask=mask)",python,tab
+ 26,247835,"models/dynamics.py",2889,0,"",python,selection_mouse
+ 27,247936,"models/dynamics.py",2887,9,"act_embed",python,selection_mouse
+ 28,248563,"models/dynamics.py",2826,0,"",python,selection_mouse
+ 29,251754,"models/dynamics.py",2995,0,"",python,selection_mouse
+ 30,253317,"models/dynamics.py",2989,0,"",python,selection_mouse
+ 31,254853,"models/dynamics.py",2988,0,"",python,selection_mouse
+ 32,303300,"models/lam.py",0,0,"from typing import Dict, Any\n\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass LatentActionModel(nn.Module):\n """"""Latent Action ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n def setup(self):\n self.patch_token_dim = self.in_dim * self.patch_size**2\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n )\n self.action_in = self.param(\n ""action_in"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.patch_token_dim),\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.patch_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n self.action_up = nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n self.decoder = STTransformer(\n self.model_dim,\n self.patch_token_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Encode + VQ ---\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n video_action_patches = self.action_up(outputs[""z_q""]) + self.patch_up(\n outputs[""patches""][:, :-1]\n )\n del outputs[""patches""]\n\n # --- Decode ---\n video_recon = self.decoder(video_action_patches)\n video_recon = video_recon.astype(jnp.float32)\n video_recon = nn.sigmoid(video_recon)\n video_recon = video_recon.astype(self.dtype)\n outputs[""recon""] = unpatchify(video_recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess videos ---\n B, T = videos.shape[:2]\n patches = patchify(videos, self.patch_size)\n action_pad = jnp.broadcast_to(self.action_in, (B, T, 1, self.patch_token_dim))\n # FIXME mihir do this the other way around\n padded_patches = jnp.concatenate((action_pad, patches), axis=2)\n\n # --- Encode ---\n z = self.encoder(padded_patches) # (B, T, N, E)\n # Get latent action for all future frames\n z = z[:, 1:, 0] # (B, T-1, E)\n\n # --- Vector quantize ---\n z = z.reshape(B * (T - 1), self.latent_dim)\n z_q, z, emb, indices = self.vq(z, training)\n z_q = z_q.reshape(B, T - 1, 1, self.latent_dim)\n return dict(patches=patches, z_q=z_q, z=z, emb=emb, indices=indices)\n",python,tab
+ 33,304984,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT, DynamicsAutoregressive\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport os\nimport grain\n\n\nclass Genie(nn.Module):\n """"""Genie model""""""\n\n # --- Tokenizer ---\n in_dim: int\n tokenizer_dim: int\n latent_patch_dim: int\n num_patch_latents: int\n patch_size: int\n tokenizer_num_blocks: int\n tokenizer_num_heads: int\n # --- LAM ---\n lam_dim: int\n latent_action_dim: int\n num_latent_actions: int\n lam_patch_size: int\n lam_num_blocks: int\n lam_num_heads: int\n lam_co_train: bool\n # --- Dynamics ---\n dyna_dim: int\n dyna_num_blocks: int\n dyna_num_heads: int\n use_maskgit: bool\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n dropout: float = 0.0\n mask_limit: float = 0.0\n\n def setup(self):\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n if self.use_maskgit:\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ) \n else:\n self.dynamics = DynamicsAutoregressive(\n model_dim=self.dyna_dim,\n num_latents=self.num_patch_latents,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""])\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n\n def sample_causal(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n ):\n """"""\n Autoregressively samples up to `seq_len` future frames using the causal transformer backend.\n\n - Input frames are tokenized once.\n - Future frames are generated one at a time, each conditioned on all previous frames.\n - All frames are 
detokenized in a single pass at the end.\n\n Args:\n batch: Dict with at least ""videos"" (B, T, H, W, C)\n seq_len: total number of frames to generate (including context)\n temperature: sampling temperature\n sample_argmax: if True, use argmax instead of sampling\n\n Returns:\n Generated video frames (B, seq_len, H, W, C)\n """"""\n # --- Encode context frames ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n\n # jax.debug.print(""token_idxs shape: {}"", token_idxs.shape)\n # --- Prepare initial token sequence ---\n # Pad with zeros for future frames\n pad_shape = (B, seq_len - T, N)\n token_idxs_full = jnp.concatenate(\n [token_idxs, jnp.zeros(pad_shape, dtype=token_idxs.dtype)], axis=1\n ) # (B, seq_len, N)\n\n # --- Prepare latent actions ---\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""]) # (B, S-1, )\n # --- Autoregressive generation loop ---\n rng = batch[""rng""]\n for t in range(T, seq_len):\n for n in range(32):\n dyna_inputs = {\n ""video_tokens"": token_idxs_full,\n ""latent_actions"": action_tokens\n }\n # jax.debug.print(""token_idxs_full 0: {}"", token_idxs_full[0,:,0])\n dyna_outputs = self.dynamics(dyna_inputs, training=False)\n # # dyna_outputs[""token_logits""]: (B, t, N, vocab_size)\n # # We want the logits for the last time step (frame t-1 predicting t)\n # jax.debug.breakpoint()\n next_token_logits = dyna_outputs[""token_logits""][:, t, n, :].astype(jnp.float32) # (B, 1, vocab_size)\n\n # Sample or argmax for each patch\n if sample_argmax:\n next_token = jnp.argmax(next_token_logits, axis=-1) # (B, 1)\n else:\n rng, step_rng = jax.random.split(rng)\n next_token = jax.random.categorical(\n step_rng, next_token_logits / temperature, axis=-1\n ) # (B, 1)\n\n # Insert the generated tokens into the sequence\n token_idxs_full = token_idxs_full.at[:, t, n].set(next_token)\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n token_idxs_full, video_hw=batch[""videos""].shape[2:4]\n )\n return final_frames\n\n\n @nn.compact\n def sample_maskgit(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by \n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size \n T: number of input (conditioning) frames \n N: patches per frame \n S: sequence length \n A: action space \n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""]) 
\n\n MaskGITLoop = nn.scan(\n MaskGITStep,\n variable_broadcast=""params"",\n split_rngs={""params"": False},\n in_axes=0,\n out_axes=0,\n length=steps,\n )\n \n loop_fn = MaskGITLoop(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)) # (B, S, N)\n mask = mask.astype(bool)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n final_carry_maskgit, _ = loop_fn(init_carry_maskgit, jnp.arange(steps))\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn,\n initial_carry,\n timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nn.Module):\n dynamics: nn.Module\n tokenizer: nn.Module\n temperature: float\n sample_argmax: bool\n steps: int\n\n @nn.compact\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1) \n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\ndef restore_genie_components(\n train_state: 
TrainState,\n sharding: jax.sharding.NamedSharding,\n grain_iterator: grain.DataLoaderIterator,\n inputs: Dict[str, jax.Array],\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rng, _rng = jax.random.split(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\n handler_registry.add('dataloader_state', grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler)\n \n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n tokenizer_init_params = dummy_tokenizer.init(_rng, inputs)\n dummy_tokenizer_train_state = TrainState.create(\n apply_fn=dummy_tokenizer.apply, params=tokenizer_init_params, tx=dummy_tx\n )\n abstract_sharded_tokenizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_train_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_tokenizer_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )[""model_state""]\n restored_tokenizer_params = restored_tokenizer.params[""params""]\n train_state.params[""params""][""tokenizer""].update(restored_tokenizer_params)\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n lam_init_params = dummy_lam.init(_rng, inputs)\n dummy_lam_train_state = TrainState.create(\n apply_fn=dummy_lam.apply, params=lam_init_params, tx=dummy_tx\n )\n abstract_sharded_lam_state = _create_abstract_sharded_pytree(\n dummy_lam_train_state, sharding\n )\n restored_lam = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_sharded_lam_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )[""model_state""]\n restored_lam_params = restored_lam.params[""params""]\n # Genie does not initialize all LAM modules, thus we omit those extra modules during restoration\n # (f.srambical) FIXME: Currently, this is a small HBM memory crunch since the LAM's decoder is 
loaded into HBM and immediately dicarded.\n # A workaround would be to restore to host memory first, and only move the weights to HBM after pruning the decoder\n restored_lam_params = {\n k: v\n for k, v in restored_lam_params.items()\n if k in train_state.params[""params""][""lam""]\n }\n train_state.params[""params""][""lam""].update(restored_lam_params)\n lam_checkpoint_manager.close()\n\n return train_state\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)",python,tab
+ 34,311383,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n warmup_steps: int = 5000\n lr_schedule : str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n use_maskgit: bool = False\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n logits = outputs[""token_logits""]\n targets = outputs[""video_tokens""]\n\n # if not args.use_maskgit:\n # logits = outputs[""token_logits""][:, :, :-1]\n # targets = outputs[""video_tokens""][:, :, 1:]\n # mask = outputs[""mask""][:, :, 1:] \n\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n logits, targets\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = logits.argmax(-1) == targets\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(logits)\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), 
size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=logits.max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n use_maskgit=args.use_maskgit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = 
get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, grain_iterator, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n # for videos in dataloader:\n videos = np.load(""overfit_dir/corner_8repl.npy"")\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n while True:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n 
videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) #/ 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
+ 35,312861,"train_dynamics.py",2979,0,"",python,selection_mouse
+ 36,328600,"models/tokenizer.py",0,0,"from typing import Dict, Any, Tuple\n\nimport flax.linen as nn\nimport jax.numpy as jnp\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nn.Module):\n """"""ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n\n def setup(self):\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.model_dim,\n self.out_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n self.param_dtype,\n self.dtype,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n recon = self.decoder(outputs[""z_q""]) # (B, T, H_down * W_down, C)\n recon = recon.astype(jnp.float32)\n recon = nn.sigmoid(recon)\n recon = recon.astype(self.dtype)\n outputs[""recon""] = unpatchify(recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n x = patchify(videos, self.patch_size)\n N = x.shape[2]\n x = self.encoder(x) # (B, T, N, E)\n\n # --- Vector quantize ---\n x = x.reshape(B * T * N, self.latent_dim)\n z_q, z, emb, indices = self.vq(x, training)\n z_q = z_q.reshape(B, T, N, self.latent_dim)\n indices = indices.reshape(B, T, N)\n return dict(z_q=z_q, z=z, emb=emb, indices=indices)\n\n def decode(self, indices: Any, video_hw: Tuple[int, int]):\n z = self.vq.codebook[indices]\n recon = self.decoder(z)\n recon = recon.astype(jnp.float32)\n recon = nn.sigmoid(recon)\n recon = recon.astype(self.dtype)\n return unpatchify(recon, self.patch_size, *video_hw)\n",python,tab
37
+ 37,329565,"models/dynamics.py",0,0,"",python,tab
38
+ 38,330894,"models/dynamics.py",3065,0,"",python,selection_mouse
39
+ 39,331494,"models/dynamics.py",3070,0,"",python,selection_mouse
40
+ 40,332511,"models/dynamics.py",3069,0,"",python,selection_command
41
+ 41,335761,"models/dynamics.py",3070,0,"",python,selection_command
42
+ 42,335998,"models/dynamics.py",3069,1,"",python,content
43
+ 43,336111,"models/dynamics.py",3068,1,"",python,content
44
+ 44,337261,"models/dynamics.py",3068,0,":",python,content
45
+ 45,337262,"models/dynamics.py",3069,0,"",python,selection_keyboard
46
+ 46,337444,"models/dynamics.py",3069,0,"-",python,content
47
+ 47,337444,"models/dynamics.py",3070,0,"",python,selection_keyboard
48
+ 48,337711,"models/dynamics.py",3070,0,"1",python,content
49
+ 49,337712,"models/dynamics.py",3071,0,"",python,selection_keyboard
50
+ 50,339339,"models/dynamics.py",3066,0,"",python,selection_command
51
+ 51,339494,"models/dynamics.py",3065,1,"",python,content
52
+ 52,339644,"models/dynamics.py",3064,1,"",python,content
53
+ 53,340683,"models/dynamics.py",3064,0,":",python,content
54
+ 54,340684,"models/dynamics.py",3065,0,"",python,selection_keyboard
55
+ 55,340980,"models/dynamics.py",3065,0,"-",python,content
56
+ 56,340981,"models/dynamics.py",3066,0,"",python,selection_keyboard
57
+ 57,341127,"models/dynamics.py",3066,0,"1",python,content
58
+ 58,341128,"models/dynamics.py",3067,0,"",python,selection_keyboard
59
+ 59,341777,"models/dynamics.py",3066,0,"",python,selection_command
60
+ 60,343332,"TERMINAL",0,0,"bash",,terminal_focus
61
+ 61,343797,"TERMINAL",0,0,"bash",,terminal_focus
62
+ 62,352015,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/%x_%j.log\n#SBATCH --job-name=train_dyn_yolorun_new_arch\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --num_steps=1001 \\n --warmup_steps=0 \\n --wsd_decay_steps=0 \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=8 \\n --init_lr=1e-3 \\n --max_lr=1e-3 \\n --log_image_interval=100 \\n --log \\n --log_checkpoint_interval=100 \\n --name=dynamics-new-arch-speedrun-$slurm_job_id \\n --tags dynamics \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4\n ",shellscript,tab
63
+ 63,357312,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1295,0,"",shellscript,selection_mouse
64
+ 64,360013,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1295,0,"-",shellscript,content
65
+ 65,360015,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1296,0,"",shellscript,selection_keyboard
66
+ 66,360277,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1296,0,"m",shellscript,content
67
+ 67,360278,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1297,0,"",shellscript,selection_keyboard
68
+ 68,360361,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1297,0,"e",shellscript,content
69
+ 69,360362,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1298,0,"",shellscript,selection_keyboard
70
+ 70,360527,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1298,0,"w",shellscript,content
71
+ 71,360528,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1299,0,"",shellscript,selection_keyboard
72
+ 72,360727,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1299,0,"-",shellscript,content
73
+ 73,360728,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1300,0,"",shellscript,selection_keyboard
74
+ 74,361210,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1299,1,"",shellscript,content
75
+ 75,361360,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1298,1,"",shellscript,content
76
+ 76,361479,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1297,1,"",shellscript,content
77
+ 77,361594,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1296,1,"",shellscript,content
78
+ 78,361794,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1296,0,"n",shellscript,content
79
+ 79,361795,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1297,0,"",shellscript,selection_keyboard
80
+ 80,361877,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1297,0,"e",shellscript,content
81
+ 81,361878,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1298,0,"",shellscript,selection_keyboard
82
+ 82,361978,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1298,0,"w",shellscript,content
83
+ 83,361979,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",1299,0,"",shellscript,selection_keyboard
84
+ 84,365481,"slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",0,0,"",shellscript,tab
85
+ 85,370577,"TERMINAL",0,0,"sbatch slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch",,terminal_command
86
+ 86,370660,"TERMINAL",0,0,"]633;E;2025-07-18 19:51:59 sbatch slurm/jobs/mihir/horeka/yolo-runs/train_dynamics_new_arch_speedrun.sbatch;9dd50732-d6d7-4d7e-a22c-51ae92e646cb]633;CSubmitted batch job 3358457\r\n]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output
87
+ 87,373210,"TERMINAL",0,0,"queue",,terminal_command
88
+ 88,373260,"TERMINAL",0,0,"]633;E;2025-07-18 19:52:01 queue;9dd50732-d6d7-4d7e-a22c-51ae92e646cb]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Fri Jul 18 19:52:01 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3358457 accelerat train_dy tum_cte0 PD\t0:00\t 2 (Priority)\t\t",,terminal_output
89
+ 89,374277,"TERMINAL",0,0,"2\t",,terminal_output
90
+ 90,375261,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3aab53c4-8c45-4083-87ad-e991570a4f5b1752851966968-2025_07_18-17.20.32.773/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-73ddfe20-a667-477d-9924-94f7208128f81752186339186-2025_07_11-00.25.58.835/source.csv ADDED
@@ -0,0 +1,12 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 1,6,"slurm/jobs/mihir/horeka/modelsize_scaling/dynamics/1_train_dyn_36M.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big-runs/dynamics-cotraining-modelsize-scaling/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big-runs/dynamics-cotraining-modelsize-scaling/%x_%j.log\n#SBATCH --job-name=train_dynamics_modelsize_scaling_36M_2_node\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dynamics-cotraining-modelsize-scaling/$job_name\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.5e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-modelsize-scaling-36M-$slurm_job_id \\n --tags dynamics modelsize-scaling 36M \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir \\n",shellscript,tab
3
+ 2,766,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:25:58 AM [info] Activating crowd-code\n12:25:58 AM [info] Recording started\n12:25:58 AM [info] Initializing git provider using file system watchers...\n12:25:59 AM [info] Git repository found\n12:25:59 AM [info] Git provider initialized successfully\n12:25:59 AM [info] Initial git state: [object Object]\n",Log,tab
4
+ 3,4116,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
5
+ 4,4194,"TERMINAL",0,0,"]633;E;2025-07-11 00:26:02 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;d1c8480d-5bbb-4ee3-b67f-eb04590abc9f]633;C]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
6
+ 5,76596,"slurm/jobs/mihir/horeka/modelsize_scaling/dynamics/1_train_dyn_36M.sbatch",0,0,"",shellscript,tab
7
+ 6,76600,"slurm/jobs/mihir/horeka/modelsize_scaling/dynamics/1_train_dyn_36M.sbatch",1303,0,"",shellscript,selection_mouse
8
+ 7,76617,"slurm/jobs/mihir/horeka/modelsize_scaling/dynamics/1_train_dyn_36M.sbatch",1302,0,"",shellscript,selection_command
9
+ 8,77837,"slurm/jobs/mihir/horeka/modelsize_scaling/dynamics/1_train_dyn_36M.sbatch",1620,0,"",shellscript,selection_mouse
10
+ 9,217736,"TERMINAL",0,0,"bash",,terminal_focus
11
+ 10,218636,"slurm/jobs/mihir/horeka/modelsize_scaling/dynamics/1_train_dyn_36M.sbatch",0,0,"",shellscript,tab
12
+ 11,226720,"slurm/jobs/mihir/horeka/batchsize_scaling/dynamics_cotraining/sqrt_lr/train_dynamics_2_nodes.sbatch",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=48:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big-runs/dynamics-cotraining-batchsize-scaling/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_mihir/big-runs/dynamics-cotraining-batchsize-scaling/%x_%j.log\n#SBATCH --job-name=train_tokenizer_batch_size_scaling_2_node\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/dynamics-cotraining-batchsize-scaling/$job_name\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/train_tokenizer_batch_size_scaling_16_node/3321526/tokenizer_22000/\n\nenv | grep SLURM\n\nsrun python train_dynamics.py \\n --save_ckpt \\n --ckpt_dir $CHECKPOINT_DIR \\n --batch_size=96 \\n --min_lr=0 \\n --max_lr=1.5e-5 \\n --log_image_interval=1000 \\n --log \\n --log_checkpoint_interval=1000 \\n --name=dynamics-batch-size-scaling-2-node-$slurm_job_id \\n --tags dynamics batch-size-scaling 2-node \\n --entity instant-uv \\n --project jafar \\n --tokenizer_checkpoint=$tokenizer_ckpt_dir \\n --data_dir $array_records_dir \\n",shellscript,tab
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-7c1bdcf0-d594-4018-8499-7d2ed33930611752094287328-2025_07_09-22.51.39.315/source.csv ADDED
@@ -0,0 +1,216 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 1,5,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n noise = jax.random.normal(rng2, self.mask_token.shape) * 1.0 # stddev=1.0, adjust if needed\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n \n\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,tab
3
+ 2,410,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:51:39 PM [info] Activating crowd-code\n10:51:39 PM [info] Recording started\n10:51:39 PM [info] Initializing git provider using file system watchers...\n10:51:39 PM [info] Git repository found\n10:51:39 PM [info] Git provider initialized successfully\n",Log,tab
4
+ 3,569,"extension-output-pdoom-org.crowd-code-#1-crowd-code",250,0,"10:51:39 PM [info] Initial git state: [object Object]\n",Log,content
5
+ 4,3410,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
6
+ 5,3484,"TERMINAL",0,0,"]633;E;2025-07-09 22:51:42 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;dbf2f7cf-c02e-4ed1-93dc-847ffbf8836e]633;C",,terminal_output
7
+ 6,3495,"TERMINAL",0,0,"]0;tum_cte0515@hkn1990:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
8
+ 7,5445,"models/dynamics.py",0,0,"",python,tab
9
+ 8,5449,"models/dynamics.py",1436,0,"",python,selection_mouse
10
+ 9,5460,"models/dynamics.py",1435,0,"",python,selection_command
11
+ 10,7028,"models/dynamics.py",1436,0,"\n ",python,content
12
+ 11,8322,"models/dynamics.py",1437,12,"",python,content
13
+ 12,8764,"models/dynamics.py",1043,0,"",python,selection_mouse
14
+ 13,10291,"models/dynamics.py",1437,0,"",python,selection_mouse
15
+ 14,10947,"models/dynamics.py",1437,0,"\n rng1, rng2 = jax.random.split(batch[""mask_rng""])",python,content
16
+ 15,10975,"models/dynamics.py",1450,0,"",python,selection_command
17
+ 16,11582,"models/dynamics.py",1437,0,"",python,selection_command
18
+ 17,12054,"models/dynamics.py",1437,1,"",python,content
19
+ 18,12066,"models/dynamics.py",1449,0,"",python,selection_command
20
+ 19,12469,"models/dynamics.py",1450,0,"",python,selection_command
21
+ 20,12594,"models/dynamics.py",1451,0,"",python,selection_command
22
+ 21,12742,"models/dynamics.py",1452,0,"",python,selection_command
23
+ 22,12880,"models/dynamics.py",1453,0,"",python,selection_command
24
+ 23,13017,"models/dynamics.py",1454,0,"",python,selection_command
25
+ 24,13410,"models/dynamics.py",1455,0,"",python,selection_command
26
+ 25,13862,"models/dynamics.py",1455,5,"",python,content
27
+ 26,14581,"models/dynamics.py",1455,0,"r",python,content
28
+ 27,14583,"models/dynamics.py",1456,0,"",python,selection_keyboard
29
+ 28,14768,"models/dynamics.py",1456,0,"n",python,content
30
+ 29,14770,"models/dynamics.py",1457,0,"",python,selection_keyboard
31
+ 30,14888,"models/dynamics.py",1457,0,"g",python,content
32
+ 31,14890,"models/dynamics.py",1458,0,"",python,selection_keyboard
33
+ 32,15046,"models/dynamics.py",1458,0,"_",python,content
34
+ 33,15048,"models/dynamics.py",1459,0,"",python,selection_keyboard
35
+ 34,15668,"models/dynamics.py",1458,1,"",python,content
36
+ 35,15795,"models/dynamics.py",1457,1,"",python,content
37
+ 36,15977,"models/dynamics.py",1456,1,"",python,content
38
+ 37,16094,"models/dynamics.py",1455,1,"",python,content
39
+ 38,16361,"models/dynamics.py",1455,0,"_",python,content
40
+ 39,16362,"models/dynamics.py",1456,0,"",python,selection_keyboard
41
+ 40,16741,"models/dynamics.py",1456,0,"r",python,content
42
+ 41,16742,"models/dynamics.py",1457,0,"",python,selection_keyboard
43
+ 42,16881,"models/dynamics.py",1457,0,"n",python,content
44
+ 43,16883,"models/dynamics.py",1458,0,"",python,selection_keyboard
45
+ 44,17195,"models/dynamics.py",1458,0,"g",python,content
46
+ 45,17196,"models/dynamics.py",1459,0,"",python,selection_keyboard
47
+ 46,17324,"models/dynamics.py",1459,0," ",python,content
48
+ 47,17326,"models/dynamics.py",1460,0,"",python,selection_keyboard
49
+ 48,17677,"models/dynamics.py",1459,0,"",python,selection_command
50
+ 49,18158,"models/dynamics.py",1460,0,"",python,selection_command
51
+ 50,18628,"models/dynamics.py",1497,0,"",python,selection_command
52
+ 51,18905,"models/dynamics.py",1496,0,"",python,selection_command
53
+ 52,19342,"models/dynamics.py",1494,2,"",python,content
54
+ 53,19678,"models/dynamics.py",1486,8,"",python,content
55
+ 54,19957,"models/dynamics.py",1484,2,"",python,content
56
+ 55,20366,"models/dynamics.py",1479,5,"",python,content
57
+ 56,20872,"models/dynamics.py",1479,0,"r",python,content
58
+ 57,20873,"models/dynamics.py",1480,0,"",python,selection_keyboard
59
+ 58,21049,"models/dynamics.py",1480,0,"n",python,content
60
+ 59,21050,"models/dynamics.py",1481,0,"",python,selection_keyboard
61
+ 60,21640,"models/dynamics.py",1481,0,"g",python,content
62
+ 61,21642,"models/dynamics.py",1482,0,"",python,selection_keyboard
63
+ 62,22058,"models/dynamics.py",1482,0,"1",python,content
64
+ 63,22059,"models/dynamics.py",1483,0,"",python,selection_keyboard
65
+ 64,22539,"models/dynamics.py",1482,0,"",python,selection_command
66
+ 65,24952,"models/dynamics.py",1530,0,"",python,selection_command
67
+ 66,25648,"models/dynamics.py",1529,0,"",python,selection_command
68
+ 67,25800,"models/dynamics.py",1528,0,"",python,selection_command
69
+ 68,25938,"models/dynamics.py",1527,0,"",python,selection_command
70
+ 69,26098,"models/dynamics.py",1526,0,"",python,selection_command
71
+ 70,26219,"models/dynamics.py",1525,0,"",python,selection_command
72
+ 71,26363,"models/dynamics.py",1524,0,"",python,selection_command
73
+ 72,26519,"models/dynamics.py",1523,0,"",python,selection_command
74
+ 73,26856,"models/dynamics.py",1523,4,"",python,content
75
+ 74,27627,"models/dynamics.py",1523,0,"_",python,content
76
+ 75,27629,"models/dynamics.py",1524,0,"",python,selection_keyboard
77
+ 76,27875,"models/dynamics.py",1524,0,"r",python,content
78
+ 77,27876,"models/dynamics.py",1525,0,"",python,selection_keyboard
79
+ 78,27987,"models/dynamics.py",1525,0,"n",python,content
80
+ 79,27990,"models/dynamics.py",1526,0,"",python,selection_keyboard
81
+ 80,28146,"models/dynamics.py",1526,0,"g",python,content
82
+ 81,28147,"models/dynamics.py",1527,0,"",python,selection_keyboard
83
+ 82,28633,"models/dynamics.py",1526,0,"",python,selection_command
84
+ 83,28772,"models/dynamics.py",1527,0,"",python,selection_command
85
+ 84,29254,"models/dynamics.py",1528,0,"",python,selection_command
86
+ 85,29298,"models/dynamics.py",1529,0,"",python,selection_command
87
+ 86,29341,"models/dynamics.py",1530,0,"",python,selection_command
88
+ 87,29342,"models/dynamics.py",1531,0,"",python,selection_command
89
+ 88,29395,"models/dynamics.py",1532,0,"",python,selection_command
90
+ 89,29401,"models/dynamics.py",1533,0,"",python,selection_command
91
+ 90,29446,"models/dynamics.py",1534,0,"",python,selection_command
92
+ 91,29489,"models/dynamics.py",1535,0,"",python,selection_command
93
+ 92,29502,"models/dynamics.py",1536,0,"",python,selection_command
94
+ 93,29563,"models/dynamics.py",1537,0,"",python,selection_command
95
+ 94,29604,"models/dynamics.py",1538,0,"",python,selection_command
96
+ 95,29606,"models/dynamics.py",1539,0,"",python,selection_command
97
+ 96,29607,"models/dynamics.py",1540,0,"",python,selection_command
98
+ 97,29649,"models/dynamics.py",1541,0,"",python,selection_command
99
+ 98,29690,"models/dynamics.py",1542,0,"",python,selection_command
100
+ 99,29702,"models/dynamics.py",1543,0,"",python,selection_command
101
+ 100,29735,"models/dynamics.py",1544,0,"",python,selection_command
102
+ 101,29778,"models/dynamics.py",1545,0,"",python,selection_command
103
+ 102,29793,"models/dynamics.py",1546,0,"",python,selection_command
104
+ 103,29834,"models/dynamics.py",1547,0,"",python,selection_command
105
+ 104,29865,"models/dynamics.py",1548,0,"",python,selection_command
106
+ 105,29905,"models/dynamics.py",1549,0,"",python,selection_command
107
+ 106,29999,"models/dynamics.py",1550,0,"",python,selection_command
108
+ 107,30170,"models/dynamics.py",1551,0,"",python,selection_command
109
+ 108,30301,"models/dynamics.py",1552,0,"",python,selection_command
110
+ 109,30497,"models/dynamics.py",1554,0,"",python,selection_command
111
+ 110,32175,"models/dynamics.py",1554,2,"",python,content
112
+ 111,33125,"models/dynamics.py",1553,0,"",python,selection_command
113
+ 112,33308,"models/dynamics.py",1552,0,"",python,selection_command
114
+ 113,33722,"models/dynamics.py",1552,2,"",python,content
115
+ 114,34101,"models/dynamics.py",1552,1,"",python,content
116
+ 115,34397,"models/dynamics.py",1552,1,"",python,content
117
+ 116,35045,"models/dynamics.py",1552,1,"",python,content
118
+ 117,35331,"models/dynamics.py",1552,1,"",python,content
119
+ 118,35905,"models/dynamics.py",1552,1,"",python,content
120
+ 119,36383,"models/dynamics.py",1552,1,"",python,content
121
+ 120,36428,"models/dynamics.py",1552,1,"",python,content
122
+ 121,36440,"models/dynamics.py",1552,1,"",python,content
123
+ 122,36481,"models/dynamics.py",1552,1,"",python,content
124
+ 123,36513,"models/dynamics.py",1552,1,"",python,content
125
+ 124,36554,"models/dynamics.py",1552,1,"",python,content
126
+ 125,36563,"models/dynamics.py",1552,1,"",python,content
127
+ 126,36595,"models/dynamics.py",1552,1,"",python,content
128
+ 127,36638,"models/dynamics.py",1552,1,"",python,content
129
+ 128,36688,"models/dynamics.py",1552,1,"",python,content
130
+ 129,36689,"models/dynamics.py",1552,1,"",python,content
131
+ 130,36732,"models/dynamics.py",1552,1,"",python,content
132
+ 131,36774,"models/dynamics.py",1552,1,"",python,content
133
+ 132,36776,"models/dynamics.py",1552,1,"",python,content
134
+ 133,36822,"models/dynamics.py",1552,1,"",python,content
135
+ 134,36863,"models/dynamics.py",1552,1,"",python,content
136
+ 135,36889,"models/dynamics.py",1552,1,"",python,content
137
+ 136,36932,"models/dynamics.py",1552,1,"",python,content
138
+ 137,36975,"models/dynamics.py",1552,1,"",python,content
139
+ 138,36976,"models/dynamics.py",1552,1,"",python,content
140
+ 139,36997,"models/dynamics.py",1552,1,"",python,content
141
+ 140,37170,"models/dynamics.py",1552,1,"",python,content
142
+ 141,37363,"models/dynamics.py",1552,1,"",python,content
143
+ 142,37534,"models/dynamics.py",1552,1,"",python,content
144
+ 143,37726,"models/dynamics.py",1552,1,"",python,content
145
+ 144,37903,"models/dynamics.py",1552,1,"",python,content
146
+ 145,38092,"models/dynamics.py",1552,1,"",python,content
147
+ 146,38267,"models/dynamics.py",1552,1,"",python,content
148
+ 147,38278,"models/dynamics.py",1551,0,"",python,selection_command
149
+ 148,42166,"models/dynamics.py",1616,0,"",python,selection_mouse
150
+ 149,42779,"models/dynamics.py",1615,0,"",python,selection_mouse
151
+ 150,42928,"models/dynamics.py",1614,4,"self",python,selection_mouse
152
+ 151,43144,"models/dynamics.py",1614,5,"self.",python,selection_mouse
153
+ 152,43144,"models/dynamics.py",1614,15,"self.mask_token",python,selection_mouse
154
+ 153,45149,"models/dynamics.py",1614,15,"",python,content
155
+ 154,45559,"models/dynamics.py",1614,0,"n",python,content
156
+ 155,45560,"models/dynamics.py",1615,0,"",python,selection_keyboard
157
+ 156,45762,"models/dynamics.py",1615,0,"o",python,content
158
+ 157,45764,"models/dynamics.py",1616,0,"",python,selection_keyboard
159
+ 158,45932,"models/dynamics.py",1616,0,"i",python,content
160
+ 159,45934,"models/dynamics.py",1617,0,"",python,selection_keyboard
161
+ 160,46041,"models/dynamics.py",1617,0,"s",python,content
162
+ 161,46042,"models/dynamics.py",1618,0,"",python,selection_keyboard
163
+ 162,46230,"models/dynamics.py",1618,0,"e",python,content
164
+ 163,46232,"models/dynamics.py",1619,0,"",python,selection_keyboard
165
+ 164,46792,"models/dynamics.py",1618,0,"",python,selection_command
166
+ 165,51459,"models/dynamics.py",1626,0,"",python,selection_mouse
167
+ 166,52068,"models/dynamics.py",1644,0,"",python,selection_mouse
168
+ 167,52072,"models/dynamics.py",1643,0,"",python,selection_command
169
+ 168,52748,"models/dynamics.py",1644,0,"",python,selection_mouse
170
+ 169,52762,"models/dynamics.py",1643,0,"",python,selection_command
171
+ 170,53477,"models/dynamics.py",1629,0,"",python,selection_mouse
172
+ 171,54060,"models/dynamics.py",1631,0,"",python,selection_mouse
173
+ 172,54062,"models/dynamics.py",1630,0,"",python,selection_command
174
+ 173,54246,"models/dynamics.py",1630,1,")",python,selection_mouse
175
+ 174,54247,"models/dynamics.py",1631,0,"",python,selection_command
176
+ 175,54370,"models/dynamics.py",1549,82,"e) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
177
+ 176,54371,"models/dynamics.py",1534,97,"mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
178
+ 177,54371,"models/dynamics.py",1472,159,".split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
179
+ 178,54372,"models/dynamics.py",1418,213,"ith gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
180
+ 179,54372,"models/dynamics.py",1411,220,"ange, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
181
+ 180,54372,"models/dynamics.py",1409,222,"change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
182
+ 181,54412,"models/dynamics.py",1405,226," my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
183
+ 182,54413,"models/dynamics.py",1402,229," # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
184
+ 183,54427,"models/dynamics.py",1391,240,"\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
185
+ 184,54548,"models/dynamics.py",1300,331," # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
186
+ 185,54599,"models/dynamics.py",1262,369," # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
187
+ 186,54691,"models/dynamics.py",1218,413," mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
188
+ 187,54729,"models/dynamics.py",1139,492," mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
189
+ 188,54753,"models/dynamics.py",1066,565," mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
190
+ 189,54808,"models/dynamics.py",1005,626," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
191
+ 190,54849,"models/dynamics.py",984,647," if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
192
+ 191,54979,"models/dynamics.py",924,707," vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)",python,selection_mouse
193
+ 192,56344,"models/dynamics.py",1009,0,"",python,selection_mouse
194
+ 193,56345,"models/dynamics.py",1005,12," ",python,selection_mouse
195
+ 194,56935,"models/dynamics.py",1005,65," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n ",python,selection_mouse
196
+ 195,56983,"models/dynamics.py",1005,138," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n ",python,selection_mouse
197
+ 196,56984,"models/dynamics.py",1005,218," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n ",python,selection_mouse
198
+ 197,57025,"models/dynamics.py",1005,303," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n ",python,selection_mouse
199
+ 198,57028,"models/dynamics.py",1005,396," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n ",python,selection_mouse
200
+ 199,57065,"models/dynamics.py",1005,491," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n ",python,selection_mouse
201
+ 200,57066,"models/dynamics.py",1005,492," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n ",python,selection_mouse
202
+ 201,57070,"models/dynamics.py",1005,497," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise",python,selection_mouse
203
+ 202,57088,"models/dynamics.py",1005,569," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed",python,selection_mouse
204
+ 203,57228,"models/dynamics.py",1005,560," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n ",python,selection_mouse
205
+ 204,57229,"models/dynamics.py",1005,492," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n ",python,selection_mouse
206
+ 205,57231,"models/dynamics.py",1005,491," rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n ",python,selection_mouse
207
+ 206,57634,"models/dynamics.py",1496,0,"",python,selection_mouse
208
+ 207,59595,"models/dynamics.py",128,0,"",python,selection_mouse
209
+ 208,59744,"models/dynamics.py",127,5,"class",python,selection_mouse
210
+ 209,59933,"models/dynamics.py",127,67,"class DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n",python,selection_mouse
211
+ 210,59976,"models/dynamics.py",127,196,"class DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def",python,selection_mouse
212
+ 211,59977,"models/dynamics.py",127,627,"class DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )",python,selection_mouse
213
+ 212,60095,"models/dynamics.py",127,833,"class DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed",python,selection_mouse
214
+ 213,60096,"models/dynamics.py",127,1263,"class DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)",python,selection_mouse
215
+ 214,60097,"models/dynamics.py",127,1557,"class DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)\n \n\n else:\n mask = None\n",python,selection_mouse
216
+ 215,60097,"models/dynamics.py",127,1823,"class DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n # before: with mask token\n # vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n\n # my change, with gaussian noise\n rng1, _rng = jax.random.split(rng1)\n noise = jax.random.normal(_rng, self.mask_token.shape) \n vid_embed = jnp.where(jnp.expand_dims(mask, -1), noise, vid_embed)\n \n\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,selection_mouse
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-7d09022e-0451-4d5a-95fd-fe8f629e1b4b1757071522446-2025_09_05-13.26.09.836/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-825aa81a-f8dc-4fd3-8ed5-69638fcbfc5f1759823186564-2025_10_07-09.46.57.798/source.csv ADDED
The diff for this file is too large to render. See raw diff
 
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-9cdba2ed-e3b9-400c-aa61-3ca40652e83b1753717763365-2025_07_28-17.49.33.649/source.csv ADDED
@@ -0,0 +1,400 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 2,344,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:49:33 PM [info] Activating crowd-code\n5:49:33 PM [info] Recording started\n5:49:33 PM [info] Initializing git provider using file system watchers...\n5:49:33 PM [error] Not a git repository: EntryNotFound (FileSystemError): Error: ENOENT: no such file or directory, stat '/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/.git'\n",Log,tab
3
+ 3,2291,"extension-output-pdoom-org.crowd-code-#1-crowd-code",336,0,"5:49:35 PM [info] Retrying git provider initialization...\n5:49:35 PM [error] Not a git repository: EntryNotFound (FileSystemError): Error: ENOENT: no such file or directory, stat '/home/hk-project-p0023960/tum_cte0515/Projects/jafar_jobs/.git'\n",Log,content
4
+ 4,13045,"utils/nn.py",0,0,"import math\nfrom typing import Tuple\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n ffn_dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n # FIXME (f.srambical): check whether we should still pass the mask if we set is_causal=True\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n z = nn.Dense(\n self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n ffn_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(\n self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, 
ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool):\n """"""\n Create an attention function that uses flash attention if enabled.\n\n Flax MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim)\n jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim).\n\n We need to reshape to ensure compatibility. cuDNN's flash attention additionally\n requires a sequence length that is a multiple of 4. We pad the sequence length to the nearest\n multiple of 4 and mask accordingly.\n """"""\n\n def attention_fn(query, key, value, bias=None, mask=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _rearrange(x):\n return einops.rearrange(x, ""... l h d -> (...) l h d"")\n\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n def _fuse_masks(mask: jax.Array, attention_mask: jax.Array) -> jax.Array:\n mask_bool = mask.astype(jnp.bool_)\n expanded_mask = jnp.pad(\n mask_bool, ((0, pad_size), (0, pad_size)), constant_values=False\n )\n return jnp.logical_and(attention_mask, expanded_mask)\n\n original_shape = query.shape\n original_seq_len = query.shape[-3]\n\n # Pad to nearest multiple of 4\n target_seq_len = ((original_seq_len + 3) // 4) * 4\n pad_size = target_seq_len - original_seq_len\n\n query_4d = _pad(_rearrange(query))\n key_4d = _pad(_rearrange(key))\n value_4d = _pad(_rearrange(value))\n\n attention_mask = jnp.ones((target_seq_len, target_seq_len), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n mask_4d = (\n _fuse_masks(mask, attention_mask) if mask is not None else attention_mask\n )\n mask_4d = mask_4d[jnp.newaxis, jnp.newaxis, :, :] # (1, 1, seq_len, seq_len)\n\n bias_4d = _pad(_rearrange(bias)) if bias is not None else None\n\n output_4d = jax.nn.dot_product_attention(\n query=query_4d,\n key=key_4d,\n value=value_4d,\n bias=bias_4d,\n mask=mask_4d,\n implementation=implementation,\n is_causal=is_causal,\n **kwargs\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab
5
+ 5,26550,"TERMINAL",0,0,"",,terminal_focus
6
+ 6,28662,"TERMINAL",0,0,"",,terminal_focus
7
+ 7,34654,"TERMINAL",0,0,"ime=01:00:00 --partition=accelerated-h100 --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command
8
+ 8,34754,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;e1833c94-a8b3-4524-9e80-61ed159495e5]633;C]0;tum_cte0515@hkn1993:~/Projects/jafar_jobs]633;D",,terminal_output
9
+ 9,72762,"sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom PIL import Image, ImageDraw\nimport tyro\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\nrng = jax.random.PRNGKey(args.seed)\n\n# --- Load Genie checkpoint ---\ngenie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n)\nrng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=jnp.float32),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\nckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n\n\ndef _sampling_wrapper(module, batch):\n return module.sample(\n batch, args.seq_len, args.maskgit_steps, args.temperature, args.sample_argmax\n )\n\n\n# --- Define autoregressive sampling loop ---\ndef _autoreg_sample(rng, video_batch, action_batch):\n vid = video_batch[:, : args.start_frame + 1]\n sampling_fn = jax.jit(nn.apply(_sampling_wrapper, genie))\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=vid, latent_actions=action_batch, rng=_rng)\n generated_vid = sampling_fn(params, batch)\n return generated_vid\n\n\n# --- Get video + latent actions ---\narray_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if 
x.endswith("".array_record"")\n]\ndataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n)\nvideo_batch = next(iter(dataloader))\n# Get latent actions for all videos in the batch\nbatch = dict(videos=video_batch)\naction_batch = genie.apply(params, batch, False, method=Genie.vq_encode)\naction_batch = action_batch.reshape(video_batch.shape[0], args.seq_len - 1, 1)\n\n# --- Sample + evaluate video ---\nvid = _autoreg_sample(rng, video_batch, action_batch)\ngt = video_batch[:, : vid.shape[1]].clip(0, 1).reshape(-1, *video_batch.shape[2:])\nrecon = vid.clip(0, 1).reshape(-1, *vid.shape[2:])\nssim = pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :]).mean()\nprint(f""SSIM: {ssim}"")\n\n# --- Construct video ---\ntrue_videos = (video_batch * 255).astype(np.uint8)\npred_videos = (vid * 255).astype(np.uint8)\nvideo_comparison = np.zeros((2, *vid.shape), dtype=np.uint8)\nvideo_comparison[0] = true_videos[:, : args.seq_len]\nvideo_comparison[1] = pred_videos\nframes = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n# --- Save video ---\nimgs = [Image.fromarray(img) for img in frames]\n# Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\nfor t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch.shape[0]):\n action = action_batch[row, t, 0]\n y_offset = row * video_batch.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\nimgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n)\n",python,tab
10
+ 10,174652,"sample.py",4736,0,"",python,selection_mouse
11
+ 11,175135,"sample.py",4885,0,"",python,selection_mouse
12
+ 12,175670,"sample.py",4799,0,"",python,selection_mouse
13
+ 13,176257,"sample.py",4887,0,"",python,selection_mouse
14
+ 14,176617,"sample.py",5047,0,"",python,selection_mouse
15
+ 15,177208,"sample.py",5054,0,"",python,selection_mouse
16
+ 16,177668,"sample.py",5136,0,"",python,selection_mouse
17
+ 17,178079,"sample.py",5194,0,"",python,selection_mouse
18
+ 18,178390,"sample.py",5228,0,"",python,selection_mouse
19
+ 19,178395,"sample.py",5227,0,"",python,selection_command
20
+ 20,548314,"sample.py",0,0,"",python,tab
21
+ 21,612430,"sample.py",0,0,"",python,tab
22
+ 22,637465,"sample.py",3924,0,"",python,selection_mouse
23
+ 23,638094,"sample.py",3883,0,"",python,selection_mouse
24
+ 24,638114,"sample.py",3882,0,"",python,selection_command
25
+ 25,654272,"sample.py",3883,0," ",python,content
26
+ 26,654279,"sample.py",3883,0,"",python,selection_command
27
+ 27,656338,"sample.py",3883,1,"",python,content
28
+ 28,656376,"sample.py",3882,0,"",python,selection_command
29
+ 29,656753,"sample.py",3883,0,"\n",python,content
30
+ 30,657598,"sample.py",3884,0,"/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/maskgit-maskprob-fix/train_dynamics_maskprob_fix_8_node/3371237",python,content
31
+ 31,658806,"sample.py",3884,131,"",python,content
32
+ 32,659478,"sample.py",3884,0," ",python,content
33
+ 33,659502,"sample.py",3884,0,"",python,selection_command
34
+ 34,660696,"sample.py",3926,0,"",python,selection_command
35
+ 35,662321,"sample.py",3885,0,"",python,selection_mouse
36
+ 36,662325,"sample.py",3884,0,"",python,selection_command
37
+ 37,678461,"sample.py",3884,0,"video_batch = jnp.array(video_batch)\nprint(video_batch.dtype)\nvideo_batch = video_batch.astype(args.dtype) # / 255.0\nprint(video_batch.dtype)\nvideo_batch = video_batch / 255.0\nprint(video_batch.dtype)",python,content
38
+ 38,679700,"sample.py",4083,0,"",python,selection_command
39
+ 39,683326,"sample.py",3638,0,"",python,selection_mouse
40
+ 40,684400,"utils/dataloader.py",0,0,"import jax\nimport numpy as np\nimport grain\nfrom typing import Any\nimport pickle\n\n\nclass EpisodeLengthFilter(grain.transforms.Filter):\n """"""\n A Grain Filter that keeps only episodes with sufficient length.\n """"""\n\n def __init__(self, seq_len: int, image_h: int, image_w: int, image_c: int):\n """"""Initializes the filter with sequence length requirements.""""""\n self.seq_len = seq_len\n self.image_h = image_h\n self.image_w = image_w\n self.image_c = image_c\n\n def filter(self, element: Any) -> bool:\n """"""\n Filters episodes based on length.\n\n Args:\n element: A dictionary representing one record from the DataSource.\n Expected to contain 'raw_video' (bytes) and 'sequence_length' (int)\n\n Returns:\n True if the episode has sufficient length, False otherwise.\n """"""\n assert isinstance(element, bytes)\n element = pickle.loads(element)\n\n current_episode_len = element[""sequence_length""]\n if current_episode_len < self.seq_len:\n print(\n f""Filtering out episode with length {current_episode_len}, which is ""\n f""shorter than the requested sequence length {self.seq_len}.""\n )\n return False\n\n return True\n\n\nclass ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n """"""\n\n def __init__(self, seq_len: int, image_h: int, image_w: int, image_c: int):\n """"""Initializes the transformation with processing parameters.""""""\n self.seq_len = seq_len\n self.image_h = image_h\n self.image_w = image_w\n self.image_c = image_c\n\n def random_map(self, element: dict, rng: np.random.Generator) -> Any:\n """"""\n Processes a single raw episode from the data source.\n\n Args:\n element: A dictionary representing one record from the DataSource.\n Expected to contain 'raw_video' (bytes) and 'sequence_length' (int)\n rng: A per-record random number generator provided by the Grain sampler.\n\n Returns:\n A processed video sequence as a NumPy array with shape\n (seq_len, height, width, channels) and dtype float32.\n """"""\n assert isinstance(element, bytes)\n element = pickle.loads(element)\n\n video_shape = (\n element[""sequence_length""],\n self.image_h,\n self.image_w,\n self.image_c,\n )\n episode_tensor = np.frombuffer(element[""raw_video""], dtype=np.uint8)\n episode_tensor = episode_tensor.reshape(video_shape)\n\n current_episode_len = episode_tensor.shape[0]\n if current_episode_len < self.seq_len:\n raise ValueError(\n f""Episode length {current_episode_len} is shorter than ""\n f""requested sequence length {self.seq_len}. 
This should ""\n f""have been filtered out.""\n )\n\n max_start_idx = current_episode_len - self.seq_len\n\n start_idx = rng.integers(0, max_start_idx + 1)\n\n seq = episode_tensor[start_idx : start_idx + self.seq_len]\n\n return seq\n\n\ndef get_dataloader(\n array_record_paths: list[str],\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n num_workers: int = 1,\n prefetch_buffer_size: int = 1,\n seed: int = 42,\n):\n """"""\n Creates a data loading pipeline using Grain.\n """"""\n if not array_record_paths:\n raise ValueError(""array_record_paths list cannot be empty."")\n\n num_processes = jax.process_count()\n\n if global_batch_size % num_processes != 0:\n raise ValueError(\n f""Global batch size {global_batch_size} must be divisible by ""\n f""the number of JAX processes {num_processes} for proper sharding.""\n )\n per_process_batch_size = global_batch_size // num_processes\n\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\n\n sampler = grain.samplers.IndexSampler(\n num_records=len(source),\n shard_options=grain.sharding.ShardByJaxProcess(drop_remainder=True),\n shuffle=True,\n num_epochs=None,\n seed=seed,\n )\n\n operations = [\n EpisodeLengthFilter(\n seq_len=seq_len, image_h=image_h, image_w=image_w, image_c=image_c\n ),\n ProcessEpisodeAndSlice(\n seq_len=seq_len, image_h=image_h, image_w=image_w, image_c=image_c\n ),\n grain.transforms.Batch(batch_size=per_process_batch_size, drop_remainder=True),\n ]\n\n read_options = grain.ReadOptions(\n prefetch_buffer_size=prefetch_buffer_size,\n num_threads=1,\n )\n dataloader = grain.DataLoader(\n data_source=source,\n sampler=sampler,\n operations=operations,\n worker_count=num_workers,\n worker_buffer_size=1,\n read_options=read_options,\n )\n\n return dataloader\n",python,tab
41
+ 41,687591,"sample.py",0,0,"",python,tab
42
+ 42,699155,"sample.py",3992,0,"",python,selection_mouse
43
+ 43,699359,"sample.py",3992,2," /",python,selection_mouse
44
+ 44,699375,"sample.py",3992,3," / ",python,selection_mouse
45
+ 45,699391,"sample.py",3992,5," / 25",python,selection_mouse
46
+ 46,699406,"sample.py",3992,7," / 255.",python,selection_mouse
47
+ 47,699424,"sample.py",3992,8," / 255.0",python,selection_mouse
48
+ 48,700379,"sample.py",3992,8,"",python,content
49
+ 49,700396,"sample.py",3991,0,"",python,selection_command
50
+ 50,700520,"sample.py",3991,1,"",python,content
51
+ 51,700526,"sample.py",3990,0,"",python,selection_command
52
+ 52,700698,"sample.py",3990,1,"",python,content
53
+ 53,700704,"sample.py",3989,0,"",python,selection_command
54
+ 54,831446,"sample.py",2895,0,"",python,selection_mouse
55
+ 55,831625,"sample.py",2890,5,"kpt)\n",python,selection_mouse
56
+ 56,831636,"sample.py",2886,9,"te(ckpt)\n",python,selection_mouse
57
+ 57,831650,"sample.py",2801,94,"pointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
58
+ 58,831668,"sample.py",2797,98,"heckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
59
+ 59,831678,"sample.py",2796,99,"Checkpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
60
+ 60,831703,"sample.py",2794,101,"eeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
61
+ 61,831711,"sample.py",2792,103,"TreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
62
+ 62,831745,"sample.py",2789,106," PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
63
+ 63,831746,"sample.py",2788,107,"= PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
64
+ 64,831829,"sample.py",2787,108," = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
65
+ 65,831831,"sample.py",2786,109,"t = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
66
+ 66,831831,"sample.py",2785,110,"pt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
67
+ 67,831831,"sample.py",2784,111,"kpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
68
+ 68,831911,"sample.py",2783,112,"ckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n",python,selection_mouse
69
+ 69,832707,"sample.py",2783,113,"",python,content
70
+ 70,833712,"sample.py",2783,0,"\n",python,content
71
+ 71,834084,"sample.py",2784,0,"\ndummy_train_state = TrainState.create(\n apply_fn=genie.apply,\n params=params,\n tx=optax.adamw(\n optax.warmup_cosine_decay_schedule(\n 0, 0, 1, 2 # dummy values\n )\n ), \n)\nhandler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\nhandler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\ncheckpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=ocp.CheckpointManagerOptions(step_format_fixed_length=6),\n handler_registry=handler_registry\n)\nabstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, dummy_train_state\n)\n\nrestored = checkpoint_manager.restore(\n args.checkpoint_step or checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n ),\n)\nrestored_train_state = restored[""model_state""]\nparams = restored_train_state.params",python,content
72
+ 72,835493,"sample.py",3729,0,"",python,selection_command
73
+ 73,838304,"sample.py",2784,0,"",python,selection_mouse
74
+ 74,849552,"sample.py",250,0,"",python,selection_mouse
75
+ 75,849558,"sample.py",249,0,"",python,selection_command
76
+ 76,851771,"sample.py",201,0,"",python,selection_command
77
+ 77,852002,"sample.py",202,0,"ckpt = PyTreeCheckpointer().restore(args.checkpoint)[""model""][""params""][""params""]\nparams[""params""].update(ckpt)\n\n",python,content
78
+ 78,852026,"sample.py",202,0,"",python,selection_command
79
+ 79,853606,"sample.py",202,113,"",python,content
80
+ 80,853623,"sample.py",201,0,"",python,selection_command
81
+ 81,854162,"sample.py",217,0,"\n",python,content
82
+ 82,854375,"sample.py",218,0,"import optax\n",python,content
83
+ 83,863874,"sample.py",231,0,"from flax.training.train_state import TrainState\n",python,content
84
+ 84,864734,"sample.py",279,1,"",python,content
85
+ 85,865699,"sample.py",206,0,"",python,selection_mouse
86
+ 86,865744,"sample.py",205,0,"",python,selection_command
87
+ 87,866346,"sample.py",170,48,"",python,content
88
+ 88,877297,"sample.py",182,0,"",python,selection_mouse
89
+ 89,877303,"sample.py",181,0,"",python,selection_command
90
+ 90,878057,"sample.py",182,0,"\n",python,content
91
+ 91,878214,"sample.py",183,0,"import orbax.checkpoint as ocp\n",python,content
92
+ 92,878935,"sample.py",213,1,"",python,content
93
+ 93,886469,"sample.py",0,0,"",python,tab
94
+ 94,1017820,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = 
jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n 
print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, grain_iterator, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n 
mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
95
+ 95,1038040,"train_dynamics.py",9442,0,"",python,selection_mouse
96
+ 96,1039475,"train_dynamics.py",9578,0,"",python,selection_mouse
97
+ 97,1040030,"train_dynamics.py",9599,0,"",python,selection_mouse
98
+ 98,1041040,"train_dynamics.py",9561,0,"",python,selection_mouse
99
+ 99,1041786,"train_dynamics.py",9598,0,"",python,selection_mouse
100
+ 100,1041787,"train_dynamics.py",9597,0,"",python,selection_command
101
+ 101,1042107,"train_dynamics.py",9597,1,")",python,selection_mouse
102
+ 102,1042112,"train_dynamics.py",9598,0,"",python,selection_command
103
+ 103,1042486,"train_dynamics.py",9556,42," handler_registry=handler_registry,\n )",python,selection_mouse
104
+ 104,1042521,"train_dynamics.py",9555,43," handler_registry=handler_registry,\n )",python,selection_mouse
105
+ 105,1049993,"train_dynamics.py",10588,0,"",python,selection_mouse
106
+ 106,1050114,"train_dynamics.py",10574,18,"checkpoint_manager",python,selection_mouse
107
+ 107,1051453,"sample.py",0,0,"",python,tab
108
+ 108,1055431,"sample.py",3549,0,"",python,selection_mouse
109
+ 109,1056015,"sample.py",3540,0,"",python,selection_mouse
110
+ 110,1056169,"sample.py",3533,15,"checkpoint_step",python,selection_mouse
111
+ 111,1056463,"sample.py",3532,16,".checkpoint_step",python,selection_mouse
112
+ 112,1056507,"sample.py",3528,20,"args.checkpoint_step",python,selection_mouse
113
+ 113,1057117,"sample.py",3531,0,"",python,selection_mouse
114
+ 114,1057118,"sample.py",3528,4,"args",python,selection_mouse
115
+ 115,1057359,"sample.py",3528,5,"args.",python,selection_mouse
116
+ 116,1057392,"sample.py",3528,20,"args.checkpoint_step",python,selection_mouse
117
+ 117,1060630,"sample.py",3528,20,"",python,content
118
+ 118,1060952,"sample.py",3528,1,"",python,content
119
+ 119,1061195,"sample.py",3528,1,"",python,content
120
+ 120,1061241,"sample.py",3528,1,"",python,content
121
+ 121,1061419,"sample.py",3528,1,"",python,content
122
+ 122,1201521,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\ndynamics_ckpt_dir=$1\necho $dynamics_ckpt_dir\n\nenv | grep SLURM\n\nsrun python sample.py \\n --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=10 \\n --start_frame=0 \\n --data_dir $array_records_dir\n\n# srun python sample.py \\n # --checkpoint $dynamics_ckpt_dir \\n # --start_frame=0 \\n # --batch_size=12 \\n # --seq_len=2 \\n # --data_dir $array_records_dir\n",shellscript,tab
123
+ 123,1205330,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",794,0,"",shellscript,selection_mouse
124
+ 124,1208070,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",793,1,"",shellscript,content
125
+ 125,1208232,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,1,"",shellscript,content
126
+ 126,1209872,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,0,"5",shellscript,content
127
+ 127,1209874,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",793,0,"",shellscript,selection_keyboard
128
+ 128,1341561,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,1,"",shellscript,content
129
+ 129,1341666,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",792,0,"1",shellscript,content
130
+ 130,1341667,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",793,0,"",shellscript,selection_keyboard
131
+ 131,1342833,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",753,0,"",shellscript,selection_mouse
132
+ 132,1342965,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",750,4,"4096",shellscript,selection_mouse
133
+ 133,1343118,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",749,5,"=4096",shellscript,selection_mouse
134
+ 134,1343136,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",711,43,"dyna_num_heads=16 \\n --dyna_ffn_dim=4096",shellscript,selection_mouse
135
+ 135,1343170,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",684,70,"dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096",shellscript,selection_mouse
136
+ 136,1343286,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,92,"dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096",shellscript,selection_mouse
137
+ 137,1343722,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,0,"",shellscript,selection_mouse
138
+ 138,1343723,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,8,"dyna_dim",shellscript,selection_mouse
139
+ 139,1343914,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,37,"dyna_dim=1024 \\n --dyna_num_blocks",shellscript,selection_mouse
140
+ 140,1343934,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,63,"dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads",shellscript,selection_mouse
141
+ 141,1343970,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,64,"dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=",shellscript,selection_mouse
142
+ 142,1344014,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,66,"dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16",shellscript,selection_mouse
143
+ 143,1344015,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,67,"dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 ",shellscript,selection_mouse
144
+ 144,1344051,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,68,"dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \",shellscript,selection_mouse
145
+ 145,1344085,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",662,94,"dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
146
+ 146,1344432,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",756,0,"",shellscript,selection_mouse
147
+ 147,1344677,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",754,2," \",shellscript,selection_mouse
148
+ 148,1344714,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",724,32,"s=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
149
+ 149,1344719,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",722,34,"ads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
150
+ 150,1344719,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",719,37,"_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
151
+ 151,1344725,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",690,66,"um_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
152
+ 152,1344768,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",686,70,"na_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
153
+ 153,1344769,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",685,71,"yna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
154
+ 154,1344807,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",683,73,"-dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
155
+ 155,1344864,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",682,74,"--dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
156
+ 156,1344864,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",681,75," --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
157
+ 157,1344865,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",658,98," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
158
+ 158,1344902,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",657,99," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
159
+ 159,1344949,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",619,137," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \",shellscript,selection_mouse
160
+ 160,1345397,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",619,0,"",shellscript,selection_mouse
161
+ 161,1345398,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,4," ",shellscript,selection_mouse
162
+ 162,1345595,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,40," --checkpoint $dynamics_ckpt_dir \\n ",shellscript,selection_mouse
163
+ 163,1345611,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,64," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n ",shellscript,selection_mouse
164
+ 164,1345629,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,93," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --",shellscript,selection_mouse
165
+ 165,1345654,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,131," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim",shellscript,selection_mouse
166
+ 166,1345675,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,152," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len",shellscript,selection_mouse
167
+ 167,1345688,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,173," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size",shellscript,selection_mouse
168
+ 168,1345708,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,175," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1",shellscript,selection_mouse
169
+ 169,1345720,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,177," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \",shellscript,selection_mouse
170
+ 170,1345740,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,199," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \",shellscript,selection_mouse
171
+ 171,1345834,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,233," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
172
+ 172,1346251,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",851,0,"",shellscript,selection_mouse
173
+ 173,1346419,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,17,"array_records_dir",shellscript,selection_mouse
174
+ 174,1346604,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",817,34,"\n --data_dir $array_records_dir",shellscript,selection_mouse
175
+ 175,1346631,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,56,"\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
176
+ 176,1346663,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",793,58," \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
177
+ 177,1346701,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",773,78,"\\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
178
+ 178,1346702,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",770,81,"=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
179
+ 179,1346733,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",763,88,"seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
180
+ 180,1346734,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",737,114,"dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
181
+ 181,1346811,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",710,141,"-dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
182
+ 182,1346820,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",709,142,"--dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
183
+ 183,1346857,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",708,143," --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
184
+ 184,1346890,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",681,170," --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
185
+ 185,1346923,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",680,171," --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
186
+ 186,1346954,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",658,193," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
187
+ 187,1346998,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",657,194," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
188
+ 188,1347077,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",619,232," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
189
+ 189,1347479,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",619,0,"",shellscript,selection_mouse
190
+ 190,1347480,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,4," ",shellscript,selection_mouse
191
+ 191,1347663,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,42," --checkpoint $dynamics_ckpt_dir \\n ",shellscript,selection_mouse
192
+ 192,1347702,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,81," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks",shellscript,selection_mouse
193
+ 193,1347702,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,107," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads",shellscript,selection_mouse
194
+ 194,1347737,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,131," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim",shellscript,selection_mouse
195
+ 195,1347738,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,156," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \",shellscript,selection_mouse
196
+ 196,1347771,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,177," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \",shellscript,selection_mouse
197
+ 197,1347803,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,199," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \",shellscript,selection_mouse
198
+ 198,1347839,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,233," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
199
+ 199,1348177,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",851,0,"",shellscript,selection_mouse
200
+ 200,1348310,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,17,"array_records_dir",shellscript,selection_mouse
201
+ 201,1348487,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",817,34,"\n --data_dir $array_records_dir",shellscript,selection_mouse
202
+ 202,1348530,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",795,56,"\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
203
+ 203,1348566,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",774,77,"\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
204
+ 204,1348567,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",771,80,"2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
205
+ 205,1348571,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",737,114,"dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
206
+ 206,1348657,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",736,115,"-dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
207
+ 207,1348689,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",735,116,"--dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
208
+ 208,1348725,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",709,142,"--dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
209
+ 209,1348762,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",708,143," --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
210
+ 210,1348841,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",680,171," --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
211
+ 211,1348852,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",679,172," --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
212
+ 212,1348870,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",657,194," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
213
+ 213,1348906,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,233," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
214
+ 214,1350172,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",619,0,"",shellscript,selection_mouse
215
+ 215,1350172,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,4," ",shellscript,selection_mouse
216
+ 216,1350393,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,40," --checkpoint $dynamics_ckpt_dir \\n ",shellscript,selection_mouse
217
+ 217,1350403,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,64," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n ",shellscript,selection_mouse
218
+ 218,1350433,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,66," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --",shellscript,selection_mouse
219
+ 219,1350438,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,107," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads",shellscript,selection_mouse
220
+ 220,1350466,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,131," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim",shellscript,selection_mouse
221
+ 221,1350557,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,154," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2",shellscript,selection_mouse
222
+ 222,1350557,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,155," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 ",shellscript,selection_mouse
223
+ 223,1350557,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,156," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \",shellscript,selection_mouse
224
+ 224,1350612,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,177," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \",shellscript,selection_mouse
225
+ 225,1350645,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,199," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \",shellscript,selection_mouse
226
+ 226,1350827,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,233," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
227
+ 227,1351509,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",851,0,"",shellscript,selection_mouse
228
+ 228,1351729,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",834,17,"array_records_dir",shellscript,selection_mouse
229
+ 229,1351973,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",817,34,"\n --data_dir $array_records_dir",shellscript,selection_mouse
230
+ 230,1351990,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",814,37,"0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
231
+ 231,1352001,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",813,38,"=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
232
+ 232,1352025,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",781,70,"batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
233
+ 233,1352110,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",763,88,"seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
234
+ 234,1352117,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",737,114,"dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
235
+ 235,1352149,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",711,140,"dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
236
+ 236,1352228,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",684,167,"dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
237
+ 237,1352233,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",683,168,"-dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
238
+ 238,1352265,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",682,169,"--dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
239
+ 239,1352299,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",659,192," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
240
+ 240,1352377,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",658,193," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
241
+ 241,1352382,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",657,194," --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
242
+ 242,1352421,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",618,233," --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=1024 \\n --dyna_num_blocks=16 \\n --dyna_num_heads=16 \\n --dyna_ffn_dim=4096 \\n --seq_len=2 \\n --batch_size=1 \\n --start_frame=0 \\n --data_dir $array_records_dir",shellscript,selection_mouse
243
+ 243,1511438,"sample.py",0,0,"",python,tab
244
+ 244,1512708,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab
245
+ 245,1532243,"sample.py",0,0,"",python,tab
246
+ 246,1538054,"sample.py",3729,0,"",python,selection_mouse
247
+ 247,1555508,"sample.py",2724,0,"",python,selection_mouse
248
+ 248,1557391,"sample.py",2727,0,"",python,selection_mouse
249
+ 249,1557517,"sample.py",2723,7,"float32",python,selection_mouse
250
+ 250,1557873,"sample.py",2722,8,".float32",python,selection_mouse
251
+ 251,1558088,"sample.py",2719,11,"jnp.float32",python,selection_mouse
252
+ 252,1558943,"sample.py",2720,0,"",python,selection_mouse
253
+ 253,1558944,"sample.py",2719,3,"jnp",python,selection_mouse
254
+ 254,1559182,"sample.py",2719,4,"jnp.",python,selection_mouse
255
+ 255,1559194,"sample.py",2719,11,"jnp.float32",python,selection_mouse
256
+ 256,1560674,"sample.py",2719,11,"a",python,content
257
+ 257,1560676,"sample.py",2720,0,"",python,selection_keyboard
258
+ 258,1560951,"sample.py",2720,0,"r",python,content
259
+ 259,1560952,"sample.py",2721,0,"",python,selection_keyboard
260
+ 260,1561136,"sample.py",2721,0,"g",python,content
261
+ 261,1561137,"sample.py",2722,0,"",python,selection_keyboard
262
+ 262,1561224,"sample.py",2722,0,"s",python,content
263
+ 263,1561226,"sample.py",2723,0,"",python,selection_keyboard
264
+ 264,1561387,"sample.py",2723,0,".",python,content
265
+ 265,1561388,"sample.py",2724,0,"",python,selection_keyboard
266
+ 266,1562325,"sample.py",2724,0,"d",python,content
267
+ 267,1562326,"sample.py",2725,0,"",python,selection_keyboard
268
+ 268,1562870,"sample.py",2725,0,"t",python,content
269
+ 269,1562870,"sample.py",2726,0,"",python,selection_keyboard
270
+ 270,1563177,"sample.py",2726,0,"y",python,content
271
+ 271,1563178,"sample.py",2727,0,"",python,selection_keyboard
272
+ 272,1563351,"sample.py",2727,0,"p",python,content
273
+ 273,1563351,"sample.py",2728,0,"",python,selection_keyboard
274
+ 274,1563459,"sample.py",2728,0,"e",python,content
275
+ 275,1563459,"sample.py",2729,0,"",python,selection_keyboard
276
+ 276,1564448,"sample.py",2731,0,"",python,selection_mouse
277
+ 277,1564753,"sample.py",2729,2,"),",python,selection_mouse
278
+ 278,1564760,"sample.py",2731,19,"\n mask_rng=_rng,",python,selection_mouse
279
+ 279,1564880,"sample.py",2660,71,"zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),",python,selection_mouse
280
+ 280,1564880,"sample.py",2656,75,"jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),",python,selection_mouse
281
+ 281,1564884,"sample.py",2655,76,"=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),",python,selection_mouse
282
+ 282,1564900,"sample.py",2649,82,"videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),",python,selection_mouse
283
+ 283,1565113,"sample.py",2624,107,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),",python,selection_mouse
284
+ 284,1565702,"sample.py",2626,0,"",python,selection_mouse
285
+ 285,1565702,"sample.py",2624,12,"dummy_inputs",python,selection_mouse
286
+ 286,1565884,"sample.py",2624,23,"dummy_inputs = dict(\n ",python,selection_mouse
287
+ 287,1565916,"sample.py",2624,24,"dummy_inputs = dict(\n ",python,selection_mouse
288
+ 288,1565916,"sample.py",2624,120,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng",python,selection_mouse
289
+ 289,1565994,"sample.py",2624,128,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)",python,selection_mouse
290
+ 290,1566231,"sample.py",2752,0,"",python,selection_mouse
291
+ 291,1566618,"sample.py",2745,7,"_rng,\n)",python,selection_mouse
292
+ 292,1566637,"sample.py",2736,16,"mask_rng=_rng,\n)",python,selection_mouse
293
+ 293,1566670,"sample.py",2649,103,"videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)",python,selection_mouse
294
+ 294,1566765,"sample.py",2624,128,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)",python,selection_mouse
295
+ 295,1566806,"sample.py",2551,201,"image_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)",python,selection_mouse
296
+ 296,1566903,"sample.py",2517,235,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)",python,selection_mouse
297
+ 297,1567458,"sample.py",2518,0,"",python,selection_mouse
298
+ 298,1567458,"sample.py",2517,3,"rng",python,selection_mouse
299
+ 299,1567681,"sample.py",2517,45,"rng, _rng = jax.random.split(rng)\nimage_shape",python,selection_mouse
300
+ 300,1567700,"sample.py",2517,119,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs",python,selection_mouse
301
+ 301,1567720,"sample.py",2517,132,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n ",python,selection_mouse
302
+ 302,1567752,"sample.py",2517,227,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng",python,selection_mouse
303
+ 303,1567788,"sample.py",2517,235,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)",python,selection_mouse
304
+ 304,1567823,"sample.py",2517,246,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng ",python,selection_mouse
305
+ 305,1567856,"sample.py",2517,284,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie",python,selection_mouse
306
+ 306,1567889,"sample.py",2517,310,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n",python,selection_mouse
307
+ 307,1568001,"sample.py",2517,311,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
308
+ 308,1568793,"sample.py",2828,0,"",python,selection_mouse
309
+ 309,1569175,"sample.py",2827,1,"\n",python,selection_mouse
310
+ 310,1569209,"sample.py",2811,17,", dummy_inputs)\n\n",python,selection_mouse
311
+ 311,1569247,"sample.py",2807,21,"_rng, dummy_inputs)\n\n",python,selection_mouse
312
+ 312,1569280,"sample.py",2769,59,"random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
313
+ 313,1569280,"sample.py",2752,76,"\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
314
+ 314,1569319,"sample.py",2745,83,"_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
315
+ 315,1569351,"sample.py",2744,84,"=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
316
+ 316,1569352,"sample.py",2656,172,"jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
317
+ 317,1569352,"sample.py",2655,173,"=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
318
+ 318,1569383,"sample.py",2624,204,"dummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
319
+ 319,1569525,"sample.py",2551,277,"image_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
320
+ 320,1569602,"sample.py",2520,308,", _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
321
+ 321,1569603,"sample.py",2517,311,"rng, _rng = jax.random.split(rng)\nimage_shape = (args.image_height, args.image_width, args.image_channels)\ndummy_inputs = dict(\n videos=jnp.zeros((args.batch_size, args.seq_len, *image_shape), dtype=args.dtype),\n mask_rng=_rng,\n)\nrng, _rng = jax.random.split(rng)\nparams = genie.init(_rng, dummy_inputs)\n\n",python,selection_mouse
322
+ 322,1569952,"sample.py",2519,0,"",python,selection_mouse
323
+ 323,1587102,"sample.py",2752,0,"",python,selection_mouse
324
+ 324,1587947,"sample.py",2752,0,"u",python,content
325
+ 325,1587948,"sample.py",2753,0,"",python,selection_keyboard
326
+ 326,1588356,"sample.py",2753,0,"u",python,content
327
+ 327,1588358,"sample.py",2754,0,"",python,selection_keyboard
328
+ 328,1588945,"sample.py",2753,1,"",python,content
329
+ 329,1589092,"sample.py",2752,1,"",python,content
330
+ 330,1589664,"sample.py",2751,0,"",python,selection_command
331
+ 331,1589815,"sample.py",3526,0," args.checkpoint_step or",python,content
332
+ 332,1589816,"sample.py",2719,10,"jnp.float32",python,content
333
+ 333,1589816,"sample.py",183,31,"",python,content
334
+ 334,1589848,"sample.py",181,0,"",python,selection_command
335
+ 335,1592180,"sample.py",183,0,"import orbax.checkpoint as ocp\n",python,content
336
+ 336,1592185,"sample.py",2719,11,"args.dtype",python,content
337
+ 337,1592186,"sample.py",3526,24,"",python,content
338
+ 338,1595319,"sample.py",2720,0,"",python,selection_mouse
339
+ 339,1596497,"sample.py",3526,0," args.checkpoint_step or",python,content
340
+ 340,1597293,"sample.py",2719,10,"jnp.float32",python,content
341
+ 341,1597620,"sample.py",183,31,"",python,content
342
+ 342,1598176,"sample.py",183,0,"import orbax.checkpoint as ocp\n",python,content
343
+ 343,1599549,"sample.py",2719,11,"args.dtype",python,content
344
+ 344,1600700,"sample.py",3526,24,"",python,content
345
+ 345,1601110,"sample.py",2752,0,"uu",python,content
346
+ 346,1601626,"sample.py",2752,2,"",python,content
347
+ 347,1602108,"sample.py",2719,10,"jnp.float32",python,content
348
+ 348,1999166,"sample.py",3484,0,"",python,selection_mouse
349
+ 349,1999740,"sample.py",3269,0,"",python,selection_mouse
350
+ 350,2000462,"sample.py",3415,0,"",python,selection_mouse
351
+ 351,2001193,"sample.py",3379,0,"",python,selection_mouse
352
+ 352,2004722,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab
353
+ 353,2006170,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",700,0,"",shellscript,selection_mouse
354
+ 354,2006591,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",794,0,"",shellscript,selection_mouse
355
+ 355,2007314,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",744,0,"",shellscript,selection_mouse
356
+ 356,2007886,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",772,0,"",shellscript,selection_mouse
357
+ 357,2009255,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",743,0,"",shellscript,selection_mouse
358
+ 358,2009397,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",737,12,"dyna_ffn_dim",shellscript,selection_mouse
359
+ 359,2010286,"sample.py",0,0,"",python,tab
360
+ 360,2013566,"sample.py",1062,0,"",python,selection_mouse
361
+ 361,2013720,"sample.py",1056,11,"lam_ffn_dim",python,selection_mouse
362
+ 362,2024615,"sample.py",2727,0,"",python,selection_mouse
363
+ 363,2024769,"sample.py",2723,7,"float32",python,selection_mouse
364
+ 364,2025005,"sample.py",2722,8,".float32",python,selection_mouse
365
+ 365,2025040,"sample.py",2719,11,"jnp.float32",python,selection_mouse
366
+ 366,2028574,"sample.py",2719,11,"a",python,content
367
+ 367,2028575,"sample.py",2720,0,"",python,selection_keyboard
368
+ 368,2028803,"sample.py",2720,0,"r",python,content
369
+ 369,2028807,"sample.py",2721,0,"",python,selection_keyboard
370
+ 370,2028904,"sample.py",2721,0,"g",python,content
371
+ 371,2028905,"sample.py",2722,0,"",python,selection_keyboard
372
+ 372,2029735,"sample.py",2719,3,"args",python,content
373
+ 373,2032082,"sample.py",2723,0,".",python,content
374
+ 374,2032083,"sample.py",2724,0,"",python,selection_keyboard
375
+ 375,2032804,"sample.py",2724,0,"s",python,content
376
+ 376,2032805,"sample.py",2725,0,"",python,selection_keyboard
377
+ 377,2033140,"sample.py",2725,0,"t",python,content
378
+ 378,2033142,"sample.py",2726,0,"",python,selection_keyboard
379
+ 379,2033345,"sample.py",2725,1,"",python,content
380
+ 380,2033512,"sample.py",2724,1,"",python,content
381
+ 381,2033548,"sample.py",2724,0,"d",python,content
382
+ 382,2033550,"sample.py",2725,0,"",python,selection_keyboard
383
+ 383,2033785,"sample.py",2725,0,"t",python,content
384
+ 384,2033785,"sample.py",2726,0,"",python,selection_keyboard
385
+ 385,2034041,"sample.py",2726,0,"p",python,content
386
+ 386,2034042,"sample.py",2727,0,"",python,selection_keyboard
387
+ 387,2034395,"sample.py",2726,1,"",python,content
388
+ 388,2034418,"sample.py",2726,0,"y",python,content
389
+ 389,2034420,"sample.py",2727,0,"",python,selection_keyboard
390
+ 390,2034483,"sample.py",2727,0,"p",python,content
391
+ 391,2034484,"sample.py",2728,0,"",python,selection_keyboard
392
+ 392,2034619,"sample.py",2728,0,"e",python,content
393
+ 393,2034620,"sample.py",2729,0,"",python,selection_keyboard
394
+ 394,2153137,"sample.py",2692,0,"",python,selection_mouse
395
+ 395,2153277,"sample.py",2689,7,"seq_len",python,selection_mouse
396
+ 396,2156276,"slurm/dev/mihir/horeka/yolo-runs/sampling_dev.sh",0,0,"",shellscript,tab
397
+ 397,4512967,"utils/nn.py",0,0,"",python,tab
398
+ 398,4514214,"utils/nn.py",836,0,"",python,selection_mouse
399
+ 399,4514542,"utils/nn.py",927,0,"",python,selection_mouse
400
+ 400,4515109,"utils/nn.py",896,0,"",python,selection_mouse
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-bebf29de-c50f-45f7-b90b-66f518a4cf1c1758196766807-2025_09_18-14.00.11.582/source.csv ADDED
@@ -0,0 +1,71 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 2,781,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:00:11 PM [info] Activating crowd-code\n2:00:11 PM [info] Recording started\n2:00:11 PM [info] Initializing git provider using file system watchers...\n2:00:11 PM [info] Git repository found\n2:00:11 PM [info] Git provider initialized successfully\n2:00:11 PM [info] Initial git state: [object Object]\n",Log,tab
3
+ 3,67473,"TERMINAL",0,0,"bash",,terminal_focus
4
+ 4,69307,"TERMINAL",0,0,"queue",,terminal_command
5
+ 5,69398,"TERMINAL",0,0,"]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Thu Sep 18 14:01:20 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3501894 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3501895 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3501896 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)3501898 accelerat interact tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output
6
+ 6,70446,"TERMINAL",0,0,"1",,terminal_output
7
+ 7,71471,"TERMINAL",0,0,"2",,terminal_output
8
+ 8,72014,"TERMINAL",0,0,"bash",,terminal_focus
9
+ 9,72519,"TERMINAL",0,0,"3",,terminal_output
10
+ 10,73560,"TERMINAL",0,0,"5",,terminal_output
11
+ 11,73838,"TERMINAL",0,0,"watch",,terminal_focus
12
+ 12,74606,"TERMINAL",0,0,"6",,terminal_output
13
+ 13,75684,"TERMINAL",0,0,"7",,terminal_output
14
+ 14,76588,"TERMINAL",0,0,"bash",,terminal_focus
15
+ 15,76705,"TERMINAL",0,0,"8",,terminal_output
16
+ 16,77780,"TERMINAL",0,0,"9",,terminal_output
17
+ 17,78769,"TERMINAL",0,0,"scancel 3501898",,terminal_command
18
+ 18,78780,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
19
+ 19,78821,"TERMINAL",0,0,"30",,terminal_output
20
+ 20,79916,"TERMINAL",0,0,"\r1",,terminal_output
21
+ 21,80049,"TERMINAL",0,0,"watch",,terminal_focus
22
+ 22,80596,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
23
+ 23,81931,"TERMINAL",0,0,"queue",,terminal_command
24
+ 24,81994,"TERMINAL",0,0,"]633;C",,terminal_output
25
+ 25,82072,"TERMINAL",0,0,"[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Thu Sep 18 14:01:33 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3501894 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3501895 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3501896 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output
26
+ 26,83171,"TERMINAL",0,0,"4",,terminal_output
27
+ 27,83321,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
28
+ 28,175951,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
29
+ 29,176011,"TERMINAL",0,0,"]633;C]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
30
+ 30,177065,"TERMINAL",0,0,"queue",,terminal_command
31
+ 31,177170,"TERMINAL",0,0,"]633;C[?1049h(B[?7hEvery 1.0s: squeue --mehkn1991.localdomain: Thu Sep 18 14:03:08 2025JOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)3501894 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3501895 accelerat train_la tum_cte0 PD\t0:00\t 1 (Priority)3501896 accelerat train_to tum_cte0 PD\t0:00\t 1 (Priority)",,terminal_output
32
+ 32,178187,"TERMINAL",0,0,"9",,terminal_output
33
+ 33,178743,"TERMINAL",0,0,"[?1049l\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jasmine",,terminal_output
34
+ 34,179749,"TERMINAL",0,0,"bash",,terminal_focus
35
+ 35,297446,"input_pipeline/generate_coinrun_dataset.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\nimport json\nimport os\nfrom utils import save_chunks\n\n\n@dataclass\nclass Args:\n num_episodes_train: int = 10000\n num_episodes_val: int = 500\n num_episodes_test: int = 500\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 1000\n max_episode_length: int = 1000\n chunk_size: int = 100\n chunks_per_file: int = 100\n seed: int = 0\n\n\nargs = tyro.cli(Args)\nassert (\n args.max_episode_length >= args.min_episode_length\n), ""Maximum episode length must be greater than or equal to minimum episode length.""\n\nif args.min_episode_length < args.chunk_size:\n print(\n ""Warning: Minimum episode length is smaller than chunk size. Note that episodes shorter than the chunk size will be discarded.""\n )\n\n\n# --- Generate episodes ---\ndef generate_episodes(num_episodes, split):\n episode_idx = 0\n episode_metadata = []\n obs_chunks = []\n act_chunks = []\n file_idx = 0\n output_dir_split = os.path.join(args.output_dir, split)\n while episode_idx < num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n\n observations_seq = []\n actions_seq = []\n episode_obs_chunks = []\n episode_act_chunks = []\n\n # --- Run episode ---\n step_t = 0\n for step_t in range(args.max_episode_length):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n env.act(action)\n _, obs, first = env.observe()\n observations_seq.append(obs[""rgb""])\n actions_seq.append(action)\n if len(observations_seq) == args.chunk_size:\n episode_obs_chunks.append(observations_seq)\n episode_act_chunks.append(actions_seq)\n observations_seq = []\n actions_seq = []\n if first:\n break\n\n # --- Save episode ---\n if step_t + 1 >= args.min_episode_length:\n if observations_seq:\n if len(observations_seq) < args.chunk_size:\n print(\n f""Warning: Inconsistent chunk_sizes. Episode has {len(observations_seq)} frames, ""\n f""which is smaller than the requested chunk_size: {args.chunk_size}. 
""\n ""This might lead to performance degradation during training.""\n )\n episode_obs_chunks.append(observations_seq)\n episode_act_chunks.append(actions_seq)\n\n obs_chunks_data = [\n np.concatenate(seq, axis=0).astype(np.uint8)\n for seq in episode_obs_chunks\n ]\n act_chunks_data = [\n np.concatenate(act, axis=0) for act in episode_act_chunks\n ]\n obs_chunks.extend(obs_chunks_data)\n act_chunks.extend(act_chunks_data)\n\n ep_metadata, obs_chunks, file_idx, act_chunks = save_chunks(\n obs_chunks, file_idx, args.chunks_per_file, output_dir_split, act_chunks\n )\n episode_metadata.extend(ep_metadata)\n\n print(f""Episode {episode_idx} completed, length: {step_t + 1}."")\n episode_idx += 1\n else:\n print(f""Episode too short ({step_t + 1}), resampling..."")\n\n if len(obs_chunks) > 0:\n print(\n f""Warning: Dropping {len(obs_chunks)} chunks for consistent number of chunks per file."",\n ""Consider changing the chunk_size and chunks_per_file parameters to prevent data-loss."",\n )\n\n print(f""Done generating {split} split"")\n return episode_metadata\n\n\ndef get_action_space():\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=0)\n return env.ac_space.eltype.n\n\n\ndef main():\n # Set random seed and create dataset directories\n np.random.seed(args.seed)\n # --- Generate episodes ---\n train_episode_metadata = generate_episodes(args.num_episodes_train, ""train"")\n val_episode_metadata = generate_episodes(args.num_episodes_val, ""val"")\n test_episode_metadata = generate_episodes(args.num_episodes_test, ""test"")\n\n # --- Save metadata ---\n metadata = {\n ""env"": ""coinrun"",\n ""num_actions"": get_action_space(),\n ""num_episodes_train"": args.num_episodes_train,\n ""num_episodes_val"": args.num_episodes_val,\n ""num_episodes_test"": args.num_episodes_test,\n ""avg_episode_len_train"": np.mean(\n [ep[""avg_seq_len""] for ep in train_episode_metadata]\n ),\n ""avg_episode_len_val"": np.mean(\n [ep[""avg_seq_len""] for ep in val_episode_metadata]\n ),\n ""avg_episode_len_test"": np.mean(\n [ep[""avg_seq_len""] for ep in test_episode_metadata]\n ),\n ""episode_metadata_train"": train_episode_metadata,\n ""episode_metadata_val"": val_episode_metadata,\n ""episode_metadata_test"": test_episode_metadata,\n }\n with open(os.path.join(args.output_dir, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)\n\n print(f""Done generating dataset."")\n\n\nif __name__ == ""__main__"":\n main()\n",python,tab
36
+ 36,301969,"input_pipeline/generate_coinrun_dataset.py",1352,0,"",python,selection_mouse
37
+ 37,303278,"input_pipeline/generate_coinrun_dataset.py",1350,0,"",python,selection_mouse
38
+ 38,303463,"input_pipeline/generate_coinrun_dataset.py",1343,14,"ProcgenGym3Env",python,selection_mouse
39
+ 39,306091,"input_pipeline/generate_coinrun_dataset.py",231,0,"",python,selection_mouse
40
+ 40,306321,"input_pipeline/generate_coinrun_dataset.py",227,7,"procgen",python,selection_mouse
41
+ 41,306897,"input_pipeline/generate_coinrun_dataset.py",184,0,"",python,selection_mouse
42
+ 42,307088,"input_pipeline/generate_coinrun_dataset.py",182,4,"gym3",python,selection_mouse
43
+ 43,311458,"input_pipeline/generate_coinrun_dataset.py",197,0,"",python,selection_mouse
44
+ 44,311594,"input_pipeline/generate_coinrun_dataset.py",194,8,"types_np",python,selection_mouse
45
+ 45,315008,"input_pipeline/generate_coinrun_dataset.py",1654,0,"",python,selection_mouse
46
+ 46,315082,"input_pipeline/generate_coinrun_dataset.py",1648,8,"types_np",python,selection_mouse
47
+ 47,317287,"input_pipeline/generate_coinrun_dataset.py",184,0,"",python,selection_mouse
48
+ 48,317761,".venv/lib/python3.10/site-packages/gym3/__init__.py",0,0,"from gym3 import libenv, testing, types, types_np\nfrom gym3.asynchronous import AsynchronousWrapper\nfrom gym3.concat import ConcatEnv\nfrom gym3.env import Env\nfrom gym3.interactive import Interactive\nfrom gym3.interop import (\n FromBaselinesVecEnv,\n FromGymEnv,\n ToBaselinesVecEnv,\n ToGymEnv,\n vectorize_gym,\n)\nfrom gym3.subproc import SubprocEnv, SubprocError\nfrom gym3.trajectory_recorder import TrajectoryRecorderWrapper\nfrom gym3.util import call_func\nfrom gym3.video_recorder import VideoRecorderWrapper\nfrom gym3.viewer import ViewerWrapper\nfrom gym3.wrapper import Wrapper, unwrap\nfrom gym3.extract_dict_ob import ExtractDictObWrapper\n\n__all__ = [\n ""AsynchronousWrapper"",\n ""call_func"",\n ""ConcatEnv"",\n ""Env"",\n ""ExtractDictObWrapper"",\n ""FromBaselinesVecEnv"",\n ""FromGymEnv"",\n ""Interactive"",\n ""libenv"",\n ""SubprocEnv"",\n ""SubprocError"",\n ""testing"",\n ""ToBaselinesVecEnv"",\n ""ToGymEnv"",\n ""TrajectoryRecorderWrapper"",\n ""types_np"",\n ""types"",\n ""unwrap"",\n ""vectorize_gym"",\n ""VideoRecorderWrapper"",\n ""ViewerWrapper"",\n ""Wrapper"",\n ""wrappers"",\n]\n",python,tab
49
+ 49,320860,".venv/lib/python3.10/site-packages/gym3/__init__.py",713,0,"",python,selection_mouse
50
+ 50,320880,".venv/lib/python3.10/site-packages/gym3/__init__.py",712,0,"",python,selection_command
51
+ 51,321055,".venv/lib/python3.10/site-packages/gym3/__init__.py",712,1,",",python,selection_mouse
52
+ 52,321138,".venv/lib/python3.10/site-packages/gym3/__init__.py",696,16,"\n ""call_func""",python,selection_mouse
53
+ 53,321139,".venv/lib/python3.10/site-packages/gym3/__init__.py",669,43,"\n ""AsynchronousWrapper"",\n ""call_func""",python,selection_mouse
54
+ 54,321139,".venv/lib/python3.10/site-packages/gym3/__init__.py",657,55,"\n__all__ = [\n ""AsynchronousWrapper"",\n ""call_func""",python,selection_mouse
55
+ 55,321140,".venv/lib/python3.10/site-packages/gym3/__init__.py",642,70,"tDictObWrapper\n\n__all__ = [\n ""AsynchronousWrapper"",\n ""call_func""",python,selection_mouse
56
+ 56,321140,".venv/lib/python3.10/site-packages/gym3/__init__.py",713,0,"",python,selection_command
57
+ 57,321184,".venv/lib/python3.10/site-packages/gym3/__init__.py",601,112,"p\nfrom gym3.extract_dict_ob import ExtractDictObWrapper\n\n__all__ = [\n ""AsynchronousWrapper"",\n ""call_func"",",python,selection_mouse
58
+ 58,321230,".venv/lib/python3.10/site-packages/gym3/__init__.py",561,152,"\nfrom gym3.wrapper import Wrapper, unwrap\nfrom gym3.extract_dict_ob import ExtractDictObWrapper\n\n__all__ = [\n ""AsynchronousWrapper"",\n ""call_func"",",python,selection_mouse
59
+ 59,321746,".venv/lib/python3.10/site-packages/gym3/__init__.py",561,0,"",python,selection_mouse
60
+ 60,321747,".venv/lib/python3.10/site-packages/gym3/__init__.py",560,0,"",python,selection_command
61
+ 61,322629,".venv/lib/python3.10/site-packages/gym3/__init__.py",657,0,"",python,selection_mouse
62
+ 62,323244,".venv/lib/python3.10/site-packages/gym3/__init__.py",636,0,"",python,selection_mouse
63
+ 63,329407,"input_pipeline/generate_coinrun_dataset.py",0,0,"",python,tab
64
+ 64,333462,"input_pipeline/generate_coinrun_dataset copy.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\nimport json\nimport os\nfrom utils import save_chunks\n\n\n@dataclass\nclass Args:\n num_episodes_train: int = 10000\n num_episodes_val: int = 500\n num_episodes_test: int = 500\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 1000\n max_episode_length: int = 1000\n chunk_size: int = 100\n chunks_per_file: int = 100\n seed: int = 0\n\n\nargs = tyro.cli(Args)\nassert (\n args.max_episode_length >= args.min_episode_length\n), ""Maximum episode length must be greater than or equal to minimum episode length.""\n\nif args.min_episode_length < args.chunk_size:\n print(\n ""Warning: Minimum episode length is smaller than chunk size. Note that episodes shorter than the chunk size will be discarded.""\n )\n\n\n# --- Generate episodes ---\ndef generate_episodes(num_episodes, split):\n episode_idx = 0\n episode_metadata = []\n obs_chunks = []\n act_chunks = []\n file_idx = 0\n output_dir_split = os.path.join(args.output_dir, split)\n while episode_idx < num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n\n observations_seq = []\n actions_seq = []\n episode_obs_chunks = []\n episode_act_chunks = []\n\n # --- Run episode ---\n step_t = 0\n for step_t in range(args.max_episode_length):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n env.act(action)\n _, obs, first = env.observe()\n observations_seq.append(obs[""rgb""])\n actions_seq.append(action)\n if len(observations_seq) == args.chunk_size:\n episode_obs_chunks.append(observations_seq)\n episode_act_chunks.append(actions_seq)\n observations_seq = []\n actions_seq = []\n if first:\n break\n\n # --- Save episode ---\n if step_t + 1 >= args.min_episode_length:\n if observations_seq:\n if len(observations_seq) < args.chunk_size:\n print(\n f""Warning: Inconsistent chunk_sizes. Episode has {len(observations_seq)} frames, ""\n f""which is smaller than the requested chunk_size: {args.chunk_size}. 
""\n ""This might lead to performance degradation during training.""\n )\n episode_obs_chunks.append(observations_seq)\n episode_act_chunks.append(actions_seq)\n\n obs_chunks_data = [\n np.concatenate(seq, axis=0).astype(np.uint8)\n for seq in episode_obs_chunks\n ]\n act_chunks_data = [\n np.concatenate(act, axis=0) for act in episode_act_chunks\n ]\n obs_chunks.extend(obs_chunks_data)\n act_chunks.extend(act_chunks_data)\n\n ep_metadata, obs_chunks, file_idx, act_chunks = save_chunks(\n obs_chunks, file_idx, args.chunks_per_file, output_dir_split, act_chunks\n )\n episode_metadata.extend(ep_metadata)\n\n print(f""Episode {episode_idx} completed, length: {step_t + 1}."")\n episode_idx += 1\n else:\n print(f""Episode too short ({step_t + 1}), resampling..."")\n\n if len(obs_chunks) > 0:\n print(\n f""Warning: Dropping {len(obs_chunks)} chunks for consistent number of chunks per file."",\n ""Consider changing the chunk_size and chunks_per_file parameters to prevent data-loss."",\n )\n\n print(f""Done generating {split} split"")\n return episode_metadata\n\n\ndef get_action_space():\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=0)\n return env.ac_space.eltype.n\n\n\ndef main():\n # Set random seed and create dataset directories\n np.random.seed(args.seed)\n # --- Generate episodes ---\n train_episode_metadata = generate_episodes(args.num_episodes_train, ""train"")\n val_episode_metadata = generate_episodes(args.num_episodes_val, ""val"")\n test_episode_metadata = generate_episodes(args.num_episodes_test, ""test"")\n\n # --- Save metadata ---\n metadata = {\n ""env"": ""coinrun"",\n ""num_actions"": get_action_space(),\n ""num_episodes_train"": args.num_episodes_train,\n ""num_episodes_val"": args.num_episodes_val,\n ""num_episodes_test"": args.num_episodes_test,\n ""avg_episode_len_train"": np.mean(\n [ep[""avg_seq_len""] for ep in train_episode_metadata]\n ),\n ""avg_episode_len_val"": np.mean(\n [ep[""avg_seq_len""] for ep in val_episode_metadata]\n ),\n ""avg_episode_len_test"": np.mean(\n [ep[""avg_seq_len""] for ep in test_episode_metadata]\n ),\n ""episode_metadata_train"": train_episode_metadata,\n ""episode_metadata_val"": val_episode_metadata,\n ""episode_metadata_test"": test_episode_metadata,\n }\n with open(os.path.join(args.output_dir, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)\n\n print(f""Done generating dataset."")\n\n\nif __name__ == ""__main__"":\n main()\n",python,tab
65
+ 65,350130,"input_pipeline/generate_breakout_dataset.py",0,0,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\nimport json\nimport os\nfrom utils import save_chunks\n\n\n@dataclass\nclass Args:\n num_episodes_train: int = 10000\n num_episodes_val: int = 500\n num_episodes_test: int = 500\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 1000\n max_episode_length: int = 1000\n chunk_size: int = 100\n chunks_per_file: int = 100\n seed: int = 0\n\n\nargs = tyro.cli(Args)\nassert (\n args.max_episode_length >= args.min_episode_length\n), ""Maximum episode length must be greater than or equal to minimum episode length.""\n\nif args.min_episode_length < args.chunk_size:\n print(\n ""Warning: Minimum episode length is smaller than chunk size. Note that episodes shorter than the chunk size will be discarded.""\n )\n\n\n# --- Generate episodes ---\ndef generate_episodes(num_episodes, split):\n episode_idx = 0\n episode_metadata = []\n obs_chunks = []\n act_chunks = []\n file_idx = 0\n output_dir_split = os.path.join(args.output_dir, split)\n while episode_idx < num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n\n observations_seq = []\n actions_seq = []\n episode_obs_chunks = []\n episode_act_chunks = []\n\n # --- Run episode ---\n step_t = 0\n for step_t in range(args.max_episode_length):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n env.act(action)\n _, obs, first = env.observe()\n observations_seq.append(obs[""rgb""])\n actions_seq.append(action)\n if len(observations_seq) == args.chunk_size:\n episode_obs_chunks.append(observations_seq)\n episode_act_chunks.append(actions_seq)\n observations_seq = []\n actions_seq = []\n if first:\n break\n\n # --- Save episode ---\n if step_t + 1 >= args.min_episode_length:\n if observations_seq:\n if len(observations_seq) < args.chunk_size:\n print(\n f""Warning: Inconsistent chunk_sizes. Episode has {len(observations_seq)} frames, ""\n f""which is smaller than the requested chunk_size: {args.chunk_size}. 
""\n ""This might lead to performance degradation during training.""\n )\n episode_obs_chunks.append(observations_seq)\n episode_act_chunks.append(actions_seq)\n\n obs_chunks_data = [\n np.concatenate(seq, axis=0).astype(np.uint8)\n for seq in episode_obs_chunks\n ]\n act_chunks_data = [\n np.concatenate(act, axis=0) for act in episode_act_chunks\n ]\n obs_chunks.extend(obs_chunks_data)\n act_chunks.extend(act_chunks_data)\n\n ep_metadata, obs_chunks, file_idx, act_chunks = save_chunks(\n obs_chunks, file_idx, args.chunks_per_file, output_dir_split, act_chunks\n )\n episode_metadata.extend(ep_metadata)\n\n print(f""Episode {episode_idx} completed, length: {step_t + 1}."")\n episode_idx += 1\n else:\n print(f""Episode too short ({step_t + 1}), resampling..."")\n\n if len(obs_chunks) > 0:\n print(\n f""Warning: Dropping {len(obs_chunks)} chunks for consistent number of chunks per file."",\n ""Consider changing the chunk_size and chunks_per_file parameters to prevent data-loss."",\n )\n\n print(f""Done generating {split} split"")\n return episode_metadata\n\n\ndef get_action_space():\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=0)\n return env.ac_space.eltype.n\n\n\ndef main():\n # Set random seed and create dataset directories\n np.random.seed(args.seed)\n # --- Generate episodes ---\n train_episode_metadata = generate_episodes(args.num_episodes_train, ""train"")\n val_episode_metadata = generate_episodes(args.num_episodes_val, ""val"")\n test_episode_metadata = generate_episodes(args.num_episodes_test, ""test"")\n\n # --- Save metadata ---\n metadata = {\n ""env"": ""coinrun"",\n ""num_actions"": get_action_space(),\n ""num_episodes_train"": args.num_episodes_train,\n ""num_episodes_val"": args.num_episodes_val,\n ""num_episodes_test"": args.num_episodes_test,\n ""avg_episode_len_train"": np.mean(\n [ep[""avg_seq_len""] for ep in train_episode_metadata]\n ),\n ""avg_episode_len_val"": np.mean(\n [ep[""avg_seq_len""] for ep in val_episode_metadata]\n ),\n ""avg_episode_len_test"": np.mean(\n [ep[""avg_seq_len""] for ep in test_episode_metadata]\n ),\n ""episode_metadata_train"": train_episode_metadata,\n ""episode_metadata_val"": val_episode_metadata,\n ""episode_metadata_test"": test_episode_metadata,\n }\n with open(os.path.join(args.output_dir, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)\n\n print(f""Done generating dataset."")\n\n\nif __name__ == ""__main__"":\n main()\n",python,tab
66
+ 66,489770,"input_pipeline/generate_breakout_dataset.py",1485,0,"",python,selection_mouse
67
+ 67,551302,"input_pipeline/generate_coinrun_dataset.py",0,0,"",python,tab
68
+ 68,551809,"input_pipeline/generate_coinrun_dataset.py",1060,0,"",python,selection_mouse
69
+ 69,552660,"input_pipeline/generate_coinrun_dataset.py",0,0,"",python,selection_command
70
+ 70,553135,"input_pipeline/generate_coinrun_dataset.py",0,3,"""""""",python,selection_command
71
+ 71,553385,"input_pipeline/generate_coinrun_dataset.py",0,5351,"""""""\nGenerates a dataset of random-action CoinRun episodes.\nEpisodes are saved individually as memory-mapped files for efficient loading.\n""""""\n\nfrom dataclasses import dataclass\n\nfrom gym3 import types_np\nimport numpy as np\nfrom procgen import ProcgenGym3Env\nimport tyro\nimport json\nimport os\nfrom utils import save_chunks\n\n\n@dataclass\nclass Args:\n num_episodes_train: int = 10000\n num_episodes_val: int = 500\n num_episodes_test: int = 500\n output_dir: str = ""data/coinrun_episodes""\n min_episode_length: int = 1000\n max_episode_length: int = 1000\n chunk_size: int = 100\n chunks_per_file: int = 100\n seed: int = 0\n\n\nargs = tyro.cli(Args)\nassert (\n args.max_episode_length >= args.min_episode_length\n), ""Maximum episode length must be greater than or equal to minimum episode length.""\n\nif args.min_episode_length < args.chunk_size:\n print(\n ""Warning: Minimum episode length is smaller than chunk size. Note that episodes shorter than the chunk size will be discarded.""\n )\n\n\n# --- Generate episodes ---\ndef generate_episodes(num_episodes, split):\n episode_idx = 0\n episode_metadata = []\n obs_chunks = []\n act_chunks = []\n file_idx = 0\n output_dir_split = os.path.join(args.output_dir, split)\n while episode_idx < num_episodes:\n seed = np.random.randint(0, 10000)\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=seed)\n\n observations_seq = []\n actions_seq = []\n episode_obs_chunks = []\n episode_act_chunks = []\n\n # --- Run episode ---\n step_t = 0\n for step_t in range(args.max_episode_length):\n action = types_np.sample(env.ac_space, bshape=(env.num,))\n env.act(action)\n _, obs, first = env.observe()\n observations_seq.append(obs[""rgb""])\n actions_seq.append(action)\n if len(observations_seq) == args.chunk_size:\n episode_obs_chunks.append(observations_seq)\n episode_act_chunks.append(actions_seq)\n observations_seq = []\n actions_seq = []\n if first:\n break\n\n # --- Save episode ---\n if step_t + 1 >= args.min_episode_length:\n if observations_seq:\n if len(observations_seq) < args.chunk_size:\n print(\n f""Warning: Inconsistent chunk_sizes. Episode has {len(observations_seq)} frames, ""\n f""which is smaller than the requested chunk_size: {args.chunk_size}. 
""\n ""This might lead to performance degradation during training.""\n )\n episode_obs_chunks.append(observations_seq)\n episode_act_chunks.append(actions_seq)\n\n obs_chunks_data = [\n np.concatenate(seq, axis=0).astype(np.uint8)\n for seq in episode_obs_chunks\n ]\n act_chunks_data = [\n np.concatenate(act, axis=0) for act in episode_act_chunks\n ]\n obs_chunks.extend(obs_chunks_data)\n act_chunks.extend(act_chunks_data)\n\n ep_metadata, obs_chunks, file_idx, act_chunks = save_chunks(\n obs_chunks, file_idx, args.chunks_per_file, output_dir_split, act_chunks\n )\n episode_metadata.extend(ep_metadata)\n\n print(f""Episode {episode_idx} completed, length: {step_t + 1}."")\n episode_idx += 1\n else:\n print(f""Episode too short ({step_t + 1}), resampling..."")\n\n if len(obs_chunks) > 0:\n print(\n f""Warning: Dropping {len(obs_chunks)} chunks for consistent number of chunks per file."",\n ""Consider changing the chunk_size and chunks_per_file parameters to prevent data-loss."",\n )\n\n print(f""Done generating {split} split"")\n return episode_metadata\n\n\ndef get_action_space():\n env = ProcgenGym3Env(num=1, env_name=""coinrun"", start_level=0)\n return env.ac_space.eltype.n\n\n\ndef main():\n # Set random seed and create dataset directories\n np.random.seed(args.seed)\n # --- Generate episodes ---\n train_episode_metadata = generate_episodes(args.num_episodes_train, ""train"")\n val_episode_metadata = generate_episodes(args.num_episodes_val, ""val"")\n test_episode_metadata = generate_episodes(args.num_episodes_test, ""test"")\n\n # --- Save metadata ---\n metadata = {\n ""env"": ""coinrun"",\n ""num_actions"": get_action_space(),\n ""num_episodes_train"": args.num_episodes_train,\n ""num_episodes_val"": args.num_episodes_val,\n ""num_episodes_test"": args.num_episodes_test,\n ""avg_episode_len_train"": np.mean(\n [ep[""avg_seq_len""] for ep in train_episode_metadata]\n ),\n ""avg_episode_len_val"": np.mean(\n [ep[""avg_seq_len""] for ep in val_episode_metadata]\n ),\n ""avg_episode_len_test"": np.mean(\n [ep[""avg_seq_len""] for ep in test_episode_metadata]\n ),\n ""episode_metadata_train"": train_episode_metadata,\n ""episode_metadata_val"": val_episode_metadata,\n ""episode_metadata_test"": test_episode_metadata,\n }\n with open(os.path.join(args.output_dir, ""metadata.json""), ""w"") as f:\n json.dump(metadata, f)\n\n print(f""Done generating dataset."")\n\n\nif __name__ == ""__main__"":\n main()\n",python,selection_command
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-d4ecca31-879c-4879-b2a7-b7463e4327b91757416440874-2025_09_09-13.15.15.617/source.csv ADDED
@@ -0,0 +1,7 @@
1
+ Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2
+ 2,1249,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:15:15 PM [info] Activating crowd-code\n1:15:15 PM [info] Recording started\n1:15:15 PM [info] Initializing git provider using file system watchers...\n1:15:16 PM [info] Git repository found\n1:15:16 PM [info] Git provider initialized successfully\n1:15:16 PM [info] Initial git state: [object Object]\n",Log,tab
3
+ 3,436098,"train_lam.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n vq_reset_thresh: int = 50\n # LAM\n model_dim: int = 512\n ffn_dim: int = 2048\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 4\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_lam""\n tags: list[str] = field(default_factory=lambda: [""lam""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n wandb_id: str = """"\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(\n model: LatentActionModel, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, jax.Array, dict]]:\n # --- Compute loss ---\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n outputs[""recon""] = outputs[""recon""].astype(jnp.float32)\n gt_future_frames = gt[:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\n@nnx.jit\ndef train_step(\n lam: LatentActionModel,\n optimizer: nnx.Optimizer,\n inputs: dict,\n action_last_active: jax.Array,\n rng: jax.Array,\n) -> tuple[jax.Array, jax.Array, jax.Array, dict]:\n def loss_fn(\n model: LatentActionModel,\n ) -> 
tuple[jax.Array, tuple[jax.Array, jax.Array, dict]]:\n return lam_loss_fn(model, inputs)\n\n # --- Update model ---\n (loss, (recon, idx_counts, metrics)), grads = nnx.value_and_grad(\n loss_fn, has_aux=True\n )(lam)\n optimizer.update(grads)\n\n # --- Reset inactive latent actions ---\n codebook = lam.vq.codebook\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook.value\n )\n lam.vq.codebook.value = new_codebook\n action_last_active = jnp.where(do_reset, 0, action_last_active)\n return loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n ffn_dim=args.ffn_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n # Count parameters\n _, params, _ = nnx.split(lam, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(lam, tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = 
jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n action_last_active = jnp.zeros(args.num_latents, dtype=jnp.int32)\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n inputs = dict(videos=videos, rng=_rng)\n rng, _rng = jax.random.split(rng)\n loss, recon, action_last_active, metrics = train_step(\n lam, optimizer, inputs, action_last_active, _rng\n )\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0, 1:].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n 
comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
4
+ 4,440483,"train_lam.py",2637,0,"",python,selection_mouse
5
+ 5,441103,"train_lam.py",2648,0,"",python,selection_mouse
6
+ 6,441819,"train_lam.py",2673,0,"",python,selection_mouse
7
+ 7,443158,"train_lam.py",2683,0,"",python,selection_mouse