Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-05830693-fec2-4daa-bf9a-df97d3f440b31752570818732-2025_07_15-11.14.44.738/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-1e500cb5-2752-4243-8002-15d1ebb40f691751798498690-2025_07_06-12.42.01.118/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-2c44fb8b-ed49-4624-90ac-19d20bbba3331752833225757-2025_07_18-12.07.47.556/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-33e2e492-00ca-4b0f-8523-da20187ea5301751307587112-2025_06_30-20.21.41.519/source.csv +53 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3dde1b0c-c963-467e-aa73-fb6c54df3ae41751963426964-2025_07_08-10.30.57.271/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-41b294b4-b89c-4c1d-8a02-14afc9168dc41753085667665-2025_07_21-10.15.04.628/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4457e5d2-f5e8-4b15-95aa-bafa247369991751528947759-2025_07_03-09.50.10.663/source.csv +45 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4624dad2-2cdd-4760-a1d4-90d8cf5390e11752824923646-2025_07_18-09.49.17.759/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-46ca2edf-d936-40bf-b5cb-2bace160abae1751617999883-2025_07_04-16.08.06.165/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4719c5f9-1b15-4792-8afd-690761108bda1751617825355-2025_07_04-10.31.22.581/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4925dddc-46dd-4a2d-8d37-761ea748b28d1753197051515-2025_07_22-17.10.58.842/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53035602-cd5a-4dad-bc79-2cb4d8d4f7681751162692203-2025_06_28-19.04.53.413/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53473ec0-46ca-440e-9f03-b85aa9497ebf1751358924978-2025_07_01-10.36.06.808/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53a6d9e8-1004-4f9b-86d8-e54e29f62e511751401053914-2025_07_01-22.18.23.766/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-5b1a6152-1602-4538-a4b1-6fa9507221151753212707189-2025_07_22-21.32.36.855/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-5e81a867-cde0-4985-b3ad-048a43f856df1751310142803-2025_06_30-12.02.24.285/source.csv +79 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-79bdcdff-1889-496c-9bc9-830e887f70d81751447790479-2025_07_02-11.39.26.104/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-a313d008-5546-415a-a27c-b4bbbd49fb041754912780018-2025_08_11-13.46.25.836/source.csv +69 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-a8a1505c-044b-48cb-8f8f-813d10c86e631752143082919-2025_07_10-12.25.38.442/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-ac1e2da2-a2d2-4327-aaa3-5900bc2b3a561753469517123-2025_07_25-20.52.05.763/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-ae61aa9e-3c01-45fb-b595-af4191213c4d1752828764609-2025_07_18-10.53.17.363/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-b07db19d-68eb-49f3-ac94-3d4c9ee495c61751056974607-2025_06_27-13.42.58.489/source.csv +16 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-b49e6270-a637-4365-8782-4c6523f19f151751436712098-2025_07_02-08.12.19.616/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-e7d20f74-415c-47d0-ad95-3f6da31696d51753194904459-2025_07_22-16.35.52.74/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-f0382786-979c-4a6d-8e9b-f5977f18eb4f1753726151187-2025_08_02-06.58.58.573/source.csv +4 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-f23be3cf-4da5-450c-91f1-df9de045459c1752656830830-2025_07_16-11.08.01.978/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-fbd09e27-2302-4b0c-83a4-a77b7bc2e3dc1751440721102-2025_07_02-09.19.16.832/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-0f165f77-177d-4df8-8da6-833f9d4dc2621758655771904-2025_09_23-21.29.35.488/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-1505f3d0-0cb4-4cc0-84bf-678810d0ac8f1757148592235-2025_09_06-10.49.56.658/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-1c39eab1-1a04-48fa-9b08-becafb3fa49e1764420703814-2025_11_29-13.51.46.850/source.csv +4 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-23a9afd3-c333-4e29-b2ed-efddc66dd34c1757847239961-2025_09_14-11.54.02.348/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-2e0f3382-d392-45fd-ba41-93c983d734d11764453259461-2025_11_29-22.54.23.496/source.csv +202 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-477e72ce-2f20-4f16-86c7-8d47149aaaf41762423822508-2025_11_06-11.10.25.383/source.csv +410 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-4a0c523e-3509-46d2-9ab8-f144f364f7ff1755356823323-2025_08_16-17.07.06.759/source.csv +10 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-4c69dcf8-a147-4975-8e49-6c7ed4761fb81758276984897-2025_09_19-12.16.27.253/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-544d5f3a-a597-4531-ac2f-947fc20565021764422378535-2025_11_29-14.19.42.617/source.csv +102 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-55b74e48-50e3-4bf3-8e02-f03e464c22ac1750632538084-2025_06_22-15.48.59.681/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-6dbde8ba-25c3-4408-ace8-c25f1d6c04e31764455107698-2025_11_29-23.25.11.741/source.csv +34 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-72482fee-a24c-4c9f-bb09-3efbfa32b9fa1765978902238-2025_12_17-14.41.49.56/source.csv +105 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-7ae86302-2ae7-4165-bd93-90b8ccd6716a1754576773319-2025_08_07-16.26.15.956/source.csv +22 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-99fbb8cb-fb8a-45a1-8eda-f4f2025861341764421443087-2025_11_29-14.04.10.639/source.csv +21 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-9dc22d58-e1a3-4f57-8db8-c0aa13ce6c4a1762164243344-2025_11_03-11.04.09.807/source.csv +98 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-a48196e3-0243-4a79-b294-ca177a8db9741764454294286-2025_11_29-23.11.37.391/source.csv +67 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-aa99b858-9894-42ca-bd30-69bb000349481764445038488-2025_11_29-20.37.21.510/source.csv +14 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-aed8fe1d-c82c-4ea1-b53e-fd7d19d3a7b31762451775561-2025_11_06-18.56.19.375/source.csv +11 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b0378404-ad4c-4db7-a171-f843ccf34d071764845103157-2025_12_04-11.45.09.784/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b08d92a3-9c0a-4526-b12f-c973e9c3c43f1752071802867-2025_07_09-16.36.43.962/source.csv +267 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b8260a6a-d6ea-4985-9e8c-0662afe503b41755760144143-2025_08_21-09.09.06.825/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-c20692df-d9df-465f-b623-69a0d9b635a71755511610414-2025_08_18-12.06.56.941/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-d3edf16c-6e28-41c2-b902-1ffbcf83b1411764448152506-2025_11_29-21.29.16.493/source.csv +64 -0
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-05830693-fec2-4daa-bf9a-df97d3f440b31752570818732-2025_07_15-11.14.44.738/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-1e500cb5-2752-4243-8002-15d1ebb40f691751798498690-2025_07_06-12.42.01.118/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-2c44fb8b-ed49-4624-90ac-19d20bbba3331752833225757-2025_07_18-12.07.47.556/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-33e2e492-00ca-4b0f-8523-da20187ea5301751307587112-2025_06_30-20.21.41.519/source.csv
ADDED
@@ -0,0 +1,53 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,12,"utils/dataloader.py",0,0,"import jax\nimport numpy as np\nimport grain\nfrom typing import Any\n\n\n# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write\n# them can perform this one-time conversion. Each record in the ArrayRecord file\n# is expected to be a dictionary with keys like ""raw_video"" and ""sequence_length"",\n# mirroring the structure of the original TFRecord features.\n\n\nclass ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n\n This class replaces the functionality of both `_parse_tfrecord_fn` and\n `_tf_process_episode` from the original tf.data pipeline. It operates on\n a single data record (episode).\n """"""\n\n def __init__(self, seq_len: int, image_h: int, image_w: int, image_c: int):\n """"""Initializes the transformation with processing parameters.""""""\n self.seq_len = seq_len\n self.image_h = image_h\n self.image_w = image_w\n self.image_c = image_c\n\n def random_map(self, element: dict, rng: np.random.Generator) -> Any:\n """"""\n Processes a single raw episode from the data source.\n\n Args:\n element: A dictionary representing one record from the DataSource.\n Expected to contain 'raw_video' (bytes), and\n 'sequence_length' (int).\n rng: A per-record random number generator provided by the Grain sampler.\n\n Returns:\n A processed video sequence as a NumPy array with shape\n (seq_len, image_h, image_w, image_c) and dtype float32.\n """"""\n # Step 1: Parse and reshape the raw video data (replaces _parse_tfrecord_fn)\n # We assume the element from the data source is a dict.\n video_shape = (\n element[""sequence_length""],\n self.image_h,\n self.image_w,\n self.image_c,\n )\n episode_tensor = np.frombuffer(element[""raw_video""], dtype=np.uint8)\n episode_tensor = episode_tensor.reshape(video_shape)\n\n # Step 2: Randomly slice and normalize (replaces _tf_process_episode)\n current_episode_len = episode_tensor.shape[0]\n if current_episode_len < self.seq_len:\n raise ValueError(f""An episode has length {current_episode_len}, which is ""\n f""shorter than the requested sequence length {self.seq_len}."")\n \n max_start_idx = current_episode_len - self.seq_len\n \n # Use the provided Grain RNG for deterministic randomness.\n start_idx = rng.integers(0, max_start_idx + 1)\n\n seq = episode_tensor[start_idx : start_idx + self.seq_len]\n\n # Normalize to [0, 1]\n processed_sequence = seq.astype(np.float32) / 255.0\n\n return processed_sequence\n\n\ndef get_dataloader_grain(\n array_record_paths: list[str], # List of ArrayRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n # shuffle_buffer_size is not needed; Grain shuffles all indices globally.\n num_workers: int = 4, # Replaces num_parallel_calls\n seed: int = 42,\n):\n """"""\n Creates a data loading pipeline using Google Grain.\n\n This function sets up a data source, a sampler for sharding/shuffling,\n and a sequence of operations (transformations) to be applied to the data.\n """"""\n if not array_record_paths:\n raise ValueError(""array_record_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n if global_batch_size % num_processes != 0:\n raise ValueError(\n f""Global batch size {global_batch_size} must 
be divisible by ""\n f""the number of JAX processes {num_processes} for proper sharding.""\n )\n per_process_batch_size = global_batch_size // num_processes\n\n # 1. DataSource: Reads records from ArrayRecord files.\n # This replaces `tf.data.TFRecordDataset`.\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\n \n # 2. Sampler: Defines the order of access, sharding, shuffling, and epochs.\n # This replaces `dataset.shard`, `dataset.shuffle`, and `dataset.repeat`.\n shard_options = grain.sharding.ShardOptions(\n shard_index=process_id, shard_count=num_processes, drop_remainder=True\n )\n\n sampler = grain.samplers.IndexSampler(\n num_records=len(source),\n shard_options=shard_options,\n seed=seed,\n num_epochs=None, # `None` means repeat indefinitely, like `tf.data.repeat(None)`.\n shuffle=True, # Enables global shuffling of all record indices.\n )\n\n # 3. Operations: A list of transformations to apply to each record.\n # This replaces the sequence of `.map()` calls.\n operations = [\n ProcessEpisodeAndSlice(\n seq_len=seq_len, image_h=image_h, image_w=image_w, image_c=image_c\n ),\n grain.transforms.Batch(batch_size=per_process_batch_size, drop_remainder=True),\n ]\n\n # 4. DataLoader: The main entry point that orchestrates the pipeline.\n # It manages worker processes for parallel data loading and transformation,\n # replacing `num_parallel_calls` and `.prefetch()`.\n dataloader = grain.DataLoader(\n data_source=source,\n sampler=sampler,\n operations=operations,\n worker_count=num_workers,\n )\n\n # 5. Return an iterator, which is the standard way to consume a Grain DataLoader.\n return iter(dataloader)",python,tab
2,45577,"utils/dataloader.py",1756,0,"",python,selection_mouse
3,45581,"utils/dataloader.py",1755,0,"",python,selection_command
4,47836,"utils/dataloader.py",0,0,"",python,selection_command
5,135562,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"8:20:34 PM [info] Activating crowd-code\n8:20:34 PM [info] Initializing git provider using file system watchers...\n8:20:34 PM [error] Autostart recording failed unexpectedly: Error: EACCES: permission denied, mkdir '/scratch/crowd-code/crowd-code-33e2e492-00ca-4b0f-8523-da20187ea5301751307587112-2025_06_30-20.20.34.639'\n8:20:34 PM [info] Git repository found\n8:20:34 PM [info] Git provider initialized successfully\n8:20:34 PM [info] Initial git state: [object Object]\n8:21:41 PM [info] Recording started\n",Log,tab
6,137827,"utils/dataloader.py",0,0,"",python,tab
7,137833,"TERMINAL",0,0,"",,terminal_focus
8,168423,"utils/dataloader.py",11,0,"",python,selection_command
9,168670,"utils/dataloader.py",30,0,"",python,selection_command
10,168680,"utils/dataloader.py",43,0,"",python,selection_command
11,168713,"utils/dataloader.py",66,0,"",python,selection_command
12,168900,"utils/dataloader.py",67,0,"",python,selection_command
13,169071,"utils/dataloader.py",68,0,"",python,selection_command
14,170086,"utils/dataloader.py",68,22,"# Note on Data Format:",python,selection_command
15,170312,"utils/dataloader.py",68,102,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you",python,selection_command
16,170441,"utils/dataloader.py",68,177,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script",python,selection_command
17,170593,"utils/dataloader.py",68,252,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write",python,selection_command
18,170709,"utils/dataloader.py",68,333,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write\n# them can perform this one-time conversion. Each record in the ArrayRecord file",python,selection_command
19,170849,"utils/dataloader.py",68,416,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write\n# them can perform this one-time conversion. Each record in the ArrayRecord file\n# is expected to be a dictionary with keys like ""raw_video"" and ""sequence_length"",",python,selection_command
20,171018,"utils/dataloader.py",68,477,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write\n# them can perform this one-time conversion. Each record in the ArrayRecord file\n# is expected to be a dictionary with keys like ""raw_video"" and ""sequence_length"",\n# mirroring the structure of the original TFRecord features.",python,selection_command
21,176574,"utils/dataloader.py",485,0,"",python,selection_command
22,394785,"utils/dataloader.py",547,0,"",python,selection_mouse
23,395482,"utils/dataloader.py",5621,0,"",python,selection_command
24,397214,"utils/dataloader.py",5644,0,"\n ",python,content
25,397428,"utils/dataloader.py",5645,4,"",python,content
26,397654,"utils/dataloader.py",5645,0,"\n",python,content
27,398149,"utils/dataloader.py",5646,0,"c",python,content
28,398150,"utils/dataloader.py",5647,0,"",python,selection_keyboard
29,398263,"utils/dataloader.py",5647,0,"o",python,content
30,398264,"utils/dataloader.py",5648,0,"",python,selection_keyboard
31,398372,"utils/dataloader.py",5648,0,"v",python,content
32,398373,"utils/dataloader.py",5649,0,"",python,selection_keyboard
33,398377,"utils/dataloader.py",5649,0,"n",python,content
34,398378,"utils/dataloader.py",5650,0,"",python,selection_keyboard
35,398553,"utils/dataloader.py",5650,0,"e",python,content
36,398554,"utils/dataloader.py",5651,0,"",python,selection_keyboard
37,398623,"utils/dataloader.py",5651,0,"r",python,content
38,398624,"utils/dataloader.py",5652,0,"",python,selection_keyboard
39,398935,"utils/dataloader.py",5651,1,"",python,content
40,399061,"utils/dataloader.py",5650,1,"",python,content
41,399196,"utils/dataloader.py",5649,1,"",python,content
42,399325,"utils/dataloader.py",5648,1,"",python,content
43,399467,"utils/dataloader.py",5647,1,"",python,content
44,399701,"utils/dataloader.py",5646,1,"",python,content
45,446238,"utils/dataloader.py",5458,0,"",python,selection_mouse
46,446245,"utils/dataloader.py",5457,0,"",python,selection_command
47,446480,"utils/dataloader.py",0,0,"",python,selection_command
48,472031,"utils/dataloader.py",691,0,"class ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n",python,content
49,472032,"utils/dataloader.py",614,75," Converts TFRecord files to ArrayRecord format for use with Grain.\n \n Args:\n tfrecord_folder: Path to folder containing TFRecord files\n output_folder: Path to output folder for ArrayRecord files\n records_per_file: Number of records to write per ArrayRecord file\n feature_description: Dictionary describing TFRecord features. If None,\n uses default description for video data.\n \n Returns:\n List of paths to created ArrayRecord files\n """"""\n if feature_description is None:\n # Default feature description for video data\n feature_description = {\n 'raw_video': tf.io.FixedLenFeature([], tf.string),\n 'sequence_length': tf.io.FixedLenFeature([], tf.int64),\n }\n \n # Create output directory if it doesn't exist\n os.makedirs(output_folder, exist_ok=True)\n \n # Find all TFRecord files\n tfrecord_files = list(Path(tfrecord_folder).glob(""*.tfrecord""))\n if not tfrecord_files:\n raise ValueError(f""No TFRecord files found in {tfrecord_folder}"")\n \n print(f""Found {len(tfrecord_files)} TFRecord files"")\n \n # Create dataset from all TFRecord files\n dataset = tf.data.TFRecordDataset([str(f) for f in tfrecord_files])\n \n def parse_tfrecord(example_proto):\n """"""Parse a single TFRecord example.""""""\n parsed_features = tf.io.parse_single_example(example_proto, feature_description)\n return {\n 'raw_video': parsed_features['raw_video'].numpy(),\n 'sequence_length': int(parsed_features['sequence_length'].numpy())\n }\n \n # Process records and write to ArrayRecord files\n arrayrecord_files = []\n record_count = 0\n file_index = 0\n \n current_writer = None\n \n for record in dataset:\n parsed_record = parse_tfrecord(record)\n \n # Create new writer if needed\n if current_writer is None:\n output_file = os.path.join(output_folder, f""data_{file_index:04d}.arrayrecord"")\n current_writer = grain.ArrayRecordWriter(output_file)\n arrayrecord_files.append(output_file)\n \n # Write record\n current_writer.write(parsed_record)\n record_count += 1\n \n # Close current file and start new one if we've reached the limit\n if record_count % records_per_file == 0:\n current_writer.close()\n current_writer = None\n file_index += 1\n print(f""Created ArrayRecord file {file_index}: {record_count} records processed"")\n \n # Close final writer if it exists\n if current_writer is not None:\n current_writer.close()\n print(f""Created final ArrayRecord file: {record_count} total records processed"")\n \n print(f""Conversion complete! Created {len(arrayrecord_files)} ArrayRecord files"")\n return arrayrecord_files\n",python,content
50,472032,"utils/dataloader.py",548,57,"def convert_tfrecords_to_arrayrecords(\n tfrecord_folder: str,\n output_folder: str,\n records_per_file: int = 1000,\n feature_description: dict = None,\n):",python,content
51,472032,"utils/dataloader.py",66,0,"import tensorflow as tf\nimport os\nfrom pathlib import Path\n",python,content
52,472226,"utils/dataloader.py",3723,0," """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n\n",python,content
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3dde1b0c-c963-467e-aa73-fb6c54df3ae41751963426964-2025_07_08-10.30.57.271/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-41b294b4-b89c-4c1d-8a02-14afc9168dc41753085667665-2025_07_21-10.15.04.628/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4457e5d2-f5e8-4b15-95aa-bafa247369991751528947759-2025_07_03-09.50.10.663/source.csv
ADDED
@@ -0,0 +1,45 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,7,"utils/dataloader.py",0,0,"import jax\nimport numpy as np\nimport grain\nfrom typing import Any, Optional\nfrom array_record.python.array_record_module import ArrayRecordWriter\nimport tensorflow as tf\nimport os\nfrom pathlib import Path\nimport pickle\nimport multiprocessing as mp\nfrom functools import partial\nimport logging\n\ngrain.config.update(""py_debug_mode"", True)\n\n# Configure logging to see debug output\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\ndef _convert_single_tfrecord(\n tfrecord_file: Path,\n output_folder: str,\n feature_description: dict,\n) -> str:\n """"""\n Convert a single TFRecord file to ArrayRecord format.\n \n Args:\n tfrecord_file: Path to the TFRecord file\n output_folder: Output folder for the ArrayRecord file\n feature_description: Dictionary describing TFRecord features\n \n Returns:\n Path to the created ArrayRecord file\n """"""\n output_filename = tfrecord_file.stem + "".array_record""\n output_file = os.path.join(output_folder, output_filename)\n \n dataset = tf.data.TFRecordDataset(str(tfrecord_file))\n \n def parse_tfrecord(example_proto):\n """"""Parse a single TFRecord example.""""""\n parsed_features = tf.io.parse_single_example(example_proto, feature_description)\n raw_video_bytes = parsed_features['raw_video'].numpy()\n sequence_length = int(parsed_features['sequence_length'].numpy())\n \n return {\n 'raw_video': raw_video_bytes,\n 'sequence_length': sequence_length,\n }\n \n record_count = 0\n writer = ArrayRecordWriter(output_file, ""group_size:1"")\n for record in dataset:\n parsed_record = parse_tfrecord(record)\n writer.write(pickle.dumps(parsed_record))\n record_count += 1\n writer.close()\n \n print(f""Converted {tfrecord_file.name} -> {output_filename}: {record_count} records"")\n return output_file\n\n\ndef convert_tfrecords_to_arrayrecords(\n tfrecord_folder: str,\n output_folder: str,\n feature_description: Optional[dict] = None,\n num_workers: Optional[int] = None,\n):\n """"""\n Converts TFRecord files to ArrayRecord format for use with Grain.\n Creates one ArrayRecord file per TFRecord file using multiprocessing.\n \n Args:\n tfrecord_folder: Path to folder containing TFRecord files\n output_folder: Path to output folder for ArrayRecord files\n feature_description: Dictionary describing TFRecord features. If None,\n uses default description for video data.\n num_workers: Number of worker processes. If None, uses CPU count.\n \n Returns:\n List of paths to created ArrayRecord files\n """"""\n if feature_description is None:\n feature_description = {\n 'raw_video': tf.io.FixedLenFeature([], tf.string),\n 'sequence_length': tf.io.FixedLenFeature([], tf.int64),\n }\n \n os.makedirs(output_folder, exist_ok=True)\n \n tfrecord_files = list(Path(tfrecord_folder).glob(""*.tfrecord""))\n if not tfrecord_files:\n raise ValueError(f""No TFRecord files found in {tfrecord_folder}"")\n \n print(f""Found {len(tfrecord_files)} TFRecord files"")\n \n if num_workers is None:\n num_workers = min(mp.cpu_count(), len(tfrecord_files))\n \n print(f""Using {num_workers} worker processes for conversion"")\n \n convert_func = partial(\n _convert_single_tfrecord,\n output_folder=output_folder,\n feature_description=feature_description\n )\n \n with mp.Pool(processes=num_workers) as pool:\n arrayrecord_files = pool.map(convert_func, tfrecord_files)\n \n print(f""Conversion complete! 
Created {len(arrayrecord_files)} ArrayRecord files"")\n return arrayrecord_files\n\n\nclass ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n """"""\n\n def __init__(self, seq_len: int, image_h: int, image_w: int, image_c: int):\n """"""Initializes the transformation with processing parameters.""""""\n self.seq_len = seq_len\n self.image_h = image_h\n self.image_w = image_w\n self.image_c = image_c\n\n def random_map(self, element: dict, rng: np.random.Generator) -> Any:\n """"""\n Processes a single raw episode from the data source.\n\n Args:\n element: A dictionary representing one record from the DataSource.\n Expected to contain 'raw_video' (bytes) and 'sequence_length' (int)\n rng: A per-record random number generator provided by the Grain sampler.\n\n Returns:\n A processed video sequence as a NumPy array with shape\n (seq_len, height, width, channels) and dtype float32.\n """"""\n assert isinstance(element, bytes)\n element = pickle.loads(element)\n \n video_shape = (\n element[""sequence_length""],\n self.image_h,\n self.image_w,\n self.image_c,\n )\n episode_tensor = np.frombuffer(element[""raw_video""], dtype=np.uint8)\n episode_tensor = episode_tensor.reshape(video_shape)\n\n current_episode_len = episode_tensor.shape[0]\n if current_episode_len < self.seq_len:\n raise ValueError(f""An episode has length {current_episode_len}, which is ""\n f""shorter than the requested sequence length {self.seq_len}."")\n \n max_start_idx = current_episode_len - self.seq_len\n \n start_idx = rng.integers(0, max_start_idx + 1)\n\n seq = episode_tensor[start_idx : start_idx + self.seq_len]\n\n processed_sequence = seq.astype(np.float32) / 255.0\n\n return processed_sequence\n\n\ndef get_dataloader(\n array_record_paths: list[str],\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n num_workers: int = 1,\n prefetch_buffer_size: int = 1,\n seed: int = 42,\n):\n """"""\n Creates a data loading pipeline using Grain.\n """"""\n if not array_record_paths:\n raise ValueError(""array_record_paths list cannot be empty."")\n\n num_processes = jax.process_count()\n\n if global_batch_size % num_processes != 0:\n raise ValueError(\n f""Global batch size {global_batch_size} must be divisible by ""\n f""the number of JAX processes {num_processes} for proper sharding.""\n )\n per_process_batch_size = global_batch_size // num_processes\n\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\n \n sampler = grain.samplers.IndexSampler(\n num_records=len(source),\n shard_options=grain.sharding.ShardByJaxProcess(drop_remainder=True),\n # FIXME: check whether the global shuffle is the reason why the dataloader is so slow\n shuffle=False,\n num_epochs=100, # FIXME: is there an equivalent to tf.data.repeat(None)?\n seed=seed,\n )\n\n operations = [\n ProcessEpisodeAndSlice(\n seq_len=seq_len, image_h=image_h, image_w=image_w, image_c=image_c\n ),\n grain.transforms.Batch(batch_size=per_process_batch_size, drop_remainder=True),\n ]\n\n read_options = grain.ReadOptions(\n prefetch_buffer_size=prefetch_buffer_size,\n # FIXME: `If the data is already loaded in memory, we recommend setting this to 0 to\n # avoid Python GIL contention by multiple threads.`\n num_threads=1,\n )\n dataloader = grain.DataLoader(\n data_source=source,\n sampler=sampler,\n operations=operations,\n worker_count=num_workers,\n # FIXME: think about whether we should tune this\n worker_buffer_size=1,\n 
read_options=read_options,\n )\n\n return iter(dataloader)\n\n",python,tab
2,440,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:50:10 AM [info] Activating crowd-code\n9:50:10 AM [info] Recording started\n9:50:10 AM [info] Initializing git provider using file system watchers...\n9:50:10 AM [info] Git repository found\n9:50:10 AM [info] Git provider initialized successfully\n9:50:10 AM [info] Initial git state: [object Object]\n",Log,tab
3,9246,"utils/dataloader.py",0,0,"",python,tab
4,27286,"TERMINAL",0,0,"",,terminal_command
5,27298,"TERMINAL",0,0,"[H[2J]633;A(jafar) ]633;B\r\n[?2004l\r]633;E;;]633;C]0;tum_dbd0378@hkn1993:~/jafar]633;D",,terminal_output
6,32834,"TERMINAL",0,0,"cd /home/hk-project-p0023960/tum_dbd0378/jafar && python calculate_avg_frames.py",,terminal_command
7,32888,"TERMINAL",0,0,"]633;E;2025-07-03 09:50:43 cd /home/hk-project-p0023960/tum_dbd0378/jafar && python calculate_avg_frames.py;]633;C",,terminal_output
8,33000,"TERMINAL",0,0,"Reading ArrayRecord file: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/dummy_arrayrecords_600_shards/videos_0010.array_record\r\nError: 'array_record.python.array_record_module.ArrayRecordReader' object is not iterable\r\n]0;tum_dbd0378@hkn1993:~/jafar]633;D;1]633;P;Cwd=/home/hk-project-p0023960/tum_dbd0378/jafar[?2004h",,terminal_output
9,44132,"calculate_avg_frames.py",0,0,"#!/usr/bin/env python3\n""""""\nScript to calculate the average number of frames per video in an ArrayRecord file.\n""""""\n\nfrom array_record.python.array_record_module import ArrayRecordReader\nimport pickle\nimport sys\n\ndef calculate_avg_frames(array_record_path):\n """"""\n Calculate the average number of frames per video in an ArrayRecord file.\n \n Args:\n array_record_path: Path to the ArrayRecord file\n \n Returns:\n Average number of frames per video\n """"""\n total_frames = 0\n num_videos = 0\n \n print(f""Reading ArrayRecord file: {array_record_path}"")\n \n reader = ArrayRecordReader(array_record_path)\n for record in reader:\n data = pickle.loads(record)\n total_frames += data[""sequence_length""]\n num_videos += 1\n \n # Print progress every 1000 videos\n if num_videos % 1000 == 0:\n print(f""Processed {num_videos} videos..."")\n \n reader.close()\n \n if num_videos > 0:\n avg_frames = total_frames / num_videos\n print(f""\nResults:"")\n print(f""Total videos: {num_videos}"")\n print(f""Total frames: {total_frames}"")\n print(f""Average frames per video: {avg_frames:.2f}"")\n return avg_frames\n else:\n print(""No videos found in the ArrayRecord file."")\n return 0\n\nif __name__ == ""__main__"":\n # Default path to the ArrayRecord file\n array_record_path = ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/dummy_arrayrecords_600_shards/videos_0010.array_record""\n \n # Allow command line argument to override the path\n if len(sys.argv) > 1:\n array_record_path = sys.argv[1]\n \n try:\n calculate_avg_frames(array_record_path)\n except Exception as e:\n print(f""Error: {e}"")\n sys.exit(1) ",python,tab
10,45692,"calculate_avg_frames.py",142,0,"",python,selection_mouse
11,47314,"calculate_avg_frames.py",160,0,"",python,selection_command
12,47505,"calculate_avg_frames.py",167,0,"",python,selection_command
13,48186,"calculate_avg_frames.py",608,0,"",python,selection_command
14,75883,"calculate_avg_frames.py",0,1790," ",python,content
15,76043,"calculate_avg_frames.py",0,0,"#!/usr/bin/env python3\n""""""\nScript to calculate the average number of frames per video in an ArrayRecord file.\n""""""\n\nimport grain\nimport pickle\nimport sys\n\ndef calculate_avg_frames(array_record_path):\n """"""\n Calculate the average number of frames per video in an ArrayRecord file.\n \n Args:\n array_record_path: Path to the ArrayRecord file\n \n Returns:\n Average number of frames per video\n """"""\n total_frames = 0\n num_videos = 0\n \n print(f""Reading ArrayRecord file: {array_record_path}"")\n \n # Use Grain's ArrayRecordDataSource to read the file\n data_source = grain.sources.ArrayRecordDataSource([array_record_path])\n num_records = len(data_source)\n \n print(f""Found {num_records} records in the file"")\n \n for i in range(num_records):\n record_bytes = data_source[i]\n data = pickle.loads(record_bytes)\n total_frames += data[""sequence_length""]\n num_videos += 1\n \n # Print progress every 1000 videos\n if num_videos % 1000 == 0:\n print(f""Processed {num_videos}/{num_records} videos..."")\n \n if num_videos > 0:\n avg_frames = total_frames / num_videos\n print(f""\nResults:"")\n print(f""Total videos: {num_videos}"")\n print(f""Total frames: {total_frames}"")\n print(f""Average frames per video: {avg_frames:.2f}"")\n return avg_frames\n else:\n print(""No videos found in the ArrayRecord file."")\n return 0\n\nif __name__ == ""__main__"":\n # Default path to the ArrayRecord file\n array_record_path = ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/dummy_arrayrecords_600_shards/videos_0010.array_record""\n \n # Allow command line argument to override the path\n if len(sys.argv) > 1:\n array_record_path = sys.argv[1]\n \n try:\n calculate_avg_frames(array_record_path)\n except Exception as e:\n print(f""Error: {e}"")\n sys.exit(1) \n",python,content
16,76046,"calculate_avg_frames.py",1955,2,"",python,content
17,83394,"TERMINAL",0,0,"python calculate_avg_frames.py",,terminal_command
18,83449,"TERMINAL",0,0,"]633;E;2025-07-03 09:51:33 python calculate_avg_frames.py;]633;C",,terminal_output
19,95104,"TERMINAL",0,0,"Reading ArrayRecord file: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/dummy_arrayrecords_600_shards/videos_0010.array_record\r\nFound 100 records in the file\r\n\r\nResults:\r\nTotal videos: 100\r\nTotal frames: 1600\r\nAverage frames per video: 16.00\r\n",,terminal_output
20,95266,"TERMINAL",0,0,"]0;tum_dbd0378@hkn1993:~/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_dbd0378/jafar",,terminal_output
21,158929,"TERMINAL",0,0,"/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record",,terminal_command
22,159080,"TERMINAL",0,0,"]633;E;2025-07-03 09:52:47 python calculate_avg_frames.py /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record;]633;C",,terminal_output
23,159326,"TERMINAL",0,0,"Reading ArrayRecord file: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record\r\nFound 2 records in the file\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record was created with group size 65536. Grain requires group size 1 for good performance. Please re-generate your ArrayRecord files with 'group_size:1'.\r\n",,terminal_output
24,165149,"TERMINAL",0,0,"\r\nResults:\r\nTotal videos: 2\r\nTotal frames: 21361\r\nAverage frames per video: 10680.50\r\n",,terminal_output
25,169642,"TERMINAL",0,0,"python calculate_avg_frames.py /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record",,terminal_focus
26,176593,"calculate_avg_frames.py",419,0,"",python,selection_mouse
27,176597,"calculate_avg_frames.py",418,0,"",python,selection_command
28,178545,"utils/dataloader.py",0,0,"",python,tab
29,181793,"generate_arrayrecord_dataset.py",0,0,"#!/usr/bin/env python3\n""""""\nGenerate ArrayRecord dataset compatible with train_tokenizer.py\n\nThis script creates synthetic video data and saves it in ArrayRecord format\nthat can be used by the tokenizer training script.\n""""""\n\nimport os\nimport pickle\nimport numpy as np\nimport grain\nfrom array_record.python.array_record_module import ArrayRecordWriter\nimport argparse\nimport multiprocessing as mp\nfrom functools import partial\nimport time\n\n\ndef generate_synthetic_video(\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3\n) -> np.ndarray:\n """"""\n Generate synthetic video data with random frames for training.\n \n Args:\n seq_len: Number of frames in the video sequence\n height: Height of each frame\n width: Width of each frame\n channels: Number of color channels\n \n Returns:\n Video array of shape (seq_len, height, width, channels)\n """"""\n video = np.random.rand(seq_len, height, width, channels).astype(np.float32)\n \n return video\n\n\ndef create_single_arrayrecord_file(\n file_info: tuple,\n output_dir: str,\n seq_len: int,\n height: int,\n width: int,\n channels: int,\n records_per_file: int,\n seed: int\n) -> tuple:\n """"""\n Create a single ArrayRecord file with synthetic video data.\n \n Args:\n file_info: Tuple of (file_idx, start_idx, end_idx)\n output_dir: Directory to save ArrayRecord files\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n \n Returns:\n Tuple of (file_path, num_videos_created, success)\n """"""\n file_idx, start_idx, end_idx = file_info\n videos_in_file = end_idx - start_idx\n \n # Set seed for this process (add file_idx to make each file different)\n np.random.seed(seed + file_idx)\n \n file_path = os.path.join(output_dir, f""videos_{file_idx:04d}.array_record"")\n \n try:\n writer = ArrayRecordWriter(file_path, ""group_size:1"")\n \n for video_idx in range(videos_in_file):\n video = generate_synthetic_video(seq_len, height, width, channels)\n \n # Convert to uint8 format as expected by the dataloader\n video_uint8 = (video * 255).astype(np.uint8)\n \n # Create record in the format expected by ProcessEpisodeAndSlice\n record = {\n ""raw_video"": video_uint8.tobytes(),\n ""sequence_length"": seq_len\n }\n \n writer.write(pickle.dumps(record))\n \n writer.close()\n return (file_path, videos_in_file, True)\n \n except Exception as e:\n print(f""Error creating file {file_path}: {e}"")\n return (file_path, 0, False)\n\n\ndef create_arrayrecord_dataset(\n output_dir: str,\n num_videos: int = 1000,\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3,\n records_per_file: int = 100,\n seed: int = 42,\n num_processes: int | None = None\n):\n """"""\n Create ArrayRecord dataset with synthetic video data using multiprocessing.\n \n Args:\n output_dir: Directory to save ArrayRecord files\n num_videos: Total number of videos to generate\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n num_processes: Number of processes to use (None for auto-detect)\n """"""\n os.makedirs(output_dir, exist_ok=True)\n \n num_files = (num_videos + records_per_file - 1) // records_per_file\n \n print(f""Generating {num_videos} 
videos across {num_files} ArrayRecord files..."")\n print(f""Each file will contain up to {records_per_file} videos"")\n print(f""Video dimensions: {seq_len} frames × {height}×{width}×{channels}"")\n \n # Prepare file information for each worker\n file_infos = []\n for file_idx in range(num_files):\n start_idx = file_idx * records_per_file\n end_idx = min((file_idx + 1) * records_per_file, num_videos)\n file_infos.append((file_idx, start_idx, end_idx))\n \n # Set number of processes (use CPU count if not specified)\n if num_processes is None:\n num_processes = min(mp.cpu_count(), num_files)\n \n print(f""Using {num_processes} processes for parallel generation..."")\n \n start_time = time.time()\n \n # Create partial function with fixed arguments\n worker_func = partial(\n create_single_arrayrecord_file,\n output_dir=output_dir,\n seq_len=seq_len,\n height=height,\n width=width,\n channels=channels,\n records_per_file=records_per_file,\n seed=seed\n )\n \n # Use multiprocessing to create files in parallel\n with mp.Pool(processes=num_processes) as pool:\n results = pool.map(worker_func, file_infos)\n \n end_time = time.time()\n \n # Process results\n total_records = 0\n successful_files = 0\n \n for file_path, num_videos_created, success in results:\n if success:\n print(f""✓ Created {file_path} with {num_videos_created} videos"")\n total_records += num_videos_created\n successful_files += 1\n else:\n print(f""✗ Failed to create {file_path}"")\n \n print(f""\nDataset generation complete!"")\n print(f""Total videos generated: {total_records}"")\n print(f""Successful files: {successful_files}/{num_files}"")\n print(f""Files created in: {output_dir}"")\n print(f""Generation time: {end_time - start_time:.2f} seconds"")\n\n\ndef verify_dataset(output_dir: str, num_samples: int = 5):\n """"""\n Verify the generated dataset using Grain's ArrayRecordDataSource.\n \n Args:\n output_dir: Directory containing ArrayRecord files\n num_samples: Number of samples to verify\n """"""\n print(f""\nVerifying dataset in {output_dir}..."")\n \n # Find all ArrayRecord files\n array_record_files = [\n os.path.join(output_dir, f) \n for f in os.listdir(output_dir) \n if f.endswith('.array_record')\n ]\n \n if not array_record_files:\n print(""No ArrayRecord files found!"")\n return\n \n print(f""Found {len(array_record_files)} ArrayRecord files"")\n \n # Use Grain's ArrayRecordDataSource as shown in the documentation\n try:\n data_source = grain.sources.ArrayRecordDataSource(array_record_files[0])\n print(f""Number of records in first file: {len(data_source)}"")\n \n # Load and verify a few samples\n for i in range(min(num_samples, len(data_source))):\n record_bytes = data_source[i]\n record = pickle.loads(record_bytes)\n \n # Reconstruct video from raw_video bytes\n video_shape = (record[""sequence_length""], 90, 160, 3) # Hardcoded for now\n video = np.frombuffer(record[""raw_video""], dtype=np.uint8).reshape(video_shape)\n \n print(f"" Record {i}: video shape = {video.shape}, dtype = {video.dtype}"")\n print(f"" Value range: [{video.min()}, {video.max()}]"")\n print(f"" Mean: {video.mean():.1f}"")\n print(f"" Sequence length: {record['sequence_length']}"")\n \n except Exception as e:\n print(f""Error reading ArrayRecord file: {e}"")\n print(""This might indicate a file format issue."")\n\n\ndef main():\n parser = argparse.ArgumentParser(description=""Generate ArrayRecord dataset for tokenizer training"")\n parser.add_argument(""--output_dir"", type=str, default=""data_arrayrecord/dummy"",\n help=""Output 
directory for ArrayRecord files"")\n parser.add_argument(""--num_videos"", type=int, default=1000,\n help=""Total number of videos to generate"")\n parser.add_argument(""--seq_len"", type=int, default=16,\n help=""Number of frames per video"")\n parser.add_argument(""--height"", type=int, default=90,\n help=""Frame height"")\n parser.add_argument(""--width"", type=int, default=160,\n help=""Frame width"")\n parser.add_argument(""--channels"", type=int, default=3,\n help=""Number of color channels"")\n parser.add_argument(""--records_per_file"", type=int, default=100,\n help=""Number of records per ArrayRecord file"")\n parser.add_argument(""--seed"", type=int, default=42,\n help=""Random seed for reproducibility"")\n parser.add_argument(""--num_processes"", type=int, default=None,\n help=""Number of processes to use (default: auto-detect)"")\n parser.add_argument(""--verify"", action=""store_true"",\n help=""Verify the generated dataset"")\n \n args = parser.parse_args()\n \n # Generate the dataset\n create_arrayrecord_dataset(\n output_dir=args.output_dir,\n num_videos=args.num_videos,\n seq_len=args.seq_len,\n height=args.height,\n width=args.width,\n channels=args.channels,\n records_per_file=args.records_per_file,\n seed=args.seed,\n num_processes=args.num_processes\n )\n \n # Verify if requested\n if args.verify:\n verify_dataset(args.output_dir)\n \nif __name__ == ""__main__"":\n main()\n",python,tab
30,183371,"TERMINAL",0,0,"bash",,terminal_focus
31,217610,"generate_arrayrecord_dataset.py",9009,0,"",python,selection_command
32,217614,"generate_arrayrecord_dataset.py",8127,0,"",python,selection_command
33,217620,"generate_arrayrecord_dataset.py",8069,0,"",python,selection_command
34,217625,"generate_arrayrecord_dataset.py",8088,0,"",python,selection_command
35,217628,"generate_arrayrecord_dataset.py",8092,0,"",python,selection_command
36,217633,"generate_arrayrecord_dataset.py",8094,0,"",python,selection_command
37,217641,"generate_arrayrecord_dataset.py",8101,0,"",python,selection_command
38,217644,"generate_arrayrecord_dataset.py",8104,0,"",python,selection_command
39,217647,"generate_arrayrecord_dataset.py",8101,0,"",python,selection_command
40,217649,"generate_arrayrecord_dataset.py",8094,0,"",python,selection_command
41,247808,"TERMINAL",0,0,"salloc",,terminal_focus
42,268574,"TERMINAL",0,0,"bash",,terminal_focus
43,269333,"TERMINAL",0,0,"salloc",,terminal_focus
44,270973,"TERMINAL",0,0,"bash",,terminal_focus
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4624dad2-2cdd-4760-a1d4-90d8cf5390e11752824923646-2025_07_18-09.49.17.759/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-46ca2edf-d936-40bf-b5cb-2bace160abae1751617999883-2025_07_04-16.08.06.165/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4719c5f9-1b15-4792-8afd-690761108bda1751617825355-2025_07_04-10.31.22.581/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4925dddc-46dd-4a2d-8d37-761ea748b28d1753197051515-2025_07_22-17.10.58.842/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53035602-cd5a-4dad-bc79-2cb4d8d4f7681751162692203-2025_06_28-19.04.53.413/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53473ec0-46ca-440e-9f03-b85aa9497ebf1751358924978-2025_07_01-10.36.06.808/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53a6d9e8-1004-4f9b-86d8-e54e29f62e511751401053914-2025_07_01-22.18.23.766/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-5b1a6152-1602-4538-a4b1-6fa9507221151753212707189-2025_07_22-21.32.36.855/source.csv
ADDED
The diff for this file is too large to render.
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-5e81a867-cde0-4985-b3ad-048a43f856df1751310142803-2025_06_30-12.02.24.285/source.csv
ADDED
@@ -0,0 +1,79 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,2,"tasks",0,0,"",Log,tab
2,34,"models/tokenizer.py",0,0,"from typing import Dict, Any, Tuple\n\nimport flax.linen as nn\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nn.Module):\n """"""ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n\n def setup(self):\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.model_dim,\n self.out_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n recon = self.decoder(outputs[""z_q""]) # (B, T, H_down * W_down, C)\n recon = nn.sigmoid(recon)\n outputs[""recon""] = unpatchify(recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n x = patchify(videos, self.patch_size)\n N = x.shape[2]\n x = self.encoder(x) # (B, T, N, E)\n\n # --- Vector quantize ---\n x = x.reshape(B * T * N, self.latent_dim)\n z_q, z, emb, indices = self.vq(x, training)\n z_q = z_q.reshape(B, T, N, self.latent_dim)\n indices = indices.reshape(B, T, N)\n return dict(z_q=z_q, z=z, emb=emb, indices=indices)\n\n def decode(self, indices: Any, video_hw: Tuple[int, int]):\n z = self.vq.codebook[indices]\n recon = self.decoder(z)\n recon = nn.sigmoid(recon)\n return unpatchify(recon, self.patch_size, *video_hw)\n",python,tab
3,47,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
4,68,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:02:24 PM [info] Activating crowd-code\n12:02:24 PM [info] Recording started\n12:02:24 PM [info] Initializing git provider using file system watchers...\n12:02:24 PM [info] Git repository found\n12:02:24 PM [info] Git provider initialized successfully\n",Log,content
5,86,"extension-output-pdoom-org.crowd-code-#1-crowd-code",250,0,"12:02:24 PM [info] Initial git state: [object Object]\n",Log,content
6,1994,"models/tokenizer.py",0,0,"",python,tab
7,2010,"TERMINAL",0,0,"",,terminal_focus
8,9708,"requirements.txt",0,0,"dm_pix>=0.4.3\neinops>=0.8.0\nflax>=0.8.5\njax>=0.4.30\noptax>=0.2.3\n# procgen>=0.10.7\ntyro>=0.8.5\nwandb>=0.17.4\ntensorflow>=2.1\npre-commit>=4.2.0",pip-requirements,tab
9,10459,"requirements.txt",55,0,"",pip-requirements,selection_command
10,10700,"requirements.txt",68,0,"",pip-requirements,selection_command
11,10732,"requirements.txt",86,0,"",pip-requirements,selection_command
12,10767,"requirements.txt",98,0,"",pip-requirements,selection_command
13,10799,"requirements.txt",112,0,"",pip-requirements,selection_command
14,10834,"requirements.txt",128,0,"",pip-requirements,selection_command
15,11032,"requirements.txt",112,0,"",pip-requirements,selection_command
16,11198,"requirements.txt",98,0,"",pip-requirements,selection_command
17,150706,"vscode.git.Git",0,0,"2025-06-30 12:02:18.847 [info] [main] Log level: Info\n2025-06-30 12:02:18.854 [info] [main] Validating found git in: ""/usr/bin/git""\n2025-06-30 12:02:18.881 [info] [main] Using git ""2.39.5 (Apple Git-154)"" from ""/usr/bin/git""\n2025-06-30 12:02:18.881 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-30 12:02:18.883 [info] [Model][doInitialScan] Initial repository scan completed - repositories (0), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-30 12:02:19.417 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [14ms]\n2025-06-30 12:02:19.417 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.427 [info] > git rev-parse --verify origin/main [9ms]\n2025-06-30 12:02:19.427 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.437 [info] > git rev-parse --verify origin/master [10ms]\n2025-06-30 12:02:19.437 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.446 [info] > git rev-parse --verify origin/develop [9ms]\n2025-06-30 12:02:19.446 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.454 [info] > git branch -r [7ms]\n2025-06-30 12:02:19.454 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.464 [info] > git config --get init.defaultBranch [9ms]\n2025-06-30 12:02:19.833 [info] > git status [13ms]\n2025-06-30 12:02:19.833 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:23.888 [info] [main] Log level: Info\n2025-06-30 12:02:23.891 [info] [main] Validating found git in: ""/usr/bin/git""\n2025-06-30 12:02:23.910 [info] [main] Using git ""2.39.5 (Apple Git-154)"" from ""/usr/bin/git""\n2025-06-30 12:02:23.910 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-30 12:02:24.289 [info] > git rev-parse --show-toplevel [373ms]\n2025-06-30 12:02:24.341 [info] > git rev-parse --git-dir --git-common-dir [21ms]\n2025-06-30 12:02:24.346 [info] [Model][openRepository] Opened repository: /Users/franzsrambical/Documents/pdoom/jafar\n2025-06-30 12:02:24.366 [info] > git rev-parse --show-toplevel [18ms]\n2025-06-30 12:02:24.366 [info] > git config --get commit.template [18ms]\n2025-06-30 12:02:24.377 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [25ms]\n2025-06-30 12:02:24.382 [info] > git rev-parse --show-toplevel [14ms]\n2025-06-30 12:02:24.392 [info] > git rev-parse --show-toplevel [10ms]\n2025-06-30 12:02:24.393 [info] > git status -z -uall [14ms]\n2025-06-30 12:02:24.401 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [20ms]\n2025-06-30 12:02:24.427 [info] > git config --get commit.template [21ms]\n2025-06-30 12:02:24.427 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [23ms]\n2025-06-30 12:02:24.428 [info] > git rev-parse --show-toplevel [36ms]\n2025-06-30 12:02:24.441 [info] > git config --local branch.main.vscode-merge-base [13ms]\n2025-06-30 12:02:24.444 [info] > git rev-parse --show-toplevel [15ms]\n2025-06-30 12:02:24.447 [info] > git for-each-ref 
--format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [18ms]\n2025-06-30 12:02:24.470 [info] > git rev-parse --show-toplevel [23ms]\n2025-06-30 12:02:24.471 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/origin/main refs/remotes/origin/main [29ms]\n2025-06-30 12:02:24.472 [info] > git status -z -uall [24ms]\n2025-06-30 12:02:24.477 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [25ms]\n2025-06-30 12:02:24.482 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [32ms]\n2025-06-30 12:02:24.505 [info] > git merge-base refs/heads/main refs/remotes/origin/main [34ms]\n2025-06-30 12:02:24.511 [info] > git rev-parse --show-toplevel [37ms]\n2025-06-30 12:02:24.516 [info] > git merge-base refs/heads/main refs/remotes/origin/main [32ms]\n2025-06-30 12:02:24.539 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [33ms]\n2025-06-30 12:02:24.545 [info] > git rev-parse --show-toplevel [30ms]\n2025-06-30 12:02:24.546 [info] [Model][doInitialScan] Initial repository scan completed - repositories (1), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-30 12:02:24.549 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [32ms]\n2025-06-30 12:02:24.573 [info] > git rev-parse --show-toplevel [24ms]\n2025-06-30 12:02:24.700 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [31ms]\n2025-06-30 12:02:24.701 [info] > git show --textconv :models/tokenizer.py [34ms]\n2025-06-30 12:02:24.733 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [32ms]\n2025-06-30 12:02:24.833 [info] > git status [41ms]\n2025-06-30 12:02:24.904 [info] > git check-ignore -v -z --stdin [46ms]\n2025-06-30 12:02:25.584 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [12ms]\n2025-06-30 12:02:25.594 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [10ms]\n2025-06-30 12:02:25.607 [info] > git show --textconv :models/tokenizer.py [9ms]\n2025-06-30 12:02:26.301 [info] > git blame --root --incremental 15aa06ec6d6ba69a2d123d5e28b918fec749829a -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [22ms]\n2025-06-30 12:02:28.510 [info] > git fetch [4165ms]\n2025-06-30 12:02:28.510 [info] From github.com:p-doom/jafar\n 15aa06e..a537770 main -> origin/main\n * [new branch] convert-to-jax-array-in-iter -> origin/convert-to-jax-array-in-iter\n * [new branch] feat/explicit-image-dims -> origin/feat/explicit-image-dims\n d95ca45..a97a559 feature/model-parameter-count-utils -> origin/feature/model-parameter-count-utils\n * [new branch] fix_preprocess_video_paths -> origin/fix_preprocess_video_paths\n2025-06-30 12:02:28.521 [info] > git config --get commit.template [10ms]\n2025-06-30 12:02:28.524 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [12ms]\n2025-06-30 12:02:28.537 [info] > git status -z -uall [13ms]\n2025-06-30 12:02:28.539 [info] > git for-each-ref --sort -committerdate --format %(refname) 
%(objectname) %(*objectname) [13ms]\n2025-06-30 12:02:28.552 [info] > git merge-base refs/heads/main refs/remotes/origin/main [12ms]\n2025-06-30 12:02:28.563 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [10ms]\n2025-06-30 12:02:29.712 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [21ms]\n2025-06-30 12:02:29.730 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [17ms]\n2025-06-30 12:02:29.748 [info] > git show --textconv :models/tokenizer.py [16ms]\n2025-06-30 12:02:31.082 [info] > git fetch --all [3398ms]\n2025-06-30 12:02:31.102 [info] > git config --get commit.template [17ms]\n2025-06-30 12:02:31.106 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [19ms]\n2025-06-30 12:02:31.120 [info] > git status -z -uall [14ms]\n2025-06-30 12:02:31.122 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:02:32.020 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [26ms]\n2025-06-30 12:02:32.039 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [19ms]\n2025-06-30 12:02:32.054 [info] > git show --textconv :models/tokenizer.py [14ms]\n2025-06-30 12:02:33.933 [info] > git config --global user.name [24ms]\n2025-06-30 12:02:33.951 [info] > git config --global user.email [17ms]\n2025-06-30 12:02:33.951 [info] [main] Stored git author name in global state: Franz Srambical <franz.srambical@gmail.com>\n2025-06-30 12:02:34.206 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [10ms]\n2025-06-30 12:02:34.207 [info] > git show --textconv :requirements.txt [11ms]\n2025-06-30 12:02:34.217 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [10ms]\n2025-06-30 12:02:34.234 [info] > git blame --root --incremental 15aa06ec6d6ba69a2d123d5e28b918fec749829a -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [14ms]\n2025-06-30 12:02:34.534 [info] > git check-ignore -v -z --stdin [18ms]\n2025-06-30 12:02:34.884 [info] > git config --get commit.template [18ms]\n2025-06-30 12:02:34.888 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [22ms]\n2025-06-30 12:02:34.902 [info] > git status -z -uall [13ms]\n2025-06-30 12:02:34.904 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:02:34.976 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:02:34.992 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [16ms]\n2025-06-30 12:02:35.019 [info] > git show --textconv :requirements.txt [25ms]\n2025-06-30 12:02:35.023 [info] > git status [21ms]\n2025-06-30 12:04:43.429 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [26ms]\n2025-06-30 12:04:43.444 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [13ms]\n2025-06-30 12:04:43.459 [info] > git show --textconv :requirements.txt [13ms]\n2025-06-30 12:04:45.656 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) 
[30ms]\n2025-06-30 12:04:46.302 [info] > git fetch [4119ms]\n2025-06-30 12:04:46.302 [info] From github.com:p-doom/jafar\n * [new branch] grain-dataloader -> origin/grain-dataloader\n2025-06-30 12:04:46.315 [info] > git config --get commit.template [11ms]\n2025-06-30 12:04:46.319 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [16ms]\n2025-06-30 12:04:46.331 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [11ms]\n2025-06-30 12:04:46.332 [info] > git status -z -uall [13ms]\n2025-06-30 12:04:47.421 [info] > git config --get commit.template [28ms]\n2025-06-30 12:04:47.425 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [30ms]\n2025-06-30 12:04:47.440 [info] > git status -z -uall [14ms]\n2025-06-30 12:04:47.442 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:04:47.507 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:04:47.518 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [11ms]\n2025-06-30 12:04:47.530 [info] > git show --textconv :requirements.txt [11ms]\n2025-06-30 12:04:49.187 [info] > git log --oneline --cherry main...main@{upstream} -- [17ms]\n2025-06-30 12:04:50.433 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [28ms]\n2025-06-30 12:04:50.448 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [14ms]\n2025-06-30 12:04:50.462 [info] > git show --textconv :requirements.txt [12ms]\n2025-06-30 12:04:53.045 [info] > git pull --tags origin main [3857ms]\n2025-06-30 12:04:53.045 [info] From github.com:p-doom/jafar\n * branch main -> FETCH_HEAD\nerror: Your local changes to the following files would be overwritten by merge:\n\trequirements.txt\nPlease commit your changes or stash them before you merge.\nAborting\n2025-06-30 12:04:53.062 [info] > git config --get commit.template [14ms]\n2025-06-30 12:04:53.064 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [16ms]\n2025-06-30 12:04:53.077 [info] > git status -z -uall [12ms]\n2025-06-30 12:04:53.078 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [12ms]\n2025-06-30 12:04:54.214 [info] > git config --get commit.template [18ms]\n2025-06-30 12:04:54.217 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [19ms]\n2025-06-30 12:04:54.231 [info] > git status -z -uall [13ms]\n2025-06-30 12:04:54.232 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [14ms]\n2025-06-30 12:04:54.308 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:04:54.319 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [11ms]\n2025-06-30 12:04:54.330 [info] > git show --textconv :requirements.txt [10ms]\n",log,tab
|
| 19 |
+
18,151217,"vscode.git.Git",14002,0,"2025-06-30 12:04:55.496 [info] > git check-ignore -v -z --stdin [21ms]\n",log,content
|
| 20 |
+
19,151739,"requirements.txt",0,0,"",pip-requirements,tab
|
| 21 |
+
20,152857,"requirements.txt",0,0,"",pip-requirements,tab
|
| 22 |
+
21,152869,"requirements.txt",40,0,"",pip-requirements,selection_command
|
| 23 |
+
22,157919,"requirements.txt",40,43,"jax[cuda12]>=0.4.30\noptax>=0.2.3\nprocgen>=0.10.7\n",pip-requirements,content
|
| 24 |
+
23,158930,"requirements.txt",0,0,"",pip-requirements,tab
|
| 25 |
+
24,164840,"requirements.txt",148,0,"\ntqdm>=4.67.1",pip-requirements,content
|
| 26 |
+
25,166157,"models/tokenizer.py",0,0,"",python,tab
|
| 27 |
+
26,170155,"models/tokenizer.py",0,0,"Switched from branch 'main' to 'grain-dataloader'",python,git_branch_checkout
|
| 28 |
+
27,172580,"vscode.git.Git",0,0,"2025-06-30 12:02:18.847 [info] [main] Log level: Info\n2025-06-30 12:02:18.854 [info] [main] Validating found git in: ""/usr/bin/git""\n2025-06-30 12:02:18.881 [info] [main] Using git ""2.39.5 (Apple Git-154)"" from ""/usr/bin/git""\n2025-06-30 12:02:18.881 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-30 12:02:18.883 [info] [Model][doInitialScan] Initial repository scan completed - repositories (0), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-30 12:02:19.417 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [14ms]\n2025-06-30 12:02:19.417 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.427 [info] > git rev-parse --verify origin/main [9ms]\n2025-06-30 12:02:19.427 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.437 [info] > git rev-parse --verify origin/master [10ms]\n2025-06-30 12:02:19.437 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.446 [info] > git rev-parse --verify origin/develop [9ms]\n2025-06-30 12:02:19.446 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.454 [info] > git branch -r [7ms]\n2025-06-30 12:02:19.454 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.464 [info] > git config --get init.defaultBranch [9ms]\n2025-06-30 12:02:19.833 [info] > git status [13ms]\n2025-06-30 12:02:19.833 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:23.888 [info] [main] Log level: Info\n2025-06-30 12:02:23.891 [info] [main] Validating found git in: ""/usr/bin/git""\n2025-06-30 12:02:23.910 [info] [main] Using git ""2.39.5 (Apple Git-154)"" from ""/usr/bin/git""\n2025-06-30 12:02:23.910 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-30 12:02:24.289 [info] > git rev-parse --show-toplevel [373ms]\n2025-06-30 12:02:24.341 [info] > git rev-parse --git-dir --git-common-dir [21ms]\n2025-06-30 12:02:24.346 [info] [Model][openRepository] Opened repository: /Users/franzsrambical/Documents/pdoom/jafar\n2025-06-30 12:02:24.366 [info] > git rev-parse --show-toplevel [18ms]\n2025-06-30 12:02:24.366 [info] > git config --get commit.template [18ms]\n2025-06-30 12:02:24.377 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [25ms]\n2025-06-30 12:02:24.382 [info] > git rev-parse --show-toplevel [14ms]\n2025-06-30 12:02:24.392 [info] > git rev-parse --show-toplevel [10ms]\n2025-06-30 12:02:24.393 [info] > git status -z -uall [14ms]\n2025-06-30 12:02:24.401 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [20ms]\n2025-06-30 12:02:24.427 [info] > git config --get commit.template [21ms]\n2025-06-30 12:02:24.427 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [23ms]\n2025-06-30 12:02:24.428 [info] > git rev-parse --show-toplevel [36ms]\n2025-06-30 12:02:24.441 [info] > git config --local branch.main.vscode-merge-base [13ms]\n2025-06-30 12:02:24.444 [info] > git rev-parse --show-toplevel [15ms]\n2025-06-30 12:02:24.447 [info] > git for-each-ref 
--format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [18ms]\n2025-06-30 12:02:24.470 [info] > git rev-parse --show-toplevel [23ms]\n2025-06-30 12:02:24.471 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/origin/main refs/remotes/origin/main [29ms]\n2025-06-30 12:02:24.472 [info] > git status -z -uall [24ms]\n2025-06-30 12:02:24.477 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [25ms]\n2025-06-30 12:02:24.482 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [32ms]\n2025-06-30 12:02:24.505 [info] > git merge-base refs/heads/main refs/remotes/origin/main [34ms]\n2025-06-30 12:02:24.511 [info] > git rev-parse --show-toplevel [37ms]\n2025-06-30 12:02:24.516 [info] > git merge-base refs/heads/main refs/remotes/origin/main [32ms]\n2025-06-30 12:02:24.539 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [33ms]\n2025-06-30 12:02:24.545 [info] > git rev-parse --show-toplevel [30ms]\n2025-06-30 12:02:24.546 [info] [Model][doInitialScan] Initial repository scan completed - repositories (1), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-30 12:02:24.549 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [32ms]\n2025-06-30 12:02:24.573 [info] > git rev-parse --show-toplevel [24ms]\n2025-06-30 12:02:24.700 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [31ms]\n2025-06-30 12:02:24.701 [info] > git show --textconv :models/tokenizer.py [34ms]\n2025-06-30 12:02:24.733 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [32ms]\n2025-06-30 12:02:24.833 [info] > git status [41ms]\n2025-06-30 12:02:24.904 [info] > git check-ignore -v -z --stdin [46ms]\n2025-06-30 12:02:25.584 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [12ms]\n2025-06-30 12:02:25.594 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [10ms]\n2025-06-30 12:02:25.607 [info] > git show --textconv :models/tokenizer.py [9ms]\n2025-06-30 12:02:26.301 [info] > git blame --root --incremental 15aa06ec6d6ba69a2d123d5e28b918fec749829a -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [22ms]\n2025-06-30 12:02:28.510 [info] > git fetch [4165ms]\n2025-06-30 12:02:28.510 [info] From github.com:p-doom/jafar\n 15aa06e..a537770 main -> origin/main\n * [new branch] convert-to-jax-array-in-iter -> origin/convert-to-jax-array-in-iter\n * [new branch] feat/explicit-image-dims -> origin/feat/explicit-image-dims\n d95ca45..a97a559 feature/model-parameter-count-utils -> origin/feature/model-parameter-count-utils\n * [new branch] fix_preprocess_video_paths -> origin/fix_preprocess_video_paths\n2025-06-30 12:02:28.521 [info] > git config --get commit.template [10ms]\n2025-06-30 12:02:28.524 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [12ms]\n2025-06-30 12:02:28.537 [info] > git status -z -uall [13ms]\n2025-06-30 12:02:28.539 [info] > git for-each-ref --sort -committerdate --format %(refname) 
%(objectname) %(*objectname) [13ms]\n2025-06-30 12:02:28.552 [info] > git merge-base refs/heads/main refs/remotes/origin/main [12ms]\n2025-06-30 12:02:28.563 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [10ms]\n2025-06-30 12:02:29.712 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [21ms]\n2025-06-30 12:02:29.730 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [17ms]\n2025-06-30 12:02:29.748 [info] > git show --textconv :models/tokenizer.py [16ms]\n2025-06-30 12:02:31.082 [info] > git fetch --all [3398ms]\n2025-06-30 12:02:31.102 [info] > git config --get commit.template [17ms]\n2025-06-30 12:02:31.106 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [19ms]\n2025-06-30 12:02:31.120 [info] > git status -z -uall [14ms]\n2025-06-30 12:02:31.122 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:02:32.020 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [26ms]\n2025-06-30 12:02:32.039 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [19ms]\n2025-06-30 12:02:32.054 [info] > git show --textconv :models/tokenizer.py [14ms]\n2025-06-30 12:02:33.933 [info] > git config --global user.name [24ms]\n2025-06-30 12:02:33.951 [info] > git config --global user.email [17ms]\n2025-06-30 12:02:33.951 [info] [main] Stored git author name in global state: Franz Srambical <franz.srambical@gmail.com>\n2025-06-30 12:02:34.206 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [10ms]\n2025-06-30 12:02:34.207 [info] > git show --textconv :requirements.txt [11ms]\n2025-06-30 12:02:34.217 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [10ms]\n2025-06-30 12:02:34.234 [info] > git blame --root --incremental 15aa06ec6d6ba69a2d123d5e28b918fec749829a -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [14ms]\n2025-06-30 12:02:34.534 [info] > git check-ignore -v -z --stdin [18ms]\n2025-06-30 12:02:34.884 [info] > git config --get commit.template [18ms]\n2025-06-30 12:02:34.888 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [22ms]\n2025-06-30 12:02:34.902 [info] > git status -z -uall [13ms]\n2025-06-30 12:02:34.904 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:02:34.976 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:02:34.992 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [16ms]\n2025-06-30 12:02:35.019 [info] > git show --textconv :requirements.txt [25ms]\n2025-06-30 12:02:35.023 [info] > git status [21ms]\n2025-06-30 12:04:43.429 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [26ms]\n2025-06-30 12:04:43.444 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [13ms]\n2025-06-30 12:04:43.459 [info] > git show --textconv :requirements.txt [13ms]\n2025-06-30 12:04:45.656 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) 
[30ms]\n2025-06-30 12:04:46.302 [info] > git fetch [4119ms]\n2025-06-30 12:04:46.302 [info] From github.com:p-doom/jafar\n * [new branch] grain-dataloader -> origin/grain-dataloader\n2025-06-30 12:04:46.315 [info] > git config --get commit.template [11ms]\n2025-06-30 12:04:46.319 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [16ms]\n2025-06-30 12:04:46.331 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [11ms]\n2025-06-30 12:04:46.332 [info] > git status -z -uall [13ms]\n2025-06-30 12:04:47.421 [info] > git config --get commit.template [28ms]\n2025-06-30 12:04:47.425 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [30ms]\n2025-06-30 12:04:47.440 [info] > git status -z -uall [14ms]\n2025-06-30 12:04:47.442 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:04:47.507 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:04:47.518 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [11ms]\n2025-06-30 12:04:47.530 [info] > git show --textconv :requirements.txt [11ms]\n2025-06-30 12:04:49.187 [info] > git log --oneline --cherry main...main@{upstream} -- [17ms]\n2025-06-30 12:04:50.433 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [28ms]\n2025-06-30 12:04:50.448 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [14ms]\n2025-06-30 12:04:50.462 [info] > git show --textconv :requirements.txt [12ms]\n2025-06-30 12:04:53.045 [info] > git pull --tags origin main [3857ms]\n2025-06-30 12:04:53.045 [info] From github.com:p-doom/jafar\n * branch main -> FETCH_HEAD\nerror: Your local changes to the following files would be overwritten by merge:\n\trequirements.txt\nPlease commit your changes or stash them before you merge.\nAborting\n2025-06-30 12:04:53.062 [info] > git config --get commit.template [14ms]\n2025-06-30 12:04:53.064 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [16ms]\n2025-06-30 12:04:53.077 [info] > git status -z -uall [12ms]\n2025-06-30 12:04:53.078 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [12ms]\n2025-06-30 12:04:54.214 [info] > git config --get commit.template [18ms]\n2025-06-30 12:04:54.217 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [19ms]\n2025-06-30 12:04:54.231 [info] > git status -z -uall [13ms]\n2025-06-30 12:04:54.232 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [14ms]\n2025-06-30 12:04:54.308 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:04:54.319 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [11ms]\n2025-06-30 12:04:54.330 [info] > git show --textconv :requirements.txt [10ms]\n2025-06-30 12:04:55.496 [info] > git check-ignore -v -z 
--stdin [21ms]\n2025-06-30 12:04:56.056 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [52ms]\n2025-06-30 12:04:57.126 [info] > git show --textconv HEAD:requirements.txt [15ms]\n2025-06-30 12:04:57.126 [info] > git ls-tree -l HEAD -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [15ms]\n2025-06-30 12:04:58.190 [info] > git status [22ms]\n2025-06-30 12:05:02.110 [info] > git checkout -q -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [24ms]\n2025-06-30 12:05:02.126 [info] > git config --get commit.template [14ms]\n2025-06-30 12:05:02.130 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [18ms]\n2025-06-30 12:05:02.146 [info] > git status -z -uall [15ms]\n2025-06-30 12:05:02.148 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [17ms]\n2025-06-30 12:05:03.218 [info] > git config --get commit.template [16ms]\n2025-06-30 12:05:03.221 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [18ms]\n2025-06-30 12:05:03.238 [info] > git status -z -uall [16ms]\n2025-06-30 12:05:03.240 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [18ms]\n2025-06-30 12:05:03.435 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [15ms]\n2025-06-30 12:05:03.435 [info] > git show --textconv :requirements.txt [16ms]\n2025-06-30 12:05:03.448 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [13ms]\n2025-06-30 12:05:04.249 [info] > git status [23ms]\n2025-06-30 12:05:05.264 [info] > git log --oneline --cherry main...main@{upstream} -- [21ms]\n2025-06-30 12:05:06.509 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [26ms]\n2025-06-30 12:05:06.526 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [16ms]\n2025-06-30 12:05:06.542 [info] > git show --textconv :requirements.txt [14ms]\n2025-06-30 12:05:08.981 [info] > git pull --tags origin main [3715ms]\n2025-06-30 12:05:08.981 [info] From github.com:p-doom/jafar\n * branch main -> FETCH_HEAD\n2025-06-30 12:05:08.992 [info] > git config --get commit.template [10ms]\n2025-06-30 12:05:08.994 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [12ms]\n2025-06-30 12:05:09.005 [info] > git status -z -uall [11ms]\n2025-06-30 12:05:09.007 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [12ms]\n2025-06-30 12:05:09.022 [info] > git config --get commit.template [12ms]\n2025-06-30 12:05:09.023 [info] > git merge-base refs/heads/main refs/remotes/origin/main [14ms]\n2025-06-30 12:05:09.024 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [13ms]\n2025-06-30 12:05:09.028 [info] > git blame --root --incremental a5377700beef39392cb955d5aa209f57348be60e -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt 
[20ms]\n2025-06-30 12:05:09.036 [info] > git diff --name-status -z --diff-filter=ADMR a5377700beef39392cb955d5aa209f57348be60e...refs/remotes/origin/main [12ms]\n2025-06-30 12:05:09.036 [info] > git status -z -uall [12ms]\n2025-06-30 12:05:09.038 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [13ms]\n2025-06-30 12:05:10.254 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [22ms]\n2025-06-30 12:05:10.269 [info] > git cat-file -s e5ccb3bb2b6678f7bb5d57f845ea477a9d792dcb [15ms]\n2025-06-30 12:05:10.282 [info] > git show --textconv :requirements.txt [12ms]\n2025-06-30 12:05:10.659 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [13ms]\n2025-06-30 12:05:10.661 [info] > git show --textconv :models/tokenizer.py [16ms]\n2025-06-30 12:05:10.671 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [11ms]\n2025-06-30 12:05:10.686 [info] > git blame --root --incremental a5377700beef39392cb955d5aa209f57348be60e -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [12ms]\n2025-06-30 12:05:11.488 [info] > git status [28ms]\n2025-06-30 12:05:12.713 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [21ms]\n2025-06-30 12:05:12.728 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [15ms]\n2025-06-30 12:05:12.742 [info] > git show --textconv :models/tokenizer.py [13ms]\n2025-06-30 12:05:12.974 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [26ms]\n2025-06-30 12:05:14.058 [info] > git for-each-ref --format %(refname:short)%00%(upstream:short) refs/heads [28ms]\n2025-06-30 12:05:14.060 [info] > git config --get commit.template [17ms]\n2025-06-30 12:05:14.061 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [17ms]\n2025-06-30 12:05:14.075 [info] > git status -z -uall [13ms]\n2025-06-30 12:05:14.077 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:05:14.080 [info] > git checkout -q --track origin/grain-dataloader [20ms]\n2025-06-30 12:05:14.092 [info] > git config --get commit.template [11ms]\n2025-06-30 12:05:14.094 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/grain-dataloader refs/remotes/grain-dataloader [12ms]\n2025-06-30 12:05:14.106 [info] > git status -z -uall [12ms]\n2025-06-30 12:05:14.107 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [12ms]\n2025-06-30 12:05:14.122 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/grain-dataloader refs/remotes/grain-dataloader [13ms]\n2025-06-30 12:05:14.123 [info] > git blame --root --incremental 1fecb21cda58d5a0fd3af4ecf40c811aba79dbdf -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [14ms]\n2025-06-30 12:05:14.131 [info] > git config --local branch.grain-dataloader.vscode-merge-base [8ms]\n2025-06-30 12:05:14.131 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-30 12:05:14.141 [info] > git reflog grain-dataloader 
--grep-reflog=branch: Created from *. [9ms]\n2025-06-30 12:05:14.150 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/origin/grain-dataloader refs/remotes/origin/grain-dataloader [9ms]\n2025-06-30 12:05:14.158 [info] > git config --local branch.grain-dataloader.vscode-merge-base origin/grain-dataloader [8ms]\n2025-06-30 12:05:14.167 [info] > git config --get commit.template [8ms]\n2025-06-30 12:05:14.168 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/grain-dataloader refs/remotes/grain-dataloader [8ms]\n2025-06-30 12:05:14.178 [info] > git status -z -uall [9ms]\n2025-06-30 12:05:14.179 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [10ms]\n2025-06-30 12:05:14.190 [info] > git merge-base refs/heads/grain-dataloader refs/remotes/origin/grain-dataloader [10ms]\n2025-06-30 12:05:14.191 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/grain-dataloader refs/remotes/grain-dataloader [11ms]\n2025-06-30 12:05:14.200 [info] > git config --local branch.grain-dataloader.vscode-merge-base [8ms]\n2025-06-30 12:05:14.201 [info] > git diff --name-status -z --diff-filter=ADMR 1fecb21cda58d5a0fd3af4ecf40c811aba79dbdf...refs/remotes/origin/grain-dataloader [10ms]\n2025-06-30 12:05:14.209 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/origin/grain-dataloader refs/remotes/origin/grain-dataloader [8ms]\n2025-06-30 12:05:15.437 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [26ms]\n2025-06-30 12:05:15.451 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [13ms]\n2025-06-30 12:05:15.465 [info] > git show --textconv :models/tokenizer.py [12ms]\n",log,tab
|
| 29 |
+
28,174091,"models/tokenizer.py",0,0,"from typing import Dict, Any, Tuple\n\nimport flax.linen as nn\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nn.Module):\n """"""ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n\n def setup(self):\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.model_dim,\n self.out_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n recon = self.decoder(outputs[""z_q""]) # (B, T, H_down * W_down, C)\n recon = nn.sigmoid(recon)\n outputs[""recon""] = unpatchify(recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n x = patchify(videos, self.patch_size)\n N = x.shape[2]\n x = self.encoder(x) # (B, T, N, E)\n\n # --- Vector quantize ---\n x = x.reshape(B * T * N, self.latent_dim)\n z_q, z, emb, indices = self.vq(x, training)\n z_q = z_q.reshape(B, T, N, self.latent_dim)\n indices = indices.reshape(B, T, N)\n return dict(z_q=z_q, z=z, emb=emb, indices=indices)\n\n def decode(self, indices: Any, video_hw: Tuple[int, int]):\n z = self.vq.codebook[indices]\n recon = self.decoder(z)\n recon = nn.sigmoid(recon)\n return unpatchify(recon, self.patch_size, *video_hw)\n",python,tab
|
| 30 |
+
29,187010,"requirements.txt",0,0,"dm_pix>=0.4.3\neinops>=0.8.0\nflax>=0.8.5\njax[cuda12]>=0.4.30\noptax>=0.2.3\nprocgen>=0.10.7\ntyro>=0.8.5\nwandb>=0.17.4\ngrain>=0.2.10\npre-commit>=4.2.0\narray-record>=0.7.2",pip-requirements,tab
|
| 31 |
+
30,188335,"requirements.txt",92,0,"",pip-requirements,selection_command
|
| 32 |
+
31,188443,"requirements.txt",76,0,"",pip-requirements,selection_command
|
| 33 |
+
32,188773,"requirements.txt",73,0,"",pip-requirements,selection_command
|
| 34 |
+
33,188909,"requirements.txt",73,0,"#",pip-requirements,content
|
| 35 |
+
34,188912,"requirements.txt",74,0,"",pip-requirements,selection_keyboard
|
| 36 |
+
35,188922,"requirements.txt",74,0," ",pip-requirements,content
|
| 37 |
+
36,188923,"requirements.txt",75,0,"",pip-requirements,selection_keyboard
|
| 38 |
+
37,189133,"requirements.txt",74,0,"",pip-requirements,selection_command
|
| 39 |
+
38,189722,"requirements.txt",61,0,"",pip-requirements,selection_command
|
| 40 |
+
39,189863,"requirements.txt",41,0,"",pip-requirements,selection_command
|
| 41 |
+
40,190051,"requirements.txt",43,0,"",pip-requirements,selection_command
|
| 42 |
+
41,190435,"requirements.txt",43,1,"[",pip-requirements,selection_command
|
| 43 |
+
42,190496,"requirements.txt",43,7,"[cuda12",pip-requirements,selection_command
|
| 44 |
+
43,190920,"requirements.txt",43,8,"[cuda12]",pip-requirements,selection_command
|
| 45 |
+
44,190969,"requirements.txt",43,8,"",pip-requirements,content
|
| 46 |
+
45,196104,"requirements.txt",43,0,"[cuda12]",pip-requirements,content
|
| 47 |
+
46,196114,"requirements.txt",43,0,"",pip-requirements,selection_command
|
| 48 |
+
47,196235,"requirements.txt",73,2,"",pip-requirements,content
|
| 49 |
+
48,196239,"requirements.txt",73,0,"",pip-requirements,selection_command
|
| 50 |
+
49,197285,"models/tokenizer.py",0,0,"",python,tab
|
| 51 |
+
50,48684142,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
|
| 52 |
+
51,48686605,"utils/dataloader.py",17,0,"",python,selection_command
|
| 53 |
+
52,48686851,"utils/dataloader.py",28,0,"",python,selection_command
|
| 54 |
+
53,48686882,"utils/dataloader.py",29,0,"",python,selection_command
|
| 55 |
+
54,48686915,"utils/dataloader.py",53,0,"",python,selection_command
|
| 56 |
+
55,48686949,"utils/dataloader.py",54,0,"",python,selection_command
|
| 57 |
+
56,48693437,"utils/dataloader.py",1116,0,"",python,selection_command
|
| 58 |
+
57,48693602,"utils/dataloader.py",1856,0,"",python,selection_command
|
| 59 |
+
58,48693852,"utils/dataloader.py",2666,0,"",python,selection_command
|
| 60 |
+
59,48693885,"utils/dataloader.py",3406,0,"",python,selection_command
|
| 61 |
+
60,48693918,"utils/dataloader.py",4182,0,"",python,selection_command
|
| 62 |
+
61,48693951,"utils/dataloader.py",4380,0,"",python,selection_command
|
| 63 |
+
62,48694563,"utils/dataloader.py",0,0,"",python,selection_command
|
| 64 |
+
63,48746408,"utils/dataloader.py",17,0,"",python,selection_command
|
| 65 |
+
64,48746657,"utils/dataloader.py",28,0,"",python,selection_command
|
| 66 |
+
65,48746693,"utils/dataloader.py",29,0,"",python,selection_command
|
| 67 |
+
66,48746720,"utils/dataloader.py",53,0,"",python,selection_command
|
| 68 |
+
67,48746754,"utils/dataloader.py",54,0,"",python,selection_command
|
| 69 |
+
68,48746790,"utils/dataloader.py",128,0,"",python,selection_command
|
| 70 |
+
69,48747147,"utils/dataloader.py",182,0,"",python,selection_command
|
| 71 |
+
70,48747280,"utils/dataloader.py",183,0,"",python,selection_command
|
| 72 |
+
71,48747616,"utils/dataloader.py",0,0,"",python,selection_command
|
| 73 |
+
72,48749565,"utils/dataloader.py",920,0,"",python,selection_command
|
| 74 |
+
73,48749816,"utils/dataloader.py",1697,0,"",python,selection_command
|
| 75 |
+
74,48749850,"utils/dataloader.py",2522,0,"",python,selection_command
|
| 76 |
+
75,48749876,"utils/dataloader.py",3151,0,"",python,selection_command
|
| 77 |
+
76,48749909,"utils/dataloader.py",4029,0,"",python,selection_command
|
| 78 |
+
77,48749945,"utils/dataloader.py",4380,0,"",python,selection_command
|
| 79 |
+
78,48750603,"utils/dataloader.py",0,0,"",python,selection_command
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-79bdcdff-1889-496c-9bc9-830e887f70d81751447790479-2025_07_02-11.39.26.104/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-a313d008-5546-415a-a27c-b4bbbd49fb041754912780018-2025_08_11-13.46.25.836/source.csv
ADDED
|
@@ -0,0 +1,69 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
2,313,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:46:25 PM [info] Activating crowd-code\n1:46:25 PM [info] Recording started\n1:46:25 PM [info] Initializing git provider using file system watchers...\n1:46:25 PM [info] Git repository found\n1:46:25 PM [info] Git provider initialized successfully\n1:46:25 PM [info] Initial git state: [object Object]\n",Log,tab
|
| 3 |
+
3,11854,"sz.py",0,0,"#!/usr/bin/env python3\nimport os, sys\nimport token\nimport tokenize\nimport itertools\nfrom tabulate import tabulate\n\nTOKEN_WHITELIST = [token.OP, token.NAME, token.NUMBER, token.STRING]\n\ndef is_docstring(t):\n return t.type == token.STRING and t.string.startswith('""""""') and t.line.strip().startswith('""""""')\n\ndef is_js_token(s): return len(s) and not s.startswith('//')\n\ndef gen_stats(base_path="".""):\n table = []\n for path, _, files in os.walk(os.path.join(base_path, ""tinygrad"")):\n for name in files:\n if not (name.endswith("".py"") or name.endswith("".js"")): continue\n if any(s in path.replace('\\', '/') for s in ['tinygrad/runtime/autogen', 'tinygrad/viz/assets']): continue\n filepath = os.path.join(path, name)\n relfilepath = os.path.relpath(filepath, base_path).replace('\\', '/')\n if name.endswith("".js""):\n with open(filepath) as file_: lines = [line.strip() for line in file_.readlines()]\n token_count, line_count = sum(len(line.split()) for line in lines if is_js_token(line)), sum(1 for line in lines if is_js_token(line))\n else:\n with tokenize.open(filepath) as file_:\n tokens = [t for t in tokenize.generate_tokens(file_.readline) if t.type in TOKEN_WHITELIST and not is_docstring(t)]\n token_count, line_count = len(tokens), len(set([x for t in tokens for x in range(t.start[0], t.end[0]+1)]))\n if line_count > 0: table.append([relfilepath, line_count, token_count/line_count])\n return table\n\ndef gen_diff(table_old, table_new):\n table = []\n files_new = set([x[0] for x in table_new])\n files_old = set([x[0] for x in table_old])\n added, deleted, unchanged = files_new - files_old, files_old - files_new, files_new & files_old\n if added:\n for file in added:\n file_stat = [stats for stats in table_new if file in stats]\n table.append([file_stat[0][0], file_stat[0][1], file_stat[0][1]-0, file_stat[0][2], file_stat[0][2]-0])\n if deleted:\n for file in deleted:\n file_stat = [stats for stats in table_old if file in stats]\n table.append([file_stat[0][0], 0, 0 - file_stat[0][1], 0, 0-file_stat[0][2]])\n if unchanged:\n for file in unchanged:\n file_stat_old = [stats for stats in table_old if file in stats]\n file_stat_new = [stats for stats in table_new if file in stats]\n if file_stat_new[0][1]-file_stat_old[0][1] != 0 or file_stat_new[0][2]-file_stat_old[0][2] != 0:\n table.append([file_stat_new[0][0], file_stat_new[0][1], file_stat_new[0][1]-file_stat_old[0][1], file_stat_new[0][2],\n file_stat_new[0][2]-file_stat_old[0][2]])\n return table\n\ndef display_diff(diff): return ""+""+str(diff) if diff > 0 else str(diff)\n\nif __name__ == ""__main__"":\n if len(sys.argv) == 3:\n headers = [""Name"", ""Lines"", ""Diff"", ""Tokens/Line"", ""Diff""]\n table = gen_diff(gen_stats(sys.argv[1]), gen_stats(sys.argv[2]))\n elif len(sys.argv) == 2:\n headers = [""Name"", ""Lines"", ""Tokens/Line""]\n table = gen_stats(sys.argv[1])\n else:\n headers = [""Name"", ""Lines"", ""Tokens/Line""]\n table = gen_stats(""."")\n\n if table:\n if len(sys.argv) == 3:\n print(""### Changes"")\n print(""```"")\n print(tabulate([headers] + sorted(table, key=lambda x: -x[1]), headers=""firstrow"", intfmt=(..., ""d"", ""+d""),\n floatfmt=(..., ..., ..., "".1f"", ""+.1f""))+""\n"")\n print(f""\ntotal lines changes: {display_diff(sum([x[2] for x in table]))}"")\n print(""```"")\n else:\n print(tabulate([headers] + sorted(table, key=lambda x: -x[1]), headers=""firstrow"", floatfmt="".1f"")+""\n"")\n groups = sorted([('/'.join(x[0].rsplit(""/"", 1)[0].split(""/"")[0:2]), x[1], 
x[2]) for x in table])\n for dir_name, group in itertools.groupby(groups, key=lambda x:x[0]):\n print(f""{dir_name:30s} : {sum([x[1] for x in group]):6d}"")\n total_lines = sum([x[1] for x in table])\n print(f""\ntotal line count: {total_lines}"")\n max_line_count = int(os.getenv(""MAX_LINE_COUNT"", ""-1""))\n assert max_line_count == -1 or total_lines <= max_line_count, f""OVER {max_line_count} LINES""\n",python,tab
|
| 4 |
+
4,17972,"test_driven_development.sh",0,0,"#!/bin/bash\npython3 test/external/process_replay/reset.py\nCAPTURE_PROCESS_REPLAY=1 pytest -n auto test/test_tiny.py test/test_uop_graph.py test/test_ops.py test/test_linearizer.py\nwhile true; do\n if python3 test/test_tiny.py; then\n PYTHONPATH=""."" python3 test/external/process_replay/process_replay.py\n fi\ndone\n",shellscript,tab
|
| 5 |
+
5,80622,"examples/stable_diffusion.py",0,0,"# https://arxiv.org/pdf/2112.10752.pdf\n# https://github.com/ekagra-ranjan/huggingface-blog/blob/main/stable_diffusion.md\nimport tempfile\nfrom pathlib import Path\nimport argparse\nfrom collections import namedtuple\nfrom typing import Dict, Any\n\nfrom PIL import Image\nimport numpy as np\nfrom tinygrad import Device, GlobalCounters, dtypes, Tensor, TinyJit\nfrom tinygrad.helpers import Timing, Context, getenv, fetch, colored, tqdm\nfrom tinygrad.nn import Conv2d, GroupNorm\nfrom tinygrad.nn.state import torch_load, load_state_dict, get_state_dict\nfrom extra.models.clip import Closed, Tokenizer\nfrom extra.models.unet import UNetModel\nfrom extra.bench_log import BenchEvent, WallTimeEvent\n\nclass AttnBlock:\n def __init__(self, in_channels):\n self.norm = GroupNorm(32, in_channels)\n self.q = Conv2d(in_channels, in_channels, 1)\n self.k = Conv2d(in_channels, in_channels, 1)\n self.v = Conv2d(in_channels, in_channels, 1)\n self.proj_out = Conv2d(in_channels, in_channels, 1)\n\n # copied from AttnBlock in ldm repo\n def __call__(self, x):\n h_ = self.norm(x)\n q,k,v = self.q(h_), self.k(h_), self.v(h_)\n\n # compute attention\n b,c,h,w = q.shape\n q,k,v = [x.reshape(b,c,h*w).transpose(1,2) for x in (q,k,v)]\n h_ = Tensor.scaled_dot_product_attention(q,k,v).transpose(1,2).reshape(b,c,h,w)\n return x + self.proj_out(h_)\n\nclass ResnetBlock:\n def __init__(self, in_channels, out_channels=None):\n self.norm1 = GroupNorm(32, in_channels)\n self.conv1 = Conv2d(in_channels, out_channels, 3, padding=1)\n self.norm2 = GroupNorm(32, out_channels)\n self.conv2 = Conv2d(out_channels, out_channels, 3, padding=1)\n self.nin_shortcut = Conv2d(in_channels, out_channels, 1) if in_channels != out_channels else lambda x: x\n\n def __call__(self, x):\n h = self.conv1(self.norm1(x).swish())\n h = self.conv2(self.norm2(h).swish())\n return self.nin_shortcut(x) + h\n\nclass Mid:\n def __init__(self, block_in):\n self.block_1 = ResnetBlock(block_in, block_in)\n self.attn_1 = AttnBlock(block_in)\n self.block_2 = ResnetBlock(block_in, block_in)\n\n def __call__(self, x):\n return x.sequential([self.block_1, self.attn_1, self.block_2])\n\nclass Decoder:\n def __init__(self):\n sz = [(128, 256), (256, 512), (512, 512), (512, 512)]\n self.conv_in = Conv2d(4,512,3, padding=1)\n self.mid = Mid(512)\n\n arr = []\n for i,s in enumerate(sz):\n arr.append({""block"":\n [ResnetBlock(s[1], s[0]),\n ResnetBlock(s[0], s[0]),\n ResnetBlock(s[0], s[0])]})\n if i != 0: arr[-1]['upsample'] = {""conv"": Conv2d(s[0], s[0], 3, padding=1)}\n self.up = arr\n\n self.norm_out = GroupNorm(32, 128)\n self.conv_out = Conv2d(128, 3, 3, padding=1)\n\n def __call__(self, x):\n x = self.conv_in(x)\n x = self.mid(x)\n\n for l in self.up[::-1]:\n print(""decode"", x.shape)\n for b in l['block']: x = b(x)\n if 'upsample' in l:\n # https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html ?\n bs,c,py,px = x.shape\n x = x.reshape(bs, c, py, 1, px, 1).expand(bs, c, py, 2, px, 2).reshape(bs, c, py*2, px*2)\n x = l['upsample']['conv'](x)\n x.realize()\n\n return self.conv_out(self.norm_out(x).swish())\n\nclass Encoder:\n def __init__(self):\n sz = [(128, 128), (128, 256), (256, 512), (512, 512)]\n self.conv_in = Conv2d(3,128,3, padding=1)\n\n arr = []\n for i,s in enumerate(sz):\n arr.append({""block"":\n [ResnetBlock(s[0], s[1]),\n ResnetBlock(s[1], s[1])]})\n if i != 3: arr[-1]['downsample'] = {""conv"": Conv2d(s[1], s[1], 3, stride=2, padding=(0,1,0,1))}\n self.down = arr\n\n 
self.mid = Mid(512)\n self.norm_out = GroupNorm(32, 512)\n self.conv_out = Conv2d(512, 8, 3, padding=1)\n\n def __call__(self, x):\n x = self.conv_in(x)\n\n for l in self.down:\n print(""encode"", x.shape)\n for b in l['block']: x = b(x)\n if 'downsample' in l: x = l['downsample']['conv'](x)\n\n x = self.mid(x)\n return self.conv_out(self.norm_out(x).swish())\n\nclass AutoencoderKL:\n def __init__(self):\n self.encoder = Encoder()\n self.decoder = Decoder()\n self.quant_conv = Conv2d(8, 8, 1)\n self.post_quant_conv = Conv2d(4, 4, 1)\n\n def __call__(self, x):\n latent = self.encoder(x)\n latent = self.quant_conv(latent)\n latent = latent[:, 0:4] # only the means\n print(""latent"", latent.shape)\n latent = self.post_quant_conv(latent)\n return self.decoder(latent)\n\ndef get_alphas_cumprod(beta_start=0.00085, beta_end=0.0120, n_training_steps=1000):\n betas = np.linspace(beta_start ** 0.5, beta_end ** 0.5, n_training_steps, dtype=np.float32) ** 2\n alphas = 1.0 - betas\n alphas_cumprod = np.cumprod(alphas, axis=0)\n return Tensor(alphas_cumprod)\n\nunet_params: Dict[str,Any] = {\n ""adm_in_ch"": None,\n ""in_ch"": 4,\n ""out_ch"": 4,\n ""model_ch"": 320,\n ""attention_resolutions"": [4, 2, 1],\n ""num_res_blocks"": 2,\n ""channel_mult"": [1, 2, 4, 4],\n ""n_heads"": 8,\n ""transformer_depth"": [1, 1, 1, 1],\n ""ctx_dim"": 768,\n ""use_linear"": False,\n}\n\nclass StableDiffusion:\n def __init__(self):\n self.alphas_cumprod = get_alphas_cumprod()\n self.model = namedtuple(""DiffusionModel"", [""diffusion_model""])(diffusion_model = UNetModel(**unet_params))\n self.first_stage_model = AutoencoderKL()\n self.cond_stage_model = namedtuple(""CondStageModel"", [""transformer""])(transformer = namedtuple(""Transformer"", [""text_model""])(text_model = Closed.ClipTextTransformer()))\n\n def get_x_prev_and_pred_x0(self, x, e_t, a_t, a_prev):\n temperature = 1\n sigma_t = 0\n sqrt_one_minus_at = (1-a_t).sqrt()\n #print(a_t, a_prev, sigma_t, sqrt_one_minus_at)\n\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt\n return x_prev, pred_x0\n\n def get_model_output(self, unconditional_context, context, latent, timestep, unconditional_guidance_scale):\n # put into diffuser\n latents = self.model.diffusion_model(latent.expand(2, *latent.shape[1:]), timestep, unconditional_context.cat(context, dim=0))\n unconditional_latent, latent = latents[0:1], latents[1:2]\n\n e_t = unconditional_latent + unconditional_guidance_scale * (latent - unconditional_latent)\n return e_t\n\n def decode(self, x):\n x = self.first_stage_model.post_quant_conv(1/0.18215 * x)\n x = self.first_stage_model.decoder(x)\n\n # make image correct size and scale\n x = (x + 1.0) / 2.0\n x = x.reshape(3,512,512).permute(1,2,0).clip(0,1)*255\n return x.cast(dtypes.uint8)\n\n def __call__(self, unconditional_context, context, latent, timestep, alphas, alphas_prev, guidance):\n e_t = self.get_model_output(unconditional_context, context, latent, timestep, guidance)\n x_prev, _ = self.get_x_prev_and_pred_x0(latent, e_t, alphas, alphas_prev)\n #e_t_next = get_model_output(x_prev)\n #e_t_prime = (e_t + e_t_next) / 2\n #x_prev, pred_x0 = get_x_prev_and_pred_x0(latent, e_t_prime, index)\n return x_prev.realize()\n\n# ** ldm.models.autoencoder.AutoencoderKL (done!)\n# 3x512x512 <--> 4x64x64 (16384)\n# decode torch.Size([1, 4, 64, 64]) torch.Size([1, 3, 512, 512])\n# section 4.3 of paper\n# first_stage_model.encoder, first_stage_model.decoder\n\n# ** ldm.modules.diffusionmodules.openaimodel.UNetModel\n# this is what runs each time to sample. is this the LDM?\n# input: 4x64x64\n# output: 4x64x64\n# model.diffusion_model\n# it has attention?\n\n# ** ldm.modules.encoders.modules.FrozenCLIPEmbedder\n# cond_stage_model.transformer.text_model\n\nif __name__ == ""__main__"":\n default_prompt = ""a horse sized cat eating a bagel""\n parser = argparse.ArgumentParser(description='Run Stable Diffusion', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--steps', type=int, default=6, help=""Number of steps in diffusion"")\n parser.add_argument('--prompt', type=str, default=default_prompt, help=""Phrase to render"")\n parser.add_argument('--out', type=str, default=Path(tempfile.gettempdir()) / ""rendered.png"", help=""Output filename"")\n parser.add_argument('--noshow', action='store_true', help=""Don't show the image"")\n parser.add_argument('--fp16', action='store_true', help=""Cast the weights to float16"")\n parser.add_argument('--timing', action='store_true', help=""Print timing per step"")\n parser.add_argument('--seed', type=int, help=""Set the random latent seed"")\n parser.add_argument('--guidance', type=float, default=7.5, help=""Prompt strength"")\n args = parser.parse_args()\n\n model = StableDiffusion()\n\n # load in weights\n with WallTimeEvent(BenchEvent.LOAD_WEIGHTS):\n load_state_dict(model, torch_load(fetch('https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt', 'sd-v1-4.ckpt'))['state_dict'], strict=False)\n\n if args.fp16:\n for k,v in get_state_dict(model).items():\n if k.startswith(""model""):\n v.replace(v.cast(dtypes.float16).realize())\n\n # run through CLIP to get context\n tokenizer = Tokenizer.ClipTokenizer()\n prompt = Tensor([tokenizer.encode(args.prompt)])\n context = model.cond_stage_model.transformer.text_model(prompt).realize()\n print(""got CLIP context"", context.shape)\n\n prompt = Tensor([tokenizer.encode("""")])\n unconditional_context = 
model.cond_stage_model.transformer.text_model(prompt).realize()\n print(""got unconditional CLIP context"", unconditional_context.shape)\n\n # done with clip model\n del model.cond_stage_model\n\n timesteps = list(range(1, 1000, 1000//args.steps))\n print(f""running for {timesteps} timesteps"")\n alphas = model.alphas_cumprod[Tensor(timesteps)]\n alphas_prev = Tensor([1.0]).cat(alphas[:-1])\n\n # start with random noise\n if args.seed is not None: Tensor.manual_seed(args.seed)\n latent = Tensor.randn(1,4,64,64)\n\n @TinyJit\n def run(model, *x): return model(*x).realize()\n\n # this is diffusion\n with Context(BEAM=getenv(""LATEBEAM"")):\n for index, timestep in (t:=tqdm(list(enumerate(timesteps))[::-1])):\n GlobalCounters.reset()\n t.set_description(""%3d %3d"" % (index, timestep))\n with Timing(""step in "", enabled=args.timing, on_exit=lambda _: f"", using {GlobalCounters.mem_used/1e9:.2f} GB""):\n with WallTimeEvent(BenchEvent.STEP):\n tid = Tensor([index])\n latent = run(model, unconditional_context, context, latent, Tensor([timestep]), alphas[tid], alphas_prev[tid], Tensor([args.guidance]))\n if args.timing: Device[Device.DEFAULT].synchronize()\n del run\n\n # upsample latent space to image with autoencoder\n x = model.decode(latent)\n print(x.shape)\n\n # save image\n im = Image.fromarray(x.numpy())\n print(f""saving {args.out}"")\n im.save(args.out)\n # Open image.\n if not args.noshow: im.show()\n\n # validation!\n if args.prompt == default_prompt and args.steps == 6 and args.seed == 0 and args.guidance == 7.5:\n ref_image = Tensor(np.array(Image.open(Path(__file__).parent / ""stable_diffusion_seed0.png"")))\n distance = (((x.cast(dtypes.float) - ref_image.cast(dtypes.float)) / ref_image.max())**2).mean().item()\n assert distance < 3e-3, colored(f""validation failed with {distance=}"", ""red"") # higher distance with WINO\n print(colored(f""output validated with {distance=}"", ""green""))\n",python,tab
6,120509,"examples/gpt2.py",0,0,"#!/usr/bin/env python3\nimport os, argparse, contextlib\nfrom typing import Optional, Union\nwith contextlib.suppress(ImportError): import tiktoken\nfrom tinygrad import Tensor, TinyJit, Device, GlobalCounters, Variable, dtypes\nfrom tinygrad.uop.ops import UOp\nfrom tinygrad.helpers import Timing, DEBUG, JIT, getenv, fetch, colored, trange\nfrom tinygrad.nn import Embedding, Linear, LayerNorm\nfrom tinygrad.nn.state import gguf_load, torch_load, load_state_dict, get_state_dict\nfrom extra.bench_log import BenchEvent, WallTimeEvent\n\nMAX_CONTEXT = getenv(""MAX_CONTEXT"", 128)\nHALF = getenv(""HALF"")\n\nclass Attention:\n def __init__(self, dim, n_heads):\n self.c_attn = Linear(dim, 3*dim, bias=True)\n self.c_proj = Linear(dim, dim, bias=True)\n self.n_heads = n_heads\n self.dim = dim\n self.head_dim = dim // n_heads\n\n def __call__(self, x:Tensor, start_pos:Variable, mask:Optional[Tensor]) -> Tensor:\n if mask is not None or start_pos.val == 0:\n # no symbolic shape qkv when consuming prompts\n start_pos = start_pos.val\n\n if HALF: x = x.half()\n xqkv = self.c_attn(x)\n xq, xk, xv = [xqkv.shrink((None, None, (i*self.dim, (i+1)*self.dim))).reshape(None, None, self.n_heads, self.head_dim) for i in range(3)]\n bsz, seqlen, _, _ = xq.shape\n\n # create kv cache\n if not hasattr(self, ""cache_kv""):\n self.cache_kv = Tensor.zeros(2, bsz, MAX_CONTEXT, self.n_heads, self.head_dim, dtype=x.dtype).contiguous().realize()\n\n # update the cache\n self.cache_kv.shrink((None, None,(start_pos,start_pos+seqlen),None,None)).assign(Tensor.stack(xk, xv)).realize()\n\n if start_pos > 0:\n keys = self.cache_kv[0].shrink((None, (0, start_pos+seqlen), None, None))\n values = self.cache_kv[1].shrink((None, (0, start_pos+seqlen), None, None))\n else:\n keys = xk\n values = xv\n\n xq, keys, values = xq.transpose(1, 2), keys.transpose(1, 2), values.transpose(1, 2)\n return self.c_proj(xq.scaled_dot_product_attention(keys, values, mask).transpose(1, 2).reshape(bsz, seqlen, self.dim))\n\nclass FeedForward:\n def __init__(self, dim, hidden_dim):\n self.c_fc = Linear(dim, hidden_dim, bias=True)\n self.c_proj = Linear(hidden_dim, dim, bias=True)\n\n def __call__(self, x:Tensor) -> Tensor:\n return self.c_proj(self.c_fc(x).gelu())\n\nclass TransformerBlock:\n def __init__(self, dim, n_heads, norm_eps):\n self.attn = Attention(dim, n_heads)\n self.mlp = FeedForward(dim, 4*dim)\n self.ln_1 = LayerNorm(dim, norm_eps)\n self.ln_2 = LayerNorm(dim, norm_eps)\n\n def __call__(self, x:Tensor, start_pos:Variable, mask:Optional[Tensor]):\n h = x + self.attn(self.ln_1(x), start_pos, mask).float()\n return (h + self.mlp(self.ln_2(h)))\n\nclass Transformer:\n def __init__(self, dim, n_heads, n_layers, norm_eps, vocab_size, max_seq_len=1024):\n self.vocab_size = vocab_size\n self.wte = Embedding(vocab_size, dim)\n self.wpe = Embedding(max_seq_len, dim)\n self.h = [TransformerBlock(dim, n_heads, norm_eps) for _ in range(n_layers)]\n self.ln_f = LayerNorm(dim, norm_eps)\n self.lm_head = Linear(dim, vocab_size, bias=False)\n self.forward_jit = TinyJit(self.forward)\n\n def forward(self, tokens:Union[Tensor,UOp], start_pos:Variable, temperature:float=0.0):\n if not hasattr(self, 'allpos'): self.allpos = Tensor.arange(0, MAX_CONTEXT).reshape(1, -1).realize()\n if isinstance(tokens, UOp):\n seqlen = 1\n tok_emb = self.wte.weight.shrink(((tokens, tokens+1), None))\n else:\n seqlen = tokens.shape[1]\n tok_emb = self.wte(tokens)\n\n # not symbolic when consuming the prompt\n selected_pos = (0, seqlen) if 
start_pos.val == 0 else (start_pos, start_pos+1)\n pos_emb = self.wpe(self.allpos.shrink((None, selected_pos)))\n\n h = tok_emb + pos_emb\n\n if HALF: h = h.half()\n\n mask = Tensor.full((1, 1, seqlen, start_pos.val+seqlen), float(""-inf""), dtype=h.dtype).triu(start_pos.val+1) if seqlen > 1 else None\n\n for hi in self.h: h = hi(h, start_pos, mask)\n\n logits = self.lm_head(self.ln_f(h))\n\n if logits.shape[1] == 0:\n # special case for empty prompt\n logits = Tensor.ones((logits.shape[0], self.vocab_size), dtype=logits.dtype, device=logits.device)\n else:\n logits = logits[:, -1, :]\n\n if temperature < 1e-6:\n ret = logits.argmax(-1)\n else:\n ret = (logits / temperature).softmax().multinomial()\n return ret.flatten().realize()\n\n def __call__(self, tokens:Union[Tensor,UOp], start_pos:Variable, temperature:float=0.0) -> Tensor:\n forward = (self.forward_jit if JIT and (isinstance(tokens, UOp) or tokens.shape[1] == 1) else self.forward)\n return forward(tokens, start_pos, temperature)\n\nVOCAB_SIZE = 50257\nMODEL_PARAMS = {\n 'gpt2': dict(n_layers=12, n_heads=12, dim=768, norm_eps=1e-5, vocab_size=VOCAB_SIZE), # 124M params\n 'gpt2-medium': dict(n_layers=24, n_heads=16, dim=1024, norm_eps=1e-5, vocab_size=VOCAB_SIZE), # 350M params\n 'gpt2-large': dict(n_layers=36, n_heads=20, dim=1280, norm_eps=1e-5, vocab_size=VOCAB_SIZE), # 774M params\n 'gpt2-xl': dict(n_layers=48, n_heads=25, dim=1600, norm_eps=1e-5, vocab_size=VOCAB_SIZE), # 1558M params\n}\n\nclass GPT2:\n @staticmethod\n def build(model_size=""gpt2""):\n tokenizer = tiktoken.get_encoding(""gpt2"")\n\n model = Transformer(**MODEL_PARAMS[model_size])\n weights = torch_load(fetch(f'https://huggingface.co/{model_size}/resolve/main/pytorch_model.bin'))\n # special treatment for the Conv1D weights we need to transpose\n transposed = ('attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight')\n for k in weights:\n if k.endswith(transposed):\n weights[k] = weights[k].T\n # lm head and wte are tied\n weights['lm_head.weight'] = weights['wte.weight']\n\n with WallTimeEvent(BenchEvent.LOAD_WEIGHTS):\n load_state_dict(model, weights)\n\n if HALF:\n for l in get_state_dict(model).values():\n l.replace(l.half().realize())\n\n return GPT2(model, tokenizer)\n\n @staticmethod\n def build_gguf(model_size: str):\n q_type = model_size[len(""gpt2_gguf_""):].upper()\n fn = fetch(f""https://huggingface.co/PrunaAI/gpt2-GGUF-smashed/resolve/main/gpt2.{q_type}.gguf?download=true"")\n gguf_tensor = Tensor.empty(os.stat(fn).st_size, dtype=dtypes.uint8, device=f""disk:{fn}"").to(Device.DEFAULT)\n kv_data, state_dict = gguf_load(gguf_tensor)\n\n gpt2_params = {\n ""dim"": kv_data[""gpt2.embedding_length""], ""n_heads"": kv_data[""gpt2.attention.head_count""],\n ""n_layers"": kv_data[""gpt2.block_count""], ""norm_eps"": kv_data[""gpt2.attention.layer_norm_epsilon""],\n ""vocab_size"": VOCAB_SIZE, ""max_seq_len"": kv_data[""gpt2.context_length""],\n }\n def _remap_gguf_key(key: str):\n replaces = [\n (""blk."", ""h.""), ("".attn_qkv.bias"", "".attn.c_attn.bias""), ("".attn_qkv.weight"", "".attn.c_attn.weight""),\n ("".ffn_norm.bias"", "".ln_2.bias""), ("".ffn_norm.weight"", "".ln_2.weight""), ("".attn_norm.bias"", "".ln_1.bias""),\n ("".attn_norm.weight"", "".ln_1.weight""), ("".attn_output.bias"", "".attn.c_proj.bias""), ("".attn_output.weight"", "".attn.c_proj.weight""),\n ("".ffn_up.bias"", "".mlp.c_fc.bias""), ("".ffn_up.weight"", "".mlp.c_fc.weight""), ("".ffn_down.bias"", "".mlp.c_proj.bias""),\n ("".ffn_down.weight"", 
"".mlp.c_proj.weight""), (""token_embd.weight"", ""wte.weight""), (""output.weight"", ""lm_head.weight""),\n (""output_norm.bias"", ""ln_f.bias""), (""output_norm.weight"", ""ln_f.weight""), (""position_embd.weight"", ""wpe.weight""),\n ]\n for ostr, ns in replaces: key = key.replace(ostr, ns)\n return key\n state_dict = { _remap_gguf_key(k): v for k, v in state_dict.items() }\n model = Transformer(**gpt2_params)\n with WallTimeEvent(BenchEvent.LOAD_WEIGHTS):\n load_state_dict(model, state_dict)\n return GPT2(model, tiktoken.get_encoding(""gpt2""))\n\n def __init__(self, model, tokenizer):\n self.model = model\n self.tokenizer = tokenizer\n\n def generate(self, prompt:str, max_length:int, temperature:float, timing:bool=False, batch_size:int=1):\n prompt_tokens = self.tokenizer.encode(prompt, allowed_special={""<|endoftext|>""})\n toks = [prompt_tokens[:] for _ in range(batch_size)]\n start_pos = 0\n for _ in trange(max_length, disable=(timing==True)):\n GlobalCounters.reset()\n if timing: print("""")\n st = GlobalCounters.time_sum_s\n with Timing(""ran model in "", on_exit=(lambda et: (f"", {(GlobalCounters.time_sum_s-st)*1e3:.2f} ms on GPU"" if DEBUG>=2 else """")+\n f"", {GlobalCounters.global_ops*1e-9:.2f} GOPS, {GlobalCounters.global_mem*1e-9:.2f} GB""+\n (f"", {GlobalCounters.global_mem*1e-9/(GlobalCounters.time_sum_s-st):.2f} GB/s"" if DEBUG>=2 else """")) if DEBUG else None, enabled=timing):\n with WallTimeEvent(BenchEvent.STEP):\n if batch_size == 1 and len(toks[0][start_pos:]) == 1:\n tokens = Variable(""tokens"", 0, VOCAB_SIZE-1).bind(toks[0][start_pos])\n else:\n tokens = Tensor([x[start_pos:] for x in toks])\n tok = self.model(tokens, Variable(""start_pos"", 1 if start_pos else 0, MAX_CONTEXT-1).bind(start_pos), temperature).tolist()\n start_pos = len(toks[0])\n for i,t in enumerate(tok): toks[i].append(t)\n return [self.tokenizer.decode(x) for x in toks]\n\n# **** main code ****\n\nif __name__ == ""__main__"":\n print(f""using {Device.DEFAULT} backend"")\n default_prompt = ""What is the answer to life, the universe, and everything?""\n\n parser = argparse.ArgumentParser(description='Run GPT2 in tinygrad', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--prompt', type=str, default=default_prompt, help=""Phrase to start with"")\n parser.add_argument('--count', type=int, default=100, help=""Max number of tokens to generate"")\n parser.add_argument('--temperature', type=float, default=0.8, help=""Temperature in the softmax"")\n parser.add_argument('--model_size', type=str, default=""gpt2-medium"", help=""Size of model to use [gpt2, gpt2-medium, gpt2-large, gpt2-xl]"")\n parser.add_argument('--timing', action='store_true', help=""Print timing per token"")\n parser.add_argument('--seed', type=int, help=""Set the random seed"")\n parser.add_argument('--batch_size', type=int, default=1, help=""Set the input batch size"")\n parser.add_argument('--benchmark', type=int, default=-1, help=""Benchmark GPT with the given number of tokens"")\n parser.add_argument('--noshow', action='store_true', help=""Don't show the output"")\n args = parser.parse_args()\n\n if args.seed is not None:\n Tensor.manual_seed(args.seed)\n\n print(f""using {args.model_size}"")\n gpt2 = GPT2.build_gguf(args.model_size) if args.model_size.startswith(""gpt2_gguf_"") else GPT2.build(args.model_size)\n\n if args.benchmark != -1:\n gpt2.model(Tensor.rand(args.batch_size, args.benchmark), Variable(""a"", 0, MAX_CONTEXT).bind(0)).realize()\n else:\n texts = gpt2.generate(args.prompt, 
args.count, args.temperature, timing=args.timing, batch_size=args.batch_size)\n if not args.noshow:\n print('Generating text...')\n if len(texts) == 1: print(texts[0])\n else:\n for i,text in enumerate(texts): print(colored(f""Response {i}:"", ""green""), text)\n\n # validate output!\n if args.temperature == 0 and args.model_size == ""gpt2-medium"" and args.count == 10:\n expected = {\n default_prompt: ""What is the answer to life, the universe, and everything?\n\nThe answer is that we are all one"",\n ""Hello."": ""Hello. I'm a little late to the party, but"",\n }\n try:\n assert texts[0] == expected[args.prompt]\n print(colored(""output validated"", ""green""))\n except KeyError:\n pass\n",python,tab
7,121560,"examples/gpt2.py",593,0,"",python,selection_mouse
8,482010,"examples/gpt2.py",864,0,"",python,selection_command
9,482406,"tinygrad/__init__.py",0,0,"import os\nif int(os.getenv(""TYPED"", ""0"")):\n from typeguard import install_import_hook\n install_import_hook(__name__)\nfrom tinygrad.tensor import Tensor # noqa: F401\nfrom tinygrad.engine.jit import TinyJit # noqa: F401\nfrom tinygrad.uop.ops import UOp\nVariable = UOp.variable\nfrom tinygrad.dtype import dtypes # noqa: F401\nfrom tinygrad.helpers import GlobalCounters, fetch, Context, getenv # noqa: F401\nfrom tinygrad.device import Device # noqa: F401\n",python,tab
10,482416,"tinygrad/__init__.py",318,0,"",python,selection_command
11,484052,"tinygrad/__init__.py",327,0,"",python,selection_command
12,484164,"tinygrad/__init__.py",329,0,"",python,selection_command
13,484524,"tinygrad/__init__.py",332,0,"",python,selection_command
14,484722,"tinygrad/__init__.py",333,0,"",python,selection_command
15,484999,"tinygrad/uop/ops.py",0,0,"from __future__ import annotations\nfrom typing import Any, Callable, cast, TYPE_CHECKING, Type, Sequence\nimport sys, time, functools, itertools, math, operator, hashlib, os, types, pickle, pathlib, inspect, weakref\nfrom dataclasses import dataclass, field\nfrom enum import Enum, auto\nfrom tinygrad.uop import Ops, GroupOp\nfrom tinygrad.uop.mathtraits import MathTrait\nfrom tinygrad.dtype import ConstType, ImageDType, dtypes, DType, truncate, PtrDType\nfrom tinygrad.helpers import ContextVar, all_int, prod, getenv, all_same, Context, partition, temp, unwrap, T, argfix, Metadata, flatten\nfrom tinygrad.helpers import PICKLE_BUFFERS, PROFILE, dedup, cdiv, cmod, diskcache_put, to_function_name, cpu_profile, TracingKey\nif TYPE_CHECKING:\n from tinygrad.shape.shapetracker import ShapeTracker\n from tinygrad.device import Buffer, MultiBuffer\n\n# https://en.wikipedia.org/wiki/Identity_element\ndef identity_element(op:Ops, dt:DType) -> ConstType: return dtypes.as_const({Ops.ADD:0, Ops.MUL:1, Ops.MAX:dtypes.min(dt)}[op], dt)\n\ndef can_pad(root:UOp, edges:dict[UOp, None]) -> bool:\n return all(u.op not in GroupOp.UnsafePad for u in root.toposort(gate=lambda x:x not in edges))\n\n# With True as the default, this matches the old symbolic behavior\ndef resolve(x:UOp|bool, default:bool=True):\n if isinstance(x, bool): return x\n assert x.dtype == dtypes.bool, ""UOp in resolve must be bool""\n # NOTE: generating the text for the exception is expensive, so we do this\n return bool(sx.vmin) if (sx:=x.simplify()).vmin == sx.vmax else default\n\n# smax/smin are replacements for max/min that preserve symbolic\ndef _suop(lst, uop_fxn, python_fxn):\n uops, nums = partition(lst, lambda x: isinstance(x, UOp))\n return ssimplify(functools.reduce(uop_fxn, uops + ([python_fxn(nums)] if nums else [])))\ndef smax(*lst): return _suop(argfix(*lst), UOp.maximum, max)\ndef smin(*lst): return _suop(argfix(*lst), UOp.minimum, min)\ndef srender(x) -> str: return x.render() if isinstance(x, UOp) else str(x)\n\ndef ssimplify(uop): return uop.ssimplify() if isinstance(uop, UOp) else uop\ndef sym_infer(uop: UOp|int, var_vals: dict[UOp, int]) -> int: return uop.sym_infer(var_vals) if isinstance(uop, UOp) else uop\n\n# used for UOp and UPat\ndef pretty_print(x:Any, rep:Callable, srcfn=lambda x: x.src, cache=None, d=0)->str:\n def dfs(x:Any, cache:dict):\n for s in srcfn(x) or []:\n cache.setdefault(s, [len(cache), 0, False])[1] += 1\n if cache[s][1] == 1: dfs(s, cache)\n if cache is None: dfs(x, cache:={})\n if (cx:=cache.setdefault(x, [0,0,False]))[2]: return f""{' '*d} x{cx[0]}""\n cx[2], srcs = True, ('None' if srcfn(x) is None else ''.join(f'\n{pretty_print(s, rep, srcfn, cache, d+2)},' for s in srcfn(x)))\n return f""{' '*d}{f'x{cx[0]}:=' * (cx[1]>1)}{rep(x)}"" % srcs\n\nclass UOpMetaClass(type):\n ucache:dict[tuple, weakref.ReferenceType[UOp]] = {}\n def __call__(cls, op:Ops, dtype:DType=dtypes.void, src:tuple[UOp,...]=tuple(), arg:Any=None, tag:Any=None,\n metadata:tuple[Metadata,...]|None=None, _buffer:Buffer|None=None):\n if (wret:=UOpMetaClass.ucache.get(key:=(op, dtype, src, arg, tag), None)) is not None and (ret:=wret()) is not None: return ret\n UOpMetaClass.ucache[key] = ref = weakref.ref(created:=super().__call__(*key))\n for s in src: s.children.add(ref)\n if metadata is not None: all_metadata[created] = metadata\n # NOTE: this value is set by pickle when pickling a realized tensor\n if _buffer is not None:\n assert op is Ops.BUFFER, f""trying to set Buffer {_buffer} for 
{op}""\n buffers[created] = _buffer\n return created\n\n# some uops map to other stuff\nbuffers:weakref.WeakKeyDictionary[UOp, Buffer|MultiBuffer] = weakref.WeakKeyDictionary() # this maps BUFFER uops to their device Buffers\nall_metadata:weakref.WeakKeyDictionary[UOp, tuple[Metadata, ...]] = weakref.WeakKeyDictionary() # TODO: should this be here?\n\n# NOTE: this should be frozen, but frozen is slower\n@dataclass(eq=False, slots=True)\nclass UOp(MathTrait, metaclass=UOpMetaClass):\n op:Ops\n dtype:DType = dtypes.void\n src:tuple[UOp, ...] = tuple()\n arg:Any = None\n tag:Any = None\n children:set[weakref.ref[UOp]] = field(default_factory=set)\n def __del__(self):\n if Ops is not None and self.op is Ops.BUFFER and (buffer:=buffers.get(self)) is not None: buffer.ref(-1)\n try:\n if (ref:=UOpMetaClass.ucache.get(k:=(self.op, self.dtype, self.src, self.arg, self.tag))) is not None:\n for s in self.src: s.children.discard(ref)\n del UOpMetaClass.ucache[k]\n except AttributeError: pass\n def __reduce__(self):\n args = [self.op, self.dtype, self.src, self.arg, self.tag, self.metadata]\n if self.op is Ops.BUFFER and self.realized is not None and PICKLE_BUFFERS: args.append(self.realized)\n return UOp, tuple(args)\n def replace(self, **kwargs) -> UOp:\n new_args = (kwargs.pop(""op"", self.op), kwargs.pop(""dtype"", self.dtype), kwargs.pop(""src"", self.src),\n kwargs.pop(""arg"", self.arg), kwargs.pop(""tag"", self.tag))\n assert len(kwargs) == 0, f""unused kwargs in replace {list(kwargs)}""\n if (self.op, self.dtype, self.src, self.arg, self.tag) == new_args: return self\n return UOp(*new_args)\n def rtag(self, tag=True): return self.replace(tag=tag)\n @functools.cached_property\n def key(self) -> bytes:\n return hashlib.sha256(str((self.op, self.dtype, self.arg)).encode() + b"""".join([s.key for s in self.src])).digest()\n def __repr__(self): return pretty_print(self, lambda x: f""{type(self).__name__}({x.op}, {x.dtype}, arg={x.argstr()}{x.tagstr()}, src=(%s))"")\n def argstr(self): return f'({"", "".join(map(str, self.arg))})' if self.op is Ops.REDUCE_AXIS else repr(self.arg)\n def tagstr(self): return f"", tag={self.tag}"" if self.tag is not None else """"\n\n @functools.cached_property\n def parents(self:UOp) -> dict[UOp, None]:\n ret = {s:None for s in self.src}\n for s in self.src: ret.update(s.parents)\n return ret\n @property\n def sparents(self:UOp) -> dict[UOp, None]: return {self:None, **self.parents}\n\n def toposort(self, gate:Callable|None=None) -> dict[UOp, None]:\n ret: dict[UOp, None] = {}\n stack: list[tuple[UOp, bool]] = [(self, False)] # each stack entry is (node, visited_flag)\n while stack:\n node, visited = stack.pop()\n if node in ret: continue\n if not visited:\n if gate is None or gate(node):\n stack.append((node, True)) # push node back on stack to process after its parents\n for parent in reversed(node.src): stack.append((parent, False)) # push parents on the stack\n else: ret[node] = None # second time i'm seeing this node, add it to returned toposort\n return ret\n\n # returns map of UOps to their children in the graph rooted by self\n def get_children_map(self) -> dict[UOp, dict[UOp, None]]:\n ret: dict[UOp, dict[UOp, None]] = {}\n for u in self.toposort():\n ret[u] = {}\n for s in u.src: ret[s][u] = None\n return ret\n\n @functools.cached_property\n def tuplize(self:UOp) -> tuple:\n return (self.op.value, self.arg, self.dtype,)+tuple([x.tuplize for x in self.src])\n\n # *** uop shape stuff ***\n\n @functools.cached_property\n def st(self) -> ShapeTracker|None:\n if 
self.op in GroupOp.Block or self.op is Ops.INDEX: return None\n from tinygrad.shape.shapetracker import ShapeTracker\n # VIEW and MovementOps define a new ShapeTracker from the arg\n if self.op is Ops.VIEW: return self.arg\n if self.op in GroupOp.Movement: return unwrap(self.src[0].st).mop(self.op, self.arg)\n # CONST with a DEVICE has a shape of ()\n if self.op is Ops.CONST and len(self.src) and self.src[0].op is Ops.DEVICE: return ShapeTracker.from_shape(())\n # BufferOps and ASSIGN flow ShapeTracker from a direct edge\n if self.op in {Ops.STORE, Ops.ASSIGN, Ops.LOAD}: return self.src[0].st\n if self.op in GroupOp.Buffer: return views[0] if (views:=[x.st for x in self.src if x.op is Ops.VIEW]) else None\n\n # BUFFER/BUFFER_VIEW and KERNEL only have a size\n if self.op in {Ops.BUFFER, Ops.BUFFER_VIEW}: return ShapeTracker.from_shape((self.size,))\n if self.op is Ops.KERNEL: return ShapeTracker.from_shape((self.arg.ast.size,))\n if self.op in {Ops.DEFINE_GLOBAL, Ops.DEFINE_LOCAL, Ops.DEFINE_REG}:\n sz = cast(PtrDType, self.dtype).size\n return ShapeTracker.from_shape((sz,)) if sz > 0 else None\n\n # hack for PTX, CASTing the ptr loses the shape\n if self.op is Ops.CAST and self.src[0].op is Ops.DEFINE_GLOBAL: return None\n\n # otherwise we get the shape from sources\n if not (src_sts := [x.st for x in self.src if x.st is not None]): return None\n assert all_same([x.shape for x in src_sts]), f""UOp sources must have the same shape {self} {[x.shape for x in src_sts]}""\n match self.op:\n case Ops.MULTI: shape = tuple(self.src[0].shape[a]*len(self.device) if a == self.axis else s for a,s in enumerate(self.src[0].shape))\n case Ops.BITCAST:\n shape = src_sts[0].shape\n if self.dtype.itemsize != (input_sz:=self.src[0].dtype.itemsize): shape = shape[:-1]+((shape[-1]*input_sz) // self.dtype.itemsize,)\n case Ops.REDUCE_AXIS | Ops.WMMA: shape = src_sts[0].reduce(self.axis_arg)\n case _: shape = src_sts[0].shape\n return ShapeTracker.from_shape(shape)\n\n @functools.cached_property\n def full_shape(self) -> tuple[sint, ...]:\n if self.op is Ops.VIEW: return self.shape\n # NOTE: if a parent doesn't have st its full_shape is empty\n parent_shapes = [x.full_shape for x in self.src]\n return tuple(smax(x) for x in itertools.zip_longest(*parent_shapes, fillvalue=1))\n @property\n def shape(self) -> tuple[sint, ...]:\n assert self.st is not None, f""{self.op} doesn't have a shape""\n return unwrap(self.st).shape\n @property\n def size(self) -> int: return self.arg[0] if self.op is Ops.BUFFER_VIEW else self.arg if self.op is Ops.BUFFER else unwrap(self.st).size\n\n # *** uop evaluation ***\n\n def simplify(self):\n # late import!\n from tinygrad.uop.symbolic import symbolic\n with Context(TRACK_MATCH_STATS=0):\n return graph_rewrite(self, symbolic)\n def ssimplify(self) -> UOp|ConstType: return ret.arg if (ret:=self.simplify()).op is Ops.CONST else ret\n def _eval(self, dtype, expected_type:Type[T]) -> T:\n assert self.dtype in dtype, f""eval with wrong dtype {self}""\n vmin, vmax = (simple_self:=self.simplify())._min_max\n if vmin != vmax: raise ValueError(f""eval failed to be a single number, range is {vmin} to {vmax} in {simple_self.render()}"")\n assert isinstance(vmin, expected_type), f""vmin is wrong dtype {type(vmin)} != {expected_type}""\n return vmin\n def __bool__(self): return self._eval((dtypes.bool,), bool)\n def __int__(self): return self._eval(dtypes.ints, int)\n def __float__(self): return self._eval(dtypes.floats, float)\n def substitute(self, dvars:dict[UOp, UOp], name:str|None=None):\n 
dvars = {k:v for k,v in dvars.items() if k is not v}\n if len(dvars) == 0: return self\n with Context(TRACK_MATCH_STATS=(0 if name is None else TRACK_MATCH_STATS.value)):\n return graph_rewrite(self, _substitute, dvars, bottom_up=True, name=name)\n\n # *** uop syntactic sugar ***\n\n @property\n def st_arg(self) -> ShapeTracker:\n assert self.op in GroupOp.Buffer, f""st_arg called on {self.op}""\n return unwrap(self.st)\n @property\n def axis_arg(self) -> tuple[int, ...]:\n assert self.op in {Ops.REDUCE_AXIS, Ops.WMMA}, f""axis_arg called on {self.op}""\n ret = self.arg[1] if self.op is Ops.REDUCE_AXIS else self.arg[7]\n assert isinstance(ret, tuple) and all(isinstance(x, int) for x in ret), f""axis_arg trying to return {ret}""\n return ret\n def sink(self, *srcs:UOp|None, **kwargs): return UOp(Ops.SINK, dtypes.void, (self,)+tuple([x for x in srcs if x is not None]), **kwargs)\n def detach(self): return UOp(Ops.DETACH, self.dtype, (self,))\n def index(self, idx:UOp, valid:UOp|None=None): return UOp(Ops.INDEX, self.dtype, (self,idx,valid) if valid is not None else (self,idx))\n def __getitem__(self, idx): return self.index(idx)\n def const_like(self, b:ConstLike):\n # constants can optionally have a DEVICE source\n return UOp.const(self.dtype, b, device=self._device, shape=self.shape if self.st is not None else None)\n def broadcast(self, count:int):\n assert self.dtype.count == 1\n if count == 1: return self\n return UOp(Ops.VECTORIZE, self.dtype.vec(count), (self,)*count)\n def cast(self, dtype:DType):\n if self.dtype == dtype: return self\n return UOp(Ops.CAST, dtype, (self,))\n def cast_vec(self, dtype:DType): return UOp(Ops.CAST, dtype.vec(self.dtype.count), (self,))\n def bitcast(self, dtype:DType): return UOp(Ops.BITCAST, dtype, (self,))\n def gep(self, i:tuple[int, ...]|int):\n if isinstance(i, tuple) and len(i) == 1: return self.gep(i[0])\n if isinstance(i, int):\n # NOTE: these are just shortcuts to not have to create and fold later\n if self.op is Ops.VECTORIZE: return self.src[i]\n if self.op is Ops.VCONST: return UOp.const(self.dtype.scalar(), self.arg[i])\n if self.op is Ops.CONST: return UOp.const(self.dtype.scalar(), self.arg)\n i = (i,)\n return UOp(Ops.GEP, self.dtype.scalar().vec(len(i)) if len(i) > 1 else self.dtype.scalar(), (self,), i)\n def load(self, *src:UOp, **kwargs): return UOp(Ops.LOAD, dtype=kwargs.pop(""dtype"", self.dtype.base), src=(self,)+src, **kwargs)\n def store(self, *src:UOp, **kwargs): return UOp(Ops.STORE, dtypes.void, (self,)+src, **kwargs)\n def assign(self, x:UOp): return UOp(Ops.ASSIGN, self.dtype, (self, x))\n def barrier(self, *src:UOp): return UOp(Ops.BARRIER, src=(self,)+src)\n def alu(self, op, *src:UOp, **kwargs):\n out_dtype = (self, *src)[-1].dtype\n if op in {Ops.CMPLT, Ops.CMPNE, Ops.CMPEQ}: out_dtype = dtypes.bool.vec(out_dtype.count) if out_dtype.count > 1 else dtypes.bool\n return UOp(op, out_dtype, (self,)+src, **kwargs)\n @staticmethod\n def const(dtype:DType, b:ConstLike, device:str|tuple[str, ...]|None=None, shape:tuple[sint, ...]|None=None):\n if isinstance(b, UOp): return b.unbind()[0] if b.op is Ops.BIND else b\n if isinstance(b, tuple) and all_same(b): b = b[0] # doesn't have to be a VCONST if they are all the same\n ret = UOp(Ops.VCONST if isinstance(b, tuple) else Ops.CONST, dtype, arg=dtypes.as_const(b, dtype))\n if shape is not None:\n from tinygrad.shape.shapetracker import ShapeTracker\n ret = ret.replace(src=(UOp(Ops.VIEW, dtypes.void, (), ShapeTracker.from_shape(shape, (0,)*len(shape))),))\n if device is not None:\n 
ret = ret.replace(src=(UOp(Ops.DEVICE, arg=device).view(unwrap(ret.st)),))\n return ret\n @staticmethod\n def range(dtype:DType, end:sint, idx:int): return UOp(Ops.RANGE, dtype=dtype, src=(sint_to_uop(end),), arg=idx)\n def r(self, op:Ops, axis:tuple[int, ...]):\n axis = tuple(sorted([x for x in axis if resolve(self.shape[x] != 1)]))\n if len(axis) == 0: return self\n # move any non reduce axis before the first reduce axis\n move_early, rest = partition(range(axis[0], len(self.shape)), lambda i: i not in axis and resolve(self.shape[i] != 1))\n permaxis = tuple(range(axis[0])) + tuple(move_early) + tuple(rest)\n ret = self.permute(permaxis)\n new_axis = tuple([x for x in range(axis[0]+len(move_early), len(self.shape)) if resolve(ret.shape[x] != 1)])\n assert len(axis) == len(new_axis)\n ret = UOp(Ops.REDUCE_AXIS, self.dtype, (ret,), (op, new_axis))\n return ret.reshape(tuple([x if i not in axis else 1 for i,x in enumerate(self.shape)]))\n def reduce(self, *src:UOp, **kwargs): return UOp(Ops.REDUCE, kwargs.pop('dtype', self.dtype), src=(self,)+src, **kwargs)\n def contiguous(self): return self.alu(Ops.CONTIGUOUS)\n def contiguous_backward(self): return self.alu(Ops.CONTIGUOUS_BACKWARD)\n def fuse(self): return self.alu(Ops.FUSE)\n def allreduce(self, op, device:str|tuple[str, ...]|UOp):\n assert isinstance(self.device, tuple), f""allreduce must be on tuple {self.device} isn't""\n return UOp(Ops.ALLREDUCE, self.dtype, (self, UOp(Ops.DEVICE, arg=device) if not isinstance(device, UOp) else device), op)\n\n # *** from MultiLazyBuffer ***\n\n def multi(self, axis:int|None):\n assert isinstance(self.device, tuple), f""multi device must be tuple, {self.device} isn't""\n assert axis is not None, ""multi None is no longer supported""\n return UOp(Ops.MULTI, self.dtype, (self,), axis)\n\n @property\n def bounds(self):\n if self.axis is None: raise RuntimeError(""bounds is not defined when axis is None"")\n return tuple(itertools.pairwise(itertools.accumulate([self.src[0].shape[self.axis] for _ in self.device], initial=0)))\n\n @functools.cached_property\n def axis(self) -> int|None:\n if self.op is Ops.MULTI: return self.arg\n # NOTE: they all have to share an axis, we always choose [-1]\n if self.op in GroupOp.ALU: return axes[-1] if (axes := dedup([x.axis for x in self.src if x.axis is not None])) else None\n if len(self.src) == 0: return None\n src_axis = self.src[0].axis\n if self.op is Ops.REDUCE_AXIS: return None if src_axis is not None and src_axis in self.arg[1] else src_axis\n if self.op is Ops.RESHAPE:\n if src_axis is None: return None\n arg_acc:list[sint] = list(itertools.accumulate(self.arg, operator.mul, initial=1))\n # new_axis is the last one that preserves prod(prior to new_axis) and must not move items between shards\n # TODO: what to do about shrinking to self.shape[self.axis]==1 len(self.real_lbs)==1?\n return len(arg_acc) - arg_acc[::-1].index(prod(self.src[0].shape[:src_axis])) - 1\n if self.op is Ops.PERMUTE: return self.arg.index(src_axis) if src_axis is not None else None\n return src_axis\n\n def _unshard(self, axis:int) -> UOp:\n bsz, dcount = self.shape[axis], len(self.device)\n dnum = UOp.variable(""_device_num"", 0, dcount-1)\n return self.pad(tuple((0,0) if a != axis else (bsz*dnum, bsz*(dcount-1) - bsz*dnum) for a in range(len(self.shape))))\n\n def _shard(self, axis:int) -> UOp:\n dcount = len(self.device)\n dnum = UOp.variable(""_device_num"", 0, dcount-1)\n if self.shape[axis] % dcount != 0: raise RuntimeError(f""multi axis uneven: {self.shape[axis]=} {axis=} 
{dcount=}"")\n sz = self.shape[axis] // dcount\n return self.shrink(tuple((0,s) if i != axis else (dnum*sz,dnum*sz+sz) for i,s in enumerate(self.shape)))\n def shard(self, devices:tuple[str, ...], axis:int) -> UOp: return self.copy_to_device(devices)._shard(axis).multi(axis)\n\n # *** from LazyBuffer ***\n\n def copy_to_device(self, device:str|tuple[str, ...]|UOp, arg=None):\n assert arg is None or isinstance(self.device, tuple)\n inp = self if arg is None else UOp(Ops.MSELECT, self.dtype, src=(self,), arg=arg)\n return UOp(Ops.COPY, self.dtype, (inp, UOp(Ops.DEVICE, arg=device) if not isinstance(device, UOp) else device))\n def mselect(self, arg:int) -> UOp: return UOp(Ops.MSELECT, self.dtype, (self,), arg)\n @property\n def metadata(self) -> tuple[Metadata, ...]|None: return all_metadata.get(self, None)\n\n # *** uop movement ops ***\n\n @property\n def base(self) -> UOp:\n if (self.op is Ops.VIEW and len(self.src) != 0) or self.op in GroupOp.Movement: return self.src[0].base\n if self.op is Ops.MULTI: return self.src[0].base # MULTI is really a VIEW\n return self\n def view(self, new_st:ShapeTracker) -> UOp: return UOp(Ops.VIEW, self.dtype, (self,), new_st)\n\n def _mop(self, op:Ops, arg) -> UOp:\n ret = UOp(op, self.dtype, (self,), arg)\n if self.st == ret.st: return self # ignore NOOPs, also check ret.st\n return ret\n\n def reshape(self, arg:tuple[sint, ...]): return self._mop(Ops.RESHAPE, arg)\n def pad(self, arg:tuple[tuple[sint, sint], ...]): return self._mop(Ops.PAD, arg)\n def expand(self, arg:tuple[sint, ...]): return self._mop(Ops.EXPAND, arg)\n def permute(self, arg:tuple[sint, ...]): return self._mop(Ops.PERMUTE, arg)\n def shrink(self, arg:tuple[tuple[sint, sint], ...]): return self._mop(Ops.SHRINK, arg)\n def flip(self, arg:tuple[bool, ...]): return self._mop(Ops.FLIP, arg)\n\n # *** uop UNIQUE ***\n\n # TODO: use this in Buffer\n unique_num = itertools.count(0)\n @staticmethod\n def unique(): return UOp(Ops.UNIQUE, arg=next(UOp.unique_num))\n\n # *** uop Buffer stuff ***\n\n @staticmethod\n def new_buffer(device:str|tuple[str, ...], size:int, dtype:DType): return UOp(Ops.BUFFER, dtype, (UOp.unique(), UOp(Ops.DEVICE, arg=device)), size)\n @property\n def device(self) -> str|tuple[str, ...]: return cast(str|tuple[str, ...], unwrap(self._device))\n @functools.cached_property\n def _device(self) -> str|tuple[str, ...]|None:\n if self.op is Ops.DEVICE: return self.arg\n if self.op is Ops.MSELECT:\n assert isinstance(self.src[0].device, tuple), ""mselect must be on tuple device""\n return self.src[0].device[self.arg]\n if self.op is Ops.MSTACK: return tuple(cast(str, x.device) for x in self.src)\n if self.op in {Ops.COPY, Ops.BUFFER, Ops.ALLREDUCE}: return self.src[1].device\n return next((x._device for x in self.src if x._device is not None), None)\n @property\n def buf_uop(self) -> UOp:\n if self.op is Ops.BUFFER: return self\n if self.op is Ops.MSELECT: return self.src[0].buf_uop.mselect(self.arg)\n if self.op is Ops.MSTACK: return UOp(Ops.MSTACK, self.dtype, src=tuple(x.buf_uop for x in self.src))\n assert self.op is Ops.ASSIGN, f""must be ASSIGN {self.op}""\n return self.src[0].base\n @property\n def buffer(self) -> Buffer|MultiBuffer:\n from tinygrad.device import Buffer, MultiBuffer\n if self is not self.base:\n assert unwrap(self.st).contiguous, ""VIEW only works here if it's contiguous""\n return self.src[0].buffer\n if self.op is Ops.MSELECT:\n ret = self.src[0].buffer\n assert isinstance(ret, MultiBuffer)\n return ret.bufs[self.arg]\n if self.op is Ops.MSTACK:\n ret 
= MultiBuffer.__new__(MultiBuffer)\n ret.bufs = [cast(Buffer, x.buffer) for x in self.src]\n assert all_same([x.size for x in ret.bufs]) and all_same([x.dtype for x in ret.bufs]), ""multibuffers mismatch buffers""\n return ret\n assert self.op is Ops.BUFFER, f""must be BUFFER {self.op}""\n if (cret:=buffers.get(self)) is not None: return cret\n rdtype = self.dtype if isinstance(self.dtype, ImageDType) else self.dtype.base\n if isinstance(self.device, tuple): ret = MultiBuffer(self.device, self.size, rdtype).ref(1)\n else: ret = Buffer(self.device, self.size, rdtype).ref(1)\n buffers[self] = ret\n return ret\n @property\n def realized(self) -> Buffer|MultiBuffer|None:\n # NOTE: this is used by the JIT to determine which inputs we capture\n return self.buffer if self.op in {Ops.BUFFER, Ops.MSTACK} and self.buffer.is_allocated() else None\n @property\n def is_realized(self) -> bool:\n return all(x.base.realized is not None for x in self.base.src) if self.base.op is Ops.MULTI else self.base.realized is not None\n\n # *** uop Variable stuff ***\n\n @staticmethod\n def variable(name:str, min_val:ConstType, max_val:ConstType, dtype:DType=dtypes.int) -> UOp:\n assert not isinstance(min_val, UOp) and not isinstance(max_val, UOp), f""can't create Variable {name} with {min_val}/{max_val}""\n return UOp(Ops.DEFINE_VAR, dtype, arg=(name, min_val, max_val))\n @property\n def expr(self):\n assert self.op is Ops.DEFINE_VAR, f""op is {self.op}, need DEFINE_VAR""\n return self.arg[0]\n def bind(self, val:int|UOp):\n assert self.op is Ops.DEFINE_VAR, f""op is {self.op}, need DEFINE_VAR""\n uval = self.const_like(val) if isinstance(val, int) else val\n assert self.arg[1] <= uval.vmin and uval.vmax <= self.arg[2], f""bind {val} not in range [{self.arg[1]}, {self.arg[2]}]""\n return UOp(Ops.BIND, self.dtype, (self, uval))\n def unbind(self) -> tuple[Variable, int]:\n assert self.op is Ops.BIND and self.src[0].op is Ops.DEFINE_VAR and self.src[1].op is Ops.CONST, f""can't unbind {self}""\n return self.src[0], self.src[1].arg\n @property\n def val(self) -> int: return self.unbind()[1]\n def vars(self) -> set[UOp]:\n bound_vars = set([x for x in self.toposort() if x.op is Ops.BIND and x.src[0].op is Ops.DEFINE_VAR])\n bound_var_base = set(x.src[0] for x in bound_vars)\n all_vars = set([x for x in self.toposort() if x.op is Ops.DEFINE_VAR])\n return bound_vars.union(set([x for x in all_vars if x not in bound_var_base]))\n def variables(self) -> list[Variable]:\n st_vars: list[set[Variable]] = [x.arg.vars() for x in self.toposort() if x.op is Ops.VIEW]\n return sorted(set.union(*st_vars, set([x.unbind()[0] if x.op is not Ops.DEFINE_VAR else x for x in self.vars()])), key=lambda v: v.arg)\n\n # *** uop symbolic stuff ***\n\n def is_increasing(self:UOp) -> bool:\n # is f a monotonically increasing function regards its input\n if self.op in GroupOp.Irreducible: return True\n if self.op is Ops.ADD: return self.src[0].is_increasing() and self.src[1].is_increasing()\n if self.op in (Ops.MUL, Ops.IDIV) and self.src[1].op is Ops.CONST and self.src[1].arg >= 0: return self.src[0].is_increasing()\n return False # False if not sure\n def const_factor(self) -> int:\n """"""largest known int that divides self""""""\n # TODO: for negatives it's not the largest\n if self.op is Ops.CONST: return self.arg\n if self.op is Ops.VCONST: return math.gcd(*self.arg)\n if self.op is Ops.ADD: return math.gcd(self.src[0].const_factor(), self.src[1].const_factor())\n if self.op is Ops.MUL: return self.src[0].arg if self.src[0].op is Ops.CONST 
else self.src[1].arg if self.src[1].op is Ops.CONST else 1\n return 1\n def divides(self, v:int) -> UOp|None:\n if v==1: return self\n if self.op is Ops.CONST: return self.const_like(self.arg//v) if self.arg%v == 0 else None\n if self.op is Ops.VCONST: return self.const_like(tuple(x//v for x in self.arg)) if all(x%v == 0 for x in self.arg) else None\n if self.op is Ops.ADD: return d0+d1 if (d0:=self.src[0].divides(v)) is not None and (d1:=self.src[1].divides(v)) is not None else None\n if self.op is Ops.MUL:\n if (d0:=self.src[0].divides(v)) is not None: return d0 * self.src[1]\n if (d1:=self.src[1].divides(v)) is not None: return self.src[0] * d1\n return None # generic None if we aren't sure\n @property\n def vmin(self) -> ConstType: return self._min_max[0]\n @property\n def vmax(self) -> ConstType: return self._min_max[1]\n @functools.cached_property\n def _min_max(self) -> tuple[ConstType, ConstType]:\n if self.op in GroupOp.Binary and not dtypes.is_float(self.dtype):\n (s0_vmin, s0_vmax), (s1_vmin, s1_vmax) = self.src[0]._min_max, self.src[1]._min_max\n if self.op is Ops.ADD: return s0_vmin+s1_vmin, s0_vmax+s1_vmax\n if self.op is Ops.SUB: return s0_vmin-s1_vmax, s0_vmax-s1_vmin\n if self.op is Ops.AND and s1_vmin == s1_vmax and s0_vmin >= 0 and s1_vmin >= 0: return min(0, s0_vmin), min(s0_vmax, s1_vmax)\n if self.op is Ops.MUL: return min(vals:=(s0_vmin*s1_vmin, s0_vmin*s1_vmax, s0_vmax*s1_vmin, s0_vmax*s1_vmax)), max(vals)\n # SHL/SHR on consts only\n if self.op is Ops.SHL and s1_vmin == s1_vmax and all_int(t:=(s0_vmin, s0_vmax, s1_vmin)): return t[0] << t[2], t[1] << t[2]\n if self.op is Ops.SHR and s1_vmin == s1_vmax and all_int(t:=(s0_vmin, s0_vmax, s1_vmin)): return t[0] >> t[2], t[1] >> t[2]\n if self.op is Ops.MOD:\n if s1_vmin > 0: return (0, s1_vmax-1) if s0_vmin >= 0 else (-(s1_vmax-1), 0) if s0_vmax <= 0 else (-(s1_vmax-1), s1_vmax-1)\n if s1_vmax < 0: return (0, -s1_vmin-1) if s0_vmin >= 0 else (-(-s1_vmin-1), 0) if s0_vmax <= 0 else (-(-s1_vmin-1), -s1_vmin-1)\n if self.op is Ops.IDIV:\n assert isinstance(s0_vmin, int) and isinstance(s0_vmax, int) and isinstance(s1_vmin, int) and isinstance(s1_vmax, int)\n if (c:=s1_vmin) == s1_vmax: # s1 is a const\n if c > 0: return cdiv(s0_vmin, c), cdiv(s0_vmax, c)\n if c < 0: return cdiv(s0_vmax, c), cdiv(s0_vmin, c)\n if (s0_vmax <= 0 and s1_vmax < 0): return cdiv(s0_vmax, s1_vmin), cdiv(s0_vmin, s1_vmax)\n if (s0_vmin >= 0 and s1_vmin > 0): return cdiv(s0_vmin, s1_vmax), cdiv(s0_vmax, s1_vmin)\n if (s0_vmax <= 0 and s1_vmin > 0): return cdiv(s0_vmin, s1_vmin), cdiv(s0_vmax, s1_vmax)\n if (s0_vmin >= 0 and s1_vmax < 0): return cdiv(s0_vmax, s1_vmax), cdiv(s0_vmin, s1_vmin)\n if self.op is Ops.MAX: return max(s0_vmin, s1_vmin), max(s0_vmax, s1_vmax)\n if self.op is Ops.CMPLT: return (s0_vmax<s1_vmin, s0_vmin<s1_vmax)\n if self.op is Ops.CMPNE: return ((s0_vmax < s1_vmin) or (s1_vmax < s0_vmin), not (s0_vmin == s0_vmax == s1_vmin == s1_vmax))\n if self.dtype == dtypes.bool:\n if self.op is Ops.OR: return s0_vmin or s1_vmin, s0_vmax or s1_vmax\n if self.op is Ops.AND: return s0_vmin and s1_vmin, s0_vmax and s1_vmax\n # float has NAN issue and we use explicit NAN in transcendental\n if self.op is Ops.WHERE and dtypes.is_int(self.dtype): return min(self.src[1].vmin, self.src[2].vmin), max(self.src[1].vmax, self.src[2].vmax)\n # NOTE: returned UOp is assumed to be CONST\n if self.op is Ops.DEFINE_VAR and self.arg: return self.arg[1], self.arg[2]\n if self.op is Ops.RANGE: return 0, (self.src[0]-1).vmax\n if self.op is Ops.BIND: return 
self.src[0]._min_max # ignore the bound value\n if self.op in {Ops.UNROLL, Ops.VECTORIZE}: return min(x.vmin for x in self.src), max(x.vmax for x in self.src)\n # TODO: Ops.SPECIAL is Ops.DEFINE_VAR\n if self.op is Ops.SPECIAL: return 0, self.arg[1]-1 if isinstance(self.arg[1], int) else self.arg[1].vmax\n if self.op is Ops.CONST: return self.arg, self.arg\n if self.op is Ops.VCONST: return (min(self.arg), max(self.arg))\n # TODO: CAST to bool/unsigned is not monotone, still some case can be simplified\n if self.op is Ops.CAST and self.dtype in (dtypes.floats+dtypes.sints):\n return max(dtypes.min(self.dtype), self.src[0].vmin), min(self.src[0].vmax, dtypes.max(self.dtype))\n return dtypes.min(self.dtype), dtypes.max(self.dtype)\n\n @functools.cached_property\n def _sym_fxn(self):\n sself = self.simplify()\n varnames = tuple(x.arg[0] for x in sself.toposort() if x.op is Ops.DEFINE_VAR)\n # TODO: sanitize varnames, or don't use naked eval while staying fast\n return eval(""lambda ""+','.join(varnames)+"": ""+sself.render(pm=renderer_infer)), varnames # pylint: disable=eval-used\n\n def sym_infer(self, var_vals:dict[UOp, int]):\n fxn, varnames = self._sym_fxn\n return fxn(**{k.arg[0]:v for k,v in var_vals.items() if k.arg[0] in varnames})\n\n def render(self, simplify=True, pm:PatternMatcher|None=None) -> str:\n ret = graph_rewrite(self.simplify() if simplify else self, renderer if pm is None else pm)\n return ret.arg if ret.op is Ops.NOOP else str(ret)\n\nclass AxisType(Enum):\n GLOBAL = auto(); LOCAL = auto(); LOOP = auto(); GROUP_REDUCE = auto(); REDUCE = auto(); UPCAST = auto(); UNROLL = auto() # noqa: E702\n\n@dataclass(frozen=True)\nclass KernelInfo:\n name: str = ""test"" # name of the kernel\n axis_types: tuple[AxisType, ...] = tuple()\n dont_use_locals: bool = False # don't use local indexing\n applied_opts: tuple = tuple()\n opts_to_apply: tuple|None = None\n @property\n def function_name(self): return to_function_name(self.name)\n\n# ******** ops in python ********\n\ndef safe_exp2(x):\n try: return 2 ** x\n except OverflowError: return math.inf\n\ndef safe_pow(x, y):\n try: return math.nan if isinstance(p:=pow(x, y), complex) else p\n except ZeroDivisionError: return math.inf\n except ValueError: return math.inf if x > 0 else -math.inf\n\npython_alu: dict[Ops, Callable] = {\n Ops.LOG2: lambda x: math.log2(x) if x > 0 else -math.inf if x == 0 else math.nan, Ops.EXP2: safe_exp2,\n Ops.SQRT: lambda x: math.sqrt(x) if x >= 0 else math.nan, Ops.RECIP: lambda x: 1/x if x != 0 else math.copysign(math.inf, x),\n Ops.SIN: lambda x: math.sin(x) if not math.isinf(x) else math.nan, Ops.POW: safe_pow,\n Ops.NEG: operator.neg, Ops.ADD: operator.add, Ops.SUB: operator.sub, Ops.MUL: operator.mul, Ops.CMPNE: operator.ne, Ops.CMPLT: operator.lt,\n Ops.XOR: operator.xor, Ops.OR: operator.or_, Ops.AND: operator.and_, Ops.SHR: operator.rshift, Ops.SHL: operator.lshift, Ops.MAX: max,\n Ops.MOD: cmod, Ops.IDIV: cdiv, Ops.MULACC: lambda x,y,z: (x*y)+z, Ops.WHERE: lambda x,y,z: y if x else z, Ops.CMPEQ: operator.eq}\n\ndef exec_alu(op:Ops, dtype:DType, operands, truncate_output=True):\n if dtype.count > 1:\n return tuple([exec_alu(op, dtype.scalar(), [x[i] if isinstance(x, tuple) else x for x in operands]) for i in range(dtype.count)])\n alu = python_alu[op](*operands)\n return truncate.get(dtype, lambda x: x)(alu) if truncate_output else alu\n\n# ***** uop helpers *****\n\ndef print_uops(uops:list[UOp]):\n for i,u in enumerate(uops):\n formatted_parents = [(uops.index(x) if x.op is not Ops.CONST else 
f""{x.arg}"") if x in uops else ""--"" for x in u.src]\n print(f""{i:4d} {str(u.op):20s}: {str(u.dtype):30s} "" f""{str(formatted_parents):32s} {u.arg}"")\n\n# ***** pattern matcher *****\n\ndef get_location() -> tuple[str, int]:\n frm = sys._getframe(1)\n # skip over ops.py/mathtraits.py (unless there's nothing but ops.py/mathtraits.py)\n while pathlib.Path(frm.f_code.co_filename).name in (""ops.py"", ""mathtraits.py"") and frm.f_back is not None and \\n not frm.f_back.f_code.co_filename.startswith(""<frozen""):\n frm = frm.f_back\n return frm.f_code.co_filename, frm.f_lineno\n\n@functools.cache\ndef lines(fn) -> list[str]:\n with open(fn) as f: return f.readlines()\n\ndef printable(loc:tuple[str, int]) -> str:\n try: return lines(loc[0])[loc[1]-1].strip()\n except FileNotFoundError: return ""<missing>""\n\nclass UPat(MathTrait):\n __slots__ = (""op"", ""dtype"", ""arg"", ""name"", ""src"")\n def __init__(self, op:Ops|tuple[Ops, ...]|set[Ops]|None=None, dtype:DType|tuple[DType, ...]|None=None,\n src:tuple[UPat, ...]|list[UPat]|UPat|None=None, arg:Any=None,\n name:str|None=None, allow_any_len:bool=False, custom_early_reject:set[Ops]|None=None, location=None):\n assert op is None or isinstance(op, (Ops, tuple, set)), ""op must be Ops or tuple of Ops""\n self.op: tuple[Ops, ...]|None = (op,) if isinstance(op, Ops) else (tuple(op) if isinstance(op, set) else op)\n self.dtype: tuple[DType, ...]|None = (dtype,) if isinstance(dtype, DType) else dtype\n self.arg, self.name, self._in_src, self.custom_early_reject = arg, name, src, custom_early_reject\n self.src: Any = None\n assert self.name != ""ctx"", ""UPat can't be named ctx""\n assert dtype is None or isinstance(dtype, DType) or all(isinstance(x, DType) for x in dtype), f""invalid dtype {dtype}""\n\n # try all permutations if it's a list\n if isinstance(src, list): self.src = list(itertools.permutations(src)) if not all_same(src) else [tuple(src)]\n # only one if it's a tuple\n elif isinstance(src, tuple): self.src = [src]\n # repeat if it's a UPat\n elif isinstance(src, UPat): self.src = [itertools.repeat(src)]\n\n self.strict_length = not (allow_any_len or isinstance(src, UPat) or src is None)\n self.required_len: int = 0 if isinstance(src, UPat) or src is None else len(src)\n self.location = location or get_location()\n\n if custom_early_reject is not None: self.early_reject = custom_early_reject\n else:\n upat_match = [src] if isinstance(src, UPat) else ([] if src is None else self.src[0])\n self.early_reject = {pp.op[0] for pp in upat_match if pp.op is not None and len(pp.op) == 1}\n\n def __reduce__(self):\n return UPat, (self.op, self.dtype, self._in_src, self.arg, self.name, not self.strict_length, self.custom_early_reject, self.location)\n def named(self, name:str): return UPat(self.op, self.dtype, self._in_src, self.arg, name, not self.strict_length, self.custom_early_reject)\n\n @staticmethod\n def any(*src): return UPatAny(src=src)\n def or_casted(self, name:str|None=None): return UPat.any(self if name is None else self.named(name), UPat(Ops.CAST, name=name, src=(self,)))\n\n @staticmethod\n @functools.cache\n def var(name:str|None=None, dtype:DType|tuple[DType, ...]|None=None): return UPat(dtype=dtype, name=name)\n @staticmethod\n @functools.cache\n def cvar(name:str|None=None, dtype:DType|None=None, vec=True): return UPat((Ops.CONST,Ops.VCONST) if vec else Ops.CONST, dtype, name=name)\n @staticmethod\n def const(dtype:DType|tuple[DType, ...]|None, b:ConstType): return UPat(Ops.CONST, dtype=dtype, arg=b)\n\n # copied from UOp\n 
def sink(self, *srcs:UPat|None, **kwargs): return UPat(Ops.SINK, dtypes.void, (self,)+tuple([x for x in srcs if x is not None]), **kwargs)\n def index(self, idx:UPat, valid:UPat|None=None): return UPat(Ops.INDEX, self.dtype, (self,idx,valid) if valid is not None else (self,idx))\n def view(self, st=None, **kwargs): return UPat(Ops.VIEW, self.dtype, (self,), st, **kwargs)\n def cast(self, dtype=None, **kwargs): return UPat(Ops.CAST, dtype, (self,), **kwargs)\n def bitcast(self, dtype=None): return UPat(Ops.BITCAST, dtype, (self,))\n def gep(self, i:int|None=None, **kwargs): return UPat(Ops.GEP, None, (self,), (i,) if i is not None else None, **kwargs)\n def load(self, *src:UPat, **kwargs): return UPat(Ops.LOAD, src=(self,)+src, **kwargs)\n def store(self, *src:UPat, **kwargs): return UPat(Ops.STORE, self.dtype, (self,)+src, **kwargs)\n def assign(self, x:UPat, **kwargs): return UPat(Ops.ASSIGN, self.dtype, (self,x), **kwargs)\n def reduce(self, *src:UPat, **kwargs): return UPat(Ops.REDUCE, self.dtype, src=(self,)+src, **kwargs)\n def fuse(self): return self.alu(Ops.FUSE)\n def or_broadcasted(self, **kwargs): return UPat.any(self, UPat(Ops.VECTORIZE, self.dtype, src=self, **kwargs))\n\n def const_like(self, b:ConstLike): return UPat.const(self.dtype, cast(ConstType, b))\n def alu(self, op:Ops, *src:UPat):\n asrc = (self,)+src\n return UPat(op, dtypes.bool if op in {Ops.CMPLT, Ops.CMPNE} else asrc[-1].dtype, list(asrc) if op in GroupOp.Commutative else asrc)\n\n def __repr__(self):\n def rep(x):\n form = ""UPat(%s, %s, name=%s, dtype=%s, allow_any_len=%s, src=%s)""\n return form % (None if x.op is None else ('(%s)'%', '.join(map(str, x.op))), x.arg, repr(x.name),\n set(x.dtype) if x.dtype else None, not x.strict_length, ""[%s]"" if x.src and len(x.src)>1 else (""(%s)"" if x.src else ""%s""))\n return pretty_print(self, rep, srcfn=lambda x:None if x.src is None else [next(x.src[0])] if isinstance(x.src[0], itertools.repeat) else x.src[0])\n\n def match(self:UPat, uop:UOp, store:dict[str, UOp]) -> list[dict[str, UOp]]:\n if (self.op is not None and uop.op not in self.op) or \\n (self.name is not None and store.setdefault(self.name, uop) is not uop) or \\n (self.dtype is not None and uop.dtype not in self.dtype and uop.dtype.scalar() not in self.dtype) or \\n (self.arg is not None and self.arg != uop.arg) or \\n (len(uop.src) < self.required_len) or \\n (self.strict_length and len(uop.src) != self.required_len): return []\n if self.src is None: return [store]\n res: list[dict[str, UOp]] = []\n for vp in self.src:\n stores, new_stores = [store.copy()], []\n for uu, vv in zip(uop.src, vp):\n for s in stores: new_stores.extend(vv.match(uu, s))\n stores, new_stores = new_stores, []\n res.extend(stores)\n return res\n\nclass UPatAny(UPat):\n def match(self:UPat, uop:UOp, store:dict[str, UOp]) -> list[dict[str, UOp]]:\n matches = [x.match(uop, store.copy()) for x in self.src[0]]\n return flatten([x for x in matches if x is not None])\n\ndef deconstruct_function(fxn:Callable) -> tuple:\n new_globals = {k:v for k,v in fxn.__globals__.items() if k in fxn.__code__.co_names}\n for co in fxn.__code__.co_consts:\n if isinstance(co, types.CodeType): new_globals.update({k:v for k,v in fxn.__globals__.items() if k in co.co_names})\n # NOTE: optional round trip through pickle!\n assert fxn.__closure__ is None, ""closures are not supported in pattern matchers""\n ret = fxn.__code__, new_globals, fxn.__name__, fxn.__defaults__\n return pickle.loads(pickle.dumps(ret)) if getenv(""TEST_PICKLE"") else 
ret\n\n@functools.cache\ndef upat_interpret(p:UPat, fxn:Callable) -> Callable:\n real_fxn = types.FunctionType(*deconstruct_function(fxn))\n if 'ctx' in inspect.signature(real_fxn).parameters:\n def universal_match(uop, ctx):\n for match in p.match(uop, {}):\n if (ret:=real_fxn(ctx=ctx, **match)) is not None: return ret # pylint: disable=not-callable\n return None\n else:\n def universal_match(uop, _):\n for match in p.match(uop, {}):\n if (ret:=real_fxn(**match)) is not None: return ret # pylint: disable=not-callable\n return None\n return universal_match\n\nclass PatternMatcher:\n def __init__(self, patterns:Sequence[tuple[UPat, Callable|tuple]], compiled=bool(getenv(""UPAT_COMPILE"", 1))):\n if compiled: from tinygrad.uop.upat import upat_compile\n # if this comes from a pickle, we reconstruct the lambda functions here\n self.patterns:list[tuple[UPat, Callable]] = [(p,types.FunctionType(*fxn) if isinstance(fxn, tuple) else fxn) for p,fxn in patterns]\n # NOTE: use of DefaultDict here is very dangerous! all keys will live for the lifetime of the PatternMatcher!\n self.pdict: dict[Ops, list[tuple[UPat, Callable, set]]] = {}\n # uop is required, arg is optional\n for p,fxn in self.patterns:\n assert p.op is not None\n if compiled and (match:=upat_compile(p, fxn)) is not None: pass # pylint: disable=E0606\n else: match = upat_interpret(p, fxn)\n for uop in p.op: self.pdict.setdefault(uop, []).append((p, match, p.early_reject))\n\n def __reduce__(self): return PatternMatcher, ([(x,deconstruct_function(fxn) if fxn.__name__ == ""<lambda>"" else fxn) for x,fxn in self.patterns],)\n\n @functools.cache # pylint: disable=method-cache-max-size-none\n def __add__(self, more:PatternMatcher): return PatternMatcher(self.patterns+more.patterns)\n\n def rewrite(self, uop:UOp, ctx=None) -> UOp|None:\n ler = {u.op for u in uop.src}\n for _,match,early_reject in self.pdict.get(uop.op, []):\n if not early_reject.issubset(ler): continue\n if (ret:=match(uop, ctx)) is not None and ret is not uop: return ret\n return None\n\n def fixed_point_rewrite(self, uop:UOp, ctx=None) -> UOp:\n # apply rewrite rules until a fixed point is reached. 
may return `uop` itself if PatternMatcher doesn't match\n new_n: UOp|None = uop\n seen = set()\n while new_n is not None:\n if new_n in seen: raise RuntimeError(""infinite loop in fixed_point_rewrite"")\n seen.add(new_n)\n last_n, new_n = new_n, self.rewrite(new_n, ctx)\n return last_n\n\n# *** non-blocking UOp tracker ***\n\nucount = itertools.count()\nuop_number:weakref.WeakKeyDictionary[UOp, int] = weakref.WeakKeyDictionary()\nuop_fields:dict[int, tuple] = {}\ndef track_uop(u:UOp):\n if (cret:=uop_number.get(u)) is not None: return cret\n uop_number[u] = num = next(ucount)\n # KERNEL also has a UOp in the arg\n arg = type(u.arg)(track_uop(u.arg.ast), u.arg.metadata) if u.op is Ops.KERNEL else u.arg\n uop_fields[num] = (u.op, u.dtype, tuple(track_uop(s) for s in u.src), arg, u.tag)\n return num\n\n# *** tracking pattern matcher ***\n\nVIZ = ContextVar(""VIZ"", 0)\nTRACK_MATCH_STATS = ContextVar(""TRACK_MATCH_STATS"", 2 if VIZ else 0)\nmatch_stats:dict[UPat, list[int|float]] = dict()\n\n@dataclass(frozen=True)\nclass TrackedGraphRewrite:\n loc:tuple[str, int] # location that called graph_rewrite\n sink:int # the sink input to graph_rewrite\n matches:list[tuple[int, int, tuple]] # before/after UOp, UPat location\n name:str|None # optional name of the rewrite\n depth:int # depth if it's a subrewrite\n bottom_up:bool\n\ntracked_keys:list[TracingKey] = []\ntracked_ctxs:list[list[TrackedGraphRewrite]] = []\n_name_cnt:dict[str, itertools.count] = {}\n\nif getenv(""CAPTURE_PROCESS_REPLAY""):\n replay_capture: dict[str, bytes] = {}\n import atexit\n @atexit.register\n def save_to_diskcache():\n for k,v in replay_capture.items(): diskcache_put(""process_replay"", k, v, prepickled=True)\n\ndef track_rewrites(name:Callable[..., str|TracingKey]|bool=True):\n def _decorator(func):\n def __wrapper(*args, **kwargs):\n fn = key = func.__name__\n if TRACK_MATCH_STATS >= 2:\n tracked_keys.append(key:=TracingKey(n:=f""{fn} n{next(_name_cnt.setdefault(fn, itertools.count(1)))}"", (n,), cat=fn))\n tracked_ctxs.append([])\n with cpu_profile(key, ""TINY"") as e:\n ret = func(*args, **kwargs)\n if TRACK_MATCH_STATS >= 2 and callable(name):\n name_ret = name(*args, **kwargs, ret=ret)\n assert isinstance(name_ret, (TracingKey, str)), f""name function returned {type(name_ret)}""\n tracked_keys[-1] = k = TracingKey(n:=tracked_keys[-1].display_name.replace(fn, name_ret), (n,)) if isinstance(name_ret, str) else name_ret\n e.name = TracingKey(k.display_name if isinstance(name_ret, str) else f""{fn} for {k.display_name}"", k.keys, cat=fn)\n if getenv(""CAPTURE_PROCESS_REPLAY""):\n # find the unittest frame we're capturing in\n frm = sys._getframe(1)\n while (f_back:=frm.f_back) is not None and ""unittest"" not in f_back.f_code.co_filename: frm = f_back\n loc = f""{frm.f_code.co_filename.split('/')[-1]}:{frm.f_lineno} {frm.f_code.co_name}""\n # capture global context vars and all the args passed in\n with Context(PICKLE_BUFFERS=0):\n inputs = (fn, args, kwargs, ContextVar._cache)\n replay_capture[hashlib.sha256(pickle.dumps(inputs)).hexdigest()] = pickle.dumps(inputs+(loc, ret))\n return ret\n return __wrapper\n return _decorator\n\nactive_rewrites:list[TrackedGraphRewrite] = []\ndef track_matches(func):\n def _track_func(*args, **kwargs):\n if tracking:=(TRACK_MATCH_STATS >= 2 and tracked_ctxs):\n loc = ((frm:=sys._getframe(1)).f_code.co_filename, frm.f_lineno)\n depth = len(active_rewrites)\n tracked_ctxs[-1].append(ctx:=TrackedGraphRewrite(loc, track_uop(args[0]), [], kwargs.get(""name"", None), depth, 
kwargs.get(""bottom_up"", False)))\n active_rewrites.append(ctx)\n with cpu_profile(kwargs.get(""name"", ""<unnamed>""), ""TINY"", display=tracking):\n ret = func(*args, **kwargs)\n if tracking: active_rewrites.pop()\n return ret\n return _track_func\n\nclass TrackedPatternMatcher(PatternMatcher):\n def rewrite(self, uop:UOp, ctx=None) -> UOp|None:\n ret = None\n ler = {u.op for u in uop.src}\n for p,match,early_reject in self.pdict.get(uop.op, []):\n if p not in match_stats: match_stats[p] = [0,0,0.0,0.0]\n st = time.perf_counter()\n if not early_reject.issubset(ler):\n match_stats[p][2] += time.perf_counter()-st\n continue\n match_stats[p][1] += 1\n if (ret:=match(uop, ctx)) is not None and ret is not uop:\n match_stats[p][0] += 1\n match_stats[p][3] += (et:=time.perf_counter()-st)\n if TRACK_MATCH_STATS >= 3: print(f""{et*1e6:7.2f} us -- "", printable(p.location))\n if TRACK_MATCH_STATS >= 2 and isinstance(ret, UOp) and active_rewrites:\n active_rewrites[-1].matches.append((track_uop(uop), track_uop(ret), p.location))\n return ret\n match_stats[p][2] += time.perf_counter()-st\n return None\n\nif TRACK_MATCH_STATS or PROFILE:\n PatternMatcher = TrackedPatternMatcher # type: ignore\n import atexit\n @atexit.register\n def print_match_stats():\n if TRACK_MATCH_STATS >= 2:\n with open(fn:=temp(""rewrites.pkl"", append_user=True), ""wb"") as f:\n print(f""rewrote {len(tracked_ctxs)} graphs and matched {sum(len(r.matches) for x in tracked_ctxs for r in x)} times, saved to {fn}"")\n pickle.dump((tracked_keys, tracked_ctxs, uop_fields), f)\n if VIZ: launch_viz(VIZ, temp(""rewrites.pkl"", append_user=True))\n if getenv(""PRINT_MATCH_STATS"", TRACK_MATCH_STATS.value):\n ret = [0,0,0.0,0.0]\n for k,v in sorted(list(match_stats.items()), key=lambda x: x[1][2]+x[1][3]):\n loc_str = f""{k.location[0].split('/')[-1]}:{k.location[1]}""\n if v[1] != 0: print(f""{v[0]:6d} / {v[1]:7d} -- {v[3]*1000.:9.2f} / {(v[2]+v[3])*1000.:9.2f} ms -- {loc_str:20s}"", printable(k.location))\n ret = [x+y for x,y in zip(ret, v)]\n print(f""{ret[0]:6d} / {ret[1]:7d} -- {ret[3]*1000.:9.2f} / {(ret[2]+ret[3])*1000.:9.2f} ms -- TOTAL"")\n print(f""{len(match_stats)} rules, {sum(v[0] > 0 for v in match_stats.values())} matched once"")\n\n def launch_viz(var:ContextVar, data:str):\n os.environ[(env_str:=var.key)] = ""0""\n os.environ[f""{env_str}_DATA""] = data\n os.environ[f""{env_str}_VALUE""] = str(var.value)\n if not int(os.getenv(""VIZ"", ""0"")) and not int(os.getenv(""PROFILE"", ""0"")):\n args = ['--kernels', getenv(""VIZ_DATA"", """")] if getenv(""VIZ_DATA"", """") else []\n args += ['--profile', getenv(""PROFILE_DATA"", """")] if getenv(""PROFILE_DATA"", """") else []\n os.execv(sys.executable, [sys.executable] + [os.path.join(os.path.dirname(__file__), ""../"", ""viz"", ""serve.py"")] + args)\n\n# *** simple graph rewrite engine ***\n\nclass RewriteNotReady(Exception): pass\nclass RewriteContext:\n def __init__(self, pm, bpm, ctx=None):\n self.pm: PatternMatcher|None = pm\n self.bpm: PatternMatcher|None = bpm\n self.ctx = ctx\n self.replace: dict[UOp, UOp] = {}\n\n def unified_rewrite(self, root:UOp) -> UOp:\n stack: list[tuple[UOp, int, UOp]] = [(root, 0, root)]\n while stack:\n if len(stack) >= 200000: raise RuntimeError(""infinite loop in graph_rewrite"")\n n, stage, new_n = stack.pop()\n if n in self.replace: continue # skip any nodes we have seen\n try:\n if stage == 0:\n # if bottom up, we rewrite this node early. 
in both cases, we add its parents to the stack\n if self.bpm is not None: new_n = self.bpm.fixed_point_rewrite(new_n, self.ctx)\n stack.append((n, 1, new_n))\n for x in reversed(new_n.src): stack.append((x, 0, x))\n elif stage == 1:\n try: new_src = tuple([self.replace[x] for x in new_n.src])\n except KeyError: raise RewriteNotReady # pylint: disable=raise-missing-from\n if new_src == new_n.src:\n # if top down, do the rewrite. if no rewrite or bottom up, we are done rewriting this node so we add it to the dict\n if self.pm is None or (new_src_n:=self.pm.rewrite(new_n, self.ctx)) is None:\n self.replace[n] = new_n\n continue\n else:\n # if srcs changed from rewrites, construct a new UOp with the new srcs\n new_src_n = UOp(new_n.op, new_n.dtype, new_src, new_n.arg, new_n.tag)\n # trigger a rewrite of new_src_n, then after that rewrite is done, link it back to n\n stack.append((n, 2, new_src_n))\n stack.append((new_src_n, 0, new_src_n))\n else:\n # in stage 2, we link the result of new_n to the result of n\n try: self.replace[n] = self.replace[new_n]\n except KeyError: raise RewriteNotReady # pylint: disable=raise-missing-from\n except RewriteNotReady:\n # retry this later\n stack.insert(0, (n, stage, new_n))\n return self.replace[root]\n\n@track_matches\ndef graph_rewrite(sink:UOp, pm:PatternMatcher, ctx=None, bottom_up=False, name=None, bpm=None) -> UOp:\n rewrite_ctx = RewriteContext(pm if not bottom_up else None, pm if bottom_up else bpm, ctx)\n return rewrite_ctx.unified_rewrite(sink)\n\n@track_matches\ndef graph_rewrite_map(sink:UOp, pm:PatternMatcher, ctx=None, bottom_up=False, name=None, bpm=None,\n input_map:dict[UOp, UOp]|None=None, ) -> dict[UOp, UOp]:\n rewrite_ctx = RewriteContext(pm if not bottom_up else None, pm if bottom_up else bpm, ctx)\n new_map: dict[UOp, UOp] = {}\n for k in sink.toposort():\n new_map[k] = v = rewrite_ctx.unified_rewrite(k)\n if k is not v and k.metadata is not None: all_metadata[v] = tuple(dedup(all_metadata.get(v, ())))+k.metadata\n if input_map is not None:\n for k,v in input_map.items(): new_map[k] = new_map.get(v,v)\n return new_map\n\ndef sint_to_uop(x:sint, dtype:DType=dtypes.int) -> UOp: return UOp.const(dtype, x) if isinstance(x, int) else x\n\n_substitute = PatternMatcher([(UPat(tuple(Ops), name=""x""), lambda ctx,x: ctx.get(x,None))])\n\n# for debug\nsyms = { Ops.ADD: ""+"", Ops.SUB: ""-"", Ops.IDIV: ""//"", Ops.MOD: ""%"", Ops.SHL: ""<<"", Ops.SHR: "">>"",\n Ops.MUL: ""*"", Ops.CMPLT: ""<"", Ops.CMPNE: ""!="", Ops.AND: ""&"", Ops.OR: ""|"", Ops.XOR: ""^""}\nrenderer = PatternMatcher([\n (UPat((Ops.DEFINE_VAR, Ops.SPECIAL), name=""x""), lambda x: UOp(Ops.NOOP, arg=x.arg[0])),\n (UPat(Ops.RANGE, name=""x""), lambda x: UOp(Ops.NOOP, arg=f""ridx{x.arg}"")),\n (UPat((Ops.CONST, Ops.VCONST), name=""x""), lambda x: UOp(Ops.NOOP, arg=str(x.arg))),\n (UPat(Ops.UNROLL, name=""x""), lambda x: UOp(Ops.NOOP, arg=f""UNROLL({x.src[0].arg}, {x.arg})"")),\n (UPat(Ops.CAST, name=""x""), lambda x: UOp(Ops.NOOP, arg=f""({str(x.dtype)[7:]})({x.src[0].arg})"")),\n (UPat(Ops.LOAD), lambda: UOp(Ops.NOOP, arg=""load"")),\n (UPat(Ops.BIND, src=UPat(Ops.NOOP), name=""x""), lambda x: x.src[0]),\n #(UPat(Ops.BIND, src=UPat(Ops.NOOP), name=""x""), lambda x: UOp(Ops.NOOP, arg=f""{x.src[0].arg}[={x.src[1].arg}]"")),\n (UPat(Ops.NEG, src=UPat(Ops.NOOP), name=""x""), lambda x: UOp(Ops.NOOP, arg=f""(-{x.src[0].arg})"")),\n (UPat(Ops.RECIP, src=UPat(Ops.NOOP), name=""x""), lambda x: UOp(Ops.NOOP, arg=f""(1/{x.src[0].arg})"")),\n (UPat(Ops.MAX, src=UPat(Ops.NOOP), name=""x""), 
lambda x: UOp(Ops.NOOP, arg=f""max({x.src[0].arg}, {x.src[1].arg})"")),\n (UPat(Ops.MULACC, src=UPat(Ops.NOOP), name=""x""), lambda x: UOp(Ops.NOOP, arg=f""({x.src[0].arg}*{x.src[1].arg}+{x.src[2].arg})"")),\n (UPat(Ops.WHERE, src=UPat(Ops.NOOP), name=""x""), lambda x: UOp(Ops.NOOP, arg=f""({x.src[1].arg} if {x.src[0].arg} else {x.src[2].arg})"")),\n (UPat(GroupOp.ALU, src=UPat(Ops.NOOP), name=""x""), lambda x: UOp(Ops.NOOP, arg=f""({x.src[0].arg}{syms[x.op]}{x.src[1].arg})"")),\n])\nrenderer_infer = PatternMatcher([\n (UPat(Ops.MOD, src=UPat(Ops.NOOP), name=""x""), lambda x: UOp(Ops.NOOP, arg=f""cmod({x.src[0].arg}, {x.src[1].arg})"")),\n (UPat(Ops.IDIV, src=UPat(Ops.NOOP), name=""x""), lambda x: UOp(Ops.NOOP, arg=f""cdiv({x.src[0].arg}, {x.src[1].arg})"")),\n *renderer.patterns\n])\n\n# *** what was symbolic.py ***\n\nsint = int|UOp\nVariable = UOp\n\nConstLike = ConstType|Variable|tuple[ConstType, ...]\n",python,tab
|
| 16 |
+
16,485019,"tinygrad/uop/ops.py",22784,0,"",python,selection_command
|
| 17 |
+
17,486621,"tinygrad/__init__.py",0,0,"",python,tab
|
| 18 |
+
18,486633,"tinygrad/__init__.py",333,0,"",python,selection_command
|
| 19 |
+
19,486760,"examples/gpt2.py",0,0,"",python,tab
|
| 20 |
+
20,486769,"examples/gpt2.py",864,0,"",python,selection_command
|
| 21 |
+
21,490868,"examples/gpt2.py",823,0,"",python,selection_command
|
| 22 |
+
22,495277,"examples/gpt2.py",908,0,"",python,selection_command
|
| 23 |
+
23,495533,"examples/gpt2.py",955,0,"",python,selection_command
|
| 24 |
+
24,495562,"examples/gpt2.py",1008,0,"",python,selection_command
|
| 25 |
+
25,495589,"examples/gpt2.py",1040,0,"",python,selection_command
|
| 26 |
+
26,495625,"examples/gpt2.py",1041,0,"",python,selection_command
|
| 27 |
+
27,495658,"examples/gpt2.py",1067,0,"",python,selection_command
|
| 28 |
+
28,495689,"examples/gpt2.py",1093,0,"",python,selection_command
|
| 29 |
+
29,495830,"examples/gpt2.py",1235,0,"",python,selection_command
|
| 30 |
+
30,496088,"examples/gpt2.py",1268,0,"",python,selection_command
|
| 31 |
+
31,496123,"examples/gpt2.py",1269,0,"",python,selection_command
|
| 32 |
+
32,496153,"examples/gpt2.py",1291,0,"",python,selection_command
|
| 33 |
+
33,496190,"examples/gpt2.py",1329,0,"",python,selection_command
|
| 34 |
+
34,498903,"examples/gpt2.py",1335,0,"",python,selection_command
|
| 35 |
+
35,499152,"examples/gpt2.py",1339,0,"",python,selection_command
|
| 36 |
+
36,499527,"examples/gpt2.py",1340,0,"",python,selection_command
|
| 37 |
+
37,500851,"examples/gpt2.py",1349,0,"",python,selection_command
|
| 38 |
+
38,501076,"examples/gpt2.py",1351,0,"",python,selection_command
|
| 39 |
+
39,501567,"examples/gpt2.py",1357,0,"",python,selection_command
|
| 40 |
+
40,501785,"examples/gpt2.py",1358,0,"",python,selection_command
|
| 41 |
+
41,520618,"examples/gpt2.py",1363,0,"",python,selection_command
|
| 42 |
+
42,521029,"examples/gpt2.py",1364,0,"",python,selection_command
|
| 43 |
+
43,521897,"examples/gpt2.py",1365,0,"",python,selection_command
|
| 44 |
+
44,522144,"examples/gpt2.py",1367,0,"",python,selection_command
|
| 45 |
+
45,522600,"examples/gpt2.py",1370,0,"",python,selection_command
|
| 46 |
+
46,522817,"examples/gpt2.py",1372,0,"",python,selection_command
|
| 47 |
+
47,523065,"examples/gpt2.py",1383,0,"",python,selection_command
|
| 48 |
+
48,523095,"examples/gpt2.py",1385,0,"",python,selection_command
|
| 49 |
+
49,523258,"examples/gpt2.py",1389,0,"",python,selection_command
|
| 50 |
+
50,523516,"examples/gpt2.py",1390,0,"",python,selection_command
|
| 51 |
+
51,523546,"examples/gpt2.py",1397,0,"",python,selection_command
|
| 52 |
+
52,523572,"examples/gpt2.py",1399,0,"",python,selection_command
|
| 53 |
+
53,523800,"examples/gpt2.py",1403,0,"",python,selection_command
|
| 54 |
+
54,524051,"examples/gpt2.py",1404,0,"",python,selection_command
|
| 55 |
+
55,524078,"examples/gpt2.py",1412,0,"",python,selection_command
|
| 56 |
+
56,524115,"examples/gpt2.py",1414,0,"",python,selection_command
|
| 57 |
+
57,524307,"examples/gpt2.py",1419,0,"",python,selection_command
|
| 58 |
+
58,524561,"examples/gpt2.py",1420,0,"",python,selection_command
|
| 59 |
+
59,524596,"examples/gpt2.py",1421,0,"",python,selection_command
|
| 60 |
+
60,524623,"examples/gpt2.py",1422,0,"",python,selection_command
|
| 61 |
+
61,524663,"examples/gpt2.py",1427,0,"",python,selection_command
|
| 62 |
+
62,524689,"examples/gpt2.py",1429,0,"",python,selection_command
|
| 63 |
+
63,524728,"examples/gpt2.py",1439,0,"",python,selection_command
|
| 64 |
+
64,524763,"examples/gpt2.py",1442,0,"",python,selection_command
|
| 65 |
+
65,524792,"examples/gpt2.py",1449,0,"",python,selection_command
|
| 66 |
+
66,524949,"examples/gpt2.py",1452,0,"",python,selection_command
|
| 67 |
+
67,525953,"examples/gpt2.py",1329,0,"",python,selection_command
|
| 68 |
+
68,526084,"examples/gpt2.py",1335,0,"",python,selection_command
|
| 69 |
+
69,607176,"examples/gpt2.py",1329,0,"",python,selection_command
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-a8a1505c-044b-48cb-8f8f-813d10c86e631752143082919-2025_07_10-12.25.38.442/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-ac1e2da2-a2d2-4327-aaa3-5900bc2b3a561753469517123-2025_07_25-20.52.05.763/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-ae61aa9e-3c01-45fb-b595-af4191213c4d1752828764609-2025_07_18-10.53.17.363/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-b07db19d-68eb-49f3-ac94-3d4c9ee495c61751056974607-2025_06_27-13.42.58.489/source.csv
ADDED
|
@@ -0,0 +1,16 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,tab
|
| 3 |
+
2,58,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:42:58 PM [info] Activating crowd-code\n1:42:58 PM [info] Recording started\n1:42:58 PM [info] Initializing git provider using file system watchers...\n1:42:58 PM [info] Git repository found\n1:42:58 PM [info] Git provider initialized successfully\n",Log,tab
|
| 4 |
+
3,161,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"1:42:58 PM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,8793389,"models/dynamics.py",0,0,"",python,tab
|
| 6 |
+
5,8794936,"models/dynamics.py",375,0,"",python,selection_mouse
|
| 7 |
+
6,8794945,"models/dynamics.py",374,0,"",python,selection_command
|
| 8 |
+
7,8800138,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\n@jax.jit\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = 
inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
| 9 |
+
8,8801525,"train_tokenizer.py",7056,0,"",python,selection_command
|
| 10 |
+
9,8808394,"train_tokenizer.py",7092,0,"",python,selection_mouse
|
| 11 |
+
10,8808797,"train_tokenizer.py",7058,0,"",python,selection_mouse
|
| 12 |
+
11,8874794,"train_tokenizer.py",7092,0,"",python,selection_mouse
|
| 13 |
+
12,8875429,"train_tokenizer.py",7048,0,"",python,selection_mouse
|
| 14 |
+
13,8876329,"train_tokenizer.py",7092,0,"",python,selection_mouse
|
| 15 |
+
14,8877310,"train_tokenizer.py",7041,0,"",python,selection_mouse
|
| 16 |
+
15,8878062,"train_tokenizer.py",7007,0,"",python,selection_command
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-b49e6270-a637-4365-8782-4c6523f19f151751436712098-2025_07_02-08.12.19.616/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-e7d20f74-415c-47d0-ad95-3f6da31696d51753194904459-2025_07_22-16.35.52.74/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-f0382786-979c-4a6d-8e9b-f5977f18eb4f1753726151187-2025_08_02-06.58.58.573/source.csv
ADDED
|
@@ -0,0 +1,4 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,1,"sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n """"""\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n jax.distributed.initialize()\n\n rng = jax.random.key(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.Optimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = 
checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n def _sampling_fn(model: Genie, batch: dict) -> jax.Array:\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n return model.sample(\n batch,\n args.seq_len,\n args.maskgit_steps,\n args.temperature,\n args.sample_argmax,\n )\n\n # --- Define autoregressive sampling loop ---\n @nnx.jit\n def _autoreg_sample(rng, video_batch_BSHWC, action_batch_E):\n input_video_BTHWC = video_batch_BSHWC[:, : args.start_frame + 1]\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=input_video_BTHWC, latent_actions=action_batch_E, rng=_rng)\n generated_vid_BSHWC = _sampling_fn(genie, batch)\n return generated_vid_BSHWC\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n # We don't use workers in order to avoid grain shutdown issues (https://github.com/google/grain/issues/398)\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = iter(dataloader)\n video_batch_BSHWC = next(dataloader)\n gt_video = jnp.asarray(video_batch_BSHWC, dtype=jnp.float32) / 255.0\n video_batch_BSHWC = gt_video.astype(args.dtype)\n # Get latent actions for all videos in the batch\n batch = dict(videos=video_batch_BSHWC)\n action_batch_E = genie.vq_encode(batch, training=False)\n\n # --- Sample + evaluate video ---\n recon_video_BSHWC = _autoreg_sample(rng, video_batch_BSHWC, action_batch_E)\n recon_video_BSHWC = recon_video_BSHWC.astype(jnp.float32)\n gt = gt_video[:, : recon_video_BSHWC.shape[1]].clip(0, 1).reshape(-1, *gt_video.shape[2:])\n recon = recon_video_BSHWC.clip(0, 1).reshape(-1, *recon_video_BSHWC.shape[2:])\n ssim = jnp.asarray(\n pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :])\n ).mean()\n print(f""SSIM: {ssim}"")\n\n # --- Construct video ---\n true_videos = (gt_video * 255).astype(np.uint8)\n pred_videos = (recon_video_BSHWC * 255).astype(np.uint8)\n video_comparison = np.zeros((2, *recon_video_BSHWC.shape), dtype=np.uint8)\n video_comparison[0] = true_videos[:, : args.seq_len]\n video_comparison[1] = pred_videos\n frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # --- Save video ---\n imgs = [Image.fromarray(img) for img in frames]\n # Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\n B, S, _, _, _ = video_batch_BSHWC.shape\n action_batch_BSm11 = jnp.reshape(action_batch_E, (B, S-1, 1))\n for t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch_BSm11.shape[0]):\n action = action_batch_BSm11[row, t, 0]\n y_offset = row * video_batch_BSHWC.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\n imgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n )\n",python,tab
|
| 3 |
+
2,2823,"sample.py",5162,0,"",python,selection_mouse
|
| 4 |
+
3,2829,"sample.py",5161,0,"",python,selection_command
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-f23be3cf-4da5-450c-91f1-df9de045459c1752656830830-2025_07_16-11.08.01.978/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-fbd09e27-2302-4b0c-83a4-a77b7bc2e3dc1751440721102-2025_07_02-09.19.16.832/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-0f165f77-177d-4df8-8da6-833f9d4dc2621758655771904-2025_09_23-21.29.35.488/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-1505f3d0-0cb4-4cc0-84bf-678810d0ac8f1757148592235-2025_09_06-10.49.56.658/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-1c39eab1-1a04-48fa-9b08-becafb3fa49e1764420703814-2025_11_29-13.51.46.850/source.csv
ADDED
|
@@ -0,0 +1,4 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
2,128,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:51:46 PM [info] Activating crowd-code\n1:51:46 PM [info] Recording started\n1:51:46 PM [info] Initializing git provider using file system watchers...\n1:51:46 PM [info] No workspace folder found\n",Log,tab
|
| 3 |
+
3,2096,"extension-output-pdoom-org.crowd-code-#1-crowd-code",194,0,"1:51:48 PM [info] Retrying git provider initialization...\n1:51:48 PM [info] No workspace folder found\n",Log,content
|
| 4 |
+
4,14447,"Untitled-1",0,0,"",plaintext,tab
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-23a9afd3-c333-4e29-b2ed-efddc66dd34c1757847239961-2025_09_14-11.54.02.348/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-2e0f3382-d392-45fd-ba41-93c983d734d11764453259461-2025_11_29-22.54.23.496/source.csv
ADDED
|
@@ -0,0 +1,202 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,24,"tasks",0,0,"",Log,tab
|
| 3 |
+
2,69,"Untitled-1",0,0,"",plaintext,tab
|
| 4 |
+
3,75,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 5 |
+
4,879,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:54:23 PM [info] Activating crowd-code\n10:54:23 PM [info] Recording started\n10:54:23 PM [info] Initializing git provider using file system watchers...\n10:54:23 PM [info] No workspace folder found\n",Log,content
|
| 6 |
+
5,1833,"Untitled-1",0,0,"",plaintext,tab
|
| 7 |
+
6,3998,"TERMINAL",0,0,"Test",,terminal_focus
|
| 8 |
+
7,4005,"Untitled-1",0,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content
|
| 9 |
+
8,6237,"Untitled-1",46,0,"",plaintext,selection_command
|
| 10 |
+
9,6369,"Untitled-1",39,0,"",plaintext,selection_command
|
| 11 |
+
10,6543,"Untitled-1",32,0,"",plaintext,selection_command
|
| 12 |
+
11,6748,"Untitled-1",0,0,"",plaintext,selection_command
|
| 13 |
+
12,7346,"Untitled-1",32,0,"",plaintext,selection_command
|
| 14 |
+
13,7514,"Untitled-1",39,0,"",plaintext,selection_command
|
| 15 |
+
14,7661,"Untitled-1",46,0,"",plaintext,selection_command
|
| 16 |
+
15,7834,"Untitled-1",76,0,"",plaintext,selection_command
|
| 17 |
+
16,8621,"Untitled-1",46,0,"",plaintext,selection_command
|
| 18 |
+
17,9083,"Untitled-1",76,0,"",plaintext,selection_command
|
| 19 |
+
18,9805,"Untitled-1",46,0,"",plaintext,selection_command
|
| 20 |
+
19,12646,"Untitled-1",39,0,"",plaintext,selection_command
|
| 21 |
+
20,18008,"Untitled-1",76,0,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",plaintext,content
|
| 22 |
+
21,20282,"Untitled-1",32,0,"",plaintext,selection_command
|
| 23 |
+
22,20580,"Untitled-1",0,0,"",plaintext,selection_command
|
| 24 |
+
23,20952,"Untitled-1",32,0,"",plaintext,selection_command
|
| 25 |
+
24,21099,"Untitled-1",39,0,"",plaintext,selection_command
|
| 26 |
+
25,21248,"Untitled-1",46,0,"",plaintext,selection_command
|
| 27 |
+
26,21401,"Untitled-1",76,0,"",plaintext,selection_command
|
| 28 |
+
27,21563,"Untitled-1",107,0,"",plaintext,selection_command
|
| 29 |
+
28,21709,"Untitled-1",123,0,"",plaintext,selection_command
|
| 30 |
+
29,22167,"Untitled-1",138,0,"\n",plaintext,content
|
| 31 |
+
30,23300,"Untitled-1",123,0,"",plaintext,selection_command
|
| 32 |
+
31,23412,"Untitled-1",107,0,"",plaintext,selection_command
|
| 33 |
+
32,23566,"Untitled-1",76,0,"",plaintext,selection_command
|
| 34 |
+
33,23880,"Untitled-1",46,0,"",plaintext,selection_command
|
| 35 |
+
34,24198,"Untitled-1",76,0,"",plaintext,selection_command
|
| 36 |
+
35,24344,"Untitled-1",107,0,"",plaintext,selection_command
|
| 37 |
+
36,24511,"Untitled-1",123,0,"",plaintext,selection_command
|
| 38 |
+
37,24647,"Untitled-1",139,0,"",plaintext,selection_command
|
| 39 |
+
38,25323,"Untitled-1",138,1,"",plaintext,content
|
| 40 |
+
39,26131,"Untitled-1",137,0,"",plaintext,selection_command
|
| 41 |
+
40,26412,"Untitled-1",121,0,"",plaintext,selection_command
|
| 42 |
+
41,26534,"Untitled-1",90,0,"",plaintext,selection_command
|
| 43 |
+
42,26761,"Untitled-1",121,0,"",plaintext,selection_command
|
| 44 |
+
43,27048,"Untitled-1",90,0,"",plaintext,selection_command
|
| 45 |
+
44,27212,"Untitled-1",60,0,"",plaintext,selection_command
|
| 46 |
+
45,27328,"Untitled-1",44,0,"",plaintext,selection_command
|
| 47 |
+
46,27580,"Untitled-1",37,0,"",plaintext,selection_command
|
| 48 |
+
47,27612,"Untitled-1",14,0,"",plaintext,selection_command
|
| 49 |
+
48,28285,"Untitled-1",123,15,"",plaintext,content
|
| 50 |
+
49,30096,"TERMINAL",0,0,"echo ""Hello World""",,terminal_command
|
| 51 |
+
50,30097,"TERMINAL",0,0,"]633;CHello World\r\n[1m[7m%[27m[1m[0m \r \r",,terminal_output
|
| 52 |
+
51,30703,"Untitled-1",123,0,"",plaintext,selection_command
|
| 53 |
+
52,32236,"Untitled-1",107,0,"",plaintext,selection_command
|
| 54 |
+
53,38167,"Untitled-1",76,0,"",plaintext,selection_command
|
| 55 |
+
54,38444,"Untitled-1",46,0,"",plaintext,selection_command
|
| 56 |
+
55,38598,"Untitled-1",39,0,"",plaintext,selection_command
|
| 57 |
+
56,38714,"Untitled-1",32,0,"",plaintext,selection_command
|
| 58 |
+
57,38962,"Untitled-1",0,0,"",plaintext,selection_command
|
| 59 |
+
58,40133,"Untitled-1",32,0,"",plaintext,selection_command
|
| 60 |
+
59,40218,"Untitled-1",39,0,"",plaintext,selection_command
|
| 61 |
+
60,40395,"Untitled-1",46,0,"",plaintext,selection_command
|
| 62 |
+
61,40613,"Untitled-1",39,0,"",plaintext,selection_command
|
| 63 |
+
62,40816,"Untitled-1",46,0,"",plaintext,selection_command
|
| 64 |
+
63,40978,"Untitled-1",76,0,"",plaintext,selection_command
|
| 65 |
+
64,41114,"Untitled-1",107,0,"",plaintext,selection_command
|
| 66 |
+
65,41267,"Untitled-1",76,0,"",plaintext,selection_command
|
| 67 |
+
66,41518,"Untitled-1",46,0,"",plaintext,selection_command
|
| 68 |
+
67,41548,"Untitled-1",39,0,"",plaintext,selection_command
|
| 69 |
+
68,41581,"Untitled-1",32,0,"",plaintext,selection_command
|
| 70 |
+
69,41614,"Untitled-1",0,0,"",plaintext,selection_command
|
| 71 |
+
70,42180,"Untitled-1",32,0,"",plaintext,selection_command
|
| 72 |
+
71,42330,"Untitled-1",39,0,"",plaintext,selection_command
|
| 73 |
+
72,42481,"Untitled-1",46,0,"",plaintext,selection_command
|
| 74 |
+
73,42597,"Untitled-1",76,0,"",plaintext,selection_command
|
| 75 |
+
74,42848,"Untitled-1",107,0,"",plaintext,selection_command
|
| 76 |
+
75,42881,"Untitled-1",123,0,"",plaintext,selection_command
|
| 77 |
+
76,43036,"Untitled-1",107,0,"",plaintext,selection_command
|
| 78 |
+
77,43247,"Untitled-1",76,0,"",plaintext,selection_command
|
| 79 |
+
78,43383,"Untitled-1",46,0,"",plaintext,selection_command
|
| 80 |
+
79,43636,"Untitled-1",39,0,"",plaintext,selection_command
|
| 81 |
+
80,43665,"Untitled-1",32,0,"",plaintext,selection_command
|
| 82 |
+
81,43696,"Untitled-1",0,0,"",plaintext,selection_command
|
| 83 |
+
82,44530,"Untitled-1",32,0,"",plaintext,selection_command
|
| 84 |
+
83,46200,"Untitled-1",39,0,"",plaintext,selection_command
|
| 85 |
+
84,48490,"Untitled-1",76,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content
|
| 86 |
+
85,53040,"Untitled-1",108,13,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",plaintext,content
|
| 87 |
+
86,57817,"Untitled-1",171,77,"",plaintext,content
|
| 88 |
+
87,59866,"TERMINAL",0,0,"echo ""Hello World""",,terminal_command
|
| 89 |
+
88,59866,"TERMINAL",0,0,"]633;CHello World\r\n[1m[7m%[27m[1m[0m \r \r",,terminal_output
|
| 90 |
+
89,60308,"Untitled-1",171,0,"",plaintext,selection_command
|
| 91 |
+
90,113406,"Untitled-1",155,0,"",plaintext,selection_command
|
| 92 |
+
91,117397,"Untitled-1",171,0,"",plaintext,selection_command
|
| 93 |
+
92,119550,"Untitled-1",155,16,"REPLACED LINE 2\n",plaintext,selection_command
|
| 94 |
+
93,119627,"Untitled-1",139,32,"REPLACED LINE 1\nREPLACED LINE 2\n",plaintext,selection_command
|
| 95 |
+
94,119882,"Untitled-1",108,63,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2\n",plaintext,selection_command
|
| 96 |
+
95,119914,"Untitled-1",76,95,"/* crowd-pilot: insert start */\n/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2\n",plaintext,selection_command
|
| 97 |
+
96,119944,"Untitled-1",46,125,"/* crowd-pilot: insert end */\n/* crowd-pilot: insert start */\n/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2\n",plaintext,selection_command
|
| 98 |
+
97,119976,"Untitled-1",39,132,"line B\n/* crowd-pilot: insert end */\n/* crowd-pilot: insert start */\n/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2\n",plaintext,selection_command
|
| 99 |
+
98,120009,"Untitled-1",32,139,"line A\nline B\n/* crowd-pilot: insert end */\n/* crowd-pilot: insert start */\n/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2\n",plaintext,selection_command
|
| 100 |
+
99,120044,"Untitled-1",0,171,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n/* crowd-pilot: insert start */\n/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2\n",plaintext,selection_command
|
| 101 |
+
100,120217,"Untitled-1",0,171,"",plaintext,content
|
| 102 |
+
101,131627,"Untitled-1",0,0," ",plaintext,content
|
| 103 |
+
102,131630,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 104 |
+
103,131702,"Untitled-1",0,1,"",plaintext,content
|
| 105 |
+
104,134008,"Untitled-1",0,0," ",plaintext,content
|
| 106 |
+
105,134009,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 107 |
+
106,134572,"Untitled-1",0,1,"",plaintext,content
|
| 108 |
+
107,137710,"Untitled-1",0,0," ",plaintext,content
|
| 109 |
+
108,137712,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 110 |
+
109,137840,"Untitled-1",0,1,"",plaintext,content
|
| 111 |
+
110,139096,"Untitled-1",0,0," ",plaintext,content
|
| 112 |
+
111,139099,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 113 |
+
112,141426,"Untitled-1",0,1,"",plaintext,content
|
| 114 |
+
113,142423,"Untitled-1",0,0," ",plaintext,content
|
| 115 |
+
114,142425,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 116 |
+
115,142535,"Untitled-1",0,1,"",plaintext,content
|
| 117 |
+
116,144957,"Untitled-1",0,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content
|
| 118 |
+
117,146729,"Untitled-1",46,0,"",plaintext,selection_command
|
| 119 |
+
118,146983,"Untitled-1",39,0,"",plaintext,selection_command
|
| 120 |
+
119,147014,"Untitled-1",32,0,"",plaintext,selection_command
|
| 121 |
+
120,147046,"Untitled-1",0,0,"",plaintext,selection_command
|
| 122 |
+
121,147961,"Untitled-1",32,0,"",plaintext,selection_command
|
| 123 |
+
122,148210,"Untitled-1",39,0,"",plaintext,selection_command
|
| 124 |
+
123,148242,"Untitled-1",46,0,"",plaintext,selection_command
|
| 125 |
+
124,148275,"Untitled-1",76,0,"",plaintext,selection_command
|
| 126 |
+
125,148612,"Untitled-1",75,1,"",plaintext,content
|
| 127 |
+
126,148864,"Untitled-1",46,29,"",plaintext,content
|
| 128 |
+
127,149035,"Untitled-1",45,1,"",plaintext,content
|
| 129 |
+
128,149370,"Untitled-1",44,1,"",plaintext,content
|
| 130 |
+
129,149664,"Untitled-1",39,5,"",plaintext,content
|
| 131 |
+
130,149828,"Untitled-1",38,1,"",plaintext,content
|
| 132 |
+
131,149980,"Untitled-1",32,6,"",plaintext,content
|
| 133 |
+
132,150131,"Untitled-1",31,1,"",plaintext,content
|
| 134 |
+
133,150270,"Untitled-1",0,31,"",plaintext,content
|
| 135 |
+
134,152155,"Untitled-1",0,0,"\n",plaintext,content
|
| 136 |
+
135,152455,"Untitled-1",0,0,"",plaintext,selection_command
|
| 137 |
+
136,152924,"Untitled-1",0,0,"\n",plaintext,content
|
| 138 |
+
137,156644,"Untitled-1",0,0,"",plaintext,selection_command
|
| 139 |
+
138,156912,"Untitled-1",1,0,"",plaintext,selection_command
|
| 140 |
+
139,158206,"Untitled-1",0,0,"",plaintext,selection_command
|
| 141 |
+
140,159714,"Untitled-1",1,0,"",plaintext,selection_command
|
| 142 |
+
141,159864,"Untitled-1",2,0,"",plaintext,selection_command
|
| 143 |
+
142,160281,"Untitled-1",1,0,"",plaintext,selection_command
|
| 144 |
+
143,160461,"Untitled-1",0,0,"",plaintext,selection_command
|
| 145 |
+
144,162829,"Untitled-1",2,0,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",plaintext,content
|
| 146 |
+
145,164185,"Untitled-1",49,15,"",plaintext,content
|
| 147 |
+
146,166047,"Untitled-1",1,0,"",plaintext,selection_command
|
| 148 |
+
147,166978,"Untitled-1",2,0,"",plaintext,selection_command
|
| 149 |
+
148,168810,"Untitled-1",33,0,"",plaintext,selection_command
|
| 150 |
+
149,169194,"Untitled-1",49,0,"",plaintext,selection_command
|
| 151 |
+
150,170117,"Untitled-1",33,0,"",plaintext,selection_command
|
| 152 |
+
151,170297,"Untitled-1",2,0,"",plaintext,selection_command
|
| 153 |
+
152,171172,"Untitled-1",33,0,"",plaintext,selection_command
|
| 154 |
+
153,171312,"Untitled-1",49,0,"",plaintext,selection_command
|
| 155 |
+
154,171863,"Untitled-1",33,0,"",plaintext,selection_command
|
| 156 |
+
155,172014,"Untitled-1",2,0,"",plaintext,selection_command
|
| 157 |
+
156,172149,"Untitled-1",1,0,"",plaintext,selection_command
|
| 158 |
+
157,172525,"Untitled-1",0,0,"",plaintext,selection_command
|
| 159 |
+
158,173313,"Untitled-1",1,0,"",plaintext,selection_command
|
| 160 |
+
159,173917,"Untitled-1",0,0,"",plaintext,selection_command
|
| 161 |
+
160,174658,"Untitled-1",1,0,"",plaintext,selection_command
|
| 162 |
+
161,174870,"Untitled-1",2,0,"",plaintext,selection_command
|
| 163 |
+
162,175249,"Untitled-1",1,0,"",plaintext,selection_command
|
| 164 |
+
163,175594,"Untitled-1",0,0,"",plaintext,selection_command
|
| 165 |
+
164,203602,"Untitled-1",1,0,"",plaintext,selection_command
|
| 166 |
+
165,203731,"Untitled-1",2,0,"",plaintext,selection_command
|
| 167 |
+
166,203874,"Untitled-1",33,0,"",plaintext,selection_command
|
| 168 |
+
167,204060,"Untitled-1",49,0,"",plaintext,selection_command
|
| 169 |
+
168,204639,"Untitled-1",33,0,"",plaintext,selection_command
|
| 170 |
+
169,204814,"Untitled-1",2,0,"",plaintext,selection_command
|
| 171 |
+
170,205316,"Untitled-1",2,0,"\n",plaintext,content
|
| 172 |
+
171,206496,"Untitled-1",2,1,"",plaintext,content
|
| 173 |
+
172,206872,"Untitled-1",2,0,"R",plaintext,content
|
| 174 |
+
173,207366,"Untitled-1",2,1,"",plaintext,content
|
| 175 |
+
174,208621,"Untitled-1",32,0,"\n",plaintext,content
|
| 176 |
+
175,211484,"Untitled-1",2,0,"",plaintext,selection_command
|
| 177 |
+
176,211495,"Untitled-1",33,0,"",plaintext,selection_command
|
| 178 |
+
177,248573,"Untitled-1",2,0,"",plaintext,selection_command
|
| 179 |
+
178,250205,"Untitled-1",33,0,"",plaintext,selection_command
|
| 180 |
+
179,251861,"Untitled-1",2,0,"",plaintext,selection_command
|
| 181 |
+
180,252220,"Untitled-1",3,0,"",plaintext,selection_command
|
| 182 |
+
181,252442,"Untitled-1",4,0,"",plaintext,selection_command
|
| 183 |
+
182,252629,"Untitled-1",5,0,"",plaintext,selection_command
|
| 184 |
+
183,252809,"Untitled-1",6,0,"",plaintext,selection_command
|
| 185 |
+
184,252969,"Untitled-1",7,0,"",plaintext,selection_command
|
| 186 |
+
185,253194,"Untitled-1",8,0,"",plaintext,selection_command
|
| 187 |
+
186,253400,"Untitled-1",9,0,"",plaintext,selection_command
|
| 188 |
+
187,253667,"Untitled-1",10,0,"",plaintext,selection_command
|
| 189 |
+
188,254236,"Untitled-1",9,0,"",plaintext,selection_command
|
| 190 |
+
189,254513,"Untitled-1",8,0,"",plaintext,selection_command
|
| 191 |
+
190,254626,"Untitled-1",7,0,"",plaintext,selection_command
|
| 192 |
+
191,254808,"Untitled-1",6,0,"",plaintext,selection_command
|
| 193 |
+
192,254962,"Untitled-1",5,0,"",plaintext,selection_command
|
| 194 |
+
193,255109,"Untitled-1",4,0,"",plaintext,selection_command
|
| 195 |
+
194,255264,"Untitled-1",3,0,"",plaintext,selection_command
|
| 196 |
+
195,255413,"Untitled-1",2,0,"",plaintext,selection_command
|
| 197 |
+
196,256369,"Untitled-1",12,0,"",plaintext,selection_command
|
| 198 |
+
197,257481,"Untitled-1",22,0,"",plaintext,selection_command
|
| 199 |
+
198,260088,"Untitled-1",23,0,"",plaintext,selection_command
|
| 200 |
+
199,260424,"Untitled-1",24,0,"",plaintext,selection_command
|
| 201 |
+
200,260942,"Untitled-1",25,0,"",plaintext,selection_command
|
| 202 |
+
201,261520,"Untitled-1",33,0,"",plaintext,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-477e72ce-2f20-4f16-86c7-8d47149aaaf41762423822508-2025_11_06-11.10.25.383/source.csv
ADDED
|
@@ -0,0 +1,410 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,5,"src/extension.ts",0,0,"// The module 'vscode' contains the VS Code extensibility API\n// Import the module and reference it with the alias vscode in your code below\nimport * as vscode from 'vscode';\n\n// This method is called when your extension is activated\n// Your extension is activated the very first time the command is executed\nexport function activate(context: vscode.ExtensionContext) {\n\n\t// Use the console to output diagnostic information (console.log) and errors (console.error)\n\t// This line of code will only be executed once when your extension is activated\n\tconsole.log('Congratulations, your extension ""crowd-pilot"" is now active!');\n\n\t// Configure terminal to allow tab keybinding to work\n\t// This makes the command skip the shell so VS Code can intercept tab in terminals\n\tconst config = vscode.workspace.getConfiguration('terminal.integrated');\n\tconst commandsToSkipShell = config.get<string[]>('commandsToSkipShell', []);\n\tif (!commandsToSkipShell.includes('crowd-pilot.testRun')) {\n\t\tcommandsToSkipShell.push('crowd-pilot.testRun');\n\t\tconfig.update('commandsToSkipShell', commandsToSkipShell, vscode.ConfigurationTarget.Global);\n\t}\n\n\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user\n\t\tvscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');\n\t});\n\n\tcontext.subscriptions.push(disposable);\n\tconst testRun = vscode.commands.registerCommand('crowd-pilot.testRun', async () => {\n\t\tconst editor = vscode.window.activeTextEditor;\n\t\tconst doc = editor!.document;\n\t\tconst term = vscode.window.terminals[0] ?? vscode.window.createTerminal('Test');\n\t\tconst git = vscode.extensions.getExtension('vscode.git')?.exports?.getAPI(1);\n\t\tconst repo = git?.repositories?.[0];\n\t\n\t\t// Emit a few actions:\n\t\tawait vscode.window.showTextDocument(doc);\n\t\teditor!.selections = [new vscode.Selection(0, 0, 0, 0)];\n\t\tawait editor!.edit(e => e.insert(new vscode.Position(0, 0), 'hello world\n'));\n\t\tterm.show();\n\t\tterm.sendText('echo VSCode test');\n\t\t//await repo?.pull();\n\t\n\t\tvscode.window.showInformationMessage('All actions emitted');\n\t });\n\n\tcontext.subscriptions.push(testRun);\n}\n\n// This method is called when your extension is deactivated\nexport function deactivate() {}\n",typescript,tab
|
| 3 |
+
2,68,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:10:25 AM [info] Activating crowd-code\n11:10:25 AM [info] Recording started\n11:10:25 AM [info] Initializing git provider using file system watchers...\n11:10:25 AM [info] Git repository found\n11:10:25 AM [info] Git provider initialized successfully\n",Log,tab
|
| 4 |
+
3,104,"extension-output-pdoom-org.crowd-code-#1-crowd-code",250,0,"11:10:25 AM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,939,"src/extension.ts",0,0,"",typescript,tab
|
| 6 |
+
5,2685,"TERMINAL",0,0,"",,terminal_focus
|
| 7 |
+
6,23581,"src/extension.ts",840,0,"",typescript,selection_command
|
| 8 |
+
7,23715,"src/extension.ts",918,0,"",typescript,selection_command
|
| 9 |
+
8,23869,"src/extension.ts",979,0,"",typescript,selection_command
|
| 10 |
+
9,25338,"src/extension.ts",980,0,"",typescript,selection_command
|
| 11 |
+
10,55725,"package.json",0,0,"{\n ""name"": ""crowd-pilot"",\n ""displayName"": ""crowd-pilot-extension"",\n ""description"": ""Teaching language models to code like humans."",\n ""version"": ""0.0.1"",\n ""engines"": {\n ""vscode"": ""^1.99.3""\n },\n ""categories"": [\n ""Other""\n ],\n ""activationEvents"": [],\n ""main"": ""./out/extension.js"",\n ""contributes"": {\n ""commands"": [\n {\n ""command"": ""crowd-pilot.helloWorld"",\n ""title"": ""Hello World!""\n },\n {\n ""command"": ""crowd-pilot.testRun"",\n ""title"": ""Test Run""\n }\n ],\n ""keybindings"": [\n {\n ""command"": ""crowd-pilot.testRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""editorTextFocus || terminalFocus""\n }\n ]\n },\n ""scripts"": {\n ""vscode:prepublish"": ""npm run compile"",\n ""compile"": ""tsc -p ./"",\n ""watch"": ""tsc -watch -p ./"",\n ""pretest"": ""npm run compile && npm run lint"",\n ""lint"": ""eslint src"",\n ""test"": ""vscode-test""\n },\n ""devDependencies"": {\n ""@types/vscode"": ""^1.105.0"",\n ""@types/mocha"": ""^10.0.10"",\n ""@types/node"": ""22.x"",\n ""@typescript-eslint/eslint-plugin"": ""^8.45.0"",\n ""@typescript-eslint/parser"": ""^8.45.0"",\n ""eslint"": ""^9.36.0"",\n ""typescript"": ""^5.9.3"",\n ""@vscode/test-cli"": ""^0.0.11"",\n ""@vscode/test-electron"": ""^2.5.2""\n }\n}\n",json,tab
|
| 12 |
+
11,60296,"package.json",633,50," ""when"": ""editorTextFocus || terminalFocus""",json,selection_command
|
| 13 |
+
12,60916,"package.json",641,0,"",json,selection_command
|
| 14 |
+
13,110022,".vscode/extensions.json",0,0,"{\n\t// See http://go.microsoft.com/fwlink/?LinkId=827846\n\t// for the documentation about the extensions.json format\n\t""recommendations"": [\n\t\t""dbaeumer.vscode-eslint"",\n\t\t""ms-vscode.extension-test-runner""\n\t]\n}\n",jsonc,tab
|
| 15 |
+
14,112539,".vscode/launch.json",0,0,"// A launch configuration that compiles the extension and then opens it inside a new window\n// Use IntelliSense to learn about possible attributes.\n// Hover to view descriptions of existing attributes.\n// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n{\n\t""version"": ""0.2.0"",\n\t""configurations"": [\n\t\t{\n\t\t\t""name"": ""Run Extension"",\n\t\t\t""type"": ""extensionHost"",\n\t\t\t""request"": ""launch"",\n\t\t\t""args"": [\n\t\t\t\t""--extensionDevelopmentPath=${workspaceFolder}""\n\t\t\t],\n\t\t\t""timeout"": 20000, // Increase the timeout to 20 seconds\n\t\t\t""outFiles"": [\n\t\t\t\t""${workspaceFolder}/out/**/*.js""\n\t\t\t],\n\t\t\t""preLaunchTask"": ""${defaultBuildTask}""\n\t\t}\n\t]\n}\n",jsonc,tab
|
| 16 |
+
15,116894,".vscode/extensions.json",0,0,"",jsonc,tab
|
| 17 |
+
16,122801,"src/extension.ts",0,0,"",typescript,tab
|
| 18 |
+
17,122822,"src/extension.ts",625,0,"",typescript,selection_command
|
| 19 |
+
18,130160,"out/extension.js.map",0,0,"{""version"":3,""file"":""extension.js"",""sourceRoot"":"""",""sources"":[""../src/extension.ts""],""names"":[],""mappings"":"";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAMA,4BA4CC;AAGD,gCAA+B;AArD/B,6DAA6D;AAC7D,8EAA8E;AAC9E,+CAAiC;AAEjC,yDAAyD;AACzD,0EAA0E;AAC1E,SAAgB,QAAQ,CAAC,OAAgC;IAExD,4FAA4F;IAC5F,gFAAgF;IAChF,OAAO,CAAC,GAAG,CAAC,8DAA8D,CAAC,CAAC;IAE5E,qDAAqD;IACrD,kFAAkF;IAClF,MAAM,MAAM,GAAG,MAAM,CAAC,SAAS,CAAC,gBAAgB,CAAC,qBAAqB,CAAC,CAAC;IACxE,MAAM,mBAAmB,GAAG,MAAM,CAAC,GAAG,CAAW,qBAAqB,EAAE,EAAE,CAAC,CAAC;IAC5E,IAAI,CAAC,mBAAmB,CAAC,QAAQ,CAAC,qBAAqB,CAAC,EAAE,CAAC;QAC1D,mBAAmB,CAAC,IAAI,CAAC,qBAAqB,CAAC,CAAC;QAChD,MAAM,CAAC,MAAM,CAAC,qBAAqB,EAAE,mBAAmB,EAAE,MAAM,CAAC,mBAAmB,CAAC,MAAM,CAAC,CAAC;IAC9F,CAAC;IAED,wDAAwD;IACxD,qEAAqE;IACrE,uEAAuE;IACvE,MAAM,UAAU,GAAG,MAAM,CAAC,QAAQ,CAAC,eAAe,CAAC,wBAAwB,EAAE,GAAG,EAAE;QACjF,+EAA+E;QAC/E,oCAAoC;QACpC,MAAM,CAAC,MAAM,CAAC,sBAAsB,CAAC,yCAAyC,CAAC,CAAC;IACjF,CAAC,CAAC,CAAC;IAEH,OAAO,CAAC,aAAa,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;IACvC,MAAM,OAAO,GAAG,MAAM,CAAC,QAAQ,CAAC,eAAe,CAAC,qBAAqB,EAAE,KAAK,IAAI,EAAE;QACjF,MAAM,MAAM,GAAG,MAAM,CAAC,MAAM,CAAC,gBAAgB,CAAC;QAC9C,MAAM,GAAG,GAAG,MAAO,CAAC,QAAQ,CAAC;QAC7B,MAAM,IAAI,GAAG,MAAM,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,CAAC,IAAI,MAAM,CAAC,MAAM,CAAC,cAAc,CAAC,MAAM,CAAC,CAAC;QAChF,MAAM,GAAG,GAAG,MAAM,CAAC,UAAU,CAAC,YAAY,CAAC,YAAY,CAAC,EAAE,OAAO,EAAE,MAAM,CAAC,CAAC,CAAC,CAAC;QAC7E,MAAM,IAAI,GAAG,GAAG,EAAE,YAAY,EAAE,CAAC,CAAC,CAAC,CAAC;QAEpC,sBAAsB;QACtB,MAAM,MAAM,CAAC,MAAM,CAAC,gBAAgB,CAAC,GAAG,CAAC,CAAC;QAC1C,MAAO,CAAC,UAAU,GAAG,CAAC,IAAI,MAAM,CAAC,SAAS,CAAC,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;QACxD,MAAM,MAAO,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,MAAM,CAAC,IAAI,MAAM,CAAC,QAAQ,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,eAAe,CAAC,CAAC,CAAC;QAC9E,IAAI,CAAC,IAAI,EAAE,CAAC;QACZ,IAAI,CAAC,QAAQ,CAAC,kBAAkB,CAAC,CAAC;QAClC,qBAAqB;QAErB,MAAM,CAAC,MAAM,CAAC,sBAAsB,CAAC,qBAAqB,CAAC,CAAC;IAC3D,CAAC,CAAC,CAAC;IAEL,OAAO,CAAC,aAAa,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;AACrC,CAAC;AAED,2DAA2D;AAC3D,SAAgB,UAAU,KAAI,CAAC""}",json,tab
|
| 20 |
+
19,136924,"out/extension.js",0,0,"""use strict"";\nvar __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (""get"" in desc ? !m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n}));\nvar __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {\n Object.defineProperty(o, ""default"", { enumerable: true, value: v });\n}) : function(o, v) {\n o[""default""] = v;\n});\nvar __importStar = (this && this.__importStar) || (function () {\n var ownKeys = function(o) {\n ownKeys = Object.getOwnPropertyNames || function (o) {\n var ar = [];\n for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;\n return ar;\n };\n return ownKeys(o);\n };\n return function (mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== ""default"") __createBinding(result, mod, k[i]);\n __setModuleDefault(result, mod);\n return result;\n };\n})();\nObject.defineProperty(exports, ""__esModule"", { value: true });\nexports.activate = activate;\nexports.deactivate = deactivate;\n// The module 'vscode' contains the VS Code extensibility API\n// Import the module and reference it with the alias vscode in your code below\nconst vscode = __importStar(require(""vscode""));\n// This method is called when your extension is activated\n// Your extension is activated the very first time the command is executed\nfunction activate(context) {\n // Use the console to output diagnostic information (console.log) and errors (console.error)\n // This line of code will only be executed once when your extension is activated\n console.log('Congratulations, your extension ""crowd-pilot"" is now active!');\n // Configure terminal to allow tab keybinding to work\n // This makes the command skip the shell so VS Code can intercept tab in terminals\n const config = vscode.workspace.getConfiguration('terminal.integrated');\n const commandsToSkipShell = config.get('commandsToSkipShell', []);\n if (!commandsToSkipShell.includes('crowd-pilot.testRun')) {\n commandsToSkipShell.push('crowd-pilot.testRun');\n config.update('commandsToSkipShell', commandsToSkipShell, vscode.ConfigurationTarget.Global);\n }\n // The command has been defined in the package.json file\n // Now provide the implementation of the command with registerCommand\n // The commandId parameter must match the command field in package.json\n const disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n // The code you place here will be executed every time your command is executed\n // Display a message box to the user\n vscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');\n });\n context.subscriptions.push(disposable);\n const testRun = vscode.commands.registerCommand('crowd-pilot.testRun', async () => {\n const editor = vscode.window.activeTextEditor;\n const doc = editor.document;\n const term = vscode.window.terminals[0] ?? 
vscode.window.createTerminal('Test');\n const git = vscode.extensions.getExtension('vscode.git')?.exports?.getAPI(1);\n const repo = git?.repositories?.[0];\n // Emit a few actions:\n await vscode.window.showTextDocument(doc);\n editor.selections = [new vscode.Selection(0, 0, 0, 0)];\n await editor.edit(e => e.insert(new vscode.Position(0, 0), 'hello world\n'));\n term.show();\n term.sendText('echo VSCode test');\n //await repo?.pull();\n vscode.window.showInformationMessage('All actions emitted');\n });\n context.subscriptions.push(testRun);\n}\n// This method is called when your extension is deactivated\nfunction deactivate() { }\n//# sourceMappingURL=extension.js.map",javascript,tab
|
| 21 |
+
20,137074,"out/extension.js",2112,0,"",javascript,selection_command
|
| 22 |
+
21,143601,".vscodeignore",0,0,".vscode/**\n.vscode-test/**\nsrc/**\n.gitignore\n.yarnrc\nvsc-extension-quickstart.md\n**/tsconfig.json\n**/eslint.config.mjs\n**/*.map\n**/*.ts\n**/.vscode-test.*\n",ignore,tab
|
| 23 |
+
22,158104,".gitignore",0,0,"",ignore,tab
|
| 24 |
+
23,159461,".gitignore",0,0,"*",ignore,content
|
| 25 |
+
24,159466,".gitignore",1,0,"",ignore,selection_keyboard
|
| 26 |
+
25,159893,".gitignore",0,1,"",ignore,content
|
| 27 |
+
26,160796,".gitignore",0,0,"o",ignore,content
|
| 28 |
+
27,160798,".gitignore",1,0,"",ignore,selection_keyboard
|
| 29 |
+
28,160857,".gitignore",1,0,"u",ignore,content
|
| 30 |
+
29,160859,".gitignore",2,0,"",ignore,selection_keyboard
|
| 31 |
+
30,160936,".gitignore",2,0,"t",ignore,content
|
| 32 |
+
31,160937,".gitignore",3,0,"",ignore,selection_keyboard
|
| 33 |
+
32,161163,".gitignore",3,0,"/",ignore,content
|
| 34 |
+
33,161166,".gitignore",4,0,"",ignore,selection_keyboard
|
| 35 |
+
34,161265,".gitignore",3,0,"",ignore,selection_command
|
| 36 |
+
35,168123,".gitignore",4,0,"",ignore,selection_mouse
|
| 37 |
+
36,168129,".gitignore",3,0,"",ignore,selection_command
|
| 38 |
+
37,168731,".gitignore",4,0,"\n",ignore,content
|
| 39 |
+
38,169023,".gitignore",5,0,".",ignore,content
|
| 40 |
+
39,169024,".gitignore",6,0,"",ignore,selection_keyboard
|
| 41 |
+
40,169119,".gitignore",6,0,"v",ignore,content
|
| 42 |
+
41,169120,".gitignore",7,0,"",ignore,selection_keyboard
|
| 43 |
+
42,169406,".gitignore",7,0,"s",ignore,content
|
| 44 |
+
43,169409,".gitignore",8,0,"",ignore,selection_keyboard
|
| 45 |
+
44,169585,".gitignore",8,0,"c",ignore,content
|
| 46 |
+
45,169588,".gitignore",9,0,"",ignore,selection_keyboard
|
| 47 |
+
46,169770,".gitignore",9,0,"o",ignore,content
|
| 48 |
+
47,169773,".gitignore",10,0,"",ignore,selection_keyboard
|
| 49 |
+
48,169920,".gitignore",10,0,"d",ignore,content
|
| 50 |
+
49,169923,".gitignore",11,0,"",ignore,selection_keyboard
|
| 51 |
+
50,170003,".gitignore",11,0,"e",ignore,content
|
| 52 |
+
51,170007,".gitignore",12,0,"",ignore,selection_keyboard
|
| 53 |
+
52,170177,".gitignore",12,0,"/",ignore,content
|
| 54 |
+
53,170180,".gitignore",13,0,"",ignore,selection_keyboard
|
| 55 |
+
54,170273,".gitignore",12,0,"",ignore,selection_command
|
| 56 |
+
55,176964,".gitignore",13,0,"\n",ignore,content
|
| 57 |
+
56,177490,".gitignore",14,0,".",ignore,content
|
| 58 |
+
57,177491,".gitignore",15,0,"",ignore,selection_keyboard
|
| 59 |
+
58,177502,".gitignore",15,0,"v",ignore,content
|
| 60 |
+
59,177504,".gitignore",16,0,"",ignore,selection_keyboard
|
| 61 |
+
60,177721,".gitignore",16,0,"s",ignore,content
|
| 62 |
+
61,177723,".gitignore",17,0,"",ignore,selection_keyboard
|
| 63 |
+
62,178002,".gitignore",17,0,"c",ignore,content
|
| 64 |
+
63,178005,".gitignore",18,0,"",ignore,selection_keyboard
|
| 65 |
+
64,178430,".gitignore",18,0,"o",ignore,content
|
| 66 |
+
65,178435,".gitignore",19,0,"",ignore,selection_keyboard
|
| 67 |
+
66,178585,".gitignore",19,0,"d",ignore,content
|
| 68 |
+
67,178590,".gitignore",20,0,"",ignore,selection_keyboard
|
| 69 |
+
68,178652,".gitignore",20,0,"e",ignore,content
|
| 70 |
+
69,178656,".gitignore",21,0,"",ignore,selection_keyboard
|
| 71 |
+
70,179129,".gitignore",21,0,"*",ignore,content
|
| 72 |
+
71,179132,".gitignore",22,0,"",ignore,selection_keyboard
|
| 73 |
+
72,179381,".gitignore",21,0,"",ignore,selection_command
|
| 74 |
+
73,183014,"package.json",0,0,"",json,tab
|
| 75 |
+
74,183027,"package.json",384,0,"",json,selection_command
|
| 76 |
+
75,184543,"out/extension.js",0,0,"",javascript,tab
|
| 77 |
+
76,184579,"out/extension.js",2112,0,"",javascript,selection_command
|
| 78 |
+
77,185874,"package.json",0,0,"",json,tab
|
| 79 |
+
78,187553,".gitignore",0,0,"",ignore,tab
|
| 80 |
+
79,188251,".gitignore",12,0,"",ignore,selection_command
|
| 81 |
+
80,188335,".gitignore",3,0,"",ignore,selection_command
|
| 82 |
+
81,192053,".gitignore",22,0,"",ignore,selection_mouse
|
| 83 |
+
82,192055,".gitignore",21,0,"",ignore,selection_command
|
| 84 |
+
83,199087,".gitignore",22,0,"",ignore,selection_mouse
|
| 85 |
+
84,199091,".gitignore",21,0,"",ignore,selection_command
|
| 86 |
+
85,205568,".gitignore",22,0,"",ignore,selection_mouse
|
| 87 |
+
86,205573,".gitignore",21,0,"",ignore,selection_command
|
| 88 |
+
87,207138,".gitignore",4,0,"",ignore,selection_mouse
|
| 89 |
+
88,207139,".gitignore",3,0,"",ignore,selection_command
|
| 90 |
+
89,207809,".gitignore",4,0,"",ignore,selection_command
|
| 91 |
+
90,208037,".gitignore",4,0,"&",ignore,content
|
| 92 |
+
91,208040,".gitignore",5,0,"",ignore,selection_keyboard
|
| 93 |
+
92,208437,".gitignore",4,1,"",ignore,content
|
| 94 |
+
93,208752,".gitignore",4,0,"*",ignore,content
|
| 95 |
+
94,208754,".gitignore",5,0,"",ignore,selection_keyboard
|
| 96 |
+
95,209011,".gitignore",4,0,"",ignore,selection_command
|
| 97 |
+
96,215032,".gitignore",4,1,"",ignore,content
|
| 98 |
+
97,215048,".gitignore",3,0,"",ignore,selection_command
|
| 99 |
+
98,218218,".gitignore",0,0,"",ignore,selection_command
|
| 100 |
+
99,218572,".gitignore",0,0,".",ignore,content
|
| 101 |
+
100,218574,".gitignore",1,0,"",ignore,selection_keyboard
|
| 102 |
+
101,218903,".gitignore",1,0,"/",ignore,content
|
| 103 |
+
102,218905,".gitignore",2,0,"",ignore,selection_keyboard
|
| 104 |
+
103,219112,".gitignore",1,0,"",ignore,selection_command
|
| 105 |
+
104,223395,"out/extension.js",0,0,"",javascript,tab
|
| 106 |
+
105,226262,"out/extension.js.map",0,0,"",json,tab
|
| 107 |
+
106,228203,".gitignore",0,0,"",ignore,tab
|
| 108 |
+
107,234758,"src/extension.ts",0,0,"// The module 'vscode' contains the VS Code extensibility API\n// Import the module and reference it with the alias vscode in your code below\nimport * as vscode from 'vscode';\n\n// This method is called when your extension is activated\n// Your extension is activated the very first time the command is executed\nexport function activate(context: vscode.ExtensionContext) {\n\n\t// Use the console to output diagnostic information (console.log) and errors (console.error)\n\t// This line of code will only be executed once when your extension is activated\n\tconsole.log('Congratulations, your extension ""crowd-pilot"" is now active!');\n\n\t// Configure terminal to allow tab keybinding to work\n\t// This makes the command skip the shell so VS Code can intercept tab in terminals\n\tconst config = vscode.workspace.getConfiguration('terminal.integrated');\n\tconst commandsToSkipShell = config.get<string[]>('commandsToSkipShell', []);\n\tif (!commandsToSkipShell.includes('crowd-pilot.testRun')) {\n\t\tcommandsToSkipShell.push('crowd-pilot.testRun');\n\t\tconfig.update('commandsToSkipShell', commandsToSkipShell, vscode.ConfigurationTarget.Global);\n\t}\n\n\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user\n\t\tvscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');\n\t});\n\n\tcontext.subscriptions.push(disposable);\n\tconst testRun = vscode.commands.registerCommand('crowd-pilot.testRun', async () => {\n\t\tconst editor = vscode.window.activeTextEditor;\n\t\tconst doc = editor!.document;\n\t\tconst term = vscode.window.terminals[0] ?? vscode.window.createTerminal('Test');\n\t\tconst git = vscode.extensions.getExtension('vscode.git')?.exports?.getAPI(1);\n\t\tconst repo = git?.repositories?.[0];\n\t\n\t\t// Emit a few actions:\n\t\tawait vscode.window.showTextDocument(doc);\n\t\teditor!.selections = [new vscode.Selection(0, 0, 0, 0)];\n\t\tawait editor!.edit(e => e.insert(new vscode.Position(0, 0), 'hello world\n'));\n\t\tterm.show();\n\t\tterm.sendText('echo VSCode test');\n\t\t//await repo?.pull();\n\t\n\t\tvscode.window.showInformationMessage('All actions emitted');\n\t });\n\n\tcontext.subscriptions.push(testRun);\n}\n\n// This method is called when your extension is deactivated\nexport function deactivate() {}\n",typescript,tab
|
| 109 |
+
108,234777,"src/extension.ts",625,0,"",typescript,selection_command
|
| 110 |
+
109,235401,"package.json",0,0,"{\n ""name"": ""crowd-pilot"",\n ""displayName"": ""crowd-pilot-extension"",\n ""description"": ""Teaching language models to code like humans."",\n ""version"": ""0.0.1"",\n ""engines"": {\n ""vscode"": ""^1.99.3""\n },\n ""categories"": [\n ""Other""\n ],\n ""activationEvents"": [],\n ""main"": ""./out/extension.js"",\n ""contributes"": {\n ""commands"": [\n {\n ""command"": ""crowd-pilot.helloWorld"",\n ""title"": ""Hello World!""\n },\n {\n ""command"": ""crowd-pilot.testRun"",\n ""title"": ""Test Run""\n }\n ],\n ""keybindings"": [\n {\n ""command"": ""crowd-pilot.testRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""editorTextFocus || terminalFocus""\n }\n ]\n },\n ""scripts"": {\n ""vscode:prepublish"": ""npm run compile"",\n ""compile"": ""tsc -p ./"",\n ""watch"": ""tsc -watch -p ./"",\n ""pretest"": ""npm run compile && npm run lint"",\n ""lint"": ""eslint src"",\n ""test"": ""vscode-test""\n },\n ""devDependencies"": {\n ""@types/vscode"": ""^1.105.0"",\n ""@types/mocha"": ""^10.0.10"",\n ""@types/node"": ""22.x"",\n ""@typescript-eslint/eslint-plugin"": ""^8.45.0"",\n ""@typescript-eslint/parser"": ""^8.45.0"",\n ""eslint"": ""^9.36.0"",\n ""typescript"": ""^5.9.3"",\n ""@vscode/test-cli"": ""^0.0.11"",\n ""@vscode/test-electron"": ""^2.5.2""\n }\n}\n",json,tab
|
| 111 |
+
110,235410,"package.json",384,0,"",json,selection_command
|
| 112 |
+
111,260236,"package.json",0,0,"Switched from branch 'main' to 'tab-in-editor-and-terminal'",json,git_branch_checkout
|
| 113 |
+
112,342303,".gitignore",0,0,"./out/\n.vscode/\n.vscode*",ignore,tab
|
| 114 |
+
113,347240,"src/extension.ts",0,0,"// The module 'vscode' contains the VS Code extensibility API\n// Import the module and reference it with the alias vscode in your code below\nimport * as vscode from 'vscode';\n\n// This method is called when your extension is activated\n// Your extension is activated the very first time the command is executed\nexport function activate(context: vscode.ExtensionContext) {\n\n\t// Use the console to output diagnostic information (console.log) and errors (console.error)\n\t// This line of code will only be executed once when your extension is activated\n\tconsole.log('Congratulations, your extension ""crowd-pilot"" is now active!');\n\n\t// Configure terminal to allow tab keybinding to work\n\t// This makes the command skip the shell so VS Code can intercept tab in terminals\n\tconst config = vscode.workspace.getConfiguration('terminal.integrated');\n\tconst commandsToSkipShell = config.get<string[]>('commandsToSkipShell', []);\n\tif (!commandsToSkipShell.includes('crowd-pilot.testRun')) {\n\t\tcommandsToSkipShell.push('crowd-pilot.testRun');\n\t\tconfig.update('commandsToSkipShell', commandsToSkipShell, vscode.ConfigurationTarget.Global);\n\t}\n\n\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user\n\t\tvscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');\n\t});\n\n\tcontext.subscriptions.push(disposable);\n\tconst testRun = vscode.commands.registerCommand('crowd-pilot.testRun', async () => {\n\t\tconst editor = vscode.window.activeTextEditor;\n\t\tconst doc = editor!.document;\n\t\tconst term = vscode.window.terminals[0] ?? vscode.window.createTerminal('Test');\n\t\tconst git = vscode.extensions.getExtension('vscode.git')?.exports?.getAPI(1);\n\t\tconst repo = git?.repositories?.[0];\n\t\n\t\t// Emit a few actions:\n\t\tawait vscode.window.showTextDocument(doc);\n\t\teditor!.selections = [new vscode.Selection(0, 0, 0, 0)];\n\t\tawait editor!.edit(e => e.insert(new vscode.Position(0, 0), 'hello world\n'));\n\t\tterm.show();\n\t\tterm.sendText('echo VSCode test');\n\t\t//await repo?.pull();\n\t\n\t\tvscode.window.showInformationMessage('All actions emitted');\n\t });\n\n\tcontext.subscriptions.push(testRun);\n}\n\n// This method is called when your extension is deactivated\nexport function deactivate() {}\n",typescript,tab
|
| 115 |
+
114,348599,"src/extension.ts",0,0,"",typescript,selection_command
|
| 116 |
+
115,349153,"src/extension.ts",62,0,"",typescript,selection_command
|
| 117 |
+
116,349402,"src/extension.ts",141,0,"",typescript,selection_command
|
| 118 |
+
117,349436,"src/extension.ts",175,0,"",typescript,selection_command
|
| 119 |
+
118,349468,"src/extension.ts",176,0,"",typescript,selection_command
|
| 120 |
+
119,349504,"src/extension.ts",234,0,"",typescript,selection_command
|
| 121 |
+
120,349535,"src/extension.ts",309,0,"",typescript,selection_command
|
| 122 |
+
121,349568,"src/extension.ts",370,0,"",typescript,selection_command
|
| 123 |
+
122,349603,"src/extension.ts",371,0,"",typescript,selection_command
|
| 124 |
+
123,349637,"src/extension.ts",465,0,"",typescript,selection_command
|
| 125 |
+
124,349906,"src/extension.ts",371,0,"",typescript,selection_command
|
| 126 |
+
125,350395,"src/extension.ts",371,93,"\t// Use the console to output diagnostic information (console.log) and errors (console.error)",typescript,selection_command
|
| 127 |
+
126,350649,"src/extension.ts",371,175,"\t// Use the console to output diagnostic information (console.log) and errors (console.error)\n\t// This line of code will only be executed once when your extension is activated",typescript,selection_command
|
| 128 |
+
127,350789,"src/extension.ts",371,253,"\t// Use the console to output diagnostic information (console.log) and errors (console.error)\n\t// This line of code will only be executed once when your extension is activated\n\tconsole.log('Congratulations, your extension ""crowd-pilot"" is now active!');",typescript,selection_command
|
| 129 |
+
128,353264,"src/extension.ts",371,175,"\t// Use the console to output diagnostic information (console.log) and errors (console.error)\n\t// This line of code will only be executed once when your extension is activated",typescript,selection_command
|
| 130 |
+
129,353579,"src/extension.ts",371,176,"",typescript,content
|
| 131 |
+
130,353599,"src/extension.ts",372,0,"",typescript,selection_command
|
| 132 |
+
131,354087,"src/extension.ts",379,0,"",typescript,selection_command
|
| 133 |
+
132,354263,"src/extension.ts",380,0,"",typescript,selection_command
|
| 134 |
+
133,354427,"src/extension.ts",383,0,"",typescript,selection_command
|
| 135 |
+
134,354623,"src/extension.ts",385,0,"",typescript,selection_command
|
| 136 |
+
135,355609,"src/extension.ts",385,60,"",typescript,content
|
| 137 |
+
136,358656,"src/extension.ts",385,0,"C",typescript,content
|
| 138 |
+
137,358657,"src/extension.ts",386,0,"",typescript,selection_keyboard
|
| 139 |
+
138,361643,"src/extension.ts",386,0,"r",typescript,content
|
| 140 |
+
139,361644,"src/extension.ts",387,0,"",typescript,selection_keyboard
|
| 141 |
+
140,362094,"src/extension.ts",387,0,"o",typescript,content
|
| 142 |
+
141,362097,"src/extension.ts",388,0,"",typescript,selection_keyboard
|
| 143 |
+
142,362216,"src/extension.ts",388,0,"w",typescript,content
|
| 144 |
+
143,362220,"src/extension.ts",389,0,"",typescript,selection_keyboard
|
| 145 |
+
144,362403,"src/extension.ts",389,0,"d",typescript,content
|
| 146 |
+
145,362406,"src/extension.ts",390,0,"",typescript,selection_keyboard
|
| 147 |
+
146,362967,"src/extension.ts",390,0," ",typescript,content
|
| 148 |
+
147,362971,"src/extension.ts",391,0,"",typescript,selection_keyboard
|
| 149 |
+
148,363280,"src/extension.ts",391,0,"P",typescript,content
|
| 150 |
+
149,363283,"src/extension.ts",392,0,"",typescript,selection_keyboard
|
| 151 |
+
150,363387,"src/extension.ts",392,0,"p",typescript,content
|
| 152 |
+
151,363388,"src/extension.ts",393,0,"",typescript,selection_keyboard
|
| 153 |
+
152,363419,"src/extension.ts",393,0,"i",typescript,content
|
| 154 |
+
153,363421,"src/extension.ts",394,0,"",typescript,selection_keyboard
|
| 155 |
+
154,363634,"src/extension.ts",394,0,"l",typescript,content
|
| 156 |
+
155,363635,"src/extension.ts",395,0,"",typescript,selection_keyboard
|
| 157 |
+
156,363906,"src/extension.ts",394,1,"",typescript,content
|
| 158 |
+
157,364049,"src/extension.ts",393,1,"",typescript,content
|
| 159 |
+
158,364189,"src/extension.ts",392,1,"",typescript,content
|
| 160 |
+
159,364347,"src/extension.ts",392,0,"i",typescript,content
|
| 161 |
+
160,364349,"src/extension.ts",393,0,"",typescript,selection_keyboard
|
| 162 |
+
161,364574,"src/extension.ts",393,0,"l",typescript,content
|
| 163 |
+
162,364575,"src/extension.ts",394,0,"",typescript,selection_keyboard
|
| 164 |
+
163,364668,"src/extension.ts",394,0,"o",typescript,content
|
| 165 |
+
164,364671,"src/extension.ts",395,0,"",typescript,selection_keyboard
|
| 166 |
+
165,364715,"src/extension.ts",395,0,"t",typescript,content
|
| 167 |
+
166,364718,"src/extension.ts",396,0,"",typescript,selection_keyboard
|
| 168 |
+
167,367361,"src/extension.ts",396,0,"\t",typescript,content
|
| 169 |
+
168,368262,"src/extension.ts",396,1,"",typescript,content
|
| 170 |
+
169,378112,"src/extension.ts",396,0," ",typescript,content
|
| 171 |
+
170,378116,"src/extension.ts",397,0,"",typescript,selection_keyboard
|
| 172 |
+
171,378802,"src/extension.ts",397,0,"e",typescript,content
|
| 173 |
+
172,378803,"src/extension.ts",398,0,"",typescript,selection_keyboard
|
| 174 |
+
173,379085,"src/extension.ts",398,0,"x",typescript,content
|
| 175 |
+
174,379086,"src/extension.ts",399,0,"",typescript,selection_keyboard
|
| 176 |
+
175,379228,"src/extension.ts",399,0,"t",typescript,content
|
| 177 |
+
176,379234,"src/extension.ts",400,0,"",typescript,selection_keyboard
|
| 178 |
+
177,379303,"src/extension.ts",400,0,"e",typescript,content
|
| 179 |
+
178,379305,"src/extension.ts",401,0,"",typescript,selection_keyboard
|
| 180 |
+
179,380890,"src/extension.ts",401,0,"n",typescript,content
|
| 181 |
+
180,380895,"src/extension.ts",402,0,"",typescript,selection_keyboard
|
| 182 |
+
181,381067,"src/extension.ts",402,0,"s",typescript,content
|
| 183 |
+
182,381071,"src/extension.ts",403,0,"",typescript,selection_keyboard
|
| 184 |
+
183,381087,"src/extension.ts",403,0,"i",typescript,content
|
| 185 |
+
184,381089,"src/extension.ts",404,0,"",typescript,selection_keyboard
|
| 186 |
+
185,381103,"src/extension.ts",404,0,"o",typescript,content
|
| 187 |
+
186,381105,"src/extension.ts",405,0,"",typescript,selection_keyboard
|
| 188 |
+
187,381168,"src/extension.ts",405,0,"n",typescript,content
|
| 189 |
+
188,381171,"src/extension.ts",406,0,"",typescript,selection_keyboard
|
| 190 |
+
189,381371,"src/extension.ts",406,0," ",typescript,content
|
| 191 |
+
190,381373,"src/extension.ts",407,0,"",typescript,selection_keyboard
|
| 192 |
+
191,381492,"src/extension.ts",407,0,"a",typescript,content
|
| 193 |
+
192,381499,"src/extension.ts",408,0,"",typescript,selection_keyboard
|
| 194 |
+
193,381610,"src/extension.ts",408,0,"c",typescript,content
|
| 195 |
+
194,381611,"src/extension.ts",409,0,"",typescript,selection_keyboard
|
| 196 |
+
195,381809,"src/extension.ts",409,0,"t",typescript,content
|
| 197 |
+
196,381812,"src/extension.ts",410,0,"",typescript,selection_keyboard
|
| 198 |
+
197,381850,"src/extension.ts",410,0,"i",typescript,content
|
| 199 |
+
198,381853,"src/extension.ts",411,0,"",typescript,selection_keyboard
|
| 200 |
+
199,381999,"src/extension.ts",411,0,"v",typescript,content
|
| 201 |
+
200,382002,"src/extension.ts",412,0,"",typescript,selection_keyboard
|
| 202 |
+
201,382286,"src/extension.ts",412,0,"a",typescript,content
|
| 203 |
+
202,382289,"src/extension.ts",413,0,"",typescript,selection_keyboard
|
| 204 |
+
203,382300,"src/extension.ts",413,0,"t",typescript,content
|
| 205 |
+
204,382303,"src/extension.ts",414,0,"",typescript,selection_keyboard
|
| 206 |
+
205,382390,"src/extension.ts",414,0,"e",typescript,content
|
| 207 |
+
206,382392,"src/extension.ts",415,0,"",typescript,selection_keyboard
|
| 208 |
+
207,382664,"src/extension.ts",415,0,"d",typescript,content
|
| 209 |
+
208,382666,"src/extension.ts",416,0,"",typescript,selection_keyboard
|
| 210 |
+
209,382820,"src/extension.ts",415,0,"",typescript,selection_command
|
| 211 |
+
210,384422,"src/extension.ts",420,0,"",typescript,selection_command
|
| 212 |
+
211,384585,"src/extension.ts",465,0,"",typescript,selection_command
|
| 213 |
+
212,384729,"src/extension.ts",520,0,"",typescript,selection_command
|
| 214 |
+
213,384856,"src/extension.ts",604,0,"",typescript,selection_command
|
| 215 |
+
214,387710,"src/extension.ts",520,0,"",typescript,selection_command
|
| 216 |
+
215,387795,"src/extension.ts",465,0,"",typescript,selection_command
|
| 217 |
+
216,390281,"src/extension.ts",421,54,"\t// Configure terminal to allow tab keybinding to work",typescript,selection_command
|
| 218 |
+
217,390472,"src/extension.ts",421,138,"\t// Configure terminal to allow tab keybinding to work\n\t// This makes the command skip the shell so VS Code can intercept tab in terminals",typescript,selection_command
|
| 219 |
+
218,391918,"src/extension.ts",421,54,"\t// Configure terminal to allow tab keybinding to work",typescript,selection_command
|
| 220 |
+
219,399354,"src/extension.ts",465,0,"",typescript,selection_command
|
| 221 |
+
220,400366,"src/extension.ts",520,0,"",typescript,selection_command
|
| 222 |
+
221,400615,"src/extension.ts",604,0,"",typescript,selection_command
|
| 223 |
+
222,400651,"src/extension.ts",678,0,"",typescript,selection_command
|
| 224 |
+
223,400682,"src/extension.ts",756,0,"",typescript,selection_command
|
| 225 |
+
224,400714,"src/extension.ts",817,0,"",typescript,selection_command
|
| 226 |
+
225,400749,"src/extension.ts",868,0,"",typescript,selection_command
|
| 227 |
+
226,400781,"src/extension.ts",921,0,"",typescript,selection_command
|
| 228 |
+
227,400815,"src/extension.ts",923,0,"",typescript,selection_command
|
| 229 |
+
228,401165,"src/extension.ts",968,0,"",typescript,selection_command
|
| 230 |
+
229,401342,"src/extension.ts",1026,0,"",typescript,selection_command
|
| 231 |
+
230,401813,"src/extension.ts",968,0,"",typescript,selection_command
|
| 232 |
+
231,402369,"src/extension.ts",924,57,"\t// The command has been defined in the package.json file",typescript,selection_command
|
| 233 |
+
232,403403,"src/extension.ts",924,128,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand",typescript,selection_command
|
| 234 |
+
233,403652,"src/extension.ts",924,201,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json",typescript,selection_command
|
| 235 |
+
234,403685,"src/extension.ts",924,287,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {",typescript,selection_command
|
| 236 |
+
235,403717,"src/extension.ts",924,369,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed",typescript,selection_command
|
| 237 |
+
236,403751,"src/extension.ts",924,408,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user",typescript,selection_command
|
| 238 |
+
237,403785,"src/extension.ts",924,491,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user\n\t\tvscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');",typescript,selection_command
|
| 239 |
+
238,403818,"src/extension.ts",924,496,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user\n\t\tvscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');\n\t});",typescript,selection_command
|
| 240 |
+
239,405446,"src/extension.ts",924,497,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user\n\t\tvscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');\n\t});\n",typescript,selection_command
|
| 241 |
+
240,405784,"src/extension.ts",924,538,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user\n\t\tvscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');\n\t});\n\n\tcontext.subscriptions.push(disposable);",typescript,selection_command
|
| 242 |
+
241,406692,"src/extension.ts",924,497,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user\n\t\tvscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');\n\t});\n",typescript,selection_command
|
| 243 |
+
242,406829,"src/extension.ts",924,538,"\t// The command has been defined in the package.json file\n\t// Now provide the implementation of the command with registerCommand\n\t// The commandId parameter must match the command field in package.json\n\tconst disposable = vscode.commands.registerCommand('crowd-pilot.helloWorld', () => {\n\t\t// The code you place here will be executed every time your command is executed\n\t\t// Display a message box to the user\n\t\tvscode.window.showInformationMessage('Hello World from crowd-pilot-extension!');\n\t});\n\n\tcontext.subscriptions.push(disposable);",typescript,selection_command
|
| 244 |
+
243,411075,"src/extension.ts",924,539,"",typescript,content
|
| 245 |
+
244,411101,"src/extension.ts",925,0,"",typescript,selection_command
|
| 246 |
+
245,415561,"package.json",0,0,"{\n ""name"": ""crowd-pilot"",\n ""displayName"": ""crowd-pilot-extension"",\n ""description"": ""Teaching language models to code like humans."",\n ""version"": ""0.0.1"",\n ""engines"": {\n ""vscode"": ""^1.99.3""\n },\n ""categories"": [\n ""Other""\n ],\n ""activationEvents"": [],\n ""main"": ""./out/extension.js"",\n ""contributes"": {\n ""commands"": [\n {\n ""command"": ""crowd-pilot.helloWorld"",\n ""title"": ""Hello World!""\n },\n {\n ""command"": ""crowd-pilot.testRun"",\n ""title"": ""Test Run""\n }\n ],\n ""keybindings"": [\n {\n ""command"": ""crowd-pilot.testRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""editorTextFocus || terminalFocus""\n }\n ]\n },\n ""scripts"": {\n ""vscode:prepublish"": ""npm run compile"",\n ""compile"": ""tsc -p ./"",\n ""watch"": ""tsc -watch -p ./"",\n ""pretest"": ""npm run compile && npm run lint"",\n ""lint"": ""eslint src"",\n ""test"": ""vscode-test""\n },\n ""devDependencies"": {\n ""@types/vscode"": ""^1.105.0"",\n ""@types/mocha"": ""^10.0.10"",\n ""@types/node"": ""22.x"",\n ""@typescript-eslint/eslint-plugin"": ""^8.45.0"",\n ""@typescript-eslint/parser"": ""^8.45.0"",\n ""eslint"": ""^9.36.0"",\n ""typescript"": ""^5.9.3"",\n ""@vscode/test-cli"": ""^0.0.11"",\n ""@vscode/test-electron"": ""^2.5.2""\n }\n}\n",json,tab
|
| 247 |
+
246,419179,"package.json",619,0,"",json,selection_command
|
| 248 |
+
247,419353,"package.json",597,0,"",json,selection_command
|
| 249 |
+
248,419595,"package.json",555,0,"",json,selection_command
|
| 250 |
+
249,419628,"package.json",545,0,"",json,selection_command
|
| 251 |
+
250,419660,"package.json",526,0,"",json,selection_command
|
| 252 |
+
251,419695,"package.json",516,0,"",json,selection_command
|
| 253 |
+
252,419815,"package.json",526,0,"",json,selection_command
|
| 254 |
+
253,419996,"package.json",545,0,"",json,selection_command
|
| 255 |
+
254,420153,"package.json",526,0,"",json,selection_command
|
| 256 |
+
255,420403,"package.json",516,0,"",json,selection_command
|
| 257 |
+
256,420436,"package.json",509,0,"",json,selection_command
|
| 258 |
+
257,420470,"package.json",483,0,"",json,selection_command
|
| 259 |
+
258,420504,"package.json",441,0,"",json,selection_command
|
| 260 |
+
259,420537,"package.json",431,0,"",json,selection_command
|
| 261 |
+
260,420571,"package.json",423,0,"",json,selection_command
|
| 262 |
+
261,420605,"package.json",392,0,"",json,selection_command
|
| 263 |
+
262,420868,"package.json",423,0,"",json,selection_command
|
| 264 |
+
263,421045,"package.json",392,0,"",json,selection_command
|
| 265 |
+
264,421219,"package.json",347,0,"",json,selection_command
|
| 266 |
+
265,423492,"package.json",339,44," ""command"": ""crowd-pilot.helloWorld"",",json,selection_command
|
| 267 |
+
266,423645,"package.json",339,76," ""command"": ""crowd-pilot.helloWorld"",\n ""title"": ""Hello World!""",json,selection_command
|
| 268 |
+
267,423788,"package.json",339,85," ""command"": ""crowd-pilot.helloWorld"",\n ""title"": ""Hello World!""\n },",json,selection_command
|
| 269 |
+
268,423925,"package.json",339,93," ""command"": ""crowd-pilot.helloWorld"",\n ""title"": ""Hello World!""\n },\n {",json,selection_command
|
| 270 |
+
269,424227,"package.json",339,94,"",json,content
|
| 271 |
+
270,424247,"package.json",347,0,"",json,selection_command
|
| 272 |
+
271,425592,"src/extension.ts",0,0,"",typescript,tab
|
| 273 |
+
272,426776,"package.json",0,0,"",json,tab
|
| 274 |
+
273,428352,"src/extension.ts",0,0,"",typescript,tab
|
| 275 |
+
274,428833,"package.json",0,0,"",json,tab
|
| 276 |
+
275,436634,"src/extension.ts",0,0,"",typescript,tab
|
| 277 |
+
276,438056,"package.json",0,0,"",json,tab
|
| 278 |
+
277,477460,"src/extension.ts",0,0,"",typescript,tab
|
| 279 |
+
278,479097,"src/extension.ts",649,0,"",typescript,selection_mouse
|
| 280 |
+
279,485971,"src/extension.ts",727,0,"",typescript,selection_command
|
| 281 |
+
280,486093,"src/extension.ts",788,0,"",typescript,selection_command
|
| 282 |
+
281,486431,"src/extension.ts",839,0,"",typescript,selection_command
|
| 283 |
+
282,486871,"src/extension.ts",919,0,"\n\t\t",typescript,content
|
| 284 |
+
283,492259,"src/extension.ts",922,0,"c",typescript,content
|
| 285 |
+
284,492261,"src/extension.ts",923,0,"",typescript,selection_keyboard
|
| 286 |
+
285,492384,"src/extension.ts",923,0,"o",typescript,content
|
| 287 |
+
286,492387,"src/extension.ts",924,0,"",typescript,selection_keyboard
|
| 288 |
+
287,492743,"src/extension.ts",924,0,"n",typescript,content
|
| 289 |
+
288,492746,"src/extension.ts",925,0,"",typescript,selection_keyboard
|
| 290 |
+
289,492989,"src/extension.ts",925,0,"s",typescript,content
|
| 291 |
+
290,492994,"src/extension.ts",926,0,"",typescript,selection_keyboard
|
| 292 |
+
291,493013,"src/extension.ts",926,0,"o",typescript,content
|
| 293 |
+
292,493015,"src/extension.ts",927,0,"",typescript,selection_keyboard
|
| 294 |
+
293,493195,"src/extension.ts",927,0,"l",typescript,content
|
| 295 |
+
294,493200,"src/extension.ts",928,0,"",typescript,selection_keyboard
|
| 296 |
+
295,493228,"src/extension.ts",928,0,"e",typescript,content
|
| 297 |
+
296,493232,"src/extension.ts",929,0,"",typescript,selection_keyboard
|
| 298 |
+
297,493737,"src/extension.ts",922,7,"console",typescript,content
|
| 299 |
+
298,493739,"src/extension.ts",929,0,".",typescript,content
|
| 300 |
+
299,493741,"src/extension.ts",930,0,"",typescript,selection_keyboard
|
| 301 |
+
300,493903,"src/extension.ts",930,0,"l",typescript,content
|
| 302 |
+
301,493907,"src/extension.ts",931,0,"",typescript,selection_keyboard
|
| 303 |
+
302,494016,"src/extension.ts",931,0,"o",typescript,content
|
| 304 |
+
303,494020,"src/extension.ts",932,0,"",typescript,selection_keyboard
|
| 305 |
+
304,494082,"src/extension.ts",932,0,"g",typescript,content
|
| 306 |
+
305,494086,"src/extension.ts",933,0,"",typescript,selection_keyboard
|
| 307 |
+
306,494439,"src/extension.ts",929,4,".log",typescript,content
|
| 308 |
+
307,494441,"src/extension.ts",933,0,"()",typescript,content
|
| 309 |
+
308,494444,"src/extension.ts",934,0,"",typescript,selection_keyboard
|
| 310 |
+
309,494541,"src/extension.ts",934,1,")",typescript,content
|
| 311 |
+
310,494543,"src/extension.ts",935,0,"",typescript,selection_keyboard
|
| 312 |
+
311,494964,"src/extension.ts",934,0,"",typescript,selection_command
|
| 313 |
+
312,495241,"src/extension.ts",934,0,"''",typescript,content
|
| 314 |
+
313,495244,"src/extension.ts",935,0,"",typescript,selection_keyboard
|
| 315 |
+
314,495911,"src/extension.ts",935,0,"A",typescript,content
|
| 316 |
+
315,495915,"src/extension.ts",936,0,"",typescript,selection_keyboard
|
| 317 |
+
316,496134,"src/extension.ts",936,0,"d",typescript,content
|
| 318 |
+
317,496137,"src/extension.ts",937,0,"",typescript,selection_keyboard
|
| 319 |
+
318,496269,"src/extension.ts",937,0,"d",typescript,content
|
| 320 |
+
319,496273,"src/extension.ts",938,0,"",typescript,selection_keyboard
|
| 321 |
+
320,496379,"src/extension.ts",938,0,"e",typescript,content
|
| 322 |
+
321,496382,"src/extension.ts",939,0,"",typescript,selection_keyboard
|
| 323 |
+
322,496595,"src/extension.ts",939,0,"d",typescript,content
|
| 324 |
+
323,496598,"src/extension.ts",940,0,"",typescript,selection_keyboard
|
| 325 |
+
324,496712,"src/extension.ts",940,0," ",typescript,content
|
| 326 |
+
325,496715,"src/extension.ts",941,0,"",typescript,selection_keyboard
|
| 327 |
+
326,497194,"src/extension.ts",941,0,"t",typescript,content
|
| 328 |
+
327,497197,"src/extension.ts",942,0,"",typescript,selection_keyboard
|
| 329 |
+
328,497252,"src/extension.ts",942,0,"e",typescript,content
|
| 330 |
+
329,497255,"src/extension.ts",943,0,"",typescript,selection_keyboard
|
| 331 |
+
330,497579,"src/extension.ts",943,0,"s",typescript,content
|
| 332 |
+
331,497583,"src/extension.ts",944,0,"",typescript,selection_keyboard
|
| 333 |
+
332,497614,"src/extension.ts",944,0,"t",typescript,content
|
| 334 |
+
333,497617,"src/extension.ts",945,0,"",typescript,selection_keyboard
|
| 335 |
+
334,497946,"src/extension.ts",945,0,"R",typescript,content
|
| 336 |
+
335,497949,"src/extension.ts",946,0,"",typescript,selection_keyboard
|
| 337 |
+
336,498067,"src/extension.ts",946,0,"u",typescript,content
|
| 338 |
+
337,498069,"src/extension.ts",947,0,"",typescript,selection_keyboard
|
| 339 |
+
338,498217,"src/extension.ts",947,0,"h",typescript,content
|
| 340 |
+
339,498219,"src/extension.ts",948,0,"",typescript,selection_keyboard
|
| 341 |
+
340,498352,"src/extension.ts",948,0," ",typescript,content
|
| 342 |
+
341,498353,"src/extension.ts",949,0,"",typescript,selection_keyboard
|
| 343 |
+
342,498634,"src/extension.ts",948,1,"",typescript,content
|
| 344 |
+
343,498787,"src/extension.ts",947,1,"",typescript,content
|
| 345 |
+
344,498935,"src/extension.ts",947,0," ",typescript,content
|
| 346 |
+
345,498936,"src/extension.ts",948,0,"",typescript,selection_keyboard
|
| 347 |
+
346,499294,"src/extension.ts",947,1,"",typescript,content
|
| 348 |
+
347,499500,"src/extension.ts",947,0,"n",typescript,content
|
| 349 |
+
348,499502,"src/extension.ts",948,0,"",typescript,selection_keyboard
|
| 350 |
+
349,499750,"src/extension.ts",948,0," ",typescript,content
|
| 351 |
+
350,499754,"src/extension.ts",949,0,"",typescript,selection_keyboard
|
| 352 |
+
351,499765,"src/extension.ts",949,0,"t",typescript,content
|
| 353 |
+
352,499767,"src/extension.ts",950,0,"",typescript,selection_keyboard
|
| 354 |
+
353,499800,"src/extension.ts",950,0,"o",typescript,content
|
| 355 |
+
354,499802,"src/extension.ts",951,0,"",typescript,selection_keyboard
|
| 356 |
+
355,499991,"src/extension.ts",951,0," ",typescript,content
|
| 357 |
+
356,499993,"src/extension.ts",952,0,"",typescript,selection_keyboard
|
| 358 |
+
357,500154,"src/extension.ts",952,0,"c",typescript,content
|
| 359 |
+
358,500157,"src/extension.ts",953,0,"",typescript,selection_keyboard
|
| 360 |
+
359,500362,"src/extension.ts",953,0,"o",typescript,content
|
| 361 |
+
360,500365,"src/extension.ts",954,0,"",typescript,selection_keyboard
|
| 362 |
+
361,500522,"src/extension.ts",954,0,"m",typescript,content
|
| 363 |
+
362,500524,"src/extension.ts",955,0,"",typescript,selection_keyboard
|
| 364 |
+
363,500666,"src/extension.ts",955,0,"m",typescript,content
|
| 365 |
+
364,500669,"src/extension.ts",956,0,"",typescript,selection_keyboard
|
| 366 |
+
365,500838,"src/extension.ts",956,0,"a",typescript,content
|
| 367 |
+
366,500841,"src/extension.ts",957,0,"",typescript,selection_keyboard
|
| 368 |
+
367,500861,"src/extension.ts",957,0,"n",typescript,content
|
| 369 |
+
368,500863,"src/extension.ts",958,0,"",typescript,selection_keyboard
|
| 370 |
+
369,501019,"src/extension.ts",958,0,"d",typescript,content
|
| 371 |
+
370,501022,"src/extension.ts",959,0,"",typescript,selection_keyboard
|
| 372 |
+
371,501092,"src/extension.ts",959,0,"s",typescript,content
|
| 373 |
+
372,501094,"src/extension.ts",960,0,"",typescript,selection_keyboard
|
| 374 |
+
373,501254,"src/extension.ts",960,0,"t",typescript,content
|
| 375 |
+
374,501256,"src/extension.ts",961,0,"",typescript,selection_keyboard
|
| 376 |
+
375,501515,"src/extension.ts",960,1,"",typescript,content
|
| 377 |
+
376,501763,"src/extension.ts",960,0,"T",typescript,content
|
| 378 |
+
377,501764,"src/extension.ts",961,0,"",typescript,selection_keyboard
|
| 379 |
+
378,501893,"src/extension.ts",961,0,"o",typescript,content
|
| 380 |
+
379,501895,"src/extension.ts",962,0,"",typescript,selection_keyboard
|
| 381 |
+
380,502953,"src/extension.ts",962,0,"S",typescript,content
|
| 382 |
+
381,502957,"src/extension.ts",963,0,"",typescript,selection_keyboard
|
| 383 |
+
382,503394,"src/extension.ts",963,0,"k",typescript,content
|
| 384 |
+
383,503396,"src/extension.ts",964,0,"",typescript,selection_keyboard
|
| 385 |
+
384,503466,"src/extension.ts",964,0,"i",typescript,content
|
| 386 |
+
385,503468,"src/extension.ts",965,0,"",typescript,selection_keyboard
|
| 387 |
+
386,503530,"src/extension.ts",965,0,"p",typescript,content
|
| 388 |
+
387,503533,"src/extension.ts",966,0,"",typescript,selection_keyboard
|
| 389 |
+
388,503903,"src/extension.ts",966,0,"S",typescript,content
|
| 390 |
+
389,503905,"src/extension.ts",967,0,"",typescript,selection_keyboard
|
| 391 |
+
390,504036,"src/extension.ts",967,0,"h",typescript,content
|
| 392 |
+
391,504039,"src/extension.ts",968,0,"",typescript,selection_keyboard
|
| 393 |
+
392,504117,"src/extension.ts",968,0,"e",typescript,content
|
| 394 |
+
393,504119,"src/extension.ts",969,0,"",typescript,selection_keyboard
|
| 395 |
+
394,504254,"src/extension.ts",969,0,"l",typescript,content
|
| 396 |
+
395,504256,"src/extension.ts",970,0,"",typescript,selection_keyboard
|
| 397 |
+
396,504433,"src/extension.ts",970,0,"l",typescript,content
|
| 398 |
+
397,504435,"src/extension.ts",971,0,"",typescript,selection_keyboard
|
| 399 |
+
398,504708,"src/extension.ts",971,1,"'",typescript,content
|
| 400 |
+
399,504711,"src/extension.ts",972,0,"",typescript,selection_keyboard
|
| 401 |
+
400,505286,"src/extension.ts",972,0,"_",typescript,content
|
| 402 |
+
401,505288,"src/extension.ts",973,0,"",typescript,selection_keyboard
|
| 403 |
+
402,505710,"src/extension.ts",972,1,"",typescript,content
|
| 404 |
+
403,505998,"src/extension.ts",972,0,")",typescript,content
|
| 405 |
+
404,505999,"src/extension.ts",973,0,"",typescript,selection_keyboard
|
| 406 |
+
405,507214,"src/extension.ts",974,0,"",typescript,selection_command
|
| 407 |
+
406,507417,"src/extension.ts",973,1,"",typescript,content
|
| 408 |
+
407,509720,"src/extension.ts",973,0,";",typescript,content
|
| 409 |
+
408,509721,"src/extension.ts",974,0,"",typescript,selection_keyboard
|
| 410 |
+
409,510217,"src/extension.ts",973,0,"",typescript,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-4a0c523e-3509-46d2-9ab8-f144f364f7ff1755356823323-2025_08_16-17.07.06.759/source.csv
ADDED
|
@@ -0,0 +1,10 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,7,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n # gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n # inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n # gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n # recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n # psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n # ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n # _, index_counts_lam = jnp.unique_counts(\n # jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n # )\n _, 
index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n # codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n # psnr=psnr,\n # ssim=ssim,\n # codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (None, metrics)\n\n\n@nnx.jit\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n 
args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(optimizer, 
replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n # dataloader = (\n # jax.make_array_from_process_local_data(videos_sharding, elem)\n # for elem in grain_iterator\n # )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for _ in range(100000):\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n pass\n # gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n # recon_seq = recon[0].clip(0, 1)\n # comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n # comparison_seq = einops.rearrange(\n # comparison_seq * 255, ""t h w c -> h (t w) c""\n # )\n # if jax.process_index() == 0:\n # log_images = dict(\n # image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n # recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n # true_vs_recon=wandb.Image(\n # np.asarray(comparison_seq.astype(np.uint8))\n # ),\n # )\n # wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
|
| 3 |
+
2,55,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:07:06 PM [info] Activating crowd-code\n5:07:06 PM [info] Recording started\n5:07:06 PM [info] Initializing git provider using file system watchers...\n5:07:06 PM [info] Git repository found\n5:07:06 PM [info] Git provider initialized successfully\n",Log,tab
|
| 4 |
+
3,125,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"5:07:06 PM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,5102,"train_dynamics.py",0,0,"",python,tab
|
| 6 |
+
5,6325,"train_dynamics.py",14301,0,"",python,selection_command
|
| 7 |
+
6,6456,"train_dynamics.py",13406,0,"",python,selection_command
|
| 8 |
+
7,6600,"train_dynamics.py",12313,0,"",python,selection_command
|
| 9 |
+
8,8781,"input_pipeline/download/download_array_records.sh",0,0,"#!/bin/bash\n\n# Download and extract array records from Hugging Face\n# \n# This script performs a two-step process:\n# 1. Downloads compressed array records from a Hugging Face dataset repository\n# 2. Extracts the compressed tar files in parallel for better performance\n#\n# Usage:\n# ./download_array_records.sh [hf_download_dir] [final_dataset_dir]\n#\n# Arguments:\n# hf_download_dir - Directory to store compressed downloads (default: data/minecraft_arrayrecords_compressed)\n# final_dataset_dir - Directory for extracted array records (default: data/minecraft_arrayrecords)\n\n# Set default directories if not provided as arguments\nhf_download_dir=""${1:-data/minecraft_arrayrecords_compressed}"" \nfinal_dataset_dir=""${2:-data/minecraft_arrayrecords}"" \n\nmkdir -p $hf_download_dir\nmkdir -p $final_dataset_dir\n\n# Step 1: Download compressed dataset from Hugging Face\necho ""Starting download from Hugging Face...""\nrepo_id=p-doom/open_ai_minecraft_arrayrecords_chunked\nstart_time_hf_download=$(date +%s)\n\nHF_HUB_ENABLE_HF_TRANSFER=1 HF_HUB_DISABLE_SYMLINKS=1 \\nhuggingface-cli download --repo-type dataset $repo_id --local-dir $hf_download_dir\n\nend_time_hf_download=$(date +%s)\necho ""Download completed. Time taken: $((end_time_hf_download - start_time_hf_download)) seconds""\n\n# Step 2: Extract compressed array records in parallel\necho ""Starting parallel extraction of tar files...""\nnum_workers=64 # Number of parallel extraction processes\nstart_time_uncompress=$(date +%s)\n\n# Find all shard tar files and extract them in parallel:\nxargs -0 -P $num_workers -I {} bash -c 'echo ""Extracting {}""; tar -xf ""{}"" -C ""'$final_dataset_dir'""'\n\nend_time_uncompress=$(date +%s)\n\n# Display timing summary\necho ""================================""\necho ""Extraction completed successfully!""\necho ""Uncompress time: $((end_time_uncompress - start_time_uncompress)) seconds""\necho ""Download time: $((end_time_hf_download - start_time_hf_download)) seconds""\necho ""Total time: $((end_time_uncompress - start_time_hf_download)) seconds""\necho ""Final dataset location: $final_dataset_dir""\n",shellscript,tab
|
| 10 |
+
9,9744,"train_dynamics.py",0,0,"",python,tab
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-4c69dcf8-a147-4975-8e49-6c7ed4761fb81758276984897-2025_09_19-12.16.27.253/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-544d5f3a-a597-4531-ac2f-947fc20565021764422378535-2025_11_29-14.19.42.617/source.csv
ADDED
|
@@ -0,0 +1,102 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,11,"Untitled-1",0,0,"",plaintext,tab
|
| 3 |
+
2,116,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:19:42 PM [info] Activating crowd-code\n2:19:42 PM [info] Recording started\n2:19:42 PM [info] Initializing git provider using file system watchers...\n2:19:42 PM [info] No workspace folder found\n",Log,tab
|
| 4 |
+
3,1057,"Untitled-1",0,0,"",plaintext,tab
|
| 5 |
+
4,8658,"TERMINAL",0,0,"Test",,terminal_focus
|
| 6 |
+
5,8665,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 7 |
+
6,19634,"Untitled-1",27,0,"INSERT INTO `crowd_pilot` (`id`, `name`, `description`, `created_at`, `updated_at`) VALUES",plaintext,content
|
| 8 |
+
7,24609,"Untitled-1",27,90,"",plaintext,content
|
| 9 |
+
8,26850,"Untitled-1",27,0,"i",plaintext,content
|
| 10 |
+
9,26854,"Untitled-1",28,0,"",plaintext,selection_keyboard
|
| 11 |
+
10,27637,"Untitled-1",27,1,"",plaintext,content
|
| 12 |
+
11,30869,"Untitled-1",27,0," // replaced by crowd-pilot",plaintext,content
|
| 13 |
+
12,33349,"Untitled-1",27,27,"",plaintext,content
|
| 14 |
+
13,35996,"Untitled-1",0,0,"",plaintext,selection_command
|
| 15 |
+
14,42312,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 16 |
+
15,45369,"Untitled-1",27,27,"",plaintext,content
|
| 17 |
+
16,49046,"Untitled-1",0,0,"",plaintext,selection_command
|
| 18 |
+
17,52225,"Untitled-1",0,26,"// crowd-pilot mock insert // replaced by crowd-pilot",plaintext,content
|
| 19 |
+
18,54092,"Untitled-1",0,53,"",plaintext,content
|
| 20 |
+
19,55620,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 21 |
+
20,57429,"Untitled-1",0,0,"",plaintext,selection_command
|
| 22 |
+
21,58909,"Untitled-1",27,0," // replaced by crowd-pilot",plaintext,content
|
| 23 |
+
22,60563,"Untitled-1",0,26,"",plaintext,content
|
| 24 |
+
23,61774,"Untitled-1",1,0,"",plaintext,selection_command
|
| 25 |
+
24,61841,"Untitled-1",29,0,"",plaintext,selection_command
|
| 26 |
+
25,62254,"Untitled-1",1,0,"",plaintext,selection_command
|
| 27 |
+
26,63644,"Untitled-1",1,0,"d",plaintext,content
|
| 28 |
+
27,63648,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 29 |
+
28,63790,"Untitled-1",2,0,"d",plaintext,content
|
| 30 |
+
29,63793,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 31 |
+
30,64356,"Untitled-1",2,0,"",plaintext,selection_command
|
| 32 |
+
31,64489,"Untitled-1",1,0,"",plaintext,selection_command
|
| 33 |
+
32,65096,"Untitled-1",1,30,"",plaintext,content
|
| 34 |
+
33,65471,"Untitled-1",0,1,"",plaintext,content
|
| 35 |
+
34,66871,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 36 |
+
35,67942,"Untitled-1",0,0,"",plaintext,selection_command
|
| 37 |
+
36,68660,"Untitled-1",27,0," // replaced by crowd-pilot",plaintext,content
|
| 38 |
+
37,73376,"Untitled-1",27,0,"",plaintext,selection_command
|
| 39 |
+
38,73714,"Untitled-1",26,28,"",plaintext,content
|
| 40 |
+
39,73726,"Untitled-1",0,0,"",plaintext,selection_command
|
| 41 |
+
40,75353,"Untitled-1",0,26,"",plaintext,content
|
| 42 |
+
41,77180,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 43 |
+
42,77493,"Untitled-1",27,0," // replaced by crowd-pilot",plaintext,content
|
| 44 |
+
43,77866,"Untitled-1",27,27,"",plaintext,content
|
| 45 |
+
44,78228,"Untitled-1",27,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 46 |
+
45,78827,"Untitled-1",54,0," // replaced by crowd-pilot",plaintext,content
|
| 47 |
+
46,79175,"Untitled-1",54,27,"",plaintext,content
|
| 48 |
+
47,79461,"Untitled-1",54,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 49 |
+
48,79661,"Untitled-1",81,0," // replaced by crowd-pilot",plaintext,content
|
| 50 |
+
49,79883,"Untitled-1",81,27,"",plaintext,content
|
| 51 |
+
50,80063,"Untitled-1",81,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 52 |
+
51,80255,"Untitled-1",108,0," // replaced by crowd-pilot",plaintext,content
|
| 53 |
+
52,82082,"Untitled-1",134,0,"",plaintext,selection_command
|
| 54 |
+
53,82494,"Untitled-1",108,27," // replaced by crowd-pilot",plaintext,selection_command
|
| 55 |
+
54,83080,"Untitled-1",81,54,"// crowd-pilot mock insert\n // replaced by crowd-pilot",plaintext,selection_command
|
| 56 |
+
55,83221,"Untitled-1",54,81,"// crowd-pilot mock insert\n// crowd-pilot mock insert\n // replaced by crowd-pilot",plaintext,selection_command
|
| 57 |
+
56,83373,"Untitled-1",27,108,"// crowd-pilot mock insert\n// crowd-pilot mock insert\n// crowd-pilot mock insert\n // replaced by crowd-pilot",plaintext,selection_command
|
| 58 |
+
57,83578,"Untitled-1",0,135,"// crowd-pilot mock insert\n// crowd-pilot mock insert\n// crowd-pilot mock insert\n// crowd-pilot mock insert\n // replaced by crowd-pilot",plaintext,selection_command
|
| 59 |
+
58,83748,"Untitled-1",0,135,"",plaintext,content
|
| 60 |
+
59,85181,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 61 |
+
60,85870,"Untitled-1",0,0,"",plaintext,selection_command
|
| 62 |
+
61,86253,"Untitled-1",27,0," // replaced by crowd-pilot",plaintext,content
|
| 63 |
+
62,88809,"Untitled-1",0,27,"",plaintext,content
|
| 64 |
+
63,88813,"Untitled-1",1,0,"",plaintext,selection_command
|
| 65 |
+
64,89141,"Untitled-1",0,27,"",plaintext,content
|
| 66 |
+
65,92332,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 67 |
+
66,94480,"Untitled-1",0,0,"",plaintext,selection_command
|
| 68 |
+
67,95257,"Untitled-1",27,0," // replaced by crowd-pilot",plaintext,content
|
| 69 |
+
68,97741,"Untitled-1",0,27,"",plaintext,content
|
| 70 |
+
69,97746,"Untitled-1",1,0,"",plaintext,selection_command
|
| 71 |
+
70,98075,"Untitled-1",0,27,"",plaintext,content
|
| 72 |
+
71,102386,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 73 |
+
72,105938,"Untitled-1",0,0,"",plaintext,selection_command
|
| 74 |
+
73,126085,"Untitled-1",0,26,"// crowd-pilot mock insert // replaced by crowd-pilot",plaintext,content
|
| 75 |
+
74,136785,"Untitled-1",0,53,"",plaintext,content
|
| 76 |
+
75,137494,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 77 |
+
76,149303,"Untitled-1",27,0," // replaced by crowd-pilot",plaintext,content
|
| 78 |
+
77,150748,"Untitled-1",27,27,"",plaintext,content
|
| 79 |
+
78,152476,"Untitled-1",28,0,"",plaintext,selection_command
|
| 80 |
+
79,152821,"Untitled-1",27,0,"",plaintext,selection_command
|
| 81 |
+
80,153061,"Untitled-1",0,0,"",plaintext,selection_command
|
| 82 |
+
81,153607,"Untitled-1",0,27,"",plaintext,content
|
| 83 |
+
82,153929,"Untitled-1",0,1,"",plaintext,content
|
| 84 |
+
83,254974,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 85 |
+
84,258647,"Untitled-1",0,0,"",plaintext,selection_command
|
| 86 |
+
85,259013,"Untitled-1",27,0,"",plaintext,selection_command
|
| 87 |
+
86,260308,"Untitled-1",0,27,"// crowd-pilot mock insert\n",plaintext,selection_command
|
| 88 |
+
87,260597,"Untitled-1",0,27,"",plaintext,content
|
| 89 |
+
88,262380,"Untitled-1",0,0," // replaced by crowd-pilot",plaintext,content
|
| 90 |
+
89,264998,"Untitled-1",26,0,"",plaintext,selection_command
|
| 91 |
+
90,270712,"Untitled-1",0,27," ",plaintext,content
|
| 92 |
+
91,272070,"Untitled-1",0,1,"",plaintext,content
|
| 93 |
+
92,273725,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 94 |
+
93,279243,"Untitled-1",27,0," // replaced by crowd-pilot",plaintext,content
|
| 95 |
+
94,282551,"Untitled-1",53,0,"",plaintext,selection_command
|
| 96 |
+
95,284037,"Untitled-1",25,0,"",plaintext,selection_command
|
| 97 |
+
96,284629,"Untitled-1",53,0,"",plaintext,selection_command
|
| 98 |
+
97,284996,"Untitled-1",27,27," ",plaintext,content
|
| 99 |
+
98,285558,"Untitled-1",27,1,"",plaintext,content
|
| 100 |
+
99,285725,"Untitled-1",26,1,"",plaintext,content
|
| 101 |
+
100,285884,"Untitled-1",25,1,"",plaintext,content
|
| 102 |
+
101,286205,"Untitled-1",0,25,"",plaintext,content
|
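Note: the block below is an editor's illustrative sketch, not part of the uploaded dataset. Each source.csv above shares the header "Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type"; assuming that "content" rows splice Text into the named file at RangeOffset (replacing RangeLength characters), that "tab" rows carry the full file text on first activation, and that newlines/tabs are stored as the literal escapes \n and \t, one recording could be replayed roughly like this:

import csv
from collections import defaultdict

def replay(csv_path: str) -> dict:
    """Replay one crowd-code recording into per-file text buffers (sketch)."""
    buffers = defaultdict(str)
    with open(csv_path, newline="") as f:
        for row in csv.DictReader(f):
            # "tab" rows hold the full file text when a file is first opened;
            # "content" rows are incremental edits. Other row types
            # (selection_*, terminal_*) do not change file text.
            if row["Type"] not in ("tab", "content"):
                continue
            text = row["Text"].replace("\\n", "\n").replace("\\t", "\t")
            start = int(row["RangeOffset"])
            end = start + int(row["RangeLength"])
            buf = buffers[row["File"]]
            buffers[row["File"]] = buf[:start] + text + buf[end:]
    return buffers

# Example usage (hypothetical path):
# for path, text in replay("source.csv").items():
#     print(path, len(text))
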
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-55b74e48-50e3-4bf3-8e02-f03e464c22ac1750632538084-2025_06_22-15.48.59.681/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-6dbde8ba-25c3-4408-ace8-c25f1d6c04e31764455107698-2025_11_29-23.25.11.741/source.csv
ADDED
|
@@ -0,0 +1,34 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,12,"Untitled-1",0,0,"from typing import IO\nfrom io import IOBase\nfrom sys import stdout\n\n\nclass MessageFormatter():\n __terminator: str\n def __init__(self, terminator: str):\n """"""Create a new MessageFormatter""""""\n if not isinstance(terminator, str):\n raise TypeError(f""terminator must be a str"")\n \n self.__terminator = terminator\n \n\n def set_terminator(self, terminator: str):\n """"""Set termination character(s)""""""\n if not isinstance(terminator, str):\n raise TypeError(f""terminator must be a str"")\n \n self.__terminator = terminator\n\n def get_terminator(self) -> str:\n """"""Get termination character(s)""""""\n return self.__terminator\n\n\n def format(self, message: str) -> str:\n """"""Formats a given message""""""\n if not isinstance(message, str):\n raise TypeError(f""message must be a str"")\n return f""{message}{self.__terminator}""\n\n\nclass Printer:\n __target: IO\n __message: str\n\n def __init__(self, target: IO, message: str):\n """"""Creates a new Printer""""""\n if not isinstance(target, IOBase):\n raise TypeError(f""target must be a file object"")\n if not isinstance(message, str):\n raise TypeError(f""message must be a str"")\n \n self.__target = target\n self.__message = message\n\n\n def set_target(self, target: IO):\n """"""Sets the printer target (destination file object)""""""\n if not isinstance(target, IOBase):\n raise TypeError(f""target must be a file object"")\n\n self.__target = target\n \n def get_target(self) -> IO:\n """"""Gets the printer target (destination file object)""""""\n return self.__target\n \n\n def set_message(self, message: str):\n """"""Sets the printer message""""""\n if not isinstance(message, str):\n raise TypeError(f""message must be a str"")\n\n self.__message = message\n \n def get_message(self) -> str:\n """"""Gets the printer message""""""\n return self.__message\n \n\n def print_message(self, formatter: MessageFormatter):\n """"""Formats the message with the given formatter, and prints to target""""""\n output = formatter.format(self.__message)\n self.__target.write(output)\n\n\nif __name__ == ""__main__"":\n formatter = MessageFormatter(""\n"")\n\n printer = Printer(stdout, ""Hello world"")\n printer.print_message(formatter)",python,tab
|
| 3 |
+
2,65,"Untitled-1",2428,0,"",python,selection_command
|
| 4 |
+
3,77,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:25:11 PM [info] Activating crowd-code\n11:25:11 PM [info] Recording started\n11:25:11 PM [info] Initializing git provider using file system watchers...\n11:25:11 PM [info] No workspace folder found\n",Log,tab
|
| 5 |
+
4,840,"Untitled-1",0,0,"",python,tab
|
| 6 |
+
5,1752,"Untitled-1",0,0,"",python,selection_command
|
| 7 |
+
6,3825,"Untitled-1",22,0,"",python,selection_command
|
| 8 |
+
7,7325,"Untitled-1",67,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",python,content
|
| 9 |
+
8,12258,"Untitled-1",99,13,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",python,content
|
| 10 |
+
9,16370,"Untitled-1",21,0,"\n",python,content
|
| 11 |
+
10,17137,"Untitled-1",21,1,"",python,content
|
| 12 |
+
11,18292,"Untitled-1",20,0,"",python,selection_command
|
| 13 |
+
12,18709,"Untitled-1",0,0,"\n",python,content
|
| 14 |
+
13,19220,"Untitled-1",0,0,"\n",python,content
|
| 15 |
+
14,20569,"Untitled-1",0,0,"",python,selection_command
|
| 16 |
+
15,21793,"Untitled-1",101,63,"",python,content
|
| 17 |
+
16,23292,"Untitled-1",1,0,"",python,selection_command
|
| 18 |
+
17,23406,"Untitled-1",2,0,"",python,selection_command
|
| 19 |
+
18,26392,"Untitled-1",117,0,"replacement */\nREPLACED LINE 1\nREPLACED LINE 2\n/* crowd-pilot: ",python,content
|
| 20 |
+
19,26885,"Untitled-1",0,2,"",python,content
|
| 21 |
+
20,26890,"Untitled-1",20,0,"",python,selection_command
|
| 22 |
+
21,27086,"Untitled-1",67,125,"",python,content
|
| 23 |
+
22,27089,"Untitled-1",22,0,"",python,selection_command
|
| 24 |
+
23,28039,"TERMINAL",0,0,"undefinedfranzsrambical@MBF6N9WFVKFV ~ % echo ""Hello World""",,terminal_command
|
| 25 |
+
24,28039,"TERMINAL",0,0,"]633;CHello World\r\n[1m[7m%[27m[1m[0m \r \r",,terminal_output
|
| 26 |
+
25,28943,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 27 |
+
26,32101,"TERMINAL",0,0,"",,terminal_focus
|
| 28 |
+
27,32105,"Untitled-1",0,0,"",python,tab
|
| 29 |
+
28,33685,"Untitled-1",2397,0,"",python,selection_command
|
| 30 |
+
29,34704,"Untitled-1",2352,0,"",python,selection_command
|
| 31 |
+
30,34901,"Untitled-1",2347,0,"",python,selection_command
|
| 32 |
+
31,35070,"Untitled-1",2312,0,"",python,selection_command
|
| 33 |
+
32,35214,"Untitled-1",2285,0,"",python,selection_command
|
| 34 |
+
33,36277,"Untitled-1",2429,0,"",python,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-72482fee-a24c-4c9f-bb09-3efbfa32b9fa1765978902238-2025_12_17-14.41.49.56/source.csv
ADDED
|
@@ -0,0 +1,105 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,3,"src/extension.ts",0,0,"import * as vscode from 'vscode';\nimport * as http from 'http';\nimport * as fs from 'fs';\nimport * as path from 'path';\nimport { Buffer } from 'buffer';\nimport { ConversationStateManager, estimateTokens } from '@crowd-pilot/serializer';\n\n// -------------------- Preference Data Collection --------------------\n\ninterface PreferenceSample {\n\ttimestamp: number;\n\tcontext: Array<{ role: string; content: string }>;\n\tcompletion: {\n\t\trawModelOutput: string;\n\t\tparsedAction: Action | null;\n\t\tavgLogprob: number;\n\t};\n\toutcome: 'accepted' | 'rejected' | 'ignored' | null;\n\toutcomeTimestamp: number | null;\n\tmodelName: string;\n}\n\ninterface PendingPreferenceSample {\n\tsample: PreferenceSample;\n\tshownAt: number;\n}\n\nlet pendingPreferenceSample: PendingPreferenceSample | null = null;\n\nfunction getPreferenceLogPath(): string {\n\tconst cfg = getConfig();\n\tif (cfg.preferenceLogPath) {\n\t\treturn cfg.preferenceLogPath;\n\t}\n\tconst workspaceFolders = vscode.workspace.workspaceFolders;\n\tif (workspaceFolders) {\n\t\treturn path.join(workspaceFolders[0].uri.fsPath, '.crowd-pilot-preferences.jsonl');\n\t}\n\tthrow new Error(""No preference log path found."");\n}\n\n/**\n * Log a preference sample to the JSONL file.\n * Each line is a complete JSON object for easy streaming/parsing.\n */\nfunction logPreferenceSample(sample: PreferenceSample): void {\n\tconst cfg = getConfig();\n\tif (!cfg.enablePreferenceLogging) {\n\t\tconsole.log(`[crowd-pilot] Preference logging disabled, skipping sample`);\n\t\treturn;\n\t}\n\n\tconst logPath = getPreferenceLogPath();\n\tconst line = JSON.stringify(sample) + '\n';\n\t\n\tfs.appendFile(logPath, line, (err) => {\n\t\tif (err) {\n\t\t\tconsole.error('[crowd-pilot] Failed to log preference sample:', err);\n\t\t} else {\n\t\t\tconsole.log(`[crowd-pilot] Logged preference sample, outcome: (${sample.outcome})`);\n\t\t}\n\t});\n}\n\n/**\n * Create a new pending preference sample when showing a preview.\n * This captures all context needed for reward model training.\n */\nfunction createPendingPreferenceSample(\n\tconversationMessages: Array<{ role: string; content: string }>,\n\trawModelOutput: string,\n\tparsedAction: Action | null,\n\tavgLogprob: number,\n\tmodelName: string\n): void {\n\tconst sample: PreferenceSample = {\n\t\ttimestamp: Date.now(),\n\t\tcontext: conversationMessages,\n\t\tcompletion: {\n\t\t\trawModelOutput,\n\t\t\tparsedAction,\n\t\t\tavgLogprob,\n\t\t},\n\t\toutcome: null,\n\t\toutcomeTimestamp: null,\n\t\tmodelName,\n\t};\n\n\tpendingPreferenceSample = {\n\t\tsample,\n\t\tshownAt: Date.now(),\n\t};\n}\n\n/**\n * Record the outcome of the current pending sample and log it.\n */\nfunction recordPreferenceOutcome(outcome: 'accepted' | 'rejected' | 'ignored'): void {\n\tif (!pendingPreferenceSample) {\n\t\treturn;\n\t}\n\n\tconst sample = pendingPreferenceSample.sample;\n\tsample.outcome = outcome;\n\tsample.outcomeTimestamp = Date.now();\n\n\tlogPreferenceSample(sample);\n\n\tpendingPreferenceSample = null;\n}\n\n/**\n * Mark any pending sample as ignored (user moved on without explicit accept/reject).\n */\nfunction markPendingAsIgnored(): void {\n\tif (pendingPreferenceSample) {\n\t\trecordPreferenceOutcome('ignored');\n\t}\n}\n\ntype Action =\n| { kind: 'showTextDocument' }\n| { kind: 'setSelections', selections: Array<{ start: [number, number], end: [number, number] }> }\n| { kind: 'editInsert', position: [number, number], text: string }\n| { kind: 'editDelete', range: { start: [number, 
number], end: [number, number] } }\n| { kind: 'editReplace', range: { start: [number, number], end: [number, number] }, text: string }\n| { kind: 'terminalShow' }\n| { kind: 'terminalSendText', text: string }\n| { kind: 'openFile', filePath: string, selections?: Array<{ start: [number, number], end: [number, number] }> };\n\n// Configuration helper\nfunction getConfig() {\n\tconst config = vscode.workspace.getConfiguration('crowd-pilot');\n\treturn {\n\t\thostname: config.get<string>('hostname', 'hai001'),\n\t\tport: config.get<number>('port', 30000),\n\t\tbasePath: config.get<string>('basePath', '/v1/chat/completions'),\n\t\tmodelName: config.get<string>('modelName', 'qwen/qwen3-8b'),\n\t\tminAvgLogprob: config.get<number>('minAvgLogprob', -1.0),\n\t\tmaxContextTokens: config.get<number>('maxContextTokens', 120000),\n\t\tpreferenceLogPath: config.get<string>('preferenceLogPath', ''),\n\t\tenablePreferenceLogging: config.get<boolean>('enablePreferenceLogging', true),\n\t};\n}\n\n// -------------------- Context Window Management --------------------\n\n/**\n * Truncate conversation messages to fit within the context window.\n * Assumes system prompt is the first message. Drops oldest conversation messages first.\n */\nfunction truncateToContextLimit(\n\tmessages: Array<{ role: 'system' | 'user' | 'assistant'; content: string }>,\n\tmaxTokens: number\n): Array<{ role: 'system' | 'user' | 'assistant'; content: string }> {\n\tif (messages.length === 0) { return messages; }\n\n\tconst systemTokens = estimateTokens(messages[0].content);\n\tconst availableTokens = maxTokens - systemTokens;\n\n\tconst tokenCounts = messages.slice(1).map(m => estimateTokens(m.content));\n\tconst totalConversationTokens = tokenCounts.reduce((a, b) => a + b, 0);\n\n\tif (totalConversationTokens <= availableTokens) {\n\t\treturn messages;\n\t}\n\n\tlet keptTokens = 0;\n\tlet cutoffIndex = tokenCounts.length;\n\tfor (let i = tokenCounts.length - 1; i >= 0; i--) {\n\t\tif (keptTokens + tokenCounts[i] <= availableTokens) {\n\t\t\tkeptTokens += tokenCounts[i];\n\t\t\tcutoffIndex = i;\n\t\t} else {\n\t\t\tbreak;\n\t\t}\n\t}\n\n\tconsole.log(`[crowd-pilot] Truncated ${cutoffIndex} oldest messages (${systemTokens + totalConversationTokens} -> ${systemTokens + keptTokens} tokens)`);\n\treturn [messages[0], ...messages.slice(cutoffIndex + 1)];\n}\n\n\n// Global conversation state manager instance\nconst conversationManager = new ConversationStateManager();\n\n// Track activated files (files whose content we've captured)\n// TODO (f.srambical): This logic remains on the extension-side\n// for backwards-compatibility (with the crowd-code dataset).\n// Eventually, we should move the file tracking logic to\n// p-doom/crowd-pilot-serializer.\nconst activatedFiles = new Set<string>();\n\n/**\n * Clear all conversation context - resets the conversation manager and activated files.\n * Call this to start fresh without accumulated history.\n */\nfunction clearContext(): void {\n\tconversationManager.reset();\n\tactivatedFiles.clear();\n\tconsole.log('[crowd-pilot] Context cleared');\n}\n\nlet suggestionsEnabled = true;\nlet statusBarItem: vscode.StatusBarItem | undefined;\n\nfunction updateStatusBarItem(): void {\n\tif (!statusBarItem) { return; }\n\tif (suggestionsEnabled) {\n\t\tstatusBarItem.text = '$(lightbulb) crowd-pilot';\n\t\tstatusBarItem.tooltip = 'crowd-pilot: Tab suggestions enabled (click to disable)';\n\t\tstatusBarItem.backgroundColor = undefined;\n\t} else {\n\t\tstatusBarItem.text = '$(lightbulb-autofix) 
crowd-pilot';\n\t\tstatusBarItem.tooltip = 'crowd-pilot: Tab suggestions disabled (click to enable)';\n\t\tstatusBarItem.backgroundColor = new vscode.ThemeColor('statusBarItem.warningBackground');\n\t}\n}\n\nexport function activate(context: vscode.ExtensionContext) {\n\n\tconsole.log('[crowd-pilot] Extension activated');\n\n\t(async () => {\n\t\tconst config = vscode.workspace.getConfiguration('terminal.integrated');\n\t\tconst commandsToSkipShell = config.get<string[]>('commandsToSkipShell', []);\n\t\tlet updated = false;\n\t\tif (!commandsToSkipShell.includes('crowd-pilot.modelRun')) {\n\t\t\tcommandsToSkipShell.push('crowd-pilot.modelRun');\n\t\t\tupdated = true;\n\t\t}\n\t\tif (!commandsToSkipShell.includes('crowd-pilot.hideUi')) {\n\t\t\tcommandsToSkipShell.push('crowd-pilot.hideUi');\n\t\t\tupdated = true;\n\t\t}\n\t\tif (updated) {\n\t\t\tawait config.update('commandsToSkipShell', commandsToSkipShell, vscode.ConfigurationTarget.Global);\n\t\t}\n\t})().catch((err) => console.error('[crowd-pilot] Startup initialization error:', err));\n\n\tstatusBarItem = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Right, 100);\n\tstatusBarItem.command = 'crowd-pilot.toggleSuggestions';\n\tupdateStatusBarItem();\n\tstatusBarItem.show();\n\tcontext.subscriptions.push(statusBarItem);\n\n\tconst toggleSuggestions = vscode.commands.registerCommand('crowd-pilot.toggleSuggestions', () => {\n\t\tsuggestionsEnabled = !suggestionsEnabled;\n\t\tupdateStatusBarItem();\n\t\tif (!suggestionsEnabled) {\n\t\t\thidePreviewUI(true);\n\t\t}\n\t\tvscode.window.showInformationMessage(\n\t\t\tsuggestionsEnabled \n\t\t\t\t? '[crowd-pilot]: Tab suggestions enabled' \n\t\t\t\t: '[crowd-pilot]: Tab suggestions disabled'\n\t\t);\n\t});\n\n\tconst hideUi = vscode.commands.registerCommand('crowd-pilot.hideUi', () => {\n\t\trecordPreferenceOutcome('rejected');\n\t\thidePreviewUI(true);\n\t});\n\n\tconst clearContextCmd = vscode.commands.registerCommand('crowd-pilot.clearContext', () => {\n\t\tclearContext();\n\t\tvscode.window.showInformationMessage('[crowd-pilot]: Context cleared');\n\t});\n\n\tconst openPreferenceLogCmd = vscode.commands.registerCommand('crowd-pilot.openPreferenceLog', async () => {\n\t\tconst logPath = getPreferenceLogPath();\n\t\ttry {\n\t\t\tconst uri = vscode.Uri.file(logPath);\n\t\t\tawait vscode.window.showTextDocument(uri);\n\t\t} catch (err: any) {\n\t\t\tif (err.code === 'ENOENT' || err.message?.includes('ENOENT')) {\n\t\t\t\tvscode.window.showInformationMessage('[crowd-pilot] No preference log file exists yet. Accept or reject some suggestions first.');\n\t\t\t} else {\n\t\t\t\tvscode.window.showErrorMessage(`[crowd-pilot] Error opening preference log: ${err.message}`);\n\t\t\t}\n\t\t}\n\t});\n\n\tconst modelRun = vscode.commands.registerCommand('crowd-pilot.modelRun', async () => {\n\t\tconst editor = vscode.window.activeTextEditor;\n\t\tif (!editor) {\n\t\t\treturn;\n\t\t}\n\t\ttry {\n\t\t\tif (!previewVisible) { return; }\n\t\t\tlet action: Action | undefined = currentAction;\n\t\t\tif (!action) {\n\t\t\t\tconst single = await requestModelActions(editor);\n\t\t\t\tcurrentAction = single;\n\t\t\t\taction = single;\n\t\t\t}\n\t\t\tif (!action) {\n\t\t\t\thidePreviewUI();\n\t\t\t\treturn;\n\t\t\t}\n\t\t\trecordPreferenceOutcome('accepted');\n\t\t\thidePreviewUI(false);\n\t\t\tawait executeAction(action);\n\t\t\tautoShowNextAction();\n\t\t} catch (err) {\n\t\t\tconst errorMessage = err instanceof Error ? 
err.message : String(err);\n\t\t\tvscode.window.showErrorMessage(`Model run failed: ${errorMessage}`);\n\t\t}\n\t});\n\n\tconst sglangTest = vscode.commands.registerCommand('crowd-pilot.sglangTest', async () => {\n\t\ttry {\n\t\t\tawait callSGLangChat();\n\t\t} catch (err) {\n\t\t\tconst errorMessage = err instanceof Error ? err.message : String(err);\n\t\t\tvscode.window.showErrorMessage(`SGLang test failed: ${errorMessage}`);\n\t\t}\n\t});\n\n\tconst onSelChange = vscode.window.onDidChangeTextEditorSelection((e) => {\n\t\tif (e.textEditor === vscode.window.activeTextEditor) {\n\t\t\tsuppressAutoPreview = false;\n\t\t\tschedulePredictionRefresh(true, false);\n\n\t\t\tconst editor = e.textEditor;\n\t\t\tconst selection = e.selections[0];\n\t\t\tif (selection) {\n\t\t\t\tconst filePath = editor.document.uri.fsPath;\n\t\t\t\tconst offset = editor.document.offsetAt(selection.start);\n\t\t\t\tconversationManager.handleSelectionEvent(filePath, offset);\n\t\t\t}\n\t\t}\n\t});\n\n\tconst onActiveChange = vscode.window.onDidChangeActiveTextEditor((editor) => {\n\t\tsuppressAutoPreview = false;\n\t\tschedulePredictionRefresh(true, false);\n\n\t\tif (editor) {\n\t\t\tconst filePath = editor.document.uri.fsPath;\n\t\t\tconst currentFileUri = editor.document.uri.toString();\n\t\t\tlet tabEventText: string | null = null;\n\n\t\t\tif (!activatedFiles.has(currentFileUri)) {\n\t\t\t\ttabEventText = editor.document.getText();\n\t\t\t\tactivatedFiles.add(currentFileUri);\n\t\t\t}\n\n\t\t\tconversationManager.handleTabEvent(filePath, tabEventText);\n\t\t}\n\t});\n\n\tconst onDocChange = vscode.workspace.onDidChangeTextDocument((e) => {\n\t\tif (vscode.window.activeTextEditor?.document === e.document) {\n\t\t\tsuppressAutoPreview = false;\n\t\t\tschedulePredictionRefresh(true, false);\n\n\t\t\tconst filePath = e.document.uri.fsPath;\n\t\t\tfor (const change of e.contentChanges) {\n\t\t\t\tconst offset = change.rangeOffset;\n\t\t\t\tconst length = change.rangeLength;\n\t\t\t\tconst newText = change.text;\n\t\t\t\tconversationManager.handleContentEvent(filePath, offset, length, newText);\n\t\t\t}\n\t\t}\n\t});\n\n\t// Terminal focus event\n\tconst onTerminalChange = vscode.window.onDidChangeActiveTerminal((terminal) => {\n\t\tif (terminal) {\n\t\t\tconversationManager.handleTerminalFocusEvent();\n\t\t}\n\t});\n\n\t// Terminal command execution event\n\tconst onTerminalCommand = vscode.window.onDidStartTerminalShellExecution(async (event) => {\n\t\tconst commandLine = event.execution.commandLine.value;\n\t\tconversationManager.handleTerminalCommandEvent(commandLine);\n\n\t\t// Capture terminal output\n\t\tconst stream = event.execution.read();\n\t\tfor await (const data of stream) {\n\t\t\tconversationManager.handleTerminalOutputEvent(data);\n\t\t}\n\t});\n\n\tcontext.subscriptions.push(\n\t\ttoggleSuggestions,\n\t\thideUi,\n\t\tclearContextCmd,\n\t\topenPreferenceLogCmd,\n\t\tsglangTest,\n\t\tmodelRun,\n\t\tonSelChange,\n\t\tonActiveChange,\n\t\tonDocChange,\n\t\tonTerminalChange,\n\t\tonTerminalCommand\n\t);\n\n\t// Initialize: capture current active editor if any\n\tconst initialEditor = vscode.window.activeTextEditor;\n\tif (initialEditor) {\n\t\tconst filePath = initialEditor.document.uri.fsPath;\n\t\tconst currentFileUri = initialEditor.document.uri.toString();\n\t\tconst tabEventText = initialEditor.document.getText();\n\t\tactivatedFiles.add(currentFileUri);\n\t\tconversationManager.handleTabEvent(filePath, tabEventText);\n\t}\n}\n\nexport function deactivate() {}\n\n// -------------------- Execution 
--------------------\nlet currentAction: Action | undefined;\n\nfunction getActiveOrCreateTerminal(): vscode.Terminal {\n\tif (vscode.window.activeTerminal) {\n\t\treturn vscode.window.activeTerminal;\n\t}\n\treturn vscode.window.createTerminal('crowd-pilot');\n}\n\nasync function executeAction(action: Action): Promise<void> {\n\tconst editor = vscode.window.activeTextEditor;\n\tif (!editor) { return; }\n\tconst doc = editor.document;\n\tif (action.kind === 'showTextDocument') {\n\t\tawait vscode.window.showTextDocument(doc);\n\t\treturn;\n\t}\n\tif (action.kind === 'setSelections') {\n\t\teditor.selections = action.selections.map(s => new vscode.Selection(\n\t\t\tnew vscode.Position(s.start[0], s.start[1]),\n\t\t\tnew vscode.Position(s.end[0], s.end[1])\n\t\t));\n\t\teditor.revealRange(editor.selections[0], vscode.TextEditorRevealType.InCenterIfOutsideViewport);\n\t\treturn;\n\t}\n\tif (action.kind === 'editInsert') {\n\t\tawait editor.edit((e: vscode.TextEditorEdit) => e.insert(new vscode.Position(action.position[0], action.position[1]), action.text));\n\t\treturn;\n\t}\n\tif (action.kind === 'editDelete') {\n\t\tconst range = new vscode.Range(\n\t\t\tnew vscode.Position(action.range.start[0], action.range.start[1]),\n\t\t\tnew vscode.Position(action.range.end[0], action.range.end[1])\n\t\t);\n\t\tawait editor.edit((e: vscode.TextEditorEdit) => e.delete(range));\n\t\treturn;\n\t}\n\tif (action.kind === 'editReplace') {\n\t\tconst range = new vscode.Range(\n\t\t\tnew vscode.Position(action.range.start[0], action.range.start[1]),\n\t\t\tnew vscode.Position(action.range.end[0], action.range.end[1])\n\t\t);\n\t\tawait editor.edit((e: vscode.TextEditorEdit) => e.replace(range, action.text));\n\t\treturn;\n\t}\n\tif (action.kind === 'terminalShow') {\n\t\tconst term = getActiveOrCreateTerminal();\n\t\tterm.show();\n\t\treturn;\n\t}\n\tif (action.kind === 'terminalSendText') {\n\t\tconst term = getActiveOrCreateTerminal();\n\t\tterm.show();\n\t\tterm.sendText(action.text, false);\n\t\treturn;\n\t}\n\tif (action.kind === 'openFile') {\n\t\tconst uri = vscode.Uri.file(action.filePath);\n\t\tconst openedEditor = await vscode.window.showTextDocument(uri);\n\t\tif (action.selections) {\n\t\t\topenedEditor.selections = action.selections.map(s => new vscode.Selection(\n\t\t\t\tnew vscode.Position(s.start[0], s.start[1]),\n\t\t\t\tnew vscode.Position(s.end[0], s.end[1])\n\t\t\t));\n\t\t\topenedEditor.revealRange(openedEditor.selections[0], vscode.TextEditorRevealType.InCenterIfOutsideViewport);\n\t\t}\n\t\treturn;\n\t}\n}\n\n// -------------------- UI State & Helpers --------------------\nconst UI_CONTEXT_KEY = 'crowdPilot.uiVisible';\nlet previewVisible = false;\nlet decorationDeleteType: vscode.TextEditorDecorationType | undefined;\nlet decorationReplaceType: vscode.TextEditorDecorationType | undefined;\nlet decorationReplaceBlockType: vscode.TextEditorDecorationType | undefined;\nlet mockStep = 0;\nlet suppressAutoPreview = false;\nlet latestRequestId = 0;\nlet currentAbortController: AbortController | undefined;\n\nconst PREDICTION_DEBOUNCE_MS = 150;\nconst PREDICTION_THROTTLE_MS = 300;\n\ntype PendingPrediction = { id: number; timer: NodeJS.Timeout };\n\nlet nextQueuedPredictionId = 0;\nlet pendingPredictions: PendingPrediction[] = [];\nconst cancelledPredictionIds = new Set<number>();\nlet lastPredictionTimestamp: number | undefined;\n\nfunction disposePreviewDecorations() {\n\ttry { decorationDeleteType?.dispose(); } catch {}\n\ttry { decorationReplaceType?.dispose(); } catch {}\n\ttry { 
decorationReplaceBlockType?.dispose(); } catch {}\n\tdecorationDeleteType = undefined;\n\tdecorationReplaceType = undefined;\n\tdecorationReplaceBlockType = undefined;\n}\n\nfunction getDynamicMargin(editor: vscode.TextEditor, anchorLine: number, text: string): string {\n\tconst lines = text.split(/\r?\n/);\n\tconst height = lines.length;\n\t\n\t// We need to check the document lines that will be covered by this panel.\n\t// The panel starts at 'anchorLine' and extends downwards by 'height' lines.\n\t// However, visually, since it's 'after', it sits to the right of 'anchorLine',\n\t// and then flows down.\n\t// So we check document lines from anchorLine to anchorLine + height - 1.\n\t\n\tconst doc = editor.document;\n\tlet maxLen = 0;\n\tconst startLine = anchorLine;\n\tconst endLine = Math.min(doc.lineCount - 1, anchorLine + height - 1);\n\t\n\tfor (let i = startLine; i <= endLine; i++) {\n\t\tconst lineText = doc.lineAt(i).text;\n\t\tconst len = lineText.replace(/\t/g, ' ').length;\n\t\tif (len > maxLen) {\n\t\t\tmaxLen = len;\n\t\t}\n\t}\n\t\n\tconst anchorLineText = doc.lineAt(anchorLine).text;\n\tconst anchorLen = anchorLineText.replace(/\t/g, ' ').length;\n\t\n\tconst diff = Math.max(0, maxLen - anchorLen);\n\tconst margin = diff + 4; \n\treturn `${margin}ch`;\n}\n\nfunction showPreviewUI(action: Action): void {\n\tconst editor = vscode.window.activeTextEditor;\n\tif (!editor) { return; }\n\tdisposePreviewDecorations();\n\n\tconst next = (action.kind === 'editInsert' || action.kind === 'editDelete' || action.kind === 'editReplace' || action.kind === 'terminalSendText' || action.kind === 'setSelections' || action.kind === 'openFile') ? action : undefined;\n\tif (!next) {\n\t\tpreviewVisible = false;\n\t\tvscode.commands.executeCommand('setContext', UI_CONTEXT_KEY, false);\n\t\tcurrentAction = action;\n\t\treturn;\n\t}\n\n\tconst trimText = (t: string) => {\n\t\tconst oneLine = t.replace(/\r?\n/g, '\\n');\n\t\treturn oneLine.length > 80 ? 
oneLine.slice(0, 77) + '…' : oneLine;\n\t};\n\n\tif (next.kind === 'setSelections') {\n\t\tconst selection = next.selections[0];\n\t\tconst targetPos = new vscode.Position(selection.start[0], selection.start[1]);\n\t\tconst isVisible = editor.visibleRanges.some(r => r.contains(targetPos));\n\t\t\n\t\tlet anchorPos = targetPos;\n\t\tlet label = ""↳ Move Cursor Here"";\n\n\t\tif (!isVisible && editor.visibleRanges.length > 0) {\n\t\t\tconst firstVisible = editor.visibleRanges[0].start;\n\t\t\tconst lastVisible = editor.visibleRanges[editor.visibleRanges.length - 1].end;\n\t\t\t\n\t\t\tif (targetPos.isBefore(firstVisible)) {\n\t\t\t\tanchorPos = new vscode.Position(firstVisible.line, Number.MAX_VALUE);\n\t\t\t} else {\n\t\t\t\tanchorPos = new vscode.Position(lastVisible.line, Number.MAX_VALUE);\n\t\t\t}\n\n\t\t\tif (targetPos.line < anchorPos.line) {\n\t\t\t\tlabel = `↑ Move Cursor to Line ${targetPos.line + 1}`;\n\t\t\t} else {\n\t\t\t\tlabel = `↓ Move Cursor to Line ${targetPos.line + 1}`;\n\t\t\t}\n\t\t}\n\n\t\tconst margin = getDynamicMargin(editor, anchorPos.line, label);\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: pre; content: ""${label}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top;`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t} else if (next.kind === 'terminalSendText') {\n\t\tconst cursor = editor.selection.active;\n\t\tconst isVisible = editor.visibleRanges.some(r => r.contains(cursor));\n\t\t\n\t\tlet anchorPos = new vscode.Position(cursor.line, Number.MAX_VALUE);\n\t\t\n\t\tif (!isVisible && editor.visibleRanges.length > 0) {\n\t\t\tconst firstVisible = editor.visibleRanges[0].start;\n\t\t\tconst lastVisible = editor.visibleRanges[editor.visibleRanges.length - 1].end;\n\t\t\t\n\t\t\tif (cursor.isBefore(firstVisible)) {\n\t\t\t\tanchorPos = new vscode.Position(firstVisible.line, Number.MAX_VALUE);\n\t\t\t} else {\n\t\t\t\tanchorPos = new vscode.Position(lastVisible.line, Number.MAX_VALUE);\n\t\t\t}\n\t\t}\n\t\t\n\t\tconst summary = trimText(next.text || '');\n\t\tconst label = `↳ Execute shell command in terminal: ${summary}`;\n\t\tconst margin = getDynamicMargin(editor, anchorPos.line, label);\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: pre; content: ""${label.replace(/""/g, '\\""')}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top;`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t} else if 
(next.kind === 'editInsert') {\n\t\tconst posLine = next.position[0];\n\t\tconst fullBlock = next.text;\n\t\tconst cssContent = fullBlock\n\t\t\t.replace(/""/g, '\\""')\n\t\t\t.replace(/\r?\n/g, '\\A ');\n\n\t\tconst docLineCount = editor.document.lineCount;\n\t\tlet anchorLine = posLine;\n\t\tlet shiftUp = true;\n\t\t\n\t\tif (anchorLine >= docLineCount) {\n\t\t\tanchorLine = docLineCount - 1;\n\t\t\tshiftUp = false;\n\t\t}\n\n\t\tconst anchorPos = new vscode.Position(anchorLine, Number.MAX_VALUE); \n\t\t\n\t\tconst marginCheckLine = anchorLine;\n\t\tconst margin = getDynamicMargin(editor, marginCheckLine, fullBlock);\n\n\t\tconst topOffset = '0';\n\n\t\tconst beforeDecoration = {\n\t\t\tcontentText: '',\n\t\t\ttextDecoration: `none; position: absolute; left: 0; width: 100vw; border-top: 1px dashed var(--vscode-charts-purple); top: 0; height: 0; z-index: 99; pointer-events: none;`\n\t\t};\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tbefore: beforeDecoration,\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: pre; content: ""${cssContent}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top; top: ${topOffset};`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t} else if (next.kind === 'editDelete') {\n\t\tconst range = new vscode.Range(\n\t\t\tnew vscode.Position(next.range.start[0], next.range.start[1]),\n\t\t\tnew vscode.Position(next.range.end[0], next.range.end[1])\n\t\t);\n\t\tdecorationDeleteType = vscode.window.createTextEditorDecorationType({\n\t\t\tbackgroundColor: 'rgba(255, 60, 60, 0.18)',\n\t\t\tborder: '1px solid rgba(255, 60, 60, 0.35)',\n\t\t\ttextDecoration: 'line-through'\n\t\t});\n\t\teditor.setDecorations(decorationDeleteType, [{ range }]);\n\t} else if (next.kind === 'editReplace') {\n\t\tconst range = new vscode.Range(\n\t\t\tnew vscode.Position(next.range.start[0], next.range.start[1]),\n\t\t\tnew vscode.Position(next.range.end[0], next.range.end[1])\n\t\t);\n\t\tdecorationReplaceType = vscode.window.createTextEditorDecorationType({\n\t\t\tbackgroundColor: 'rgba(255,165,0,0.15)',\n\t\t\tborder: '1px dashed rgba(255,165,0,0.45)',\n\t\t\tcolor: new vscode.ThemeColor('disabledForeground'),\n\t\t\ttextDecoration: 'line-through'\n\t\t});\n\t\teditor.setDecorations(decorationReplaceType, [{ range }]);\n\n\t\tconst fullBlock = next.text;\n\t\t\n\t\tconst cssContent = fullBlock\n\t\t\t.replace(/""/g, '\\""')\n\t\t\t.replace(/\r?\n/g, '\\A '); \n\n\t\tconst anchorLine = range.start.line;\n\t\tconst anchorPos = new vscode.Position(anchorLine, Number.MAX_VALUE);\n\t\tconst margin = getDynamicMargin(editor, anchorLine, fullBlock);\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: 
pre; content: ""${cssContent}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top;`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t} else if (next.kind === 'openFile') {\n\t\tconst cursor = editor.selection.active;\n\t\tconst isVisible = editor.visibleRanges.some(r => r.contains(cursor));\n\t\t\n\t\tlet anchorPos = new vscode.Position(cursor.line, Number.MAX_VALUE);\n\t\t\n\t\tif (!isVisible && editor.visibleRanges.length > 0) {\n\t\t\tconst firstVisible = editor.visibleRanges[0].start;\n\t\t\tconst lastVisible = editor.visibleRanges[editor.visibleRanges.length - 1].end;\n\t\t\t\n\t\t\tif (cursor.isBefore(firstVisible)) {\n\t\t\t\tanchorPos = new vscode.Position(firstVisible.line, Number.MAX_VALUE);\n\t\t\t} else {\n\t\t\t\tanchorPos = new vscode.Position(lastVisible.line, Number.MAX_VALUE);\n\t\t\t}\n\t\t}\n\t\t\n\t\tconst fileName = next.filePath.split(/[/\\]/).pop() || next.filePath;\n\t\tconst targetLine = next.selections?.[0]?.start[0];\n\t\tconst label = targetLine !== undefined\n\t\t\t? `↳ Switch to file: ${fileName}:${targetLine + 1}` // Display as 1-based\n\t\t\t: `↳ Switch to file: ${fileName}`;\n\t\tconst margin = getDynamicMargin(editor, anchorPos.line, label);\n\n\t\tdecorationReplaceBlockType = vscode.window.createTextEditorDecorationType({\n\t\t\tafter: {\n\t\t\t\tcontentText: '',\n\t\t\t\tcolor: new vscode.ThemeColor('charts.purple'),\n\t\t\t\tbackgroundColor: new vscode.ThemeColor('editor.background'),\n\t\t\t\tfontStyle: 'italic',\n\t\t\t\tfontWeight: '600',\n\t\t\t\tmargin: `0 0 0 ${margin}`,\n\t\t\t\ttextDecoration: `none; display: inline-block; white-space: pre; content: ""${label.replace(/""/g, '\\""')}""; border: 1px solid var(--vscode-charts-purple); padding: 4px; border-radius: 4px; box-shadow: 0 4px 8px rgba(0,0,0,0.25); pointer-events: none; position: relative; z-index: 100; vertical-align: top;`\n\t\t\t}\n\t\t});\n\t\teditor.setDecorations(decorationReplaceBlockType, [{ range: new vscode.Range(anchorPos, anchorPos) }]);\n\t}\n\n\tpreviewVisible = true;\n\tvscode.commands.executeCommand('setContext', UI_CONTEXT_KEY, true);\n\tcurrentAction = action;\n}\n\nfunction hidePreviewUI(suppress?: boolean): void {\n\tdisposePreviewDecorations();\n\tpreviewVisible = false;\n\tvscode.commands.executeCommand('setContext', UI_CONTEXT_KEY, false);\n\tif (suppress) {\n\t\tsuppressAutoPreview = true;\n\t}\n}\n\n/**\n * Schedule a model preview refresh, coalescing rapid editor events and\n * throttling how often we actually talk to the model.\n */\nfunction schedulePredictionRefresh(debounce: boolean, userRequested: boolean): void {\n\tif (!suggestionsEnabled) {\n\t\treturn;\n\t}\n\tif (!userRequested && suppressAutoPreview) {\n\t\treturn;\n\t}\n\n\tconst editor = vscode.window.activeTextEditor;\n\tif (!editor) {\n\t\thidePreviewUI();\n\t\treturn;\n\t}\n\n\tif (!userRequested) {\n\t\tif (!vscode.window.state.focused) {\n\t\t\thidePreviewUI();\n\t\t\treturn;\n\t\t}\n\t\tif (editor.document.getText().length === 0) {\n\t\t\thidePreviewUI();\n\t\t\treturn;\n\t\t}\n\t}\n\n\tconst now = Date.now();\n\tconst id = ++nextQueuedPredictionId;\n\n\tlet delay = 0;\n\tif (debounce) {\n\t\tdelay = Math.max(delay, PREDICTION_DEBOUNCE_MS);\n\t}\n\tif (lastPredictionTimestamp !== null && lastPredictionTimestamp !== undefined) {\n\t\tconst elapsed = now - 
lastPredictionTimestamp;\n\t\tif (elapsed < PREDICTION_THROTTLE_MS) {\n\t\t\tdelay = Math.max(delay, PREDICTION_THROTTLE_MS - elapsed);\n\t\t}\n\t}\n\n\tconst timer = setTimeout(() => {\n\t\tif (cancelledPredictionIds.has(id)) {\n\t\t\tcancelledPredictionIds.delete(id);\n\t\t\treturn;\n\t\t}\n\n\t\tlastPredictionTimestamp = Date.now();\n\t\tpendingPredictions = pendingPredictions.filter(p => p.id !== id);\n\n\t\tvoid autoShowNextAction();\n\t}, delay);\n\n\tpendingPredictions.push({ id, timer });\n\n\tif (pendingPredictions.length > 2) {\n\t\tconst oldest = pendingPredictions.shift();\n\t\tif (oldest) {\n\t\t\tcancelledPredictionIds.add(oldest.id);\n\t\t\tclearTimeout(oldest.timer);\n\t\t}\n\t}\n}\n\nasync function autoShowNextAction(): Promise<void> {\n\tif (suppressAutoPreview) { return; }\n\tconst editor = vscode.window.activeTextEditor;\n\tif (!editor) { return; }\n\ttry {\n\t\tcurrentAbortController?.abort();\n\t\tconst controller = new AbortController();\n\t\tcurrentAbortController = controller;\n\t\tconst requestId = ++latestRequestId;\n\t\tconst next = await requestModelActions(editor, controller.signal);\n\t\tif (requestId !== latestRequestId) { return; }\n\t\tif (next) { showPreviewUI(next); } else { hidePreviewUI(); }\n\t} catch (err) {\n\t\tconst e = err as any;\n\t\tconst isAbort = e?.name === 'AbortError' || /aborted/i.test(String(e?.message ?? ''));\n\t\tif (isAbort) { return; }\n\t\thidePreviewUI();\n\t}\n}\n\n// -------------------- SGLang Client (simple test) --------------------\nasync function callSGLangChat(): Promise<void> {\n\tconst cfg = getConfig();\n\tconst headers: any = {\n\t\t'Content-Type': 'application/json'\n\t};\n\n\n\tconst requestBody: any = {\n\t\tmodel: cfg.modelName,\n\t\tmessages: [\n\t\t\t{ role: 'user', content: 'What is the capital of France?' }\n\t\t]\n\t};\n\trequestBody.temperature = 0.7;\n\trequestBody.top_p = 0.8;\n\trequestBody.top_k = 20;\n\trequestBody.min_p = 0;\n\trequestBody.chat_template_kwargs = {\n\t\tenable_thinking: false\n\t};\n\tconst postData = JSON.stringify(requestBody);\n\theaders['Content-Length'] = Buffer.byteLength(postData);\n\n\tconst options = {\n\t\thostname: cfg.hostname,\n\t\tport: cfg.port,\n\t\tpath: cfg.basePath,\n\t\tmethod: 'POST',\n\t\theaders\n\t};\n\n\n\ttry {\n\t\tconst json = await new Promise<any>((resolve, reject) => {\n\t\t\tconst req = http.request(options, (res: http.IncomingMessage) => {\n\t\t\t\tlet data = '';\n\t\t\t\tres.on('data', (chunk: Buffer) => {\n\t\t\t\t\tdata += chunk.toString();\n\t\t\t\t});\n\t\t\t\tres.on('end', () => {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tresolve(JSON.parse(data));\n\t\t\t\t\t} catch (err) {\n\t\t\t\t\t\treject(new Error(`Failed to parse response: ${err instanceof Error ? err.message : String(err)}`));\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t});\n\n\t\t\treq.on('error', (err: Error) => {\n\t\t\t\treject(err);\n\t\t\t});\n\n\t\t\treq.write(postData);\n\t\t\treq.end();\n\t\t});\n\n\t\tvscode.window.showInformationMessage(`Response: ${JSON.stringify(json, null, 2)}`);\n\t} catch (err) {\n\t\tconst errorMessage = err instanceof Error ? 
err.message : String(err);\n\t\tvscode.window.showErrorMessage(`Request failed: ${errorMessage}`);\n\t}\n}\n\n// -------------------- Model-planned Actions --------------------\nasync function requestModelActions(editor: vscode.TextEditor, signal?: AbortSignal): Promise<Action> {\n\tconst cfg = getConfig();\n\tconst headers: any = {\n\t\t'Content-Type': 'application/json'\n\t};\n\n\tconst doc = editor.document;\n\n\t// FIXME (f.srambical): This should be the system prompt that was used during serialization.\n\tconst systemPrompt = [\n\t\t'You are a helpful assistant that interacts with a computer shell to solve programming tasks.',\n\t\t'Your goal is to predict the next bash command a developer would most likely execute, given their editing and navigation history.',\n\t\t'',\n\t\t'=== CONVERSATION FORMAT ===',\n\t\t'The conversation history alternates between:',\n\t\t'- Assistant messages: bash commands in fenced code blocks',\n\t\t'- User messages: command output wrapped in <stdout>...</stdout> tags',\n\t\t'',\n\t\t'File contents are displayed with 6-character right-aligned line numbers followed by a tab, e.g.:',\n\t\t' 1\tfirst line',\n\t\t' 2\tsecond line',\n\t\t'',\n\t\t'File content is typically shown in viewports of ~20 lines around the area of interest.',\n\t\t'',\n\t\t'=== RESPONSE FORMAT ===',\n\t\t'Your response must contain exactly ONE bash code block with one command or two commands connected with &&.',\n\t\t'',\n\t\t'<format_example>',\n\t\t'```bash',\n\t\t'your_command_here',\n\t\t'```',\n\t\t'</format_example>',\n\t\t'',\n\t\t'Failure to follow these rules will cause your response to be rejected.',\n\t\t'',\n\t\t'=== EDIT COMMAND FORMAT (IMPORTANT) ===',\n\t\t'When you want to EDIT a file, you MUST encode the edit using line-based sed commands in ONE of the following forms,',\n\t\t'and you MUST NOT use substitution commands like ""Ns/old/new/g"".',\n\t\t'',\n\t\t'Assume all line numbers are 1-based and paths are absolute.',\n\t\t'Allowed edit encodings (choose exactly one per response):',\n\t\t'',\n\t\t'1) Replace a contiguous block of lines:',\n\t\t"" sed -i 'START,ENDc\\"",\n\t\t'NEW_LINE_1',\n\t\t'NEW_LINE_2',\n\t\t""..."",\n\t\t""' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'"",\n\t\t'',\n\t\t'2) Delete a contiguous block of lines:',\n\t\t"" sed -i 'START,ENDd' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'"",\n\t\t'',\n\t\t'3) Insert new lines BEFORE a given line:',\n\t\t"" sed -i 'STARTi\\"",\n\t\t'NEW_LINE_1',\n\t\t'NEW_LINE_2',\n\t\t""..."",\n\t\t""' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'"",\n\t\t'',\n\t\t'4) Append new lines at the END of the file:',\n\t\t"" sed -i '$a\\"",\n\t\t'NEW_LINE_1',\n\t\t'NEW_LINE_2',\n\t\t""..."",\n\t\t""' /abs/path/to/file && cat -n /abs/path/to/file | sed -n 'VSTART,VENDp'"",\n\t\t'',\n\t\t'Where VSTART and VEND specify a small viewport around the edited region.',\n\t\t'',\n\t\t'Do NOT emit commands like ""3s/print/print()/g"" or any other ""s/old/new/"" style sed substitution; instead,',\n\t\t'always rewrite the affected lines using one of the line-based forms above.',\n\t\t'',\n\t\t'When you are NOT editing files (e.g., running tests, git commands, tools, etc.), you may emit arbitrary bash commands.'\n\t].join('\n');\n\n\tconst accumulatedMessages = conversationManager.finalizeForModel();\n\t\n\tlet conversationMessages: Array<{ role: 'system' | 'user' | 'assistant'; content: string }> = [\n\t\t{ role: 'system', content: systemPrompt },\n\t];\n\t\n\tfor 
(const msg of accumulatedMessages) {\n\t\tconst role = msg.from === 'User' ? 'user' : 'assistant';\n\t\tconversationMessages.push({ role, content: msg.value });\n\t}\n\n\tconversationMessages = truncateToContextLimit(conversationMessages, cfg.maxContextTokens);\n\n\tconst requestBody: any = {\n\t\tmodel: cfg.modelName,\n\t\tmessages: conversationMessages\n\t};\n\trequestBody.temperature = 0.7;\n\trequestBody.top_p = 0.8;\n\trequestBody.top_k = 20;\n\trequestBody.min_p = 0;\n\trequestBody.logprobs = true;\n\trequestBody.chat_template_kwargs = {\n\t\tenable_thinking: false\n\t};\n\n\tconst postData = JSON.stringify(requestBody);\n\theaders['Content-Length'] = Buffer.byteLength(postData);\n\n\tconst options: any = {\n\t\thostname: cfg.hostname,\n\t\tport: cfg.port,\n\t\tpath: cfg.basePath,\n\t\tmethod: 'POST',\n\t\theaders\n\t};\n\tif (signal) {\n\t\toptions.signal = signal;\n\t}\n\n\tconst json = await new Promise<any>((resolve, reject) => {\n\t\tconst req = http.request(options, (res: http.IncomingMessage) => {\n\t\t\tlet data = '';\n\t\t\tres.on('data', (chunk: Buffer) => { data += chunk.toString(); });\n\t\t\tres.on('end', () => {\n\t\t\t\ttry {\n\t\t\t\t\tresolve(JSON.parse(data));\n\t\t\t\t} catch (err) {\n\t\t\t\t\treject(new Error(`Failed to parse response: ${err instanceof Error ? err.message : String(err)}`));\n\t\t\t\t}\n\t\t\t});\n\t\t});\n\t\treq.on('error', (err: Error) => reject(err));\n\t\treq.write(postData);\n\t\treq.end();\n\t});\n\n\tconst avgLogprob = calculateAverageLogprob(json);\n\tif (avgLogprob < cfg.minAvgLogprob) {\n\t\treturn undefined as any; // Low confidence, silently skip suggestion\n\t}\n\n\tconst content = extractChatContent(json);\n\tif (typeof content !== 'string' || content.trim().length === 0) {\n\t\tthrow new Error('Empty model content');\n\t}\n\tconst action = parseAction(content, doc);\n\tif (!action) {\n\t\tthrow new Error('No valid action parsed from model output');\n\t}\n\n\tmarkPendingAsIgnored();\n\n\tcreatePendingPreferenceSample(\n\t\tconversationMessages,\n\t\tcontent,\n\t\taction,\n\t\tavgLogprob,\n\t\tcfg.modelName\n\t);\n\n\treturn action;\n}\n\nfunction extractChatContent(json: any): string | undefined {\n\ttry {\n\t\tif (json && Array.isArray(json.choices) && json.choices[0]) {\n\t\t\tconst choice = json.choices[0];\n\t\t\tif (choice.message && typeof choice.message.content === 'string') {\n\t\t\t\treturn choice.message.content;\n\t\t\t}\n\t\t\tif (typeof choice.text === 'string') {\n\t\t\t\treturn choice.text;\n\t\t\t}\n\t\t}\n\t\treturn undefined;\n\t} catch {\n\t\treturn undefined;\n\t}\n}\n\n/**\n * Calculate average logprob per token from the API response.\n * Returns the mean of logprobs across all tokens (negative value, closer to 0 = more confident).\n * Returns -Infinity if logprobs are not available.\n */\nfunction calculateAverageLogprob(json: any): number {\n\tconst logprobs = json.choices[0]?.logprobs;\n\tconst sum = logprobs.content.reduce((s: number, t: any) => s + t.logprob, 0);\n\treturn sum / logprobs.content.length;\n}\n\nfunction parseAction(raw: string, doc?: vscode.TextDocument): Action | undefined {\n\tconst command = extractBashCommand(raw);\n\tif (!command) {\n\t\treturn undefined;\n\t}\n\tconst normalized = command.replace(/<think>[\s\S]*?<\/think>/gi, '').trim();\n\tif (!normalized) {\n\t\treturn undefined;\n\t}\n\tif (doc) {\n\t\tconst editAction = parseEditFromSedCommand(normalized, doc);\n\t\tif (editAction) {\n\t\t\treturn editAction;\n\t\t}\n\t\tconst viewportAction = parseViewportFromCatCommand(normalized, 
doc);\n\t\tif (viewportAction) {\n\t\t\treturn viewportAction;\n\t\t}\n\t}\n\treturn { kind: 'terminalSendText', text: normalized };\n}\n\n/**\n * Parse a sed-based edit command of the form emitted by the NeMo serializer into a VS Code edit action.\n *\n * Supported patterns (1-based line numbers, mirroring serialization_utils.py):\n * sed -i 'START,ENDc\n<replacement...>' <file> -> editReplace\n * sed -i 'START,ENDd' <file> -> editDelete\n * sed -i 'STARTi\n<insert...>' <file> -> editInsert (before START)\n * sed -i '$a\n<append...>' <file> -> editInsert (append at EOF)\n *\n * If the command does not match these patterns, returns undefined.\n */\nfunction parseEditFromSedCommand(command: string, doc: vscode.TextDocument): Action | undefined {\n\t// Only consider the first command before && / ||, since cat -n etc. are for viewport only.\n\tconst main = command.split(/&&|\|\|/)[0]?.trim() ?? '';\n\tif (!main) {\n\t\treturn undefined;\n\t}\n\n\t// Match: sed -i '<script>' <file>\n\tconst sedMatch = main.match(/sed\s+-i\s+'([\s\S]*?)'\s+([^\s&|]+)\s*$/);\n\tif (!sedMatch) {\n\t\treturn undefined;\n\t}\n\tconst script = sedMatch[1] ?? '';\n\tconst targetFile = sedMatch[2] ?? '';\n\tconst activePath = doc.uri.fsPath;\n\tif (targetFile !== activePath) {\n\t\treturn undefined;\n\t}\n\n\t// Delete: ""START,ENDd""\n\tconst deleteMatch = script.match(/^(\d+),(\d+)d$/);\n\tif (deleteMatch) {\n\t\tconst startLine1 = Number(deleteMatch[1]);\n\t\tconst endLine1 = Number(deleteMatch[2]);\n\t\tif (!Number.isFinite(startLine1) || !Number.isFinite(endLine1)) {\n\t\t\treturn undefined;\n\t\t}\n\t\tconst startLine0 = Math.max(0, startLine1 - 1);\n\t\tconst endLine0 = Math.max(0, endLine1 - 1);\n\n\t\tlet endPosLine = endLine0 + 1;\n\t\tlet endPosChar = 0;\n\t\tif (endPosLine >= doc.lineCount) {\n\t\t\tendPosLine = doc.lineCount - 1;\n\t\t\tendPosChar = doc.lineAt(endPosLine).range.end.character;\n\t\t}\n\t\treturn {\n\t\t\tkind: 'editDelete',\n\t\t\trange: {\n\t\t\t\tstart: [startLine0, 0],\n\t\t\t\tend: [endPosLine, endPosChar],\n\t\t\t},\n\t\t};\n\t}\n\n\t// Replace: ""START,ENDc\newline<payload...>""\n\tconst replaceMatch = script.match(/^(\d+),(\d+)c\\\n([\s\S]*)$/);\n\tif (replaceMatch) {\n\t\tconst startLine1 = Number(replaceMatch[1]);\n\t\tconst endLine1 = Number(replaceMatch[2]);\n\t\tlet payload = replaceMatch[3] ?? '';\n\t\tif (!Number.isFinite(startLine1) || !Number.isFinite(endLine1)) {\n\t\t\treturn undefined;\n\t\t}\n\t\tpayload = payload.replace(/'\""'\""'/g, ""'"");\n\t\t// Convert escape sequences to actual characters\n\t\tpayload = payload.replace(/\\n/g, '\n').replace(/\\t/g, '\t').replace(/\\'/g, ""'"").replace(/\\\\/g, '\\');\n\t\tconst startLine0 = Math.max(0, startLine1 - 1);\n\t\tconst endLine0 = Math.max(0, endLine1 - 1);\n\t\tconst startPos: [number, number] = [startLine0, 0];\n\n\t\tlet endPosLine = endLine0 + 1;\n\t\tlet endPosChar = 0;\n\t\tif (endPosLine >= doc.lineCount) {\n\t\t\tendPosLine = doc.lineCount - 1;\n\t\t\tendPosChar = doc.lineAt(endPosLine).range.end.character;\n\t\t}\n\n\t\tconst text = payload.endsWith('\n') ? payload : payload + '\n';\n\t\treturn {\n\t\t\tkind: 'editReplace',\n\t\t\trange: { start: startPos, end: [endPosLine, endPosChar] },\n\t\t\ttext,\n\t\t};\n\t}\n\n\tconst insertMatch = script.match(/^(\d+)i\\\n([\s\S]*)$/);\n\tif (insertMatch) {\n\t\tconst line1 = Number(insertMatch[1]);\n\t\tlet payload = insertMatch[2] ?? 
'';\n\t\tif (!Number.isFinite(line1)) {\n\t\t\treturn undefined;\n\t\t}\n\t\tpayload = payload.replace(/'\""'\""'/g, ""'"");\n\t\t// Convert escape sequences to actual characters\n\t\tpayload = payload.replace(/\\n/g, '\n').replace(/\\t/g, '\t').replace(/\\'/g, ""'"").replace(/\\\\/g, '\\');\n\t\tconst insertLine0 = Math.max(0, line1 - 1);\n\t\tconst position: [number, number] = [insertLine0, 0];\n\t\tconst text = payload.endsWith('\n') ? payload : payload + '\n';\n\t\treturn {\n\t\t\tkind: 'editInsert',\n\t\t\tposition,\n\t\t\ttext,\n\t\t};\n\t}\n\n\tconst appendMatch = script.match(/^\$a\\\n([\s\S]*)$/);\n\tif (appendMatch) {\n\t\tlet payload = appendMatch[1] ?? '';\n\t\tpayload = payload.replace(/'\""'\""'/g, ""'"");\n\t\t// Convert escape sequences to actual characters\n\t\tpayload = payload.replace(/\\n/g, '\n').replace(/\\t/g, '\t').replace(/\\'/g, ""'"").replace(/\\\\/g, '\\');\n\t\tconst insertLine0 = doc.lineCount;\n\t\tconst position: [number, number] = [insertLine0, 0];\n\t\tconst needsLeadingNewline = doc.lineCount > 0;\n\t\tconst base = payload.endsWith('\n') ? payload : payload + '\n';\n\t\tconst text = needsLeadingNewline ? '\n' + base : base;\n\t\treturn {\n\t\t\tkind: 'editInsert',\n\t\t\tposition,\n\t\t\ttext,\n\t\t};\n\t}\n\n\treturn undefined;\n}\n\n/**\n * Parse viewport / selection commands of the form:\n * cat -n <file> | sed -n 'START,ENDp'\n *\n * into a lightweight VS Code selection move (setSelections). This mirrors how\n * selection and viewport events are serialized in serialization_utils.py.\n */\nfunction parseViewportFromCatCommand(command: string, doc: vscode.TextDocument): Action | undefined {\n\tconst main = command.split(/&&|\|\|/)[0]?.trim() ?? '';\n\tif (!main) {\n\t\treturn undefined;\n\t}\n\n\t// Simple file-open: cat -n <file>\n\tconst simpleCatMatch = main.match(/^cat\s+-n\s+([^\s|]+)\s*$/);\n\tif (simpleCatMatch) {\n\t\tconst targetFile = simpleCatMatch[1] ?? '';\n\t\tif (targetFile !== doc.uri.fsPath) {\n\t\t\treturn { kind: 'openFile', filePath: targetFile };\n\t\t}\n\t\t// Ensure the active document is visible; rely on existing editor to handle this.\n\t\treturn { kind: 'showTextDocument' };\n\t}\n\n\t// Viewport slice: cat -n <file> | sed -n 'START,ENDp'\n\tconst viewportMatch = main.match(/^cat\s+-n\s+([^\s|]+)\s*\|\s*sed\s+-n\s+'(\d+),(\d+)p'\s*$/);\n\tif (!viewportMatch) {\n\t\treturn undefined;\n\t}\n\n\tconst targetFile = viewportMatch[1] ?? '';\n\tconst startStr = viewportMatch[2] ?? '';\n\tconst endStr = viewportMatch[3] ?? 
'';\n\n\tconst startLine1 = Number(startStr);\n\tconst endLine1 = Number(endStr);\n\n\t// Place the cursor in the middle of the viewport (1-based to 0-based).\n\tconst center1 = Math.floor((startLine1 + endLine1) / 2);\n\tconst center0 = Math.max(0, center1 - 1);\n\n\tif (targetFile !== doc.uri.fsPath) {\n\t\treturn {\n\t\t\tkind: 'openFile',\n\t\t\tfilePath: targetFile,\n\t\t\tselections: [{ start: [center0, 0], end: [center0, 0] }]\n\t\t};\n\t}\n\tconst lastLine = Math.max(0, doc.lineCount - 1);\n\tconst line = Math.min(center0, lastLine);\n\n\treturn {\n\t\tkind: 'setSelections',\n\t\tselections: [\n\t\t\t{\n\t\t\t\tstart: [line, 0],\n\t\t\t\tend: [line, 0],\n\t\t\t},\n\t\t],\n\t};\n}\n\nfunction extractBashCommand(raw: string): string | undefined {\n\tif (!raw) {\n\t\treturn undefined;\n\t}\n\tconst trimmed = raw.trim();\n\tconst fenceMatch = trimmed.match(/```(?:bash)?\s*([\s\S]*?)```/i);\n\tif (fenceMatch && fenceMatch[1]) {\n\t\treturn fenceMatch[1];\n\t}\n\t// Fallback: treat entire response as the command\n\treturn trimmed.length > 0 ? trimmed : undefined;\n}",typescript,tab
|
| 3 |
+
2,237,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:41:49 PM [info] Activating crowd-code\n2:41:49 PM [info] Recording started\n2:41:49 PM [info] Initializing git provider using file system watchers...\n",Log,tab
|
| 4 |
+
3,273,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"2:41:49 PM [info] Git repository found\n2:41:49 PM [info] Git provider initialized successfully\n2:41:49 PM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,8412098,"src/extension.ts",0,0,"",typescript,tab
|
| 6 |
+
5,8496488,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 7 |
+
6,8498117,"TERMINAL",0,0,"",,terminal_focus
|
| 8 |
+
7,8498118,"src/extension.ts",0,0,"",typescript,tab
|
| 9 |
+
8,8722187,"README.md",0,0,"# crowd-pilot-extension\n\nThis is the Cursor/VS Code extension of crowd-pilot.",markdown,tab
|
| 10 |
+
9,8723660,"README.md",24,0,"",markdown,selection_command
|
| 11 |
+
10,8723761,"README.md",25,0,"",markdown,selection_command
|
| 12 |
+
11,8724317,"README.md",24,0,"",markdown,selection_command
|
| 13 |
+
12,8724469,"README.md",0,0,"",markdown,selection_command
|
| 14 |
+
13,8725311,"README.md",24,0,"",markdown,selection_command
|
| 15 |
+
14,8725351,"README.md",25,0,"",markdown,selection_command
|
| 16 |
+
15,8725935,"README.md",77,0,"\n",markdown,content
|
| 17 |
+
16,8726267,"README.md",78,0,"o",markdown,content
|
| 18 |
+
17,8726268,"README.md",79,0,"",markdown,selection_keyboard
|
| 19 |
+
18,8726745,"README.md",78,1,"",markdown,content
|
| 20 |
+
19,8726937,"README.md",78,0,"\n",markdown,content
|
| 21 |
+
20,8727366,"README.md",79,0,"#",markdown,content
|
| 22 |
+
21,8727367,"README.md",80,0,"",markdown,selection_keyboard
|
| 23 |
+
22,8727522,"README.md",80,0,"#",markdown,content
|
| 24 |
+
23,8727523,"README.md",81,0,"",markdown,selection_keyboard
|
| 25 |
+
24,8728022,"README.md",81,0," ",markdown,content
|
| 26 |
+
25,8728023,"README.md",82,0,"",markdown,selection_keyboard
|
| 27 |
+
26,8728841,"README.md",82,0,"B",markdown,content
|
| 28 |
+
27,8728842,"README.md",83,0,"",markdown,selection_keyboard
|
| 29 |
+
28,8729036,"README.md",83,0,"u",markdown,content
|
| 30 |
+
29,8729037,"README.md",84,0,"",markdown,selection_keyboard
|
| 31 |
+
30,8729116,"README.md",84,0,"i",markdown,content
|
| 32 |
+
31,8729116,"README.md",85,0,"",markdown,selection_keyboard
|
| 33 |
+
32,8729296,"README.md",85,0,"l",markdown,content
|
| 34 |
+
33,8729296,"README.md",86,0,"",markdown,selection_keyboard
|
| 35 |
+
34,8729359,"README.md",86,0,"d",markdown,content
|
| 36 |
+
35,8729359,"README.md",87,0,"",markdown,selection_keyboard
|
| 37 |
+
36,8729448,"README.md",87,0," ",markdown,content
|
| 38 |
+
37,8729448,"README.md",88,0,"",markdown,selection_keyboard
|
| 39 |
+
38,8729700,"README.md",82,6,"",markdown,content
|
| 40 |
+
39,8731411,"README.md",79,3,"",markdown,content
|
| 41 |
+
40,8732078,"README.md",78,1,"",markdown,content
|
| 42 |
+
41,8733477,"README.md",77,1,"",markdown,content
|
| 43 |
+
42,8733483,"README.md",25,0,"",markdown,selection_command
|
| 44 |
+
43,8734623,"src/extension.ts",0,0,"",typescript,tab
|
| 45 |
+
44,8743745,"TERMINAL",0,0,"vsce package",,terminal_command
|
| 46 |
+
45,8743752,"TERMINAL",0,0,"]633;C/usr/bin/env: ‘node’: No such file or directory\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 47 |
+
46,8748589,"TERMINAL",0,0,"module load nodejs",,terminal_command
|
| 48 |
+
47,8748640,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 49 |
+
48,8749221,"TERMINAL",0,0,"]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 50 |
+
49,8862726,"TERMINAL",0,0,"vsce package",,terminal_command
|
| 51 |
+
50,8862770,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 52 |
+
51,8866792,"TERMINAL",0,0,"Executing prepublish script 'npm run vscode:prepublish'...\r\n",,terminal_output
|
| 53 |
+
52,8868729,"TERMINAL",0,0,"\r\n> crowd-pilot@0.0.1 vscode:prepublish\r\n> npm run compile\r\n\r\n",,terminal_output
|
| 54 |
+
53,8869823,"TERMINAL",0,0,"\r\n> crowd-pilot@0.0.1 compile\r\n> tsc -p ./\r\n\r\n",,terminal_output
|
| 55 |
+
54,8871452,"TERMINAL",0,0,"^C\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 56 |
+
55,8893572,".gitmodules",0,0,"[submodule ""crowd-pilot-serializer""]\n\tpath = crowd-pilot-serializer\n\turl = https://github.com/p-doom/crowd-pilot-serializer\n",properties,tab
|
| 57 |
+
56,8894233,".gitmodules",124,0,"",properties,selection_mouse
|
| 58 |
+
57,8912638,"TERMINAL",0,0,"git submodule update --remote",,terminal_command
|
| 59 |
+
58,8912683,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 60 |
+
59,8914077,"TERMINAL",0,0,"remote: Enumerating objects: 45, done.[K\r\nremote: Counting objects: 2% (1/45)[K\rremote: Counting objects: 4% (2/45)[K\rremote: Counting objects: 6% (3/45)[K\rremote: Counting objects: 8% (4/45)[K\rremote: Counting objects: 11% (5/45)[K\rremote: Counting objects: 13% (6/45)[K\rremote: Counting objects: 15% (7/45)[K\rremote: Counting objects: 17% (8/45)[K\rremote: Counting objects: 20% (9/45)[K\rremote: Counting objects: 22% (10/45)[K\rremote: Counting objects: 24% (11/45)[K\rremote: Counting objects: 26% (12/45)[K\rremote: Counting objects: 28% (13/45)[K\rremote: Counting objects: 31% (14/45)[K\rremote: Counting objects: 33% (15/45)[K\rremote: Counting objects: 35% (16/45)[K\rremote: Counting objects: 37% (17/45)[K\rremote: Counting objects: 40% (18/45)[K\rremote: Counting objects: 42% (19/45)[K\rremote: Counting objects: 44% (20/45)[K\rremote: Counting objects: 46% (21/45)[K\rremote: Counting objects: 48% (22/45)[K\rremote: Counting objects: 51% (23/45)[K\rremote: Counting objects: 53% (24/45)[K\rremote: Counting objects: 55% (25/45)[K\rremote: Counting objects: 57% (26/45)[K\rremote: Counting objects: 60% (27/45)[K\rremote: Counting objects: 62% (28/45)[K\rremote: Counting objects: 64% (29/45)[K\rremote: Counting objects: 66% (30/45)[K\rremote: Counting objects: 68% (31/45)[K\rremote: Counting objects: 71% (32/45)[K\rremote: Counting objects: 73% (33/45)[K\rremote: Counting objects: 75% (34/45)[K\rremote: Counting objects: 77% (35/45)[K\rremote: Counting objects: 80% (36/45)[K\rremote: Counting objects: 82% (37/45)[K\rremote: Counting objects: 84% (38/45)[K\rremote: Counting objects: 86% (39/45)[K\rremote: Counting objects: 88% (40/45)[K\rremote: Counting objects: 91% (41/45)[K\rremote: Counting objects: 93% (42/45)[K\rremote: Counting objects: 95% (43/45)[K\rremote: Counting objects: 97% (44/45)[K\rremote: Counting objects: 100% (45/45)[K\rremote: Counting objects: 100% (45/45), done.[K\r\nremote: Compressing objects: 5% (1/20)[K\rremote: Compressing objects: 10% (2/20)[K\rremote: Compressing objects: 15% (3/20)[K\rremote: Compressing objects: 20% (4/20)[K\rremote: Compressing objects: 25% (5/20)[K\rremote: Compressing objects: 30% (6/20)[K\rremote: Compressing objects: 35% (7/20)[K\rremote: Compressing objects: 40% (8/20)[K\rremote: Compressing objects: 45% (9/20)[K\rremote: Compressing objects: 50% (10/20)[K\rremote: Compressing objects: 55% (11/20)[K\rremote: Compressing objects: 60% (12/20)[K\rremote: Compressing objects: 65% (13/20)[K\rremote: Compressing objects: 70% (14/20)[K\rremote: Compressing objects: 75% (15/20)[K\rremote: Compressing objects: 80% (16/20)[K\rremote: Compressing objects: 85% (17/20)[K\rremote: Compressing objects: 90% (18/20)[K\rremote: Compressing objects: 95% (19/20)[K\rremote: Compressing objects: 100% (20/20)[K\rremote: Compressing objects: 100% (20/20), done.[K\r\nremote: Total 33 (delta 14), reused 27 (delta 11), pack-reused 0 (from 0)[K\r\nUnpacking objects: 3% (1/33)\rUnpacking objects: 6% (2/33)\rUnpacking objects: 9% (3/33)\rUnpacking objects: 12% (4/33)\rUnpacking objects: 15% (5/33)\rUnpacking objects: 18% (6/33)\rUnpacking objects: 21% (7/33)\rUnpacking objects: 24% (8/33)\rUnpacking objects: 27% (9/33)\rUnpacking objects: 30% (10/33)\rUnpacking objects: 33% (11/33)\rUnpacking objects: 36% (12/33)\rUnpacking objects: 39% (13/33)\rUnpacking objects: 42% (14/33)\rUnpacking objects: 45% (15/33)\rUnpacking objects: 48% (16/33)\rUnpacking objects: 51% (17/33)\rUnpacking objects: 54% (18/33)\rUnpacking objects: 57% 
(19/33)\rUnpacking objects: 60% (20/33)\rUnpacking objects: 63% (21/33)\rUnpacking objects: 66% (22/33)\rUnpacking objects: 69% (23/33)\rUnpacking objects: 72% (24/33)\rUnpacking objects: 75% (25/33)\rUnpacking objects: 78% (26/33)\rUnpacking objects: 81% (27/33)\rUnpacking objects: 84% (28/33)\rUnpacking objects: 87% (29/33)\rUnpacking objects: 90% (30/33)\rUnpacking objects: 93% (31/33)\rUnpacking objects: 96% (32/33)\rUnpacking objects: 100% (33/33)\rUnpacking objects: 100% (33/33), 7.48 KiB | 36.00 KiB/s, done.\r\n",,terminal_output
|
| 61 |
+
60,8914204,"TERMINAL",0,0,"From https://github.com/p-doom/crowd-pilot-serializer\r\n 9c35c9d..c36290c main -> origin/main\r\n * [new branch] panic-to-warning -> origin/panic-to-warning\r\n * [new branch] parallel-serialization -> origin/parallel-serialization\r\n * [new branch] save-slicing -> origin/save-slicing\r\n",,terminal_output
|
| 62 |
+
61,8914436,"TERMINAL",0,0,"Submodule path 'crowd-pilot-serializer': checked out 'c36290cf8961266f6a1ab8031d640d2b34a2504f'\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 63 |
+
62,8929957,"TERMINAL",0,0,"vsce package",,terminal_command
|
| 64 |
+
63,8929999,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 65 |
+
64,8931536,"TERMINAL",0,0,"Executing prepublish script 'npm run vscode:prepublish'...\r\n",,terminal_output
|
| 66 |
+
65,8932769,"TERMINAL",0,0,"\r\n> crowd-pilot@0.0.1 vscode:prepublish\r\n> npm run compile\r\n\r\n",,terminal_output
|
| 67 |
+
66,8933943,"TERMINAL",0,0,"\r\n> crowd-pilot@0.0.1 compile\r\n> tsc -p ./\r\n\r\n",,terminal_output
|
| 68 |
+
67,8936166,"TERMINAL",0,0,"[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m New [31mmajor[39m version of npm available! [31m10.5.2[39m -> [32m11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Changelog: [36mhttps://github.com/npm/cli/releases/tag/v11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Run [32mnpm install -g npm@11.7.0[39m to update!\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m New [31mmajor[39m version of npm available! [31m10.5.2[39m -> [32m11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Changelog: [36mhttps://github.com/npm/cli/releases/tag/v11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Run [32mnpm install -g npm@11.7.0[39m to update!\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m",,terminal_output
|
| 69 |
+
68,8938523,"TERMINAL",0,0,"[104m[30m INFO [39m[49m [1m[34mFiles included in the VSIX:[39m[22m\r\n[1m[34m[39m[22m[1mcrowd-pilot-0.0.1.vsix[22m\r\n├─ [Content_Types].xml \r\n├─ extension.vsixmanifest \r\n└─ [1mextension/[22m\r\n ├─ LICENSE.txt [90m[0.55 KB][39m\r\n ├─ package.json [90m[4.25 KB][39m\r\n ├─ readme.md \r\n ├─ [1mnode_modules/[22m\r\n │ └─ [1m@crowd-pilot/[22m\r\n │ └─ [1mserializer/[22m\r\n │ ├─ index.js [90m[9.6 KB][39m\r\n │ ├─ package.json [90m[0.92 KB][39m\r\n │ └─ serializer.linux-x64-gnu.node [31m[3.05 MB][39m\r\n └─ [1mout/[22m\r\n ├─ extension.js [90m[47.8 KB][39m\r\n ├─ extension.js.map [90m[40.04 KB][39m\r\n └─ [1mtest/[22m\r\n ├─ extension.test.js [90m[1.94 KB][39m\r\n └─ extension.test.js.map [90m[0.6 KB][39m\r\n\r\nThe file extension/node_modules/@crowd-pilot/serializer/serializer.linux-x64-gnu.node is [31mlarge[39m (3.05 MB)\r\n\r\n",,terminal_output
|
| 70 |
+
69,8939766,"TERMINAL",0,0,"[42m[30m DONE [39m[49m Packaged: /fast/home/franz.srambical/crowd-pilot-extension/crowd-pilot-0.0.1.vsix [1m(12 files, 1.02 MB)[22m\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 71 |
+
70,8949409,"package.json",0,0,"{\n ""name"": ""crowd-pilot"",\n ""displayName"": ""crowd-pilot-extension"",\n ""description"": ""Teaching language models to code like humans."",\n ""publisher"": ""p-doom"",\n ""version"": ""0.0.1"",\n ""repository"": {\n ""type"": ""git"",\n ""url"": ""https://github.com/p-doom/crowd-pilot-extension""\n },\n ""engines"": {\n ""vscode"": ""^1.99.3""\n },\n ""categories"": [\n ""Other""\n ],\n ""activationEvents"": [\n ""onStartupFinished""\n ],\n ""main"": ""./out/extension.js"",\n ""contributes"": {\n ""commands"": [\n {\n ""command"": ""crowd-pilot.toggleSuggestions"",\n ""title"": ""crowd-pilot: Toggle Tab Suggestions""\n },\n {\n ""command"": ""crowd-pilot.hideUi"",\n ""title"": ""crowd-pilot: Hide Preview""\n },\n {\n ""command"": ""crowd-pilot.sglangTest"",\n ""title"": ""crowd-pilot: Test SGLang""\n },\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""title"": ""crowd-pilot: Model Plan & Run""\n },\n {\n ""command"": ""crowd-pilot.clearContext"",\n ""title"": ""crowd-pilot: Clear Context""\n },\n {\n ""command"": ""crowd-pilot.openPreferenceLog"",\n ""title"": ""crowd-pilot: Open Preference Log""\n }\n ],\n ""configuration"": {\n ""title"": ""crowd-pilot"",\n ""properties"": {\n ""crowd-pilot.hostname"": {\n ""type"": ""string"",\n ""default"": ""hai002"",\n ""description"": ""Hostname of the SGLang server""\n },\n ""crowd-pilot.port"": {\n ""type"": ""number"",\n ""default"": 30000,\n ""description"": ""Port of the SGLang server""\n },\n ""crowd-pilot.basePath"": {\n ""type"": ""string"",\n ""default"": ""/v1/chat/completions"",\n ""description"": ""Base path for the SGLang API endpoint""\n },\n ""crowd-pilot.modelName"": {\n ""type"": ""string"",\n ""default"": ""qwen/qwen3-8b"",\n ""description"": ""Model name to use for completions""\n },\n ""crowd-pilot.minAvgLogprob"": {\n ""type"": ""number"",\n ""default"": -1.0,\n ""description"": ""Minimum average log-probability per token for displaying suggestions. Higher values (closer to 0) require more confidence. -1.0 ≈ perplexity 2.7""\n },\n ""crowd-pilot.maxContextTokens"": {\n ""type"": ""number"",\n ""default"": 120000,\n ""description"": ""Context length (in tokens). Older messages are truncated to fit. Set below your model's limit to leave room for the response.""\n },\n ""crowd-pilot.enablePreferenceLogging"": {\n ""type"": ""boolean"",\n ""default"": true,\n ""description"": ""Enable logging of accept/reject data for reward model training and RLHF/DPO""\n },\n ""crowd-pilot.preferenceLogPath"": {\n ""type"": ""string"",\n ""default"": """",\n ""description"": ""Custom path for the preference log file (JSONL format). 
If empty, uses workspace/.crowd-pilot-preferences.jsonl""\n }\n }\n },\n ""keybindings"": [\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""editorTextFocus && crowdPilot.uiVisible""\n },\n {\n ""command"": ""crowd-pilot.modelRun"",\n ""key"": ""tab"",\n ""mac"": ""tab"",\n ""when"": ""inQuickOpen && crowdPilot.uiVisible""\n },\n {\n ""command"": ""crowd-pilot.hideUi"",\n ""key"": ""escape"",\n ""mac"": ""escape"",\n ""when"": ""crowdPilot.uiVisible""\n }\n ]\n },\n ""scripts"": {\n ""vscode:prepublish"": ""npm run compile"",\n ""compile"": ""tsc -p ./"",\n ""watch"": ""tsc -watch -p ./"",\n ""pretest"": ""npm run compile && npm run lint"",\n ""lint"": ""eslint src"",\n ""test"": ""vscode-test"",\n ""clean"": ""rm -rf out *.tgz"",\n ""clean:all"": ""rm -rf out *.tgz node_modules package-lock.json"",\n ""rebuild-serializer"": ""cd crowd-pilot-serializer/crates/napi && npm install && npm run build && npm pack && mv *.tgz ../../../ && cd ../../.. && npm install""\n },\n ""dependencies"": {\n ""@crowd-pilot/serializer"": ""file:./crowd-pilot-serializer-0.1.0.tgz""\n },\n ""devDependencies"": {\n ""@types/vscode"": ""^1.99.3"",\n ""@types/mocha"": ""^10.0.10"",\n ""@types/node"": ""22.x"",\n ""@typescript-eslint/eslint-plugin"": ""^8.45.0"",\n ""@typescript-eslint/parser"": ""^8.45.0"",\n ""eslint"": ""^9.36.0"",\n ""typescript"": ""^5.9.3"",\n ""@vscode/test-cli"": ""^0.0.11"",\n ""@vscode/test-electron"": ""^2.5.2""\n }\n}\n",json,tab
|
| 72 |
+
71,8951723,"package.json",4354,0,"",json,selection_command
|
| 73 |
+
72,8953082,"package.json",4352,0,"",json,selection_command
|
| 74 |
+
73,8953322,"package.json",4348,0,"",json,selection_command
|
| 75 |
+
74,8953359,"package.json",4310,0,"",json,selection_command
|
| 76 |
+
75,8953388,"package.json",4275,0,"",json,selection_command
|
| 77 |
+
76,8953417,"package.json",4247,0,"",json,selection_command
|
| 78 |
+
77,8953452,"package.json",4222,0,"",json,selection_command
|
| 79 |
+
78,8953486,"package.json",4178,0,"",json,selection_command
|
| 80 |
+
79,8953518,"package.json",4127,0,"",json,selection_command
|
| 81 |
+
80,8953551,"package.json",4100,0,"",json,selection_command
|
| 82 |
+
81,8953584,"package.json",4068,0,"",json,selection_command
|
| 83 |
+
82,8953619,"package.json",4036,0,"",json,selection_command
|
| 84 |
+
83,8953654,"package.json",4013,0,"",json,selection_command
|
| 85 |
+
84,8953689,"package.json",4008,0,"",json,selection_command
|
| 86 |
+
85,8953721,"package.json",3935,0,"",json,selection_command
|
| 87 |
+
86,8953775,"package.json",3915,0,"",json,selection_command
|
| 88 |
+
87,8953791,"package.json",3910,0,"",json,selection_command
|
| 89 |
+
88,8953826,"package.json",3748,0,"",json,selection_command
|
| 90 |
+
89,8953857,"package.json",3680,0,"",json,selection_command
|
| 91 |
+
90,8953889,"package.json",3647,0,"",json,selection_command
|
| 92 |
+
91,8953918,"package.json",3620,0,"",json,selection_command
|
| 93 |
+
92,8953949,"package.json",3594,0,"",json,selection_command
|
| 94 |
+
93,8953984,"package.json",3544,0,"",json,selection_command
|
| 95 |
+
94,8986674,"TERMINAL",0,0,"npm run rebuild-serializer",,terminal_command
|
| 96 |
+
95,8986722,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 97 |
+
96,8987890,"TERMINAL",0,0,"\r\n> crowd-pilot@0.0.1 rebuild-serializer\r\n> cd crowd-pilot-serializer/crates/napi && npm install && npm run build && npm pack && mv *.tgz ../../../ && cd ../../.. && npm install\r\n\r\n",,terminal_output
|
| 98 |
+
97,8989355,"TERMINAL",0,0,"[?25l([107;97m#########[0m[100;90m⠂⠂⠂⠂⠂⠂⠂⠂⠂[0m) ⠼ idealTree: [32;40mtiming[0m [35midealTree[0m Completed in 12ms[0m[K\r([107;97m#########[0m[100;90m⠂⠂⠂⠂⠂⠂⠂⠂⠂[0m) ⠴ idealTree: [32;40mtiming[0m [35midealTree[0m Completed in 12ms[0m[K\r([107;97m#########[0m[100;90m⠂⠂⠂⠂⠂⠂⠂⠂⠂[0m) ⠦ idealTree: [32;40mtiming[0m [35midealTree[0m Completed in 12ms[0m[K\r",,terminal_output
|
| 99 |
+
98,8989411,"TERMINAL",0,0,"([107;97m#########[0m[100;90m⠂⠂⠂⠂⠂⠂⠂⠂⠂[0m) ⠦ idealTree: [32;40mtiming[0m [35midealTree[0m Completed in 12ms[0m[K\r",,terminal_output
|
| 100 |
+
99,8989505,"TERMINAL",0,0,"([107;97m#########[0m[100;90m⠂⠂⠂⠂⠂⠂⠂⠂⠂[0m) ⠦ idealTree: [32;40mtiming[0m [35midealTree[0m Completed in 12ms[0m[K\r\r[K[?25h\r\nup to date, audited 2 packages in 1s\r\n\r\n1 package is looking for funding\r\n run `npm fund` for details\r\n\r\nfound [32m[1m0[22m[39m vulnerabilities\r\n[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m New [31mmajor[39m version of npm available! [31m10.5.2[39m -> [32m11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Changelog: [36mhttps://github.com/npm/cli/releases/tag/v11.7.0[39m\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m Run [32mnpm install -g npm@11.7.0[39m to update!\r\n[0m[37;40mnpm[0m [0m[36;40mnotice[0m[35m[0m \r\n[0m",,terminal_output
|
| 101 |
+
100,8990622,"TERMINAL",0,0,"\r\n> @crowd-pilot/serializer@0.1.0 build\r\n> napi build --platform --release\r\n\r\n",,terminal_output
|
| 102 |
+
101,8993015,"TERMINAL",0,0,"^C\r\n]0;franz.srambical@hai-login1:~/crowd-pilot-extension",,terminal_output
|
| 103 |
+
102,8996557,"TERMINAL",0,0,"npm run clean:all",,terminal_command
|
| 104 |
+
103,8996606,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 105 |
+
104,8997717,"TERMINAL",0,0,"\r\n> crowd-pilot@0.0.1 clean:all\r\n> rm -rf out *.tgz node_modules package-lock.json\r\n\r\n",,terminal_output
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-7ae86302-2ae7-4165-bd93-90b8ccd6716a1754576773319-2025_08_07-16.26.15.956/source.csv
ADDED
|
@@ -0,0 +1,22 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,1,"examples/bibliography.bib",0,0,"@article{radford2018improving,\n title = {Improving language understanding by generative pre-training},\n author = {Radford, Alec and Narasimhan, Karthik and Salimans, Tim and\n Sutskever, Ilya and others},\n}\n\n@article{radford2019language,\n title = {Language models are unsupervised multitask learners},\n author = {Radford, Alec and Wu, Jeffrey and Child, Rewon and Luan, David and\n Amodei, Dario and Sutskever, Ilya and others},\n journal = {OpenAI blog},\n volume = {1},\n number = {8},\n pages = {9},\n year = {2019},\n}\n\n@article{brown2020language,\n title = {Language models are few-shot learners},\n author = {Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie\n and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind\n and Shyam, Pranav and Sastry, Girish and Askell, Amanda and others},\n journal = {Advances in neural information processing systems},\n volume = {33},\n pages = {1877--1901},\n year = {2020},\n}\n\n@article{raffel2020exploring,\n title = {Exploring the limits of transfer learning with a unified text-to-text\n transformer},\n author = {Raffel, Colin and Shazeer, Noam and Roberts, Adam and Lee, Katherine\n and Narang, Sharan and Matena, Michael and Zhou, Yanqi and Li, Wei\n and Liu, Peter J},\n journal = {Journal of machine learning research},\n volume = {21},\n number = {140},\n pages = {1--67},\n year = {2020},\n}\n\n@article{touvron2023llama,\n title = {Llama 2: Open foundation and fine-tuned chat models},\n author = {Touvron, Hugo and Martin, Louis and Stone, Kevin and Albert, Peter\n and Almahairi, Amjad and Babaei, Yasmine and Bashlykov, Nikolay and\n Batra, Soumya and Bhargava, Prajjwal and Bhosale, Shruti and others},\n journal = {arXiv preprint arXiv:2307.09288},\n year = {2023},\n}\n\n@article{bai2023qwen,\n title = {Qwen technical report},\n author = {Bai, Jinze and Bai, Shuai and Chu, Yunfei and Cui, Zeyu and Dang,\n Kai and Deng, Xiaodong and Fan, Yang and Ge, Wenbin and Han, Yu and\n Huang, Fei and others},\n journal = {arXiv preprint arXiv:2309.16609},\n year = {2023},\n}\n\n@article{young2024yi,\n title = {Yi: Open foundation models by 01. ai},\n author = {Young, Alex and Chen, Bei and Li, Chao and Huang, Chengen and Zhang,\n Ge and Zhang, Guanwei and Li, Heng and Zhu, Jiangcheng and Chen,\n Jianqun and Chang, Jing and others},\n journal = {arXiv preprint arXiv:2403.04652},\n year = {2024},\n}\n\n@article{vaswani2017attention,\n title = {Attention is all you need},\n author = {Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit,\n Jakob and Jones, Llion and Gomez, Aidan N and Kaiser, {\L}ukasz and\n Polosukhin, Illia},\n journal = {Advances in neural information processing systems},\n volume = {30},\n year = {2017},\n}\n\n@article{raffel2020exploring,\n title = {Exploring the limits of transfer learning with a unified text-to-text\n transformer},\n author = {Raffel, Colin and Shazeer, Noam and Roberts, Adam and Lee, Katherine\n and Narang, Sharan and Matena, Michael and Zhou, Yanqi and Li, Wei\n and Liu, Peter J},\n journal = {Journal of machine learning research},\n volume = {21},\n number = {140},\n pages = {1--67},\n year = {2020},\n}\n\n@inproceedings{zhou2024what,\n title = {What Algorithms can Transformers Learn? A Study in Length\n Generalization},\n author = {Hattie Zhou and Arwen Bradley and Etai Littwin and Noam Razin and\n Omid Saremi and Joshua M. 
Susskind and Samy Bengio and Preetum\n Nakkiran},\n booktitle = {The Twelfth International Conference on Learning Representations},\n year = {2024},\n url = {https://openreview.net/forum?id=AssIuHnmHX},\n}\n\n@inproceedings{ding2024causallm,\n title = {Causal{LM} is not optimal for in-context learning},\n author = {Nan Ding and Tomer Levinboim and Jialin Wu and Sebastian Goodman and\n Radu Soricut},\n booktitle = {The Twelfth International Conference on Learning Representations},\n year = {2024},\n url = {https://openreview.net/forum?id=guRNebwZBb},\n}\n\n@article{williams1989learning,\n title = {A learning algorithm for continually running fully recurrent neural\n networks},\n author = {Williams, Ronald J and Zipser, David},\n journal = {Neural computation},\n volume = {1},\n number = {2},\n pages = {270--280},\n year = {1989},\n publisher = {MIT Press One Rogers Street, Cambridge, MA 02142-1209, USA\n journals-info~…},\n}\n\n@article{tay2022ul2,\n title = {Ul2: Unifying language learning paradigms},\n author = {Tay, Yi and Dehghani, Mostafa and Tran, Vinh Q and Garcia, Xavier\n and Wei, Jason and Wang, Xuezhi and Chung, Hyung Won and Shakeri,\n Siamak and Bahri, Dara and Schuster, Tal and others},\n journal = {arXiv preprint arXiv:2205.05131},\n year = {2022},\n}\n\n@misc{pfau2023last,\n title = {Last I checked, it was still not possible for a neural network alone\n (i.e. no MCTS) to beat the world's best Go players...},\n author = {Pfau, David},\n year = {2023},\n url = {https://twitter.com/pfau/status/1732785418565796167},\n note = {Accessed: 2023-12-07},\n}\n\n@article{deepmind2023alphacode,\n title = {AlphaCode 2 Technical Report},\n author = {Team, AlphaCode and Deepmind, Google},\n year = {2023},\n journal = {Google Deepmind},\n url = {\n https://storage.googleapis.com/deepmind-media/AlphaCode2/AlphaCode2_Tech_Report.pdf\n },\n}\n\n@article{reuters2023sam,\n author = {Tong, Anna and Dastin, Jeffrey and Hu, Krystal},\n title = {Sam Altman's ouster from OpenAI was precipitated by letter to board\n about AI breakthrough},\n journal = {Reuters},\n year = {2023},\n url = {\n https://www.reuters.com/technology/sam-altmans-ouster-openai-was-precipitated-by-letter-board-about-ai-breakthrough-2023-11-22/\n },\n note = {Accessed: 2023-12-07},\n}\n\n@misc{imbue2023podcast,\n title = {Noam Brown, FAIR: On achieving human-level performance in poker and\n Diplomacy, and the power of spending compute at inference time},\n author = {Noam Brown},\n howpublished = {\n https://imbue.com/podcast/2023-02-09-podcast-episode-27-noam-brown/\n },\n year = {2023},\n note = {Podcast episode 27, February 9, 2023},\n}\n\n@misc{karpathy2023youtube,\n author = {Karpathy, Andrej},\n title = {[1hr Talk] Intro to Large Language Models},\n howpublished = {YouTube},\n year = {2023},\n note = {Accessed: 2023-12-07},\n url = {https://www.youtube.com/watch?v=zjkBMFhNj_g&t=2100s},\n}\n\n@article{brown2019superhuman,\n title = {Superhuman AI for multiplayer poker},\n author = {Brown, Noam and Sandholm, Tuomas},\n journal = {Science},\n volume = {365},\n number = {6456},\n pages = {885--890},\n year = {2019},\n publisher = {American Association for the Advancement of Science},\n}\n\n@article{silver2016mastering,\n title = {Mastering the game of Go with deep neural networks and tree search},\n author = {Silver, David and Huang, Aja and Maddison, Chris J and Guez, Arthur\n and Sifre, Laurent and Van Den Driessche, George and Schrittwieser,\n Julian and Antonoglou, Ioannis and Panneershelvam, Veda and Lanctot,\n Marc and 
others},\n journal = {nature},\n volume = {529},\n number = {7587},\n pages = {484--489},\n year = {2016},\n publisher = {Nature Publishing Group},\n}\n\n@article{schrittwieser2020mastering,\n title = {Mastering atari, go, chess and shogi by planning with a learned model\n },\n author = {Schrittwieser, Julian and Antonoglou, Ioannis and Hubert, Thomas and\n Simonyan, Karen and Sifre, Laurent and Schmitt, Simon and Guez,\n Arthur and Lockhart, Edward and Hassabis, Demis and Graepel, Thore\n and others},\n journal = {Nature},\n volume = {588},\n number = {7839},\n pages = {604--609},\n year = {2020},\n publisher = {Nature Publishing Group UK London},\n}\n\n@article{wei2022chain,\n title = {Chain-of-thought prompting elicits reasoning in large language models\n },\n author = {Wei, Jason and Wang, Xuezhi and Schuurmans, Dale and Bosma, Maarten\n and Xia, Fei and Chi, Ed and Le, Quoc V and Zhou, Denny and others},\n journal = {Advances in neural information processing systems},\n volume = {35},\n pages = {24824--24837},\n year = {2022},\n}\n\n@article{yao2024tree,\n title = {Tree of thoughts: Deliberate problem solving with large language\n models},\n author = {Yao, Shunyu and Yu, Dian and Zhao, Jeffrey and Shafran, Izhak and\n Griffiths, Tom and Cao, Yuan and Narasimhan, Karthik},\n journal = {Advances in Neural Information Processing Systems},\n volume = {36},\n year = {2024},\n}\n\n@article{lecun2022path,\n title = {A path towards autonomous machine intelligence version 0.9. 2,\n 2022-06-27},\n author = {LeCun, Yann},\n journal = {Open Review},\n volume = {62},\n number = {1},\n year = {2022},\n}\n\n@article{hoffmann2022training,\n title = {Training compute-optimal large language models},\n author = {Hoffmann, Jordan and Borgeaud, Sebastian and Mensch, Arthur and\n Buchatskaya, Elena and Cai, Trevor and Rutherford, Eliza and Casas,\n Diego de Las and Hendricks, Lisa Anne and Welbl, Johannes and Clark,\n Aidan and others},\n journal = {arXiv preprint arXiv:2203.15556},\n year = {2022},\n}\n\n@article{meta2024introducing,\n title = {Introducing meta llama 3: The most capable openly available llm to\n date},\n author = {Meta, AI},\n journal = {Meta AI.},\n year = {2024},\n}\n\n@misc{riley2024it,\n title = {It's just not a very useful scaling law.},\n author = {@riley_stews},\n year = {2024},\n url = {https://x.com/riley_stews/status/1781019732122198288},\n note = {Accessed: 2023-04-20},\n}\n\n@article{shazeer2017outrageously,\n title = {Outrageously large neural networks: The sparsely-gated\n mixture-of-experts layer},\n author = {Shazeer, Noam and Mirhoseini, Azalia and Maziarz, Krzysztof and\n Davis, Andy and Le, Quoc and Hinton, Geoffrey and Dean, Jeff},\n journal = {arXiv preprint arXiv:1701.06538},\n year = {2017},\n}\n\n@article{fedus2022switch,\n title = {Switch transformers: Scaling to trillion parameter models with simple\n and efficient sparsity},\n author = {Fedus, William and Zoph, Barret and Shazeer, Noam},\n journal = {Journal of Machine Learning Research},\n volume = {23},\n number = {120},\n pages = {1--39},\n year = {2022},\n}\n\n@article{schulman2015high,\n title = {High-dimensional continuous control using generalized advantage\n estimation},\n author = {Schulman, John and Moritz, Philipp and Levine, Sergey and Jordan,\n Michael and Abbeel, Pieter},\n journal = {arXiv preprint arXiv:1506.02438},\n year = {2015},\n}\n\n@article{srambical2025ppo,\n author = {Srambical, Franz},\n title = {PPO Is Secretly Using Monte Carlo Advantage Estimation In LLM\n Post-Training},\n journal = 
{p(doom) blog},\n year = {2025},\n note = {https://pdoom.org/blog.html},\n}\n\n@article{williams1992simple,\n title = {Simple statistical gradient-following algorithms for connectionist\n reinforcement learning},\n author = {Williams, Ronald J},\n journal = {Machine learning},\n volume = {8},\n pages = {229--256},\n year = {1992},\n publisher = {Springer},\n}\n\n@software{deepmind2020jax,\n title = {The {D}eep{M}ind {JAX} {E}cosystem},\n author = {DeepMind and Babuschkin, Igor and Baumli, Kate and Bell, Alison and\n Bhupatiraju, Surya and Bruce, Jake and Buchlovsky, Peter and Budden,\n David and Cai, Trevor and Clark, Aidan and Danihelka, Ivo and Dedieu,\n Antoine and Fantacci, Claudio and Godwin, Jonathan and Jones, Chris\n and Hemsley, Ross and Hennigan, Tom and Hessel, Matteo and Hou,\n Shaobo and Kapturowski, Steven and Keck, Thomas and Kemaev, Iurii and\n King, Michael and Kunesch, Markus and Martens, Lena and Merzic, Hamza\n and Mikulik, Vladimir and Norman, Tamara and Papamakarios, George and\n Quan, John and Ring, Roman and Ruiz, Francisco and Sanchez, Alvaro\n and Sartran, Laurent and Schneider, Rosalia and Sezener, Eren and\n Spencer, Stephen and Srinivasan, Srivatsan and Stanojevi\'{c}, Milo\v\n {s} and Stokowiec, Wojciech and Wang, Luyu and Zhou, Guangyao and\n Viola, Fabio},\n url = {http://github.com/deepmind},\n year = {2020},\n}\n\n@misc{jax2025jit,\n title = {JAX: Just-in-time compilation},\n author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James\n Johnson and Chris Leary and Dougal Maclaurin and George Necula and\n Adam Paszke and Jake Vander{P}las and Skye Wanderman-{M}ilne and Qiao\n Zhang},\n year = {2025},\n url = {https://docs.jax.dev/en/latest/jit-compilation.html},\n note = {Accessed: 2025-03-26},\n}\n\n@misc{jax2025callbacks,\n title = {JAX: External callbacks},\n author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James\n Johnson and Chris Leary and Dougal Maclaurin and George Necula and\n Adam Paszke and Jake Vander{P}las and Skye Wanderman-{M}ilne and Qiao\n Zhang},\n year = {2025},\n url = {https://docs.jax.dev/en/latest/external-callbacks.html},\n note = {Accessed: 2025-03-26},\n}\n\n@misc{jax2025checkify,\n title = {JAX: The `checkify` transformation},\n author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James\n Johnson and Chris Leary and Dougal Maclaurin and George Necula and\n Adam Paszke and Jake Vander{P}las and Skye Wanderman-{M}ilne and Qiao\n Zhang},\n year = {2025},\n url = {https://docs.jax.dev/en/latest/debugging/checkify_guide.html},\n note = {Accessed: 2025-03-26},\n}\n\n@misc{jax2025key,\n title = {JAX: Key concepts},\n author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James\n Johnson and Chris Leary and Dougal Maclaurin and George Necula and\n Adam Paszke and Jake Vander{P}las and Skye Wanderman-{M}ilne and Qiao\n Zhang},\n year = {2025},\n url = {https://docs.jax.dev/en/latest/key-concepts.html},\n note = {Accessed: 2025-03-26},\n}\n\n@software{deepmind2020chex,\n title = {Chex},\n author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James\n Johnson and Chris Leary and Dougal Maclaurin and George Necula and\n Adam Paszke and Jake Vander{P}las and Skye Wanderman-{M}ilne and Qiao\n Zhang},\n url = {http://github.com/google-deepmind/chex},\n year = {2020},\n}\n\n@misc{jax2025control,\n title = {JAX: Control flow and logical operators with JIT},\n author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James\n Johnson and Chris Leary 
and Dougal Maclaurin and George Necula and\n Adam Paszke and Jake Vander{P}las and Skye Wanderman-{M}ilne and Qiao\n Zhang},\n year = {2025},\n url = {https://docs.jax.dev/en/latest/control-flow.html},\n note = {Accessed: 2025-03-26},\n}\n\n@misc{xla2025conditional,\n title = {XLA:Operation Semantics:Conditional},\n author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James\n Johnson and Chris Leary and Dougal Maclaurin and George Necula and\n Adam Paszke and Jake Vander{P}las and Skye Wanderman-{M}ilne and Qiao\n Zhang},\n year = {2025},\n url = {https://openxla.org/xla/operation_semantics#conditional},\n note = {Accessed: 2025-03-26},\n}\n\n@misc{ayaka76822025error,\n author = {ayaka7682},\n title = {Message on public Discord server: Try this:\n <d-code block="""", language=""python"">\n import jax from jax._src.error_check import set_error_if, raise_if_error\n </d-code>\n <d-code block="""", language=""python"">\n import jax.numpy as jnp\n </d-code>\n <d-code block="""", language=""python"">\n @jax.jit\n </d-code>\n <d-code block="""", language=""python"">\n def f(x, y):\n </d-code>\n <d-code block="""", language=""python"">\n set_error_if(x != 0, 'x must be 0')\n </d-code>\n <d-code block="""", language=""python"">\n return jnp.multiply(x, y)\n </d-code>\n <d-code block="""", language=""python"">\n f(0, 0)\n </d-code>\n <d-code block="""", language=""python"">\n raise_if_error()\n </d-code> },\n year = {2025},\n url = {\n https://discord.com/channels/1107832795377713302/1107832795688083561/1354171414596419854\n },\n note = {Accessed: 2025-03-26},\n}\n\n@book{sutton1998reinforcement,\n title={Reinforcement learning: An introduction},\n author={Sutton, Richard S and Barto, Andrew G and others},\n volume={1},\n number={1},\n year={1998},\n publisher={MIT press Cambridge}\n}\n\n@article{sutton1999policy,\n title={Policy gradient methods for reinforcement learning with function approximation},\n author={Sutton, Richard S and McAllester, David and Singh, Satinder and Mansour, Yishay},\n journal={Advances in neural information processing systems},\n volume={12},\n year={1999}\n}\n\n@article{degris2012off,\n title={Off-policy actor-critic},\n author={Degris, Thomas and White, Martha and Sutton, Richard S},\n journal={arXiv preprint arXiv:1205.4839},\n year={2012}\n}\n\n@article{schulman2017proximal,\n title={Proximal policy optimization algorithms},\n author={Schulman, John and Wolski, Filip and Dhariwal, Prafulla and Radford, Alec and Klimov, Oleg},\n journal={arXiv preprint arXiv:1707.06347},\n year={2017}\n}\n\n@article{ouyang2022training,\n title={Training language models to follow instructions with human feedback},\n author={Ouyang, Long and Wu, Jeffrey and Jiang, Xu and Almeida, Diogo and Wainwright, Carroll and Mishkin, Pamela and Zhang, Chong and Agarwal, Sandhini and Slama, Katarina and Ray, Alex and others},\n journal={Advances in neural information processing systems},\n volume={35},\n pages={27730--27744},\n year={2022}\n}\n\n\n@misc{openai2025imo,\n title = {We achieved gold medal-level performance 🥇on the 2025 International Mathematical Olympiad with a general-purpose reasoning LLM!},\n author = {OpenAI},\n year = {2025},\n url = {https://x.com/OpenAI/status/1946594928945148246},\n note = {Accessed: 2025-08-05},\n}\n\n@misc{deepmind2025imo,\n title = {Advanced version of Gemini with Deep Think officially achieves gold-medal standard at the International Mathematical Olympiad},\n author = {Luong, Thang and Lockhart, Edward},\n year = {2025},\n url = 
{https://deepmind.google/discover/blog/advanced-version-of-gemini-with-deep-think-officially-achieves-gold-medal-standard-at-the-international-mathematical-olympiad/},\n note = {DeepMind Blog, July 21, 2025},\n}\n\n@misc{deepseekai2025r1,\n title={DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning}, \n author={DeepSeek-AI},\n year={2025},\n eprint={2501.12948},\n archivePrefix={arXiv},\n primaryClass={cs.CL},\n url={https://arxiv.org/abs/2501.12948}, \n}\n\n@misc{cursor2025tab,\n title = {A New Tab Model},\n author = {Cursor},\n year = {2025},\n url = {https://cursor.com/blog/tab-update},\n note = {Accessed: 2025-08-05},\n}\n\n@inproceedings{bruce2024genie,\n title={Genie: Generative Interactive Environments},\n author={Jake Bruce and Michael D Dennis and Ashley Edwards and Jack Parker-Holder and Yuge Shi and Edward Hughes and Matthew Lai and Aditi Mavalankar and Richie Steigerwald and Chris Apps and Yusuf Aytar and Sarah Maria Elisabeth Bechtle and Feryal Behbahani and Stephanie C.Y. Chan and Nicolas Heess and Lucy Gonzalez and Simon Osindero and Sherjil Ozair and Scott Reed and Jingwei Zhang and Konrad Zolna and Jeff Clune and Nando de Freitas and Satinder Singh and Tim Rockt{\""a}schel},\n booktitle={Forty-first International Conference on Machine Learning},\n year={2024},\n url={https://openreview.net/forum?id=bJbSbJskOS}\n}\n\n@article{parkerholder2024genie2,\n title = {Genie 2: A Large-Scale Foundation World Model},\n author = {Jack Parker-Holder and Philip Ball and Jake Bruce and Vibhavari Dasagi and Kristian Holsheimer and Christos Kaplanis and Alexandre Moufarek and Guy Scully and Jeremy Shar and Jimmy Shi and Stephen Spencer and Jessica Yung and Michael Dennis and Sultan Kenjeyev and Shangbang Long and Vlad Mnih and Harris Chan and Maxime Gazeau and Bonnie Li and Fabio Pardo and Luyu Wang and Lei Zhang and Frederic Besse and Tim Harley and Anna Mitenkova and Jane Wang and Jeff Clune and Demis Hassabis and Raia Hadsell and Adrian Bolton and Satinder Singh and Tim Rockt{\""a}schel},\n year = {2024},\n url = {https://deepmind.google/discover/blog/genie-2-a-large-scale-foundation-world-model/}\n}\n\n@article{deepmind2025genie3,\n title = {Genie 3: A New Frontier for World Models},\n author = {Philip J. 
Ball and Jakob Bauer and Frank Belletti and Bethanie Brownfield and Ariel Ephrat and Shlomi Fruchter and Agrim Gupta and Kristian Holsheimer and Aleksander Holynski and Jiri Hron and Christos Kaplanis and Marjorie Limont and Matt McGill and Yanko Oliveira and Jack Parker-Holder and Frank Perbet and Guy Scully and Jeremy Shar and Stephen Spencer and Omer Tov and Ruben Villegas and Emma Wang and Jessica Yung and Cip Baetu and Jordi Berbel and David Bridson and Jake Bruce and Gavin Buttimore and Sarah Chakera and Bilva Chandra and Paul Collins and Alex Cullum and Bogdan Damoc and Vibha Dasagi and Maxime Gazeau and Charles Gbadamosi and Woohyun Han and Ed Hirst and Ashyana Kachra and Lucie Kerley and Kristian Kjems and Eva Knoepfel and Vika Koriakin and Jessica Lo and Cong Lu and Zeb Mehring and Alex Moufarek and Henna Nandwani and Valeria Oliveira and Fabio Pardo and Jane Park and Andrew Pierson and Ben Poole and Helen Ran and Tim Salimans and Manuel Sanchez and Igor Saprykin and Amy Shen and Sailesh Sidhwani and Duncan Smith and Joe Stanton and Hamish Tomlinson and Dimple Vijaykumar and Luyu Wang and Piers Wingfield and Nat Wong and Keyang Xu and Christopher Yew and Nick Young and Vadim Zubov and Douglas Eck and Dumitru Erhan and Koray Kavukcuoglu and Demis Hassabis and Zoubin Gharamani and Raia Hadsell and A{\""a}ron van den Oord and Inbar Mosseri and Adrian Bolton and Satinder Singh and Tim Rockt{\""a}schel},\n year = {2025},\n url = {}\n}\n\n@InProceedings{parkerholder2022evolving,\n title = \t {Evolving Curricula with Regret-Based Environment Design},\n author = {Parker-Holder, Jack and Jiang, Minqi and Dennis, Michael and Samvelyan, Mikayel and Foerster, Jakob and Grefenstette, Edward and Rockt{\""a}schel, Tim},\n booktitle = \t {Proceedings of the 39th International Conference on Machine Learning},\n pages = \t {17473--17498},\n year = \t {2022},\n editor = \t {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},\n volume = \t {162},\n series = \t {Proceedings of Machine Learning Research},\n month = \t {17--23 Jul},\n publisher = {PMLR},\n pdf = \t {https://proceedings.mlr.press/v162/parker-holder22a/parker-holder22a.pdf},\n url = \t {https://proceedings.mlr.press/v162/parker-holder22a.html},\n abstract = \t {Training generally-capable agents with reinforcement learning (RL) remains a significant challenge. A promising avenue for improving the robustness of RL agents is through the use of curricula. One such class of methods frames environment design as a game between a student and a teacher, using regret-based objectives to produce environment instantiations (or levels) at the frontier of the student agent’s capabilities. These methods benefit from theoretical robustness guarantees at equilibrium, yet they often struggle to find effective levels in challenging design spaces in practice. By contrast, evolutionary approaches incrementally alter environment complexity, resulting in potentially open-ended learning, but often rely on domain-specific heuristics and vast amounts of computational resources. This work proposes harnessing the power of evolution in a principled, regret-based curriculum. Our approach, which we call Adversarially Compounding Complexity by Editing Levels (ACCEL), seeks to constantly produce levels at the frontier of an agent’s capabilities, resulting in curricula that start simple but become increasingly complex. 
ACCEL maintains the theoretical benefits of prior regret-based methods, while providing significant empirical gains in a diverse set of environments. An interactive version of this paper is available at https://accelagent.github.io.}\n}\n\n@article{agarwal2025cosmos,\n title = {Cosmos World Foundation Model Platform for Physical AI},\n author = {Agarwal, Niket and others},\n journal = {arXiv preprint arXiv:2501.03575},\n year = {2025}\n}\n\n@article{bellemare2013arcade,\n title = {The arcade learning environment: An evaluation platform for general agents},\n author = {Bellemare, Marc G and others},\n journal = {Journal of artificial intelligence research},\n volume = {47},\n pages = {253--279},\n year = {2013}\n}\n\n@article{nichol2018retro,\n title={Gotta Learn Fast: A New Benchmark for Generalization in RL},\n author={Nichol, Alex and Pfau, Vicki and Hesse, Christopher and Klimov, Oleg and Schulman, John},\n journal={arXiv preprint arXiv:1804.03720},\n year={2018}\n}\n\n@inproceedings{matthews2024craftax,\n author={Michael Matthews and Michael Beukman and Benjamin Ellis and Mikayel Samvelyan and Matthew Jackson and Samuel Coward and Jakob Foerster},\n title = {Craftax: A Lightning-Fast Benchmark for Open-Ended Reinforcement Learning},\n booktitle = {International Conference on Machine Learning ({ICML})},\n year = {2024}\n}\n\n@inproceedings{NEURIPS2022_9c7008af,\n author = {Baker, Bowen and Akkaya, Ilge and Zhokov, Peter and Huizinga, Joost and Tang, Jie and Ecoffet, Adrien and Houghton, Brandon and Sampedro, Raul and Clune, Jeff},\n booktitle = {Advances in Neural Information Processing Systems},\n editor = {S. Koyejo and S. Mohamed and A. Agarwal and D. Belgrave and K. Cho and A. Oh},\n pages = {24639--24654},\n publisher = {Curran Associates, Inc.},\n title = {Video PreTraining (VPT): Learning to Act by Watching Unlabeled Online Videos},\n url = {https://proceedings.neurips.cc/paper_files/paper/2022/file/9c7008aff45b5d8f0973b23e1a22ada0-Paper-Conference.pdf},\n volume = {35},\n year = {2022}\n}\n\n@inproceedings{osband2020bsuite,\n title={Behaviour Suite for Reinforcement Learning},\n author={Osband, Ian and\n Doron, Yotam and\n Hessel, Matteo and\n Aslanides, John and\n Sezener, Eren and\n Saraiva, Andre and\n McKinney, Katrina and\n Lattimore, Tor and\n {Sz}epesv{\'a}ri, Csaba and\n Singh, Satinder and\n Van Roy, Benjamin and\n Sutton, Richard and\n Silver, David and\n van Hasselt, Hado},\n booktitle={International Conference on Learning Representations},\n year={2020},\n url={https://openreview.net/forum?id=rygf-kSYwH}\n}\n\n@article{nguyen2025crowd-sourcing,\n author = {Nguyen, Alfred and Mahajan, Mihir and Srambical, Franz},\n title = {Crowd-Sourcing A Dataset To Make Agents Code Like Humans},\n journal = {p(doom) blog},\n year = {2025},\n note = {https://pdoom.org/blog.html}\n}",bibtex,tab
|
| 3 |
+
2,64,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"4:26:15 PM [info] Activating crowd-code\n4:26:15 PM [info] Recording started\n4:26:15 PM [info] Initializing git provider using file system watchers...\n4:26:15 PM [info] Git repository found\n4:26:15 PM [info] Git provider initialized successfully\n4:26:15 PM [info] Initial git state: [object Object]\n",Log,tab
|
| 4 |
+
3,1076,"examples/bibliography.bib",0,0,"",bibtex,tab
|
| 5 |
+
4,279083,"examples/jasmine.html",0,0,"<!--\n Copyright 2018 p(doom)\n\n Licensed under the Apache License, Version 2.0 (the ""License"");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an ""AS IS"" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n<!doctype html>\n\n<head>\n <script src=""template.v2.js""></script>\n <meta name=""viewport"" content=""width=device-width, initial-scale=1"">\n <meta charset=""utf8"">\n <link rel=""icon"" type=""image/png"" href=""favicon.png"">\n</head>\n\n<body>\n <!--\n <distill-header></distill-header>\n -->\n <d-front-matter>\n <script id='distill-front-matter' type=""text/json"">{\n ""title"": ""🧞♀️ Jasmine: A Simple, Performant and Scalable JAX-based World Modeling Codebase"",\n ""description"": ""We introduce Jasmine, a production-ready JAX-based codebase for world modeling from unlabeled videos. Scale from single hosts to hundreds of xPUs thanks to XLA."",\n ""published"": ""August 5, 2025"",\n ""url"": ""https://pdoom.org/jasmine.html"",\n ""authors"": [\n {\n ""author"":""Mihir Mahajan"",\n ""authorURL"":""https://maharajamihir.github.io/"",\n ""affiliations"": [{""name"": ""p(doom)"", ""url"": ""https://pdoom.org/""},\n {""name"": ""TUM""}]\n },\n {\n ""author"":""Alfred Nguyen"",\n ""authorURL"":""https://avocadoali.github.io/"",\n ""affiliations"": [{""name"": ""p(doom)"", ""url"": ""https://pdoom.org/""},\n {""name"": ""TUM""}]\n },\n {\n ""author"":""Franz Srambical"",\n ""authorURL"":""https://srambical.fr/"",\n ""affiliations"": [{""name"": ""p(doom)"", ""url"": ""https://pdoom.org/""},\n {""name"": ""TUM""}]\n },\n {\n ""author"":""Stefan Bauer"",\n ""authorURL"":""https://www.professoren.tum.de/en/bauer-stefan"",\n ""affiliations"": [{""name"": ""TUM""}]\n }\n ],\n ""katex"": {\n ""delimiters"": [\n {""left"": ""$$"", ""right"": ""$$"", ""display"": false}\n ]\n }\n }</script>\n </d-front-matter>\n <d-title>\n <p>\n We introduce <a href=""https://github.com/p-doom/jasmine"">Jasmine</a>, a production-ready JAX-based codebase for world modeling from unlabeled videos.\n Scale from single hosts to hundreds of xPUs thanks to XLA.\n </p>\n </d-title>\n <d-byline></d-byline>\n <d-article>\n <a class=""marker"" href=""#section-1"" id=""section-1""><span>1</span></a>\n <figure style=""grid-column: page; margin: 1rem 0; display: flex; justify-content: center""><img src=""jasmine_preview.gif""\n style=""width:100%; border-radius: 8px;"" /></figure>\n <figcaption style=""grid-column: page; text-align: center; margin-bottom: 2rem; font-size: 0.8em; color: rgba(0, 0, 0, 0.5);"">Figure 1: Jasmine in action.</figcaption>\n <h2>Introduction</h2>\n <p>\n We are at the cusp of an intelligence revolution. Neural networks are able to clone the behaviour of peak human intellectual performance <d-cite key=""openai2025imo,deepmind2025imo""></d-cite>\n given enough compute, data, and the right algorithms <d-cite key=""deepseekai2025r1""></d-cite>. 
While an increasing amount of capital expenditure is allocated to compute clusters, and a well-working\n recipe of equipping models with the required priors and capacity to reason is publicly available, the path to human-level intelligence with the ability to automate\n large fractions of the economy will increasingly be shaped by paradigms that are able to find and efficiently use untouched data troves.\n </p>\n <p>\n While product-feedback-loops <d-cite key=""cursor2025tab""></d-cite> constitute an adaptive data trove, many domains like robotics are not mature enough to yield a product with wide enough\n adoption to create a feedback-loop of sufficient magnitude, prompting the search for alternatives.\n One paradigm proposed by the research community to overcome the data scarcity in those domains is that of world models. While world models can help frontier model\n development in numerous ways, an ambitious goal of the community is to train a world model to act as a simulation of the world <d-cite key=""bruce2024genie,parkerholder2024genie2,deepmind2025genie3""></d-cite>, in order to\n train an agent in that simulation, via an adaptive curriculum <d-cite key=""parkerholder2022evolving""></d-cite> or otherwise.\n </p>\n <h2>Deriving Empirical Environment Complexity Scaling Trends</h2>\n <p>\n While numerous previous works have investigated large-scale world modeling and its application to robotics <d-cite key=""agarwal2025cosmos""></d-cite>, world modeling for agent training calls for a vastly different treatment.\n Such regime requires the compounding error of world models to be orders of magnitude smaller than when solely used for short-term look-ahead. The feasibility of such a world model in its truest sense is entirely\n understudied, and Jasmine, a world modeling codebase, is our first milestone towards studying the setting using rigorous evaluations. Specifically, we want to develop <i>Empirical Environment Complexity Scaling Trends</i>, where we train world models to full convergence\n in environments of increasing complexity (Atari <d-cite key=""bellemare2013arcade""></d-cite>, RetroGym <d-cite key=""nichol2018retro""></d-cite>, Craftax <d-cite key=""matthews2024craftax""></d-cite>, Minecraft <d-cite key=""NEURIPS2022_9c7008af""></d-cite>)\n and under the synthetic infinite-data regime. Subsequently, we want to evaluate those models two-fold: i) via a taxonomy of granular benchmarks probing\n specific world modeling capabilities (reconstruction quality, environment dynamics at the body/tail of the data distribution, long-horizon consistency) <d-cite key=""osband2020bsuite""></d-cite>, and ii) by training reinforcement learning (RL) agents in both\n the world model and the corresponding ground-truth environment, and measuring the performance difference between those agents.\n </p>\n <p>\n Ultimately, such treatment permits us to derive empirical estimates of compute and data requirements to model environments of increasing complexity sufficiently well (as determined by our evaluation procedure). Only given such estimates can we try to draw conclusions\n about the feasibility of world modeling of environments as complex as the real world for agent training. If our empirical estimates show resource requirement trends that are feasible under the assumption of the continuation of Moore's Law and increased capital\n expenditure, that would manifest world modeling as a paradigm with high likelihood of success in overcoming the data-scarcity in domains as general as (humanoid) robotics. 
Otherwise, the world modeling research community must realign its direction with downstream goals\n that are feasible.\n </p>\n <h2>A batteries-included foundation for world modeling research</h2>\n <p>\n Jasmine, our first milestone towards deriving <i>Empirical Environment Complexity Scaling Trends</i>, is the result of weeks of infrastructure work to make large-scale world modeling research more accessible. What started off as a fork of\n <a href=""https://github.com/flairox/jafar"">Jafar</a> grew into a full-fledged world\n modeling codebase amenable to large-scale training, implementing multiple dynamics model baselines, asynchronous checkpointing, process-parallel dataloading, checkpointing of model weights, optimizer and dataloader states, checkpointing policies, full reproducibility with <strong>identical</strong>\n training curves, mixed precision training, optimized FlashAttention (via <a href=""https://github.com/jax-ml/jax/blob/a155c5a9997924170e0067d552351a9833c12c11/jax/_src/cudnn/fused_attention_stablehlo.py#L842"">cuDNN SDPA</a>), activation checkpointing, DDP\n (with FSDP/HSDP requiring changing a singe LoC), WSD schedule, index-shuffling during dataloading, and native <a href=""https://github.com/google-deepmind/treescope"">Treescope</a> support. Jasmine implements the new\n <a href=""https://flax.readthedocs.io/en/latest/migrating/linen_to_nnx.html"">flax.nnx</a> API and strictly adheres to Noam Shazeer's <a href=""https://medium.com/@NoamShazeer/shape-suffixes-good-coding-style-f836e72e24fd"">shape suffix convention</a>, thereby providing\n a didactic implementation of world modeling architectures. Jasmine solely depends\n on battle-tested libraries from the Google ecosystem (<a href=""https://github.com/google/flax"">Flax</a>, <a href=""https://github.com/google-deepmind/optax"">Optax</a>, <a href=""https://github.com/google/orbax"">Orbax</a>, <a href=""https://github.com/google/grain"">Grain</a>,\n <a href=""https://github.com/google-deepmind/dm_pix"">PIX</a>, <a href=""https://github.com/google/array_record"">ArrayRecord</a>).\n </p>\n <h2>Releasing a dataset of fine-grained research engineering</h2>\n <p>\n We captured every step of the research engineering process behind Jasmine using <a href=""https://github.com/p-doom/crowd-code"">crowd-code</a> <d-cite key=""nguyen2025crowd-sourcing""></d-cite>,\n a VS Code/ Cursor extension that captures fine-grained IDE interactions (character-level edits, navigation, debugging patterns, terminal usage) and allows researchers to contribute their \n engineering process to a crowd-sourced dataset. Today, we release <a href=""https://huggingface.co/datasets/p-doom/crowd-code-0.1""><code>crowd-code-0.1</code></a>, our first dataset of dense IDE interactions, which encompasses the entire development of Jasmine.\n <code>crowd-code-0.1</code> is unfiltered, uncleaned, and uncurated, but only contains IDE interactions of the Jasmine authors. We are actively working on cleaning and curating the full dataset,\n which will be released in the future.\n </p>\n </d-article>\n\n <d-appendix>\n\n <h3>Contributions</h3>\n <p>MM, AN and FS worked on research, ideation and implementation. FS wrote the manuscript. SB provided feedback and guidance.</p>\n <d-bibliography src=""bibliography.bib""></d-bibliography>\n <distill-appendix>\n </distill-appendix>\n </d-appendix>\n\n <distill-footer></distill-footer>\n\n</body>\n",html,tab
|
| 6 |
+
5,280441,"examples/jasmine.html",8038,0,"",html,selection_command
|
| 7 |
+
6,284744,"examples/bibliography.bib",0,0,"",bibtex,tab
|
| 8 |
+
7,289106,"examples/bibliography.bib",1157,0,"",bibtex,selection_command
|
| 9 |
+
8,289514,"examples/bibliography.bib",2610,0,"",bibtex,selection_command
|
| 10 |
+
9,289862,"examples/bibliography.bib",3036,0,"",bibtex,selection_command
|
| 11 |
+
10,290114,"examples/bibliography.bib",9805,0,"",bibtex,selection_command
|
| 12 |
+
11,291556,"examples/bibliography.bib",9946,0,"",bibtex,selection_command
|
| 13 |
+
12,291654,"examples/bibliography.bib",10343,0,"",bibtex,selection_command
|
| 14 |
+
13,291869,"examples/bibliography.bib",1157,0,"",bibtex,selection_command
|
| 15 |
+
14,292480,"examples/bibliography.bib",2610,0,"",bibtex,selection_command
|
| 16 |
+
15,292732,"examples/bibliography.bib",3036,0,"",bibtex,selection_command
|
| 17 |
+
16,292759,"examples/bibliography.bib",9805,0,"",bibtex,selection_command
|
| 18 |
+
17,292794,"examples/bibliography.bib",9946,0,"",bibtex,selection_command
|
| 19 |
+
18,292824,"examples/bibliography.bib",10343,0,"",bibtex,selection_command
|
| 20 |
+
19,292855,"examples/bibliography.bib",1157,0,"",bibtex,selection_command
|
| 21 |
+
20,292889,"examples/bibliography.bib",2610,0,"",bibtex,selection_command
|
| 22 |
+
21,292922,"examples/bibliography.bib",3036,0,"",bibtex,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-99fbb8cb-fb8a-45a1-8eda-f4f2025861341764421443087-2025_11_29-14.04.10.639/source.csv
ADDED
|
@@ -0,0 +1,21 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,10,"Untitled-1",0,0,"",plaintext,tab
|
| 3 |
+
2,106,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:04:10 PM [info] Activating crowd-code\n2:04:10 PM [info] Recording started\n2:04:10 PM [info] Initializing git provider using file system watchers...\n2:04:10 PM [info] No workspace folder found\n",Log,tab
|
| 4 |
+
3,1149,"Untitled-1",0,0,"",plaintext,tab
|
| 5 |
+
4,9140,"Untitled-1",0,0,"\n",plaintext,content
|
| 6 |
+
5,10014,"Untitled-1",0,1,"",plaintext,content
|
| 7 |
+
6,15958,"TERMINAL",0,0,"Test",,terminal_focus
|
| 8 |
+
7,15967,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 9 |
+
8,16300,"TERMINAL",0,0,"echo ""[crowd-pilot] mock run""",,terminal_command
|
| 10 |
+
9,16301,"TERMINAL",0,0,"]633;C[crowd-pilot] mock run\r\n[1m[7m%[27m[1m[0m \r \r",,terminal_output
|
| 11 |
+
10,18552,"Untitled-1",26,1,"",plaintext,content
|
| 12 |
+
11,19166,"Untitled-1",0,26,"",plaintext,content
|
| 13 |
+
12,26233,"Untitled-1",0,0,"/",plaintext,content
|
| 14 |
+
13,26236,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 15 |
+
14,26405,"Untitled-1",1,0,"/",plaintext,content
|
| 16 |
+
15,26407,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 17 |
+
16,28717,"Untitled-1",1,1,"",plaintext,content
|
| 18 |
+
17,28900,"Untitled-1",0,1,"",plaintext,content
|
| 19 |
+
18,39922,"Untitled-1",0,0,"// crowd-pilot mock insert\n",plaintext,content
|
| 20 |
+
19,39967,"TERMINAL",0,0,"echo ""[crowd-pilot] mock run""",,terminal_command
|
| 21 |
+
20,39967,"TERMINAL",0,0,"]633;C[crowd-pilot] mock run\r\n[1m[7m%[27m[1m[0m \r \r",,terminal_output
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-9dc22d58-e1a3-4f57-8db8-c0aa13ce6c4a1762164243344-2025_11_03-11.04.09.807/source.csv
ADDED
|
@@ -0,0 +1,98 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,2,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=1\n#SBATCH --time=24:00:00\n#SBATCH --cpus-per-task=8\n#SBATCH --gres=gpu:1\n#SBATCH --output=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/atari/data_upload/%x_%j.log\n#SBATCH --error=/fast/project/HFMI_SynergyUnit/jafar_ws/logs/franz/atari/data_upload/%x_%j.log\n#SBATCH --job-name=upload_to_hf\n\nsource .venv/bin/activate\n\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/alien p-doom/atari-alien-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/amidar p-doom/atari-amidar-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/assault p-doom/atari-assault-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/asterix p-doom/atari-asterix-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/bank_heist p-doom/atari-bank_heist-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/battle_zone p-doom/atari-battle_zone-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/boxing p-doom/atari-boxing-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/breakout p-doom/atari-breakout-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/chopper_command p-doom/atari-chopper_command-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/crazy_climber p-doom/atari-crazy_climber-dataset --repo-type dataset\n#python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/demon_attack p-doom/atari-demon_attack-dataset --repo-type dataset\npython slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/pong p-doom/atari-pong-dataset --repo-type dataset",shellscript,tab
|
| 3 |
+
2,252,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:04:09 AM [info] Activating crowd-code\n11:04:09 AM [info] Recording started\n11:04:09 AM [info] Initializing git provider using file system watchers...\n",Log,tab
|
| 4 |
+
3,1123,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"11:04:09 AM [info] Git repository found\n11:04:10 AM [info] Git provider initialized successfully\n11:04:10 AM [info] Initial git state: [object Object]\n",Log,content
|
| 5 |
+
4,1567,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",0,0,"",shellscript,tab
|
| 6 |
+
5,4998,"TERMINAL",0,0,"",,terminal_command
|
| 7 |
+
6,11725,"TERMINAL",0,0,"",,terminal_command
|
| 8 |
+
7,120034,"TERMINAL",0,0,"",,terminal_command
|
| 9 |
+
8,5833673,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",2140,147,"python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/pong p-doom/atari-pong-dataset --repo-type dataset",shellscript,selection_command
|
| 10 |
+
9,5834102,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",2140,0,"",shellscript,selection_command
|
| 11 |
+
10,5834749,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
| 12 |
+
11,5836253,"TERMINAL",0,0,"",,terminal_focus
|
| 13 |
+
12,5836253,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",0,0,"",shellscript,tab
|
| 14 |
+
13,5836766,"TERMINAL",0,0,"source /home/franz.srambical/jafar/data/.venv/bin/activate",,terminal_command
|
| 15 |
+
14,5836776,"TERMINAL",0,0,"]633;C]0;franz.srambical@hai-login1:~/jafar",,terminal_output
|
| 16 |
+
15,5840422,"TERMINAL",0,0,"deactivate",,terminal_command
|
| 17 |
+
16,5844811,"TERMINAL",0,0,"source .venv/bin/activate",,terminal_command
|
| 18 |
+
17,5845993,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",2140,147,"python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/pong p-doom/atari-pong-dataset --repo-type dataset",shellscript,selection_command
|
| 19 |
+
18,5846428,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",2140,0,"",shellscript,selection_command
|
| 20 |
+
19,5853129,"TERMINAL",0,0,"python slurm/utils/mihir/upload_hf_dataset.py /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/pong p-doom/atari-pong-dataset --repo-type dataset",,terminal_command
|
| 21 |
+
20,5853202,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 22 |
+
21,5855865,"TERMINAL",0,0,"Uploading folder: /fast/project/HFMI_SynergyUnit/jafar_ws/data/atari/pong\r\nRepository: p-doom/atari-pong-dataset\r\nRepository type: dataset\r\nPrivate: False\r\n------------------------------------------------------------\r\n",,terminal_output
|
| 23 |
+
22,5856461,"TERMINAL",0,0,"Authenticated as: emergenz\r\n",,terminal_output
|
| 24 |
+
23,5856577,"TERMINAL",0,0,"Repository 'p-doom/atari-pong-dataset' is ready.\r\n\r\nStarting upload...\r\nThis may take a while for large folders...\r\n",,terminal_output
|
| 25 |
+
24,5857323,"TERMINAL",0,0,"\rRecovering from metadata files: 0%| | 0/713 [00:00<?, ?it/s]",,terminal_output
|
| 26 |
+
25,5857426,"TERMINAL",0,0,"\rRecovering from metadata files: 4%|██████▉ | 26/713 [00:00<00:02, 250.81it/s]",,terminal_output
|
| 27 |
+
26,5857528,"TERMINAL",0,0,"\rRecovering from metadata files: 7%|██████████████ | 53/713 [00:00<00:02, 255.70it/s]",,terminal_output
|
| 28 |
+
27,5857628,"TERMINAL",0,0,"\rRecovering from metadata files: 11%|█████████████████████▏ | 80/713 [00:00<00:02, 260.07it/s]",,terminal_output
|
| 29 |
+
28,5857737,"TERMINAL",0,0,"\rRecovering from metadata files: 15%|████████████████████████████▏ | 107/713 [00:00<00:02, 259.19it/s]",,terminal_output
|
| 30 |
+
29,5857837,"TERMINAL",0,0,"\rRecovering from metadata files: 19%|███████████████████████████████████▎ | 134/713 [00:00<00:02, 260.94it/s]",,terminal_output
|
| 31 |
+
30,5857953,"TERMINAL",0,0,"\rRecovering from metadata files: 23%|██████████████████████████████████████████▍ | 161/713 [00:00<00:02, 262.41it/s]",,terminal_output
|
| 32 |
+
31,5858043,"TERMINAL",0,0,"\rRecovering from metadata files: 26%|█████████████████████████████████████████████████▌ | 188/713 [00:00<00:02, 262.39it/s]",,terminal_output
|
| 33 |
+
32,5858144,"TERMINAL",0,0,"\rRecovering from metadata files: 30%|████████████████████████████████████████████████████████▋ | 215/713 [00:00<00:01, 261.47it/s]",,terminal_output
|
| 34 |
+
33,5858252,"TERMINAL",0,0,"\rRecovering from metadata files: 34%|███████████████████████████████████████████████████████████████▊ | 242/713 [00:00<00:01, 260.16it/s]",,terminal_output
|
| 35 |
+
34,5858359,"TERMINAL",0,0,"\rRecovering from metadata files: 38%|██████████████████████████████████████████████████████████████████████▉ | 269/713 [00:01<00:01, 259.74it/s]",,terminal_output
|
| 36 |
+
35,5858459,"TERMINAL",0,0,"\rRecovering from metadata files: 42%|██████████████████████████████████████████████████████████████████████████████ | 296/713 [00:01<00:01, 261.54it/s]",,terminal_output
|
| 37 |
+
36,5858562,"TERMINAL",0,0,"\rRecovering from metadata files: 45%|█████████████████████████████████████████████████████████████████████████████████████▏ | 323/713 [00:01<00:01, 260.86it/s]",,terminal_output
|
| 38 |
+
37,5858693,"TERMINAL",0,0,"\rRecovering from metadata files: 49%|████████████████████████████████████████████████████████████████████████████████████████████▎ | 350/713 [00:01<00:01, 258.21it/s]",,terminal_output
|
| 39 |
+
38,5858771,"TERMINAL",0,0,"\rRecovering from metadata files: 53%|███████████████████████████████████████████████████████████████████████████████████████████████████▏ | 376/713 [00:01<00:01, 257.95it/s]",,terminal_output
|
| 40 |
+
39,5859077,"TERMINAL",0,0,"\rRecovering from metadata files: 56%|█████████████████████████████████████████████████████████████████████████████████████████████████████████▉ | 402/713 [00:01<00:01, 257.45it/s]\rRecovering from metadata files: 60%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████ | 429/713 [00:01<00:01, 258.68it/s]\rRecovering from metadata files: 64%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▏ | 456/713 [00:01<00:00, 259.75it/s]",,terminal_output
|
| 41 |
+
40,5859177,"TERMINAL",0,0,"\rRecovering from metadata files: 68%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▎ | 483/713 [00:01<00:00, 262.13it/s]",,terminal_output
|
| 42 |
+
41,5859286,"TERMINAL",0,0,"\rRecovering from metadata files: 72%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▍ | 510/713 [00:01<00:00, 261.85it/s]",,terminal_output
|
| 43 |
+
42,5859381,"TERMINAL",0,0,"\rRecovering from metadata files: 75%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▌ | 537/713 [00:02<00:00, 263.15it/s]",,terminal_output
|
| 44 |
+
43,5859486,"TERMINAL",0,0,"\rRecovering from metadata files: 79%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▋ | 564/713 [00:02<00:00, 262.81it/s]",,terminal_output
|
| 45 |
+
44,5859586,"TERMINAL",0,0,"\rRecovering from metadata files: 83%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▊ | 591/713 [00:02<00:00, 264.71it/s]",,terminal_output
|
| 46 |
+
45,5859686,"TERMINAL",0,0,"\rRecovering from metadata files: 87%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▉ | 618/713 [00:02<00:00, 264.51it/s]",,terminal_output
|
| 47 |
+
46,5859798,"TERMINAL",0,0,"\rRecovering from metadata files: 90%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████ | 645/713 [00:02<00:00, 262.21it/s]",,terminal_output
|
| 48 |
+
47,5859896,"TERMINAL",0,0,"\rRecovering from metadata files: 94%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▏ | 672/713 [00:02<00:00, 261.01it/s]",,terminal_output
|
| 49 |
+
48,5859996,"TERMINAL",0,0,"\rRecovering from metadata files: 98%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████▎ | 699/713 [00:02<00:00, 262.82it/s]",,terminal_output
|
| 50 |
+
49,5860070,"TERMINAL",0,0,"\rRecovering from metadata files: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 713/713 [00:02<00:00, 260.94it/s]\r\n\r\n\r\n\r\n---------- 2025-11-03 12:41:49 (0:00:00) ----------\r\nFiles: hashed 713/713 (753.3M/753.3M) | pre-uploaded: 712/712 (753.2M/753.3M) | committed: 713/713 (753.3M/753.3M) | ignored: 0\r\nWorkers: hashing: 0 | get upload mode: 0 | pre-uploading: 0 | committing: 0 | waiting: 0\r\n---------------------------------------------------\r\n",,terminal_output
|
| 51 |
+
50,5861074,"TERMINAL",0,0,"\r\n============================================================\r\nUpload completed successfully!\r\nRepository URL: https://huggingface.co/p-doom/atari-pong-dataset\r\nDataset URL: https://huggingface.co/datasets/p-doom/atari-pong-dataset\r\n============================================================\r\n",,terminal_output
|
| 52 |
+
51,5861079,"TERMINAL",0,0,"]0;franz.srambical@hai-login1:~/jafar",,terminal_output
|
| 53 |
+
52,5916512,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",2237,0,"",shellscript,selection_command
|
| 54 |
+
53,6090241,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",2140,0,"",shellscript,selection_command
|
| 55 |
+
54,6090773,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1975,0,"",shellscript,selection_command
|
| 56 |
+
55,6091423,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1975,1,"#",shellscript,selection_command
|
| 57 |
+
56,6091826,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1808,1,"#",shellscript,selection_command
|
| 58 |
+
57,6092068,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1637,1,"#",shellscript,selection_command
|
| 59 |
+
58,6092110,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1480,1,"#",shellscript,selection_command
|
| 60 |
+
59,6092134,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1327,1,"#",shellscript,selection_command
|
| 61 |
+
60,6092163,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1164,1,"#",shellscript,selection_command
|
| 62 |
+
61,6092212,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1003,1,"#",shellscript,selection_command
|
| 63 |
+
62,6092231,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",848,1,"#",shellscript,selection_command
|
| 64 |
+
63,6092274,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",693,1,"#",shellscript,selection_command
|
| 65 |
+
64,6092298,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",540,1,"#",shellscript,selection_command
|
| 66 |
+
65,6092414,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",389,1,"#",shellscript,selection_command
|
| 67 |
+
66,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1975,1,"",shellscript,content
|
| 68 |
+
67,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1808,1,"",shellscript,content
|
| 69 |
+
68,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1637,1,"",shellscript,content
|
| 70 |
+
69,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1480,1,"",shellscript,content
|
| 71 |
+
70,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1327,1,"",shellscript,content
|
| 72 |
+
71,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1164,1,"",shellscript,content
|
| 73 |
+
72,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",1003,1,"",shellscript,content
|
| 74 |
+
73,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",848,1,"",shellscript,content
|
| 75 |
+
74,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",693,1,"",shellscript,content
|
| 76 |
+
75,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",540,1,"",shellscript,content
|
| 77 |
+
76,6092883,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",389,1,"",shellscript,content
|
| 78 |
+
77,6092893,"slurm/jobs/franz/berlin/atari/data_upload/upload_to_hf.sh",389,0,"",shellscript,selection_command
|
| 79 |
+
78,6112289,"TERMINAL",0,0,"cd slurm",,terminal_command
|
| 80 |
+
79,6121453,"TERMINAL",0,0,"git pull",,terminal_command
|
| 81 |
+
80,6121507,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 82 |
+
81,6122768,"TERMINAL",0,0,"Updating 49b53fc..763f6c3\r\n",,terminal_output
|
| 83 |
+
82,6122855,"TERMINAL",0,0,"Fast-forward\r\n",,terminal_output
|
| 84 |
+
83,6122877,"TERMINAL",0,0," utils/mihir/upload_diffusion_checkpoint.sh | 14 [32m++++++++++++++[m\r\n utils/mihir/upload_doom_dataset.sh | 14 [32m++++++++++++++[m\r\n 2 files changed, 28 insertions(+)\r\n create mode 100644 utils/mihir/upload_diffusion_checkpoint.sh\r\n create mode 100644 utils/mihir/upload_doom_dataset.sh\r\n]0;franz.srambical@hai-login1:~/jafar/slurm",,terminal_output
|
| 85 |
+
84,6128965,"TERMINAL",0,0,"git commit -m ""update""",,terminal_command
|
| 86 |
+
85,6129019,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 87 |
+
86,6129302,"TERMINAL",0,0,"On branch main\r\nYour branch is up to date with 'origin/main'.\r\n\r\nUntracked files:\r\n (use ""git add <file>..."" to include in what will be committed)\r\n\t[31mjobs/franz/berlin/atari/data_upload/[m\r\n\r\nnothing added to commit but untracked files present (use ""git add"" to track)\r\n]0;franz.srambical@hai-login1:~/jafar/slurm",,terminal_output
|
| 88 |
+
87,6143520,"TERMINAL",0,0,"git add *",,terminal_command
|
| 89 |
+
88,6143548,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 90 |
+
89,6143616,"TERMINAL",0,0,"]0;franz.srambical@hai-login1:~/jafar/slurm",,terminal_output
|
| 91 |
+
90,6144543,"TERMINAL",0,0,"git commit -m ""update""",,terminal_command
|
| 92 |
+
91,6144595,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 93 |
+
92,6144682,"TERMINAL",0,0,"[main ea666f8] update\r\n 1 file changed, 25 insertions(+)\r\n create mode 100644 jobs/franz/berlin/atari/data_upload/upload_to_hf.sh\r\n]0;franz.srambical@hai-login1:~/jafar/slurm",,terminal_output
|
| 94 |
+
93,6146729,"TERMINAL",0,0,"git push",,terminal_command
|
| 95 |
+
94,6146765,"TERMINAL",0,0,"]633;C",,terminal_output
|
| 96 |
+
95,6147978,"TERMINAL",0,0,"Enumerating objects: 13, done.\r\nCounting objects: 7% (1/13)\rCounting objects: 15% (2/13)\rCounting objects: 23% (3/13)\rCounting objects: 30% (4/13)\rCounting objects: 38% (5/13)\rCounting objects: 46% (6/13)\rCounting objects: 53% (7/13)\rCounting objects: 61% (8/13)\rCounting objects: 69% (9/13)\rCounting objects: 76% (10/13)\rCounting objects: 84% (11/13)\rCounting objects: 92% (12/13)\rCounting objects: 100% (13/13)\rCounting objects: 100% (13/13), done.\r\nDelta compression using up to 64 threads\r\nCompressing objects: 14% (1/7)\rCompressing objects: 28% (2/7)\rCompressing objects: 42% (3/7)\rCompressing objects: 57% (4/7)\rCompressing objects: 71% (5/7)\rCompressing objects: 85% (6/7)\rCompressing objects: 100% (7/7)\rCompressing objects: 100% (7/7), done.\r\nWriting objects: 12% (1/8)\rWriting objects: 25% (2/8)\rWriting objects: 37% (3/8)\rWriting objects: 50% (4/8)\rWriting objects: 62% (5/8)\rWriting objects: 75% (6/8)\rWriting objects: 87% (7/8)\rWriting objects: 100% (8/8)\rWriting objects: 100% (8/8), 1.10 KiB | 280.00 KiB/s, done.\r\nTotal 8 (delta 2), reused 0 (delta 0), pack-reused 0 (from 0)\r\n",,terminal_output
|
| 97 |
+
96,6148042,"TERMINAL",0,0,"remote: Resolving deltas: 0% (0/2)[K\rremote: Resolving deltas: 50% (1/2)[K\rremote: Resolving deltas: 100% (2/2)[K\rremote: Resolving deltas: 100% (2/2), completed with 2 local objects.[K\r\n",,terminal_output
|
| 98 |
+
97,6148203,"TERMINAL",0,0,"To github.com:p-doom/slurm.git\r\n 763f6c3..ea666f8 main -> main\r\n]0;franz.srambical@hai-login1:~/jafar/slurm",,terminal_output
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-a48196e3-0243-4a79-b294-ca177a8db9741764454294286-2025_11_29-23.11.37.391/source.csv
ADDED
|
@@ -0,0 +1,67 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
2,154,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:11:37 PM [info] Activating crowd-code\n11:11:37 PM [info] Recording started\n11:11:37 PM [info] Initializing git provider using file system watchers...\n11:11:37 PM [info] No workspace folder found\n",Log,tab
|
| 3 |
+
3,2031,"extension-output-pdoom-org.crowd-code-#1-crowd-code",198,0,"11:11:39 PM [info] Retrying git provider initialization...\n11:11:39 PM [info] No workspace folder found\n",Log,content
|
| 4 |
+
4,2884,"Untitled-1",0,0,"",plaintext,tab
|
| 5 |
+
5,5761,"Untitled-1",0,0,"\n",plaintext,content
|
| 6 |
+
6,6424,"Untitled-1",0,0,"",plaintext,selection_command
|
| 7 |
+
7,7657,"Untitled-1",1,0,"",plaintext,selection_command
|
| 8 |
+
8,8048,"Untitled-1",0,0,"",plaintext,selection_command
|
| 9 |
+
9,8306,"Untitled-1",1,0,"",plaintext,selection_command
|
| 10 |
+
10,8844,"Untitled-1",0,0,"",plaintext,selection_command
|
| 11 |
+
11,167277,"TERMINAL",0,0,"Test",,terminal_focus
|
| 12 |
+
12,167284,"Untitled-1",1,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content
|
| 13 |
+
13,169058,"Untitled-1",0,0,"\n",plaintext,content
|
| 14 |
+
14,169455,"Untitled-1",0,0,"",plaintext,selection_command
|
| 15 |
+
15,171382,"Untitled-1",34,13,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",plaintext,content
|
| 16 |
+
16,173004,"Untitled-1",0,0,"\n",plaintext,content
|
| 17 |
+
17,173158,"Untitled-1",1,0,"\n",plaintext,content
|
| 18 |
+
18,173418,"Untitled-1",1,0,"",plaintext,selection_command
|
| 19 |
+
19,173490,"Untitled-1",0,0,"",plaintext,selection_command
|
| 20 |
+
20,174309,"Untitled-1",0,0,"\n",plaintext,content
|
| 21 |
+
21,174602,"Untitled-1",0,0,"",plaintext,selection_command
|
| 22 |
+
22,182003,"Untitled-1",37,63,"",plaintext,content
|
| 23 |
+
23,183817,"Untitled-1",1,0,"",plaintext,selection_command
|
| 24 |
+
24,183903,"Untitled-1",2,0,"",plaintext,selection_command
|
| 25 |
+
25,184505,"Untitled-1",1,0,"",plaintext,selection_command
|
| 26 |
+
26,184812,"Untitled-1",2,0,"",plaintext,selection_command
|
| 27 |
+
27,184956,"Untitled-1",3,0,"",plaintext,selection_command
|
| 28 |
+
28,185116,"Untitled-1",4,0,"",plaintext,selection_command
|
| 29 |
+
29,185255,"Untitled-1",5,0,"",plaintext,selection_command
|
| 30 |
+
30,186108,"Untitled-1",37,0,"",plaintext,selection_command
|
| 31 |
+
31,186477,"Untitled-1",67,0,"",plaintext,selection_command
|
| 32 |
+
32,187625,"Untitled-1",37,0,"",plaintext,selection_command
|
| 33 |
+
33,187787,"Untitled-1",5,0,"",plaintext,selection_command
|
| 34 |
+
34,188288,"Untitled-1",4,0,"",plaintext,selection_command
|
| 35 |
+
35,188843,"Untitled-1",5,0,"",plaintext,selection_command
|
| 36 |
+
36,189008,"Untitled-1",37,0,"",plaintext,selection_command
|
| 37 |
+
37,189270,"Untitled-1",5,0,"",plaintext,selection_command
|
| 38 |
+
38,189440,"Untitled-1",4,0,"",plaintext,selection_command
|
| 39 |
+
39,189607,"Untitled-1",3,0,"",plaintext,selection_command
|
| 40 |
+
40,189744,"Untitled-1",2,0,"",plaintext,selection_command
|
| 41 |
+
41,190426,"Untitled-1",1,0,"",plaintext,selection_command
|
| 42 |
+
42,190743,"Untitled-1",0,0,"",plaintext,selection_command
|
| 43 |
+
43,191252,"Untitled-1",1,0,"",plaintext,selection_command
|
| 44 |
+
44,194870,"Untitled-1",2,0,"",plaintext,selection_command
|
| 45 |
+
45,195061,"Untitled-1",3,0,"",plaintext,selection_command
|
| 46 |
+
46,195223,"Untitled-1",4,0,"",plaintext,selection_command
|
| 47 |
+
47,195815,"Untitled-1",5,0,"",plaintext,selection_command
|
| 48 |
+
48,196654,"Untitled-1",37,0,"",plaintext,selection_command
|
| 49 |
+
49,197968,"Untitled-1",67,0,"",plaintext,selection_command
|
| 50 |
+
50,198788,"Untitled-1",37,0,"",plaintext,selection_command
|
| 51 |
+
51,199038,"Untitled-1",5,0,"",plaintext,selection_command
|
| 52 |
+
52,199754,"Untitled-1",37,0,"",plaintext,selection_command
|
| 53 |
+
53,200377,"Untitled-1",67,0,"",plaintext,selection_command
|
| 54 |
+
54,200776,"Untitled-1",37,0,"",plaintext,selection_command
|
| 55 |
+
55,200952,"Untitled-1",5,0,"",plaintext,selection_command
|
| 56 |
+
56,201276,"Untitled-1",4,0,"",plaintext,selection_command
|
| 57 |
+
57,202172,"Untitled-1",3,0,"",plaintext,selection_command
|
| 58 |
+
58,202284,"Untitled-1",2,0,"",plaintext,selection_command
|
| 59 |
+
59,202507,"Untitled-1",1,0,"",plaintext,selection_command
|
| 60 |
+
60,202652,"Untitled-1",0,0,"",plaintext,selection_command
|
| 61 |
+
61,203035,"Untitled-1",1,0,"",plaintext,selection_command
|
| 62 |
+
62,203222,"Untitled-1",2,0,"",plaintext,selection_command
|
| 63 |
+
63,203421,"Untitled-1",3,0,"",plaintext,selection_command
|
| 64 |
+
64,203825,"Untitled-1",4,0,"",plaintext,selection_command
|
| 65 |
+
65,204181,"Untitled-1",5,0,"",plaintext,selection_command
|
| 66 |
+
66,204478,"Untitled-1",37,0,"",plaintext,selection_command
|
| 67 |
+
67,205109,"Untitled-1",67,0,"",plaintext,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-aa99b858-9894-42ca-bd30-69bb000349481764445038488-2025_11_29-20.37.21.510/source.csv
ADDED
|
@@ -0,0 +1,14 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,11,"Untitled-1",0,0,"",plaintext,tab
|
| 3 |
+
2,129,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"8:37:21 PM [info] Activating crowd-code\n8:37:21 PM [info] Recording started\n8:37:21 PM [info] Initializing git provider using file system watchers...\n8:37:21 PM [info] No workspace folder found\n",Log,tab
|
| 4 |
+
3,586,"Untitled-1",0,0,"",plaintext,tab
|
| 5 |
+
4,1807,"TERMINAL",0,0,"Test",,terminal_focus
|
| 6 |
+
5,1809,"Untitled-1",0,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content
|
| 7 |
+
6,2580,"Untitled-1",46,0,"",plaintext,selection_command
|
| 8 |
+
7,2661,"Untitled-1",39,0,"",plaintext,selection_command
|
| 9 |
+
8,2796,"Untitled-1",32,0,"",plaintext,selection_command
|
| 10 |
+
9,3010,"Untitled-1",0,0,"",plaintext,selection_command
|
| 11 |
+
10,3593,"Untitled-1",0,0,"\n",plaintext,content
|
| 12 |
+
11,4876,"Untitled-1",0,0,"\n",plaintext,content
|
| 13 |
+
12,6045,"Untitled-1",1,0,"",plaintext,selection_command
|
| 14 |
+
13,6114,"Untitled-1",0,0,"",plaintext,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-aed8fe1d-c82c-4ea1-b53e-fd7d19d3a7b31762451775561-2025_11_06-18.56.19.375/source.csv
ADDED
|
@@ -0,0 +1,11 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,1,"src/extension.ts",0,0,"import * as vscode from 'vscode';\nimport * as http from 'http';\nimport { Buffer } from 'buffer';\n\nexport function activate(context: vscode.ExtensionContext) {\n\n\tconsole.log('[crowd-pilot] Extension activated');\n\n\t// Configure terminal to allow tab keybinding to work\n\t(async () => {\n\t\tconst config = vscode.workspace.getConfiguration('terminal.integrated');\n\t\tconst commandsToSkipShell = config.get<string[]>('commandsToSkipShell', []);\n\t\tlet updated = false;\n\t\tif (!commandsToSkipShell.includes('crowd-pilot.testRun')) {\n\t\t\tcommandsToSkipShell.push('crowd-pilot.testRun');\n\t\t\tupdated = true;\n\t\t}\n\t\tif (!commandsToSkipShell.includes('crowd-pilot.hideUi')) {\n\t\t\tcommandsToSkipShell.push('crowd-pilot.hideUi');\n\t\t\tupdated = true;\n\t\t}\n\t\tif (updated) {\n\t\t\tawait config.update('commandsToSkipShell', commandsToSkipShell, vscode.ConfigurationTarget.Global);\n\t\t}\n\t\t// Prime terminal subsystem after intercept is enabled (NOTE: this is a workaround)\n\t\tawait primeTerminalSubsystem();\n\t})().catch((err) => console.error('[crowd-pilot] Startup initialization error:', err));\n\n\tconst testRun = vscode.commands.registerCommand('crowd-pilot.testRun', async () => {\n\t\tconst editor = vscode.window.activeTextEditor;\n\t\tif (!editor) {\n\t\t\treturn;\n\t\t}\n\t\tconst doc = editor.document;\n\t\tconst term = vscode.window.terminals[0] ?? vscode.window.createTerminal('Test');\n\t\tconst plan = buildTestRunPlan(editor, doc, term);\n\n\t\tif (!previewVisible) {\n\t\t\tshowPreviewUI(plan);\n\t\t\treturn;\n\t\t}\n\n\t\tconst runPlan = currentPlan ?? plan;\n\t\thidePreviewUI();\n\n\t\tawait executePlan(runPlan);\n\t\tvscode.window.showInformationMessage('All actions emitted');\n\t });\n\n\tconst hideUi = vscode.commands.registerCommand('crowd-pilot.hideUi', () => {\n\t\thidePreviewUI();\n\t});\n\n\tconst sglangTest = vscode.commands.registerCommand('crowd-pilot.sglangTest', async () => {\n\t\ttry {\n\t\t\tconst portInput = await vscode.window.showInputBox({\n\t\t\t\tprompt: 'Enter SGLang server port',\n\t\t\t\tvalue: '30000'\n\t\t\t});\n\t\t\tif (!portInput) {\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tconst port = Number(portInput);\n\t\t\tif (!Number.isFinite(port) || port <= 0) {\n\t\t\t\tvscode.window.showErrorMessage('Invalid port');\n\t\t\t\treturn;\n\t\t\t}\n\t\t\tawait callSGLangChat(port);\n\t\t} catch (err) {\n\t\t\tconst errorMessage = err instanceof Error ? 
err.message : String(err);\n\t\t\tvscode.window.showErrorMessage(`SGLang test failed: ${errorMessage}`);\n\t\t}\n\t});\n\n\tcontext.subscriptions.push(testRun, hideUi, sglangTest);\n}\n\nexport function deactivate() {}\n\nasync function primeTerminalSubsystem(): Promise<void> {\n\ttry {\n\t\tif (vscode.window.terminals.length > 0) {\n\t\t\treturn;\n\t\t}\n\t\tconst opened = new Promise<void>((resolve) => {\n\t\t\tconst d = vscode.window.onDidOpenTerminal(() => {\n\t\t\t\ttry { d.dispose(); } catch {}\n\t\t\t\tresolve();\n\t\t\t});\n\t\t});\n\t\tconst t = vscode.window.createTerminal('crowd-pilot prime');\n\t\tawait Promise.race([\n\t\t\topened,\n\t\t\tnew Promise<void>(r => setTimeout(r, 150))\n\t\t]);\n\t\ttry { t.dispose(); } catch {}\n\t\tawait new Promise<void>(r => setTimeout(r, 50));\n\t\tconsole.log('[crowd-pilot] Primed terminal subsystem');\n\t} catch (err) {\n\t\tconsole.error('[crowd-pilot] Failed to prime terminal subsystem:', err);\n\t}\n}\n\n// -------------------- Plan Types & Execution --------------------\ntype PlannedAction =\n| { kind: 'showTextDocument' }\n| { kind: 'setSelections', selections: Array<{ start: [number, number], end: [number, number] }> }\n| { kind: 'editInsert', position: [number, number], text: string }\n| { kind: 'terminalShow' }\n| { kind: 'terminalSendText', text: string };\n\nlet currentPlan: PlannedAction[] | undefined;\n\nfunction buildTestRunPlan(_editor: vscode.TextEditor, _doc: vscode.TextDocument, _term: vscode.Terminal): PlannedAction[] {\n\tconst plan: PlannedAction[] = [];\n\tplan.push({ kind: 'showTextDocument' });\n\tplan.push({ kind: 'setSelections', selections: [{ start: [0, 0], end: [0, 0] }] });\n\tplan.push({ kind: 'editInsert', position: [0, 0], text: 'hello world\n' });\n\tplan.push({ kind: 'terminalShow' });\n\tplan.push({ kind: 'terminalSendText', text: 'echo VSCode test' });\n\treturn plan;\n}\n\nasync function executePlan(plan: PlannedAction[]): Promise<void> {\n\tconst editor = vscode.window.activeTextEditor;\n\tif (!editor) { return; }\n\tconst doc = editor.document;\n\tconst term = vscode.window.terminals[0] ?? 
vscode.window.createTerminal('Test');\n\tfor (const action of plan) {\n\t\tif (action.kind === 'showTextDocument') {\n\t\t\tawait vscode.window.showTextDocument(doc);\n\t\t\tcontinue;\n\t\t}\n\t\tif (action.kind === 'setSelections') {\n\t\t\teditor.selections = action.selections.map(s => new vscode.Selection(\n\t\t\t\tnew vscode.Position(s.start[0], s.start[1]),\n\t\t\t\tnew vscode.Position(s.end[0], s.end[1])\n\t\t\t));\n\t\t\tcontinue;\n\t\t}\n\t\tif (action.kind === 'editInsert') {\n\t\t\tawait editor.edit((e: vscode.TextEditorEdit) => e.insert(new vscode.Position(action.position[0], action.position[1]), action.text));\n\t\t\tcontinue;\n\t\t}\n\t\tif (action.kind === 'terminalShow') {\n\t\t\tterm.show();\n\t\t\tcontinue;\n\t\t}\n\t\tif (action.kind === 'terminalSendText') {\n\t\t\tterm.sendText(action.text);\n\t\t\tcontinue;\n\t\t}\n\t}\n}\n\n// -------------------- UI State & Helpers --------------------\nconst UI_CONTEXT_KEY = 'crowdPilot.uiVisible';\nlet previewVisible = false;\nlet previewQuickPick: vscode.QuickPick<(vscode.QuickPickItem & { index: number })> | undefined;\n\nfunction showPreviewUI(plan: PlannedAction[]): void {\n\tconst items: (vscode.QuickPickItem & { index: number })[] = plan.map((action, index) => {\n\t\tswitch (action.kind) {\n\t\t\tcase 'showTextDocument':\n\t\t\t\treturn { index, label: '$(file) Focus active text document' };\n\t\t\tcase 'setSelections':\n\t\t\t\t{\n\t\t\t\t\tconst cursors = action.selections.map(s => `(${s.start[0]}, ${s.start[1]})`).join(', ');\n\t\t\t\t\treturn { index, label: `$(cursor) Move cursor to ${cursors}` };\n\t\t\t\t}\n\t\t\tcase 'editInsert':\n\t\t\t\treturn { index, label: `$(pencil) Insert ""${action.text.replace(/\n/g, '\\n')}"" at (${action.position[0]}, ${action.position[1]})` };\n\t\t\tcase 'terminalShow':\n\t\t\t\treturn { index, label: '$(terminal) Focus terminal' };\n\t\t\tcase 'terminalSendText':\n\t\t\t\treturn { index, label: `$(terminal) Run ""${action.text}"" in terminal` };\n\t\t}\n\t});\n if (!previewQuickPick) {\n previewQuickPick = vscode.window.createQuickPick<(vscode.QuickPickItem & { index: number })>();\n\t\tpreviewQuickPick.title = 'crowd-pilot: preview';\n\t\tpreviewQuickPick.matchOnDetail = true;\n\t\tpreviewQuickPick.ignoreFocusOut = true;\n\t\tpreviewQuickPick.canSelectMany = false;\n previewQuickPick.onDidAccept(async () => {\n const qp = previewQuickPick!;\n const selected = qp.selectedItems?.[0];\n qp.hide();\n if (selected) {\n await executePlan([plan[selected.index]]);\n vscode.window.showInformationMessage('Action executed');\n }\n });\n\t\tpreviewQuickPick.onDidHide(() => {\n\t\t\tpreviewVisible = false;\n\t\t\tvscode.commands.executeCommand('setContext', UI_CONTEXT_KEY, false);\n\t\t\ttry { previewQuickPick?.dispose(); } catch {}\n\t\t\tpreviewQuickPick = undefined;\n\t\t});\n\t}\n\tpreviewQuickPick.items = items;\n\tpreviewQuickPick.placeholder = 'Press Tab to run all, Enter for selected, or Esc to hide';\n\tpreviewQuickPick.show();\n\tpreviewVisible = true;\n\tvscode.commands.executeCommand('setContext', UI_CONTEXT_KEY, true);\n\tcurrentPlan = plan;\n}\n\nfunction hidePreviewUI(): void {\n\tif (previewQuickPick) {\n\t\ttry { previewQuickPick.hide(); } catch {}\n\t\treturn;\n\t}\n\tpreviewVisible = false;\n\tvscode.commands.executeCommand('setContext', UI_CONTEXT_KEY, false);\n}\n\n// -------------------- SGLang Client (simple test) --------------------\nasync function callSGLangChat(port: number): Promise<void> {\n\tconst requestBody = {\n\t\tmodel: 'qwen/qwen2.5-0.5b-instruct',\n\t\tmessages: 
[\n\t\t\t{ role: 'user', content: 'What is the capital of France?' }\n\t\t]\n\t};\n\tconst postData = JSON.stringify(requestBody);\n\n\tconst options = {\n\t\thostname: 'localhost',\n\t\tport: port,\n\t\tpath: '/v1/chat/completions',\n\t\tmethod: 'POST',\n\t\theaders: {\n\t\t\t'Content-Type': 'application/json',\n\t\t\t'Content-Length': Buffer.byteLength(postData)\n\t\t}\n\t};\n\n\ttry {\n\t\tconst json = await new Promise<any>((resolve, reject) => {\n\t\t\tconst req = http.request(options, (res: http.IncomingMessage) => {\n\t\t\t\tlet data = '';\n\t\t\t\tres.on('data', (chunk: Buffer) => {\n\t\t\t\t\tdata += chunk.toString();\n\t\t\t\t});\n\t\t\t\tres.on('end', () => {\n\t\t\t\t\ttry {\n\t\t\t\t\t\tresolve(JSON.parse(data));\n\t\t\t\t\t} catch (err) {\n\t\t\t\t\t\treject(new Error(`Failed to parse response: ${err instanceof Error ? err.message : String(err)}`));\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t});\n\n\t\t\treq.on('error', (err: Error) => {\n\t\t\t\treject(err);\n\t\t\t});\n\n\t\t\treq.write(postData);\n\t\t\treq.end();\n\t\t});\n\n\t\tvscode.window.showInformationMessage(`SGLang response: ${JSON.stringify(json, null, 2)}`);\n\t} catch (err) {\n\t\tconst errorMessage = err instanceof Error ? err.message : String(err);\n\t\tvscode.window.showErrorMessage(`SGLang request failed: ${errorMessage}`);\n\t}\n}\n",typescript,tab
|
| 3 |
+
2,111,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"6:56:19 PM [info] Activating crowd-code\n6:56:19 PM [info] Recording started\n6:56:19 PM [info] Initializing git provider using file system watchers...\n6:56:19 PM [info] Git repository found\n6:56:19 PM [info] Git provider initialized successfully\n6:56:19 PM [info] Initial git state: [object Object]\n",Log,tab
|
| 4 |
+
3,1083,"src/extension.ts",0,0,"",typescript,tab
|
| 5 |
+
4,2629,".vscode/launch.json",0,0,"// A launch configuration that compiles the extension and then opens it inside a new window\n// Use IntelliSense to learn about possible attributes.\n// Hover to view descriptions of existing attributes.\n// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n{\n\t""version"": ""0.2.0"",\n\t""configurations"": [\n\t\t{\n\t\t\t""name"": ""Run Extension"",\n\t\t\t""type"": ""extensionHost"",\n\t\t\t""request"": ""launch"",\n\t\t\t""args"": [\n\t\t\t\t""--extensionDevelopmentPath=${workspaceFolder}""\n\t\t\t],\n\t\t\t""timeout"": 20000, // Increase the timeout to 20 seconds\n\t\t\t""outFiles"": [\n\t\t\t\t""${workspaceFolder}/out/**/*.js""\n\t\t\t],\n\t\t\t""preLaunchTask"": ""${defaultBuildTask}""\n\t\t}\n\t]\n}\n",jsonc,tab
|
| 6 |
+
5,5529,".vscode/launch.json",0,650,"// A launch configuration that compiles the extension and then opens it inside a new window\n// Use IntelliSense to learn about possible attributes.\n// Hover to view descriptions of existing attributes.\n// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387\n{\n\t""version"": ""0.2.0"",\n\t""configurations"": [\n\t\t{\n\t\t\t""name"": ""Run Extension"",\n\t\t\t""type"": ""extensionHost"",\n\t\t\t""request"": ""launch"",\n\t\t\t""args"": [\n\t\t\t\t""--extensionDevelopmentPath=${workspaceFolder}""\n\t\t\t],\n\t\t\t""timeout"": 20000, // Increase the timeout to 20 seconds\n\t\t\t""outFiles"": [\n\t\t\t\t""${workspaceFolder}/out/**/*.js""\n\t\t\t],\n\t\t\t""preLaunchTask"": ""${defaultBuildTask}""\n\t\t}\n\t]\n}\n",jsonc,selection_command
|
| 7 |
+
6,5642,".vscode/launch.json",650,0,"",jsonc,selection_command
|
| 8 |
+
7,51234,".vscode/extensions.json",0,0,"{\n\t// See http://go.microsoft.com/fwlink/?LinkId=827846\n\t// for the documentation about the extensions.json format\n\t""recommendations"": [\n\t\t""dbaeumer.vscode-eslint"",\n\t\t""ms-vscode.extension-test-runner""\n\t]\n}\n",jsonc,tab
|
| 9 |
+
8,52969,".vscode/settings.json",0,0,"// Place your settings in this file to overwrite default and user settings.\n{\n\t""files.exclude"": {\n\t\t""out"": false // set this to true to hide the ""out"" folder with the compiled JS files\n\t},\n\t""search.exclude"": {\n\t\t""out"": true // set this to false to include ""out"" folder in search results\n\t},\n\t// Turn off tsc task auto detection since we have the necessary tasks as npm scripts\n\t""typescript.tsc.autoDetect"": ""off""\n}\n",jsonc,tab
|
| 10 |
+
9,54794,".vscode/tasks.json",0,0,"// See https://go.microsoft.com/fwlink/?LinkId=733558\n// for the documentation about the tasks.json format\n{\n\t""version"": ""2.0.0"",\n\t""tasks"": [\n\t\t{\n\t\t\t""type"": ""npm"",\n\t\t\t""script"": ""watch"",\n\t\t\t""problemMatcher"": ""$tsc-watch"",\n\t\t\t""isBackground"": true,\n\t\t\t""presentation"": {\n\t\t\t\t""reveal"": ""never""\n\t\t\t},\n\t\t\t""group"": {\n\t\t\t\t""kind"": ""build"",\n\t\t\t\t""isDefault"": true\n\t\t\t}\n\t\t}\n\t]\n}\n",jsonc,tab
|
| 11 |
+
10,57850,".vscode/settings.json",0,0,"",jsonc,tab
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b0378404-ad4c-4db7-a171-f843ccf34d071764845103157-2025_12_04-11.45.09.784/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b08d92a3-9c0a-4526-b12f-c973e9c3c43f1752071802867-2025_07_09-16.36.43.962/source.csv
ADDED
|
@@ -0,0 +1,267 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
2,53,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"4:36:43 PM [info] Activating crowd-code\n4:36:43 PM [info] Recording started\n4:36:43 PM [info] Initializing git provider using file system watchers...\n4:36:43 PM [info] No workspace folder found\n",Log,tab
|
| 3 |
+
3,94480,"/Users/franzsrambical/.ssh/config",0,0,"Host login.haicore.berlin\n HostName login.haicore.berlin\n User franz.srambical\n\nHost hpc-build01.scidom.de\n HostName hpc-build01.scidom.de\n User franz.srambical\n\nHost horeka.scc.kit.edu\n HostName horeka.scc.kit.edu\n User tum_dbd0378\n\nHost juwels-cluster.fz-juelich.de\n HostName juwels-cluster.fz-juelich.de\n IdentityFile ~/.ssh/id_ed25519\n User srambical2\n\nHost hpc-submit02.scidom.de\n HostName hpc-submit02.scidom.de\n User franz.srambical\n\nHost hpc-submit01.scidom.de\n HostName hpc-submit01.scidom.de\n User franz.srambical",plaintext,tab
|
| 4 |
+
4,95685,"/Users/franzsrambical/.ssh/config",26,0,"",plaintext,selection_command
|
| 5 |
+
5,95929,"/Users/franzsrambical/.ssh/config",58,0,"",plaintext,selection_command
|
| 6 |
+
6,95947,"/Users/franzsrambical/.ssh/config",81,0,"",plaintext,selection_command
|
| 7 |
+
7,95981,"/Users/franzsrambical/.ssh/config",82,0,"",plaintext,selection_command
|
| 8 |
+
8,96015,"/Users/franzsrambical/.ssh/config",109,0,"",plaintext,selection_command
|
| 9 |
+
9,96048,"/Users/franzsrambical/.ssh/config",142,0,"",plaintext,selection_command
|
| 10 |
+
10,96082,"/Users/franzsrambical/.ssh/config",165,0,"",plaintext,selection_command
|
| 11 |
+
11,96117,"/Users/franzsrambical/.ssh/config",166,0,"",plaintext,selection_command
|
| 12 |
+
12,96150,"/Users/franzsrambical/.ssh/config",190,0,"",plaintext,selection_command
|
| 13 |
+
13,96184,"/Users/franzsrambical/.ssh/config",220,0,"",plaintext,selection_command
|
| 14 |
+
14,96221,"/Users/franzsrambical/.ssh/config",190,0,"",plaintext,selection_command
|
| 15 |
+
15,96485,"/Users/franzsrambical/.ssh/config",166,0,"",plaintext,selection_command
|
| 16 |
+
16,96514,"/Users/franzsrambical/.ssh/config",165,0,"",plaintext,selection_command
|
| 17 |
+
17,96547,"/Users/franzsrambical/.ssh/config",142,0,"",plaintext,selection_command
|
| 18 |
+
18,96575,"/Users/franzsrambical/.ssh/config",109,0,"",plaintext,selection_command
|
| 19 |
+
19,96607,"/Users/franzsrambical/.ssh/config",82,0,"",plaintext,selection_command
|
| 20 |
+
20,96638,"/Users/franzsrambical/.ssh/config",81,0,"",plaintext,selection_command
|
| 21 |
+
21,96671,"/Users/franzsrambical/.ssh/config",58,0,"",plaintext,selection_command
|
| 22 |
+
22,96705,"/Users/franzsrambical/.ssh/config",26,0,"",plaintext,selection_command
|
| 23 |
+
23,96739,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,selection_command
|
| 24 |
+
24,100915,"/Users/franzsrambical/.ssh/config",0,25,"Host login.haicore.berlin",plaintext,selection_command
|
| 25 |
+
25,101063,"/Users/franzsrambical/.ssh/config",0,57,"Host login.haicore.berlin\n HostName login.haicore.berlin",plaintext,selection_command
|
| 26 |
+
26,101231,"/Users/franzsrambical/.ssh/config",0,80,"Host login.haicore.berlin\n HostName login.haicore.berlin\n User franz.srambical",plaintext,selection_command
|
| 27 |
+
27,101369,"/Users/franzsrambical/.ssh/config",0,81,"Host login.haicore.berlin\n HostName login.haicore.berlin\n User franz.srambical\n",plaintext,selection_command
|
| 28 |
+
28,101616,"/Users/franzsrambical/.ssh/config",0,82,"",plaintext,content
|
| 29 |
+
29,106243,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,tab
|
| 30 |
+
30,106263,"/Users/franzsrambical/.ssh/config",454,0,"\n",plaintext,content
|
| 31 |
+
31,108988,"/Users/franzsrambical/.ssh/config",454,1,"",plaintext,content
|
| 32 |
+
32,112261,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,tab
|
| 33 |
+
33,112284,"/Users/franzsrambical/.ssh/config",454,0,"\n",plaintext,content
|
| 34 |
+
34,117860,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,tab
|
| 35 |
+
35,119181,"/Users/franzsrambical/.ssh/config",27,0,"",plaintext,selection_command
|
| 36 |
+
36,119428,"/Users/franzsrambical/.ssh/config",60,0,"",plaintext,selection_command
|
| 37 |
+
37,119459,"/Users/franzsrambical/.ssh/config",83,0,"",plaintext,selection_command
|
| 38 |
+
38,119491,"/Users/franzsrambical/.ssh/config",84,0,"",plaintext,selection_command
|
| 39 |
+
39,119521,"/Users/franzsrambical/.ssh/config",108,0,"",plaintext,selection_command
|
| 40 |
+
40,119553,"/Users/franzsrambical/.ssh/config",138,0,"",plaintext,selection_command
|
| 41 |
+
41,119587,"/Users/franzsrambical/.ssh/config",157,0,"",plaintext,selection_command
|
| 42 |
+
42,119621,"/Users/franzsrambical/.ssh/config",158,0,"",plaintext,selection_command
|
| 43 |
+
43,119654,"/Users/franzsrambical/.ssh/config",192,0,"",plaintext,selection_command
|
| 44 |
+
44,119688,"/Users/franzsrambical/.ssh/config",232,0,"",plaintext,selection_command
|
| 45 |
+
45,119721,"/Users/franzsrambical/.ssh/config",265,0,"",plaintext,selection_command
|
| 46 |
+
46,119755,"/Users/franzsrambical/.ssh/config",283,0,"",plaintext,selection_command
|
| 47 |
+
47,119788,"/Users/franzsrambical/.ssh/config",284,0,"",plaintext,selection_command
|
| 48 |
+
48,119821,"/Users/franzsrambical/.ssh/config",312,0,"",plaintext,selection_command
|
| 49 |
+
49,119855,"/Users/franzsrambical/.ssh/config",346,0,"",plaintext,selection_command
|
| 50 |
+
50,119889,"/Users/franzsrambical/.ssh/config",369,0,"",plaintext,selection_command
|
| 51 |
+
51,119922,"/Users/franzsrambical/.ssh/config",370,0,"",plaintext,selection_command
|
| 52 |
+
52,119958,"/Users/franzsrambical/.ssh/config",398,0,"",plaintext,selection_command
|
| 53 |
+
53,119991,"/Users/franzsrambical/.ssh/config",432,0,"",plaintext,selection_command
|
| 54 |
+
54,120022,"/Users/franzsrambical/.ssh/config",455,0,"",plaintext,selection_command
|
| 55 |
+
55,128496,"/Users/franzsrambical/.ssh/config",455,0,"\n",plaintext,content
|
| 56 |
+
56,128583,"/Users/franzsrambical/.ssh/config",456,0,"ssh franz.srambical@login.haicore.berlin",plaintext,content
|
| 57 |
+
57,128584,"/Users/franzsrambical/.ssh/config",496,0,"",plaintext,selection_keyboard
|
| 58 |
+
58,130098,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_command
|
| 59 |
+
59,131052,"/Users/franzsrambical/.ssh/config",456,40,"ssh franz.srambical@login.haicore.berlin",plaintext,selection_command
|
| 60 |
+
60,132210,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_command
|
| 61 |
+
61,132523,"/Users/franzsrambical/.ssh/config",455,0,"\n",plaintext,content
|
| 62 |
+
62,132854,"/Users/franzsrambical/.ssh/config",456,0,"H",plaintext,content
|
| 63 |
+
63,132856,"/Users/franzsrambical/.ssh/config",457,0,"",plaintext,selection_keyboard
|
| 64 |
+
64,133188,"/Users/franzsrambical/.ssh/config",457,0,"o",plaintext,content
|
| 65 |
+
65,133191,"/Users/franzsrambical/.ssh/config",458,0,"",plaintext,selection_keyboard
|
| 66 |
+
66,133287,"/Users/franzsrambical/.ssh/config",458,0,"s",plaintext,content
|
| 67 |
+
67,133290,"/Users/franzsrambical/.ssh/config",459,0,"",plaintext,selection_keyboard
|
| 68 |
+
68,133387,"/Users/franzsrambical/.ssh/config",459,0,"t",plaintext,content
|
| 69 |
+
69,133389,"/Users/franzsrambical/.ssh/config",460,0,"",plaintext,selection_keyboard
|
| 70 |
+
70,133432,"/Users/franzsrambical/.ssh/config",460,0," ",plaintext,content
|
| 71 |
+
71,133434,"/Users/franzsrambical/.ssh/config",461,0,"",plaintext,selection_keyboard
|
| 72 |
+
72,146263,"/Users/franzsrambical/.ssh/config",460,0,"",plaintext,selection_command
|
| 73 |
+
73,146405,"/Users/franzsrambical/.ssh/config",456,6,"",plaintext,content
|
| 74 |
+
74,146409,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_command
|
| 75 |
+
75,146556,"/Users/franzsrambical/.ssh/config",455,41,"",plaintext,content
|
| 76 |
+
76,148933,"/Users/franzsrambical/.ssh/config",455,0,"\n",plaintext,content
|
| 77 |
+
77,149329,"/Users/franzsrambical/.ssh/config",456,0,"H",plaintext,content
|
| 78 |
+
78,149331,"/Users/franzsrambical/.ssh/config",457,0,"",plaintext,selection_keyboard
|
| 79 |
+
79,149492,"/Users/franzsrambical/.ssh/config",457,0,"o",plaintext,content
|
| 80 |
+
80,149495,"/Users/franzsrambical/.ssh/config",458,0,"",plaintext,selection_keyboard
|
| 81 |
+
81,149560,"/Users/franzsrambical/.ssh/config",458,0,"s",plaintext,content
|
| 82 |
+
82,149562,"/Users/franzsrambical/.ssh/config",459,0,"",plaintext,selection_keyboard
|
| 83 |
+
83,149610,"/Users/franzsrambical/.ssh/config",459,0,"t",plaintext,content
|
| 84 |
+
84,149612,"/Users/franzsrambical/.ssh/config",460,0,"",plaintext,selection_keyboard
|
| 85 |
+
85,149655,"/Users/franzsrambical/.ssh/config",460,0," ",plaintext,content
|
| 86 |
+
86,149657,"/Users/franzsrambical/.ssh/config",461,0,"",plaintext,selection_keyboard
|
| 87 |
+
87,153162,"/Users/franzsrambical/.ssh/config",461,0,"l",plaintext,content
|
| 88 |
+
88,153164,"/Users/franzsrambical/.ssh/config",462,0,"",plaintext,selection_keyboard
|
| 89 |
+
89,153299,"/Users/franzsrambical/.ssh/config",462,0,"o",plaintext,content
|
| 90 |
+
90,153301,"/Users/franzsrambical/.ssh/config",463,0,"",plaintext,selection_keyboard
|
| 91 |
+
91,153371,"/Users/franzsrambical/.ssh/config",463,0,"g",plaintext,content
|
| 92 |
+
92,153371,"/Users/franzsrambical/.ssh/config",464,0,"",plaintext,selection_keyboard
|
| 93 |
+
93,153460,"/Users/franzsrambical/.ssh/config",464,0,"i",plaintext,content
|
| 94 |
+
94,153461,"/Users/franzsrambical/.ssh/config",465,0,"",plaintext,selection_keyboard
|
| 95 |
+
95,153530,"/Users/franzsrambical/.ssh/config",465,0,"n",plaintext,content
|
| 96 |
+
96,153531,"/Users/franzsrambical/.ssh/config",466,0,"",plaintext,selection_keyboard
|
| 97 |
+
97,153785,"/Users/franzsrambical/.ssh/config",466,0,"@",plaintext,content
|
| 98 |
+
98,153786,"/Users/franzsrambical/.ssh/config",467,0,"",plaintext,selection_keyboard
|
| 99 |
+
99,154547,"/Users/franzsrambical/.ssh/config",466,1,"",plaintext,content
|
| 100 |
+
100,155018,"/Users/franzsrambical/.ssh/config",466,0,".",plaintext,content
|
| 101 |
+
101,155020,"/Users/franzsrambical/.ssh/config",467,0,"",plaintext,selection_keyboard
|
| 102 |
+
102,155700,"/Users/franzsrambical/.ssh/config",467,0,"h",plaintext,content
|
| 103 |
+
103,155701,"/Users/franzsrambical/.ssh/config",468,0,"",plaintext,selection_keyboard
|
| 104 |
+
104,155757,"/Users/franzsrambical/.ssh/config",468,0,"a",plaintext,content
|
| 105 |
+
105,155760,"/Users/franzsrambical/.ssh/config",469,0,"",plaintext,selection_keyboard
|
| 106 |
+
106,155841,"/Users/franzsrambical/.ssh/config",469,0,"i",plaintext,content
|
| 107 |
+
107,155843,"/Users/franzsrambical/.ssh/config",470,0,"",plaintext,selection_keyboard
|
| 108 |
+
108,155901,"/Users/franzsrambical/.ssh/config",470,0,"c",plaintext,content
|
| 109 |
+
109,155902,"/Users/franzsrambical/.ssh/config",471,0,"",plaintext,selection_keyboard
|
| 110 |
+
110,156007,"/Users/franzsrambical/.ssh/config",471,0,"o",plaintext,content
|
| 111 |
+
111,156008,"/Users/franzsrambical/.ssh/config",472,0,"",plaintext,selection_keyboard
|
| 112 |
+
112,156111,"/Users/franzsrambical/.ssh/config",472,0,"r",plaintext,content
|
| 113 |
+
113,156112,"/Users/franzsrambical/.ssh/config",473,0,"",plaintext,selection_keyboard
|
| 114 |
+
114,156168,"/Users/franzsrambical/.ssh/config",473,0,"e",plaintext,content
|
| 115 |
+
115,156169,"/Users/franzsrambical/.ssh/config",474,0,"",plaintext,selection_keyboard
|
| 116 |
+
116,156248,"/Users/franzsrambical/.ssh/config",474,0,".",plaintext,content
|
| 117 |
+
117,156249,"/Users/franzsrambical/.ssh/config",475,0,"",plaintext,selection_keyboard
|
| 118 |
+
118,156396,"/Users/franzsrambical/.ssh/config",475,0,"b",plaintext,content
|
| 119 |
+
119,156397,"/Users/franzsrambical/.ssh/config",476,0,"",plaintext,selection_keyboard
|
| 120 |
+
120,156584,"/Users/franzsrambical/.ssh/config",476,0,"e",plaintext,content
|
| 121 |
+
121,156586,"/Users/franzsrambical/.ssh/config",477,0,"",plaintext,selection_keyboard
|
| 122 |
+
122,156644,"/Users/franzsrambical/.ssh/config",477,0,"r",plaintext,content
|
| 123 |
+
123,156645,"/Users/franzsrambical/.ssh/config",478,0,"",plaintext,selection_keyboard
|
| 124 |
+
124,156709,"/Users/franzsrambical/.ssh/config",478,0,"l",plaintext,content
|
| 125 |
+
125,156710,"/Users/franzsrambical/.ssh/config",479,0,"",plaintext,selection_keyboard
|
| 126 |
+
126,156824,"/Users/franzsrambical/.ssh/config",479,0,"i",plaintext,content
|
| 127 |
+
127,156825,"/Users/franzsrambical/.ssh/config",480,0,"",plaintext,selection_keyboard
|
| 128 |
+
128,156896,"/Users/franzsrambical/.ssh/config",480,0,"n",plaintext,content
|
| 129 |
+
129,156896,"/Users/franzsrambical/.ssh/config",481,0,"",plaintext,selection_keyboard
|
| 130 |
+
130,157332,"/Users/franzsrambical/.ssh/config",481,0,"\n",plaintext,content
|
| 131 |
+
131,157969,"/Users/franzsrambical/.ssh/config",482,0," ",plaintext,content
|
| 132 |
+
132,158635,"/Users/franzsrambical/.ssh/config",484,0,"H",plaintext,content
|
| 133 |
+
133,158636,"/Users/franzsrambical/.ssh/config",485,0,"",plaintext,selection_keyboard
|
| 134 |
+
134,158766,"/Users/franzsrambical/.ssh/config",485,0,"o",plaintext,content
|
| 135 |
+
135,158768,"/Users/franzsrambical/.ssh/config",486,0,"",plaintext,selection_keyboard
|
| 136 |
+
136,158794,"/Users/franzsrambical/.ssh/config",486,0,"s",plaintext,content
|
| 137 |
+
137,158797,"/Users/franzsrambical/.ssh/config",487,0,"",plaintext,selection_keyboard
|
| 138 |
+
138,158853,"/Users/franzsrambical/.ssh/config",487,0,"t",plaintext,content
|
| 139 |
+
139,158855,"/Users/franzsrambical/.ssh/config",488,0,"",plaintext,selection_keyboard
|
| 140 |
+
140,159080,"/Users/franzsrambical/.ssh/config",488,0,"N",plaintext,content
|
| 141 |
+
141,159083,"/Users/franzsrambical/.ssh/config",489,0,"",plaintext,selection_keyboard
|
| 142 |
+
142,159245,"/Users/franzsrambical/.ssh/config",489,0,"a",plaintext,content
|
| 143 |
+
143,159247,"/Users/franzsrambical/.ssh/config",490,0,"",plaintext,selection_keyboard
|
| 144 |
+
144,159281,"/Users/franzsrambical/.ssh/config",490,0,"m",plaintext,content
|
| 145 |
+
145,159283,"/Users/franzsrambical/.ssh/config",491,0,"",plaintext,selection_keyboard
|
| 146 |
+
146,159432,"/Users/franzsrambical/.ssh/config",491,0,"e",plaintext,content
|
| 147 |
+
147,159434,"/Users/franzsrambical/.ssh/config",492,0,"",plaintext,selection_keyboard
|
| 148 |
+
148,164462,"/Users/franzsrambical/.ssh/config",492,0," ",plaintext,content
|
| 149 |
+
149,164464,"/Users/franzsrambical/.ssh/config",493,0,"",plaintext,selection_keyboard
|
| 150 |
+
150,165235,"/Users/franzsrambical/.ssh/config",493,0,"l",plaintext,content
|
| 151 |
+
151,165237,"/Users/franzsrambical/.ssh/config",494,0,"",plaintext,selection_keyboard
|
| 152 |
+
152,165387,"/Users/franzsrambical/.ssh/config",494,0,"o",plaintext,content
|
| 153 |
+
153,165391,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_keyboard
|
| 154 |
+
154,165462,"/Users/franzsrambical/.ssh/config",495,0,"g",plaintext,content
|
| 155 |
+
155,165465,"/Users/franzsrambical/.ssh/config",496,0,"",plaintext,selection_keyboard
|
| 156 |
+
156,165536,"/Users/franzsrambical/.ssh/config",496,0,"i",plaintext,content
|
| 157 |
+
157,165538,"/Users/franzsrambical/.ssh/config",497,0,"",plaintext,selection_keyboard
|
| 158 |
+
158,165626,"/Users/franzsrambical/.ssh/config",497,0,"n",plaintext,content
|
| 159 |
+
159,165628,"/Users/franzsrambical/.ssh/config",498,0,"",plaintext,selection_keyboard
|
| 160 |
+
160,165774,"/Users/franzsrambical/.ssh/config",498,0,".",plaintext,content
|
| 161 |
+
161,165777,"/Users/franzsrambical/.ssh/config",499,0,"",plaintext,selection_keyboard
|
| 162 |
+
162,166016,"/Users/franzsrambical/.ssh/config",499,0,"h",plaintext,content
|
| 163 |
+
163,166018,"/Users/franzsrambical/.ssh/config",500,0,"",plaintext,selection_keyboard
|
| 164 |
+
164,166062,"/Users/franzsrambical/.ssh/config",500,0,"a",plaintext,content
|
| 165 |
+
165,166064,"/Users/franzsrambical/.ssh/config",501,0,"",plaintext,selection_keyboard
|
| 166 |
+
166,166165,"/Users/franzsrambical/.ssh/config",501,0,"i",plaintext,content
|
| 167 |
+
167,166167,"/Users/franzsrambical/.ssh/config",502,0,"",plaintext,selection_keyboard
|
| 168 |
+
168,166294,"/Users/franzsrambical/.ssh/config",502,0,"c",plaintext,content
|
| 169 |
+
169,166296,"/Users/franzsrambical/.ssh/config",503,0,"",plaintext,selection_keyboard
|
| 170 |
+
170,166400,"/Users/franzsrambical/.ssh/config",503,0,"o",plaintext,content
|
| 171 |
+
171,166402,"/Users/franzsrambical/.ssh/config",504,0,"",plaintext,selection_keyboard
|
| 172 |
+
172,166494,"/Users/franzsrambical/.ssh/config",504,0,"r",plaintext,content
|
| 173 |
+
173,166496,"/Users/franzsrambical/.ssh/config",505,0,"",plaintext,selection_keyboard
|
| 174 |
+
174,166569,"/Users/franzsrambical/.ssh/config",505,0,"e",plaintext,content
|
| 175 |
+
175,166571,"/Users/franzsrambical/.ssh/config",506,0,"",plaintext,selection_keyboard
|
| 176 |
+
176,166623,"/Users/franzsrambical/.ssh/config",506,0,".",plaintext,content
|
| 177 |
+
177,166625,"/Users/franzsrambical/.ssh/config",507,0,"",plaintext,selection_keyboard
|
| 178 |
+
178,166811,"/Users/franzsrambical/.ssh/config",507,0,"b",plaintext,content
|
| 179 |
+
179,166812,"/Users/franzsrambical/.ssh/config",508,0,"",plaintext,selection_keyboard
|
| 180 |
+
180,166871,"/Users/franzsrambical/.ssh/config",508,0,"e",plaintext,content
|
| 181 |
+
181,166873,"/Users/franzsrambical/.ssh/config",509,0,"",plaintext,selection_keyboard
|
| 182 |
+
182,166947,"/Users/franzsrambical/.ssh/config",509,0,"r",plaintext,content
|
| 183 |
+
183,166948,"/Users/franzsrambical/.ssh/config",510,0,"",plaintext,selection_keyboard
|
| 184 |
+
184,166988,"/Users/franzsrambical/.ssh/config",510,0,"l",plaintext,content
|
| 185 |
+
185,166989,"/Users/franzsrambical/.ssh/config",511,0,"",plaintext,selection_keyboard
|
| 186 |
+
186,167052,"/Users/franzsrambical/.ssh/config",511,0,"i",plaintext,content
|
| 187 |
+
187,167054,"/Users/franzsrambical/.ssh/config",512,0,"",plaintext,selection_keyboard
|
| 188 |
+
188,167115,"/Users/franzsrambical/.ssh/config",512,0,"n",plaintext,content
|
| 189 |
+
189,167117,"/Users/franzsrambical/.ssh/config",513,0,"",plaintext,selection_keyboard
|
| 190 |
+
190,167392,"/Users/franzsrambical/.ssh/config",507,6,"berlin",plaintext,content
|
| 191 |
+
191,167715,"/Users/franzsrambical/.ssh/config",513,0,"\n ",plaintext,content
|
| 192 |
+
192,169941,"/Users/franzsrambical/.ssh/config",516,0,"U",plaintext,content
|
| 193 |
+
193,169943,"/Users/franzsrambical/.ssh/config",517,0,"",plaintext,selection_keyboard
|
| 194 |
+
194,170103,"/Users/franzsrambical/.ssh/config",517,0,"s",plaintext,content
|
| 195 |
+
195,170105,"/Users/franzsrambical/.ssh/config",518,0,"",plaintext,selection_keyboard
|
| 196 |
+
196,170170,"/Users/franzsrambical/.ssh/config",518,0,"e",plaintext,content
|
| 197 |
+
197,170172,"/Users/franzsrambical/.ssh/config",519,0,"",plaintext,selection_keyboard
|
| 198 |
+
198,170250,"/Users/franzsrambical/.ssh/config",519,0,"r",plaintext,content
|
| 199 |
+
199,170251,"/Users/franzsrambical/.ssh/config",520,0,"",plaintext,selection_keyboard
|
| 200 |
+
200,171002,"/Users/franzsrambical/.ssh/config",520,0," ",plaintext,content
|
| 201 |
+
201,171005,"/Users/franzsrambical/.ssh/config",521,0,"",plaintext,selection_keyboard
|
| 202 |
+
202,171222,"/Users/franzsrambical/.ssh/config",521,0,"f",plaintext,content
|
| 203 |
+
203,171223,"/Users/franzsrambical/.ssh/config",522,0,"",plaintext,selection_keyboard
|
| 204 |
+
204,171374,"/Users/franzsrambical/.ssh/config",522,0,"r",plaintext,content
|
| 205 |
+
205,171379,"/Users/franzsrambical/.ssh/config",523,0,"",plaintext,selection_keyboard
|
| 206 |
+
206,171397,"/Users/franzsrambical/.ssh/config",523,0,"a",plaintext,content
|
| 207 |
+
207,171398,"/Users/franzsrambical/.ssh/config",524,0,"",plaintext,selection_keyboard
|
| 208 |
+
208,171500,"/Users/franzsrambical/.ssh/config",524,0,"n",plaintext,content
|
| 209 |
+
209,171501,"/Users/franzsrambical/.ssh/config",525,0,"",plaintext,selection_keyboard
|
| 210 |
+
210,171581,"/Users/franzsrambical/.ssh/config",525,0,"z",plaintext,content
|
| 211 |
+
211,171583,"/Users/franzsrambical/.ssh/config",526,0,"",plaintext,selection_keyboard
|
| 212 |
+
212,171700,"/Users/franzsrambical/.ssh/config",526,0,".",plaintext,content
|
| 213 |
+
213,171701,"/Users/franzsrambical/.ssh/config",527,0,"",plaintext,selection_keyboard
|
| 214 |
+
214,171772,"/Users/franzsrambical/.ssh/config",527,0,"s",plaintext,content
|
| 215 |
+
215,171773,"/Users/franzsrambical/.ssh/config",528,0,"",plaintext,selection_keyboard
|
| 216 |
+
216,171831,"/Users/franzsrambical/.ssh/config",528,0,"r",plaintext,content
|
| 217 |
+
217,171833,"/Users/franzsrambical/.ssh/config",529,0,"",plaintext,selection_keyboard
|
| 218 |
+
218,171914,"/Users/franzsrambical/.ssh/config",529,0,"a",plaintext,content
|
| 219 |
+
219,171916,"/Users/franzsrambical/.ssh/config",530,0,"",plaintext,selection_keyboard
|
| 220 |
+
220,171943,"/Users/franzsrambical/.ssh/config",530,0,"m",plaintext,content
|
| 221 |
+
221,171945,"/Users/franzsrambical/.ssh/config",531,0,"",plaintext,selection_keyboard
|
| 222 |
+
222,172140,"/Users/franzsrambical/.ssh/config",531,0,"b",plaintext,content
|
| 223 |
+
223,172143,"/Users/franzsrambical/.ssh/config",532,0,"",plaintext,selection_keyboard
|
| 224 |
+
224,172186,"/Users/franzsrambical/.ssh/config",532,0,"i",plaintext,content
|
| 225 |
+
225,172188,"/Users/franzsrambical/.ssh/config",533,0,"",plaintext,selection_keyboard
|
| 226 |
+
226,172245,"/Users/franzsrambical/.ssh/config",533,0,"c",plaintext,content
|
| 227 |
+
227,172247,"/Users/franzsrambical/.ssh/config",534,0,"",plaintext,selection_keyboard
|
| 228 |
+
228,172289,"/Users/franzsrambical/.ssh/config",534,0,"a",plaintext,content
|
| 229 |
+
229,172291,"/Users/franzsrambical/.ssh/config",535,0,"",plaintext,selection_keyboard
|
| 230 |
+
230,172390,"/Users/franzsrambical/.ssh/config",535,0,"l",plaintext,content
|
| 231 |
+
231,172391,"/Users/franzsrambical/.ssh/config",536,0,"",plaintext,selection_keyboard
|
| 232 |
+
232,172556,"/Users/franzsrambical/.ssh/config",535,0,"",plaintext,selection_command
|
| 233 |
+
233,172719,"/Users/franzsrambical/.ssh/config",503,0,"",plaintext,selection_command
|
| 234 |
+
234,172910,"/Users/franzsrambical/.ssh/config",513,0,"\n ",plaintext,content
|
| 235 |
+
235,173988,"/Users/franzsrambical/.ssh/config",516,0,"I",plaintext,content
|
| 236 |
+
236,173989,"/Users/franzsrambical/.ssh/config",517,0,"",plaintext,selection_keyboard
|
| 237 |
+
237,174082,"/Users/franzsrambical/.ssh/config",517,0,"d",plaintext,content
|
| 238 |
+
238,174084,"/Users/franzsrambical/.ssh/config",518,0,"",plaintext,selection_keyboard
|
| 239 |
+
239,174148,"/Users/franzsrambical/.ssh/config",518,0,"e",plaintext,content
|
| 240 |
+
240,174150,"/Users/franzsrambical/.ssh/config",519,0,"",plaintext,selection_keyboard
|
| 241 |
+
241,174271,"/Users/franzsrambical/.ssh/config",519,0,"n",plaintext,content
|
| 242 |
+
242,174273,"/Users/franzsrambical/.ssh/config",520,0,"",plaintext,selection_keyboard
|
| 243 |
+
243,174746,"/Users/franzsrambical/.ssh/config",516,4,"IdentityFile",plaintext,content
|
| 244 |
+
244,175993,"/Users/franzsrambical/.ssh/config",528,0," ",plaintext,content
|
| 245 |
+
245,175995,"/Users/franzsrambical/.ssh/config",529,0,"",plaintext,selection_keyboard
|
| 246 |
+
246,176464,"/Users/franzsrambical/.ssh/config",529,0,"~",plaintext,content
|
| 247 |
+
247,176467,"/Users/franzsrambical/.ssh/config",530,0,"",plaintext,selection_keyboard
|
| 248 |
+
248,177648,"/Users/franzsrambical/.ssh/config",530,0,"/",plaintext,content
|
| 249 |
+
249,177649,"/Users/franzsrambical/.ssh/config",531,0,"",plaintext,selection_keyboard
|
| 250 |
+
250,177830,"/Users/franzsrambical/.ssh/config",531,0,".",plaintext,content
|
| 251 |
+
251,177832,"/Users/franzsrambical/.ssh/config",532,0,"",plaintext,selection_keyboard
|
| 252 |
+
252,177940,"/Users/franzsrambical/.ssh/config",532,0,"s",plaintext,content
|
| 253 |
+
253,177942,"/Users/franzsrambical/.ssh/config",533,0,"",plaintext,selection_keyboard
|
| 254 |
+
254,178078,"/Users/franzsrambical/.ssh/config",533,0,"s",plaintext,content
|
| 255 |
+
255,178081,"/Users/franzsrambical/.ssh/config",534,0,"",plaintext,selection_keyboard
|
| 256 |
+
256,178173,"/Users/franzsrambical/.ssh/config",534,0,"h",plaintext,content
|
| 257 |
+
257,178175,"/Users/franzsrambical/.ssh/config",535,0,"",plaintext,selection_keyboard
|
| 258 |
+
258,179348,"/Users/franzsrambical/.ssh/config",535,0,"/",plaintext,content
|
| 259 |
+
259,179351,"/Users/franzsrambical/.ssh/config",536,0,"",plaintext,selection_keyboard
|
| 260 |
+
260,179561,"/Users/franzsrambical/.ssh/config",536,0,"i",plaintext,content
|
| 261 |
+
261,179563,"/Users/franzsrambical/.ssh/config",537,0,"",plaintext,selection_keyboard
|
| 262 |
+
262,179646,"/Users/franzsrambical/.ssh/config",537,0,"d",plaintext,content
|
| 263 |
+
263,179648,"/Users/franzsrambical/.ssh/config",538,0,"",plaintext,selection_keyboard
|
| 264 |
+
264,179919,"/Users/franzsrambical/.ssh/config",538,0,"_",plaintext,content
|
| 265 |
+
265,179922,"/Users/franzsrambical/.ssh/config",539,0,"",plaintext,selection_keyboard
|
| 266 |
+
266,180693,"/Users/franzsrambical/.ssh/config",536,3,"id_ed25519",plaintext,content
|
| 267 |
+
267,181818,"/Users/franzsrambical/.ssh/config",545,0,"",plaintext,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b8260a6a-d6ea-4985-9e8c-0662afe503b41755760144143-2025_08_21-09.09.06.825/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-c20692df-d9df-465f-b623-69a0d9b635a71755511610414-2025_08_18-12.06.56.941/source.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-d3edf16c-6e28-41c2-b902-1ffbcf83b1411764448152506-2025_11_29-21.29.16.493/source.csv
ADDED
|
@@ -0,0 +1,64 @@
|
| 1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
| 2 |
+
1,11,"tasks",0,0,"",Log,tab
|
| 3 |
+
2,62,"Untitled-1",0,0,"",plaintext,tab
|
| 4 |
+
3,87,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:29:16 PM [info] Activating crowd-code\n9:29:16 PM [info] Recording started\n9:29:16 PM [info] Initializing git provider using file system watchers...\n9:29:16 PM [info] No workspace folder found\n",Log,tab
|
| 5 |
+
4,2035,"extension-output-pdoom-org.crowd-code-#1-crowd-code",194,0,"9:29:18 PM [info] Retrying git provider initialization...\n9:29:18 PM [info] No workspace folder found\n",Log,content
|
| 6 |
+
5,2235,"Untitled-1",0,0,"",plaintext,tab
|
| 7 |
+
6,10186,"Untitled-1",0,0,"t",plaintext,content
|
| 8 |
+
7,10189,"Untitled-1",1,0,"",plaintext,selection_keyboard
|
| 9 |
+
8,10248,"Untitled-1",1,0,"e",plaintext,content
|
| 10 |
+
9,10250,"Untitled-1",2,0,"",plaintext,selection_keyboard
|
| 11 |
+
10,10436,"Untitled-1",2,0,"s",plaintext,content
|
| 12 |
+
11,10438,"Untitled-1",3,0,"",plaintext,selection_keyboard
|
| 13 |
+
12,10445,"Untitled-1",3,0,"t",plaintext,content
|
| 14 |
+
13,10447,"Untitled-1",4,0,"",plaintext,selection_keyboard
|
| 15 |
+
14,11369,"Untitled-1",3,1,"",plaintext,content
|
| 16 |
+
15,11555,"Untitled-1",2,1,"",plaintext,content
|
| 17 |
+
16,11731,"Untitled-1",1,1,"",plaintext,content
|
| 18 |
+
17,11899,"Untitled-1",0,1,"",plaintext,content
|
| 19 |
+
18,32846,"Untitled-1",0,0,"\n",plaintext,content
|
| 20 |
+
19,33356,"Untitled-1",0,0,"",plaintext,selection_command
|
| 21 |
+
20,35256,"Untitled-1",1,0,"",plaintext,selection_command
|
| 22 |
+
21,35710,"Untitled-1",0,0,"",plaintext,selection_command
|
| 23 |
+
22,36958,"Untitled-1",1,0,"",plaintext,selection_command
|
| 24 |
+
23,39004,"Untitled-1",1,0,"\n",plaintext,content
|
| 25 |
+
24,39326,"Untitled-1",1,0,"",plaintext,selection_command
|
| 26 |
+
25,39446,"Untitled-1",0,0,"",plaintext,selection_command
|
| 27 |
+
26,41786,"TERMINAL",0,0,"Test",,terminal_focus
|
| 28 |
+
27,41788,"Untitled-1",2,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content
|
| 29 |
+
28,47287,"Untitled-1",34,13,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",plaintext,content
|
| 30 |
+
29,48249,"Untitled-1",97,30,"",plaintext,content
|
| 31 |
+
30,52642,"Untitled-1",1,0,"",plaintext,selection_command
|
| 32 |
+
31,53751,"Untitled-1",2,0,"",plaintext,selection_command
|
| 33 |
+
32,54438,"Untitled-1",34,0,"",plaintext,selection_command
|
| 34 |
+
33,54774,"Untitled-1",65,0,"",plaintext,selection_command
|
| 35 |
+
34,54937,"Untitled-1",81,0,"",plaintext,selection_command
|
| 36 |
+
35,55086,"Untitled-1",97,0,"",plaintext,selection_command
|
| 37 |
+
36,55681,"Untitled-1",81,0,"",plaintext,selection_command
|
| 38 |
+
37,70381,"TERMINAL",0,0,"echo ""Hello World""",,terminal_command
|
| 39 |
+
38,70383,"TERMINAL",0,0,"]633;CHello World\r\n[1m[7m%[27m[1m[0m \r \r",,terminal_output
|
| 40 |
+
39,126970,"Untitled-1",97,0,"/* crowd-pilot: insert start */\nline A\nline B\n/* crowd-pilot: insert end */\n",plaintext,content
|
| 41 |
+
40,132489,"Untitled-1",97,0,"",plaintext,selection_command
|
| 42 |
+
41,132550,"Untitled-1",81,0,"",plaintext,selection_command
|
| 43 |
+
42,157569,"Untitled-1",65,0,"",plaintext,selection_command
|
| 44 |
+
43,157707,"Untitled-1",34,0,"",plaintext,selection_command
|
| 45 |
+
44,157835,"Untitled-1",2,0,"",plaintext,selection_command
|
| 46 |
+
45,158093,"Untitled-1",1,0,"",plaintext,selection_command
|
| 47 |
+
46,158121,"Untitled-1",0,0,"",plaintext,selection_command
|
| 48 |
+
47,175655,"Untitled-1",1,0,"",plaintext,selection_command
|
| 49 |
+
48,176613,"Untitled-1",2,0,"",plaintext,selection_command
|
| 50 |
+
49,176781,"Untitled-1",34,0,"",plaintext,selection_command
|
| 51 |
+
50,176956,"Untitled-1",65,0,"",plaintext,selection_command
|
| 52 |
+
51,177167,"Untitled-1",81,0,"",plaintext,selection_command
|
| 53 |
+
52,177824,"Untitled-1",97,0,"",plaintext,selection_command
|
| 54 |
+
53,178367,"Untitled-1",81,0,"",plaintext,selection_command
|
| 55 |
+
54,183783,"Untitled-1",65,0,"",plaintext,selection_command
|
| 56 |
+
55,196610,"Untitled-1",129,13,"/* crowd-pilot: replacement */\nREPLACED LINE 1\nREPLACED LINE 2",plaintext,content
|
| 57 |
+
56,199592,"Untitled-1",192,30,"",plaintext,content
|
| 58 |
+
57,202876,"Untitled-1",34,0,"",plaintext,selection_command
|
| 59 |
+
58,203219,"Untitled-1",2,0,"",plaintext,selection_command
|
| 60 |
+
59,203400,"Untitled-1",1,0,"",plaintext,selection_command
|
| 61 |
+
60,203536,"Untitled-1",0,0,"",plaintext,selection_command
|
| 62 |
+
61,204479,"Untitled-1",1,0,"",plaintext,selection_command
|
| 63 |
+
62,204812,"Untitled-1",2,0,"",plaintext,selection_command
|
| 64 |
+
63,205452,"Untitled-1",1,0,"",plaintext,selection_command
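Each source.csv added above shares the schema shown in its header row: Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type. The following is a minimal sketch of how such a recording could be loaded, assuming Node.js with TypeScript and no external CSV library, and assuming (as the rows above suggest) that newlines inside the Text column are stored as literal \n escape sequences rather than raw line breaks, so each record occupies one physical line. The interface and function names are illustrative only and are not part of the dataset.

// sketch: load one crowd-code source.csv recording (assumptions noted above)
import * as fs from 'fs';

interface RecordedEvent {
	sequence: number;
	time: number;        // milliseconds since recording start
	file: string;        // edited file or pseudo-target such as "TERMINAL"
	rangeOffset: number;
	rangeLength: number;
	text: string;        // inserted/replaced text, with \n stored as a two-character escape
	language: string;
	type: string;        // e.g. "content", "selection_command", "tab", "terminal_command"
}

// Tiny CSV field tokenizer: quoted fields may contain commas and doubled quotes ("").
function parseCsvLine(line: string): string[] {
	const fields: string[] = [];
	let current = '';
	let inQuotes = false;
	for (let i = 0; i < line.length; i++) {
		const ch = line[i];
		if (inQuotes) {
			if (ch === '"' && line[i + 1] === '"') { current += '"'; i++; }
			else if (ch === '"') { inQuotes = false; }
			else { current += ch; }
		} else if (ch === '"') {
			inQuotes = true;
		} else if (ch === ',') {
			fields.push(current);
			current = '';
		} else {
			current += ch;
		}
	}
	fields.push(current);
	return fields;
}

export function loadRecording(path: string): RecordedEvent[] {
	const lines = fs.readFileSync(path, 'utf8').split('\n').filter(l => l.trim().length > 0);
	// slice(1) skips the header row
	return lines.slice(1).map(line => {
		const [sequence, time, file, rangeOffset, rangeLength, text, language, type] = parseCsvLine(line);
		return {
			sequence: Number(sequence),
			time: Number(time),
			file,
			rangeOffset: Number(rangeOffset),
			rangeLength: Number(rangeLength),
			text,
			language,
			type,
		};
	});
}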
|