| operator name | used in model | args |
|---|---|---|
| aten.sqrt.default | HuggingFace/DebertaForQuestionAnswering | ((T([], f32),), {}) |
| aten.sqrt.default | HuggingFace/DebertaV2ForMaskedLM | ((T([], f32),), {}) |
| aten.sqrt.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([], f32),), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([0, 182], f16), T([0, 182], f16)], 2), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([0, 91], f16), T([0, 91], f16), T([0, 91], f16), T([0, 91], f16)], 2), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([0], f16), T([0], f16), T([0], f16), T([0], f16)], 1), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([1, 88, 10, 10], f16), T([1, 88, 10, 10], f16), T([1, 88, 10, 10], f16)], -1), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([1, 88, 10, 10], f16), T([1, 88, 10, 10], f16)], -1), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([1, 88, 20, 20], f16), T([1, 88, 20, 20], f16), T([1, 88, 20, 20], f16)], -1), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([1, 88, 20, 20], f16), T([1, 88, 20, 20], f16)], -1), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([1, 88, 40, 40], f16), T([1, 88, 40, 40], f16), T([1, 88, 40, 40], f16)], -1), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([1, 88, 40, 40], f16), T([1, 88, 40, 40], f16)], -1), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([1, 88, 5, 5], f16), T([1, 88, 5, 5], f16)], -1), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([1, 88, 80, 80], f16), T([1, 88, 80, 80], f16)], -1), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([100, 6], f32)],), {}) |
| aten.stack.default | TIMM/swin_base_patch4_window7_224 | (([T([1024, 8, 49, 32], f16), T([1024, 8, 49, 32], f16, stride=(12544, 1568, 1, 49)), T([1024, 8, 49, 32], f16)],), {}) |
| aten.stack.default | TorchBench/yolov3 | (([T([12, 16], i64, stride=(0, 1)), T([12, 16], i64, stride=(1, 0))], 2), {}) |
| aten.stack.default | TIMM/tnt_s_patch16_224 | (([T([12544, 4, 16, 6], f16), T([12544, 4, 16, 6], f16, stride=(384, 96, 1, 16))],), {}) |
| aten.stack.default | TIMM/visformer_small | (([T([128, 6, 196, 64], f16), T([128, 6, 196, 64], f16, stride=(75264, 12544, 1, 196)), T([128, 6, 196, 64], f16)],), {}) |
| aten.stack.default | TIMM/visformer_small | (([T([128, 6, 49, 128], f16), T([128, 6, 49, 128], f16, stride=(37632, 6272, 1, 49)), T([128, 6, 49, 128], f16)],), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([1406], i32), T([1406], i32), T([1406], i32), T([1406], i32)], 1), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([1438452, 1], f16), T([1438452, 1], f16), T([1438452, 1], f16), T([1438452, 1], f16)], 2), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([22496], i32), T([22496], i32), T([22496], i32), T([22496], i32)], 1), {}) |
| aten.stack.default | TorchBench/yolov3 | (([T([24, 32], i64, stride=(0, 1)), T([24, 32], i64, stride=(1, 0))], 2), {}) |
| aten.stack.default | TIMM/swin_base_patch4_window7_224 | (([T([256, 16, 49, 32], f16), T([256, 16, 49, 32], f16, stride=(25088, 1568, 1, 49)), T([256, 16, 49, 32], f16)],), {}) |
| aten.stack.default | TIMM/mobilevit_s | (([T([256, 4, 16, 60], f16), T([256, 4, 16, 60], f16, stride=(3840, 960, 1, 16)), T([256, 4, 16, 60], f16)],), {}) |
| aten.stack.default | TIMM/mobilevit_s | (([T([256, 4, 256, 36], f16), T([256, 4, 256, 36], f16, stride=(36864, 9216, 1, 256)), T([256, 4, 256, 36], f16)],), {}) |
| aten.stack.default | TIMM/mobilevit_s | (([T([256, 4, 64, 48], f16), T([256, 4, 64, 48], f16, stride=(12288, 3072, 1, 64)), T([256, 4, 64, 48], f16)],), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([361], i32), T([361], i32), T([361], i32), T([361], i32)], 1), {}) |
| aten.stack.default | TIMM/swin_base_patch4_window7_224 | (([T([4096, 4, 49, 32], f16), T([4096, 4, 49, 32], f16, stride=(6272, 1568, 1, 49)), T([4096, 4, 49, 32], f16)],), {}) |
| aten.stack.default | TorchBench/yolov3 | (([T([48, 64], i64, stride=(0, 1)), T([48, 64], i64, stride=(1, 0))], 2), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([5000, 2], f16), T([5000, 2], f16)], 2), {}) |
| aten.stack.default | TorchBench/timm_efficientdet | (([T([5000], f32), T([5000], f32), T([5000], f32), T([5000], f32)], 1), {}) |
| aten.stack.default | TorchBench/hf_BigBird | (([T([504, 64], f32), T([504, 64], f32)],), {}) |
| aten.stack.default | HuggingFace/BigBird | (([T([504, 64], f32)],), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([5624], i32), T([5624], i32), T([5624], i32), T([5624], i32)], 1), {}) |
| aten.stack.default | TorchBench/Super_SloMo | (([T([6, 352, 352], f16), T([6, 352, 352], f16)], 3), {}) |
| aten.stack.default | TorchBench/fastNLP_Bert | (([T([6, 474, 768], f16)],), {}) |
| aten.stack.default | TIMM/crossvit_9_240 | (([T([64, 1000], f16), T([64, 1000], f16)],), {}) |
| aten.stack.default | TIMM/volo_d1_224 | (([T([64, 12, 196, 32], f16), T([64, 12, 196, 32], f16, stride=(75264, 6272, 1, 196)), T([64, 12, 196, 32], f16)],), {}) |
| aten.stack.default | TIMM/volo_d1_224 | (([T([64, 12, 197, 32], f16, stride=(75648, 6304, 1, 197)), T([64, 12, 197, 32], f16)],), {}) |
| aten.stack.default | TIMM/beit_base_patch16_224 | (([T([64, 12, 197, 64], f16), T([64, 12, 197, 64], f16, stride=(151296, 12608, 1, 197)), T([64, 12, 197, 64], f16)],), {}) |
| aten.stack.default | TIMM/vit_base_patch16_224 | (([T([64, 12, 197, 64], f16), T([64, 12, 197, 64], f16, stride=(151296, 12608, 1, 197)), T([64, 12, 197, 64], f16)],), {}) |
| aten.stack.default | TIMM/deit_base_distilled_patch16_224 | (([T([64, 12, 198, 64], f16), T([64, 12, 198, 64], f16, stride=(152064, 12672, 1, 198)), T([64, 12, 198, 64], f16)],), {}) |
| aten.stack.default | TIMM/jx_nest_base | (([T([64, 16, 1, 196, 32], f16), T([64, 16, 1, 196, 32], f16, stride=(100352, 6272, 6272, 1, 196)), T([64, 16, 1, 196, 32], f16)],), {}) |
| aten.stack.default | TIMM/pit_b_224 | (([T([64, 16, 65, 64], f16), T([64, 16, 65, 64], f16, stride=(66560, 4160, 1, 65)), T([64, 16, 65, 64], f16)],), {}) |
| aten.stack.default | TIMM/swin_base_patch4_window7_224 | (([T([64, 32, 49, 32], f16), T([64, 32, 49, 32], f16, stride=(50176, 1568, 1, 49)), T([64, 32, 49, 32], f16)],), {}) |
| aten.stack.default | TIMM/jx_nest_base | (([T([64, 4, 16, 196, 32], f16), T([64, 4, 16, 196, 32], f16, stride=(401408, 100352, 6272, 1, 196)), T([64, 4, 16, 196, 32], f16)],), {}) |
| aten.stack.default | TIMM/crossvit_9_240 | (([T([64, 4, 197, 64], f16), T([64, 4, 197, 64], f16, stride=(50432, 12608, 1, 197)), T([64, 4, 197, 64], f16)],), {}) |
| aten.stack.default | TIMM/crossvit_9_240 | (([T([64, 4, 401, 32], f16), T([64, 4, 401, 32], f16, stride=(51328, 12832, 1, 401)), T([64, 4, 401, 32], f16)],), {}) |
| aten.stack.default | TIMM/pit_b_224 | (([T([64, 4, 962, 64], f16), T([64, 4, 962, 64], f16, stride=(246272, 61568, 1, 962)), T([64, 4, 962, 64], f16)],), {}) |
| aten.stack.default | TIMM/tnt_s_patch16_224 | (([T([64, 6, 197, 64], f16), T([64, 6, 197, 64], f16, stride=(75648, 12608, 1, 197))],), {}) |
| aten.stack.default | TIMM/pit_b_224 | (([T([64, 8, 257, 64], f16), T([64, 8, 257, 64], f16, stride=(131584, 16448, 1, 257)), T([64, 8, 257, 64], f16)],), {}) |
| aten.stack.default | TIMM/jx_nest_base | (([T([64, 8, 4, 196, 32], f16), T([64, 8, 4, 196, 32], f16, stride=(200704, 25088, 6272, 1, 196)), T([64, 8, 4, 196, 32], f16)],), {}) |
| aten.stack.default | TorchBench/timm_vision_transformer | (([T([8, 6, 197, 64], f16), T([8, 6, 197, 64], f16, stride=(75648, 12608, 1, 197)), T([8, 6, 197, 64], f16)],), {}) |
| aten.stack.default | TorchBench/vision_maskrcnn | (([T([89984], i32), T([89984], i32), T([89984], i32), T([89984], i32)], 1), {}) |
| aten.std.correction | TorchBench/BERT_pytorch | ((T([16, 128, 768], f16), [-1]), {'correction': 1, 'keepdim': True}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([0, 91], f16), T([0, 91], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([0], f16), T([0], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([0], f32), T([0], f32)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([0], i64), 2), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([1, 1024, 1, 1], f16), T([1, 1024, 1, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([1, 128, 1, 1], f16), T([1, 128, 1, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([1, 2048, 1, 1], f16), T([1, 2048, 1, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([1, 256, 1, 1], f16), T([1, 256, 1, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([1, 512, 1, 1], f16), T([1, 512, 1, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([1, 64, 1, 1], f16), T([1, 64, 1, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([1438452, 1], f16), T([1438452, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([1438452], f16, stride=(4,)), T([1438452], f16, stride=(4,))), {}) |
| aten.sub.Tensor | TorchBench/BERT_pytorch | ((T([16, 128, 768], f16), T([16, 128, 1], f16)), {}) |
| aten.sub.Tensor | HuggingFace/LayoutLMForMaskedLM | ((T([16, 512], i64, stride=(2048, 4)), T([16, 512], i64, stride=(2048, 4))), {}) |
| aten.sub.Tensor | HuggingFace/LayoutLMForSequenceClassification | ((T([16, 512], i64, stride=(2048, 4)), T([16, 512], i64, stride=(2048, 4))), {}) |
| aten.sub.Tensor | HuggingFace/GPTNeoForSequenceClassification | ((T([1], i64), 1), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([3, 427, 640], f16, stride=(1, 1920, 3)), T([3, 1, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([3, 459, 640], f16, stride=(1, 1920, 3)), T([3, 1, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([3, 612, 612], f16, stride=(1, 1836, 3)), T([3, 1, 1], f16)), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([3, 640, 443], f16, stride=(1, 1329, 3)), T([3, 1, 1], f16)), {}) |
| aten.sub.Tensor | TIMM/convnext_base | ((T([32, 128, 56, 56], f16, stride=(401408, 1, 7168, 128)), T([32, 1, 56, 56], f16)), {}) |
| aten.sub.Tensor | TIMM/convnext_base | ((T([32, 256, 28, 28], f16, stride=(200704, 1, 7168, 256)), T([32, 1, 28, 28], f16)), {}) |
| aten.sub.Tensor | TIMM/convnext_base | ((T([32, 512, 14, 14], f16, stride=(100352, 1, 7168, 512)), T([32, 1, 14, 14], f16)), {}) |
| aten.sub.Tensor | HuggingFace/OPTForCausalLM | ((T([4, 128], i64), 1), {}) |
| aten.sub.Tensor | HuggingFace/DebertaForMaskedLM | ((T([4, 512, 768], f32), T([4, 512, 1], f32)), {}) |
| aten.sub.Tensor | HuggingFace/DebertaForQuestionAnswering | ((T([4, 512, 768], f32), T([4, 512, 1], f32)), {}) |
| aten.sub.Tensor | HuggingFace/GPT2ForSequenceClassification | ((T([4], i64), 1), {}) |
| aten.sub.Tensor | TorchBench/timm_efficientdet | ((T([5000], f16, stride=(4,)), T([5000], f16, stride=(4,))), {}) |
| aten.sub.Tensor | TorchBench/vision_maskrcnn | ((T([5000], f16, stride=(4,)), T([5000], f16, stride=(4,))), {}) |
| aten.sub.Tensor | TorchBench/timm_efficientdet | ((T([5000], f32), T([5000], f32)), {}) |
| aten.sub.Tensor | TorchBench/Super_SloMo | ((T([6, 2, 351, 352], f16, stride=(495616, 123904, 352, 1)), T([6, 2, 351, 352], f16, stride=(495616, 123904, 352, 1))), {}) |
| aten.sub.Tensor | TorchBench/Super_SloMo | ((T([6, 2, 352, 351], f16, stride=(495616, 123904, 352, 1)), T([6, 2, 352, 351], f16, stride=(495616, 123904, 352, 1))), {}) |
| aten.sub.Tensor | TorchBench/Super_SloMo | ((T([6, 3, 352, 352], f16), T([6, 3, 352, 352], f16)), {}) |
| aten.sub.Tensor | TorchBench/Super_SloMo | ((T([6, 352, 352], f16), 0.5), {}) |
| aten.sub.Tensor | TIMM/poolformer_m36 | ((T([64, 192, 28, 28], f16), T([64, 192, 28, 28], f16)), {}) |
| aten.sub.Tensor | TIMM/poolformer_m36 | ((T([64, 384, 14, 14], f16), T([64, 384, 14, 14], f16)), {}) |
| aten.sub.Tensor | TIMM/poolformer_m36 | ((T([64, 768, 7, 7], f16), T([64, 768, 7, 7], f16)), {}) |
| aten.sub.Tensor | TIMM/poolformer_m36 | ((T([64, 96, 56, 56], f16), T([64, 96, 56, 56], f16)), {}) |
| aten.sub.Tensor | HuggingFace/MBartForConditionalGeneration | ((T([8], i64), 1), {}) |
| aten.sub.Tensor | HuggingFace/PLBartForConditionalGeneration | ((T([8], i64), 1), {}) |
| aten.sum.SymInt | TorchBench/vision_maskrcnn | ((T([0, 1024], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/vision_maskrcnn | ((T([0, 364], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/vision_maskrcnn | ((T([0, 91], f16), [0], True), {}) |
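The `T(shape, dtype, stride=...)` entries in the args column record only tensor metadata (shape, dtype, and optionally strides), so a row can be replayed against the corresponding `torch.ops.aten` overload by materialising placeholder tensors with matching metadata. The sketch below is a minimal illustration of that idea; the `T` helper is a hypothetical stand-in for the notation (not part of the original trace tooling), and it assumes the recorded strides always address a buffer of `prod(shape)` elements, as is the case for the permuted-contiguous views listed here.

```python
import math
import torch

# Hypothetical helper mirroring the T(shape, dtype, stride=...) notation in the
# args column: builds a zero-filled tensor with the given shape/dtype, and, if
# strides are given, views a flat buffer of prod(shape) elements with as_strided.
_DTYPES = {"f16": torch.float16, "f32": torch.float32, "i32": torch.int32, "i64": torch.int64}

def T(shape, dtype="f16", stride=None):
    dt = _DTYPES[dtype]
    if stride is None:
        return torch.zeros(shape, dtype=dt)
    flat = torch.zeros(max(math.prod(shape), 1), dtype=dt)
    return torch.as_strided(flat, shape, stride)

# Replaying the aten.stack.default row recorded for TIMM/vit_base_patch16_224:
# (([T([64, 12, 197, 64], f16),
#    T([64, 12, 197, 64], f16, stride=(151296, 12608, 1, 197)),
#    T([64, 12, 197, 64], f16)],), {})
qkv = [
    T([64, 12, 197, 64], "f16"),
    T([64, 12, 197, 64], "f16", stride=(151296, 12608, 1, 197)),
    T([64, 12, 197, 64], "f16"),
]
out = torch.ops.aten.stack.default(qkv)  # dim defaults to 0
print(out.shape)  # torch.Size([3, 64, 12, 197, 64])
```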