| operator name | used in model | args |
|---|---|---|
| aten.sum.SymInt | TIMM/pit_b_224 | ((T([64, 256, 31, 31], f16, stride=(246272, 1, 7936, 256)), [0], True), {}) |
| aten.sum.SymInt | TIMM/ecaresnet101d | ((T([64, 256, 56, 56], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/crossvit_9_240 | ((T([64, 256], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/tts_angular | ((T([64, 256], f16), [1], True), {}) |
| aten.sum.SymInt | TIMM/crossvit_9_240 | ((T([64, 256], f16, stride=(50432, 1)), [0], True), {}) |
| aten.sum.SymInt | TIMM/volo_d1_224 | ((T([64, 28, 28, 192], f16, stride=(150528, 28, 1, 784)), [0, 1, 2], True), {}) |
| aten.sum.SymInt | TIMM/volo_d1_224 | ((T([64, 28, 28, 576], f16), [0, 1, 2], True), {}) |
| aten.sum.SymInt | TIMM/swin_base_patch4_window7_224 | ((T([64, 32, 49, 49], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/mixnet_l | ((T([64, 336, 14, 14], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/tf_mixnet_l | ((T([64, 336, 14, 14], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/mixnet_l | ((T([64, 336, 28, 28], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/tf_mixnet_l | ((T([64, 336, 28, 28], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/poolformer_m36 | ((T([64, 384, 14, 14], f16), [0, 2, 3], True), {}) |
| aten.sum.SymInt | TIMM/gmixer_24_224 | ((T([64, 384, 384], f16), [0, 1], True), {}) |
| aten.sum.SymInt | TIMM/volo_d1_224 | ((T([64, 384], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/volo_d1_224 | ((T([64, 384], f16, stride=(75648, 1)), [0], True), {}) |
| aten.sum.SymInt | TIMM/jx_nest_base | ((T([64, 4, 196, 1024], f16), [0, 1, 2], True), {}) |
| aten.sum.SymInt | TIMM/jx_nest_base | ((T([64, 4, 196, 256], f16), [0, 1, 2], True), {}) |
| aten.sum.SymInt | TIMM/jx_nest_base | ((T([64, 4, 196, 256], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/jx_nest_base | ((T([64, 4, 196, 768], f16), [0, 1, 2], True), {}) |
| aten.sum.SymInt | TIMM/crossvit_9_240 | ((T([64, 401, 128], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/vgg16 | ((T([64, 4096], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/mixnet_l | ((T([64, 480, 14, 14], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/tf_mixnet_l | ((T([64, 480, 14, 14], f16), [2, 3], True), {}) |
| aten.sum.SymInt | HuggingFace/ElectraForQuestionAnswering | ((T([64, 512, 128], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/ecaresnet101d | ((T([64, 512, 28, 28], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/mixnet_l | ((T([64, 624, 14, 14], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/tf_mixnet_l | ((T([64, 624, 14, 14], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/sebotnet33ts_256 | ((T([64, 64, 64, 64], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/gmlp_s16_224 | ((T([64, 768, 196], f16, stride=(150528, 1, 768)), [0, 1], True), {}) |
| aten.sum.SymInt | TIMM/mixer_b16_224 | ((T([64, 768, 384], f16), [0, 1], True), {}) |
| aten.sum.SymInt | TIMM/poolformer_m36 | ((T([64, 768, 7, 7], f16), [0, 2, 3], True), {}) |
| aten.sum.SymInt | TIMM/poolformer_m36 | ((T([64, 96, 56, 56], f16), [0, 2, 3], True), {}) |
| aten.sum.SymInt | TIMM/mixnet_l | ((T([64, 960, 7, 7], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/tf_mixnet_l | ((T([64, 960, 7, 7], f16), [2, 3], True), {}) |
| aten.sum.SymInt | TIMM/coat_lite_mini | ((T([6400, 1536], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/coat_lite_mini | ((T([6400, 2048], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/coat_lite_mini | ((T([6400, 512], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/mobilevit_s | ((T([65536, 144], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/mobilevit_s | ((T([65536, 288], f16), [0], True), {}) |
| aten.sum.SymInt | TIMM/mobilevit_s | ((T([65536, 432], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/attention_is_all_you_need_pytorch | ((T([7936, 2048], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/attention_is_all_you_need_pytorch | ((T([7936, 512], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/timm_vision_transformer | ((T([8, 1, 384], f16, stride=(75648, 384, 1)), [0], True), {}) |
| aten.sum.SymInt | TorchBench/resnext50_32x4d | ((T([8, 1000], f16, stride=(0, 0)), [0], True), {}) |
| aten.sum.SymInt | TorchBench/timm_vision_transformer | ((T([8, 1000], f16, stride=(0, 0)), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/MegatronBertForQuestionAnswering | ((T([8, 128, 1024], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/timm_vision_transformer | ((T([8, 197, 384], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/hf_Albert | ((T([8, 512, 128], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/hf_DistilBert | ((T([8, 512, 768], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BlenderbotSmallForCausalLM | ((T([8192, 2048], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BlenderbotSmallForConditionalGeneration | ((T([8192, 2048], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/Speech2Text2ForCausalLM | ((T([8192, 2048], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/Speech2Text2ForCausalLM | ((T([8192, 256], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BertForQuestionAnswering | ((T([8192, 2], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/RobertaForQuestionAnswering | ((T([8192, 2], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BertForMaskedLM | ((T([8192, 30522], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/LayoutLMForMaskedLM | ((T([8192, 30522], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BertForMaskedLM | ((T([8192, 3072], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BertForQuestionAnswering | ((T([8192, 3072], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/LayoutLMForMaskedLM | ((T([8192, 3072], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/LayoutLMForSequenceClassification | ((T([8192, 3072], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/RobertaForQuestionAnswering | ((T([8192, 3072], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BlenderbotSmallForCausalLM | ((T([8192, 512], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BlenderbotSmallForConditionalGeneration | ((T([8192, 512], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BertForMaskedLM | ((T([8192, 768], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/BertForQuestionAnswering | ((T([8192, 768], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/LayoutLMForMaskedLM | ((T([8192, 768], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/LayoutLMForSequenceClassification | ((T([8192, 768], f16), [0], True), {}) |
| aten.sum.SymInt | HuggingFace/RobertaForQuestionAnswering | ((T([8192, 768], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/attention_is_all_you_need_pytorch | ((T([8448, 2048], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/attention_is_all_you_need_pytorch | ((T([8448, 512], f16), [0], True), {}) |
| aten.sum.SymInt | TorchBench/mobilenet_v2 | ((T([96, 1000], f16, stride=(0, 0)), [0], True), {}) |
| aten.sum.SymInt | TorchBench/LearningToPaint | ((T([96, 65], f16), [0], True), {}) |
| aten.sum.default | TorchBench/vision_maskrcnn | ((T([0, 1, 427, 640], f16),), {}) |
| aten.sum.default | TorchBench/vision_maskrcnn | ((T([0, 1, 459, 640], f16),), {}) |
| aten.sum.default | TorchBench/vision_maskrcnn | ((T([0, 1, 612, 612], f16),), {}) |
| aten.sum.default | TorchBench/vision_maskrcnn | ((T([0, 1, 640, 443], f16),), {}) |
| aten.sum.default | TorchBench/vision_maskrcnn | ((T([0, 4], f16),), {}) |
| aten.sum.default | TorchBench/vision_maskrcnn | ((T([0], f16),), {}) |
| aten.sum.default | TorchBench/vision_maskrcnn | ((T([0], i64),), {}) |
| aten.sum.default | TorchBench/timm_efficientdet | ((T([1, 100, 6], f32),), {}) |
| aten.sum.default | TorchBench/pytorch_unet | ((T([1, 2, 640, 959], f16),), {}) |
| aten.sum.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 3, 256, 256], f16),), {}) |
| aten.sum.default | TorchBench/timm_efficientdet | ((T([1, 88, 10, 10], f16),), {}) |
| aten.sum.default | TorchBench/timm_efficientdet | ((T([1, 88, 20, 20], f16),), {}) |
| aten.sum.default | TorchBench/timm_efficientdet | ((T([1, 88, 40, 40], f16),), {}) |
| aten.sum.default | TorchBench/timm_efficientdet | ((T([1, 88, 5, 5], f16),), {}) |
| aten.sum.default | TorchBench/timm_efficientdet | ((T([1, 88, 80, 80], f16),), {}) |
| aten.sum.default | TorchBench/speech_transformer | ((T([10, 22, 1014], f16),), {}) |
| aten.sum.default | TorchBench/speech_transformer | ((T([10, 22], i64),), {}) |
| aten.sum.default | TorchBench/fambench_dlrm | ((T([1024, 1], f16),), {}) |
| aten.sum.default | TorchBench/alexnet | ((T([128, 1000], f16),), {}) |
| aten.sum.default | TorchBench/shufflenet_v2_x1_0 | ((T([128, 1000], f16),), {}) |
| aten.sum.default | TorchBench/timm_nfnet | ((T([128, 1000], f16),), {}) |
| aten.sum.default | TIMM/dm_nfnet_f0 | ((T([128, 1536, 12, 12], f16),), {}) |
| aten.sum.default | TorchBench/timm_nfnet | ((T([128, 1536, 12, 12], f16),), {}) |
| aten.sum.default | TIMM/dm_nfnet_f0 | ((T([128, 1536, 6, 6], f16),), {}) |
| aten.sum.default | TorchBench/timm_nfnet | ((T([128, 1536, 6, 6], f16),), {}) |
| aten.sum.default | TIMM/dm_nfnet_f0 | ((T([128, 256, 48, 48], f16),), {}) |
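Each entry in the args column appears to be a serialized (args, kwargs) pair for the operator call: T([shape], dtype, stride=...) stands for an input tensor, followed by the remaining positional arguments (for aten.sum.SymInt, a list of reduction dims and a keepdim flag; aten.sum.default takes only the tensor). As a minimal illustration only, the sketch below defines a hypothetical `T` helper (not part of this dataset), ignores the recorded strides, and replays two of the rows above through the generic `torch.ops.aten.sum` packet, which dispatches to the matching overload.

```python
import torch

# Dtype tags as they appear in the table; treating them as strings here is an
# assumption made for this sketch (the table writes bare tokens such as f16).
_DTYPES = {"f16": torch.float16, "f32": torch.float32, "i64": torch.int64}

def T(shape, dtype, stride=None):
    """Hypothetical stand-in for the T([...], dtype, stride=...) notation.

    Builds a tensor of the given shape and dtype; the recorded stride is
    ignored and a contiguous tensor is returned, which is enough to replay
    the call shapes shown in the table.
    """
    if dtype == "i64":
        return torch.zeros(shape, dtype=_DTYPES[dtype])
    return torch.randn(shape).to(_DTYPES[dtype])

# Row: aten.sum.SymInt | TIMM/ecaresnet101d | ((T([64, 256, 56, 56], f16), [2, 3], True), {})
# reads as: sum over dims [2, 3] with keepdim=True.
args, kwargs = (T([64, 256, 56, 56], "f16"), [2, 3], True), {}
out = torch.ops.aten.sum(*args, **kwargs)
print(out.shape)  # torch.Size([64, 256, 1, 1])

# Row: aten.sum.default | TorchBench/alexnet | ((T([128, 1000], f16),), {})
# reads as: a full reduction to a 0-d tensor.
args, kwargs = (T([128, 1000], "f16"),), {}
out = torch.ops.aten.sum(*args, **kwargs)
print(out.shape)  # torch.Size([])
```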