Each row records one operator invocation captured from the benchmark suites: the ATen operator name (180 distinct values across the full dataset), the model that issued the call (155 distinct values), and the serialized call arguments (strings of 19 to 5.24k characters).
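In the `args` column, `T([shape], f16, stride=...)` denotes a tensor with the given shape, dtype (`f16` = `torch.float16`), and, when non-contiguous, explicit strides. The traces record only this metadata, not the tensor values, so replaying a row means materializing matching tensors. Below is a minimal sketch of such a helper; the name `T` mirrors the trace notation, but the implementation is an assumption, not part of the dataset.

```python
import torch

def T(shape, dtype=torch.float16, stride=None, device="cuda"):
    """Materialize a random tensor matching one recorded `args` entry.

    Hypothetical helper mirroring the trace notation T([shape], f16, stride=...);
    the traces come from fp16 GPU runs, hence the CUDA default.
    """
    if stride is None:
        return torch.randn(shape, dtype=dtype, device=device)
    # For a non-contiguous entry, allocate a buffer just large enough to
    # back the view, then reinterpret it with as_strided. This also covers
    # stride=(0, 0), where a single scalar is broadcast to the full shape,
    # as in the backward-pass rows below.
    numel = 1 + sum((s - 1) * st for s, st in zip(shape, stride))
    return torch.randn(numel, dtype=dtype, device=device).as_strided(shape, stride)
```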
| operator name | used in model | args |
|---|---|---|
| `aten.mm.default` | HuggingFace/DistillGPT2 | `((T([768, 512], f16, stride=(1, 768)), T([512, 2304], f16)), {})` |
| `aten.mm.default` | HuggingFace/CamemBert | `((T([768, 512], f16, stride=(1, 768)), T([512, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/DistillGPT2 | `((T([768, 512], f16, stride=(1, 768)), T([512, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/GoogleFnet | `((T([768, 512], f16, stride=(1, 768)), T([512, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/OPTForCausalLM | `((T([768, 512], f16, stride=(1, 768)), T([512, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/RobertaForCausalLM | `((T([768, 512], f16, stride=(1, 768)), T([512, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/YituTechConvBert | `((T([768, 512], f16, stride=(1, 768)), T([512, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/CamemBert | `((T([768, 512], f16, stride=(1, 768)), T([512, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/DistillGPT2 | `((T([768, 512], f16, stride=(1, 768)), T([512, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/GoogleFnet | `((T([768, 512], f16, stride=(1, 768)), T([512, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/OPTForCausalLM | `((T([768, 512], f16, stride=(1, 768)), T([512, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/RobertaForCausalLM | `((T([768, 512], f16, stride=(1, 768)), T([512, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/YituTechConvBert | `((T([768, 512], f16, stride=(1, 768)), T([512, 768], f16)), {})` |
| `aten.mm.default` | TIMM/pit_b_224 | `((T([768, 61568], f16, stride=(1, 768)), T([61568, 256], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForMaskedLM | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForQuestionAnswering | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForMaskedLM | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForSequenceClassification | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/RobertaForQuestionAnswering | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForMaskedLM | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForQuestionAnswering | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForMaskedLM | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForSequenceClassification | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/RobertaForQuestionAnswering | `((T([768, 8192], f16, stride=(1, 768)), T([8192, 768], f16)), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([7936, 2048], f16), T([2048, 512], f16)), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([7936, 512], f16), T([512, 2048], f16)), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([7936, 512], f16), T([512, 512], f16)), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([7936, 512], f16), T([512, 512], f16, stride=(1, 512))), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([7936, 512], f16), T([512, 9521], f16, stride=(1, 512))), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([7936, 9521], f16), T([9521, 512], f16)), {})` |
| `aten.mm.default` | TorchBench/resnext50_32x4d | `((T([8, 1000], f16, stride=(0, 0)), T([1000, 2048], f16)), {})` |
| `aten.mm.default` | TorchBench/timm_vision_transformer | `((T([8, 1000], f16, stride=(0, 0)), T([1000, 384], f16)), {})` |
| `aten.mm.default` | HuggingFace/Speech2Text2ForCausalLM | `((T([8192, 10000], f16), T([10000, 256], f16)), {})` |
| `aten.mm.default` | HuggingFace/GPTNeoForCausalLM | `((T([8192, 128], f16, stride=(1, 8192)), T([128, 2048], f16)), {})` |
| `aten.mm.default` | HuggingFace/GPTNeoForSequenceClassification | `((T([8192, 128], f16, stride=(1, 8192)), T([128, 2048], f16)), {})` |
| `aten.mm.default` | HuggingFace/Speech2Text2ForCausalLM | `((T([8192, 2048], f16), T([2048, 256], f16)), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForCausalLM | `((T([8192, 2048], f16), T([2048, 512], f16)), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForConditionalGeneration | `((T([8192, 2048], f16), T([2048, 512], f16)), {})` |
| `aten.mm.default` | HuggingFace/Speech2Text2ForCausalLM | `((T([8192, 256], f16), T([256, 10000], f16, stride=(1, 256))), {})` |
| `aten.mm.default` | HuggingFace/Speech2Text2ForCausalLM | `((T([8192, 256], f16), T([256, 2048], f16)), {})` |
| `aten.mm.default` | HuggingFace/Speech2Text2ForCausalLM | `((T([8192, 256], f16), T([256, 256], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForQuestionAnswering | `((T([8192, 2], f16), T([2, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/RobertaForQuestionAnswering | `((T([8192, 2], f16), T([2, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForMaskedLM | `((T([8192, 30522], f16), T([30522, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForMaskedLM | `((T([8192, 30522], f16), T([30522, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForMaskedLM | `((T([8192, 3072], f16), T([3072, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForQuestionAnswering | `((T([8192, 3072], f16), T([3072, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForMaskedLM | `((T([8192, 3072], f16), T([3072, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForSequenceClassification | `((T([8192, 3072], f16), T([3072, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/RobertaForQuestionAnswering | `((T([8192, 3072], f16), T([3072, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForCausalLM | `((T([8192, 50265], f16), T([50265, 512], f16)), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForConditionalGeneration | `((T([8192, 50265], f16), T([50265, 512], f16)), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForCausalLM | `((T([8192, 512], f16), T([512, 2048], f16)), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForConditionalGeneration | `((T([8192, 512], f16), T([512, 2048], f16)), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForCausalLM | `((T([8192, 512], f16), T([512, 50265], f16, stride=(1, 512))), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForConditionalGeneration | `((T([8192, 512], f16), T([512, 50265], f16, stride=(1, 512))), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForCausalLM | `((T([8192, 512], f16), T([512, 512], f16)), {})` |
| `aten.mm.default` | HuggingFace/BlenderbotSmallForConditionalGeneration | `((T([8192, 512], f16), T([512, 512], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForMaskedLM | `((T([8192, 768], f16), T([768, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForQuestionAnswering | `((T([8192, 768], f16), T([768, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForMaskedLM | `((T([8192, 768], f16), T([768, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForSequenceClassification | `((T([8192, 768], f16), T([768, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/RobertaForQuestionAnswering | `((T([8192, 768], f16), T([768, 3072], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForMaskedLM | `((T([8192, 768], f16), T([768, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/BertForQuestionAnswering | `((T([8192, 768], f16), T([768, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForMaskedLM | `((T([8192, 768], f16), T([768, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/LayoutLMForSequenceClassification | `((T([8192, 768], f16), T([768, 768], f16)), {})` |
| `aten.mm.default` | HuggingFace/RobertaForQuestionAnswering | `((T([8192, 768], f16), T([768, 768], f16)), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([8448, 2048], f16), T([2048, 512], f16)), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([8448, 512], f16), T([512, 2048], f16)), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([8448, 512], f16), T([512, 512], f16)), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([8448, 512], f16), T([512, 512], f16, stride=(1, 512))), {})` |
| `aten.mm.default` | TorchBench/vision_maskrcnn | `((T([91, 0], f16), T([0, 1024], f16)), {})` |
| `aten.mm.default` | TorchBench/attention_is_all_you_need_pytorch | `((T([9521, 7936], f16, stride=(1, 9521)), T([7936, 512], f16)), {})` |
| `aten.mm.default` | TorchBench/mobilenet_v2 | `((T([96, 1000], f16, stride=(0, 0)), T([1000, 1280], f16)), {})` |
| `aten.mm.default` | TIMM/tnt_s_patch16_224 | `((T([96, 200704], f16, stride=(1, 96)), T([200704, 24], f16)), {})` |
| `aten.mm.default` | TorchBench/LearningToPaint | `((T([96, 65], f16), T([65, 512], f16)), {})` |
| `aten.mm.default` | TIMM/coat_lite_mini | `((T([960, 25216], f16, stride=(1, 960)), T([25216, 320], f16)), {})` |
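Many of the `aten.mm.default` rows feed a transposed left operand: `stride=(1, 768)` on a `[768, 512]` tensor is the transpose of a contiguous `[512, 768]` tensor, which is how transposed activations typically reach `mm` during the backward pass. A sketch of replaying the first DistillGPT2 row, reusing the assumed `T` helper from above:

```python
import torch

# Replay of the first DistillGPT2 row (a sketch, assuming the T helper above).
# The left operand is column-major: the transpose of a contiguous
# [512, 768] tensor.
a = T([768, 512], stride=(1, 768))     # f16, non-contiguous
b = T([512, 2304])                     # f16, contiguous
out = torch.ops.aten.mm.default(a, b)  # the exact overload the trace names
assert out.shape == (768, 2304)
```

The next two rows record the `aten.mse_loss` forward/backward pair from TorchBench/Super_SloMo.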
| operator name | used in model | args |
|---|---|---|
| `aten.mse_loss.default` | TorchBench/Super_SloMo | `((T([6, 512, 44, 44], f16), T([6, 512, 44, 44], f16)), {})` |
| `aten.mse_loss_backward.default` | TorchBench/Super_SloMo | `((T([], f16), T([6, 512, 44, 44], f16), T([6, 512, 44, 44], f16), 1), {})` |
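The backward row makes the calling convention visible: the first argument `T([], f16)` is the scalar upstream gradient, followed by the input, the target, and `1`, the ATen enum value for mean reduction. A sketch of the pair, again assuming the `T` helper:

```python
import torch

# Sketch of the Super_SloMo forward/backward pair (assumes T from above).
pred = T([6, 512, 44, 44])
target = T([6, 512, 44, 44])
loss = torch.ops.aten.mse_loss.default(pred, target)  # reduction defaults to mean
grad_output = torch.ones_like(loss)                   # the T([], f16) scalar
grad_input = torch.ops.aten.mse_loss_backward.default(
    grad_output, pred, target, 1                      # 1 == mean reduction
)
assert grad_input.shape == pred.shape
```

The remaining rows record `aten.mul.Scalar` calls.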
| operator name | used in model | args |
|---|---|---|
| `aten.mul.Scalar` | TIMM/resmlp_12_224 | `((T([1, 1, 384], f16), 1), {})` |
| `aten.mul.Scalar` | HuggingFace/BigBird | `((T([1, 1024, 3072], f16), 3.0), {})` |
| `aten.mul.Scalar` | HuggingFace/BigBird | `((T([1, 1024, 768], f16), 3.0), {})` |
| `aten.mul.Scalar` | HuggingFace/GPTNeoForCausalLM | `((T([1, 128, 8192], f16), 3.0), {})` |
| `aten.mul.Scalar` | HuggingFace/GPTNeoForSequenceClassification | `((T([1, 128, 8192], f16), 3.0), {})` |
| `aten.mul.Scalar` | HuggingFace/DistillGPT2 | `((T([1, 512, 3072], f16), 3.0), {})` |
| `aten.mul.Scalar` | HuggingFace/GoogleFnet | `((T([1, 512, 3072], f16), 3.0), {})` |
| `aten.mul.Scalar` | HuggingFace/GoogleFnet | `((T([1, 512, 768], f16), 3.0), {})` |
| `aten.mul.Scalar` | TIMM/resmlp_12_224 | `((T([128, 196, 384], f16, stride=(75264, 1, 196)), 1), {})` |
| `aten.mul.Scalar` | TorchBench/BERT_pytorch | `((T([16, 128, 1], f16), 0.002607561929595828), {})` |
| `aten.mul.Scalar` | TorchBench/BERT_pytorch | `((T([16, 128, 1], f16), 2), {})` |
| `aten.mul.Scalar` | TorchBench/hf_BigBird | `((T([2, 1024, 3072], f16), 3.0), {})` |
| `aten.mul.Scalar` | TorchBench/hf_BigBird | `((T([2, 1024, 768], f16), 3.0), {})` |
| `aten.mul.Scalar` | HuggingFace/AlbertForMaskedLM | `((T([2, 512, 128], f16), 3.0), {})` |
| `aten.mul.Scalar` | HuggingFace/AlbertForMaskedLM | `((T([2, 512, 16384], f16), 3.0), {})` |
| `aten.mul.Scalar` | HuggingFace/AlbertForQuestionAnswering | `((T([2, 512, 16384], f16), 3.0), {})` |
| `aten.mul.Scalar` | TIMM/convnext_base | `((T([32, 1, 14, 14], f16), -0.5), {})` |
| `aten.mul.Scalar` | TIMM/convnext_base | `((T([32, 1, 14, 14], f16), 0.00390625), {})` |
| `aten.mul.Scalar` | TIMM/convnext_base | `((T([32, 1, 28, 28], f16), -0.5), {})` |
| `aten.mul.Scalar` | TIMM/convnext_base | `((T([32, 1, 28, 28], f16), 0.0078125), {})` |
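`aten.mul.Scalar` multiplies every element of a tensor by a Python scalar. A sketch of one BigBird row, once more assuming the `T` helper:

```python
import torch

# Sketch of one aten.mul.Scalar row (assumes the T helper above).
x = T([1, 1024, 3072])
y = torch.ops.aten.mul.Scalar(x, 3.0)  # elementwise multiply by a scalar
assert y.shape == x.shape
```

The recurring factor 3.0 on MLP-width activations (3072, 8192, 16384) is consistent with the 3x² term in a tanh-GELU backward, though that provenance is an inference; the trace itself records only the call.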