| operator name (stringclasses, 180 values) | used in model (stringclasses, 155 values) | args (stringlengths, 19–5.24k) |
|---|---|---|
| aten.pow.Tensor_Scalar | HuggingFace/GPT2ForSequenceClassification | ((T([4, 1024, 3072], f16), 2.0), {}) |
| aten.pow.Tensor_Scalar | HuggingFace/GPT2ForSequenceClassification | ((T([4, 1024, 3072], f16), 3.0), {}) |
| aten.pow.Tensor_Scalar | TorchBench/hf_GPT2 | ((T([4, 512, 3072], f16), 2.0), {}) |
| aten.pow.Tensor_Scalar | TorchBench/hf_GPT2 | ((T([4, 512, 3072], f16), 3.0), {}) |
| aten.pow.Tensor_Scalar | HuggingFace/DebertaForMaskedLM | ((T([4, 512, 768], f32), 1.0), {}) |
| aten.pow.Tensor_Scalar | HuggingFace/DebertaForQuestionAnswering | ((T([4, 512, 768], f32), 1.0), {}) |
| aten.pow.Tensor_Scalar | HuggingFace/DebertaForMaskedLM | ((T([4, 512, 768], f32), 2), {}) |
| aten.pow.Tensor_Scalar | HuggingFace/DebertaForQuestionAnswering | ((T([4, 512, 768], f32), 2), {}) |
| aten.pow.Tensor_Scalar | TorchBench/fastNLP_Bert | ((T([6, 476, 3072], f16), 2), {}) |
| aten.pow.Tensor_Scalar | TorchBench/hf_Albert | ((T([8, 512, 128], f16), 2.0), {}) |
| aten.pow.Tensor_Scalar | TorchBench/hf_Albert | ((T([8, 512, 128], f16), 3.0), {}) |
| aten.pow.Tensor_Scalar | TorchBench/hf_Albert | ((T([8, 512, 3072], f16), 2.0), {}) |
| aten.pow.Tensor_Scalar | TorchBench/hf_Albert | ((T([8, 512, 3072], f16), 3.0), {}) |
| aten.reciprocal.default | HuggingFace/XLNetLMHeadModel | ((T([512], f32),), {}) |
| aten.reciprocal.default | TorchBench/vision_maskrcnn | ((T([], f32),), {}) |
| aten.reflection_pad2d.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 256, 64, 64], f16), [1, 1, 1, 1]), {}) |
| aten.reflection_pad2d.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 3, 256, 256], f16), [3, 3, 3, 3]), {}) |
| aten.reflection_pad2d.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 64, 256, 256], f16), [3, 3, 3, 3]), {}) |
| aten.reflection_pad2d.default | TorchBench/Background_Matting | ((T([3, 1, 512, 512], f16), [3, 3, 3, 3]), {}) |
| aten.reflection_pad2d.default | TorchBench/Background_Matting | ((T([3, 256, 128, 128], f16), [1, 1, 1, 1]), {}) |
| aten.reflection_pad2d.default | TorchBench/Background_Matting | ((T([3, 3, 512, 512], f16), [3, 3, 3, 3]), {}) |
| aten.reflection_pad2d.default | TorchBench/Background_Matting | ((T([3, 4, 512, 512], f16), [3, 3, 3, 3]), {}) |
| aten.reflection_pad2d.default | TorchBench/Background_Matting | ((T([3, 64, 512, 512], f16), [3, 3, 3, 3]), {}) |
| aten.reflection_pad2d_backward.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 256, 66, 66], f16), T([1, 256, 64, 64], f16), [1, 1, 1, 1]), {}) |
| aten.reflection_pad2d_backward.default | TorchBench/pytorch_CycleGAN_and_pix2pix | ((T([1, 64, 262, 262], f16), T([1, 64, 256, 256], f16), [3, 3, 3, 3]), {}) |
| aten.reflection_pad2d_backward.default | TorchBench/Background_Matting | ((T([3, 256, 130, 130], f16), T([3, 256, 128, 128], f16), [1, 1, 1, 1]), {}) |
| aten.reflection_pad2d_backward.default | TorchBench/Background_Matting | ((T([3, 64, 518, 518], f16), T([3, 64, 512, 512], f16), [3, 3, 3, 3]), {}) |
| aten.relu.default | TorchBench/vision_maskrcnn | ((T([0, 1024], f16),), {}) |
| aten.relu.default | TorchBench/speech_transformer | ((T([10, 204, 2048], f16),), {}) |
| aten.relu.default | TorchBench/speech_transformer | ((T([10, 22, 2048], f16),), {}) |
| aten.relu.default | TorchBench/fambench_dlrm | ((T([1024, 1500], f16),), {}) |
| aten.relu.default | TorchBench/fambench_dlrm | ((T([1024, 192], f16),), {}) |
| aten.relu.default | TorchBench/fambench_dlrm | ((T([1024, 4000], f16),), {}) |
| aten.relu.default | TIMM/hrnet_w18 | ((T([128, 144, 7, 7], f16),), {}) |
| aten.relu.default | TIMM/regnety_002 | ((T([128, 152, 14, 14], f16),), {}) |
| aten.relu.default | TIMM/hrnet_w18 | ((T([128, 18, 14, 14], f16),), {}) |
| aten.relu.default | TIMM/hrnet_w18 | ((T([128, 18, 28, 28], f16),), {}) |
| aten.relu.default | TIMM/hrnet_w18 | ((T([128, 18, 56, 56], f16),), {}) |
| aten.relu.default | TIMM/regnety_002 | ((T([128, 24, 56, 56], f16),), {}) |
| aten.relu.default | TIMM/hrnet_w18 | ((T([128, 36, 14, 14], f16),), {}) |
| aten.relu.default | TIMM/hrnet_w18 | ((T([128, 36, 28, 28], f16),), {}) |
| aten.relu.default | TIMM/regnety_002 | ((T([128, 368, 7, 7], f16),), {}) |
| aten.relu.default | TIMM/regnety_002 | ((T([128, 56, 28, 28], f16),), {}) |
| aten.relu.default | TIMM/hrnet_w18 | ((T([128, 72, 14, 14], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 1008, 42, 42], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 108, 42, 42], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 108, 83, 83], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 1080, 42, 42], f16),), {}) |
| aten.relu.default | HuggingFace/MobileBertForMaskedLM | ((T([16, 128, 512], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 1344, 21, 21], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 168, 42, 42], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 168, 83, 83], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 2016, 21, 21], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 216, 42, 42], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 2160, 21, 21], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 2688, 11, 11], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 270, 83, 83], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 336, 21, 21], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 336, 42, 42], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 4032, 11, 11], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 42, 165, 165], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 42, 83, 83], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 432, 21, 21], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 432, 42, 42], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 4320, 11, 11], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 54, 165, 165], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 54, 83, 83], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 540, 42, 42], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 672, 11, 11], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 672, 21, 21], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 84, 42, 42], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 84, 83, 83], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 864, 11, 11], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 864, 21, 21], f16),), {}) |
| aten.relu.default | TIMM/nasnetalarge | ((T([16, 96, 165, 165], f16),), {}) |
| aten.relu.default | TIMM/pnasnet5large | ((T([16, 96, 165, 165], f16),), {}) |
| aten.relu.default | HuggingFace/M2M100ForConditionalGeneration | ((T([2, 128, 4096], f16),), {}) |
| aten.relu.default | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 31, 2048], f16),), {}) |
| aten.relu.default | TorchBench/attention_is_all_you_need_pytorch | ((T([256, 33, 2048], f16),), {}) |
| aten.relu.default | TorchBench/timm_efficientdet | ((T([2], f16),), {}) |
| aten.relu.default | TorchBench/pytorch_struct | ((T([30, 256], f16),), {}) |
| aten.relu.default | TorchBench/mobilenet_v3_large | ((T([32, 120, 1, 1], f16),), {}) |
| aten.relu.default | HuggingFace/MobileBertForQuestionAnswering | ((T([32, 128, 512], f16),), {}) |
| aten.relu.default | TorchBench/mobilenet_v3_large | ((T([32, 168, 1, 1], f16),), {}) |
| aten.relu.default | TorchBench/timm_regnet | ((T([32, 224, 56, 56], f16),), {}) |
| aten.relu.default | TorchBench/timm_regnet | ((T([32, 2240, 7, 7], f16),), {}) |
| aten.relu.default | TorchBench/mobilenet_v3_large | ((T([32, 24, 1, 1], f16),), {}) |
| aten.relu.default | TorchBench/mobilenet_v3_large | ((T([32, 240, 1, 1], f16),), {}) |
| aten.relu.default | TIMM/gluon_xception65 | ((T([32, 256, 38, 38], f16),), {}) |
| aten.relu.default | TorchBench/mobilenet_v3_large | ((T([32, 32, 1, 1], f16),), {}) |
| aten.relu.default | TorchBench/timm_regnet | ((T([32, 448, 28, 28], f16),), {}) |
| aten.relu.default | TIMM/gluon_xception65 | ((T([32, 728, 19, 19], f16),), {}) |
| aten.relu.default | TIMM/convmixer_768_32 | ((T([32, 768, 32, 32], f16),), {}) |
| aten.relu.default | TorchBench/timm_regnet | ((T([32, 896, 14, 14], f16),), {}) |
| aten.relu.default | TorchBench/timm_efficientdet | ((T([3], f16),), {}) |
| aten.relu.default | HuggingFace/OPTForCausalLM | ((T([512, 3072], f16),), {}) |
| aten.relu.default | HuggingFace/Speech2Text2ForCausalLM | ((T([64, 128, 2048], f16),), {}) |
| aten.relu.default | TorchBench/LearningToPaint | ((T([96, 128, 16, 16], f16),), {}) |
| aten.relu.default | TorchBench/LearningToPaint | ((T([96, 256, 8, 8], f16),), {}) |
| aten.relu.default | TorchBench/LearningToPaint | ((T([96, 512, 4, 4], f16),), {}) |
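Each `args` cell serializes the positional and keyword arguments of a recorded ATen call, with `T([shape], dtype)` standing in for a tensor of that shape and dtype (`f16`/`f32` reading as float16/float32). The sketch below is not part of the dataset; it assumes that reading of the notation and shows how the first `aten.pow.Tensor_Scalar` row could be replayed as a real call through `torch.ops.aten`.

```python
# Minimal sketch (assumed interpretation, not from the dataset) of replaying
# one recorded row; random data stands in for the original activations,
# since only shape and dtype matter for reproducing the operator call.
import torch

DTYPES = {"f16": torch.float16, "f32": torch.float32}

def make_tensor(shape, dtype_code):
    # Build a placeholder tensor matching the serialized T([shape], dtype).
    return torch.randn(shape, dtype=DTYPES[dtype_code])

# Row: aten.pow.Tensor_Scalar | HuggingFace/GPT2ForSequenceClassification
#      ((T([4, 1024, 3072], f16), 2.0), {})
args = (make_tensor([4, 1024, 3072], "f16"), 2.0)
kwargs = {}

out = torch.ops.aten.pow.Tensor_Scalar(*args, **kwargs)
print(out.shape, out.dtype)  # torch.Size([4, 1024, 3072]) torch.float16
```

The same pattern applies to the other rows; for example, the `aten.reflection_pad2d.default` entries take the input tensor plus a four-element padding list as positional arguments.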