operator name | used in model | args |
|---|---|---|
aten.bmm.default | TorchBench/BERT_pytorch | ((T([192, 128, 128], f16, stride=(16384, 1, 128)), T([192, 128, 64], f16)), {}) |
aten.bmm.default | HuggingFace/DistilBertForMaskedLM | ((T([192, 128, 64], f16), T([192, 64, 128], f16)), {}) |
aten.bmm.default | TorchBench/BERT_pytorch | ((T([192, 128, 64], f16), T([192, 64, 128], f16)), {}) |
aten.bmm.default | HuggingFace/DistilBertForMaskedLM | ((T([192, 128, 64], f16), T([192, 64, 128], f16, stride=(8192, 1, 64))), {}) |
aten.bmm.default | HuggingFace/PLBartForCausalLM | ((T([192, 128, 64], f16), T([192, 64, 128], f16, stride=(8192, 1, 64))), {}) |
aten.bmm.default | TorchBench/BERT_pytorch | ((T([192, 128, 64], f16), T([192, 64, 128], f16, stride=(8192, 1, 64))), {}) |
aten.bmm.default | HuggingFace/LayoutLMForMaskedLM | ((T([192, 512, 512], f16), T([192, 512, 64], f16)), {}) |
aten.bmm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([192, 512, 512], f16), T([192, 512, 64], f16)), {}) |
aten.bmm.default | HuggingFace/LayoutLMForMaskedLM | ((T([192, 512, 512], f16), T([192, 512, 64], f16, stride=(32768, 1, 512))), {}) |
aten.bmm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([192, 512, 512], f16), T([192, 512, 64], f16, stride=(32768, 1, 512))), {}) |
aten.bmm.default | HuggingFace/LayoutLMForMaskedLM | ((T([192, 512, 512], f16, stride=(262144, 1, 512)), T([192, 512, 64], f16)), {}) |
aten.bmm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([192, 512, 512], f16, stride=(262144, 1, 512)), T([192, 512, 64], f16)), {}) |
aten.bmm.default | HuggingFace/LayoutLMForMaskedLM | ((T([192, 512, 64], f16), T([192, 64, 512], f16)), {}) |
aten.bmm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([192, 512, 64], f16), T([192, 64, 512], f16)), {}) |
aten.bmm.default | HuggingFace/LayoutLMForMaskedLM | ((T([192, 512, 64], f16), T([192, 64, 512], f16, stride=(32768, 1, 64))), {}) |
aten.bmm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([192, 512, 64], f16), T([192, 64, 512], f16, stride=(32768, 1, 64))), {}) |
aten.bmm.default | HuggingFace/DistilBertForMaskedLM | ((T([192, 64, 128], f16, stride=(8192, 1, 64)), T([192, 128, 128], f16)), {}) |
aten.bmm.default | HuggingFace/PLBartForCausalLM | ((T([192, 64, 128], f16, stride=(8192, 1, 64)), T([192, 128, 128], f16)), {}) |
aten.bmm.default | TorchBench/BERT_pytorch | ((T([192, 64, 128], f16, stride=(8192, 1, 64)), T([192, 128, 128], f16)), {}) |
aten.bmm.default | HuggingFace/LayoutLMForMaskedLM | ((T([192, 64, 512], f16, stride=(32768, 1, 64)), T([192, 512, 512], f16)), {}) |
aten.bmm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([192, 64, 512], f16, stride=(32768, 1, 64)), T([192, 512, 512], f16)), {}) |
aten.bmm.default | TIMM/levit_128 | ((T([2048, 16, 16], f16), T([2048, 16, 49], f16)), {}) |
aten.bmm.default | TIMM/levit_128 | ((T([2048, 16, 16], f16, stride=(256, 1, 16)), T([2048, 16, 49], f16)), {}) |
aten.bmm.default | TIMM/levit_128 | ((T([2048, 16, 49], f16), T([2048, 49, 16], f16, stride=(784, 1, 49))), {}) |
aten.bmm.default | TIMM/levit_128 | ((T([2048, 16, 49], f16), T([2048, 49, 64], f16)), {}) |
aten.bmm.default | TIMM/levit_128 | ((T([2048, 16, 64], f16), T([2048, 64, 49], f16, stride=(3136, 1, 64))), {}) |
aten.bmm.default | TIMM/jx_nest_base | ((T([2048, 196, 196], f16), T([2048, 196, 32], f16)), {}) |
aten.bmm.default | TIMM/jx_nest_base | ((T([2048, 196, 196], f16), T([2048, 196, 32], f16, stride=(6272, 1, 196))), {}) |
aten.bmm.default | TIMM/jx_nest_base | ((T([2048, 196, 196], f16, stride=(38416, 1, 196)), T([2048, 196, 32], f16)), {}) |
aten.bmm.default | TIMM/jx_nest_base | ((T([2048, 196, 32], f16), T([2048, 32, 196], f16)), {}) |
aten.bmm.default | TIMM/jx_nest_base | ((T([2048, 196, 32], f16), T([2048, 32, 196], f16, stride=(6272, 1, 32))), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 31, 31], f16), T([2048, 31, 64], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 31, 31], f16), T([2048, 31, 64], f16, stride=(1984, 1, 31))), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 31, 31], f16, stride=(961, 1, 31)), T([2048, 31, 64], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 31, 33], f16), T([2048, 33, 64], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 31, 33], f16), T([2048, 33, 64], f16, stride=(2112, 1, 33))), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 31, 64], f16), T([2048, 64, 31], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 31, 64], f16), T([2048, 64, 31], f16, stride=(1984, 1, 64))), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 31, 64], f16), T([2048, 64, 33], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 31, 64], f16), T([2048, 64, 33], f16, stride=(2112, 1, 64))), {}) |
aten.bmm.default | TIMM/jx_nest_base | ((T([2048, 32, 196], f16, stride=(6272, 1, 32)), T([2048, 196, 196], f16)), {}) |
aten.bmm.default | TIMM/swin_base_patch4_window7_224 | ((T([2048, 32, 49], f16, stride=(1568, 1, 32)), T([2048, 49, 49], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 33, 31], f16, stride=(1023, 1, 33)), T([2048, 31, 64], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 33, 33], f16), T([2048, 33, 64], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 33, 33], f16), T([2048, 33, 64], f16, stride=(2112, 1, 33))), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 33, 33], f16, stride=(1089, 1, 33)), T([2048, 33, 64], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 33, 64], f16), T([2048, 64, 33], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 33, 64], f16), T([2048, 64, 33], f16, stride=(2112, 1, 64))), {}) |
aten.bmm.default | TIMM/levit_128 | ((T([2048, 49, 16], f16, stride=(784, 1, 49)), T([2048, 16, 64], f16)), {}) |
aten.bmm.default | TIMM/swin_base_patch4_window7_224 | ((T([2048, 49, 32], f16), T([2048, 32, 49], f16)), {}) |
aten.bmm.default | TIMM/swin_base_patch4_window7_224 | ((T([2048, 49, 32], f16), T([2048, 32, 49], f16, stride=(1568, 1, 32))), {}) |
aten.bmm.default | TIMM/swin_base_patch4_window7_224 | ((T([2048, 49, 49], f16), T([2048, 49, 32], f16)), {}) |
aten.bmm.default | TIMM/swin_base_patch4_window7_224 | ((T([2048, 49, 49], f16), T([2048, 49, 32], f16, stride=(1568, 1, 49))), {}) |
aten.bmm.default | TIMM/swin_base_patch4_window7_224 | ((T([2048, 49, 49], f16, stride=(2401, 1, 49)), T([2048, 49, 32], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 64, 31], f16, stride=(1984, 1, 64)), T([2048, 31, 31], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 64, 31], f16, stride=(1984, 1, 64)), T([2048, 31, 33], f16)), {}) |
aten.bmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048, 64, 33], f16, stride=(2112, 1, 64)), T([2048, 33, 33], f16)), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 1024, 64], f16, stride=(65536, 1, 1024)), T([24, 64, 64], f16)), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 448, 64], f16, stride=(28672, 1, 448)), T([24, 64, 64], f16)), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([24, 512, 512], f16), T([24, 512, 64], f16)), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([24, 512, 512], f16), T([24, 512, 64], f16)), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([24, 512, 512], f16, stride=(262144, 1, 512)), T([24, 512, 64], f16, stride=(64, 1536, 1))), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([24, 512, 512], f16, stride=(262144, 1, 512)), T([24, 512, 64], f16, stride=(64, 1536, 1))), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([24, 512, 64], f16), T([24, 64, 512], f16, stride=(32768, 1, 64))), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([24, 512, 64], f16), T([24, 64, 512], f16, stride=(32768, 1, 64))), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([24, 512, 64], f16, stride=(64, 1536, 1)), T([24, 64, 512], f16, stride=(32768, 1, 64))), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([24, 512, 64], f16, stride=(64, 1536, 1)), T([24, 64, 512], f16, stride=(32768, 1, 64))), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 64, 1024], f16), T([24, 1024, 64], f16)), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 64, 448], f16), T([24, 448, 64], f16)), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([24, 64, 512], f16, stride=(32768, 1, 64)), T([24, 512, 512], f16)), {}) |
aten.bmm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([24, 64, 512], f16, stride=(32768, 1, 64)), T([24, 512, 512], f16)), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 64, 64], f16), T([24, 64, 1024], f16, stride=(65536, 1, 64))), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 64, 64], f16), T([24, 64, 448], f16, stride=(28672, 1, 64))), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 64, 64], f16, stride=(4096, 1, 64)), T([24, 64, 1024], f16)), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 64, 64], f16, stride=(4096, 1, 64)), T([24, 64, 448], f16)), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 64, 768], f16, stride=(393216, 1, 512)), T([24, 768, 64], f16)), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 64, 768], f16, stride=(49152, 1, 64)), T([24, 768, 64], f16)), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 768, 64], f16), T([24, 64, 64], f16)), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 768, 64], f16), T([24, 64, 64], f16, stride=(4096, 1, 64))), {}) |
aten.bmm.default | TorchBench/hf_BigBird | ((T([24, 768, 64], f16, stride=(393216, 512, 1)), T([24, 64, 64], f16)), {}) |
aten.bmm.default | TIMM/crossvit_9_240 | ((T([256, 1, 197], f16), T([256, 197, 64], f16)), {}) |
aten.bmm.default | TIMM/crossvit_9_240 | ((T([256, 1, 197], f16), T([256, 197, 64], f16, stride=(12608, 1, 197))), {}) |
aten.bmm.default | TIMM/crossvit_9_240 | ((T([256, 1, 32], f16), T([256, 32, 401], f16)), {}) |
aten.bmm.default | TIMM/crossvit_9_240 | ((T([256, 1, 32], f16), T([256, 32, 401], f16, stride=(12832, 1, 32))), {}) |
aten.bmm.default | TIMM/crossvit_9_240 | ((T([256, 1, 401], f16), T([256, 401, 32], f16)), {}) |
aten.bmm.default | TIMM/crossvit_9_240 | ((T([256, 1, 401], f16), T([256, 401, 32], f16, stride=(12832, 1, 401))), {}) |
aten.bmm.default | TIMM/crossvit_9_240 | ((T([256, 1, 64], f16), T([256, 64, 197], f16)), {}) |
aten.bmm.default | TIMM/crossvit_9_240 | ((T([256, 1, 64], f16), T([256, 64, 197], f16, stride=(12608, 1, 64))), {}) |
aten.bmm.default | TIMM/sebotnet33ts_256 | ((T([256, 1024, 1024], f16), T([256, 1024, 32], f16, stride=(32768, 1, 1024))), {}) |
aten.bmm.default | TIMM/sebotnet33ts_256 | ((T([256, 1024, 1024], f16, stride=(1048576, 1, 1024)), T([256, 1024, 32], f16, stride=(32768, 1, 1024))), {}) |
aten.bmm.default | TIMM/sebotnet33ts_256 | ((T([256, 1024, 32], f16, stride=(32768, 1, 1024)), T([256, 32, 1024], f16)), {}) |
aten.bmm.default | HuggingFace/MBartForCausalLM | ((T([256, 128, 128], f16), T([256, 128, 64], f16)), {}) |
aten.bmm.default | HuggingFace/Speech2Text2ForCausalLM | ((T([256, 128, 128], f16), T([256, 128, 64], f16)), {}) |
aten.bmm.default | HuggingFace/MBartForCausalLM | ((T([256, 128, 128], f16, stride=(16384, 1, 128)), T([256, 128, 64], f16)), {}) |
aten.bmm.default | HuggingFace/Speech2Text2ForCausalLM | ((T([256, 128, 128], f16, stride=(16384, 1, 128)), T([256, 128, 64], f16)), {}) |
aten.bmm.default | TIMM/sebotnet33ts_256 | ((T([256, 128, 256], f16), T([256, 256, 256], f16)), {}) |
aten.bmm.default | HuggingFace/MBartForCausalLM | ((T([256, 128, 64], f16), T([256, 64, 128], f16, stride=(8192, 1, 64))), {}) |
aten.bmm.default | HuggingFace/Speech2Text2ForCausalLM | ((T([256, 128, 64], f16), T([256, 64, 128], f16, stride=(8192, 1, 64))), {}) |
aten.bmm.default | TIMM/sebotnet33ts_256 | ((T([256, 128, 64], f16), T([256, 64, 64], f16)), {}) |
aten.bmm.default | TIMM/crossvit_9_240 | ((T([256, 197, 197], f16), T([256, 197, 64], f16)), {}) |
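Each row above records one `aten.bmm.default` call as a pair of input tensors. A minimal sketch of how a single row could be reproduced is shown below, assuming `T([shape], f16, stride=...)` denotes a `torch.float16` tensor with the listed sizes and (when given) non-default strides, and that contiguous tensors are meant when no stride appears; the helper `make_arg` is a hypothetical name introduced here for illustration, and a CUDA device is assumed since these are fp16 GPU benchmark traces.

```python
import torch

# Sketch: reconstruct the row
#   aten.bmm.default | TorchBench/BERT_pytorch |
#   ((T([192, 128, 128], f16, stride=(16384, 1, 128)), T([192, 128, 64], f16)), {})
# Assumption: a listed stride means a non-contiguous view (here, the transpose of
# the last two dims of a contiguous [192, 128, 128] buffer).

def make_arg(shape, dtype=torch.float16, stride=None, device="cuda"):
    """Build a tensor matching one T(...) entry; strided entries are realized
    as views over a contiguous buffer via as_strided."""
    if stride is None:
        return torch.randn(shape, dtype=dtype, device=device)
    # Smallest contiguous storage that covers the strided view.
    numel = 1 + sum((s - 1) * st for s, st in zip(shape, stride))
    base = torch.randn(numel, dtype=dtype, device=device)
    return base.as_strided(shape, stride)

a = make_arg([192, 128, 128], stride=(16384, 1, 128))  # transposed scores view
b = make_arg([192, 128, 64])                           # contiguous values
out = torch.ops.aten.bmm.default(a, b)                 # the overload the table records
assert out.shape == (192, 128, 64)
```

The same pattern applies to the other rows: the batch dimension is the leading size, and a stride tuple whose middle entry is 1 typically indicates the operand is a transpose of a contiguous tensor rather than a separately materialized copy.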