| operator name | used in model | args |
|---|---|---|
aten.addmm.default | HuggingFace/AlbertForQuestionAnswering | ((T([4096], f16), T([1024, 4096], f16), T([4096, 4096], f16, stride=(1, 4096))), {}) |
aten.addmm.default | TorchBench/alexnet | ((T([4096], f16), T([128, 4096], f16), T([4096, 4096], f16, stride=(1, 4096))), {}) |
aten.addmm.default | TorchBench/alexnet | ((T([4096], f16), T([128, 9216], f16), T([9216, 4096], f16, stride=(1, 9216))), {}) |
aten.addmm.default | HuggingFace/BartForConditionalGeneration | ((T([4096], f16), T([2048, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/MBartForCausalLM | ((T([4096], f16), T([2048, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/XLNetLMHeadModel | ((T([4096], f16), T([2048, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/M2M100ForConditionalGeneration | ((T([4096], f16), T([256, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/MegatronBertForCausalLM | ((T([4096], f16), T([256, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/XGLMForCausalLM | ((T([4096], f16), T([256, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([4096], f16), T([3136, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/BartForCausalLM | ((T([4096], f16), T([4096, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | TIMM/pit_b_224 | ((T([4096], f16), T([4160, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/PegasusForConditionalGeneration | ((T([4096], f16), T([512, 1024], f16), T([1024, 4096], f16, stride=(1, 1024))), {}) |
aten.addmm.default | TorchBench/vgg16 | ((T([4096], f16), T([64, 25088], f16), T([25088, 4096], f16, stride=(1, 25088))), {}) |
aten.addmm.default | TorchBench/vgg16 | ((T([4096], f16), T([64, 4096], f16), T([4096, 4096], f16, stride=(1, 4096))), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([432], f16), T([65536, 144], f16), T([144, 432], f16, stride=(1, 144))), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([480], f16), T([4096, 240], f16), T([240, 480], f16, stride=(1, 240))), {}) |
aten.addmm.default | HuggingFace/AllenaiLongformerBase | ((T([50265], f16), T([1024, 768], f16), T([768, 50265], f16, stride=(1, 768))), {}) |
aten.addmm.default | HuggingFace/DebertaForMaskedLM | ((T([50265], f16), T([2048, 768], f16), T([768, 50265], f16, stride=(1, 768))), {}) |
aten.addmm.default | TorchBench/hf_Longformer | ((T([50265], f16), T([2048, 768], f16), T([768, 50265], f16, stride=(1, 768))), {}) |
aten.addmm.default | HuggingFace/BigBird | ((T([50358], f16), T([1024, 768], f16), T([768, 50358], f16, stride=(1, 768))), {}) |
aten.addmm.default | TorchBench/hf_BigBird | ((T([50358], f16), T([2048, 768], f16), T([768, 50358], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([512], f16), T([100352, 64], f16), T([64, 512], f16, stride=(1, 64))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([512], f16), T([12544, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([512], f16), T([12544, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([512], f16), T([1568, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([512], f16), T([1568, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/pit_b_224 | ((T([512], f16), T([16448, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | TIMM/pit_b_224 | ((T([512], f16), T([16448, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([512], f16), T([200704, 128], f16), T([128, 512], f16, stride=(1, 128))), {}) |
aten.addmm.default | TorchBench/speech_transformer | ((T([512], f16), T([2040, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | TorchBench/speech_transformer | ((T([512], f16), T([2040, 320], f16), T([320, 512], f16, stride=(1, 320))), {}) |
aten.addmm.default | TorchBench/speech_transformer | ((T([512], f16), T([2040, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | HuggingFace/MobileBertForMaskedLM | ((T([512], f16), T([2048, 128], f16), T([128, 512], f16, stride=(1, 128))), {}) |
aten.addmm.default | HuggingFace/MobileBertForMaskedLM | ((T([512], f16), T([2048, 384], f16), T([384, 512], f16, stride=(1, 384))), {}) |
aten.addmm.default | HuggingFace/MobileBertForMaskedLM | ((T([512], f16), T([2048, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/speech_transformer | ((T([512], f16), T([220, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | TorchBench/speech_transformer | ((T([512], f16), T([220, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/nvidia_deeprecommender | ((T([512], f16), T([256, 1024], f16), T([1024, 512], f16, stride=(1, 1024))), {}) |
aten.addmm.default | TorchBench/nvidia_deeprecommender | ((T([512], f16), T([256, 197951], f16), T([197951, 512], f16, stride=(1, 197951))), {}) |
aten.addmm.default | TorchBench/nvidia_deeprecommender | ((T([512], f16), T([256, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([512], f16), T([401536, 64], f16), T([64, 512], f16, stride=(1, 64))), {}) |
aten.addmm.default | HuggingFace/MobileBertForQuestionAnswering | ((T([512], f16), T([4096, 128], f16), T([128, 512], f16, stride=(1, 128))), {}) |
aten.addmm.default | HuggingFace/MobileBertForQuestionAnswering | ((T([512], f16), T([4096, 384], f16), T([384, 512], f16, stride=(1, 384))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([512], f16), T([6400, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([512], f16), T([6400, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([512], f16), T([7936, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | HuggingFace/BlenderbotSmallForCausalLM | ((T([512], f16), T([8192, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | HuggingFace/BlenderbotSmallForConditionalGeneration | ((T([512], f16), T([8192, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | HuggingFace/BlenderbotSmallForCausalLM | ((T([512], f16), T([8192, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | HuggingFace/BlenderbotSmallForConditionalGeneration | ((T([512], f16), T([8192, 512], f16), T([512, 512], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([512], f16), T([8448, 2048], f16), T([2048, 512], f16, stride=(1, 2048))), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([576], f16), T([16384, 192], f16), T([192, 576], f16, stride=(1, 192))), {}) |
aten.addmm.default | TorchBench/maml_omniglot | ((T([5], f16), T([5, 64], f16), T([64, 5], f16, stride=(1, 64))), {}) |
aten.addmm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([6144], f16), T([512, 1536], f16), T([1536, 6144], f16, stride=(1, 1536))), {}) |
aten.addmm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([6144], f16), T([512, 1536], f16), T([1536, 6144], f16, stride=(1, 1536))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([640], f16), T([1568, 320], f16), T([320, 640], f16, stride=(1, 320))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([64], f16), T([100352, 512], f16), T([512, 64], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([64], f16), T([100352, 64], f16), T([64, 64], f16, stride=(1, 64))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([64], f16), T([401536, 512], f16), T([512, 64], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([64], f16), T([401536, 64], f16), T([64, 64], f16, stride=(1, 64))), {}) |
aten.addmm.default | TorchBench/LearningToPaint | ((T([65], f16), T([96, 512], f16), T([512, 65], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([720], f16), T([4096, 240], f16), T([240, 720], f16, stride=(1, 240))), {}) |
aten.addmm.default | HuggingFace/BigBird | ((T([768], f16), T([1, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | HuggingFace/GoogleFnet | ((T([768], f16), T([1, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | HuggingFace/AllenaiLongformerBase | ((T([768], f16), T([1024, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | HuggingFace/BigBird | ((T([768], f16), T([1024, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | HuggingFace/PLBartForConditionalGeneration | ((T([768], f16), T([1024, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | HuggingFace/AllenaiLongformerBase | ((T([768], f16), T([1024, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | HuggingFace/BigBird | ((T([768], f16), T([1024, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | HuggingFace/PLBartForConditionalGeneration | ((T([768], f16), T([1024, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/cait_m36_384 | ((T([768], f16), T([1152, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TIMM/cait_m36_384 | ((T([768], f16), T([1152, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/cait_m36_384 | ((T([768], f16), T([1154, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/mixer_b16_224 | ((T([768], f16), T([12544, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TIMM/crossvit_9_240 | ((T([768], f16), T([12608, 256], f16), T([256, 768], f16, stride=(1, 256))), {}) |
aten.addmm.default | TIMM/beit_base_patch16_224 | ((T([768], f16), T([12608, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TIMM/vit_base_patch16_224 | ((T([768], f16), T([12608, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TIMM/beit_base_patch16_224 | ((T([768], f16), T([12608, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/vit_base_patch16_224 | ((T([768], f16), T([12608, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/deit_base_distilled_patch16_224 | ((T([768], f16), T([12672, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TIMM/deit_base_distilled_patch16_224 | ((T([768], f16), T([12672, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | HuggingFace/LayoutLMForMaskedLM | ((T([768], f16), T([16, 768], f16, stride=(393216, 1)), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | HuggingFace/LayoutLMForSequenceClassification | ((T([768], f16), T([16, 768], f16, stride=(393216, 1)), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/cait_m36_384 | ((T([768], f16), T([2, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TIMM/cait_m36_384 | ((T([768], f16), T([2, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/cait_m36_384 | ((T([768], f16), T([2, 768], f16, stride=(443136, 1)), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | TorchBench/hf_BigBird | ((T([768], f16), T([2, 768], f16, stride=(786432, 1)), T([768, 768], f16, stride=(1, 768))), {}) |
aten.addmm.default | TorchBench/hf_GPT2 | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16)), {}) |
aten.addmm.default | HuggingFace/DebertaForMaskedLM | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | HuggingFace/DebertaForQuestionAnswering | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | HuggingFace/DistilBertForMaskedLM | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | HuggingFace/PLBartForCausalLM | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TorchBench/BERT_pytorch | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TorchBench/hf_Bart | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TorchBench/hf_Bert | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TorchBench/hf_BigBird | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TorchBench/hf_Longformer | ((T([768], f16), T([2048, 3072], f16), T([3072, 768], f16, stride=(1, 3072))), {}) |
aten.addmm.default | TorchBench/hf_GPT2 | ((T([768], f16), T([2048, 768], f16), T([768, 768], f16)), {}) |
aten.addmm.default | HuggingFace/DebertaForMaskedLM | ((T([768], f16), T([2048, 768], f16), T([768, 768], f16, stride=(1, 768))), {}) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.