operator name | used in model | args |
|---|---|---|
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([1024], f16), T([3136, 1024], f16), T([1024, 1024], f16, stride=(1, 1024))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([1024], f16), T([3136, 4096], f16), T([4096, 1024], f16, stride=(1, 4096))), {}) |
aten.addmm.default | HuggingFace/ElectraForQuestionAnswering | ((T([1024], f16), T([32768, 256], f16), T([256, 1024], f16, stride=(1, 256))), {}) |
aten.addmm.default | HuggingFace/BartForCausalLM | ((T([1024], f16), T([4096, 1024], f16), T([1024, 1024], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/BartForCausalLM | ((T([1024], f16), T([4096, 4096], f16), T([4096, 1024], f16, stride=(1, 4096))), {}) |
aten.addmm.default | TIMM/pit_b_224 | ((T([1024], f16), T([4160, 1024], f16), T([1024, 1024], f16, stride=(1, 1024))), {}) |
aten.addmm.default | TIMM/pit_b_224 | ((T([1024], f16), T([4160, 4096], f16), T([4096, 1024], f16, stride=(1, 4096))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([1024], f16), T([50176, 256], f16), T([256, 1024], f16, stride=(1, 256))), {}) |
aten.addmm.default | HuggingFace/PegasusForConditionalGeneration | ((T([1024], f16), T([512, 1024], f16), T([1024, 1024], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/ElectraForCausalLM | ((T([1024], f16), T([512, 256], f16), T([256, 1024], f16, stride=(1, 256))), {}) |
aten.addmm.default | HuggingFace/PegasusForConditionalGeneration | ((T([1024], f16), T([512, 4096], f16), T([4096, 1024], f16, stride=(1, 4096))), {}) |
aten.addmm.default | TIMM/pit_b_224 | ((T([1024], f16), T([61568, 256], f16), T([256, 1024], f16, stride=(1, 256))), {}) |
aten.addmm.default | TorchBench/timm_vision_transformer | ((T([1152], f16), T([1576, 384], f16), T([384, 1152], f16, stride=(1, 384))), {}) |
aten.addmm.default | TIMM/volo_d1_224 | ((T([1152], f16), T([64, 384], f16), T([384, 1152], f16, stride=(1, 384))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([1280], f16), T([25216, 320], f16), T([320, 1280], f16, stride=(1, 320))), {}) |
aten.addmm.default | TorchBench/mobilenet_v3_large | ((T([1280], f16), T([32, 960], f16), T([960, 1280], f16, stride=(1, 960))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([1280], f16), T([6272, 320], f16), T([320, 1280], f16, stride=(1, 320))), {}) |
aten.addmm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([128100], f16), T([512, 1536], f16), T([1536, 128100], f16, stride=(1, 1536))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([128], f16), T([100480, 1024], f16), T([1024, 128], f16, stride=(1, 1024))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([128], f16), T([100480, 128], f16), T([128, 128], f16, stride=(1, 128))), {}) |
aten.addmm.default | HuggingFace/AlbertForMaskedLM | ((T([128], f16), T([1024, 4096], f16), T([4096, 128], f16, stride=(1, 4096))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([128], f16), T([1568, 64], f16), T([64, 128], f16, stride=(1, 64))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([128], f16), T([200704, 128], f16), T([128, 128], f16, stride=(1, 128))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([128], f16), T([200704, 512], f16), T([512, 128], f16, stride=(1, 512))), {}) |
aten.addmm.default | HuggingFace/MobileBertForMaskedLM | ((T([128], f16), T([2048, 128], f16), T([128, 128], f16, stride=(1, 128))), {}) |
aten.addmm.default | HuggingFace/MobileBertForMaskedLM | ((T([128], f16), T([2048, 512], f16), T([512, 128], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([128], f16), T([25088, 1024], f16), T([1024, 128], f16, stride=(1, 1024))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([128], f16), T([25088, 128], f16), T([128, 128], f16, stride=(1, 128))), {}) |
aten.addmm.default | TIMM/crossvit_9_240 | ((T([128], f16), T([25664, 128], f16), T([128, 128], f16, stride=(1, 128))), {}) |
aten.addmm.default | TIMM/crossvit_9_240 | ((T([128], f16), T([25664, 384], f16), T([384, 128], f16, stride=(1, 384))), {}) |
aten.addmm.default | HuggingFace/MobileBertForQuestionAnswering | ((T([128], f16), T([4096, 128], f16), T([128, 128], f16, stride=(1, 128))), {}) |
aten.addmm.default | HuggingFace/MobileBertForQuestionAnswering | ((T([128], f16), T([4096, 512], f16), T([512, 128], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/hf_Albert | ((T([128], f16), T([4096, 768], f16), T([768, 128], f16, stride=(1, 768))), {}) |
aten.addmm.default | HuggingFace/ElectraForCausalLM | ((T([128], f16), T([512, 256], f16), T([256, 128], f16, stride=(1, 256))), {}) |
aten.addmm.default | TIMM/crossvit_9_240 | ((T([128], f16), T([64, 128], f16), T([128, 128], f16, stride=(1, 128))), {}) |
aten.addmm.default | TIMM/crossvit_9_240 | ((T([128], f16), T([64, 256], f16), T([256, 128], f16, stride=(1, 256))), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([144], f16), T([65536, 144], f16), T([144, 144], f16, stride=(1, 144))), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([144], f16), T([65536, 288], f16), T([288, 144], f16, stride=(1, 288))), {}) |
aten.addmm.default | TorchBench/fambench_dlrm | ((T([1500], f16), T([1024, 1500], f16), T([1500, 1500], f16, stride=(1, 1500))), {}) |
aten.addmm.default | TorchBench/fambench_dlrm | ((T([1500], f16), T([1024, 2000], f16), T([2000, 1500], f16, stride=(1, 2000))), {}) |
aten.addmm.default | TIMM/gmlp_s16_224 | ((T([1536], f16), T([12544, 256], f16), T([256, 1536], f16, stride=(1, 256))), {}) |
aten.addmm.default | TIMM/gmixer_24_224 | ((T([1536], f16), T([12544, 384], f16), T([384, 1536], f16, stride=(1, 384))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([1536], f16), T([12544, 512], f16), T([512, 1536], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/tnt_s_patch16_224 | ((T([1536], f16), T([12608, 384], f16), T([384, 1536], f16, stride=(1, 384))), {}) |
aten.addmm.default | TorchBench/timm_vision_transformer | ((T([1536], f16), T([1576, 384], f16), T([384, 1536], f16, stride=(1, 384))), {}) |
aten.addmm.default | TIMM/pit_b_224 | ((T([1536], f16), T([16448, 512], f16), T([512, 1536], f16, stride=(1, 512))), {}) |
aten.addmm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([1536], f16), T([512, 1536], f16), T([1536, 1536], f16, stride=(1, 1536))), {}) |
aten.addmm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([1536], f16), T([512, 1536], f16), T([1536, 1536], f16, stride=(1, 1536))), {}) |
aten.addmm.default | HuggingFace/DebertaV2ForMaskedLM | ((T([1536], f16), T([512, 6144], f16), T([6144, 1536], f16, stride=(1, 6144))), {}) |
aten.addmm.default | HuggingFace/DebertaV2ForQuestionAnswering | ((T([1536], f16), T([512, 6144], f16), T([6144, 1536], f16, stride=(1, 6144))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([1536], f16), T([6400, 512], f16), T([512, 1536], f16, stride=(1, 512))), {}) |
aten.addmm.default | HuggingFace/AlbertForMaskedLM | ((T([16384], f16), T([1024, 4096], f16), T([4096, 16384], f16, stride=(1, 4096))), {}) |
aten.addmm.default | HuggingFace/AlbertForQuestionAnswering | ((T([16384], f16), T([1024, 4096], f16), T([4096, 16384], f16, stride=(1, 4096))), {}) |
aten.addmm.default | TorchBench/fambench_dlrm | ((T([192], f16), T([1024, 1500], f16), T([1500, 192], f16, stride=(1, 1500))), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([192], f16), T([16384, 192], f16), T([192, 192], f16, stride=(1, 192))), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([192], f16), T([16384, 384], f16), T([384, 192], f16, stride=(1, 384))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([192], f16), T([401536, 64], f16), T([64, 192], f16, stride=(1, 64))), {}) |
aten.addmm.default | TIMM/gmixer_24_224 | ((T([196], f16), T([24576, 192], f16), T([192, 196], f16, stride=(1, 192))), {}) |
aten.addmm.default | TIMM/resmlp_12_224 | ((T([196], f16), T([49152, 196], f16), T([196, 196], f16, stride=(1, 196))), {}) |
aten.addmm.default | TIMM/mixer_b16_224 | ((T([196], f16), T([49152, 384], f16), T([384, 196], f16, stride=(1, 384))), {}) |
aten.addmm.default | TorchBench/nvidia_deeprecommender | ((T([197951], f16), T([256, 512], f16), T([512, 197951], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/fambench_dlrm | ((T([1], f16), T([1024, 4000], f16), T([4000, 1], f16)), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([2048], f16), T([12544, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | HuggingFace/GPTNeoForCausalLM | ((T([2048], f16), T([128, 2048], f16), T([2048, 2048], f16, stride=(1, 2048))), {}) |
aten.addmm.default | HuggingFace/GPTNeoForSequenceClassification | ((T([2048], f16), T([128, 2048], f16), T([2048, 2048], f16, stride=(1, 2048))), {}) |
aten.addmm.default | HuggingFace/GPTNeoForCausalLM | ((T([2048], f16), T([128, 8192], f16), T([8192, 2048], f16, stride=(1, 8192))), {}) |
aten.addmm.default | HuggingFace/GPTNeoForSequenceClassification | ((T([2048], f16), T([128, 8192], f16), T([8192, 2048], f16, stride=(1, 8192))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([2048], f16), T([1568, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/pit_b_224 | ((T([2048], f16), T([16448, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/speech_transformer | ((T([2048], f16), T([2040, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/speech_transformer | ((T([2048], f16), T([220, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/coat_lite_mini | ((T([2048], f16), T([6400, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048], f16), T([7936, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | HuggingFace/Speech2Text2ForCausalLM | ((T([2048], f16), T([8192, 256], f16), T([256, 2048], f16, stride=(1, 256))), {}) |
aten.addmm.default | HuggingFace/BlenderbotSmallForCausalLM | ((T([2048], f16), T([8192, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | HuggingFace/BlenderbotSmallForConditionalGeneration | ((T([2048], f16), T([8192, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | TorchBench/attention_is_all_you_need_pytorch | ((T([2048], f16), T([8448, 512], f16), T([512, 2048], f16, stride=(1, 512))), {}) |
aten.addmm.default | TIMM/cait_m36_384 | ((T([2304], f16), T([1152, 768], f16), T([768, 2304], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/beit_base_patch16_224 | ((T([2304], f16), T([12608, 768], f16), T([768, 2304], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/vit_base_patch16_224 | ((T([2304], f16), T([12608, 768], f16), T([768, 2304], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/deit_base_distilled_patch16_224 | ((T([2304], f16), T([12672, 768], f16), T([768, 2304], f16, stride=(1, 768))), {}) |
aten.addmm.default | TorchBench/hf_GPT2 | ((T([2304], f16), T([2048, 768], f16), T([768, 2304], f16)), {}) |
aten.addmm.default | HuggingFace/GPT2ForSequenceClassification | ((T([2304], f16), T([4096, 768], f16), T([768, 2304], f16)), {}) |
aten.addmm.default | HuggingFace/DistillGPT2 | ((T([2304], f16), T([512, 768], f16), T([768, 2304], f16)), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([240], f16), T([4096, 240], f16), T([240, 240], f16, stride=(1, 240))), {}) |
aten.addmm.default | TIMM/mobilevit_s | ((T([240], f16), T([4096, 480], f16), T([480, 240], f16, stride=(1, 480))), {}) |
aten.addmm.default | TIMM/tnt_s_patch16_224 | ((T([24], f16), T([200704, 24], f16), T([24, 24], f16, stride=(1, 24))), {}) |
aten.addmm.default | TIMM/tnt_s_patch16_224 | ((T([24], f16), T([200704, 96], f16), T([96, 24], f16, stride=(1, 96))), {}) |
aten.addmm.default | TIMM/gmlp_s16_224 | ((T([256], f16), T([12544, 768], f16), T([768, 256], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/crossvit_9_240 | ((T([256], f16), T([12608, 256], f16), T([256, 256], f16, stride=(1, 256))), {}) |
aten.addmm.default | TIMM/crossvit_9_240 | ((T([256], f16), T([12608, 768], f16), T([768, 256], f16, stride=(1, 768))), {}) |
aten.addmm.default | TIMM/twins_pcpvt_base | ((T([256], f16), T([1568, 128], f16), T([128, 256], f16, stride=(1, 128))), {}) |
aten.addmm.default | TorchBench/pytorch_struct | ((T([256], f16), T([30, 256], f16), T([256, 256], f16, stride=(1, 256))), {}) |
aten.addmm.default | HuggingFace/ElectraForQuestionAnswering | ((T([256], f16), T([32768, 1024], f16), T([1024, 256], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/ElectraForQuestionAnswering | ((T([256], f16), T([32768, 128], f16), T([128, 256], f16, stride=(1, 128))), {}) |
aten.addmm.default | HuggingFace/ElectraForQuestionAnswering | ((T([256], f16), T([32768, 256], f16), T([256, 256], f16, stride=(1, 256))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([256], f16), T([50176, 1024], f16), T([1024, 256], f16, stride=(1, 1024))), {}) |
aten.addmm.default | TIMM/swin_base_patch4_window7_224 | ((T([256], f16), T([50176, 256], f16), T([256, 256], f16, stride=(1, 256))), {}) |
aten.addmm.default | HuggingFace/ElectraForCausalLM | ((T([256], f16), T([512, 1024], f16), T([1024, 256], f16, stride=(1, 1024))), {}) |
aten.addmm.default | HuggingFace/ElectraForCausalLM | ((T([256], f16), T([512, 128], f16), T([128, 256], f16, stride=(1, 128))), {}) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.