| operator name | used in model | args |
|---|---|---|
aten._softmax_backward_data.default | HuggingFace/DebertaForMaskedLM | ((T([4, 12, 512, 512], f16), T([4, 12, 512, 512], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/DebertaForQuestionAnswering | ((T([4, 12, 512, 512], f16), T([4, 12, 512, 512], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TorchBench/hf_Bert | ((T([4, 12, 512, 512], f16), T([4, 12, 512, 512], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TorchBench/hf_GPT2 | ((T([4, 12, 512, 512], f16), T([4, 12, 512, 512], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/XLNetLMHeadModel | ((T([4, 16, 512, 512], f16), T([4, 16, 512, 512], f16), 3, f16), {}) |
aten._softmax_backward_data.default | TIMM/swin_base_patch4_window7_224 | ((T([4096, 4, 49, 49], f16), T([4096, 4, 49, 49], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/OPTForCausalLM | ((T([48, 128, 128], f32), T([48, 128, 128], f32), -1, f16), {}) |
aten._softmax_backward_data.default | TorchBench/hf_Bart | ((T([48, 512, 512], f16), T([48, 512, 512], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/botnet26t_256 | ((T([512, 256, 256], f16), T([512, 256, 256], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/eca_botnext26ts_256 | ((T([512, 256, 256], f16), T([512, 256, 256], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/botnet26t_256 | ((T([512, 64, 64], f16), T([512, 64, 64], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/eca_botnext26ts_256 | ((T([512, 64, 64], f16), T([512, 64, 64], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TorchBench/fastNLP_Bert | ((T([6, 12, 476, 476], f16), T([6, 12, 476, 476], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/BartForCausalLM | ((T([64, 1024, 1024], f16), T([64, 1024, 1024], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/volo_d1_224 | ((T([64, 12, 1, 197], f16), T([64, 12, 1, 197], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/BertForMaskedLM | ((T([64, 12, 128, 128], f16), T([64, 12, 128, 128], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/BertForQuestionAnswering | ((T([64, 12, 128, 128], f16), T([64, 12, 128, 128], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/RobertaForQuestionAnswering | ((T([64, 12, 128, 128], f16), T([64, 12, 128, 128], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/volo_d1_224 | ((T([64, 12, 196, 196], f16), T([64, 12, 196, 196], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/beit_base_patch16_224 | ((T([64, 12, 197, 197], f16), T([64, 12, 197, 197], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/vit_base_patch16_224 | ((T([64, 12, 197, 197], f16), T([64, 12, 197, 197], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/deit_base_distilled_patch16_224 | ((T([64, 12, 198, 198], f16), T([64, 12, 198, 198], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/PegasusForConditionalGeneration | ((T([64, 128, 128], f16), T([64, 128, 128], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/jx_nest_base | ((T([64, 16, 1, 196, 196], f16), T([64, 16, 1, 196, 196], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/pit_b_224 | ((T([64, 16, 65, 65], f16), T([64, 16, 65, 65], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/swin_base_patch4_window7_224 | ((T([64, 32, 49, 49], f16), T([64, 32, 49, 49], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/crossvit_9_240 | ((T([64, 4, 1, 197], f16), T([64, 4, 1, 197], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/crossvit_9_240 | ((T([64, 4, 1, 401], f16), T([64, 4, 1, 401], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/jx_nest_base | ((T([64, 4, 16, 196, 196], f16), T([64, 4, 16, 196, 196], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/crossvit_9_240 | ((T([64, 4, 197, 197], f16), T([64, 4, 197, 197], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/crossvit_9_240 | ((T([64, 4, 401, 401], f16), T([64, 4, 401, 401], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/ElectraForQuestionAnswering | ((T([64, 4, 512, 512], f16), T([64, 4, 512, 512], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/pit_b_224 | ((T([64, 4, 962, 962], f16), T([64, 4, 962, 962], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/volo_d1_224 | ((T([64, 6, 196, 9, 9], f16), T([64, 6, 196, 9, 9], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/tnt_s_patch16_224 | ((T([64, 6, 197, 197], f16), T([64, 6, 197, 197], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/pit_b_224 | ((T([64, 8, 257, 257], f16), T([64, 8, 257, 257], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TIMM/jx_nest_base | ((T([64, 8, 4, 196, 196], f16), T([64, 8, 4, 196, 196], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TorchBench/hf_Albert | ((T([8, 12, 512, 512], f16), T([8, 12, 512, 512], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TorchBench/hf_DistilBert | ((T([8, 12, 512, 512], f16), T([8, 12, 512, 512], f16), -1, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/MegatronBertForQuestionAnswering | ((T([8, 16, 128, 128], f16), T([8, 16, 128, 128], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TorchBench/timm_vision_transformer | ((T([8, 6, 197, 197], f16), T([8, 6, 197, 197], f16), -1, f16), {}) |
aten._softmax_backward_data.default | TorchBench/speech_transformer | ((T([80, 204, 204], f16), T([80, 204, 204], f16), 2, f16), {}) |
aten._softmax_backward_data.default | TorchBench/speech_transformer | ((T([80, 22, 204], f16), T([80, 22, 204], f16), 2, f16), {}) |
aten._softmax_backward_data.default | TorchBench/speech_transformer | ((T([80, 22, 22], f16), T([80, 22, 22], f16), 2, f16), {}) |
aten._softmax_backward_data.default | HuggingFace/PLBartForConditionalGeneration | ((T([96, 128, 128], f16), T([96, 128, 128], f16), -1, f16), {}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54687], i64), T([54687, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54692], i64), T([54692, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54697], i64), T([54697, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54701], i64), T([54701, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54704], i64), T([54704, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54705], i64), T([54705, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54707], i64), T([54707, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54710], i64), T([54710, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54711], i64), T([54711, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54712], i64), T([54712, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54713], i64), T([54713, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54714], i64), T([54714, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54715], i64), T([54715, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54716], i64), T([54716, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54717], i64), T([54717, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54718], i64), T([54718, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54719], i64), T([54719, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54722], i64), T([54722, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54723], i64), T([54723, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54725], i64), T([54725, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54727], i64), T([54727, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54729], i64), T([54729, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54730], i64), T([54730, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54731], i64), T([54731, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54732], i64), T([54732, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54733], i64), T([54733, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54734], i64), T([54734, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54735], i64), T([54735, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54736], i64), T([54736, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54737], i64), T([54737, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54738], i64), T([54738, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54739], i64), T([54739, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54740], i64), T([54740, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54741], i64), T([54741, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54742], i64), T([54742, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54743], i64), T([54743, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54744], i64), T([54744, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54745], i64), T([54745, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54746], i64), T([54746, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54747], i64), T([54747, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54748], i64), T([54748, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54749], i64), T([54749, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54750], i64), T([54750, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54751], i64), T([54751, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54753], i64), T([54753, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54754], i64), T([54754, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54756], i64), T([54756, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54757], i64), T([54757, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54758], i64), T([54758, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54759], i64), T([54759, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54760], i64), T([54760, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54761], i64), T([54761, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54762], i64), T([54762, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54763], i64), T([54763, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
aten._sparse_coo_tensor_with_dims_and_tensors.default | TorchBench/fambench_dlrm | ((1, 1, [965, 192], T([1, 54764], i64), T([54764, 192], f16)), {'dtype': f16, 'layout': torch.sparse_coo, 'device': 'cuda', 'pin_memory': None}) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.