| python_code | repo_name | file_path |
|---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import hydra
from mblink.conf.config import MainConfig
from omegaconf import OmegaConf
from pytorch_lightning.trainer import Train... | BELA-main | mblink/main.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import mmap
from typing import List
import torch
from pytorch_lightning import LightningDataModule
from mblink.u... | BELA-main | mblink/datamodule/blink_datamodule.py |
| BELA-main | mblink/tests/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import os
import tempfile
import random
import torch
import h5py
import numpy as np
from mblink.datamodule.blin... | BELA-main | mblink/tests/test_datamodules.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.models.hf_encoder import HFEncoder
from bela.transforms.joint_el_transform import JointELTransform
c... | BELA-main | mblink/tests/test_models.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from bela.transforms.joint_el_transform import JointELTransform
class TestJointELXlmrTransforms(unittest.TestC... | BELA-main | mblink/tests/test_transforms.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from enum import Enum
from typing import List
import torch
import h5py
logger = logging.getLogger()
class En... | BELA-main | mblink/utils/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from transformers import AutoModel
from torch import nn
class HFEncoder(nn.Module):
def __init__(
... | BELA-main | mblink/models/hf_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from transformers import AutoTokenizer
class HFTransform(nn.Module):
def __init__(
self,
model_pa... | BELA-main | mblink/transforms/hf_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from mbl... | BELA-main | mblink/transforms/blink_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict
from typing import Optional
from pytorch_lightning.strategies import DDPShardedStrategy, ... | BELA-main | mblink/task/blink_task.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List, Any
# @manual "//github/facebookresearch/hydra:hydra"
from hydra.core.conf... | BELA-main | mblink/conf/config.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
@dataclass
class TransformConf:
pass
@dataclass
class DataModuleConf:
pass
@dataclass
... | BELA-main | mblink/conf/__init__.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pandas as pd
import os, sys
from syntactic_testsets.utils import load_vocab
def lstm_probs(output, gold, w2idx):
... | colorlessgreenRNNs-main | src/results.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
lm_parser = argparse.ArgumentParser(add_help=False)
lm_parser.add_argument('--data', type=str,
... | colorlessgreenRNNs-main | src/language_models/lm_argparser.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| colorlessgreenRNNs-main | src/language_models/__init__.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.utils.data.dataloader
class RNNModel(nn.Module):
"""Container module with an encoder, ... | colorlessgreenRNNs-main | src/language_models/model.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
def repackage_hidden(h):
"""Detaches hidden states from their history."""
if isinstance(h, torch.Tensor)... | colorlessgreenRNNs-main | src/language_models/utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import argparse
from utils import batchify, get_batch, repackage_hidden
import torch
import torch.nn as nn
from d... | colorlessgreenRNNs-main | src/language_models/evaluate_test_perplexity.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.f... | colorlessgreenRNNs-main | src/language_models/ngram_lstm.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import dictionary_corpus
from utils import... | colorlessgreenRNNs-main | src/language_models/evaluate_target_word.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import math
import time
import torch
import torch.nn as nn
from dictionary_corpus import Corpu... | colorlessgreenRNNs-main | src/language_models/main.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import torch
from collections import defaultdict
import logging
class Dictionary(object):
def __init__(self, pat... | colorlessgreenRNNs-main | src/language_models/dictionary_corpus.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import subprocess
def query_KenLM(lm_file, file_name, kenlm_path="/private/home/gulordava/kenlm/build/bin/"):
"""
:p... | colorlessgreenRNNs-main | src/syntactic_testsets/evaluate_utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import tree_module as tm
import argparse
import itertools
from collections import defaultdict
import numpy as np
from gener... | colorlessgreenRNNs-main | src/syntactic_testsets/extract_dependency_patterns.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Classes Node, Arc, DependencyTree providing functionality for syntactic dependency trees
"""
from __future__ import prin... | colorlessgreenRNNs-main | src/syntactic_testsets/tree_module.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
from utils import read_paradigms, load_vocab, extract_sent_features, transform_gold, vocab_freqs
import pandas as ... | colorlessgreenRNNs-main | src/syntactic_testsets/_create_datatable.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
| colorlessgreenRNNs-main | src/syntactic_testsets/__init__.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import print_function
#!/usr/bin/env python
import sys
import re
from collections import namedtuple
ConllC... | colorlessgreenRNNs-main | src/syntactic_testsets/conll_utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pandas as pd
from collections import defaultdict
import string
def read_paradigms(path):
""" reads morphological... | colorlessgreenRNNs-main | src/syntactic_testsets/utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import random
import pandas as pd
import tree_module as tm
from extract_dependency_patterns import grep_morp... | colorlessgreenRNNs-main | src/syntactic_testsets/generate_nonsense.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def is_vowel(c):
return c in ["a","o","u","e","i","A","O","U","E","I","è"]
def alt_numeral_morph(morph):
if "Number... | colorlessgreenRNNs-main | src/syntactic_testsets/generate_utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from collections import defaultdict
from data import data_utils
parser = argparse.ArgumentParser(description=... | colorlessgreenRNNs-main | src/data/collect_paradigms.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
file_name = sys.argv[1]
for l in open(file_name):
fields = l.strip().split("\t")
if len(fields) == 10:
... | colorlessgreenRNNs-main | src/data/preprocess_EnglishUD_morph.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
from collections import defaultdict
from random import shuffle
from data import data_utils
par... | colorlessgreenRNNs-main | src/data/data_vocab_prep.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import gzip
import logging
def read_gzip_stream(path):
with gzip.open(path, 'rt', encoding="UTF-8") as f:
for line... | colorlessgreenRNNs-main | src/data/data_utils.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import conll_utils
import tree_module as tm
def remove_segmented_morphemes_hebrew(t):
for start, end, token in t.fused_n... | colorlessgreenRNNs-main | src/data/hebrew/preprocess_HebrewUD_morph.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
file_name = sys.argv[1]
for l in open(file_name):
fields = l.strip().split("\t")
if len(fields) == 10:
morp... | colorlessgreenRNNs-main | src/data/hebrew/add_poss_wiki_annotation.py |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
file_name = sys.argv[1]
for l in open(file_name):
fields = l.strip().split("\t")
if len(fields) == 10:
mor... | colorlessgreenRNNs-main | src/data/hebrew/remove_binyanim.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
import logging
from dataclasses import dataclass, field
from math import sqrt
from typing import List, Optional, Union
import torch
import torch.nn as nn
logger: logging.Logger = logging.getLogger(__name__)
@dataclass
class MtlConfigs:
mtl_model: str = "att... | AdaTT-main | mtl_lib.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import argparse
import numpy as np
class BisonEval:
def __init__(sel... | binary-image-selection-main | bison_eval.py |
"""Load data, create a model, (optionally train it), and evaluate it
Example:
```
python run.py --task WiC --n_epochs 1 --counter_unit epochs --evaluation_freq 0.25 --checkpointing 1 --logging 1 --lr 1e-5
```
"""
import argparse
import json
import logging
import os
import sys
from functools import partial
import s... | snorkel-superglue-master | run.py |
import logging
import os
import superglue_parsers
from task_config import SuperGLUE_TASK_SPLIT_MAPPING
from tokenizer import get_tokenizer
from pytorch_pretrained_bert import BertTokenizer
from snorkel.mtl.data import MultitaskDataLoader
logger = logging.getLogger(__name__)
def get_jsonl_path(data_dir: str, task_... | snorkel-superglue-master | dataloaders.py |
SuperGLUE_TASK_NAMES = ["CB", "COPA", "MultiRC", "RTE", "WiC", "WSC"]
SuperGLUE_TASK_SPLIT_MAPPING = {
"CB": {"train": "train.jsonl", "valid": "val.jsonl", "test": "test.jsonl"},
"COPA": {"train": "train.jsonl", "valid": "val.jsonl", "test": "test.jsonl"},
"MultiRC": {"train": "train.jsonl", "valid": "val.... | snorkel-superglue-master | task_config.py |
import logging
from pytorch_pretrained_bert import BertTokenizer
logger = logging.getLogger(__name__)
def get_tokenizer(tokenizer_name):
logger.info(f"Loading Tokenizer {tokenizer_name}")
if tokenizer_name.startswith("bert"):
do_lower_case = "uncased" in tokenizer_name
tokenizer = BertToken... | snorkel-superglue-master | tokenizer.py |
from setuptools import find_packages, setup
with open("README.md") as read_file:
long_description = read_file.read()
with open("requirements.txt") as f:
requirements = f.read().splitlines()
setup(
name="snorkel-superglue",
version="0.1.0",
url="https://github.com/HazyResearch/snorkel-superglue",
... | snorkel-superglue-master | setup.py |
import argparse
import logging
import os
import pandas as pd
from snorkel.mtl.data import MultitaskDataset
def str2list(v, dim=","):
return [t.strip() for t in v.split(dim)]
def str2bool(v):
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", ... | snorkel-superglue-master | utils.py |
""" Script for downloading all SuperGLUE data.
For licence information, see the original dataset information links
available from: https://super.gluebenchmark.com/
Example usage:
python download_superglue_data.py --data_dir data --tasks all
"""
import argparse
import os
import shutil
import sys
import tempfile
im... | snorkel-superglue-master | download_superglue_data.py |
import torch
from torch import nn
class ChoiceModule(nn.Module):
def __init__(self, n_choices=2):
super().__init__()
self.n_choices = n_choices
def forward(self, immediate_ouput_dict):
logits = []
for i in range(self.n_choices):
logits.append(immediate_ouput_dict[... | snorkel-superglue-master | superglue_modules/copa_module.py |
import os
import torch
from pytorch_pretrained_bert.modeling import BertModel
from torch import nn
class BertModule(nn.Module):
def __init__(self, bert_model_name, cache_dir="./cache/"):
super().__init__()
# Create cache directory if not exists
if not os.path.exists(cache_dir):
... | snorkel-superglue-master | superglue_modules/bert_module.py |
| snorkel-superglue-master | superglue_modules/__init__.py |
from torch import nn
class RegressionModule(nn.Module):
def __init__(self, feature_dim):
super().__init__()
self.linear = nn.Linear(feature_dim, 1)
def forward(self, feature):
return self.linear.forward(feature)
| snorkel-superglue-master | superglue_modules/regression_module.py |
import torch
from torch import nn
from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor
class SpanClassifierModule(nn.Module):
def _make_span_extractor(self):
return SelfAttentiveSpanExtractor(self.proj_dim)
def _make_cnn_layer(self, d_inp):
"""
Make a CNN layer as a... | snorkel-superglue-master | superglue_modules/wsc_module.py |
from torch import nn
class ClassificationModule(nn.Module):
def __init__(self, feature_dim, class_cardinality):
super().__init__()
self.linear = nn.Linear(feature_dim, class_cardinality)
def forward(self, feature):
return self.linear.forward(feature)
| snorkel-superglue-master | superglue_modules/classification_module.py |
from snorkel.slicing.sf import slicing_function
from .general_sfs import slice_func_dict as general_slice_func_dict
@slicing_function()
def slice_temporal_preposition(example):
temporal_prepositions = ["after", "before", "past"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sen... | snorkel-superglue-master | superglue_slices/CB_sfs.py |
from .general_sfs import slice_func_dict as general_slice_func_dict
slices = []
slice_func_dict = {slice.name: slice for slice in slices}
slice_func_dict.update(general_slice_func_dict) | snorkel-superglue-master | superglue_slices/COPA_sfs.py |
from .general_sfs import slice_func_dict as general_slice_func_dict
slices = []
slice_func_dict = {slice.name: slice for slice in slices}
slice_func_dict.update(general_slice_func_dict) | snorkel-superglue-master | superglue_slices/WSC_sfs.py |
from . import general_sfs, RTE_sfs, WiC_sfs, CB_sfs, COPA_sfs, MultiRC_sfs, WSC_sfs
slice_func_dict = {
"CB": CB_sfs.slice_func_dict,
"COPA": COPA_sfs.slice_func_dict,
"MultiRC": MultiRC_sfs.slice_func_dict,
"RTE": RTE_sfs.slice_func_dict,
"WiC": WiC_sfs.slice_func_dict,
"WSC": WSC_sfs.slice_fu... | snorkel-superglue-master | superglue_slices/__init__.py |
from snorkel.slicing.sf import slicing_function
from .general_sfs import slice_func_dict as general_slice_func_dict
@slicing_function()
def slice_temporal_preposition(example):
temporal_prepositions = ["after", "before", "past"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_... | snorkel-superglue-master | superglue_slices/RTE_sfs.py |
from .general_sfs import slice_func_dict as general_slice_func_dict
slices = []
slice_func_dict = {slice.name: slice for slice in slices}
slice_func_dict.update(general_slice_func_dict) | snorkel-superglue-master | superglue_slices/MultiRC_sfs.py |
from snorkel.slicing.sf import slicing_function
from .general_sfs import slice_func_dict as general_slice_func_dict
@slicing_function()
def slice_verb(example):
"""Is the target word a verb?"""
return example.pos == "V"
@slicing_function()
def slice_noun(example):
"""Is the target word a noun?"""
ret... | snorkel-superglue-master | superglue_slices/WiC_sfs.py |
from snorkel.slicing.sf import slicing_function
@slicing_function()
def slice_temporal_preposition(example):
temporal_prepositions = ["after", "before", "past"]
both_sentences = example.sentence1 + example.sentence2
return any([p in both_sentences for p in temporal_prepositions])
@slicing_function()
def... | snorkel-superglue-master | superglue_slices/general_sfs.py |
import sys
from functools import partial
from superglue_modules.bert_module import (
BertContactLastCLSWithTwoTokensModule,
BertModule,
)
from superglue_modules.wsc_module import SpanClassifierModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snork... | snorkel-superglue-master | superglue_tasks/wsc.py |
import sys
from functools import partial
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from superglue_modules.copa_module import ChoiceModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snorkel.mtl.scorer import Scorer
from snorkel... | snorkel-superglue-master | superglue_tasks/swag.py |
import sys
from functools import partial
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snorkel.model.metrics import metric_score
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.tas... | snorkel-superglue-master | superglue_tasks/cb.py |
import sys
from functools import partial
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from superglue_modules.copa_module import ChoiceModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snorkel.mtl.scorer import Scorer
from snorkel... | snorkel-superglue-master | superglue_tasks/copa.py |
from . import cb, copa, multirc, rte, wic, wsc, swag
task_funcs = {
"CB": cb.build_task,
"COPA": copa.build_task,
"MultiRC": multirc.build_task,
"RTE": rte.build_task,
"WiC": wic.build_task,
"WSC": wsc.build_task,
"SWAG": swag.build_task,
}
| snorkel-superglue-master | superglue_tasks/__init__.py |
import sys
from functools import partial
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METRIC_MAPPING
from torch import nn
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Task, Operation
from . import utils
... | snorkel-superglue-master | superglue_tasks/rte.py |
import sys
from functools import partial
from torch import nn
from snorkel.model.metrics import metric_score
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Operation, Task
from superglue_modules.bert_module import BertLastCLSModule, BertModule
from task_config import SuperGLUE_LABEL_MAPPING, Super... | snorkel-superglue-master | superglue_tasks/multirc.py |
import torch.nn.functional as F
def ce_loss(module_name, immediate_ouput_dict, Y, active):
return F.cross_entropy(
immediate_ouput_dict[module_name][0][active], (Y.view(-1) - 1)[active]
)
def output(module_name, immediate_ouput_dict):
return F.softmax(immediate_ouput_dict[module_name][0], dim=1)... | snorkel-superglue-master | superglue_tasks/utils.py |
import sys
from functools import partial
from torch import nn
from snorkel.mtl.scorer import Scorer
from snorkel.mtl.task import Task, Operation
from superglue_modules.bert_module import (
BertContactLastCLSWithTwoTokensModule,
BertModule,
)
from task_config import SuperGLUE_LABEL_MAPPING, SuperGLUE_TASK_METR... | snorkel-superglue-master | superglue_tasks/wic.py |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "WSC"
def get_char_index(tex... | snorkel-superglue-master | superglue_parsers/wsc.py |
import json
import logging
import sys
import pandas as pd
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "SWAG"
d... | snorkel-superglue-master | superglue_parsers/swag.py |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "CB"
def parse(jsonl_path, t... | snorkel-superglue-master | superglue_parsers/cb.py |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "COPA"
def parse(jsonl_path,... | snorkel-superglue-master | superglue_parsers/copa.py |
from . import cb, copa, multirc, rte, wic, wsc, swag
parser = {
"MultiRC": multirc.parse,
"WiC": wic.parse,
"CB": cb.parse,
"COPA": copa.parse,
"RTE": rte.parse,
"WSC": wsc.parse,
"SWAG": swag.parse,
}
| snorkel-superglue-master | superglue_parsers/__init__.py |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "RTE"
def parse(jsonl_path, ... | snorkel-superglue-master | superglue_parsers/rte.py |
import json
import logging
import re
import sys
import numpy as np
import torch
from snorkel.mtl.data import MultitaskDataset
from task_config import SuperGLUE_LABEL_MAPPING
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "MultiRC"
def get_... | snorkel-superglue-master | superglue_parsers/multirc.py |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "WiC"
def get_rows(jsonl_pat... | snorkel-superglue-master | superglue_parsers/wic.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import hydra
import torch
from lib.ddp_trainer import SegmentationTrainer
from lib.distributed import multi_proc_run
d... | ContrastiveSceneContexts-main | downstream/semseg/ddp_main.py |
import random
import logging
import numpy as np
import scipy
import scipy.ndimage
import scipy.interpolate
import torch
# A sparse tensor consists of coordinates and associated features.
# You must apply augmentation to both.
# In 2D, flip, shear, scale, and rotation of images are coordinate transformation
# color j... | ContrastiveSceneContexts-main | downstream/semseg/datasets/transforms.py |
#from lib.datasets import synthia
#from lib.datasets import shapenet
from datasets import stanford
from datasets import scannet
DATASETS = []
def add_datasets(module):
DATASETS.extend([getattr(module, a) for a in dir(module) if 'Dataset' in a])
add_datasets(stanford)
#add_datasets(synthia)
add_datasets(scannet)... | ContrastiveSceneContexts-main | downstream/semseg/datasets/__init__.py |
import logging
import unittest
import imageio
import os
import os.path as osp
import pickle
import numpy as np
from collections import defaultdict
from plyfile import PlyData
from lib.pc_utils import Camera, read_plyfile
from lib.dataset import DictDataset, VoxelizationDataset, TemporalVoxelizationDataset, \
str2... | ContrastiveSceneContexts-main | downstream/semseg/datasets/synthia.py |
from abc import ABC
from pathlib import Path
from collections import defaultdict
import random
import numpy as np
from enum import Enum
import torch
from torch.utils.data import Dataset, DataLoader
import MinkowskiEngine as ME
from plyfile import PlyData
import datasets.transforms as t
from datasets.dataloader impo... | ContrastiveSceneContexts-main | downstream/semseg/datasets/dataset.py |
import logging
import os
import sys
import numpy as np
from collections import defaultdict
from scipy import spatial
import torch
from plyfile import PlyData
from lib.utils import read_txt, fast_hist, per_class_iu
from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type, cache
import datas... | ContrastiveSceneContexts-main | downstream/semseg/datasets/stanford.py |
import collections
import numpy as np
import MinkowskiEngine as ME
from scipy.linalg import expm, norm
# Rotation matrix along axis with angle theta
def M(axis, theta):
return expm(np.cross(np.eye(3), axis / norm(axis) * theta))
class Voxelizer:
def __init__(self,
voxel_size=1,
c... | ContrastiveSceneContexts-main | downstream/semseg/datasets/voxelizer.py |
import math
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class InfSampler(Sampler):
"""Samples elements randomly, without replacement.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, data_source, shuffle=False):
s... | ContrastiveSceneContexts-main | downstream/semseg/datasets/dataloader.py |
import logging
import os
import sys
from pathlib import Path
import torch
import numpy as np
from scipy import spatial
from datasets.dataset import VoxelizationDataset, DatasetPhase, str2datasetphase_type
from lib.pc_utils import read_plyfile, save_point_cloud
from lib.utils import read_txt, fast_hist, per_class_iu
f... | ContrastiveSceneContexts-main | downstream/semseg/datasets/scannet.py |
# Evaluates semantic label task
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage... | ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/evaluate_semantic_label.py |
# Evaluates semantic instance task
# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Each .txt prediction file look lik... | ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/evaluate_semantic_instance.py |
import os, sys
import csv
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
import imageio
except:
print("Please install the module 'imageio' for image processing, e.g.")
print("pip install imageio")
sys.exit(-1)
# print an error message and quit
def... | ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/util.py |
import os, sys
import json
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
from . import util... | ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/util_3d.py |
# Evaluates semantic label task
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Note that only the valid classes are used for evaluation,
# i.e., any ground truth label not in the valid label set
# is ignored in the evaluation.
#
# example usage... | ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_label.py |
import os, sys
import csv
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
import imageio
except:
print("Please install the module 'imageio' for image processing, e.g.")
print("pip install imageio")
sys.exit(-1)
# print an error message and quit
def... | ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/util.py |
import os, sys
import json
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from plyfile import PlyData, PlyElement
except:
print("Please install the module 'plyfile' for PLY i/o, e.g.")
print("pip install plyfile")
sys.exit(-1)
import util
# ma... | ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/util_3d.py |
# Evaluates semantic instance task
# Adapted from the CityScapes evaluation: https://github.com/mcordts/cityscapesScripts/tree/master/cityscapesscripts/evaluation
# Input:
# - path to .txt prediction files
# - path to .txt ground truth files
# - output file to write results to
# Each .txt prediction file look lik... | ContrastiveSceneContexts-main | downstream/semseg/datasets/evaluation/scannet_benchmark_utils/scripts/evaluate_semantic_instance.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob
import numpy as np
import os
import torch
from tqdm import tqdm
from lib.utils import mkdir_p
from lib.pc_utils import save_po... | ContrastiveSceneContexts-main | downstream/semseg/datasets/preprocessing/stanford/stanford.py |
import os
import sys
import plyfile
import json
import time
import torch
import argparse
import numpy as np
def get_raw2scannet_label_map():
lines = [line.rstrip() for line in open('scannetv2-labels.combined.tsv')]
lines = lines[1:]
raw2scannet = {}
for i in range(len(lines)):
elements = lines[... | ContrastiveSceneContexts-main | downstream/semseg/datasets/preprocessing/scannet/collect_indoor3d_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from torch.nn import Module
from MinkowskiEngine import SparseTensor
class Wrapper(Module):
"""
Wrapper for the segment... | ContrastiveSceneContexts-main | downstream/semseg/models/wrapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from models.resnet import ResNetBase, get_norm
from models.modules.common import ConvType, NormType, conv, conv_tr
from models.modules.resne... | ContrastiveSceneContexts-main | downstream/semseg/models/resunet.py |