Dataset schema (three string columns per row):
    python_code  string, length 0 to 992k
    repo_name    string, length 8 to 46
    file_path    string, length 5 to 162
import torch
import torch.nn as nn
# import torch.jit
import numpy as np

class LossFunction:
    def compute_Loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")

class L1Loss(LossFunction):
    def __init__(self):
        self.loss_function = nn.L1Loss()

    def c...
EXA-1-master
exa/modular_components/lossFunctions/nebula/nebulav2.py
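The preview above cuts off inside L1Loss. The pattern it establishes is a shared-interface base class whose subclasses wrap torch.nn loss modules. A minimal runnable sketch of that pattern, assuming the truncated method simply delegates to the wrapped module (the compute_loss body and the usage lines are illustrative guesses, not taken from the file):

import torch
import torch.nn as nn

class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")

class L1Loss(LossFunction):
    def __init__(self):
        # Wrap a torch.nn loss module so every loss exposes the same interface
        self.loss_function = nn.L1Loss()

    def compute_loss(self, y_pred, y_true):
        # Assumed continuation: delegate to the wrapped module
        return self.loss_function(y_pred, y_true)

y_pred = torch.randn(4, 3)
y_true = torch.randn(4, 3)
print(L1Loss().compute_loss(y_pred, y_true))  # prints a scalar tensor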
from setuptools import setup, find_packages

setup(
    name = 'nebula',
    packages = find_packages(exclude=[]),
    version = '0.2.0',
    license='MIT',
    description = '1 Loss Function to rule them all!',
    author = 'Agora',
    author_email = 'kye@apac.ai',
    long_description_content_type = 'text/markdown',
    url = 'https:...
EXA-1-master
exa/modular_components/lossFunctions/nebula/setup.py
import torch
import torch.nn as nn
import numpy as np

# define the loss function class
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented")

# implement specific loss functions that inherit from LossFunction
class L1Loss(LossFuncti...
EXA-1-master
exa/modular_components/lossFunctions/nebula/nebula.py
# using gradient boosted greedy algorithms to compute loss
import xgboost as xgb
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, accuracy_score

# Load your dataset
# X, y = load_your_data()

# For demonstration purposes, we'll use random data
X =...
EXA-1-master
exa/modular_components/lossFunctions/nebula/experimental/xgboostV3.py
import torch
import torch.nn as nn
import numpy as np

# define the loss function class
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented")

# implement specific loss functions that inherit from LossFunction
class L1Loss(LossFuncti...
EXA-1-master
exa/modular_components/lossFunctions/nebula/experimental/nebulaV2.py
import numpy as np
from torch.nn import BCELoss

# Define the base LossFunction class
class LossFunction:
    def compute_loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented in the derived class")

# Define specific loss function classes that inherit from LossFunction ...
EXA-1-master
exa/modular_components/lossFunctions/nebula/experimental/nebula.py
import torch
import torch.nn as nn
# import torch.jit
import numpy as np

class LossFunction:
    def compute_Loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")

class L1Loss(LossFunction):
    def __init__(self):
        self.loss_function = nn.L1Loss()

    def c...
EXA-1-master
exa/modular_components/lossFunctions/nebula/experimental/reinforcement/nebula.py
import torch
import torch.nn as nn
# import torch.jit
import numpy as np

class LossFunction:
    def compute_Loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")

class L1Loss(LossFunction):
    def __init__(self):
        self.loss_function = nn.L1Loss()

    def c...
EXA-1-master
exa/modular_components/lossFunctions/nebula/experimental/reinforcement/experimental/nebula2.py
import torch
import torch.nn as nn
# import torch.jit
import numpy as np

class LossFunction:
    def compute_Loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")

class L1Loss(LossFunction):
    def __init__(self):
        self.loss_function = nn.L1Loss()

    def c...
EXA-1-master
exa/modular_components/lossFunctions/nebula/experimental/reinforcement/experimental/nebula3.py
import torch
import torch.nn as nn
# import torch.jit
import numpy as np

class LossFunction:
    def compute_Loss(self, y_pred, y_true):
        raise NotImplementedError("compute_loss method must be implemented!")

class L1Loss(LossFunction):
    def __init__(self):
        self.loss_function = nn.L1Loss()

    def c...
EXA-1-master
exa/modular_components/lossFunctions/nebula/experimental/reinforcement/experimental/nebula1.py
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import pairwise_distances
import torch
import numpy as np
import networkx as nx
from gudhi import SimplexTree
import gudhi as gd

def geometric_similarity(y_pred, y_true):
    # Compute a simple geometric metric based on the L2 norm of th...
EXA-1-master
exa/modular_components/lossFunctions/Yau/polymorphic.py
import torch
import numpy as np
import networkx as nx
from gudhi import SimplexTree
import gudhi as gd

def geometric_similarity(y_pred, y_true):
    # Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
    geometric_difference = torch.norm(y_pred - y_true)
    return ge...
EXA-1-master
exa/modular_components/lossFunctions/Yau/polymorphicv2.py
import numpy as np
import tensorflow as tf
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
from v2 import calabi_yau_loss

# Create a synthetic dataset
X, y = make_classification(n_samples=1000, n_features=20, n_classes=2)
X_tra...
EXA-1-master
exa/modular_components/lossFunctions/Yau/test.py
# import numpy as np
# import tensorflow as tf
# from sklearn.datasets import make_classification
# from sklearn.model_selection import train_test_split
# from sklearn.metrics import log_loss
# # from v2 import calabi_yau_loss
# # from polymorphic import calabi_yau_loss
# from polymorphicv2 import calabi_yau_loss
# # ...
EXA-1-master
exa/modular_components/lossFunctions/Yau/test2.py
import numpy as np
from scipy.spatial.distance import cdist

# Function to measure geometric similarity
def geometric_similarity(y_pred, y_true):
    # Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
    geometric_difference = np.linalg.norm(y_pred - y_true)
    retur...
EXA-1-master
exa/modular_components/lossFunctions/Yau/Yau.py
import torch
import numpy as np
import networkx as nx
from gudhi import SimplexTree
import gudhi as gd

def geometric_similarity(y_pred, y_true):
    # Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
    geometric_difference = torch.norm(y_pred - y_true)
    return g...
EXA-1-master
exa/modular_components/lossFunctions/Yau/visual.py
import torch
import numpy as np
import networkx as nx
from gudhi import SimplexTree
import gudhi as gd

def geometric_similarity(y_pred, y_true):
    # Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
    geometric_difference = torch.norm(y_pred - y_true)
    return ge...
EXA-1-master
exa/modular_components/lossFunctions/Yau/p3.py
import numpy as np

# # Functions to measure geometric similarity, topological invariance, complexity reduction, and stability

# def geometric_similarity(y_pred, y_true):
#     # Compute a metric based on curvature or other geometric properties between y_pred and y_true
#     pass

# def topological_invariance(y_pred, y...
EXA-1-master
exa/modular_components/lossFunctions/Yau/main.py
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import pairwise_distances

def geometric_similarity(y_pred, y_true):
    # Compute a simple geometric metric based on the L2 norm of the difference between y_pred and y_true
    geometric_difference = np.linalg.norm(y_pred - y_true)
    r...
EXA-1-master
exa/modular_components/lossFunctions/Yau/v2.py
import torch
import torch.nn as nn

DEV = torch.device('cuda:0')

def find_layers(module, layers=[nn.Conv2d, nn.Linear], name=''):
    if type(module) in layers:
        return {name: module}
    res = {}
    for name1, child in module.named_children():
        res.update(find_layers(
            child, layers=layers...
EXA-1-master
exa/modular_components/gptq/modelutils.py
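find_layers above recursively flattens a module tree into a dict keyed by dotted submodule names, keeping only the layer types of interest; GPTQ uses it to enumerate the Linear/Conv layers to quantize. A self-contained usage sketch (the name-threading argument is inferred from the standard GPTQ implementation, since the preview truncates mid-call, and the toy model is illustrative):

import torch.nn as nn

def find_layers(module, layers=[nn.Conv2d, nn.Linear], name=''):
    # Return {dotted_name: module} for every submodule whose type is in `layers`
    if type(module) in layers:
        return {name: module}
    res = {}
    for name1, child in module.named_children():
        res.update(find_layers(
            child, layers=layers,
            name=name + '.' + name1 if name != '' else name1
        ))
    return res

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Sequential(nn.Linear(8, 4)))
print(list(find_layers(model)))  # ['0', '2.0']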
from setuptools import setup, Extension
from torch.utils import cpp_extension

setup(
    name='quant_cuda',
    ext_modules=[cpp_extension.CUDAExtension(
        'quant_cuda', ['quant_cuda.cpp', 'quant_cuda_kernel.cu']
    )],
    cmdclass={'build_ext': cpp_extension.BuildExtension}
)
EXA-1-master
exa/modular_components/gptq/setup_cuda.py
import time

import torch
import torch.nn as nn

from gptq import *
from modelutils import *
from quant import *

def get_opt(model):
    import torch
    def skip(*args, **kwargs):
        pass
    torch.nn.init.kaiming_uniform_ = skip
    torch.nn.init.uniform_ = skip
    torch.nn.init.normal_ = skip
    from transf...
EXA-1-master
exa/modular_components/gptq/opt.py
import numpy as np
import torch

def set_seed(seed):
    np.random.seed(seed)
    torch.random.manual_seed(seed)

def get_wikitext2(nsamples, seed, seqlen, model):
    from datasets import load_dataset
    traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
    testdata = load_dataset('wikitext',...
EXA-1-master
exa/modular_components/gptq/datautils.py
import time

import torch
import torch.nn as nn

from gptq import *
from modelutils import *
from quant import *

def get_llama(model):
    import torch
    def skip(*args, **kwargs):
        pass
    torch.nn.init.kaiming_uniform_ = skip
    torch.nn.init.uniform_ = skip
    torch.nn.init.normal_ = skip
    from tran...
EXA-1-master
exa/modular_components/gptq/llama.py
import torch
import torch.nn as nn

import quant_cuda

torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False

print('Benchmarking OPT-175B FC2 matvec ...')

DEV = torch.device('cuda:0')

M = 12288 * 4
N = 12288

DTYPE = torch.half
mat = torch.randn((M, N), device=DEV, dtype=DTYPE)
vec = ...
EXA-1-master
exa/modular_components/gptq/test_kernel.py
import math
import time

import torch
import torch.nn as nn
import transformers

from quant import *

DEBUG = False

torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False

class GPTQ:
    def __init__(self, layer):
        self.layer = layer
        self.dev = self.layer.weight.devic...
EXA-1-master
exa/modular_components/gptq/gptq.py
import numpy as np
import torch
import torch.nn as nn

def quantize(x, scale, zero, maxq):
    if maxq < 0:
        return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero
    q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
    return scale * (q - zero)

class Quantizer(nn.Module):
    def __in...
EXA-1-master
exa/modular_components/gptq/quant.py
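The quantize function above is uniform affine fake-quantization: inputs are snapped to an integer grid q = clamp(round(x / scale) + zero, 0, maxq) and immediately dequantized as scale * (q - zero), so the tensor stays in float but only takes on representable values. A small worked example (the per-tensor scale and zero-point below are illustrative choices, not how GPTQ's Quantizer derives them):

import torch

def quantize(x, scale, zero, maxq):
    # Round to the integer grid, clamp to [0, maxq], then dequantize
    q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
    return scale * (q - zero)

x = torch.tensor([-1.0, -0.3, 0.0, 0.4, 1.0])
maxq = torch.tensor(15.)               # 4-bit grid: 16 levels
scale = (x.max() - x.min()) / maxq     # illustrative per-tensor scale
zero = torch.round(-x.min() / scale)   # zero-point placing 0 on the grid
print(quantize(x, scale, zero, maxq))  # values snapped to the 4-bit grid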
import math
import time

import torch
import torch.nn as nn
import transformers

from gptq import *
from modelutils import *
from quant import *

def get_bloom(model):
    import torch
    def skip(*args, **kwargs):
        pass
    torch.nn.init.kaiming_uniform_ = skip
    torch.nn.init.uniform_ = skip
    torch.nn....
EXA-1-master
exa/modular_components/gptq/bloom.py
import math
from collections.abc import Iterable

import numpy as np
import sacrebleu
import sklearn.metrics
import random

def mean(arr):
    return sum(arr) / len(arr)

def pop_stddev(arr):
    mu = mean(arr)
    return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / len(arr))

def sample_stddev(arr):
    mu = mean(...
EXA-1-master
exa/modular_components/gptq/zeroShot/metrics.py
import numpy as np
import torch

def set_seed(seed):
    np.random.seed(seed)
    torch.random.manual_seed(seed)

def get_wikitext2(nsamples, seed, seqlen, model):
    from datasets import load_dataset
    traindata = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
    testdata = load_dataset('wikitext',...
EXA-1-master
exa/modular_components/gptq/zeroShot/datautils.py
import argparse
import fnmatch
import tasks
import inspect
import functools

def positional_deprecated(fn):
    """
    A decorator to nudge users into passing only keyword args (`kwargs`)
    to the wrapped function, `fn`.
    """
    @functools.wraps(fn)
    def _wrapper(*args, **kwargs):
        if len(args) != 1 ...
EXA-1-master
exa/modular_components/gptq/zeroShot/utils.py
import json
import logging

import evaluator
import tasks
from utils import parse_args, pattern_match

def main():
    args = parse_args()

    if args.tasks is None:
        raise ValueError("Please specify a task to run")
    else:
        task_names = pattern_match(args.tasks.split(","), tasks.ALL_TASKS)
    print...
EXA-1-master
exa/modular_components/gptq/zeroShot/main.py
from utils import positional_deprecated
import random
import numpy as np
import models
import models.models_utils
import tasks
import collections
import itertools
import metrics
import torch
import time

from datautils import get_loaders

@positional_deprecated
def simple_evaluate(
    # model, args,
    tasks_lis...
EXA-1-master
exa/modular_components/gptq/zeroShot/evaluator.py
""" The LAMBADA dataset: Word prediction requiring a broad discourse context∗ https://arxiv.org/pdf/1606.06031.pdf LAMBADA is a dataset to evaluate the capabilities of computational models for text understanding by means of a word prediction task. LAMBADA is a collection of narrative passages sharing the characteristi...
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/lambada.py
""" A Corpus and Cloze Evaluation for Deeper Understanding of Commonsense Stories https://arxiv.org/pdf/1604.01696.pdf 'Story Cloze Test' (2018) is a commonsense reasoning framework for evaluating story understanding, story generation, and script learning. This test requires a system to choose the correct ending to a f...
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/storycloze.py
from pprint import pprint
from typing import List, Union

from .tasks_utils import Task
from . import piqa
from . import arc
from . import superglue
from .local_datasets import lambada as lambada_dataset
from .lambada import LAMBADA
from . import glue
from . import storycloze

# TODO: Add the rest of the results!
######...
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/__init__.py
""" GLUE: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding https://openreview.net/pdf?id=rJ4km2R5t7 The General Language Understanding Evaluation (GLUE) benchmark is a collection of resources for training, evaluating, and analyzing natural language understanding systems. GLUE consists of...
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/glue.py
""" SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems https://w4ngatang.github.io/static/papers/superglue.pdf SuperGLUE is a benchmark styled after GLUE with a new set of more difficult language understanding tasks. Homepage: https://super.gluebenchmark.com/ TODO: WSC requires free-f...
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/superglue.py
import abc
import math
import datasets
import inspect
import functools
import numpy as np
from abc import abstractmethod
import sklearn

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Compute max metric between prediction and each ground truth."""
    scores_for_ground_truths = []
    f...
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/tasks_utils.py
""" Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge https://arxiv.org/pdf/1803.05457.pdf The ARC dataset consists of 7,787 science exam questions drawn from a variety of sources, including science questions provided under license by a research partner affiliated with AI2. These are text-...
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/arc.py
""" PIQA: Reasoning about Physical Commonsense in Natural Language https://arxiv.org/pdf/1911.11641.pdf Physical Interaction: Question Answering (PIQA) is a physical commonsense reasoning and a corresponding benchmark dataset. PIQA was designed to investigate the physical knowledge of existing models. To what extent a...
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/piqa.py
from .lambada import lambada
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/local_datasets/__init__.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2....
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/local_datasets/lambada/lambada.py
from .lambada import Lambada
EXA-1-master
exa/modular_components/gptq/zeroShot/tasks/local_datasets/lambada/__init__.py
from . import opt
from . import bloom

MODEL_REGISTRY = {
    'opt': opt.OPT,
    'bloom': bloom.BLOOM
}

def get_model(model_name):
    if 'opt' in model_name:
        return MODEL_REGISTRY['opt']
    elif 'bloom' in model_name:
        return MODEL_REGISTRY['bloom']
    return MODEL_REGISTRY[model_name]
EXA-1-master
exa/modular_components/gptq/zeroShot/models/__init__.py
import transformers
import torch
from .models_utils import BaseLM, find_layers
from transformers import OPTForCausalLM, AutoTokenizer
import torch.nn.functional as F
from torch import nn
import torch
from tqdm import tqdm
from .quant import *
from .gptq import GPTQ

class OPTClass(BaseLM):
    def __init__(self, args)...
EXA-1-master
exa/modular_components/gptq/zeroShot/models/opt.py
import math
import time

import torch
import torch.nn as nn
import transformers

from .quant import *

DEBUG = False

torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False

class GPTQ:
    def __init__(self, layer):
        self.layer = layer
        self.dev = self.layer.weight.devi...
EXA-1-master
exa/modular_components/gptq/zeroShot/models/gptq.py
import torch
import torch.nn as nn

try:
    import quant_cuda
except:
    print('CUDA extension not installed.')

def quantize(x, scale, zero, maxq):
    q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
    return scale * (q - zero)

class Quantizer(nn.Module):
    def __init__(self, shape=1):
        super(Q...
EXA-1-master
exa/modular_components/gptq/zeroShot/models/quant.py
import abc
import torch
import json
import hashlib
import collections
from tqdm import tqdm
from typing import Iterable
from abc import abstractmethod
from torch import nn
import transformers

def find_layers(module, layers=[nn.Conv2d, nn.Linear, transformers.Conv1D], name=''):
    if type(module) in layers:
        re...
EXA-1-master
exa/modular_components/gptq/zeroShot/models/models_utils.py
import math
import time

import torch
import torch.nn as nn
import transformers

from quant import *

DEBUG = False

torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False

class TrueOBS:
    def __init__(self, layer):
        self.layer = layer
        self.dev = self.layer.weight.dev...
EXA-1-master
exa/modular_components/gptq/zeroShot/models/fast_trueobs.py
import transformers
import torch
from .models_utils import BaseLM, find_layers
from transformers import BloomForCausalLM, AutoTokenizer
import torch.nn.functional as F
from torch import nn
import torch
from tqdm import tqdm
from .quant import *
from .gptq import GPTQ

class BLOOMClass(BaseLM):
    def __init__(self, a...
EXA-1-master
exa/modular_components/gptq/zeroShot/models/bloom.py
from fts import FineTuner

model_id="google/flan-t5-xxl"
dataset_name="samsum"

finetune = FineTuner(
    model_id=model_id,
    dataset_name="samsum",
    max_length=150,
    lora_r=16,
    lora_alpha=32,
    quantize=True
)

finetune.train()
Finetuning-Suite-master
example.py
from fts import Inference

model = Inference(
    model_id="georgesung/llama2_7b_chat_uncensored",
    quantized=True
)

model.run("What is your name")
Finetuning-Suite-master
inference.py
from datasets import load_dataset
from transformers import AutoTokenizer

from fts.finetuner import FineTuner

tokenizer = AutoTokenizer.from_pretrained("Phind/Phind-CodeLlama-34B-v1")

def data_preprocessing(dataset="Abirate/english_quotes"):
    data = load_dataset(dataset)
    data = data.map(
        lambda samples...
Finetuning-Suite-master
playground/llama2_english.py
from fts.finetuner import FineTuner
from fts.inference.hf_model import Inference
from fts.processing.base import Preprocessor, DefaultPreprocessor
from fts.trainer.base import TrainerConfiguration, DefaultTrainerConfig
from fts.processing.build_dataset import BuildDataset
Finetuning-Suite-master
fts/__init__.py
import logging

import torch
from datasets import load_dataset
from peft import TaskType
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    Seq2SeqTrainer,
)

from fts.inference.base import DefaultInferenceHandler
from fts.processing.base import DefaultPreprocessor
from ...
Finetuning-Suite-master
fts/finetuner.py
Finetuning-Suite-master
fts/processing/__init__.py
import argparse
import multiprocessing
from itertools import chain

from datasets import load_dataset

from kosmosx.model import KosmosTokenizer

class BuildDataset:
    def __init__(self, seed=42, seq_len=8192, hf_account="YOUR HUGGINGFACE API KEY",
                 dataset_name="HuggingFaceM4/VQAv2"):
        self.SEED = seed ...
Finetuning-Suite-master
fts/processing/build_dataset.py
from abc import ABC, abstractmethod

class Preprocessor(ABC):
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    @abstractmethod
    def preprocess_function(self, sample, padding="max_length"):
        pass

# Step 2: Default Preprocessor
class DefaultPreprocessor(Preprocessor):
    def prepro...
Finetuning-Suite-master
fts/processing/base.py
Finetuning-Suite-master
fts/utils/__init__.py
def print_trainable_parameters(model):
    trainable_params = 0
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
    print(
        f"Trainable params: {trainable_params} || all par...
Finetuning-Suite-master
fts/utils/main.py
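print_trainable_parameters tallies parameters with requires_grad against the total, the usual sanity check that a LoRA/PEFT setup actually froze the base weights. A quick sketch on a toy model (the print format is an assumption, since the preview truncates inside the f-string):

import torch.nn as nn

def print_trainable_parameters(model):
    trainable_params = 0
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
    # Assumed tail of the truncated f-string
    print(f"Trainable params: {trainable_params} || all params: {all_param} "
          f"|| trainable%: {100 * trainable_params / all_param:.2f}")

model = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 2))
for param in model[0].parameters():  # freeze the first layer
    param.requires_grad = False
print_trainable_parameters(model)  # Trainable params: 22 || all params: 132 || trainable%: 16.67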
import torch
import logging
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

class Inference:
    def __init__(
        self,
        model_id: str,
        device: str = None,
        max_length: int = 20,
        quantize: bool = False,
        quantizatio...
Finetuning-Suite-master
fts/inference/hf_model.py
Finetuning-Suite-master
fts/inference/__init__.py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

class GPTQInference:
    def __init__(
        self,
        model_id,
        quantization_config_bits: int = 4,
        quantization_config_dataset: str = None,
        max_length: int = 500
    ):
        self.model_id = model_id ...
Finetuning-Suite-master
fts/inference/gptq.py
from abc import ABC, abstractmethod

class InferenceHandler(ABC):
    @abstractmethod
    def run(
        self,
        prompt_text=None,
        model=None,
        tokenizer=None,
        device=None,
        max_length=None
    ):
        pass

class DefaultInferenceHandler(InferenceHandler):
    def run(
        ...
Finetuning-Suite-master
fts/inference/base.py
Finetuning-Suite-master
fts/trainer/__init__.py
from abc import ABC, abstractmethod

from peft import LoraConfig, TaskType, get_peft_model
from transformers import (
    DataCollatorForSeq2Seq,
    Seq2SeqTrainingArguments,
)

class TrainerConfiguration(ABC):
    @abstractmethod
    def configure(self, model, tokenizer, output_dir, num_train_epochs, *args, **kwargs)...
Finetuning-Suite-master
fts/trainer/base.py
from setuptools import setup, find_packages

setup(
    name='pali-torch',
    packages=find_packages(exclude=[]),
    version='0.0.3',
    license='MIT',
    description='Pali - PyTorch',
    author='Kye Gomez',
    author_email='kye@apac.ai',
    long_description_content_type='text/markdown',
    url='https://github....
PALI-main
setup.py
import torch
from pali import Pali

model = Pali()

img = torch.randn(1, 3, 256, 256)
prompt = torch.randint(0, 256, (1, 1024))
mask = torch.ones(1, 1024).bool()
output_text = torch.randint(0, 256, (1, 1024))

result = model.process(img, prompt, output_text, mask)
print(result)
PALI-main
example.py
import torch
from pali.model import Pali

# # Initialize Pali model
# pali = Pali()

# Example 1: Caption an Image

# # Load images
# images = [torch.randn(1, 3, 256, 256) for _ in range(3)]

# for i, img in enumerate(images):
#     # Generate a caption for the image
#     prompt = torch.randint(0, 256, (1, 1024))
# ...
PALI-main
inference.py
from math import ceil

import torch
from torch import nn
import torch.nn.functional as F

from einops import rearrange, pack, unpack

def exists(val):
    return val is not None

def eval_decorator(fn):
    def inner(self, *args, **kwargs):
        was_training = self.training
        self.eval()
        out = fn(self, ...
PALI-main
pali/autoregressive_wrapper.py
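eval_decorator is a small mode-guard: it records self.training, switches the module to eval mode for the duration of the call (disabling dropout and the like during generation), then restores the previous mode. The preview cuts off mid-body; a minimal sketch of the full idiom as it appears in lucidrains-style autoregressive wrappers (the restore lines are inferred, and the Toy class is illustrative):

import torch
from torch import nn

def eval_decorator(fn):
    def inner(self, *args, **kwargs):
        was_training = self.training
        self.eval()                  # run the wrapped call in eval mode
        out = fn(self, *args, **kwargs)
        self.train(was_training)     # restore the previous train/eval state
        return out
    return inner

class Toy(nn.Module):
    @eval_decorator
    def generate(self):
        return self.training         # False inside the decorated call

m = Toy()
m.train()
print(m.generate(), m.training)  # False True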
from pali.model import VitModel, Transformer, Pali
PALI-main
pali/__init__.py
import torch
from pali.transformer import ViTransformerWrapper, Encoder, XTransformer
from transformers import AutoTokenizer

class VitModel:
    def __init__(self,
                 image_size=256,
                 patch_size=32,
                 dim=512,
                 depth=6,
                 heads=8,
                 ...
PALI-main
pali/model.py
from functools import partial

import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F

from collections import namedtuple
from functools import wraps
from packaging import version
from dataclasses import dataclass

from einops import rearrange

# constants

EfficientAttentionConfig = namedtup...
PALI-main
pali/attend.py
import math
from random import random

import torch
from torch import nn, einsum, Tensor
import torch.nn.functional as F

from functools import partial, wraps
from inspect import isfunction
from dataclasses import dataclass
from typing import List

from einops import rearrange, repeat

from pali.attend import Attend, I...
PALI-main
pali/transformer.py
VIMA-main
example.py
import torch
from torch.nn import Module
from transformers import AutoTokenizer

from vima.transformer import (
    Decoder,
    Transformer,
    ViTransformerWrapper,
    Encoder
)
import logging
from vima.autoregressive import AutoregressiveWrapper

logging.basicConfig(
    level=logging.DEBUG,
    format='%(ascti...
VIMA-main
vima/vima.py
from math import ceil

import torch
import torch.nn.functional as F
from einops import pack, rearrange, unpack
from torch import nn

def exists(val):
    return val is not None

def eval_decorator(fn):
    def inner(self, *args, **kwargs):
        was_training = self.training
        self.eval()
        out = fn(self,...
VIMA-main
vima/autoregressive.py
from vima.vima import VimaTokenizer, Vima, VimaMultiModal
VIMA-main
vima/__init__.py
from typing import Callable, Literal

import torch
from torch import nn
from torch.nn import Embedding as _Embedding

# utils
class Embedding(_Embedding):
    @property
    def output_dim(self):
        return self.embedding_dim

def build_mlp(
    input_dim,
    *,
    hidden_dim,
    output_dim,
    hidden_depth,...
VIMA-main
vima/model.py
from collections import namedtuple
from dataclasses import dataclass
from functools import partial, wraps
from typing import Optional

import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from packaging import version
from torch import Tensor, einsum, nn

# constants

EfficientAttentionConf...
VIMA-main
vima/attend.py
from vima.transformer import ViTransformerWrapper, Encoder

class VisionEncoder:
    # # Usage:
    # image_encoder = ImageEncoder()
    # img_embeddings = image_encoder.embed_image_data([img1, img2])  # You'd provide your list of image data here.
    def __init__(
        self,
        image_size: int = 256,
        ...
VIMA-main
vima/vit.py
import math
from dataclasses import dataclass
from functools import partial, wraps
from inspect import isfunction
from random import random
from typing import Callable, List, Optional

import torch
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
from torch import Tensor, einsum, nn
from sa...
VIMA-main
vima/transformer.py
from falcon.main import Falcon

falcon = Falcon(
    temperature=0.5,
    top_p=0.9,
    max_new_tokens=500,
    quantized=True,
    system_prompt=""
)

prompt = "What is the meaning of the collapse of the wave function?"

result = falcon.run(prompt=prompt)
print(result)
Falcon-main
example.py
from falcon.main import Falcon
Falcon-main
falcon/__init__.py
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

class Falcon:
    def __init__(
        self,
        *,
        model_id: str = "tiiuae/falcon-180B",
        temperature: float = 0.5,
        top_p: float = 0.9,
        max_new_tokens: int = 400,
        quantized: bool ...
Falcon-main
falcon/main.py
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
preferences.py -- Preferences system.

@author: Bruce
@version: $Id: preferences.py 13965 2008-08-14 20:09:41Z derrickdb1 $
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.

Module classification: [bruce 071215]

At least foundati...
NanoCAD-master
packaging/Pref_Mod/preferences.py
# setup.py
from distutils.core import setup
import py2exe

setup(version = "1.0.0",
      description = "Preferences modifier for databases",
      name = "pref_modifier",
      console=["pref_modifier.py"])
NanoCAD-master
packaging/Pref_Mod/setup_win.py
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
prefs_constants.py

Constants and utilities related to user preferences,
which need to be defined immediately upon startup.

@author: Mark, Bruce, Ninad
@version: $Id: prefs_constants.py 11951 2008-03-14 04:44:50Z ericmessick $
@copyright: 2005-200...
NanoCAD-master
packaging/Pref_Mod/prefs_constants.py
import os
from preferences import prefs_context
import sys
import getopt
import NE1_Build_Constants

prefs = prefs_context()

if os.name=="nt":
    capture_console = False
    capture_file = ""
    # if it's not reporting as python is the executable
    if not sys.executable.upper().endswith("PYTHON.EXE") and \
       ...
NanoCAD-master
packaging/Pref_Mod/pref_modifier.py
#!/usr/bin/env python
"""
setup.py - script for building MyApplication

Usage:
    % python setup.py py2app
"""
from distutils.core import setup
import py2app

setup(
    app=['pref_modifier.py'],
)
NanoCAD-master
packaging/Pref_Mod/setup_mac.py
""" This is a setup.py script generated by py2applet Usage: python setup.py py2app """ from setuptools import setup APP = ['main.py'] DATA_FILES = [] OPTIONS = {'argv_emulation': True} setup( app=APP, name='NanoEngineer-1', data_files=DATA_FILES, options={'py2app': OPTIONS}, setup_requires=[...
NanoCAD-master
packaging/MacOSX/setup.py
""" This is a setup.py script generated by py2applet Usage: python setup.py py2app """ #from setuptools import setup from distutils.core import setup import py2exe APP = [{'script': 'main.py', 'icon_resources': [(0, '../../packaging/Win32/NE1.ico')]}] DATA_FILES = [] OPTIONS = {'argv_emulation': True} setup( ...
NanoCAD-master
packaging/Win32/setup.py
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
# $Id$

# This is used only for "make deps"

import sys, re

srcdir = sys.argv[1]
objdir = sys.argv[2]
substitution = sys.argv[3]
objs = sys.argv[4:]

# This is a little kludgey. The idea is to allow either C files (with
# ".o:.c") or C...
NanoCAD-master
cad/plugins/CoNTub/procdeps.py
# Copyright 2007 Nanorex, Inc. See LICENSE file for details.

# usage:
#
# python Generate.py adenine > adenine.mmp

import sys
import math

zSpacing = 3180  # 0.1 pm
minorGroveDegrees = 133
baseTwistDegrees = 33.75
sugarRadius = 6760  # pm -- Value from EricD's pdb: 6760
sugarPhosphateDistance = 3640  # Value from Eri...
NanoCAD-master
cad/plugins/DNA/bdna-pseudo-bases/Generate.py
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.

import sys
import string

sys.path.append("../../../src")
from VQT import A, V, vlen

class AtomType:
    def __init__(self, symbol, number, rcovalent):
        self.symbol = symbol
        self.number = number
        self.rcovalen...
NanoCAD-master
cad/plugins/DNA/Z-DNA/Atomistic-bases/prepare.py
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.

import sys
import string

sys.path.append("../../../src")
from VQT import A, V, vlen

class AtomType:
    def __init__(self, symbol, number, rcovalent):
        self.symbol = symbol
        self.number = number
        self.rcovalen...
NanoCAD-master
cad/plugins/DNA/B-DNA/Atomistic-bases/prepare.py
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.

import sys
import string

sys.path.append("../../../src")
from VQT import A, V, vlen

class AtomType:
    def __init__(self, symbol, number, rcovalent):
        self.symbol = symbol
        self.number = number
        self.rcovalent...
NanoCAD-master
cad/plugins/DNA/bdna-bases/prepare.py
#!/usr/bin/python
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.

import sys
import string

sys.path.append("../../../src")
from VQT import A, V, vlen

class AtomType:
    def __init__(self, symbol, number, rcovalent):
        self.symbol = symbol
        self.number = number
        self.rcovalent...
NanoCAD-master
cad/plugins/DNA/zdna-bases/prepare.py
# Copyright 2008 Nanorex, Inc. See LICENSE file for details.
"""
This program can be used to translate cartesian coordinates of PAM5
strut ends into the basis necessary to generate gromacs virtual
particles (which are used to represent the ends of those struts).

First it reads the locations of the three real atoms wh...
NanoCAD-master
cad/plugins/NanoDynamics-1/TranslateStruts.py