| python_code (string, lengths 0–4.04M) | repo_name (string, lengths 7–58) | file_path (string, lengths 5–147) |
|---|---|---|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import pdb
import sys
__all__ = ["set_trace"]
_stdin = [None]
_stdin_lock = mult... | ClassyVision-main | classy_vision/generic/pdb.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
def debug_info(type, value, tb):
if hasattr(sys, "ps1") or not sys.stderr.isatty():
sys.__e... | ClassyVision-main | classy_vision/generic/debug.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import defaultdict, deque
from time import perf_counter
from typing import List, Mapp... | ClassyVision-main | classy_vision/generic/perf_stats.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from classy_vision.generic.util import is_pos_int
def add_generic_args(parser):
"""
... | ClassyVision-main | classy_vision/generic/opts.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections.abc as abc
import logging
import operator
from typing import Any, Callable, Dict, List, Optional, ... | ClassyVision-main | classy_vision/generic/profiler.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import tempfile
from typing import Any, Callable, List, Tuple
import torch
# Default to GPU 0
_cuda_devi... | ClassyVision-main | classy_vision/generic/distributed_util.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .classy_hub_interface import ClassyHubInterface
__all__ = ["ClassyHubInterface"]
| ClassyVision-main | classy_vision/hub/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Iterator, List, Optional, Union
import torch
import torch.nn as nn
from classy_vis... | ClassyVision-main | classy_vision/hub/classy_hub_interface.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .classy_trainer import ClassyTrainer
from .distributed_trainer import DistributedTrainer
from .local_trainer imp... | ClassyVision-main | classy_vision/trainer/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from classy_vision.generic.distributed_util import barrier
from classy_vision.tasks import ClassyTask
class Classy... | ClassyVision-main | classy_vision/trainer/classy_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import torch
from classy_vision.generic.distributed_util import (
get_rank,
get_wor... | ClassyVision-main | classy_vision/trainer/distributed_trainer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
from classy_vision.generic.distributed_util import set_cpu_device, set_cu... | ClassyVision-main | classy_vision/trainer/local_trainer.py |
ClassyVision-main | classy_vision/hydra/__init__.py | |
ClassyVision-main | classy_vision/hydra/conf/__init__.py | |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json,sys,os
output_path = 'output/'
def json_save(path, obj):
with open(path, 'w') as f:
json.dump(obj, f)
def os_... | CoDraw-master | script/preprocess.py |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspa... | torchhalp-master | setup.py |
import numpy as np
import pytest
import torch
from torch.autograd import Variable
from examples import regression
from torchhalp.optim import SVRG
from utils import *
np.random.seed(0xdeadbeef)
#========================================================================================
# SVRG implementations
#========... | torchhalp-master | test/test_svrg.py |
torchhalp-master | test/__init__.py | |
import pytest
import numpy as np
import torch
from torch.autograd import Variable
from utils import *
from torchhalp.optim import HALP
from examples import regression
np.random.seed(0xdeadbeef)
#========================================================================================
# Helpers
#=====================... | torchhalp-master | test/test_halp.py |
import math
import torch
import pytest
import numpy as np
from utils import iter_indices
import torchhalp.quantize
def check_saturation(m1, scale_factor, bits):
min_val = -scale_factor*math.pow(2, bits-1)
max_val = scale_factor*(math.pow(2, bits-1) - 1)
m2 = m1.clone()
for i in iter_indices(m2):
m2[i] = max... | torchhalp-master | test/test_quantize.py |
import numpy as np
from itertools import product
def stablesoftmax(x):
"""Compute the softmax of vector x in a numerically stable way."""
shiftx = x - np.max(x, axis=1).reshape((-1,1))
exps = np.exp(shiftx)
return exps / np.sum(exps, axis=1).reshape(-1,1)
def logistic_grad(x, y, w, avg=False):
"""... | torchhalp-master | test/utils.py |
torchhalp-master | examples/__init__.py | |
from utils import build_model, SynthDataset
| torchhalp-master | examples/regression/__init__.py |
import torch
import torch.utils.data as data
class SynthDataset(data.Dataset):
def __init__(self, data, labels):
self.data = data
self.labels = labels
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
return self.data[idx], self.labels[idx]
def build_... | torchhalp-master | examples/regression/utils.py |
import torch
import torch.utils.data as data
from torch.autograd import Variable
from torch import optim
import numpy as np
import argparse
from sklearn import linear_model, datasets
from utils import SynthDataset
from torchhalp.optim import SVRG, HALP
import matplotlib
matplotlib.use('pdf') # uncomment to run on r... | torchhalp-master | examples/regression/main.py |
# MIT License
# Copyright (c) 2017 liukuang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publi... | torchhalp-master | examples/cifar10/resnet.py |
# MIT License
# Copyright (c) 2017 liukuang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publi... | torchhalp-master | examples/cifar10/utils.py |
# MIT License
# Copyright (c) 2017 liukuang
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publi... | torchhalp-master | examples/cifar10/main.py |
import math
import torch
# Modified from
# https://github.com/aaron-xichen/pytorch-playground/blob/master/utee/quant.py
def quantize_(input, scale_factor, bits, biased=False):
assert bits >= 1, bits
bound = math.pow(2.0, bits-1)
min_val = - bound
max_val = bound - 1
if biased:
adj_val = 0.5
... | torchhalp-master | torchhalp/quantize.py |
torchhalp-master | torchhalp/__init__.py | |
from torch.optim.optimizer import Optimizer, required
import torch
from torch.autograd import Variable
import copy, logging
class SVRG(torch.optim.SGD):
"""Implements stochastic variance reduction gradient descent.
Args:
params (iterable): iterable of parameters to optimize
lr (float): learning... | torchhalp-master | torchhalp/optim/svrg.py |
from torch.optim.optimizer import Optimizer, required
import torch
from torch.autograd import Variable
import copy, logging
import math
import torchhalp.quantize
class HALP(torch.optim.SGD):
"""Implements high-accuracy low-precision algorithm.
Args:
params (iterable): iterable of parameters to optimiz... | torchhalp-master | torchhalp/optim/halp.py |
from svrg import SVRG
from halp import HALP
| torchhalp-master | torchhalp/optim/__init__.py |
import torch
import numpy as np
import os, sys
from sklearn.manifold import Isomap
import utils.distortions as dis
import utils.load_graph as load_graph
module_path = os.path.abspath(os.path.join('./pytorch'))
if module_path not in sys.path:
sys.path.append(module_path)
import graph_helpers as gh
from hyperbolic_... | hyperbolics-master | iso_comp.py |
import glob, os, sys
import pandas as pd
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import iso_comp
if __name__ == '__main__':
run_name = sys.argv[1]
rows = []
for f in sorted(glob.glob(run_name + '/*.emb.final')):
line = os.... | hyperbolics-master | run_isomaps.py |
import matplotlib as mpl
import matplotlib.pyplot as plt
import requests
import numpy as np
import json
from scipy.sparse import csr_matrix
import networkx as nx
from collections import defaultdict
import os
def make_edge_set(): return ([],([],[]))
def add_edge(e, i,j):
(v,(row,col)) = e
row.append(i)
co... | hyperbolics-master | products/wikidata_relextract.py |
import logging, argh
import os, sys
import networkx as nx
import numpy as np
# root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path.insert(0, root_dir)
import utils.load_graph as load_graph
import utils.vis as vis
import utils.distortions as dis
import pytorch.graph_helpers as gh
def Ka(... | hyperbolics-master | products/curv.py |
import numpy as np | hyperbolics-master | analysis/load_emb.py |
# Baselines using ancestor encoding:
import networkx as nx
import os, sys
import subprocess
edges_dir = '../data/edges/'
all_files = os.listdir(edges_dir)
out = open('./spanning_forest_avgs.txt', 'w')
for file in all_files:
if os.path.isdir(edges_dir+file):
continue
print("Working on ", edges_dir+fi... | hyperbolics-master | utils/baselines.py |
# library of useful hyperbolic functions
import numpy as np
# Reflection (circle inversion of x through orthogonal circle centered at a)
def isometric_transform(a, x):
r2 = np.linalg.norm(a)**2 - (1.0)
return r2/np.linalg.norm(x - a)**2 * (x-a) + a
# Inversion taking mu to origin
def reflect_at_zero(mu,x):
... | hyperbolics-master | utils/hyp_functions.py |
# This implements the algorithm for finding a good tree embedding from
import networkx as nx
import scipy.sparse.csgraph as csg
import numpy as np
import time, argh
import data_prep as dp
import distortions as dis
import load_dist as ld
import pickle
from joblib import Parallel, delayed
import multiprocessing
# get... | hyperbolics-master | utils/steiner.py |
hyperbolics-master | utils/__init__.py | |
# visualization functions
import numpy as np
import networkx as nx
import os, sys
from itertools import product, combinations
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import utils.hyp_functions as hf
import torch
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as a... | hyperbolics-master | utils/vis.py |
# load the first 3 graphs's distance matrices:
import data_prep as dp
import load_dist as ld
for i in (6,12,13):
G = dp.load_graph(i)
ld.save_dist_mat(G,"dists/dist_mat"+str(i)+".p")
| hyperbolics-master | utils/load_distances.py |
import nltk
from nltk.corpus import wordnet as wn
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import floyd_warshall, connected_components
from collections import defaultdict
import numpy as np
import networkx as nx
import json
import time
from co... | hyperbolics-master | utils/wordnet_forest_prep.py |
# This is to load all of our data
import networkx as nx
import scipy as sp
import numpy as np
# from Bio import Phylo
# import nltk.corpus as nc
# import utils.word_net_prep as wnp
def load_graph(opt):
if opt == 1:
G = nx.read_edgelist("data/facebook_combined.txt")
elif opt == 2:
G = nx.read_e... | hyperbolics-master | utils/data_prep.py |
from nltk.corpus import wordnet as wn
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import floyd_warshall, connected_components
from collections import defaultdict
import numpy as np
import networkx as nx
# for adding edges in CSR format
def make_e... | hyperbolics-master | utils/word_net_prep.py |
# distortions.py
# python code to compute distortion/MAP
import numpy as np
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
def entry_is_good(h, h_rec): return (not np.isnan(h_rec)) and (not np.isinf(h_rec)) and h_rec != 0 and h != 0
def distortion_... | hyperbolics-master | utils/distortions.py |
# load_dist.py
import networkx as nx
import numpy as np
import pickle
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
#import data_prep as dp
import time
import torch
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())... | hyperbolics-master | utils/load_dist.py |
# This is to load data
# the graph needs to be prepared; for example utils.data_prep preprocesses and saves prepared edge lists
import networkx as nx
# def load_graph(file_name, directed=False):
# container = nx.DiGraph() if directed else nx.Graph()
# G = nx.read_edgelist(file_name, data=(('weight',float),), ... | hyperbolics-master | utils/load_graph.py |
from __future__ import unicode_literals, print_function, division
import os
import numpy as np
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
i... | hyperbolics-master | scratch/tree_mapping.py |
"""This file contains core hyperbolic operations for learning modules."""
import numpy as np
import random
import os
import logging
from numpy import linalg as la
from numpy import random
import torch
import torch.nn.functional as F
import torch.nn as nn
EPS = 1e-15
PROJ_EPS = 1e-5
MAX_TANH_ARG = 15.0
def torch_no... | hyperbolics-master | scratch/learning_util.py |
from __future__ import unicode_literals, print_function, division
import os
import numpy as np
import scipy.sparse.csgraph as csg
from joblib import Parallel, delayed
import multiprocessing
import networkx as nx
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
i... | hyperbolics-master | scratch/mapping_utils.py |
import argh
import os
import subprocess
import itertools
# ranks = [2,5,10,50,100,200]
ranks = [200]
def run_comb2(run_name, datasets):
os.makedirs(f"{run_name}/comb_dim2", exist_ok=True)
params = []
rank = 2
epss = [1.0, 0.1]
precision = 8192
for dataset, eps in itertools.product(datasets, eps... | hyperbolics-master | scripts/run_exps.py |
import nltk
from nltk.corpus import wordnet as wn
import numpy as np
import networkx as nx
from scipy.sparse import csr_matrix
import json
from collections import defaultdict
import matplotlib.pyplot as plt
def make_edge_set(): return ([],([],[]))
def add_edge(e,i,j):
(v,(row,col)) = e
row.append(i)
col.... | hyperbolics-master | scripts/wn_small_gen.py |
import argh, os
from collections import defaultdict
#cat run_file.sh | parallel -P 4 "source path.src; bash -c {}"
def work_command(run_name, dataset, rank, gpu, batch_size, epochs, scale):
run_stem = f"{run_name}/dataset_{dataset}.r={rank}"
exec_str = f"CUDA_VISIBLE_DEVICES=\"{gpu}\" python pytorch/pytorch_... | hyperbolics-master | scripts/generate_pytorch.py |
import os
import subprocess
import itertools
import random
ranks = [10, 20]
for file in os.listdir(".\data\hmds-graphs"):
file_base = file.split('.')[0]
cmd_base = "julia hMDS\hmds-simple.jl"
cmd_edges = " -d data\edges\\" + file_base + ".edges"
cmd_emb = " -k data\emb\\" + file_base + ".emb"
... | hyperbolics-master | scripts/hmds-runs.py |
import sys, os, subprocess
import shutil
import numpy as np
import pandas
def comb(edge_file, distance_file, flags):
comb_cmd = ['julia', 'combinatorial/comb.jl',
'--dataset', edge_file,
'--save-distances', distance_file] + flags
print(comb_cmd)
print()
subprocess.run(co... | hyperbolics-master | scripts/comb_stats.py |
import argh, os
from collections import defaultdict
#cat run_file.sh | parallel -P 4 "source path.src; bash -c {}"
def work_command(run_name, dataset, rank, gpu, batch_size, epochs, scale):
run_stem = f"{run_name}/dataset_{dataset}.r={rank}"
exec_str = f"CUDA_VISIBLE_DEVICES=\"{gpu}\" python pytorch/pytorch_... | hyperbolics-master | scripts/generate_pytorch_hp.py |
import os
import argh
import subprocess
import itertools
import random
# ranks = [2,5,10,50,100,200]
datasets = [
# "synthetic/sierp-C50-2",
# "synthetic/sierp-C5-6",
# "synthetic/diamond7"
# "synthetic/sierp-K3-8"
# "synthetic/tree-20-3"
# "smalltree"
# "bio-yeast", # 1458
# "web-edu"... | hyperbolics-master | scripts/products.py |
import glob, os, sys
import pandas as pd
if __name__ == '__main__':
run_name = sys.argv[1]
rows = []
for f in sorted(glob.glob(run_name + '/*.stat')):
# line = os.path.splitext(os.path.splitext(os.path.basename(f))[0])[0] + ' '
# with open(f, "r") as g:
# line += g.readline()
... | hyperbolics-master | scripts/collect_stats.py |
import argh, os
def work_command(run_name, dataset, rank, scale, prec, tol):
run_stem = f"{run_name}/dataset_{dataset}.r={rank}"
exec_str = f" julia mds-scale.jl {dataset} {rank} {scale} {prec} {tol} > {run_stem}.log"
return exec_str
def get_scale_dict(scale_file):
with open(scale_file) as fh: ls = f... | hyperbolics-master | scripts/generate_mds.py |
# Copy and modified from
#
# https://github.com/mleszczy/pytorch_optimizers
#
from torch.optim.optimizer import Optimizer, required
import torch
import copy, logging
from torch.autograd import Variable
from hyperbolic_parameter import Hyperbolic_Parameter
#TODO(mleszczy): Be able to inherit from different optimizers... | hyperbolics-master | pytorch/svrg.py |
import math
import numpy as np
import torch
import copy
import logging
import os
import pickle as cp
# eps for numerical stability
eps = 1e-6
class YFOptimizer(object):
def __init__(self, var_list, lr=0.0001, mu=0.0, clip_thresh=None, weight_decay=0.0,
beta=0.999, curv_win_width=20, zero_debias=True, sparsity_d... | hyperbolics-master | pytorch/yellowfin.py |
import logging, argh
import os, sys
import networkx as nx
import random
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
import matplotlib as mpl
if torch.cuda.is_available(): mpl.use('Agg')
import matplotlib.pyplot as plt
if torch.cuda.is_a... | hyperbolics-master | pytorch/pytorch_hyperbolic.py |
import utils.data_prep as dp
import pytorch.graph_helpers as gh
import numpy as np
import utils.distortions as dis
import utils.load_graph as load_graph
import torch, logging
from math import sqrt
def cudaify(x): return x.cuda() if torch.cuda.is_available() else x
def compute_d(u,l,n):
if np.min(u) < 0... | hyperbolics-master | pytorch/mds_warmstart.py |
import numpy as np
import torch
from torch import nn
from hyperbolic_parameter import PoincareParameter, EuclideanParameter, SphericalParameter, HyperboloidParameter
import logging
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#
# Our models
#
class Hyperbolic_Mean(nn.Module):
def __ini... | hyperbolics-master | pytorch/hyperbolic_models.py |
# Should be moved to utility
from multiprocessing import Pool
import networkx as nx
import scipy.sparse.csgraph as csg
import logging
import numpy as np
def djikstra_wrapper( _x ):
(mat, x) = _x
return csg.dijkstra(mat, indices=x, unweighted=False, directed=False)
def build_distance(G, scale, num_workers=None... | hyperbolics-master | pytorch/graph_helpers.py |
import torch
from torch import nn
from torch.autograd import Variable
import logging
import numpy as np, math
import random
def dot(x,y): return torch.sum(x * y, -1)
def acosh(x):
return torch.log(x + torch.sqrt(x**2-1))
class RParameter(nn.Parameter):
def __new__(cls, data=None, requires_grad=True, sizes=No... | hyperbolics-master | pytorch/hyperbolic_parameter.py |
import nltk
from nltk.corpus import wordnet as wn
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse.csgraph import floyd_warshall, connected_components
import operator
from collections import defaultdict
import numpy as np
import networkx as nx
import json
from... | hyperbolics-master | pytorch/analysis/intrinsic.py |
import logging, argh
import os, sys
import networkx as nx
import random
import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
import matplotlib as mpl
if torch.cuda.is_available(): mpl.use('Agg')
import matplotlib.pyplot as plt
if torch.cuda.is_av... | hyperbolics-master | pytorch/analysis/postprocess.py |
import networkx as nx
# wrapper for nx.bfs_tree that keeps weights
def get_BFS_tree(G, src):
G_BFS = nx.bfs_tree(G, src)
for edge in G_BFS.edges():
if G[edge[0]][edge[1]]:
G_BFS.add_edge(edge[0], edge[1], weight=G[edge[0]][edge[1]]['weight'])
return G_BFS
def max_degree(G):
max_d... | hyperbolics-master | combinatorial/graph_util.py |
import os
import argh
import numpy as np
import pandas
import networkx as nx
import scipy.sparse.csgraph as csg
from timeit import default_timer as timer
from multiprocessing import Pool
import utils.load_graph as lg
import utils.distortions as dis
import graph_util as gu
def compute_row_stats(i, n, adj_mat_original... | hyperbolics-master | combinatorial/stats.py |
import numpy as np
import os
filename = 'ha30.txt'
# fileout = 'usca312.edges'
if __name__ == '__main__':
base, ext = os.path.splitext(filename)
fileout = f'{base}.edges'
D = np.loadtxt(filename)
print(D.shape)
n = D.shape[0]
with open(fileout, 'w') as fout:
for i in range(n):
... | hyperbolics-master | data/edges/preprocess_dist_matrix.py |
import networkx as nx
def make_ancestor_closure(G, name=None):
G_BFS = nx.bfs_tree(G, 0)
G_A = nx.Graph()
if name is not None:
f = open(name + ".edges", 'w')
for node in G_BFS.nodes():
curr = node
while len(list(G_BFS.predecessors(curr))):
curr = list(G_BFS.predec... | hyperbolics-master | data/edges/ancestor_tests.py |
import numpy as np
import networkx as nx
import itertools
import argh
cycle_nodes = 10
tree = nx.balanced_tree(2, 2)
nx.relabel_nodes(tree, {n : n+1 for n in tree.nodes}, copy=False)
tree.add_edge(0, 1)
tree_nodes = len(tree.nodes())
copies = []
for i in range(cycle_nodes):
T = tree.copy()
copies.append(nx.r... | hyperbolics-master | data/edges/synthetic/cycle-tree.py |
import numpy as np
import networkx as nx
import itertools
import argh
d = 6
edges = [(0,1), (1,2), (2,3), (3,0)]
n = 4
for t in range(d-1):
edges2 = []
for u,v in edges:
edges2 += [(u, n), (n, v), (v, n+1), (n+1, u)]
n += 2
edges = edges2
nx.write_edgelist(nx.Graph(edges), f"diamond{d}.e... | hyperbolics-master | data/edges/synthetic/diamond.py |
import numpy as np
import networkx as nx
import sys, os
import subprocess
# generate some random trees on the same nodes:
n = 300
t = 5
g_list = []
for i in range(t):
g_list.append(nx.random_tree(n))
# compress the tree:
G = nx.Graph()
for node in range(n):
for tree in range(t):
for edge in g_lis... | hyperbolics-master | data/edges/synthetic/compressed_tree.py |
import numpy as np
import networkx as nx
import itertools
import argh
# construct generalized Sierpinski graph
# vertices: strings of length d chosen from [n]
def construct(n=3, d=2, base='clique'):
if base in ['clique', 'K', 'k']:
base = 'K'
base_graph = list(nx.complete_graph(n).edges)
if b... | hyperbolics-master | data/edges/synthetic/sierpinski.py |
import numpy as np
import os
# -----------------------------------------------------------------------------------------------
# Read in proto
# -----------------------------------------------------------------------------------------------
solver = 'inputs/caffenet_solver_8_4gpu.prototxt'
train_val = 'inputs/caffe... | CaffeConTroll-master | experiments/batch/batch.py |
import operator
import sys
if len(sys.argv) != 2:
print 'Usage: >>> python process.py filename'
sys.exit(0)
total_num_iters = 0
f = open(sys.argv[1])
current_layer = ''
layer_to_time = {}
for line in f:
line = line.strip()
if 'BATCH:' in line:
total_num_iters += 1
elif 'Time Elapsed' in lin... | CaffeConTroll-master | tests/process_detailed_profiling.py |
import sys
import random
if len(sys.argv) != 2:
print 'Usage: >>> python generate_conv_test.py test_name'
sys.exit(0)
mB = 4
iD = 3
oD = 8
iR = 127
iC = 127
k = 11
s = 4
p = 2
test_name = sys.argv[1]
fname = 'input/conv_forward_in_' + test_name + '.txt'
f = open(fname, 'w')
print 'Creating ' + fname + '...'... | CaffeConTroll-master | tests/generate_conv_test.py |
import sys
if len(sys.argv) != 2:
print 'Usage: >>> python process_perf_test.py filename'
sys.exit(0)
f = open(sys.argv[1])
test_to_metric_table = {}
current_test = ''
current_metric = ''
for line in f:
line = line.strip()
if not line:
continue
if '[ RUN ]' in line:
current_tes... | CaffeConTroll-master | tests/process_perf_test.py |
import sys
########################################################
# Small Examples
########################################################
D = []
D.append(['a', 'b', 'c', 'd'])
D.append(['e', 'f', 'g', 'h'])
D.append(['i', 'j', 'k', 'l'])
D.append(['m', 'n', 'o', 'p'])
n = 4
k = 2
d = 1
b = 1
o = 1
# First no pa... | CaffeConTroll-master | docs/lowering/type_1/pad_stride_example.py |
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.semi_supervised import LabelSpreading
from sklearn.linear_model import SGDClassifier
class BaselineModel(object):
"""
A base class for all sklearn-esque baseline methods
"""
... | reef-master | baselines/models.py |
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
| reef-master | baselines/__init__.py |
reef-master | data/__init__.py | |
import numpy as np
import scipy
import json
import sklearn.cross_validation
from scipy import sparse
from sklearn.feature_extraction.text import CountVectorizer
def parse_file(filename):
def parse(filename):
movies = []
with open(filename) as f:
for line in f:
obj = js... | reef-master | data/loader.py |
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
#top_words=5000
def lstm_simple(train_text, y_train, test_text, y_test, bs=64, n=3):
#La... | reef-master | lstm/imdb_lstm.py |
reef-master | lstm/__init__.py | |
import numpy as np
from scipy import sparse
def log_odds(p):
"""This is the logit function"""
return np.log(p / (1.0 - p))
def odds_to_prob(l):
"""
This is the inverse logit function logit^{-1}:
l = \log\frac{p}{1-p}
\exp(l) = \frac{p}{1-p}
p = \frac{\exp(l)}{1 + \exp(l)}
"""
retur... | reef-master | program_synthesis/label_aggregator.py |
reef-master | program_synthesis/__init__.py | |
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
from program_synthesis.synthesizer import Synthesizer
from program_synthesis.verifier import Verifier
class HeuristicGenerator(object):
"""
A class to go through the synthesizer-verifier loop
"""
def __init__(self, train_prim... | reef-master | program_synthesis/heuristic_generator.py |
import numpy as np
import itertools
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
class Synthesizer(object):
"""
A class to synthesize heuristics from primitives and va... | reef-master | program_synthesis/synthesizer.py |
import numpy as np
from scipy import sparse
from label_aggregator import LabelAggregator
def odds_to_prob(l):
"""
This is the inverse logit function logit^{-1}:
l = \log\frac{p}{1-p}
\exp(l) = \frac{p}{1-p}
p = \frac{\exp(l)}{1 + \exp(l)}
"""
return np.exp(l) / (1.0 + np.exp(l))
class ... | reef-master | program_synthesis/verifier.py |
import ray
from database import Database, save_results_to_es
from utils.experiment_utils import *
# from experiment_driver import map_runstats_to_modelpath
import pickle
import os
import json
from utils.metadata_utils import append_experiment_metadata
ray.init(address="auto")
datasets = ["agnews"]
encoders = ["rnn",... | ludwig-benchmarking-toolkit-main | upload_to_db.py |
import os
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
ENCODER_CONFIG_DIR = os.path.join(PATH_HERE, "model-configs")
# EXPERIMENT_CONFIGS_DIR = '/experiments/ludwig-bench-textclassification/experiment-configs'
EXPERIMENT_CONFIGS_DIR = os.path.join(PATH_HERE, "hyperopt-experiment-configs")
DATASET_CACHE_DIR =... | ludwig-benchmarking-toolkit-main | globals.py |
import copy
import json
import logging
import os
import ray
import socket
from elasticsearch import Elasticsearch
from lbt.utils.experiment_utils import (
format_fields_float,
get_model_ckpt_paths,
hash_dict,
substitute_dict_parameters,
)
# from utils.metadata_utils import append_experiment_metadata
f... | ludwig-benchmarking-toolkit-main | database.py |
import argparse
import datetime
import logging
import ray
import globals
from lbt.utils.experiment_utils import set_globals, load_yaml
from lbt.experiments import (
run_experiments,
reproduce_experiment,
download_data,
)
from lbt.datasets import DATASET_REGISTRY
from lbt.experiments import (
run_exper... | ludwig-benchmarking-toolkit-main | experiment_driver.py |
__version__ = "0.3.0.post1"
| ludwig-benchmarking-toolkit-main | lbt/__init__.py |
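The rows above form a three-column table of Python source files keyed by repository and path. As a convenience, here is a minimal sketch of how such a table could be loaded and filtered with pandas; the file name `code_rows.csv` is hypothetical and not part of the listing, and the columns are assumed to be plain strings.

# Minimal sketch (hypothetical file name): load the (python_code, repo_name, file_path)
# table and summarize it per repository.
import pandas as pd

df = pd.read_csv("code_rows.csv", usecols=["python_code", "repo_name", "file_path"])

# Count how many files each repository contributes.
per_repo = df.groupby("repo_name")["file_path"].count().sort_values(ascending=False)
print(per_repo.head())

# Pull every snippet from one repository subtree, e.g. the hyperbolics utilities.
hyperbolics_utils = df[
    (df["repo_name"] == "hyperbolics-master")
    & df["file_path"].str.startswith("utils/")
]
print(hyperbolics_utils["file_path"].tolist())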