text stringlengths 1 93.6k |
|---|
self.c_table = name
|
def make_table(self):
    """Build an open-addressed hash table (linear probing) over self.items.

    The capacity is a power of two comfortably above len(self.items).
    Each occupied slot holds (key_hash, value + 1); the +1 keeps real
    entries distinguishable from the 0 sentinel that the generated C
    probe loop treats as "empty".  When two keys produce the same hash,
    the first inserted entry wins and later ones are silently dropped.

    NOTE(review): an empty item list would raise on log2(0) — assumes
    the lexicon is never empty; confirm against callers.
    """
    count = len(self.items)
    # Power-of-two capacity, roughly 2x-4x the item count, keeping
    # probe chains short and allowing the C side to mask with size-1.
    capacity = 1 << ceil(log2(count) + 0.5)
    slots = [None] * capacity

    narrow = self.config.lexicon_hash_bits == 32
    if self.lower:
        # Case-folded hashing via the *trans variants.
        if narrow:
            hash_key = lambda s: hash32trans(s.lower())
        else:
            hash_key = lambda s: hash64trans(s.lower())
    else:
        if narrow:
            hash_key = lambda s: hash32(1, s.encode('utf-8'))
        else:
            hash_key = lambda s: hash64(1, s.encode('utf-8'))

    for word, label in self.items:
        h = hash_key(word)
        slot = h % capacity
        # Linear probing: advance past occupied slots whose hash differs;
        # stop on an empty slot or a slot holding the same hash.
        while slots[slot] is not None and slots[slot][0] != h:
            slot = (slot + 1) % capacity
        if slots[slot] is None:
            slots[slot] = (h, label + 1)
    return slots
|
def lookup(self, form):
    """Create a Lookup of *form* against this lexicon.

    Side effect: if 'normalize' appears in the form's translations,
    the lexicon switches to case-folded hashing (self.lower = True)
    for subsequent make_table() calls.  NOTE(review): the flag is
    never reset, so one normalized lookup affects every later table
    build — confirm this is intentional.
    """
    translations = form.get_translations()
    if 'normalize' in translations:
        self.lower = True
    return Lookup(form, self)
|
@staticmethod
def from_file(name, filename, config):
    """Load a lexicon from a tab-separated file.

    Each line must be exactly "<key>\\t<count>"; counts are parsed as
    ints.  Returns a WCLexicon built from the (key, count) pairs in
    file order.

    Raises:
        ValueError: if a line does not have exactly two tab-separated
            fields, or if a count is not a valid integer.  (Previously
            a bare ``assert``, which is silently stripped under
            ``python -O`` and gave no hint of the offending line.)
    """
    items = []
    with open(filename, 'r', encoding='utf-8') as f:
        for lineno, line in enumerate(f, 1):
            fields = line.rstrip('\n').split('\t')
            if len(fields) != 2:
                raise ValueError(
                    '%s:%d: expected 2 tab-separated fields, got %d'
                    % (filename, lineno, len(fields)))
            key, count = fields
            items.append((key, int(count)))
    return WCLexicon(name, items, config)
|
def c_emit(self, f):
    """Emit the hash table and its C lookup helper to file object *f*.

    Writes three pieces of C: a #define for the table size, a static
    array of hash<bits>_kv_label entries, and an inline
    <name>_get_wc() function that mirrors make_table()'s linear
    probing (value 0 marks an empty slot and terminates the probe).
    """
    table = self.make_table()
    f.write('#define %s 0x%x\n\n' % (
        self.c_size, len(table)))

    def c_kv(entry):
        # Empty slot: value 0 is the sentinel the C probe loop tests for.
        if entry is None:
            return '{ 0, 0 }'
        key_hash, value = entry
        # BUGFIX: make_table() already stores value + 1 (so real entries
        # stay above the 0 sentinel); the previous `value + 1` here
        # shifted every emitted label by one more (value + 2 total).
        # Emit the stored value as-is.
        return '{ 0x%x, %d }' % (key_hash, value)

    body = '\n'.join(
        '    %s%s' % (c_kv(t), '' if i == len(table) - 1 else ',')
        for i, t in enumerate(table))
    f.write('static const hash%d_kv_label %s[%s] = {\n%s\n};\n\n' % (
        self.config.lexicon_hash_bits, self.c_table, self.c_size, body))
    f.write('''
static inline label %s_get_wc(uint%d_t key) {
    size_t i = key & 0x%x;
    for (;;) {
        if (%s[i].hash == key) return %s[i].value;
        if (%s[i].value == 0) return 0;
        i = (i + 1) & 0x%x;
    }
}
''' % (self.name, self.config.lexicon_hash_bits, len(table) - 1,
       self.c_table, self.c_table, self.c_table, len(table) - 1))
|
def c_lookup(self, c_key):
    """Return the C expression that looks up *c_key* in this lexicon."""
    return '{0}_get_wc({1})'.format(self.name, c_key)
|
# <FILESEP>
|
from gatgnn.data import *
|
from gatgnn.model import *
|
from gatgnn.pytorch_early_stopping import *
|
from gatgnn.file_setter import use_property
|
from gatgnn.utils import *
|
# MOST CRUCIAL DATA PARAMETERS
import argparse  # explicit import; previously supplied only via a star import


def _str2bool(value):
    """Parse a boolean command-line flag.

    BUGFIX helper: ``type=bool`` in add_argument treats every non-empty
    string as True (``bool('False') is True``), so passing
    ``--use_hidden_layers False`` could never disable the option.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)


parser = argparse.ArgumentParser(description='GATGNN')
parser.add_argument('--property', default='bulk-modulus',
                    choices=['absolute-energy', 'band-gap', 'bulk-modulus',
                             'fermi-energy', 'formation-energy',
                             'poisson-ratio', 'shear-modulus', 'new-property'],
                    help='material property to train (default: bulk-modulus)')
parser.add_argument('--data_src', default='CGCNN', choices=['CGCNN', 'MEGNET', 'NEW'],
                    help='selection of the materials dataset to use (default: CGCNN)')
# MOST CRUCIAL MODEL PARAMETERS
parser.add_argument('--num_layers', default=3, type=int,
                    help='number of AGAT layers to use in model (default:3)')
parser.add_argument('--num_neurons', default=64, type=int,
                    help='number of neurons to use per AGAT Layer(default:64)')
parser.add_argument('--num_heads', default=4, type=int,
                    help='number of Attention-Heads to use per AGAT Layer (default:4)')
# was type=bool: any non-empty string parsed as True
parser.add_argument('--use_hidden_layers', default=True, type=_str2bool,
                    help='option to use hidden layers following global feature summation (default:True)')
parser.add_argument('--global_attention', default='composition', choices=['composition', 'cluster'],
                    help='selection of the unpooling method as referenced in paper GI M-1 to GI M-4 (default:composition)')
parser.add_argument('--cluster_option', default='fixed', choices=['fixed', 'random', 'learnable'],
                    help='selection of the cluster unpooling strategy referenced in paper GI M-1 to GI M-4 (default: fixed)')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.