repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
netcdf4-python | netcdf4-python-master/test/tst_stringarr.py | from netCDF4 import Dataset, stringtochar, chartostring
import random, numpy, string
import unittest
import os
from numpy.testing import assert_array_equal, assert_array_almost_equal
def generateString(length, alphabet=string.ascii_letters + string.digits + string.punctuation):
    """Return a random string of *length* characters drawn from *alphabet*."""
    return ''.join(random.choice(alphabet) for _ in range(length))
# Test conversion of arrays of fixed-length strings to arrays of
# characters (with an extra trailing dimension), and vice-versa.
FILE_NAME = 'tst_stringarr.nc'
FILE_FORMAT = 'NETCDF4_CLASSIC'
nrecs, n2, nchar = 4, 20, 12
# Random fixed-length byte strings, their unicode counterpart, and the
# equivalent character array.
data = numpy.array(
    [[generateString(nchar) for _ in range(n2)] for _ in range(nrecs)],
    dtype='S%d' % nchar)
datau = data.astype('U')
datac = stringtochar(data, encoding='ascii')
class StringArrayTestCase(unittest.TestCase):
    """Round-trip fixed-length string arrays through 'S1' character
    variables, both explicitly (stringtochar/chartostring) and
    automatically via the _Encoding attribute."""
    def setUp(self):
        self.file = FILE_NAME
        nc = Dataset(FILE_NAME,'w',format=FILE_FORMAT)
        nc.createDimension('n1',None)
        nc.createDimension('n2',n2)
        nc.createDimension('nchar',nchar)
        v = nc.createVariable('strings','S1',('n1','n2','nchar'))
        v2 = nc.createVariable('strings2','S1',('n1','n2','nchar'))
        # if _Encoding set, string array should automatically be converted
        # to a char array and vice-versa
        v2._Encoding = 'ascii'
        v3 = nc.createVariable('strings3','S1',('n1','n2','nchar'))
        v3._Encoding = 'ascii'
        # the conversion is loop-invariant, so do it once up front
        # (was recomputed on every record).
        chars = stringtochar(data, encoding='ascii')
        for nrec in range(nrecs):
            v[nrec] = chars[nrec]
        v2[:-1] = data[:-1]
        v2[-1] = data[-1]
        v2[-1,-1] = data[-1,-1] # write single element
        # ndarray.tostring() was removed in NumPy 2.0; tobytes() is the
        # drop-in equivalent.
        v2[-1,-1] = data[-1,-1].tobytes() # write single python string
        # _Encoding should be ignored if an array of characters is specified
        v3[:] = stringtochar(data, encoding='ascii')
        nc.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing functions for converting arrays of chars to fixed-len strings"""
        nc = Dataset(FILE_NAME)
        assert nc.dimensions['n1'].isunlimited() == True
        v = nc.variables['strings']
        v2 = nc.variables['strings2']
        v3 = nc.variables['strings3']
        assert v.dtype.str[1:] in ['S1','U1']
        assert v.shape == (nrecs,n2,nchar)
        for nrec in range(nrecs):
            data2 = chartostring(v[nrec],encoding='ascii')
            assert_array_equal(data2,datau[nrec])
        data2 = v2[:]
        data2[0] = v2[0]
        data2[0,1] = v2[0,1]
        assert_array_equal(data2,datau)
        data3 = v3[:]
        assert_array_equal(data3,datau)
        # these slices should return a char array, not a string array
        data4 = v2[:,:,0]
        assert(data4.dtype.itemsize == 1)
        assert_array_equal(data4, datac[:,:,0])
        data5 = v2[0,0:nchar,0]
        assert(data5.dtype.itemsize == 1)
        assert_array_equal(data5, datac[0,0:nchar,0])
        # test turning auto-conversion off.
        v2.set_auto_chartostring(False)
        data6 = v2[:]
        assert(data6.dtype.itemsize == 1)
        assert_array_equal(data6, datac)
        nc.set_auto_chartostring(False)
        data7 = v3[:]
        assert(data7.dtype.itemsize == 1)
        assert_array_equal(data7, datac)
        nc.close()
# Standard unittest entry point when this file is run as a script.
if __name__ == '__main__':
    unittest.main()
| 3,409 | 36.472527 | 95 | py |
netcdf4-python | netcdf4-python-master/test/tst_unlimdim.py | import sys
import unittest
import os
import tempfile
import numpy as np
from numpy.random.mtrand import uniform
from numpy.testing import assert_array_equal, assert_array_almost_equal
import netCDF4
# Exercise variables that use unlimited dimensions: create them, write
# data to them, and read it back.
# Module-level fixture: a random n1dim x n2dim x n3dim array in [0, 100).
n1dim, n2dim, n3dim = 4, 10, 8
ranarr = 100. * uniform(size=(n1dim, n2dim, n3dim))
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
class UnlimdimTestCase(unittest.TestCase):
    """Write to variables with one and with two unlimited dimensions and
    check the grown shapes and data on read-back."""
    def setUp(self):
        self.file = FILE_NAME
        f = netCDF4.Dataset(self.file, 'w')
        # foo has a single unlimited dimension
        f.createDimension('n1', n1dim)
        f.createDimension('n2', None)
        f.createDimension('n3', n3dim)
        foo = f.createVariable('data1', ranarr.dtype.str[1:], ('n1','n2','n3'))
        # write some data to it.
        #foo[:,0:n2dim,:] = ranarr
        foo[:] = ranarr
        # writing past the current extent grows the unlimited dimension.
        foo[:,n2dim:,:] = 2.*ranarr
        # bar has 2 unlimited dimensions
        f.createDimension('n4', None)
        f.createDimension('n5', n2dim)
        f.createDimension('n6', None)
        # write some data to it.
        bar = f.createVariable('data2', ranarr.dtype.str[1:], ('n4','n5','n6'))
        # bar[0:n1dim,:, 0:n3dim] = ranarr
        bar[0:n1dim,:, 0:n3dim] = 2.0
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing unlimited dimensions"""
        f = netCDF4.Dataset(self.file, 'r')
        foo = f.variables['data1']
        # check shape.
        self.assertTrue(foo.shape == (n1dim,2*n2dim,n3dim))
        # check data.
        assert_array_almost_equal(foo[:,0:n2dim,:], ranarr)
        assert_array_almost_equal(foo[:,n2dim:2*n2dim,:], 2.*ranarr)
        bar = f.variables['data2']
        # check shape.
        self.assertTrue(bar.shape == (n1dim,n2dim,n3dim))
        # check data.
        #assert_array_almost_equal(bar[:,:,:], ranarr)
        assert_array_almost_equal(bar[:,:,:], 2.*np.ones((n1dim,n2dim,n3dim),ranarr.dtype))
        f.close()
# Standard unittest entry point when this file is run as a script.
if __name__ == '__main__':
    unittest.main()
| 2,210 | 32 | 91 | py |
netcdf4-python | netcdf4-python-master/test/tst_masked6.py | import unittest
import os
import tempfile
import numpy as np
from numpy import ma
from numpy.testing import assert_array_almost_equal
from netCDF4 import Dataset
# Test automatic conversion of masked arrays (set_always_mask())
class SetAlwaysMaskTestBase(unittest.TestCase):
    """Base object for tests checking the functionality of set_always_mask()"""
    def setUp(self):
        # Scratch file holding two "i2" variables: 'v' from a plain array
        # and 'w' from a masked array with one masked element.
        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
        self.v = np.array([4, 3, 2, 1], dtype="i2")
        self.w = np.ma.array([-1, -2, -3, -4], mask=[False, True, False, False], dtype="i2")
        f = Dataset(self.testfile, 'w')
        _ = f.createDimension('x', None)
        v = f.createVariable('v', "i2", 'x')
        w = f.createVariable('w', "i2", 'x')
        v[...] = self.v
        w[...] = self.w
        f.close()
    def tearDown(self):
        # Remove the temporary file created in setUp.
        os.remove(self.testfile)
class SetAlwaysMaskTrue(SetAlwaysMaskTestBase):
    def test_always_mask(self):
        """Testing auto-conversion of masked arrays with no missing values to regular arrays."""
        f = Dataset(self.testfile)
        f.variables["v"].set_always_mask(True) # The default anyway...
        v = f.variables['v'][:]
        # with always_mask=True even a variable with no missing values
        # comes back as a MaskedArray.
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertTrue(isinstance(v, ma.core.MaskedArray))
        assert_array_almost_equal(v, self.v)
        w = f.variables['w'][:]
        self.assertTrue(isinstance(w, np.ndarray))
        self.assertTrue(isinstance(w, ma.core.MaskedArray))
        assert_array_almost_equal(w, self.w)
        f.close()
# NOTE(review): class name misspells 'Always'; kept as-is since unittest
# discovers it by pattern and renaming could break external references.
class SetAlwyasMaskFalse(SetAlwaysMaskTestBase):
    def test_always_mask(self):
        """Testing auto-conversion of masked arrays with no missing values to regular arrays."""
        f = Dataset(self.testfile)
        f.variables["v"].set_always_mask(False)
        v = f.variables['v'][:]
        # with always_mask=False a variable without missing values comes
        # back as a plain ndarray...
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertFalse(isinstance(v, ma.core.MaskedArray))
        assert_array_almost_equal(v, self.v)
        # ...but one with masked elements is still a MaskedArray.
        w = f.variables['w'][:]
        self.assertTrue(isinstance(w, np.ndarray))
        self.assertTrue(isinstance(w, ma.core.MaskedArray))
        assert_array_almost_equal(w, self.w)
        f.close()
class GlobalSetAlwaysMaskTest(unittest.TestCase):
    """Check that Dataset.set_always_mask() propagates the always_mask
    flag to variables in the root group and in subgroups."""
    def setUp(self):
        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
        f = Dataset(self.testfile, 'w')
        grp1 = f.createGroup('Group1')
        grp2 = f.createGroup('Group2')
        f.createGroup('Group3') # empty group
        f.createVariable('var0', "i2", ())
        grp1.createVariable('var1', 'f8', ())
        grp2.createVariable('var2', 'f4', ())
        f.close()
    def tearDown(self):
        os.remove(self.testfile)
    def runTest(self):
        # Note: The default behaviour is to always return masked
        # arrays, which is already tested elsewhere.
        f = Dataset(self.testfile, "r")
        # Without regular numpy arrays
        f.set_always_mask(True)
        v0 = f.variables['var0']
        v1 = f.groups['Group1'].variables['var1']
        v2 = f.groups['Group2'].variables['var2']
        self.assertTrue(v0.always_mask)
        self.assertTrue(v1.always_mask)
        self.assertTrue(v2.always_mask)
        # With regular numpy arrays
        f.set_always_mask(False)
        self.assertFalse(v0.always_mask)
        self.assertFalse(v1.always_mask)
        self.assertFalse(v2.always_mask)
        f.close()
# Standard unittest entry point when this file is run as a script.
if __name__ == '__main__':
    unittest.main()
| 3,641 | 25.583942 | 96 | py |
netcdf4-python | netcdf4-python-master/test/tst_masked3.py | import unittest
import os
import tempfile
import numpy as np
from numpy import ma
from numpy.testing import assert_array_almost_equal
from netCDF4 import Dataset, default_fillvals
# Test automatic conversion of masked arrays (set_auto_mask())
class SetAutoMaskTestBase(unittest.TestCase):
    """Base object for tests checking the functionality of set_auto_mask()"""
    def setUp(self):
        # 'v' holds a default _FillValue element, two good values and a
        # missing_value element; self.v_ma is the expected masked read.
        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
        self.fillval = default_fillvals["i2"]
        self.v = np.array([self.fillval, 5, 4, -9999], dtype = "i2")
        self.v_ma = ma.array([self.fillval, 5, 4, -9999], dtype = "i2", mask = [True, False, False, True])
        self.scale_factor = 10.
        self.add_offset = 5.
        self.v_scaled = self.v * self.scale_factor + self.add_offset
        self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset
        f = Dataset(self.testfile, 'w')
        _ = f.createDimension('x', None)
        v = f.createVariable('v', "i2", 'x')
        v.missing_value = np.array(-9999, v.dtype)
        # v[0] not set, will be equal to _FillValue
        v[1] = self.v[1]
        v[2] = self.v[2]
        v[3] = v.missing_value
        f.close()
    def tearDown(self):
        # Remove the temporary file created in setUp.
        os.remove(self.testfile)
class SetAutoMaskFalse(SetAutoMaskTestBase):
    def test_unscaled(self):
        """Testing auto-conversion of masked arrays for set_auto_mask(False)"""
        f = Dataset(self.testfile, "r")
        f.variables["v"].set_auto_mask(False)
        v = f.variables["v"][:]
        # raw values come back unmasked, at the stored dtype.
        self.assertEqual(v.dtype, "i2")
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertTrue(not isinstance(v, ma.core.MaskedArray))
        assert_array_almost_equal(v, self.v)
        f.close()
    def test_scaled(self):
        """Testing auto-conversion of masked arrays for set_auto_mask(False) with scaling"""
        # Update test data file
        f = Dataset(self.testfile, "a")
        f.variables["v"].scale_factor = self.scale_factor
        f.variables["v"].add_offset = self.add_offset
        f.close()
        # Note: Scaling variables is default if scale_factor and/or add_offset are present
        f = Dataset(self.testfile, "r")
        f.variables["v"].set_auto_mask(False)
        v = f.variables["v"][:]
        # scaling promotes to float64 but nothing is masked.
        self.assertEqual(v.dtype, "f8")
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertTrue(not isinstance(v, ma.core.MaskedArray))
        assert_array_almost_equal(v, self.v_scaled)
        f.close()
class SetAutoMaskTrue(SetAutoMaskTestBase):
    def test_unscaled(self):
        """Testing auto-conversion of masked arrays for set_auto_mask(True)"""
        f = Dataset(self.testfile)
        f.variables["v"].set_auto_mask(True) # The default anyway...
        v_ma = f.variables['v'][:]
        # _FillValue and missing_value elements come back masked.
        self.assertEqual(v_ma.dtype, "i2")
        self.assertTrue(isinstance(v_ma, np.ndarray))
        self.assertTrue(isinstance(v_ma, ma.core.MaskedArray))
        assert_array_almost_equal(v_ma, self.v_ma)
        f.close()
    def test_scaled(self):
        """Testing auto-conversion of masked arrays for set_auto_mask(True)"""
        # Update test data file
        f = Dataset(self.testfile, "a")
        f.variables["v"].scale_factor = self.scale_factor
        f.variables["v"].add_offset = self.add_offset
        f.close()
        # Note: Scaling variables is default if scale_factor and/or add_offset are present
        f = Dataset(self.testfile)
        f.variables["v"].set_auto_mask(True) # The default anyway...
        v_ma = f.variables['v'][:]
        # scaling promotes to float64; masking still applies.
        self.assertEqual(v_ma.dtype, "f8")
        self.assertTrue(isinstance(v_ma, np.ndarray))
        self.assertTrue(isinstance(v_ma, ma.core.MaskedArray))
        assert_array_almost_equal(v_ma, self.v_ma_scaled)
        f.close()
class GlobalSetAutoMaskTest(unittest.TestCase):
    """Check that Dataset.set_auto_maskandscale()/set_auto_mask()
    propagate the scale/mask flags to variables in all groups."""
    def setUp(self):
        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
        f = Dataset(self.testfile, 'w')
        grp1 = f.createGroup('Group1')
        grp2 = f.createGroup('Group2')
        f.createGroup('Group3') # empty group
        f.createVariable('var0', "i2", ())
        grp1.createVariable('var1', 'f8', ())
        grp2.createVariable('var2', 'f4', ())
        f.close()
    def tearDown(self):
        os.remove(self.testfile)
    def runTest(self):
        # Note: The default behaviour is to have both auto-masking and auto-scaling activated.
        # This is already tested in tst_scaled.py, so no need to repeat here. Instead,
        # disable auto-masking and auto-scaling altogether.
        f = Dataset(self.testfile, "r")
        # Neither scaling and masking enabled
        f.set_auto_maskandscale(False)
        v0 = f.variables['var0']
        v1 = f.groups['Group1'].variables['var1']
        v2 = f.groups['Group2'].variables['var2']
        self.assertFalse(v0.scale)
        self.assertFalse(v0.mask)
        self.assertFalse(v1.scale)
        self.assertFalse(v1.mask)
        self.assertFalse(v2.scale)
        self.assertFalse(v2.mask)
        # No auto-masking, but auto-scaling
        f.set_auto_maskandscale(True)
        f.set_auto_mask(False)
        self.assertTrue(v0.scale)
        self.assertFalse(v0.mask)
        self.assertTrue(v1.scale)
        self.assertFalse(v1.mask)
        self.assertTrue(v2.scale)
        self.assertFalse(v2.mask)
        f.close()
# Standard unittest entry point when this file is run as a script.
if __name__ == '__main__':
    unittest.main()
| 5,568 | 26.569307 | 106 | py |
netcdf4-python | netcdf4-python-master/test/tst_slicing.py | from netCDF4 import Dataset
from numpy.random import seed, randint
from numpy.testing import assert_array_equal, assert_equal,\
assert_array_almost_equal
import tempfile, unittest, os, random, sys
import numpy as np
# Scratch netCDF file plus a random 3-D byte array and its y-reversed
# view, shared by every test case below.
file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
xdim, ydim, zdim = 9, 10, 11
# seed(9)  # uncomment to fix the random seed
data = randint(0, 10, size=(xdim, ydim, zdim)).astype('u1')
datarev = data[:, ::-1, :]
class VariablesTestCase(unittest.TestCase):
    """Exercise slicing semantics of netCDF variables: basic, strided and
    negative slices, Ellipsis, fancy (integer/boolean array) indexing,
    unlimited dimensions, plus regression tests for specific issues."""
    def setUp(self):
        self.file = file_name
        f = Dataset(file_name,'w')
        f.createDimension('x',xdim)
        f.createDimension('xu',None)
        f.createDimension('xu2',None)
        f.createDimension('y',ydim)
        f.createDimension('z',zdim)
        f.createDimension('zu',None)
        v = f.createVariable('data','u1',('x','y','z'))
        vu = f.createVariable('datau','u1',('xu','y','zu'))
        v1 = f.createVariable('data1d', 'u1', ('x',))
        v2 = f.createVariable('data1dx', 'u1', ('xu2',))
        # variable with no unlimited dim.
        # write slice in reverse order
        v[:,::-1,:] = data
        # variable with an unlimited dimension.
        # write slice in reverse order
        #vu[0:xdim,::-1,0:zdim] = data
        vu[:,::-1,:] = data
        v1[:] = data[:, 0, 0]
        # only meaningful on 64-bit platforms (index exceeds 32-bit int).
        if sys.maxsize > 2**32:
            v2[2**31] = 1 # issue 1112 (overflow on windows)
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def test_3d(self):
        """testing variable slicing"""
        f = Dataset(self.file, 'r')
        v = f.variables['data']
        vu = f.variables['datau']
        # test return of array scalar.
        assert_equal(v[0,0,0].shape,())
        assert_array_equal(v[:], datarev)
        # test reading of slices.
        # negative value means count back from end.
        assert_array_equal(v[:-1,:-2,:-3],datarev[:-1,:-2,:-3])
        # every other element (positive step)
        assert_array_equal(v[2:-1:2,2:-2:2,2:-3:2],datarev[2:-1:2,2:-2:2,2:-3:2])
        # every other element (negative step)
        assert_array_equal(v[-1:2:-2,-2:2:-2,-3:2:-2],datarev[-1:2:-2,-2:2:-2,-3:2:-2])
        # read elements in reverse order
        assert_array_equal(v[:,::-1,:],data)
        assert_array_equal(v[::-1,:,::-1],datarev[::-1,:,::-1])
        assert_array_equal(v[xdim-1::-3,:,zdim-1::-3],datarev[xdim-1::-3,:,zdim-1::-3])
        # ellipsis slice.
        assert_array_equal(v[...,2:],datarev[...,2:])
        # variable with an unlimited dimension.
        assert_array_equal(vu[:], data[:,::-1,:])
        # read data in reverse order
        assert_array_equal(vu[:,::-1,:],data)
        # index using an integer array scalar
        i = np.ones(1,'i4')[0]
        assert_array_equal(v[i],datarev[1])
        f.close()
    def test_1d(self):
        """testing 1-d slices, integer-array and boolean-array indexing"""
        f = Dataset(self.file, 'r')
        v1 = f.variables['data1d']
        v2 = f.variables['data1dx']
        d = data[:,0,0]
        assert_equal(v1[:], d)
        if sys.maxsize > 2**32:
            assert_equal(v2[2**31], 1)
        assert_equal(v1[4:], d[4:])
        # test return of array scalar.
        assert_equal(v1[0].shape, ())
        i1 = np.array([2,3,4])
        assert_equal(v1[i1], d[i1])
        i2 = np.array([2,3,5])
        assert_equal(v1[i2], d[i2])
        assert_equal(v1[d<5], d[d<5])
        assert_equal(v1[5], d[5])
        f.close()
    def test_0d(self):
        """testing indexing of a scalar (zero-dimensional) variable"""
        f = Dataset(self.file, 'w')
        v = f.createVariable('data', float)
        v[...] = 10
        assert_array_equal(v[...], 10)
        assert_equal(v.shape, v[...].shape)
        # issue #785: always return masked array
        #assert(type(v[...]) == np.ndarray)
        assert(type(v[...]) == np.ma.core.MaskedArray)
        f.set_auto_mask(False)
        assert(type(v[...]) == np.ndarray)
        f.close()
    def test_issue259(self):
        """issue 259: scalar assignment to an empty unlimited variable"""
        dset = Dataset(self.file, 'w', format='NETCDF4_CLASSIC')
        dset.createDimension('dim', None)
        a = dset.createVariable('a', 'i', ('dim',))
        b = dset.createVariable('b', 'i', ('dim',))
        c = dset.createVariable('c', 'i', ('dim',))
        c[:] = 1 # c initially is empty, new entry created
        assert_array_equal(c[...], np.array([1]))
        b[:] = np.array([1,1])
        a[:] = 1 # a should be same as b
        assert_array_equal(a[...], b[...])
        dset.close()
    def test_issue371(self):
        """issue 371: Ellipsis combined with a slice in the same index"""
        dataset = Dataset(self.file, 'w')
        dataset.createDimension('dim', 5)
        var = dataset.createVariable('bar', 'i8', ('dim', ))
        data = [1, 2, 3, 4, 5]
        var[..., :] = data
        assert_array_equal(var[..., :], np.array(data))
        dataset.close()
    def test_issue306(self):
        """issue 306: malformed or out-of-range indices raise IndexError"""
        f = Dataset(self.file,'w')
        nlats = 7; lat = f.createDimension('lat',nlats)
        nlons = 12; lon = f.createDimension('lon',nlons)
        nlevs = 1; lev = f.createDimension('lev',nlevs)
        time = f.createDimension('time',None)
        var = f.createVariable('var',np.float64,('time','lev','lat','lon'))
        a = np.random.uniform(size=(10,nlevs,nlats,nlons))
        var[0:10] = a
        f.close()
        f = Dataset(self.file)
        aa = f.variables['var'][4,-1,:,:]
        assert_array_almost_equal(a[4,-1,:,:],aa)
        v = f.variables['var']
        try:
            aa = v[4,-2,:,:] # -2 when dimension is length 1
        except IndexError:
            pass
        else:
            raise IndexError('This test should have failed.')
        try:
            aa = v[4,...,...,:] # more than one Ellipsis
        except IndexError:
            pass
        else:
            raise IndexError('This test should have failed.')
        try:
            aa = v[:,[True,True],:,:] # boolean array too long.
        except IndexError:
            pass
        else:
            raise IndexError('This test should have failed.')
        try:
            aa = v[:,[0,1],:,:] # integer index too large
        except IndexError:
            pass
        else:
            raise IndexError('This test should have failed.')
        f.close()
    def test_issue300(self):
        """issue 300: boolean-array indexing (and issue 312: non-int scalar indices)"""
        f = Dataset(self.file,'w')
        nlats = 11; lat = f.createDimension('lat',nlats)
        nlons = 20; lon = f.createDimension('lon',nlons)
        time = f.createDimension('time',None)
        var = f.createVariable('var',np.float64,('time','lat','lon'))
        a = np.random.uniform(size=(3,nlats,nlons))
        var[[True,True,False,False,False,True]] = a
        var[0,2.0,"-1"] = 0 # issue 312
        a[0,2,-1]=0
        f.close()
        f = Dataset(self.file)
        var = f.variables['var']
        aa = var[[0,1,5]]
        bb = var[[True,True,False,False,False,True]]
        lats = np.arange(nlats); lons = np.arange(nlons)
        cc = var[-1,lats > 2,lons < 6]
        assert_array_almost_equal(a,aa)
        assert_array_almost_equal(bb,aa)
        assert_array_almost_equal(cc,a[-1,3:,:6])
        f.close()
    def test_retain_single_dims(self):
        """sequence indices keep length-1 dimensions; scalar indices drop them"""
        f = Dataset(self.file, 'r')
        v = f.variables['data']
        keys = ((0, 1, 2, 3, 4, 5, 6, 7, 8), (5,), (4,))
        shape = (9, 1, 1)
        data = v[keys]
        assert_equal(data.shape, shape)
        keys = ((0, 1, 2, 3, 4, 5, 6, 7, 8), 5, 4,)
        shape = (9,)
        data = v[keys]
        assert_equal(data.shape, shape)
        f.close()
    def test_issue743(self):
        """issue 743: reading all data from an empty unlimited variable"""
        nc = Dataset(self.file,'w',format='NETCDF3_CLASSIC')
        td = nc.createDimension('t',None)
        xd = nc.createDimension('x',33)
        yd = nc.createDimension('y',4)
        v = nc.createVariable('v',np.float64,('t','x','y'))
        nc.close()
        nc = Dataset(self.file)
        data = np.empty(nc['v'].shape, nc['v'].dtype)
        data2 = nc['v'][...]
        assert_array_equal(data,data2)
        nc.close()
    def test_issue906(self):
        """issue 906: assignment through slices of an unlimited middle dimension"""
        f = Dataset(self.file,'w')
        f.createDimension('d1',3)
        f.createDimension('d2',None)
        f.createDimension('d3',5)
        f.createVariable('v2',np.float64,('d1','d2','d3'))
        f['v2'][:] = np.zeros((3,4,5))
        f['v2'][0,:,0] = np.arange(4)
        f['v2'][0,:,:] = np.ones((4,5))
        f.close()
    def test_issue919(self):
        """issue 919: broadcasting smaller arrays on assignment"""
        with Dataset(self.file,'w') as f:
            f.createDimension('time',2)
            f.createDimension('lat',10)
            f.createDimension('lon',9)
            f.createVariable('v1',np.int64,('time', 'lon','lat',))
            arr = np.arange(9*10).reshape((9, 10))
            f['v1'][:] = arr
            assert_array_equal(f['v1'][:],np.broadcast_to(arr,f['v1'].shape))
            arr = np.arange(10)
            f['v1'][:] = arr
            assert_array_equal(f['v1'][:],np.broadcast_to(arr,f['v1'].shape))
    def test_issue922(self):
        """issue 922: index and slice assignment along an unlimited dimension"""
        with Dataset(self.file,'w') as f:
            f.createDimension('d1',3)
            f.createDimension('d2',None)
            f.createVariable('v1',np.int64,('d2','d1',))
            f['v1'][0] = np.arange(3,dtype=np.int64)
            f['v1'][1:3] = np.arange(3,dtype=np.int64)
            assert_array_equal(f['v1'][:], np.broadcast_to(np.arange(3),(3,3)))
            f.createVariable('v2',np.int64,('d1','d2',))
            f['v2'][:,0] = np.arange(3,dtype=np.int64)
            f['v2'][:,1:3] = np.arange(6,dtype=np.int64).reshape(3,2)
            assert_array_equal(f['v2'][:,1:3],np.arange(6,dtype=np.int64).reshape(3,2))
            assert_array_equal(f['v2'][:,0],np.arange(3,dtype=np.int64))
    def test_issue1083(self):
        """issue 1083 / PR 1084: broadcasting arrays that contain length-1 dims"""
        with Dataset(self.file, "w") as nc:
            nc.createDimension("test", 5)
            v = nc.createVariable("var", "f8", ("test", "test", "test"))
            v[:] = 1 # works
            v[:] = np.ones(()) # works
            v[:] = np.ones((1,)) # works
            v[:] = np.ones((5,)) # works
            v[:] = np.ones((5,5,5)) # works
            v[:] = np.ones((5,1,1)) # fails (before PR #1084)
            v[:] = np.ones((5,1,5)) # fails (before PR #1084)
            v[:] = np.ones((5,5,1)) # fails (before PR #1084)
# Standard unittest entry point when this file is run as a script.
if __name__ == '__main__':
    unittest.main()
| 10,135 | 35.992701 | 87 | py |
netcdf4-python | netcdf4-python-master/test/tst_compression_szip.py | from numpy.random.mtrand import uniform
from netCDF4 import Dataset
from numpy.testing import assert_almost_equal
import os, tempfile, unittest, sys
# Shared fixture for the szip round-trip test below: array length,
# random data, and a scratch file name.
ndim = 100000
datarr = uniform(size=(ndim,))
filename = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
def write_netcdf(filename, dtype='f8'):
    """Write ``datarr`` to *filename* twice: once uncompressed (variable
    'data') and once through the szip filter (variable 'data_szip')."""
    dataset = Dataset(filename, 'w')
    dataset.createDimension('n', ndim)
    plain = dataset.createVariable('data', dtype, ('n',), compression=None)
    szipped = dataset.createVariable(
        'data_szip', dtype, ('n',),
        compression='szip', szip_coding='ec', szip_pixels_per_block=32)
    plain[:] = datarr
    szipped[:] = datarr
    dataset.close()
class CompressionTestCase(unittest.TestCase):
    """Check that szip-compressed data round-trips and that
    Variable.filters() reports the szip parameters."""
    def setUp(self):
        self.filename = filename
        write_netcdf(self.filename)
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.filename)
    def runTest(self):
        f = Dataset(self.filename)
        # uncompressed variable: data intact, every filter reported off.
        assert_almost_equal(datarr,f.variables['data'][:])
        assert f.variables['data'].filters() ==\
        {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False}
        # szip variable: data intact, coding/pixels_per_block reported.
        assert_almost_equal(datarr,f.variables['data_szip'][:])
        dtest = {'zlib': False, 'szip': {'coding': 'ec', 'pixels_per_block': 32}, 'zstd': False, 'bzip2': False, 'blosc': False, 'shuffle': False, 'complevel': 0, 'fletcher32': False}
        assert f.variables['data_szip'].filters() == dtest
        f.close()
if __name__ == '__main__':
    # szip support is optional in the underlying netCDF/HDF5 build, so
    # probe for the filter before running the tests.
    nc = Dataset(filename,'w')
    if not nc.has_szip_filter():
        # NOTE(review): on this path the dataset stays open and the
        # temporary file is never removed -- confirm whether intended.
        sys.stdout.write('szip filter not available, skipping tests ...\n')
    else:
        nc.close()
        unittest.main()
| 1,716 | 34.770833 | 183 | py |
netcdf4-python | netcdf4-python-master/test/tst_masked4.py | import unittest
import os
import tempfile
import numpy as np
from numpy import ma
from numpy.testing import assert_array_almost_equal
from netCDF4 import Dataset, default_fillvals
# Test use of valid_min/valid_max/valid_range in generation of masked arrays
class SetValidMinMax(unittest.TestCase):
    """Check that valid_min/valid_max/valid_range attributes control which
    elements are masked when data is read back as a masked array."""
    def setUp(self):
        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
        self.valid_min = -32765
        self.valid_max = 32765
        self.valid_range = [self.valid_min,self.valid_max]
        # first/last elements fall outside the valid range -> expect masked.
        self.v = np.array([self.valid_min-1, 5, 4, self.valid_max+1], dtype = "i2")
        self.v_ma = ma.array([self.valid_min-1, 5, 4, self.valid_max+1], dtype = "i2", mask = [True, False, False, True])
        self.scale_factor = 10.
        self.add_offset = 5.
        self.v_scaled = self.v * self.scale_factor + self.add_offset
        self.v_ma_scaled = self.v_ma * self.scale_factor + self.add_offset
        f = Dataset(self.testfile, 'w')
        _ = f.createDimension('x', None)
        # v uses valid_min/valid_max, v2 uses valid_range,
        # v3 uses valid_max plus a _FillValue at valid_min.
        v = f.createVariable('v', "i2", 'x')
        v2 = f.createVariable('v2', "i2", 'x')
        v3 = f.createVariable('v3', "i2", 'x', fill_value=self.valid_min)
        v.missing_value = np.array(32767, v.dtype)
        v.valid_min = np.array(self.valid_min, v.dtype)
        v.valid_max = np.array(self.valid_max, v.dtype)
        v.valid_range = np.array(0, v.dtype) # issue 1013, this is wrong but should not raise an exception
        v[0] = self.valid_min-1
        v[1] = self.v[1]
        v[2] = self.v[2]
        v[3] = self.valid_max+1
        v2.missing_value = np.array(32767, v.dtype)
        v2.valid_range = np.array(self.valid_range, v.dtype)
        v2[0] = self.valid_range[0]-1
        v2[1] = self.v[1]
        v2[2] = self.v[2]
        v2[3] = self.valid_range[1]+1
        v3.missing_value = np.array(32767, v.dtype)
        v3.valid_max = np.array(self.valid_max, v.dtype)
        # _FillValue should act as valid_min
        v3[0] = v3._FillValue-1
        v3[1] = self.v[1]
        v3[2] = self.v[2]
        v3[3] = self.valid_max+1
        f.close()
    def tearDown(self):
        os.remove(self.testfile)
    def test_scaled(self):
        """Testing auto-conversion of masked arrays"""
        # Update test data file
        f = Dataset(self.testfile, "a")
        f.variables["v"].scale_factor = self.scale_factor
        f.variables["v"].add_offset = self.add_offset
        f.variables["v2"].scale_factor = self.scale_factor
        f.variables["v2"].add_offset = self.add_offset
        f.close()
        f = Dataset(self.testfile, "r")
        v = f.variables["v"][:]
        v2 = f.variables["v2"][:]
        v3 = f.variables["v3"][:]
        self.assertEqual(v.dtype, "f8")
        self.assertTrue(isinstance(v, np.ndarray))
        self.assertTrue(isinstance(v, ma.core.MaskedArray))
        assert_array_almost_equal(v, self.v_scaled)
        self.assertEqual(v2.dtype, "f8")
        self.assertTrue(isinstance(v2, np.ndarray))
        self.assertTrue(isinstance(v2, ma.core.MaskedArray))
        assert_array_almost_equal(v2, self.v_scaled)
        self.assertTrue(np.all(self.v_ma.mask == v.mask))
        self.assertTrue(np.all(self.v_ma.mask == v2.mask))
        # treating _FillValue as valid_min/valid_max was
        # too surprising, revert to old behaviour (issue #761)
        #self.assertTrue(np.all(self.v_ma.mask == v3.mask))
        # check that underlying data is same as in netcdf file
        v = f.variables['v']
        v.set_auto_scale(False)
        v = v[:]
        self.assertTrue(np.all(self.v == v.data))
        f.close()
        # issue 672
        # NOTE(review): relies on the fixture file issue672.nc being
        # present in the current working directory.
        f = Dataset('issue672.nc')
        field = 'azi_angle_trip'
        v = f.variables[field]
        data1 = v[:]
        v.set_auto_scale(False)
        data2 = v[:]
        v.set_auto_maskandscale(False)
        data3 = v[:]
        assert(data1[(data3 < v.valid_min)].mask.sum() == 12)
        assert(data2[(data3 < v.valid_min)].mask.sum() ==
               data1[(data3 < v.valid_min)].mask.sum())
        f.close()
# Standard unittest entry point when this file is run as a script.
if __name__ == '__main__':
    unittest.main()
| 4,136 | 32.096 | 121 | py |
netcdf4-python | netcdf4-python-master/test/tst_compression_zstd.py | from numpy.random.mtrand import uniform
from netCDF4 import Dataset
from numpy.testing import assert_almost_equal
import os, tempfile, unittest, sys
# Shared fixture for the zstd compression test below: array length,
# random data, and two scratch file names (uncompressed vs compressed).
ndim = 100000
array = uniform(size=(ndim,))
filename1 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
filename2 = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
def write_netcdf(filename, dtype='f8', complevel=6):
    """Write ``array`` to *filename* as variable 'data' with the zstd
    filter at the given *complevel*."""
    dataset = Dataset(filename, 'w')
    dataset.createDimension('n', ndim)
    var = dataset.createVariable('data', dtype, ('n',),
                                 compression='zstd', complevel=complevel)
    var[:] = array
    dataset.close()
class CompressionTestCase(unittest.TestCase):
    """Compare data integrity, filters() output and file size between
    zstd complevel 0 (compression off) and complevel 4."""
    def setUp(self):
        self.filename1 = filename1
        self.filename2 = filename2
        write_netcdf(self.filename1,complevel=0) # no compression
        write_netcdf(self.filename2,complevel=4) # with compression
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.filename1)
        os.remove(self.filename2)
    def runTest(self):
        uncompressed_size = os.stat(self.filename1).st_size
        # check uncompressed data
        f = Dataset(self.filename1)
        size = os.stat(self.filename1).st_size
        assert_almost_equal(array,f.variables['data'][:])
        assert f.variables['data'].filters() ==\
        {'zlib':False,'szip':False,'zstd':False,'bzip2':False,'blosc':False,'shuffle':False,'complevel':0,'fletcher32':False}
        assert_almost_equal(size,uncompressed_size)
        f.close()
        # check compressed data.
        f = Dataset(self.filename2)
        size = os.stat(self.filename2).st_size
        assert_almost_equal(array,f.variables['data'][:])
        assert f.variables['data'].filters() ==\
        {'zlib':False,'szip':False,'zstd':True,'bzip2':False,'blosc':False,'shuffle':False,'complevel':4,'fletcher32':False}
        # compressed file should be at least 4% smaller.
        assert(size < 0.96*uncompressed_size)
        f.close()
if __name__ == '__main__':
    # zstd support is optional in the underlying netCDF/HDF5 build, so
    # probe for the filter before running the tests.
    nc = Dataset(filename1,'w')
    if not nc.has_zstd_filter():
        # NOTE(review): on this path the dataset stays open and the
        # temporary files are never removed -- confirm whether intended.
        sys.stdout.write('zstd filter not available, skipping tests ...\n')
    else:
        nc.close()
        unittest.main()
| 2,133 | 35.793103 | 125 | py |
netcdf4-python | netcdf4-python-master/test/tst_masked5.py | import unittest
import os
import tempfile
import numpy as np
from numpy import ma
from numpy.testing import assert_array_equal
from netCDF4 import Dataset, __netcdf4libversion__
# Test use of vector of missing values.
class VectorMissingValues(unittest.TestCase):
    """Check that a vector-valued missing_value attribute masks every
    listed value, and that a vlen-str variable honors its fill_value."""
    def setUp(self):
        self.testfile = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
        self.missing_values = [-999,999,0]
        # -999, 0 and 999 all appear in missing_values -> masked on read.
        self.v = np.array([-999,0,1,2,3,999], dtype = "i2")
        self.v_ma = ma.array([-1,0,1,2,3,4], dtype = "i2", \
                mask = [True, True, False, False, False, True])
        f = Dataset(self.testfile, 'w')
        d = f.createDimension('x',6)
        v = f.createVariable('v', "i2", 'x')
        # issue 730: set fill_value for vlen str vars
        v2 = f.createVariable('v2', str, 'x', fill_value='<missing>')
        v.missing_value = self.missing_values
        v[:] = self.v
        v2[0]='first'
        f.close()
    def tearDown(self):
        os.remove(self.testfile)
    def test_scaled(self):
        """Testing auto-conversion of masked arrays"""
        f = Dataset(self.testfile)
        v = f.variables["v"]
        v2 = f.variables["v2"]
        self.assertTrue(isinstance(v[:], ma.core.MaskedArray))
        assert_array_equal(v[:], self.v_ma)
        assert_array_equal(v[2],self.v[2]) # issue #624.
        # with auto-masking off the raw stored values come back.
        v.set_auto_mask(False)
        self.assertTrue(isinstance(v[:], np.ndarray))
        assert_array_equal(v[:], self.v)
        # issue 730
        # this part fails with netcdf 4.1.3
        # a bug in vlen strings?
        if __netcdf4libversion__ >= '4.4.0':
            assert v2[0] == 'first'
            assert v2[1] == '<missing>'
        f.close()
# Standard unittest entry point when this file is run as a script.
if __name__ == '__main__':
    unittest.main()
| 1,787 | 25.294118 | 84 | py |
netcdf4-python | netcdf4-python-master/test/tst_grps2.py | import sys
import unittest
import os
import tempfile
import netCDF4
# test implicit group creation by using unix-like paths
# in createVariable and createGroups (added in 1.1.8).
# also test Dataset.__getitem__, also added in 1.1.8.
# Scratch netCDF file (removed in tearDown) shared by the test case below.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
class Groups2TestCase(unittest.TestCase):
    """Test implicit group creation via unix-like paths in createVariable
    (added in 1.1.8) and path-based lookup with Dataset.__getitem__."""
    def setUp(self):
        self.file = FILE_NAME
        f = netCDF4.Dataset(self.file,'w')
        x = f.createDimension('x',10)
        # create groups in path if they don't already exist
        v = f.createVariable('/grouped/data/v',float,('x',))
        g = f.groups['grouped']
        # create groups underneath 'grouped'
        v2 = g.createVariable('./data/data2/v2',float,('x',))
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing implicit group and creation and Dataset.__getitem__"""
        f = netCDF4.Dataset(self.file, 'r')
        # path lookup and nested attribute lookup must yield the same objects.
        v1 = f['/grouped/data/v']
        v2 = ((f.groups['grouped']).groups['data']).variables['v']
        g = f['/grouped/data']
        v3 = g['data2/v2']
        assert(v1 == v2)
        assert(g == f.groups['grouped'].groups['data'])
        assert(v3.name == 'v2')
        f.close()
# Standard unittest entry point when this file is run as a script.
if __name__ == '__main__':
    unittest.main()
| 1,334 | 29.340909 | 73 | py |
netcdf4-python | netcdf4-python-master/test/tst_vlen.py | import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
# Shared fixture: scratch file name, vlen type metadata, grid shape, and
# the variable names exercised by the test cases below.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
# Flat element n holds a ragged int16 vector of length n+1 and a string
# of length n+2 starting at 'a'.
data = np.empty(nlats*nlons, object)
datas = np.empty(nlats*nlons, object)
for idx in range(nlats*nlons):
    count = idx + 1
    data[idx] = np.arange(count, dtype=VL_BASETYPE)
    datas[idx] = ''.join(chr(c) for c in range(97, 98 + count))
data = data.reshape(nlats, nlons)
datas = datas.reshape(nlats, nlons)
class VariablesTestCase(unittest.TestCase):
    """Test vlen (ragged) variables, vlen string variables, and scalar
    variants of both."""
    def setUp(self):
        self.file = FILE_NAME
        f = Dataset(self.file,'w')
        vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
        f.createDimension(DIM1_NAME,nlons)
        f.createDimension(DIM2_NAME,nlats)
        ragged = f.createVariable(VAR1_NAME, vlen_t,\
                (DIM2_NAME,DIM1_NAME))
        strings = f.createVariable(VAR2_NAME, str,
                (DIM2_NAME,DIM1_NAME))
        strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
                                       (DIM2_NAME, DIM1_NAME))
        string_scalar = f.createVariable(VAR4_NAME,str,())
        vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())
        ragged[:] = data
        ragged[-1,-1] = data[-1,-1]
        strings[:] = datas
        strings[-2,-2] = datas[-2,-2]
        strings_alt[:] = datas.astype(str)
        string_scalar[...] = 'foo' #issue458
        vlen_scalar[...] = np.array([1,2,3],np.int16)
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing vlen variables"""
        f = Dataset(self.file, 'r')
        v = f.variables[VAR1_NAME]
        vs = f.variables[VAR2_NAME]
        vs_alt = f.variables[VAR3_NAME]
        assert list(f.vltypes.keys()) == [VL_NAME]
        assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
        assert f.variables['string_scalar'][...] == 'foo'
        assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))
        data2 = v[:]
        data2s = vs[:]
        # compare element-wise: ragged rows have different lengths.
        for i in range(nlons):
            for j in range(nlats):
                assert_array_equal(data2[j,i], data[j,i])
                assert datas[j,i] == data2s[j,i]
        assert_array_equal(datas, vs_alt[:])
        f.close()
class TestInvalidDataType(unittest.TestCase):
    """str variables must be rejected in NETCDF3 files (issue #497)."""
    def runTest(self):
        f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
        f.createDimension('x', 1)
        try:
            # The old try/except-ValueError-pass version silently passed
            # even when no exception was raised; assertRaisesRegex makes
            # the test actually fail in that case (the file targets
            # Python 3, so the context-manager form is available).
            with self.assertRaisesRegex(ValueError,
                                        'strings are only supported'):
                f.createVariable('foo', str, ('x',))
        finally:
            # clean up even if the assertion fails
            f.close()
            os.remove(FILE_NAME)
class TestScalarVlenString(unittest.TestCase):
    """Regression test for issue #333: scalar vlen-string variables."""
    def runTest(self):
        expected = "yyyymmdd_hhmmss"
        # write a scalar (dimensionless) string variable
        nc = Dataset(FILE_NAME, 'w', format='NETCDF4')
        var = nc.createVariable('teststring', str)
        var[()] = expected
        nc.close()
        # read it back and verify the round-trip
        nc = Dataset(FILE_NAME)
        assert nc.variables['teststring'][:] == expected
        nc.close()
        os.remove(FILE_NAME)
class TestIntegerIndex(unittest.TestCase):
    # issue 526
    def runTest(self):
        """Integer-like indices that are not native ints (np.int32, float)
        must be accepted when indexing vlen-string variables."""
        strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
        strtest.createDimension('tenstrings', 10)
        strtest.createVariable('tenstrings', str, ['tenstrings'])
        # deliberately index with np.int32 and a float (the point of #526)
        strtest['tenstrings'][np.int32(5)] = 'asdf'
        strtest['tenstrings'][6.0] = 'asdf'
        strtest.close()
        f = Dataset(FILE_NAME)
        assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
        assert f.variables['tenstrings'][6.0] == 'asdf'
        f.close()
        os.remove(FILE_NAME)
class TestObjectArrayIndexing(unittest.TestCase):
    """Fancy indexing into the object array returned by reading a
    vlen-string variable."""
    def setUp(self):
        self.file = FILE_NAME
        f = Dataset(self.file,'w')
        vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
        f.createDimension(DIM1_NAME,nlons)
        f.createDimension(DIM2_NAME,nlats)
        strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
                                       (DIM2_NAME, DIM1_NAME))
        strings_alt[:] = datas.astype(str)
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing vlen variables"""
        f = Dataset(self.file, 'r')
        vs_alt = f.variables[VAR3_NAME]
        unicode_strings = vs_alt[:]
        # row 0 of datas is 'ab','abc','abcd','abcde','abcdef';
        # fancy-index columns 1, 2 and 4 of that row
        fancy_indexed = unicode_strings[0][[1,2,4]]
        assert fancy_indexed[0] == 'abc'
        assert fancy_indexed[1] == 'abcd'
        assert fancy_indexed[2] == 'abcdef'
        f.close()
class VlenAppendTestCase(unittest.TestCase):
    """Regression test for issue #527: appending to a non-vlen variable
    in a file that also contains a vlen variable used to crash.  The exact
    statement sequence in runTest reproduces the crash; do not reorder."""
    def setUp(self):
        import netCDF4
        # vlen append requires libnetcdf >= 4.4.1
        if netCDF4.__netcdf4libversion__ < "4.4.1":
            self.skip = True
            try:
                self.skipTest("This test requires NetCDF 4.4.1 or later.")
            except AttributeError:
                # workaround for Python 2.6 (skipTest(reason) is new
                # in Python 2.7)
                pass
        else:
            self.skip = False
        self.file = FILE_NAME
        f = Dataset(self.file, 'w')
        vlen_type = f.createVLType(np.float64, 'vltest')
        # unlimited dimension so the variables can grow on append
        f.createDimension('x', None)
        v = f.createVariable('vl', vlen_type, 'x')
        w = f.createVariable('vl2', np.float64, 'x')
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing appending to vlen variables (issue #527)."""
        # workaround for Python 2.6
        if self.skip:
            return
        f = Dataset(self.file, 'a')
        w = f.variables["vl2"]
        v = f.variables["vl"]
        # appending to the plain variable grows 'x'; reading the (never
        # written) vlen variable afterwards triggered the crash
        w[0:3] = np.arange(3, dtype=np.float64)
        v[0] # sometimes crashes
        v[0].tolist() # sometimes crashes
        v[0].size # BOOM!
        f.close()
class Vlen_ScaledInts(unittest.TestCase):
    """Regression test for issue #1003: packing float vlens as scaled
    uint8 integers via scale_factor auto-scaling."""
    def setUp(self):
        self.file = FILE_NAME
        nc = Dataset(self.file, 'w')
        vlen_type = nc.createVLType(np.uint8, 'vltest')
        nc.createDimension('x', None)
        v = nc.createVariable('vl', vlen_type, 'x')
        # floats in [0,1] quantized into 0..254; 255 reserved as missing
        v.scale_factor = 1./254.
        v.missing_value=np.array(255,np.uint8)
        # random lengths between 1 and 1000
        ilen = np.random.randint(1,1000,size=100)
        n = 0
        for nlen in ilen:
            data = np.random.uniform(low=0.0, high=1.0, size=nlen)
            v[n] = data
            # keep the last row's unpacked values for comparison
            if n==99: self.data = data
            n += 1
        nc.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing packing float vlens as scaled integers (issue #1003)."""
        nc = Dataset(self.file)
        data = nc['vl'][-1]
        # check max error of compression
        err = np.abs(data - self.data)
        assert(err.max() < nc['vl'].scale_factor)
        # turn off auto-scaling
        nc.set_auto_maskandscale(False)
        data = nc['vl'][-1]
        # raw stored value must be the rounded quantization of the float
        assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
        nc.close()
if __name__ == '__main__':
unittest.main()
| 7,675 | 32.519651 | 86 | py |
netcdf4-python | netcdf4-python-master/test/tst_compression_blosc.py | from numpy.random.mtrand import uniform
from netCDF4 import Dataset
from numpy.testing import assert_almost_equal
import os, tempfile, unittest, sys
ndim = 100000
iblosc_shuffle=2
iblosc_complevel=4
filename = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
datarr = uniform(size=(ndim,))
def write_netcdf(filename,dtype='f8',blosc_shuffle=1,complevel=6):
    """Write a test file holding one uncompressed copy of ``datarr``
    ('data') and one copy per blosc sub-compressor ('data_lz', 'data_lz4',
    'data_lz4hc', 'data_zlib', 'data_zstd').

    Args:
        filename: path of the netCDF file to create.
        dtype: netCDF datatype string for every variable.
        blosc_shuffle: blosc shuffle mode passed to createVariable.
        complevel: compression level passed to createVariable.
    """
    nc = Dataset(filename,'w')
    nc.createDimension('n', ndim)
    # uncompressed reference variable
    foo = nc.createVariable('data', dtype, ('n',), compression=None)
    # BUGFIX: 'data' was created but never written, while
    # CompressionTestCase asserts its contents equal datarr.
    foo[:] = datarr
    # one variable per blosc sub-compressor (previously five copy-pasted
    # blocks; the names match what CompressionTestCase reads back)
    for subcomp in ('lz', 'lz4', 'lz4hc', 'zlib', 'zstd'):
        var = nc.createVariable('data_' + subcomp, dtype, ('n',),
                                compression='blosc_' + subcomp,
                                blosc_shuffle=blosc_shuffle,
                                complevel=complevel)
        var[:] = datarr
    nc.close()
class CompressionTestCase(unittest.TestCase):
    """Round-trip every blosc sub-compressor and check filter metadata."""
    def setUp(self):
        self.filename = filename
        write_netcdf(self.filename, complevel=iblosc_complevel,
                     blosc_shuffle=iblosc_shuffle)
    def tearDown(self):
        # remove the scratch file
        os.remove(self.filename)
    def runTest(self):
        nc = Dataset(self.filename)
        # uncompressed reference variable: data intact, no filters active
        assert_almost_equal(datarr, nc.variables['data'][:])
        no_filters = {'zlib': False, 'szip': False, 'zstd': False,
                      'bzip2': False, 'blosc': False, 'shuffle': False,
                      'complevel': 0, 'fletcher32': False}
        assert nc.variables['data'].filters() == no_filters
        # each blosc variant: data round-trips and filters() reports the
        # sub-compressor, shuffle mode and compression level that were set
        for subcomp in ('lz', 'lz4', 'lz4hc', 'zlib', 'zstd'):
            varname = 'data_' + subcomp
            assert_almost_equal(datarr, nc.variables[varname][:])
            expected = {'zlib': False, 'szip': False, 'zstd': False,
                        'bzip2': False,
                        'blosc': {'compressor': 'blosc_' + subcomp,
                                  'shuffle': iblosc_shuffle},
                        'shuffle': False, 'complevel': iblosc_complevel,
                        'fletcher32': False}
            assert nc.variables[varname].filters() == expected
        nc.close()
if __name__ == '__main__':
    # Probe a scratch file for blosc support before running the suite.
    nc = Dataset(filename,'w')
    has_blosc = nc.has_blosc_filter()
    # BUGFIX: the probe file was previously left open (and on disk) when
    # blosc was unavailable; always close it, and remove it when skipping.
    nc.close()
    if not has_blosc:
        sys.stdout.write('blosc filter not available, skipping tests ...\n')
        os.remove(filename)
    else:
        unittest.main()
| 4,029 | 47.554217 | 125 | py |
netcdf4-python | netcdf4-python-master/test/tst_rename.py | import sys
import unittest
import os
import tempfile
import netCDF4
from netCDF4 import __has_rename_grp__
# test changing dimension, variable names
# and deleting attributes.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
LAT_NAME="lat"
LON_NAME="lon"
LON_NAME2 = "longitude"
LEVEL_NAME="level"
TIME_NAME="time"
VAR_NAME='temp'
VAR_NAME2='wind'
GROUP_NAME='subgroup'
GROUP_NAME2='subgroup2'
class VariablesTestCase(unittest.TestCase):
    """Exercise renameDimension/renameVariable/renameGroup and attribute
    deletion, on both the root group and a subgroup, then verify that all
    of these operations raise when the file is opened read-only.

    The statement order matters throughout runTest: names are checked
    before and after each rename against the same file handle."""
    def setUp(self):
        # Root group and subgroup each get the same dimensions, one
        # variable, and the attributes that runTest later deletes.
        self.file = FILE_NAME
        f = netCDF4.Dataset(self.file, 'w')
        f.createDimension(LAT_NAME,73)
        f.createDimension(LON_NAME,145)
        f.createDimension(LEVEL_NAME,10)
        f.createDimension(TIME_NAME,None)
        # if the lib can rename groups, start from GROUP_NAME so the
        # rename is exercised; otherwise create GROUP_NAME2 directly
        if __has_rename_grp__:
            g = f.createGroup(GROUP_NAME)
        else:
            g = f.createGroup(GROUP_NAME2)
        g.createDimension(LAT_NAME,145)
        g.createDimension(LON_NAME,289)
        g.createDimension(LEVEL_NAME,20)
        g.createDimension(TIME_NAME,None)
        f.foo = 'bar'
        f.goober = 2
        g.foo = 'bar'
        g.goober = 2
        f.createVariable(VAR_NAME,'f4',(LAT_NAME, LON_NAME, TIME_NAME))
        v = f.variables[VAR_NAME]
        v.bar = 'foo'
        v.slobber = 3
        g.createVariable(VAR_NAME,'f4',(LAT_NAME, LON_NAME, TIME_NAME))
        v2 = g.variables[VAR_NAME]
        v2.bar = 'foo'
        v2.slobber = 3
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing renaming of dimensions, variables and attribute deletion"""
        f = netCDF4.Dataset(self.file, 'r+')
        v = f.variables[VAR_NAME]
        names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME]
        # check that dimension names are correct.
        for name in f.dimensions.keys():
            self.assertTrue(name in names_check)
        names_check = [VAR_NAME]
        # check that variable names are correct.
        for name in f.variables.keys():
            self.assertTrue(name in names_check)
        # rename dimension.
        f.renameDimension(LON_NAME,LON_NAME2)
        # rename variable.
        f.renameVariable(VAR_NAME,VAR_NAME2)
        # rename group.
        if __has_rename_grp__:
            f.renameGroup(GROUP_NAME,GROUP_NAME2)
        # check that new dimension names are correct.
        names_check = [LAT_NAME, LON_NAME2, LEVEL_NAME, TIME_NAME]
        for name in f.dimensions.keys():
            self.assertTrue(name in names_check)
        names_check = [VAR_NAME2]
        # check that new variable names are correct.
        for name in f.variables.keys():
            self.assertTrue(name in names_check)
        # repeat the same rename/check cycle inside the subgroup
        g = f.groups[GROUP_NAME2]
        vg = g.variables[VAR_NAME]
        names_check = [LAT_NAME, LON_NAME, LEVEL_NAME, TIME_NAME]
        # check that dimension names are correct.
        for name in g.dimensions.keys():
            self.assertTrue(name in names_check)
        names_check = [VAR_NAME]
        # check that variable names are correct.
        for name in g.variables.keys():
            self.assertTrue(name in names_check)
        # check that group name is correct.
        self.assertTrue(GROUP_NAME not in f.groups and GROUP_NAME2 in f.groups)
        # rename dimension.
        g.renameDimension(LON_NAME,LON_NAME2)
        # rename variable.
        g.renameVariable(VAR_NAME,VAR_NAME2)
        # check that new dimension names are correct.
        names_check = [LAT_NAME, LON_NAME2, LEVEL_NAME, TIME_NAME]
        for name in g.dimensions.keys():
            self.assertTrue(name in names_check)
        names_check = [VAR_NAME2]
        # check that new variable names are correct.
        for name in g.variables.keys():
            self.assertTrue(name in names_check)
        # delete a global attribute.
        atts = f.ncattrs()
        del f.goober
        atts.remove('goober')
        self.assertTrue(atts == f.ncattrs())
        atts = g.ncattrs()
        del g.goober
        atts.remove('goober')
        self.assertTrue(atts == g.ncattrs())
        # delete a variable attribute.
        atts = v.ncattrs()
        del v.slobber
        atts.remove('slobber')
        self.assertTrue(atts == v.ncattrs())
        atts = vg.ncattrs()
        del vg.slobber
        atts.remove('slobber')
        self.assertTrue(atts == vg.ncattrs())
        f.close()
        # make sure attributes cannot be deleted, or vars/dims renamed
        # when file is open read-only.
        f = netCDF4.Dataset(self.file)
        v = f.variables[VAR_NAME2]
        self.assertRaises(RuntimeError, delattr, v, 'bar')
        self.assertRaises(RuntimeError, f.renameVariable, VAR_NAME2, VAR_NAME)
        self.assertRaises(RuntimeError, f.renameDimension, LON_NAME2, LON_NAME)
        g = f.groups[GROUP_NAME2]
        vg = g.variables[VAR_NAME2]
        self.assertRaises(RuntimeError, delattr, vg, 'bar')
        self.assertRaises(RuntimeError, g.renameVariable, VAR_NAME2, VAR_NAME)
        self.assertRaises(RuntimeError, g.renameDimension, LON_NAME2, LON_NAME)
        f.close()
if __name__ == '__main__':
unittest.main()
| 5,167 | 35.13986 | 79 | py |
netcdf4-python | netcdf4-python-master/test/tst_alignment.py | import numpy as np
from netCDF4 import set_alignment, get_alignment, Dataset
from netCDF4 import __has_set_alignment__
import netCDF4
import os
import subprocess
import tempfile
import unittest
# During testing, sometimes development versions are used.
# They may be written as 4.9.1-development
libversion_no_development = netCDF4.__netcdf4libversion__.split('-')[0]
libversion = tuple(int(v) for v in libversion_no_development.split('.'))
# HDF5 alignment control requires libnetcdf >= 4.9
has_alignment = (libversion[0] > 4) or (
    libversion[0] == 4 and (libversion[1] >= 9)
)
# Probe for the h5ls CLI tool.  NOTE(review): check_call raises
# CalledProcessError on a non-zero exit status, so when it returns the
# '== 0' comparison is always true; the except clause does the real work.
try:
    has_h5ls = subprocess.check_call(['h5ls', '--version'], stdout=subprocess.PIPE) == 0
except Exception:
    has_h5ls = False
# Scratch file used by the test case below.
file_name = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
class AlignmentTestCase(unittest.TestCase):
    """Verify that set_alignment/get_alignment control HDF5 data layout
    (checked externally via h5ls) and that support is reported correctly."""
    def setUp(self):
        self.file = file_name
        # This is a global variable in netcdf4, it must be set before File
        # creation
        if has_alignment:
            set_alignment(1024, 4096)
            assert get_alignment() == (1024, 4096)
        f = Dataset(self.file, 'w')
        f.createDimension('x', 4096)
        # Create many datasets so that we decrease the chance of
        # the dataset being randomly aligned
        for i in range(10):
            f.createVariable(f'data{i:02d}', np.float64, ('x',))
            v = f.variables[f'data{i:02d}']
            v[...] = 0
        f.close()
        if has_alignment:
            # ensure to reset the alignment to 1 (default values) so as not to
            # disrupt other tests
            set_alignment(1, 1)
            assert get_alignment() == (1, 1)
    def test_version_settings(self):
        if has_alignment:
            # One should always be able to set the alignment to 1, 1
            set_alignment(1, 1)
            assert get_alignment() == (1, 1)
        else:
            # without library support both calls must raise
            with self.assertRaises(RuntimeError):
                set_alignment(1, 1)
            with self.assertRaises(RuntimeError):
                get_alignment()
    def test_reports_alignment_capabilities(self):
        # Assert that the library reports that it supports alignment correctly
        assert has_alignment == __has_set_alignment__
    # if we have no support for alignment, we have no guarantees on
    # how the data can be aligned
    @unittest.skipIf(
        not has_h5ls,
        "h5ls not found."
    )
    @unittest.skipIf(
        not has_alignment,
        "No support for set_alignment in libnetcdf."
    )
    def test_setting_alignment(self):
        # We choose to use h5ls instead of h5py since h5ls is very likely
        # to be installed alongside the rest of the tooling required to build
        # netcdf4-python
        # Output from h5ls is expected to look like:
        """
Opened "/tmp/tmpqexgozg1.nc" with sec2 driver.
data00 Dataset {4096/4096}
 Attribute: DIMENSION_LIST {1}
 Type: variable length of
 object reference
 Attribute: _Netcdf4Coordinates {1}
 Type: 32-bit little-endian integer
 Location: 1:563
 Links: 1
 Storage: 32768 logical bytes, 32768 allocated bytes, 100.00% utilization
 Type: IEEE 64-bit little-endian float
 Address: 8192
data01 Dataset {4096/4096}
 Attribute: DIMENSION_LIST {1}
 Type: variable length of
 object reference
 Attribute: _Netcdf4Coordinates {1}
 Type: 32-bit little-endian integer
 Location: 1:1087
 Links: 1
 Storage: 32768 logical bytes, 32768 allocated bytes, 100.00% utilization
 Type: IEEE 64-bit little-endian float
 Address: 40960
[...]
x Dataset {4096/4096}
 Attribute: CLASS scalar
 Type: 16-byte null-terminated ASCII string
 Attribute: NAME scalar
 Type: 64-byte null-terminated ASCII string
 Attribute: REFERENCE_LIST {10}
 Type: struct {
 "dataset" +0 object reference
 "dimension" +8 32-bit little-endian unsigned integer
 } 16 bytes
 Attribute: _Netcdf4Dimid scalar
 Type: 32-bit little-endian integer
 Location: 1:239
 Links: 1
 Storage: 16384 logical bytes, 0 allocated bytes
 Type: IEEE 32-bit big-endian float
 Address: 18446744073709551615
        """
        h5ls_results = subprocess.check_output(
            ["h5ls", "--verbose", "--address", "--simple", self.file]
        ).decode()
        addresses = {
            f'data{i:02d}': -1
            for i in range(10)
        }
        data_variable = None
        # Parse h5ls output: unindented lines start a new dataset, the
        # indented 'Address:' line carries its on-disk offset.
        for line in h5ls_results.split('\n'):
            if not line.startswith(' '):
                data_variable = line.split(' ')[0]
            # only process the data variables we care to inspect
            if data_variable not in addresses:
                continue
            line = line.strip()
            if line.startswith('Address:'):
                address = int(line.split(':')[1].strip())
                addresses[data_variable] = address
        for key, address in addresses.items():
            is_aligned = (address % 4096) == 0
            assert is_aligned, f"{key} is not aligned. Address = 0x{address:x}"
        # Alternative implementation in h5py
        # import h5py
        # with h5py.File(self.file, 'r') as h5file:
        #     for i in range(10):
        #         v = h5file[f'data{i:02d}']
        #         assert (dataset.id.get_offset() % 4096) == 0
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
if __name__ == '__main__':
unittest.main()
| 5,676 | 33.828221 | 88 | py |
netcdf4-python | netcdf4-python-master/test/tst_filepath.py | import os, sys, shutil
import tempfile
import unittest
import netCDF4
class test_filepath(unittest.TestCase):
    """Tests for Dataset.filepath()."""
    def setUp(self):
        self.netcdf_file = os.path.join(os.getcwd(), "netcdf_dummy_file.nc")
        self.nc = netCDF4.Dataset(self.netcdf_file)
    def tearDown(self):
        # BUGFIX: the dataset opened in setUp was never closed, leaking a
        # file handle for every test method in this class.
        self.nc.close()
    def test_filepath(self):
        assert self.nc.filepath() == str(self.netcdf_file)
    def test_filepath_with_non_ascii_characters(self):
        # create nc-file in a filepath using a cp1252 string
        tmpdir = tempfile.mkdtemp()
        filepath = os.path.join(tmpdir,b'Pl\xc3\xb6n.nc'.decode('cp1252'))
        nc = netCDF4.Dataset(filepath,'w',encoding='cp1252')
        filepatho = nc.filepath(encoding='cp1252')
        assert filepath == filepatho
        assert filepath.encode('cp1252') == filepatho.encode('cp1252')
        nc.close()
        shutil.rmtree(tmpdir)
    def test_no_such_file_raises(self):
        # opening a missing file must raise OSError mentioning the name
        fname = 'not_a_nc_file.nc'
        with self.assertRaisesRegex(OSError, fname):
            netCDF4.Dataset(fname, 'r')
if __name__ == '__main__':
unittest.main()
| 1,067 | 29.514286 | 76 | py |
netcdf4-python | netcdf4-python-master/test/tst_issue908.py | import netCDF4, unittest
import numpy as np
class Issue908TestCase(unittest.TestCase):
    """Regression test for issue #908: reading a fully-masked variable."""
    def setUp(self):
        self.nc = netCDF4.Dataset('CRM032_test1.nc')
    def tearDown(self):
        self.nc.close()
    def runTest(self):
        rgrid = self.nc['rgrid'][:]
        # every element of the variable must come back masked
        assert rgrid.all() is np.ma.masked
if __name__ == '__main__':
unittest.main()
| 378 | 18.947368 | 47 | py |
camel_tools | camel_tools-master/setup.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from setuptools import setup
import sys
VERSION_FILE = os.path.join(os.path.dirname(__file__),
'camel_tools',
'VERSION')
with open(VERSION_FILE, encoding='utf-8') as version_fp:
VERSION = version_fp.read().strip()
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: Arabic',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing',
'Topic :: Text Processing :: Linguistic',
]
DESCRIPTION = ('A suite of Arabic natural language processing tools developed '
'by the CAMeL Lab at New York University Abu Dhabi.')
README_FILE = os.path.join(os.path.dirname(__file__), 'README.rst')
with open(README_FILE, 'r', encoding='utf-8') as version_fp:
LONG_DESCRIPTION = version_fp.read().strip()
# Core runtime dependencies, installed on every platform.
INSTALL_REQUIRES = [
    'future',
    'six',
    'docopt',
    'cachetools',
    'numpy',
    'scipy',
    'pandas',
    'scikit-learn',
    'dill',
    'torch>=1.3',
    'transformers>=3.0.2',
    'editdistance',
    'requests',
    'emoji',
    'pyrsistent',
    'tabulate',
    'tqdm',
    'muddler',
]
# camel-kenlm is excluded on Windows via its PEP 508 environment marker.
INSTALL_REQUIRES_NOT_WINDOWS = [
    'camel-kenlm >= 2023.3.17.2 ; platform_system!="Windows"'
]
# NOTE(review): this guard duplicates the environment marker above —
# presumably so the requirement is not even listed when building on
# Windows; confirm before simplifying.
if sys.platform != 'win32':
    INSTALL_REQUIRES.extend(INSTALL_REQUIRES_NOT_WINDOWS)
setup(
name='camel_tools',
version=VERSION,
author='Ossama W. Obeid',
author_email='oobeid@nyu.edu',
maintainer='Ossama W. Obeid',
maintainer_email='oobeid@nyu.edu',
packages=['camel_tools',
'camel_tools.cli',
'camel_tools.utils',
'camel_tools.morphology',
'camel_tools.disambig',
'camel_tools.disambig.bert',
'camel_tools.tokenizers',
'camel_tools.tagger',
'camel_tools.data',
'camel_tools.sentiment',
'camel_tools.dialectid',
'camel_tools.ner'],
package_data={
'camel_tools.utils': ['charmaps/*.json'],
},
include_package_data=True,
entry_points={
'console_scripts': [
('camel_transliterate='
'camel_tools.cli.camel_transliterate:main'),
('camel_arclean='
'camel_tools.cli.camel_arclean:main'),
('camel_morphology='
'camel_tools.cli.camel_morphology:main'),
('camel_dediac='
'camel_tools.cli.camel_dediac:main'),
('camel_word_tokenize='
'camel_tools.cli.camel_word_tokenize:main'),
('camel_diac='
'camel_tools.cli.camel_diac:main'),
('camel_data='
'camel_tools.cli.camel_data:main'),
],
},
url='https://github.com/CAMeL-Lab/CAMeL_Tools',
license='MIT',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
python_requires='>=3.7.0, <3.11'
)
| 4,817 | 32.227586 | 79 | py |
camel_tools | camel_tools-master/camel_tools/__init__.py | # MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A suite of Arabic natural language processing tools developed by the CAMeL Lab
at New York University Abu Dhabi.
"""
from __future__ import print_function, absolute_import
import os
# Read the package version from the bundled VERSION file; fall back to a
# placeholder if the file is missing or unreadable.
try:
    version_file = os.path.join(os.path.dirname(__file__), 'VERSION')
    with open(version_file, 'r', encoding='utf-8') as version_fp:
        __version__ = version_fp.read().strip()
except Exception: # pragma: no cover
    __version__ = '???'
| 1,567 | 39.205128 | 79 | py |
camel_tools | camel_tools-master/camel_tools/cli/camel_data.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The CAMeL Tools data download utility.
Usage:
camel_data (-i | --install) [-f | --force] <PACKAGE>
camel_data (-p | --post-install) <PACKAGE> <ARGS>...
camel_data (-l | --list)
camel_data (-u | --update)
camel_data (-v | --version)
camel_data (-h | --help)
Options:
-l --list
Show a list of packages available for download.
-i --install
Install package.
-p --post-install
Perform post installation for a given package.
-f --force
Force install package and dependencies.
-u --update
Update package list.
-h --help
Show this screen.
-v --version
Show version.
"""
import sys
from docopt import docopt
import tabulate
import camel_tools
from camel_tools.data import CATALOGUE
from camel_tools.data.catalogue import Catalogue, CatalogueError
from camel_tools.data.downloader import DownloaderError
__version__ = camel_tools.__version__
def _sizeof_fmt(num):
# Modified from https://stackoverflow.com/a/1094933
if num is None:
return ''
for ndx, unit in enumerate(['', 'k', 'M', 'G']):
if abs(num) < 1000.0:
if ndx > 0:
return f'{num:3.1f} {unit}B'
else:
return f'{num:3.0f} B '
num /= 1000.0
return f'{num:.1f} GB'
def _print_package_list(catalogue: Catalogue):
    """Print a table of all public packages in *catalogue* to stdout."""
    pkgs = catalogue.get_public_packages()
    columns = ['Package Name', 'Size', 'License', 'Description']
    table = []
    for pkg in pkgs:
        table.append((pkg.name,
                      _sizeof_fmt(pkg.size),
                      pkg.license,
                      pkg.description))
    # keep the padded size column intact while tabulate renders the table
    tabulate.PRESERVE_WHITESPACE = True
    print(tabulate.tabulate(table,
                            tablefmt='simple',
                            headers=columns,
                            colalign=('left', 'right', 'left', 'left')))
    print()
    tabulate.PRESERVE_WHITESPACE = False
def _update_catalogue():
    """Refresh the local package catalogue, exiting with status 1 on failure."""
    try:
        sys.stdout.write('Updating catalogue... ')
        CATALOGUE.update_catalogue()
        sys.stdout.write('done\n')
    except Exception:
        sys.stdout.write('failed\n')
        sys.stderr.write('Error: Couldn\'t update catalogue.\n')
        sys.exit(1)
def main(): # pragma: no cover
    """Entry point for the camel_data CLI: dispatch on the docopt flags
    (--list, --update, --install, --post-install) and exit."""
    try:
        version = ('CAMeL Tools v{}'.format(__version__))
        arguments = docopt(__doc__, version=version)
        # first run: fetch the catalogue if it isn't on disk yet
        cat_path = CATALOGUE.get_default_catalogue_path()
        if not cat_path.exists():
            _update_catalogue()
        if arguments['--list']:
            _print_package_list(CATALOGUE)
            sys.exit(0)
        if arguments['--update']:
            _update_catalogue()
            sys.exit(0)
        if arguments['--install']:
            package_name = arguments.get('<PACKAGE>', None)
            try:
                CATALOGUE.download_package(package_name,
                                           recursive=True,
                                           force=arguments['--force'],
                                           print_status=True)
                sys.exit(0)
            except CatalogueError as c:
                sys.stderr.write(f'Error: {c.msg}')
                sys.exit(1)
            except DownloaderError as d:
                # NOTE(review): unlike the CatalogueError branch, this one
                # does not sys.exit(1), so execution falls through —
                # confirm whether that is intended.
                sys.stderr.write(f'Error: {d.msg}')
        if arguments['--post-install']:
            package_name = arguments['<PACKAGE>']
            if package_name is None:
                # no package given: deliberately a no-op
                pass
            else:
                CATALOGUE.post_install_package(package_name,
                                               arguments['<ARGS>'])
    except KeyboardInterrupt:
        sys.stderr.write('Exiting...\n')
        sys.exit(1)
    except Exception:
        # catch-all so the CLI never shows a raw traceback
        sys.stderr.write('Error: An unknown error occurred.\n')
        sys.exit(1)
if __name__ == '__main__': # pragma: no cover
main()
| 5,041 | 29.011905 | 79 | py |
camel_tools | camel_tools-master/camel_tools/cli/camel_diac.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The CAMeL Tools diacritization utility.
Usage:
camel_diac [-d DATABASE | --db=DATABASE]
[-m MARKER | --marker=MARKER]
[-I | --ignore-markers]
[-S | --strip-markers]
[-p | --pretokenized]
[-o OUTPUT | --output=OUTPUT] [FILE]
camel_diac (-l | --list-schemes)
camel_diac (-v | --version)
camel_diac (-h | --help)
Options:
-d DATABASE --db=DATABASE
Morphology database to use. DATABASE could be the name of a builtin
database or a path to a database file. [default: calima-msa-r13]
-o OUTPUT --output=OUTPUT
Output file. If not specified, output will be printed to stdout.
-m MARKER --marker=MARKER
Marker used to prefix tokens not to be transliterated.
[default: @@IGNORE@@]
-I --ignore-markers
Transliterate marked words as well.
-S --strip-markers
Remove markers in output.
-p --pretokenized
Input is already pre-tokenized by punctuation. When this is set,
camel_diac will not split tokens by punctuation but any tokens that
do contain punctuation will not be diacritized.
-l --list
Show a list of morphological databases.
-h --help
Show this screen.
-v --version
Show version.
"""
import re
import sys
from docopt import docopt
import camel_tools
from camel_tools.morphology.database import MorphologyDB
from camel_tools.disambig.mle import MLEDisambiguator
from camel_tools.tokenizers.word import simple_word_tokenize
from camel_tools.cli.utils import open_files
__version__ = camel_tools.__version__
_BUILTIN_DBS = MorphologyDB.list_builtin_dbs()
_DEFAULT_DB = 'calima-msa-r13'
_WHITESPACE_RE = re.compile(r'\s+|\S+')
def _diac_tokens(tokens, disambig, ignore_markers, marker, strip_markers,
pretokenized):
result = []
for token in tokens:
if len(token.strip()) == 0:
result.append(token)
elif ignore_markers and token.startswith(marker):
if strip_markers:
result.append(token[len(marker):])
else:
result.append(token)
else:
if pretokenized:
subtokens = [token]
else:
subtokens = simple_word_tokenize(token)
disambig_tokens = disambig.disambiguate(subtokens)
result.extend([d.analyses[0].analysis.get('diac', d.word)
for d in disambig_tokens])
return result
def main():
    """Entry point for the ``camel_diac`` CLI.

    Parses arguments with docopt, diacritizes input line by line, and
    exits with status 0 on success, 1 on any error.
    """
    try:
        version = ('CAMeL Tools v{}'.format(__version__))
        arguments = docopt(__doc__, version=version)

        # List builtin databases and exit successfully.
        if arguments['--list']:
            for db_entry in _BUILTIN_DBS:
                print("{} {}".format(db_entry.name.ljust(8),
                                     db_entry.description))
            sys.exit(0)

        # Resolve the requested database against the builtin list.
        db_name = None
        for db_entry in _BUILTIN_DBS:
            if arguments['--db'] == db_entry.name:
                db_name = db_entry.name
        if db_name is None:
            sys.stderr.write('Error: {} is not a valid database name.\n'
                             'Run `camel_diac -l` to see the list of available'
                             ' databases.\n'.format(repr(arguments['--db'])))
            sys.exit(1)

        disambig = MLEDisambiguator.pretrained(db_name)
        marker = arguments['--marker']
        ignore_markers = arguments['--ignore-markers']
        strip_markers = arguments['--strip-markers']
        pretokenized = arguments['--pretokenized']

        # Open files (or just use stdin and stdout)
        fin, fout = open_files(arguments['FILE'], arguments['--output'])

        # Diacritize lines
        try:
            for line in fin:
                # Split into alternating whitespace/word runs so the original
                # spacing survives the join below.
                toks = _WHITESPACE_RE.findall(line)
                diac_toks = _diac_tokens(toks, disambig, ignore_markers,
                                         marker, strip_markers, pretokenized)
                fout.write(''.join(diac_toks))
        # If everything worked so far, this shouldn't happen
        except Exception:  # pylint: disable=W0703
            # BUGFIX: corrected typos in the user-facing message
            # ("unkown"/"occured").
            sys.stderr.write('Error: An unknown error occurred during '
                             'diacritization.\n')
            sys.exit(1)

        # Cleanup: only close handles we opened ourselves.
        if arguments['FILE'] is not None:
            fin.close()
        if arguments['--output'] is not None:
            fout.close()
        sys.exit(0)
    except KeyboardInterrupt:
        sys.stderr.write('Exiting...\n')
        sys.exit(1)
    except Exception:
        sys.stderr.write('Error: An unknown error occurred.\n')
        sys.exit(1)


if __name__ == '__main__':  # pragma: no cover
    main()
| 5,882 | 32.617143 | 79 | py |
camel_tools | camel_tools-master/camel_tools/cli/camel_arclean.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The CAMeL Tools Arabic cleaning utility.
Usage:
camel_arclean [-o OUTPUT | --output=OUTPUT] [FILE]
camel_arclean (-v | --version)
camel_arclean (-h | --help)
Options:
-o OUTPUT --output=OUTPUT
Output file. If not specified, output will be printed to stdout.
-h --help
Show this screen.
-v --version
Show version.
"""
from __future__ import print_function, absolute_import
import sys
from docopt import docopt
import six
import camel_tools as camelt
from camel_tools.utils.stringutils import force_encoding, force_unicode
from camel_tools.utils.charmap import CharMapper
__version__ = camelt.__version__
def _open_files(finpath, foutpath):
if finpath is None:
fin = sys.stdin
else:
try:
fin = open(finpath, 'r', encoding='utf-8')
except Exception:
sys.stderr.write('Error: Couldn\'t open input file {}.'
'\n'.format(repr(finpath)))
sys.exit(1)
if foutpath is None:
fout = sys.stdout
else:
try:
fout = open(foutpath, 'w', encoding='utf-8')
except Exception:
sys.stderr.write('Error: Couldn\'t open output file {}.'
'\n'.format(repr(foutpath)))
if finpath is not None:
fin.close()
sys.exit(1)
return fin, fout
def _arclean(mapper, fin, fout):
    """Clean ``fin`` line by line with ``mapper`` and write to ``fout``.

    On Python 3 the mapped string is written directly; on Python 2 it is
    re-encoded first.  The output stream is flushed once at the end.
    """
    for raw_line in fin:
        cleaned = mapper.map_string(force_unicode(raw_line))
        if six.PY3:
            fout.write(cleaned)
        else:
            fout.write(force_encoding(cleaned))
    fout.flush()
def main():  # pragma: no cover
    """Entry point for the ``camel_arclean`` CLI.

    Cleans input line by line using the builtin 'arclean' character map and
    exits 0 on success, 1 on any error.
    """
    try:
        version = ('CAMeL Tools v{}'.format(__version__))
        arguments = docopt(__doc__, version=version)

        # Open files (or just use stdin and stdout)
        fin, fout = _open_files(arguments['FILE'], arguments['--output'])

        try:
            mapper = CharMapper.builtin_mapper('arclean')
            _arclean(mapper, fin, fout)
        # If everything worked so far, this shouldn't happen
        except Exception:
            # BUGFIX: corrected "occured" -> "occurred" in the message.
            sys.stderr.write('Error: An error occurred during cleaning.\n')
            fin.close()
            fout.close()
            sys.exit(1)

        # Cleanup: only close handles we opened ourselves.
        if arguments['FILE'] is not None:
            fin.close()
        if arguments['--output'] is not None:
            fout.close()
        sys.exit(0)
    except KeyboardInterrupt:
        sys.stderr.write('Exiting...\n')
        sys.exit(1)
    except Exception:
        sys.stderr.write('Error: An unknown error occurred.\n')
        sys.exit(1)


if __name__ == '__main__':  # pragma: no cover
    main()
| 3,901 | 29.015385 | 79 | py |
camel_tools | camel_tools-master/camel_tools/cli/camel_morphology.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The CAMeL Tools morphological analyzer, generator, and reinflector.
Usage:
camel_morphology analyze
[-d DATABASE | --db=DATABASE]
[-b BACKOFF | --backoff=BACKOFF]
[-c | --cache]
[-o OUTPUT | --output=OUTPUT] [FILE]
camel_morphology generate
[-d DATABASE | --db=DATABASE]
[-b BACKOFF | --backoff=BACKOFF]
[-o OUTPUT | --output=OUTPUT] [FILE]
camel_morphology reinflect
[-d DATABASE | --db=DATABASE]
[-o OUTPUT | --output=OUTPUT] [FILE]
camel_morphology (-l | --list)
camel_morphology (-v | --version)
camel_morphology (-h | --help)
Options:
-b BACKOFF --backoff=BACKOFF
Backoff mode for analyzer and generator. In analyze mode, it can have
the following values: NONE, NOAN_ALL, NOAN_PROP, ADD_ALL, ADD_PROP.
In generate mode it can have the following values: NONE, REINFLECT.
[default: NONE]
-c --cache
Cache computed analyses (only in analyze mode).
-d DATABASE --db=DATABASE
Morphology database to use. DATABASE could be the name of a builtin
database or a path to a database file. [default: calima-msa-r13]
-o OUTPUT --output=OUTPUT
Output file. If not specified, output will be printed to stdout.
-l --list
List builtin databases with their respective versions.
-h --help
Show this screen.
-v --version
Show version.
"""
from __future__ import absolute_import
import collections
import sys
import re
from docopt import docopt
import six
import camel_tools as camelt
from camel_tools.utils.charsets import AR_DIAC_CHARSET
from camel_tools.utils.stringutils import force_unicode, force_encoding
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.analyzer import Analyzer
from camel_tools.morphology.generator import Generator
from camel_tools.morphology.reinflector import Reinflector
from camel_tools.morphology.errors import DatabaseError, AnalyzerError
from camel_tools.morphology.errors import GeneratorError, MorphologyError
__version__ = camelt.__version__
# Valid --backoff values in analyze mode.
_ANALYSIS_BACKOFFS = frozenset(('NONE', 'NOAN_ALL', 'NOAN_PROP', 'ADD_ALL',
                                'ADD_PROP'))
# Valid --backoff values in generate mode.
# NOTE(review): identifier spelling ("GENARATION") kept as-is for
# compatibility with existing references.
_GENARATION_BACKOFFS = frozenset(('NONE', 'REINFLECT'))
# NOTE(review): list_builtin_dbs() appears to yield entry objects with a
# .name attribute (see camel_diac), not plain strings — confirm before
# membership-testing with a string.
_BUILTIN_DBS = frozenset(MorphologyDB.list_builtin_dbs())
_DEFAULT_DB = 'calima-msa-r13'
# Matches any single Arabic diacritic character (used by _dediac).
_DIAC_RE = re.compile(r'[' + re.escape(u''.join(AR_DIAC_CHARSET)) + r']')
def _tokenize(s):
return s.split()
def _dediac(word):
    """Return ``word`` with all Arabic diacritic characters removed."""
    return _DIAC_RE.sub(u'', word)
def _to_int(s):
s = str(s)
try:
if not s.isdigit():
return None
return int(s)
except Exception:
return None
def _open_files(finpath, foutpath):
if finpath is None:
fin = sys.stdin
else:
try:
fin = open(finpath, 'r', encoding='utf-8')
except IOError:
sys.stderr.write('Error: Couldn\'t open input file {}.'
'\n'.format(repr(finpath)))
sys.exit(1)
if foutpath is None:
fout = sys.stdout
else:
try:
fout = open(foutpath, 'w', encoding='utf-8')
except IOError:
sys.stderr.write('Error: Couldn\'t open output file {}.'
'\n'.format(repr(foutpath)))
if finpath is not None:
fin.close()
sys.exit(1)
return fin, fout
def _list_dbs():
    """Print each builtin database as 'name<TAB>version' to stdout."""
    for entry in sorted(MorphologyDB.list_builtin_dbs()):
        sys.stdout.write('{}\t{}\n'.format(entry.name, entry.version))
def _serialize_analyses(fout, word, analyses, order, generation=False):
    """Render ``analyses`` for ``word`` as a printable multi-line string.

    The first line is '#WORD: <word>' (or '#LEMMA: <word>' when
    ``generation`` is set), followed by one line per unique analysis with
    fields emitted in ``order``, or 'NO_ANALYSIS' when empty.

    Note: ``fout`` is unused; the caller writes the returned string.
    """
    lines = collections.deque()
    header = u'LEMMA' if generation else u'WORD'
    lines.append(u'#{}: {}'.format(header, force_unicode(word)))

    if not analyses:
        lines.append(u'NO_ANALYSIS')
    else:
        # OrderedDict keys preserve first-seen order while deduplicating
        # identical renderings.
        unique = collections.OrderedDict()
        for analysis in analyses:
            fields = [u'{}:{}'.format(force_unicode(key),
                                      force_unicode(str(analysis[key])))
                      for key in order if key in analysis]
            unique.setdefault(u' '.join(fields), True)
        lines.extend(unique.keys())

    return u'\n'.join(lines)
def _parse_generator_line(line):
lemma = None
feats = {}
tokens = line.strip().split()
if len(tokens) < 1:
return None
lemma = tokens[0]
for token in tokens[1:]:
subtokens = token.split(':')
if len(subtokens) < 2:
return None
else:
feat = subtokens[0]
val = ':'.join(subtokens[1:])
feats[feat] = val
return (lemma, feats)
def _parse_reinflector_line(line):
word = None
feats = {}
tokens = line.strip().split()
if len(tokens) < 1:
return None
word = tokens[0]
for token in tokens[1:]:
subtokens = token.split(':')
if len(subtokens) < 2:
return None
else:
feat = subtokens[0]
val = ':'.join(subtokens[1:])
feats[feat] = val
return (word, feats)
def _analyze(db, fin, fout, backoff, cache):
    """Analyze input line by line, writing serialized analyses to ``fout``.

    Each line is whitespace-tokenized and every token is analyzed
    individually; each token's analyses block is followed by a blank line.
    When ``cache`` is set, the analyzer memoizes up to 1024 analyses.
    """
    if cache:
        analyzer = Analyzer(db, backoff, cache_size=1024)
    else:
        analyzer = Analyzer(db, backoff)
    line = force_unicode(fin.readline())
    while line:
        # NOTE(review): this branch is unreachable — the `while line` guard
        # already excludes empty strings.
        if len(line) == 0:
            line = force_unicode(fin.readline())
            continue
        line = line.strip()
        tokens = _tokenize(line)
        for token in tokens:
            analyses = analyzer.analyze(token)
            serialized = _serialize_analyses(fout, token, analyses, db.order)
            if six.PY3:
                fout.write(serialized)
            else:
                # Python 2: encode before writing to a byte stream.
                fout.write(force_encoding(serialized))
            fout.write('\n\n')
        line = force_unicode(fin.readline())
def _generate(db, fin, fout, backoff):
    """Generate surface forms for each input line of 'LEMMA feat:val ...'.

    Lines are read from ``fin`` one at a time; valid lines are generated
    with the DB's generator (falling back to reinflection of the
    dediacritized lemma when ``backoff == 'REINFLECT'`` and generation
    yields nothing) and the serialized analyses are written to ``fout``.
    Malformed or incomplete lines are reported on stderr — with a line
    number when input is a file — and skipped.
    """
    generator = Generator(db)
    reinflector = Reinflector(db) if backoff == 'REINFLECT' else None
    line = force_unicode(fin.readline())
    line_num = 1
    while line:
        line = line.strip()
        # Skip blank lines.
        if len(line) == 0:
            line = force_unicode(fin.readline())
            line_num += 1
            continue
        parsed = _parse_generator_line(line)
        if parsed is None:
            if fin is sys.stdin:
                sys.stderr.write('Error: Invalid input line.\n')
            else:
                sys.stderr.write(
                    'Error: Invalid input line ({}).\n'.format(line_num))
        else:
            lemma = parsed[0]
            feats = parsed[1]
            # Make sure lemma and pos are specified first
            if lemma is None:
                if fin is sys.stdin:
                    sys.stderr.write('Error: Missing lex/lemma feature.\n')
                else:
                    sys.stderr.write(
                        'Error: Missing lex/lemma feature. [{}].\n'.format(
                            line_num))
            elif 'pos' not in feats:
                if fin is sys.stdin:
                    sys.stderr.write('Error: Missing pos feature.\n')
                else:
                    sys.stderr.write(
                        'Error: Missing pos feature. [{}]\n'.format(
                            line_num))
            else:
                try:
                    analyses = generator.generate(lemma, feats)
                    if len(analyses) == 0 and backoff == 'REINFLECT':
                        word = _dediac(lemma)
                        analyses = reinflector.reinflect(word, feats)
                    serialized = _serialize_analyses(fout, lemma, analyses,
                                                     db.order, True)
                    if six.PY3:
                        fout.write(serialized)
                    else:
                        fout.write(force_encoding(serialized))
                    fout.write('\n\n')
                except GeneratorError as error:
                    if fin is sys.stdin:
                        sys.stderr.write('Error: {}.\n'.format(error.msg))
                    else:
                        sys.stderr.write('Error: {}. [{}]\n'.format(error.msg,
                                                                    line_num))
        # BUGFIX: was force_encoding(...), which returns bytes on Python 3
        # and would break the str operations on the next iteration; the
        # sibling _analyze/_reinflect loops use force_unicode here.
        line = force_unicode(fin.readline())
        line_num += 1
def _reinflect(db, fin, fout):
    """Reinflect each input line of 'WORD feat:val ...' and write results.

    Valid lines are reinflected and their serialized analyses written to
    ``fout``; malformed lines and morphology errors are reported on stderr
    (with a line number when input is a file) and skipped.
    """
    reinflector = Reinflector(db)
    line = force_unicode(fin.readline())
    line_num = 1
    while line:
        line = line.strip()
        # Skip blank lines.
        if len(line) == 0:
            line = force_unicode(fin.readline())
            line_num += 1
            continue
        parsed = _parse_reinflector_line(line)
        if parsed is None:
            if fin is sys.stdin:
                sys.stderr.write('Error: Invalid input line.\n')
            else:
                sys.stderr.write(
                    'Error: Invalid input line. [{}]\n'.format(line_num))
        else:
            word = parsed[0]
            feats = parsed[1]
            try:
                analyses = reinflector.reinflect(word, feats)
                serialized = _serialize_analyses(fout, word, analyses,
                                                 db.order)
                if six.PY3:
                    fout.write(serialized)
                else:
                    # Python 2: encode before writing to a byte stream.
                    fout.write(force_encoding(serialized))
                fout.write('\n\n')
            except MorphologyError as error:
                # This could be thrown by the analyzer, generator, or
                # reinflector.
                if fin is sys.stdin:
                    sys.stderr.write('Error: {}.\n'.format(error.msg))
                else:
                    sys.stderr.write('Error: {}. [{}]\n'.format(error.msg,
                                                                line_num))
        line = force_unicode(fin.readline())
        line_num += 1
def main():  # pragma: no cover
    """Entry point for the ``camel_morphology`` CLI.

    Dispatches to analyze/generate/reinflect mode after validating the
    backoff mode and loading the morphology database.  Exits 0 on success,
    1 on any error.
    """
    try:
        version = ('CAMeL Tools v{}'.format(__version__))
        arguments = docopt(__doc__, version=version)

        if arguments.get('--list', False):
            _list_dbs()
            # BUGFIX: listing databases is a successful operation; exit 0
            # for consistency with the other camel_* tools (was exit 1).
            sys.exit(0)

        analyze = arguments.get('analyze', False)
        generate = arguments.get('generate', False)
        reinflect = arguments.get('reinflect', False)
        cache = arguments.get('--cache', False)
        backoff = arguments.get('--backoff', 'NONE')

        # Make sure we have a valid backoff mode
        if backoff is None:
            backoff = 'NONE'
        if analyze and backoff not in _ANALYSIS_BACKOFFS:
            sys.stderr.write('Error: invalid backoff mode.\n')
            sys.exit(1)
        if generate and backoff not in _GENARATION_BACKOFFS:
            sys.stderr.write('Error: invalid backoff mode.\n')
            sys.exit(1)

        # Open files (or just use stdin and stdout)
        fin, fout = _open_files(arguments['FILE'], arguments['--output'])

        # Determine required DB flags
        if analyze:
            dbflags = 'a'
        elif generate and backoff == 'NONE':
            dbflags = 'g'
        else:
            dbflags = 'r'

        # Load DB
        try:
            dbname = arguments.get('--db', _DEFAULT_DB)
            # BUGFIX: _BUILTIN_DBS holds DB entry objects (with a .name
            # attribute, as used in camel_diac), so a plain string
            # membership test never matched; compare against names while
            # tolerating plain-string entries.
            builtin_names = {getattr(e, 'name', e) for e in _BUILTIN_DBS}
            if dbname in builtin_names:
                db = MorphologyDB.builtin_db(dbname, dbflags)
            else:
                db = MorphologyDB(dbname, dbflags)
        except DatabaseError:
            sys.stderr.write('Error: Couldn\'t parse database.\n')
            sys.exit(1)
        except IOError:
            sys.stderr.write('Error: Database file could not be read.\n')
            sys.exit(1)

        # Continue execution in requested mode
        if analyze:
            try:
                _analyze(db, fin, fout, backoff, cache)
            except AnalyzerError as error:
                sys.stderr.write('Error: {}\n'.format(error.msg))
                sys.exit(1)
            except IOError:
                sys.stderr.write('Error: An IO error occurred.\n')
                sys.exit(1)
        elif generate:
            try:
                _generate(db, fin, fout, backoff)
            except IOError:
                sys.stderr.write('Error: An IO error occurred.\n')
                sys.exit(1)
        elif reinflect:
            try:
                _reinflect(db, fin, fout)
            except IOError:
                sys.stderr.write('Error: An IO error occurred.\n')
                sys.exit(1)

        sys.exit(0)
    except KeyboardInterrupt:
        sys.stderr.write('Exiting...\n')
        sys.exit(1)
    except Exception:
        sys.stderr.write('Error: An unknown error occurred.\n')
        sys.exit(1)


if __name__ == '__main__':
    main()
| 14,086 | 29.757642 | 79 | py |
camel_tools | camel_tools-master/camel_tools/cli/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
def open_files(finpath, foutpath):
    """Open input/output files, defaulting to stdin/stdout when None.

    Shared helper for the camel_* CLI tools.  Exits the process with
    status 1 if either path cannot be opened, closing any already-opened
    input handle first.
    """
    if finpath is not None:
        try:
            fin = open(finpath, 'r', encoding='utf-8')
        except OSError:
            sys.stderr.write('Error: Couldn\'t open input file {}.'
                             '\n'.format(repr(finpath)))
            sys.exit(1)
    else:
        fin = sys.stdin

    if foutpath is not None:
        try:
            fout = open(foutpath, 'w', encoding='utf-8')
        except OSError:
            sys.stderr.write('Error: Couldn\'t open output file {}.'
                             '\n'.format(repr(foutpath)))
            # Avoid leaking the input handle on failure.
            if finpath is not None:
                fin.close()
            sys.exit(1)
    else:
        fout = sys.stdout

    return fin, fout
| 1,914 | 34.462963 | 79 | py |
camel_tools | camel_tools-master/camel_tools/cli/camel_word_tokenize.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The CAMeL Tools word tokenization utility.
This tool splits words from punctuation while collapsing contiguous segments of
spaces into a single whitespace character. It is also language agnostic and
splits all characters marked as punctuation or symbols in the Unicode
specification.
For example the following sentence:
> Hello, world!!!!
becomes:
> Hello , world ! ! ! !
At the moment, this tool splits all punctuation indiscriminately.
Usage:
camel_word_tokenize [-o OUTPUT | --output=OUTPUT] [FILE]
camel_word_tokenize (-v | --version)
camel_word_tokenize (-h | --help)
Options:
-o OUTPUT --output=OUTPUT
Output file. If not specified, output will be printed to stdout.
-h --help
Show this screen.
-v --version
Show version.
"""
import sys
from docopt import docopt
import camel_tools
from camel_tools.tokenizers.word import simple_word_tokenize
from camel_tools.cli.utils import open_files
from camel_tools.cli.utils import open_files
__version__ = camel_tools.__version__
# (name, callable) pairs of available tokenizers.  Only one exists today,
# but the list keeps the door open for alternatives (see main()).
_TOKENIZERS = [
    ('simple_word_tokenize', simple_word_tokenize)
]
def main():  # pragma: no cover
    """Entry point for the ``camel_word_tokenize`` CLI.

    Tokenizes input line by line with simple_word_tokenize and exits 0 on
    success, 1 on any error.
    """
    try:
        version = ('CAMeL Tools v{}'.format(__version__))
        arguments = docopt(__doc__, version=version)

        # A bit redundant for now, but makes adding new tokenizers easier in
        # future
        tokenize_fn = simple_word_tokenize

        # Open files (or just use stdin and stdout)
        fin, fout = open_files(arguments['FILE'], arguments['--output'])

        # Tokenize lines
        try:
            for line in fin:
                fout.write(' '.join(tokenize_fn(line)))
                fout.write('\n')
        # If everything worked so far, this shouldn't happen
        except Exception:  # pylint: disable=W0703
            # BUGFIX: corrected typos in the user-facing message
            # ("unkown"/"occured").
            sys.stderr.write('Error: An unknown error occurred during '
                             'tokenization.\n')
            sys.exit(1)

        # Cleanup: only close handles we opened ourselves.
        if arguments['FILE'] is not None:
            fin.close()
        if arguments['--output'] is not None:
            fout.close()
        sys.exit(0)
    except KeyboardInterrupt:
        sys.stderr.write('Exiting...\n')
        sys.exit(1)
    except Exception:
        sys.stderr.write('Error: An unknown error occurred.\n')
        sys.exit(1)


if __name__ == '__main__':  # pragma: no cover
    main()
| 3,552 | 28.363636 | 79 | py |
camel_tools | camel_tools-master/camel_tools/cli/camel_dediac.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The CAMeL Tools de-diacritization utility.
Usage:
camel_dediac [-s <SCHEME> | --scheme=<SCHEME>]
[-m <MARKER> | --marker=<MARKER>]
[-I | --ignore-markers]
[-S | --strip-markers]
[-o OUTPUT | --output=OUTPUT] [FILE]
camel_dediac (-l | --list)
camel_dediac (-v | --version)
camel_dediac (-h | --help)
Options:
-s <SCHEME> --scheme=<SCHEME>
The encoding scheme of the input text. [default: ar]
-o OUTPUT --output=OUTPUT
Output file. If not specified, output will be printed to stdout.
-m <MARKER> --marker=<MARKER>
Marker used to prefix tokens not to be de-diacritized.
[default: @@IGNORE@@]
-I --ignore-markers
De-diacritize words prefixed with a marker.
-S --strip-markers
Remove prefix markers in output if --ignore-markers is set.
-l --list
Show a list of available input encoding schemes.
-h --help
Show this screen.
-v --version
Show version.
"""
import re
import sys
from docopt import docopt
import camel_tools as camelt
from camel_tools.utils.dediac import dediac_ar, dediac_bw, dediac_safebw
from camel_tools.utils.dediac import dediac_xmlbw, dediac_hsb
from camel_tools.cli.utils import open_files
__version__ = camelt.__version__
_BUILTIN_SCHEMES = [
('ar', 'Arabic script', dediac_ar),
('bw', 'Buckwalter encoding', dediac_bw),
('safebw', 'Safe Buckwalter encoding', dediac_safebw),
('xmlbw', 'XML Buckwalter encoding', dediac_xmlbw),
('hsb', 'Habash-Soudi-Buckwalter encoding', dediac_hsb)
]
_WHITESPACE_RE = re.compile(r'\s+|\S+')
def _dediac_marked_tokens(tokens, dediac_fn, marker=None, strip_markers=False):
result = []
n = len(marker)
if strip_markers:
for token in tokens:
if token.startswith(marker):
result.append(token[n:])
else:
result.append(dediac_fn(token))
else:
for token in tokens:
if token.startswith(marker):
result.append(token)
else:
result.append(dediac_fn(token))
return result
def main():  # pragma: no cover
    """Entry point for the ``camel_dediac`` CLI.

    Resolves the requested encoding scheme, de-diacritizes input line by
    line, and exits 0 on success, 1 on any error.
    """
    try:
        version = ('CAMeL Tools v{}'.format(__version__))
        arguments = docopt(__doc__, version=version)

        # List available schemes and exit successfully.
        if arguments['--list']:
            for scheme in _BUILTIN_SCHEMES:
                print("{} {}".format(scheme[0].ljust(8), scheme[1]))
            sys.exit(0)

        # Resolve the scheme id to its de-diacritization function.
        dediac_fn = None
        for scheme in _BUILTIN_SCHEMES:
            if scheme[0] == arguments['--scheme']:
                dediac_fn = scheme[2]
        if dediac_fn is None:
            sys.stderr.write('Error: {} is not a valid scheme.\n'
                             'Run `camel_dediac -l` to see the list'
                             ' of available schemes.'
                             '\n'.format(repr(arguments['--scheme'])))
            sys.exit(1)

        strip_markers = arguments['--strip-markers']
        marker = arguments['--marker']
        ignore_markers = arguments['--ignore-markers']

        # Open files (or just use stdin and stdout)
        fin, fout = open_files(arguments['FILE'], arguments['--output'])

        # De-diacritize lines
        try:
            if ignore_markers:
                # Tokenize so marked tokens can be skipped individually.
                for line in fin:
                    toks = _WHITESPACE_RE.findall(line)
                    dediac_toks = _dediac_marked_tokens(toks, dediac_fn,
                                                        marker, strip_markers)
                    fout.write(''.join(dediac_toks))
            else:
                for line in fin:
                    fout.write(dediac_fn(line))
        # If everything worked so far, this shouldn't happen
        except Exception:  # pylint: disable=W0703
            # BUGFIX: corrected typos in the user-facing message
            # ("unkown"/"occured").
            sys.stderr.write('Error: An unknown error occurred during '
                             'de-diacritization.\n')
            sys.exit(1)

        # Cleanup: only close handles we opened ourselves.
        if arguments['FILE'] is not None:
            fin.close()
        if arguments['--output'] is not None:
            fout.close()
        sys.exit(0)
    except KeyboardInterrupt:
        sys.stderr.write('Exiting...\n')
        sys.exit(1)
    except Exception:
        sys.stderr.write('Error: An unknown error occurred.\n')
        sys.exit(1)


if __name__ == '__main__':  # pragma: no cover
    main()
camel_tools | camel_tools-master/camel_tools/cli/camel_transliterate.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The CAMeL Tools transliteration utility.
Usage:
camel_transliterate (-s SCHEME | --scheme=SCHEME)
[-m MARKER | --marker=MARKER]
[-I | --ignore-markers]
[-S | --strip-markers]
[-o OUTPUT | --output=OUTPUT] [FILE]
camel_transliterate (-l | --list)
camel_transliterate (-v | --version)
camel_transliterate (-h | --help)
Options:
-s SCHEME --scheme
Scheme used for transliteration.
-o OUTPUT --output=OUTPUT
Output file. If not specified, output will be printed to stdout.
-m MARKER --marker=MARKER
Marker used to prefix tokens not to be transliterated.
[default: @@IGNORE@@]
-I --ignore-markers
Transliterate marked words as well.
-S --strip-markers
Remove markers in output.
-l --list
Show a list of available transliteration schemes.
-h --help
Show this screen.
-v --version
Show version.
"""
from __future__ import print_function, absolute_import
import sys
from docopt import docopt
import six
import camel_tools as camelt
from camel_tools.utils.stringutils import force_encoding, force_unicode
from camel_tools.utils.charmap import CharMapper
from camel_tools.utils.transliterate import Transliterator
__version__ = camelt.__version__
# (scheme id, human-readable description) pairs for every builtin
# transliteration scheme; ids double as CharMapper.builtin_mapper() names
# (see main()).
_BUILTIN_SCHEMES = [
    ('ar2bw', 'Arabic to Buckwalter'),
    ('ar2safebw', 'Arabic to Safe Buckwalter'),
    ('ar2xmlbw', 'Arabic to XML Buckwalter'),
    ('ar2hsb', 'Arabic to Habash-Soudi-Buckwalter'),
    ('bw2ar', 'Buckwalter to Arabic'),
    ('bw2safebw', 'Buckwalter to Safe Buckwalter'),
    ('bw2xmlbw', 'Buckwalter to XML Buckwalter'),
    ('bw2hsb', 'Buckwalter to Habash-Soudi-Buckwalter'),
    ('safebw2ar', 'Safe Buckwalter to Arabic'),
    ('safebw2bw', 'Safe Buckwalter to Buckwalter'),
    ('safebw2xmlbw', 'Safe Buckwalter to XML Buckwalter'),
    ('safebw2hsb', 'Safe Buckwalter to Habash-Soudi-Buckwalter'),
    ('xmlbw2ar', 'XML Buckwalter to Arabic'),
    ('xmlbw2bw', 'XML Buckwalter to Buckwalter'),
    ('xmlbw2safebw', 'XML Buckwalter to Safe Buckwalter'),
    ('xmlbw2hsb', 'XML Buckwalter to Habash-Soudi-Buckwalter'),
    ('hsb2ar', 'Habash-Soudi-Buckwalter to Arabic'),
    ('hsb2bw', 'Habash-Soudi-Buckwalter to Buckwalter'),
    ('hsb2safebw', 'Habash-Soudi-Buckwalter to Safe Buckwalter'),
    # NOTE(review): description says "to Habash-Soudi-Buckwalter" but the id
    # is hsb2xmlbw — presumably "to XML Buckwalter"; confirm before changing.
    ('hsb2xmlbw', 'Habash-Soudi-Buckwalter to Habash-Soudi-Buckwalter'),
]
def _open_files(finpath, foutpath):
if finpath is None:
fin = sys.stdin
else:
try:
fin = open(finpath, 'r', encoding='utf-8')
except OSError:
sys.stderr.write('Error: Couldn\'t open input file {}.'
'\n'.format(repr(finpath)))
sys.exit(1)
if foutpath is None:
fout = sys.stdout
else:
try:
fout = open(foutpath, 'w', encoding='utf-8')
except OSError:
sys.stderr.write('Error: Couldn\'t open output file {}.'
'\n'.format(repr(foutpath)))
if finpath is not None:
fin.close()
sys.exit(1)
return fin, fout
def main():  # pragma: no cover
    """Entry point for the ``camel_transliterate`` CLI.

    Loads the requested builtin transliteration scheme, transliterates
    input line by line, and exits 0 on success, 1 on any error.
    """
    try:
        version = ('CAMeL Tools v{}'.format(__version__))
        arguments = docopt(__doc__, version=version)

        # List available schemes and exit successfully.
        if arguments['--list']:
            for scheme in _BUILTIN_SCHEMES:
                print("{} {}".format(scheme[0].ljust(20), scheme[1]))
            sys.exit(0)

        # Validate the requested scheme before doing any work.
        if arguments['--scheme'] is not None:
            if arguments['--scheme'] not in [s[0] for s in _BUILTIN_SCHEMES]:
                sys.stderr.write('Error: {} is not a valid scheme.\n'
                                 'Run `camel_transliterate -l` to see the list'
                                 ' of available schemes.'
                                 '\n'.format(repr(arguments['--scheme'])))
                sys.exit(1)

        if arguments['--marker'] is None:
            marker = '@@IGNORE@@'
        else:
            marker = arguments['--marker']
        ignore_markers = arguments['--ignore-markers']
        strip_markers = arguments['--strip-markers']

        # Open files (or just use stdin and stdout)
        fin, fout = _open_files(arguments['FILE'], arguments['--output'])

        # Load the CharMapper and initialize a Transliterator with it
        try:
            mapper = CharMapper.builtin_mapper(arguments['--scheme'])
            trans = Transliterator(mapper, marker)
        except Exception:  # pylint: disable=W0703
            sys.stderr.write('Error: Could not load builtin scheme'
                             ' {}.\n'.format(repr(arguments['--scheme'])))
            sys.exit(1)

        # Transliterate lines
        try:
            for line in fin:
                line = force_unicode(line)
                if six.PY3:
                    fout.write(
                        trans.transliterate(line, strip_markers,
                                            ignore_markers))
                else:
                    # Python 2: encode before writing to a byte stream.
                    fout.write(
                        force_encoding(
                            trans.transliterate(line, strip_markers,
                                                ignore_markers)))
            fout.flush()
        # If everything worked so far, this shouldn't happen
        except Exception:  # pylint: disable=W0703
            # BUGFIX: corrected typos in the user-facing message
            # ("unkown"/"occured").
            sys.stderr.write('Error: An unknown error occurred during '
                             'transliteration.\n')
            sys.exit(1)

        # Cleanup: only close handles we opened ourselves.
        if arguments['FILE'] is not None:
            fin.close()
        if arguments['--output'] is not None:
            fout.close()
        sys.exit(0)
    except KeyboardInterrupt:
        sys.stderr.write('Exiting...\n')
        sys.exit(1)
    except Exception:
        sys.stderr.write('Error: An unknown error occurred.\n')
        sys.exit(1)


if __name__ == '__main__':  # pragma: no cover
    main()
camel_tools | camel_tools-master/camel_tools/cli/__init__.py | # MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This sub-module contains scripts for all provided command-line tools.
"""
| 1,203 | 47.16 | 79 | py |
camel_tools | camel_tools-master/camel_tools/morphology/reinflector.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The reinflector component of CAMeL Tools.
"""
from __future__ import absolute_import
from collections import deque
import re
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.analyzer import Analyzer
from camel_tools.morphology.generator import Generator
from camel_tools.morphology.errors import ReinflectorError
from camel_tools.morphology.errors import InvalidReinflectorFeature
from camel_tools.morphology.errors import InvalidReinflectorFeatureValue
from camel_tools.utils.dediac import dediac_ar
# Clitic features; requesting any of these puts reinflection into clitic
# mode (see Reinflector.reinflect).
_CLITIC_FEATS = frozenset(['enc0', 'prc0', 'prc1', 'prc2', 'prc3'])
# Features never carried over from an analysis into generation.
_IGNORED_FEATS = frozenset(['diac', 'lex', 'bw', 'gloss', 'source', 'stem',
                            'stemcat', 'lmm', 'dediac', 'caphi', 'catib6',
                            'ud', 'd3seg', 'atbseg', 'd2seg', 'd1seg', 'd1tok',
                            'd2tok', 'atbtok', 'd3tok', 'bwtok', 'root',
                            'pattern', 'freq', 'pos_logprob', 'lex_logprob',
                            'pos_lex_logprob', 'stemgloss'])
# Features copied from an analysis only when explicitly requested.
_SPECIFIED_FEATS = frozenset(['form_gen', 'form_num'])
# Features dropped from generation when clitic features are requested.
_CLITIC_IGNORED_FEATS = frozenset(['stt', 'cas', 'mod'])
# NOTE(review): _FILTER_FEATS is not referenced anywhere in this module's
# visible code.
_FILTER_FEATS = frozenset(['pos', 'lex'])
# Features that accept the wildcard value 'ANY'.
_ANY_FEATS = frozenset(['per', 'gen', 'num', 'cas', 'stt', 'vox', 'mod',
                        'asp'])
# Splits a lemma from its '-'/'_' disambiguation suffix.
_LEMMA_SPLIT_RE = re.compile(u'-|_')
class Reinflector(object):
    """Morphological reinflector component.

    Arguments:
        db (:obj:`~camel_tools.morphology.database.MorphologyDB`): Database to
            use for generation. Must be opened in reinflection mode or both
            analysis and generation modes.

    Raises:
        :obj:`~camel_tools.morphology.errors.ReinflectorError`: If **db** is
            not an instance of
            :obj:`~camel_tools.morphology.database.MorphologyDB` or if **db**
            does not support reinflection.
    """

    def __init__(self, db):
        if not isinstance(db, MorphologyDB):
            raise ReinflectorError('DB is not an instance of MorphologyDB')
        if not db.flags.generation:
            raise ReinflectorError('DB does not support reinflection')

        self._db = db

        # Reinflection is analysis of the input word followed by generation
        # from each compatible analysis.
        self._analyzer = Analyzer(db)
        self._generator = Generator(db)

    def reinflect(self, word, feats):
        """Generate surface forms and their associated analyses for a given
        word and a given set of (possibly underspecified) features.
        The surface form is accessed through the `diac` feature.

        Arguments:
            word (:obj:`str`): Word to reinflect.
            feats (:obj:`dict`): Dictionary of features.
                See :doc:`/reference/camel_morphology_features` for more
                information on features and their values.

        Returns:
            :obj:`list` of :obj:`dict`: List of generated analyses.
            See :doc:`/reference/camel_morphology_features` for more
            information on features and their values.

        Raises:
            :obj:`~camel_tools.morphology.errors.InvalidReinflectorFeature`:
                If a feature is given that is not defined in database.
            :obj:`~camel_tools.morphology.errors.InvalidReinflectorFeatureValue`:
                If an invalid value is given to a feature or if 'pos' feature
                is not defined.
        """
        analyses = self._analyzer.analyze(word)

        # 'not analyses' already covers the empty list; the former extra
        # 'len(analyses) == 0' check was redundant.
        if not analyses:
            return []

        # Validate the requested features and their values up front.
        for feat in feats:
            if feat not in self._db.defines:
                raise InvalidReinflectorFeature(feat)
            elif self._db.defines[feat] is not None:
                # Some features accept the wildcard value 'ANY'.
                if feat in _ANY_FEATS and feats[feat] == 'ANY':
                    continue
                elif feats[feat] not in self._db.defines[feat]:
                    raise InvalidReinflectorFeatureValue(feat, feats[feat])

        # Clitic mode: certain features are ignored when any clitic feature
        # is requested.
        has_clitics = any(feat in feats for feat in _CLITIC_FEATS)

        results = deque()

        for analysis in analyses:
            # Only analyses that match the input word's dediacritized form
            # (and any requested pos/lex filters) are reinflected.
            if dediac_ar(analysis['diac']) != dediac_ar(word):
                continue
            if 'pos' in feats and feats['pos'] != analysis['pos']:
                continue

            lemma = _LEMMA_SPLIT_RE.split(analysis['lex'])[0]

            if 'lex' in feats and feats['lex'] != lemma:
                continue

            is_valid = True
            generate_feats = {}

            for feat in analysis.keys():
                if feat in _IGNORED_FEATS:
                    continue
                elif feat in _SPECIFIED_FEATS and feat not in feats:
                    continue
                elif has_clitics and feat in _CLITIC_IGNORED_FEATS:
                    continue
                else:
                    if feat in feats:
                        if feats[feat] == 'ANY':
                            continue
                        elif analysis[feat] != 'na':
                            generate_feats[feat] = feats[feat]
                        else:
                            # Requested a value for a feature the analysis
                            # marks as non-applicable; skip this analysis.
                            is_valid = False
                            break
                    elif analysis[feat] != 'na':
                        generate_feats[feat] = analysis[feat]

            if is_valid:
                generated = self._generator.generate(lemma, generate_feats)
                if generated is not None:
                    results.extend(generated)

        # Deduplicate analyses while preserving generation order. The
        # previous set-of-tuples approach produced nondeterministic
        # ordering (it was marked as a temporary fix).
        unique = {tuple(analysis.items()): analysis for analysis in results}
        return list(unique.values())

    def all_feats(self):
        """Return a set of all features provided by the database used in this
        reinflector instance.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set all features provided by
            the database used in this reinflector instance.
        """
        return self._db.all_feats()

    def tok_feats(self):
        """Return a set of tokenization features provided by the database used
        in this reinflector instance.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set tokenization features
            provided by the database used in this reinflector instance.
        """
        return self._db.tok_feats()
| 7,462 | 37.271795 | 81 | py |
camel_tools | camel_tools-master/camel_tools/morphology/errors.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Morphology error classes.
"""
class MorphologyError(Exception):
    """Base class for all morphology errors.

    Args:
        msg (:obj:`str`): A message describing the error.
    """
    def __init__(self, msg):
        self.msg = msg
    def __str__(self):
        # msg may be any object; render it through str().
        return str(self.msg)
# Database Errors
class DatabaseError(MorphologyError):
    """Base class for all errors raised by the Database component."""

    def __init__(self, msg):
        super().__init__(msg)
class InvalidBuiltinDatabaseName(DatabaseError):
    """Raised when a builtin database name does not exist."""

    def __init__(self, dbname):
        super().__init__(
            'Invalid builtin database with name {}'.format(repr(dbname)))
class InvalidDatabaseFlagError(DatabaseError):
    """Raised when an invalid database flag value is given."""

    def __init__(self, flag):
        super().__init__('Invalid flag value {}'.format(repr(flag)))
class DatabaseParseError(DatabaseError):
    """Raised when a database file cannot be parsed."""

    def __init__(self, msg):
        super().__init__('Error parsing database ({})'.format(msg))
# Analyzer Errors
class AnalyzerError(MorphologyError):
    """Raised by the Analyzer component."""

    def __init__(self, msg):
        super().__init__(msg)
# Generator Errors
class GeneratorError(MorphologyError):
    """Base class for all errors raised by the Generator component."""

    def __init__(self, msg):
        super().__init__(msg)
class InvalidGeneratorFeature(GeneratorError):
    """Raised when an unknown feature is given to the generator."""

    def __init__(self, feat):
        super().__init__('Invalid feature {}'.format(repr(feat)))
class InvalidGeneratorFeatureValue(GeneratorError):
    """Raised when an invalid value is given to a generator feature."""

    def __init__(self, feat, val):
        super().__init__(
            'Invalid value {} for feature {}'.format(repr(val), repr(feat)))
# Reinflector Errors
class ReinflectorError(MorphologyError):
    """Base class for all errors raised by the Reinflector component."""

    def __init__(self, msg):
        super().__init__(msg)
class InvalidReinflectorFeature(ReinflectorError):
    """Error thrown when an invalid feature is given to the reinflector.

    Args:
        feat (:obj:`str`): Name of the invalid feature.
    """
    def __init__(self, feat):
        # Fix: initialize through ReinflectorError; the original erroneously
        # called the unrelated GeneratorError.__init__.
        ReinflectorError.__init__(self,
                                  'Invalid feature {}'.format(repr(feat)))
class InvalidReinflectorFeatureValue(ReinflectorError):
    """Error thrown when an invalid value is given to a reinflector feature.

    Args:
        feat (:obj:`str`): Feature name.
        val: The invalid value given for **feat**.
    """
    def __init__(self, feat, val):
        # Fix: initialize through ReinflectorError; the original erroneously
        # called the unrelated GeneratorError.__init__.
        ReinflectorError.__init__(
            self,
            'Invalid value {} for feature {}'.format(repr(val), repr(feat)))
| 3,942 | 27.366906 | 79 | py |
camel_tools | camel_tools-master/camel_tools/morphology/utils.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utility functions used by the various morphology components.
"""
import copy
import re
import sys
# Features which should be concatenated (with '+') when generating analyses
_JOIN_FEATS = frozenset(['gloss', 'bw'])
_CONCAT_FEATS = frozenset(['diac', 'pattern', 'caphi', 'catib6', 'ud'])
# Features concatenated without a separator
_CONCAT_FEATS_NONE = frozenset(['d3tok', 'd3seg', 'atbseg', 'd2seg', 'd1seg',
                                'd1tok', 'd2tok', 'atbtok', 'bwtok'])
_LOGPROB_FEATS = frozenset(['pos_logprob', 'lex_logprob', 'pos_lex_logprob'])
# Tokenization and segmentation schemes to which Sun letters and Fatha after
# Alif rewrite rules apply
_TOK_SCHEMES_1 = frozenset(['d1tok', 'd2tok', 'atbtok', 'd1seg', 'd2seg',
                            'd3seg', 'atbseg'])
# Tokenization and segmentation schemes to which only the Fatha after Alif
# rewrite rule apply
_TOK_SCHEMES_2 = frozenset(['d3tok', 'd3seg'])
# Splits lemmas on '_' and '-'
_STRIP_LEX_RE = re.compile('_|-')
# Sun letters with definite article
_REWRITE_DIAC_RE_1 = re.compile(u'#\\+*([\u062a\u062b\u062f\u0630\u0631\u0632'
                                u'\u0633\u0634\u0635\u0636\u0637\u0638\u0644'
                                u'\u0646])')
# Moon letters with definite article
_REWRITE_DIAC_RE_2 = re.compile(u'#\\+*')
# Fatha after Alif
_REWRITE_DIAC_RE_3 = re.compile(u'\u0627\\+?\u064e([\u0629\u062a])')
# Hamza Wasl
_REWRITE_DIAC_RE_4 = re.compile(u'\u0671')
# Remove '+'s
_REWRITE_DIAC_RE_5 = re.compile(u'\\+')
# Fix Multiple Shadda's
# FIXME: Remove after DB fix
_REWRITE_DIAC_RE_6 = re.compile(u'\u0651+')
# Sun letters
_REWRITE_CAPHI_RE_1 = re.compile(u'(l-)\\+(t\\_|th\\_|d\\_|th\\.\\_|r\\_|z\\_|'
                                 u's\\_|sh\\_|s\\.\\_|d\\.\\_|t\\.\\_|'
                                 u'dh\\.\\_|l\\_|n\\_|dh\\_)')
# Replace shadda
_REWRITE_CAPHI_RE_2 = re.compile(u'(\\S)[-]*\\+~')
# Replace ending i_y with ii if suffix is not a vowel
_REWRITE_CAPHI_RE_3 = re.compile(u'i\\_y-\\+([^iau]+|$)')
# Replacing ending u_w with uu if suffix is not a vowel
_REWRITE_CAPHI_RE_4 = re.compile(u'u\\_w-\\+([^iau]+|$)')
# Remove hamza wasl if preceded by a vowel
_REWRITE_CAPHI_RE_5 = re.compile(u'([iua])\\+-2_[iua]')
# Remove hamza wasl if preceded by a non-vowel
_REWRITE_CAPHI_RE_6 = re.compile(u'(.+)\\+-2_([iua])')
# Handle _u+w_ cases followed by non-vowels (eg. 2_u+w_l_ii)
_REWRITE_CAPHI_RE_7 = re.compile(u'u\\+w(_+[^ioua])')
# Handle stems followed that end with taa marboutah
_REWRITE_CAPHI_RE_8 = re.compile(u'p-\\+([iua])')
# Compress alef madda followed by fatha followed by short vowels
_REWRITE_CAPHI_RE_9 = re.compile(u'aa\\+a[_]*')
# Remove '+'s
_REWRITE_CAPHI_RE_10 = re.compile(u'[\\+-]')
# Remove multiple '_'
_REWRITE_CAPHI_RE_11 = re.compile(u'_+')
# Remove initial and trailing underscores and a trailing taa marboutah
_REWRITE_CAPHI_RE_12 = re.compile(u'((^\\_+)|(\\_p?\\_*$))')
# Normalize tanwyn
_NORMALIZE_TANWYN_FA_RE = re.compile(u'\u064b\u0627')
_NORMALIZE_TANWYN_FY_RE = re.compile(u'\u064b\u0649')
_NORMALIZE_TANWYN_AF_RE = re.compile(u'\u0627\u064b')
_NORMALIZE_TANWYN_YF_RE = re.compile(u'\u0649\u064b')
# Simple Arabic script to CAPHI map
# NOTE(review): taa marboutah (U+0629) is intentionally absent from this map
# — confirm against callers.
_AR2CAPHI = {
    '\u0621': '2',
    '\u0622': '2_aa',
    '\u0623': '2',
    '\u0624': '2',
    '\u0625': '2',
    '\u0626': '2',
    '\u0627': 'aa',
    '\u0628': 'b',
    '\u062A': 't',
    '\u062B': 'th',
    '\u062C': 'j',
    '\u062D': '7',
    '\u062E': 'kh',
    '\u062F': 'd',
    '\u0630': 'dh',
    '\u0631': 'r',
    '\u0632': 'z',
    '\u0633': 's',
    '\u0634': 'sh',
    '\u0635': 's.',
    '\u0636': 'd.',
    '\u0637': 't.',
    '\u0638': 'dh.',
    '\u0639': '3',
    '\u063A': 'gh',
    '\u0641': 'f',
    '\u0642': 'q',
    '\u0643': 'k',
    '\u0644': 'l',
    '\u0645': 'm',
    '\u0646': 'n',
    '\u0647': 'h',
    '\u0648': 'w',
    '\u0649': 'aa',
    '\u064A': 'y'
}
def strip_lex(lex):
    """Return the bare lemma of *lex*.

    Lemmas may carry a disambiguation suffix introduced by '_' or '-';
    everything from the first such separator onwards is dropped.
    """
    segments = _STRIP_LEX_RE.split(lex)
    return segments[0]
def simple_ar_to_caphi(ar_str):
    """Convert Arabic script to CAPHI.

    Args:
        ar_str (:obj:`str`): String to convert.

    Returns:
        :obj:`str`: CAPHI string.
    """
    # A word-initial bare Alif maps to a glottal stop ('2'), so rewrite it
    # as Alif-Hamza before mapping.
    if ar_str.startswith(u'\u0627'):
        ar_str = u'\u0623' + ar_str[1:]
    caphi_units = [_AR2CAPHI[char] for char in ar_str if char in _AR2CAPHI]
    return '_'.join(caphi_units)
def normalize_tanwyn(word, mode='AF'):
    """Normalize the order of tanwyn fatha (U+064B) and its carrier letter.

    Args:
        word (:obj:`str`): Word to normalize.
        mode (:obj:`str`, optional): Target order. 'FA' places the tanwyn
            before the Alif/Alif-Maqsura; any other value (default 'AF')
            places it after.

    Returns:
        :obj:`str`: The normalized word.
    """
    # Bug fix: previously each branch substituted a pattern with an
    # identical replacement string, making both branches no-ops. Each mode
    # must rewrite the *opposite* order into the requested one.
    if mode == 'FA':
        # Alif+tanwyn -> tanwyn+Alif (and likewise for Alif-Maqsura).
        word = _NORMALIZE_TANWYN_AF_RE.sub(u'\u064b\u0627', word)
        word = _NORMALIZE_TANWYN_YF_RE.sub(u'\u064b\u0649', word)
    else:
        # tanwyn+Alif -> Alif+tanwyn (and likewise for Alif-Maqsura).
        word = _NORMALIZE_TANWYN_FA_RE.sub(u'\u0627\u064b', word)
        word = _NORMALIZE_TANWYN_FY_RE.sub(u'\u0649\u064b', word)
    return word
def rewrite_diac(word):
    """Apply orthographic rewrite rules to a diacritized surface form.

    The rules must be applied in this fixed order. In the input, '#' marks
    the definite-article boundary and '+' marks morpheme boundaries.
    """
    # Sun letter after the definite article: drop marker, add shadda.
    word = _REWRITE_DIAC_RE_1.sub(u'\\1\u0651', word)
    # Moon letter after the definite article: just drop the marker.
    word = _REWRITE_DIAC_RE_2.sub(u'', word)
    # Drop the Fatha between Alif and a following Taa/Taa-Marbuta.
    word = _REWRITE_DIAC_RE_3.sub(u'\u0627\\1', word)
    # Replace Hamza Wasl (U+0671) with plain Alif.
    word = _REWRITE_DIAC_RE_4.sub(u'\u0627', word)
    # Remove remaining morpheme-boundary '+' markers.
    word = _REWRITE_DIAC_RE_5.sub(u'', word)
    # Collapse runs of Shadda into a single Shadda (DB artifact).
    word = _REWRITE_DIAC_RE_6.sub(u'\u0651', word)
    return word
def rewrite_caphi(word):
    """Apply phonological rewrite rules to a CAPHI transcription.

    The rules must run in this order; '+' and '-' mark morpheme boundaries
    in the input and are stripped near the end.
    """
    # Sun-letter assimilation after the definite article.
    word = _REWRITE_CAPHI_RE_1.sub(u'\\2\\2', word)
    # Expand shadda ('~') into a doubled consonant.
    word = _REWRITE_CAPHI_RE_2.sub(u'\\1_\\1', word)
    # Final i_y -> ii when the suffix is not a vowel.
    word = _REWRITE_CAPHI_RE_3.sub(u'ii_\\1', word)
    # Final u_w -> uu when the suffix is not a vowel.
    word = _REWRITE_CAPHI_RE_4.sub(u'uu_\\1', word)
    # Drop hamza wasl ('2') after a vowel ...
    word = _REWRITE_CAPHI_RE_5.sub(u'\\1', word)
    # ... but keep its vowel after a non-vowel.
    word = _REWRITE_CAPHI_RE_6.sub(u'\\1_\\2', word)
    # u+w followed by a non-vowel -> uu (e.g. 2_u+w_l_ii).
    word = _REWRITE_CAPHI_RE_7.sub(u'uu\\1', word)
    # Taa marboutah ('p') at a stem boundary before a vowel -> t.
    word = _REWRITE_CAPHI_RE_8.sub(u't_\\1', word)
    # Compress Alif madda + fatha + short vowels.
    word = _REWRITE_CAPHI_RE_9.sub(u'aa_', word)
    # Strip morpheme-boundary markers.
    word = _REWRITE_CAPHI_RE_10.sub(u'_', word)
    # Collapse runs of '_'.
    word = _REWRITE_CAPHI_RE_11.sub(u'_', word)
    # Trim leading/trailing underscores and a trailing taa marboutah.
    word = _REWRITE_CAPHI_RE_12.sub(u'', word)
    return word
def rewrite_tok_1(word):
    """Apply the Sun-letter and Fatha-after-Alif rewrite rules to a token."""
    rules = ((_REWRITE_DIAC_RE_1, u'\\1\u0651'),
             (_REWRITE_DIAC_RE_2, u''),
             (_REWRITE_DIAC_RE_3, u'\u0627\\1'))
    for pattern, replacement in rules:
        word = pattern.sub(replacement, word)
    return word
def rewrite_tok_2(word):
    """Apply only the Fatha-after-Alif rewrite rule to a token."""
    return _REWRITE_DIAC_RE_3.sub(u'\u0627\\1', word)
def rewrite_pattern(word):
    """Drop definite-article markers ('#' plus '+'s) from a pattern string."""
    return _REWRITE_DIAC_RE_2.sub(u'', word)
def merge_features(db, prefix_feats, stem_feats, suffix_feats, diac_mode="AF"):
    """Merge prefix, stem, and suffix feature dicts into one analysis dict.

    Stem features are the base; non-empty, non-'-' suffix values override
    them, and non-empty, non-'-' prefix values override both. Joinable and
    concatenative features are then rebuilt from all three parts, and the
    surface rewrite rules are applied. Only features present in
    ``db.defines`` are produced.
    """
    result = copy.copy(stem_feats)
    for stem_feat in stem_feats:
        # Precedence: prefix overrides suffix overrides stem.
        suffix_feat_val = suffix_feats.get(stem_feat, '')
        if suffix_feat_val != '-' and suffix_feat_val != '':
            result[stem_feat] = suffix_feat_val
        prefix_feat_val = prefix_feats.get(stem_feat, '')
        if prefix_feat_val != '-' and prefix_feat_val != '':
            result[stem_feat] = prefix_feat_val
    # Features joined with '+' across prefix/stem/suffix (e.g. gloss, bw).
    for join_feat in _JOIN_FEATS:
        if join_feat in db.defines:
            feat_vals = [
                prefix_feats.get(join_feat, None),
                stem_feats.get(join_feat, None),
                suffix_feats.get(join_feat, None)
            ]
            result[join_feat] = u'+'.join([fv for fv in feat_vals
                                           if fv is not None and fv != ''])
    # Features concatenated with '+' separators.
    for concat_feat in _CONCAT_FEATS:
        if concat_feat in db.defines:
            result[concat_feat] = u'+'.join([x for x in [
                prefix_feats.get(concat_feat, ''),
                stem_feats.get(concat_feat, ''),
                suffix_feats.get(concat_feat, '')] if len(x) > 0])
    # Features concatenated without a separator; the stem part falls back
    # to the stem's diac.
    for concat_feat in _CONCAT_FEATS_NONE:
        if concat_feat in db.defines:
            result[concat_feat] = u'{}{}{}'.format(
                prefix_feats.get(concat_feat, ''),
                stem_feats.get(concat_feat, stem_feats.get('diac', '')),
                suffix_feats.get(concat_feat, ''))
    result['stem'] = stem_feats['diac']
    result['stemgloss'] = stem_feats.get('gloss', '')
    # Apply surface rewrite rules and tanwyn normalization to the full diac.
    result['diac'] = normalize_tanwyn(rewrite_diac(result['diac']),
                                      diac_mode)
    for feat in _TOK_SCHEMES_1:
        if feat in db.defines:
            result[feat] = rewrite_tok_1(result.get(feat, ''))
    for feat in _TOK_SCHEMES_2:
        if feat in db.defines:
            result[feat] = rewrite_tok_2(result.get(feat, ''))
    if 'caphi' in db.defines:
        result['caphi'] = rewrite_caphi(result.get('caphi', ''))
    # Functional gender/number fall back to form-based values when unset.
    if 'form_gen' in db.defines and result['gen'] == '-':
        result['gen'] = result['form_gen']
    if 'form_num' in db.defines and result['num'] == '-':
        result['num'] = result['form_num']
    if 'pattern' in db.compute_feats:
        result['pattern'] = u'{}{}{}'.format(prefix_feats.get('diac', ''),
                                             stem_feats.get('pattern',
                                             stem_feats.get('diac', '')),
                                             suffix_feats.get('diac', ''))
        result['pattern'] = rewrite_pattern(result['pattern'])
    # Log-probability features are stored as strings; default to -99.0.
    for logprob_feat in _LOGPROB_FEATS:
        if logprob_feat in db.defines:
            result[logprob_feat] = float(result.get(logprob_feat, -99.0))
    return result
def feat_prettyprint(feats, order, default='', file=sys.stdout):
    """Print 'name: value' lines for the features listed in *order*.

    Values are rendered with repr(); features missing from *feats* fall
    back to *default*. Output is written to *file* (stdout by default).
    """
    for name in order:
        value = feats.get(name, default)
        print('{}: {}'.format(name, repr(value)), file=file)
| 10,355 | 34.34471 | 79 | py |
camel_tools | camel_tools-master/camel_tools/morphology/analyzer.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The morphological analyzer component of CAMeL Tools.
"""
from __future__ import absolute_import
from collections import deque, namedtuple
import copy
import itertools
import re
from threading import RLock
from cachetools import LFUCache, cached
from camel_tools.utils.charsets import UNICODE_PUNCT_SYMBOL_CHARSET
from camel_tools.utils.charsets import AR_CHARSET, AR_DIAC_CHARSET
from camel_tools.utils.charmap import CharMapper
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.errors import AnalyzerError
from camel_tools.morphology.utils import merge_features
from camel_tools.morphology.utils import simple_ar_to_caphi
from camel_tools.utils.dediac import dediac_ar
_ALL_PUNC = u''.join(UNICODE_PUNCT_SYMBOL_CHARSET)
_DIAC_RE = re.compile(u'[' + re.escape(u''.join(AR_DIAC_CHARSET)) + u']')
_IS_DIGIT_RE = re.compile(u'^.*[0-9\u0660-\u0669]+.*$')
_IS_STRICT_DIGIT_RE = re.compile(u'^[0-9\u0660-\u0669]+$')
_IS_PUNC_RE = re.compile(u'^[' + re.escape(_ALL_PUNC) + u']+$')
_HAS_PUNC_RE = re.compile(u'[' + re.escape(_ALL_PUNC) + u']')
_IS_AR_RE = re.compile(u'^[' + re.escape(u''.join(AR_CHARSET)) + u']+$')
# Identify No Analysis marker
_NOAN_RE = re.compile(u'NOAN')
_COPY_FEATS = frozenset(['gloss', 'atbtok', 'atbseg', 'd1tok', 'd1seg',
'd2tok', 'd2seg', 'd3tok', 'd3seg', 'bwtok'])
_UNDEFINED_LEX_FEATS = frozenset(['root', 'pattern', 'caphi'])
DEFAULT_NORMALIZE_MAP = CharMapper({
u'\u0625': u'\u0627',
u'\u0623': u'\u0627',
u'\u0622': u'\u0627',
u'\u0671': u'\u0627',
u'\u0649': u'\u064a',
u'\u0629': u'\u0647',
u'\u0640': u''
})
""":obj:`~camel_tools.utils.charmap.CharMapper`: The default character map used
for normalization by :obj:`Analyzer`.
Removes the tatweel/kashida character and does the following conversions:
- 'إ' to 'ا'
- 'أ' to 'ا'
- 'آ' to 'ا'
- 'ٱ' to 'ا'
- 'ى' to 'ي'
- 'ة' to 'ه'
"""
_BACKOFF_TYPES = frozenset(['NONE', 'NOAN_ALL', 'NOAN_PROP', 'ADD_ALL',
'ADD_PROP'])
class AnalyzedWord(namedtuple('AnalyzedWord', ['word', 'analyses'])):
    """A named tuple pairing a word with the analyses produced for it.

    Attributes:
        word (:obj:`str`): The analyzed word.
        analyses (:obj:`list` of :obj:`dict`): List of analyses for **word**.
            See :doc:`/reference/camel_morphology_features` for more
            information on features and their values.
    """
def _is_digit(word):
    # True if the word contains at least one ASCII or Arabic-Indic digit.
    return bool(_IS_DIGIT_RE.match(word))
def _is_strict_digit(word):
    # True only if the word consists entirely of digits.
    return bool(_IS_STRICT_DIGIT_RE.match(word))
def _is_punc(word):
    # True only if the word consists entirely of punctuation/symbols.
    return bool(_IS_PUNC_RE.match(word))
def _has_punc(word):
    # True if any punctuation/symbol character occurs anywhere in the word.
    return bool(_HAS_PUNC_RE.search(word))
def _is_ar(word):
    # True only if the word consists entirely of Arabic-script characters.
    return bool(_IS_AR_RE.match(word))
def _segments_gen(word, max_prefix=1, max_suffix=1):
w = len(word)
for p in range(0, min(max_prefix, w - 1) + 1):
prefix = word[:p]
for s in range(max(1, w - p - max_suffix), w - p + 1):
stem = word[p:p+s]
suffix = word[p+s:]
yield (prefix, stem, suffix)
class Analyzer:
    """Morphological analyzer component.

    Args:
        db (:obj:`~camel_tools.morphology.database.MorphologyDB`): Database to
            use for analysis. Must be opened in analysis or reinflection mode.
        backoff (:obj:`str`, optional): Backoff mode. Can be one of the
            following: 'NONE', 'NOAN_ALL', 'NOAN_PROP', 'ADD_ALL', or
            'ADD_PROP'. Defaults to 'NONE'.
        norm_map (:obj:`~camel_tools.utils.charmap.CharMapper`, optional):
            Character map for normalizing input words. If set to None, then
            :const:`DEFAULT_NORMALIZE_MAP` is used.
            Defaults to None.
        strict_digit (:obj:`bool`, optional): If set to `True`, then only words
            completely comprised of digits are considered numbers, otherwise,
            all words containing a digit are considered numbers. Defaults to
            `False`.
        cache_size (:obj:`int`, optional): If greater than zero, then the
            analyzer will cache the analyses for the **cache_size** most
            frequent words, otherwise no analyses will be cached.

    Raises:
        :obj:`~camel_tools.morphology.errors.AnalyzerError`: If database is
            not an instance of
            (:obj:`~camel_tools.morphology.database.MorphologyDB`), if **db**
            does not support analysis, or if **backoff** is not a valid backoff
            mode.
    """
    def __init__(self, db, backoff='NONE',
                 norm_map=None,
                 strict_digit=False,
                 cache_size=0):
        if not isinstance(db, MorphologyDB):
            raise AnalyzerError('DB is not an instance of MorphologyDB')
        if not db.flags.analysis:
            raise AnalyzerError('DB does not support analysis')
        self._db = db
        self._backoff = backoff
        self._strict_digit = strict_digit
        if norm_map is None:
            self._norm_map = DEFAULT_NORMALIZE_MAP
        else:
            self._norm_map = norm_map
        if backoff in _BACKOFF_TYPES:
            if backoff == 'NONE':
                self._backoff_condition = None
                self._backoff_action = None
            else:
                # Split e.g. 'NOAN_PROP' into condition 'NOAN' and action
                # 'PROP'.
                backoff_toks = backoff.split('_')
                self._backoff_condition = backoff_toks[0]
                self._backoff_action = backoff_toks[1]
        else:
            raise AnalyzerError('Invalid backoff mode {}'.format(
                repr(backoff)))
        if isinstance(cache_size, int):
            if cache_size > 0:
                # Replace the bound analyze method on this instance with an
                # LFU-cached, lock-protected wrapper.
                cache = LFUCache(cache_size)
                self.analyze = cached(cache, lock=RLock())(self.analyze)
        else:
            raise AnalyzerError('Invalid cache size {}'.format(
                repr(cache_size)))
    def _normalize(self, word):
        # Apply the configured normalization map (no-op when unset).
        if self._norm_map is None:
            return word
        return self._norm_map.map_string(word)
    def _combined_analyses(self,
                           word_dediac,
                           prefix_analyses,
                           stem_analyses,
                           suffix_analyses):
        """Combine prefix/stem/suffix analyses into whole-word analyses,
        keeping only combinations whose categories are mutually compatible.
        """
        combined = deque()
        for p in itertools.product(prefix_analyses, stem_analyses):
            prefix_cat = p[0][0]
            prefix_feats = p[0][1]
            stem_cat = p[1][0]
            stem_feats = p[1][1]
            if stem_cat in self._db.prefix_stem_compat[prefix_cat]:
                for suffix_cat, suffix_feats in suffix_analyses:
                    # Skip combinations that fail any pairwise
                    # compatibility check.
                    if ((stem_cat not in self._db.stem_suffix_compat) or
                            (prefix_cat not in self._db.prefix_suffix_compat) or
                            (suffix_cat not in
                             self._db.stem_suffix_compat[stem_cat]) or
                            (suffix_cat not in
                             self._db.prefix_suffix_compat[prefix_cat])):
                        continue
                    merged = merge_features(self._db, prefix_feats, stem_feats,
                                            suffix_feats)
                    merged['stem'] = stem_feats['diac']
                    merged['stemcat'] = stem_cat
                    merged_dediac = dediac_ar(merged['diac'])
                    # Mark spelling variants: the dediacritized result does
                    # not match the (tatweel-stripped) input word.
                    if word_dediac.replace(u'\u0640', '') != merged_dediac:
                        merged['source'] = 'spvar'
                    combined.append(merged)
        return combined
    def _combined_backoff_analyses(self,
                                   stem,
                                   word_dediac,
                                   prefix_analyses,
                                   stem_analyses,
                                   suffix_analyses):
        """Combine analyses using NOAN backoff stems, substituting the
        input stem into the stem features' templatic values.
        """
        combined = deque()
        for p in itertools.product(prefix_analyses, stem_analyses):
            prefix_cat = p[0][0]
            prefix_feats = p[0][1]
            stem_cat = p[1][0]
            # Copy so the shared backoff stem entry is not mutated below.
            stem_feats = copy.copy(p[1][1])
            if stem_cat in self._db.prefix_stem_compat[prefix_cat]:
                for suffix_cat, suffix_feats in suffix_analyses:
                    if ((suffix_cat not in
                         self._db.stem_suffix_compat[stem_cat]) or
                            (prefix_cat not in self._db.prefix_suffix_compat or
                             suffix_cat not in
                             self._db.prefix_suffix_compat[prefix_cat])):
                        continue
                    # In PROP mode, only proper-noun backoff entries apply.
                    if (self._backoff_action == 'PROP' and
                            'NOUN_PROP' not in stem_feats['bw']):
                        continue
                    # Substitute the actual stem for the NOAN placeholder.
                    stem_feats['bw'] = _NOAN_RE.sub(stem, stem_feats['bw'])
                    stem_feats['diac'] = _NOAN_RE.sub(stem, stem_feats['diac'])
                    stem_feats['lex'] = _NOAN_RE.sub(stem, stem_feats['lex'])
                    stem_feats['caphi'] = simple_ar_to_caphi(stem)
                    merged = merge_features(self._db, prefix_feats, stem_feats,
                                            suffix_feats)
                    merged['stem'] = stem_feats['diac']
                    merged['stemcat'] = stem_cat
                    merged['source'] = 'backoff'
                    merged['pattern'] = 'backoff'
                    merged['gloss'] = stem_feats['gloss']
                    combined.append(merged)
        return combined
    # pylint: disable=E0202
    def analyze(self, word):
        """Analyze a given word.

        Args:
            word (:py:obj:`str`): Word to analyze.

        Returns:
            :obj:`list` of :obj:`dict`: The list of analyses for **word**.
            See :doc:`/reference/camel_morphology_features` for more
            information on features and their values.
        """
        word = word.strip()
        if word == '':
            return []
        analyses = deque()
        word_dediac = dediac_ar(word)
        word_normal = self._normalize(word_dediac)
        # Digits: synthesize a single NOUN_NUM analysis.
        if ((self._strict_digit and _is_strict_digit(word)) or
                (not self._strict_digit and _is_digit(word))):
            result = copy.copy(self._db.defaults['digit'])
            result['diac'] = word
            result['stem'] = word
            result['stemgloss'] = word
            result['stemcat'] = None
            result['lex'] = word
            result['bw'] = word + '/NOUN_NUM'
            result['source'] = 'digit'
            for feat in _COPY_FEATS:
                if feat in self._db.defines:
                    result[feat] = word
            for feat in _UNDEFINED_LEX_FEATS:
                if feat in self._db.defines:
                    result[feat] = 'DIGIT'
            if 'catib6' in self._db.defines:
                result['catib6'] = 'NOM'
            if 'ud' in self._db.defines:
                result['ud'] = 'NUM'
            result['pos_logprob'] = -99.0
            result['lex_logprob'] = -99.0
            result['pos_lex_logprob'] = -99.0
            if 'form_gen' in self._db.defines and result['gen'] == '-':
                result['gen'] = result['form_gen']
            if 'form_num' in self._db.defines and result['num'] == '-':
                result['num'] = result['form_num']
            return [result]
        # Punctuation-only words: synthesize a single PUNC analysis.
        elif _is_punc(word):
            result = copy.copy(self._db.defaults['punc'])
            result['diac'] = word
            result['stem'] = word
            result['stemgloss'] = word
            result['stemcat'] = None
            result['lex'] = word
            result['bw'] = word + '/PUNC'
            result['source'] = 'punc'
            for feat in _COPY_FEATS:
                if feat in self._db.defines:
                    result[feat] = word
            for feat in _UNDEFINED_LEX_FEATS:
                if feat in self._db.defines:
                    result[feat] = 'PUNC'
            if 'catib6' in self._db.defines:
                result['catib6'] = 'PNX'
            if 'ud' in self._db.defines:
                result['ud'] = 'PUNCT'
            result['pos_logprob'] = -99.0
            result['lex_logprob'] = -99.0
            result['pos_lex_logprob'] = -99.0
            if 'form_gen' in self._db.defines and result['gen'] == '-':
                result['gen'] = result['form_gen']
            if 'form_num' in self._db.defines and result['num'] == '-':
                result['num'] = result['form_num']
            return [result]
        # Words merely containing punctuation get no analyses.
        elif _has_punc(word):
            pass
        # Non-Arabic script: synthesize a single FOREIGN analysis.
        elif not _is_ar(word):
            # TODO: This is a temporary workaround until a 'foreign' entry is
            # added to the databases.
            result = copy.copy(self._db.defaults['latin'])
            result['pos'] = 'foreign'
            result['diac'] = word
            result['stem'] = word
            result['stemgloss'] = word
            result['stemcat'] = None
            result['lex'] = word
            result['bw'] = word + '/FOREIGN'
            result['source'] = 'foreign'
            for feat in _COPY_FEATS:
                if feat in self._db.defines:
                    result[feat] = word
            for feat in _UNDEFINED_LEX_FEATS:
                if feat in self._db.defines:
                    result[feat] = 'FOREIGN'
            if 'catib6' in self._db.defines:
                result['catib6'] = 'FOREIGN'
            if 'ud' in self._db.defines:
                result['ud'] = 'X'
            result['pos_logprob'] = -99.0
            result['lex_logprob'] = -99.0
            result['pos_lex_logprob'] = -99.0
            if 'form_gen' in self._db.defines and result['gen'] == '-':
                result['gen'] = result['form_gen']
            if 'form_num' in self._db.defines and result['num'] == '-':
                result['num'] = result['form_num']
            return [result]
        else:
            # Arabic word: try every prefix/stem/suffix segmentation and
            # combine the matching database entries.
            segments_gen = _segments_gen(word_normal, self._db.max_prefix_size,
                                         self._db.max_suffix_size)
            for segmentation in segments_gen:
                prefix = segmentation[0]
                stem = segmentation[1]
                suffix = segmentation[2]
                prefix_analyses = self._db.prefix_hash.get(prefix, None)
                suffix_analyses = self._db.suffix_hash.get(suffix, None)
                if prefix_analyses is None or suffix_analyses is None:
                    continue
                stem_analyses = self._db.stem_hash.get(stem, None)
                if stem_analyses is not None:
                    combined = self._combined_analyses(word_dediac,
                                                       prefix_analyses,
                                                       stem_analyses,
                                                       suffix_analyses)
                    analyses.extend(combined)
            # Backoff: either always ('ADD') or only when no analysis was
            # found ('NOAN'), re-segment against the NOAN backoff stems.
            if ((self._backoff_condition == 'NOAN' and len(analyses) == 0) or
                    (self._backoff_condition == 'ADD')):
                segments_gen = _segments_gen(word_normal,
                                             self._db.max_prefix_size,
                                             self._db.max_suffix_size)
                backoff_cats = self._db.stem_backoffs[self._backoff_action]
                stem_analyses = [(cat, analysis)
                                 for cat, analysis in self._db.stem_hash['NOAN']
                                 if cat in backoff_cats]
                for segmentation in segments_gen:
                    prefix = segmentation[0]
                    stem = segmentation[1]
                    suffix = segmentation[2]
                    prefix_analyses = self._db.prefix_hash.get(prefix, None)
                    suffix_analyses = self._db.suffix_hash.get(suffix, None)
                    if prefix_analyses is None or suffix_analyses is None:
                        continue
                    combined = self._combined_backoff_analyses(stem,
                                                               word_dediac,
                                                               prefix_analyses,
                                                               stem_analyses,
                                                               suffix_analyses)
                    analyses.extend(combined)
        result = list(analyses)
        return result
    def analyze_words(self, words):
        '''Analyze a list of words.

        Args:
            words (:py:obj:`list` of :py:obj:`str`): List of words to analyze.

        Returns:
            :obj:`list` of :obj:`AnalyzedWord`: The list of analyses for each
            word in **words**.
        '''
        return list(map(lambda w: AnalyzedWord(w, self.analyze(w)), words))
    def all_feats(self):
        """Return a set of all features provided by the database used in this
        analyzer instance.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set all features provided by
            the database used in this analyzer instance.
        """
        return self._db.all_feats()
    def tok_feats(self):
        """Return a set of tokenization features provided by the database used
        in this analyzer instance.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set tokenization features
            provided by the database used in this analyzer instance.
        """
        return self._db.tok_feats()
| 18,547 | 34.876209 | 79 | py |
camel_tools | camel_tools-master/camel_tools/morphology/database.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The database component of CAMeL Tools.
"""
from __future__ import absolute_import
from collections import namedtuple
from pathlib import Path
import re
from camel_tools.utils.stringutils import force_unicode
from camel_tools.morphology.utils import strip_lex
from camel_tools.morphology.errors import InvalidDatabaseFlagError
from camel_tools.morphology.errors import DatabaseParseError
from camel_tools.data import CATALOGUE
MorphologyDBFlags = namedtuple('MorphologyDBFlags', ['analysis', 'generation',
'reinflection'])
class MorphologyDB:
    """Class providing indexes from a given morphology database file.

    Args:
        fpath (:obj:`str`): File path to database.
        flags (:obj:`str`): Flag string (similar to opening files) indicates
            what components the database will be used for. 'a' indicates
            analysis, 'g' indicates generation, and 'r' indicates
            reinflection. 'r' is equivalent to 'ag' since the reinflector
            uses both analyzer and generator components internally.
            Defaults to 'a'.

    Raises:
        :obj:`~camel_tools.morphology.errors.InvalidDatabaseFlagError`: When
            an invalid flag value is given.
    """

    @staticmethod
    def list_builtin_dbs():
        """Returns a list of builtin databases provided with CAMeL Tools.

        Returns:
            :obj:`list` of :obj:`~camel_tools.data.DatasetEntry`: List of
            builtin databases.
        """

        return list(CATALOGUE.get_component('MorphologyDB').datasets)

    @staticmethod
    def builtin_db(db_name=None, flags='a'):
        """Create a :obj:`MorphologyDB` instance from one of the builtin
        databases provided.

        Args:
            db_name (:obj:`str`, optional): Name of builtin database.
                You can use :meth:`list_builtin_dbs` to get a list of
                builtin databases or see :ref:`camel_morphology_dbs`.
                Defaults to 'calima-msa-r13'.
            flags (:obj:`str`, optional): Flag string to be passed to
                :obj:`MorphologyDB` constructor. Defaults to 'a'.

        Returns:
            :obj:`MorphologyDB`: Instance of builtin database with given
            flags.
        """

        if db_name is None:
            db_name = CATALOGUE.components['MorphologyDB'].default

        db_info = CATALOGUE.components['MorphologyDB'].datasets[db_name]

        return MorphologyDB(str(Path(db_info.path, 'morphology.db')), flags)

    def __init__(self, fpath, flags='a'):
        """Class constructor.
        """

        self._withAnalysis = False
        self._withReinflection = False
        self._withGeneration = False
        self._defaultKey = 'pos'

        for flag in flags:
            if flag == 'a':
                self._withAnalysis = True
            elif flag == 'g':
                self._withGeneration = True
            elif flag == 'r':
                # Reinflection needs both the analysis and generation
                # indexes to be built.
                self._withReinflection = True
                self._withAnalysis = True
                self._withGeneration = True
            else:
                raise InvalidDatabaseFlagError(flag)

        if self._withAnalysis and self._withGeneration:
            self._withReinflection = True

        # BUGFIX: the third field (reinflection) was previously populated
        # with the generation flag, which mis-reported reinflection support
        # for generation-only ('g') databases.
        self.flags = MorphologyDBFlags(self._withAnalysis,
                                       self._withGeneration,
                                       self._withReinflection)

        # All indexes below are populated by _parse_dbfile().
        self.defines = {}
        self.defaults = {}
        self.order = None
        self.tokenizations = set()
        self.compute_feats = frozenset()
        self.stem_backoffs = {}

        # Analysis indexes: surface form -> list of (category, analysis).
        self.prefix_hash = {}
        self.suffix_hash = {}
        self.stem_hash = {}

        # Generation indexes: category/lemma -> list of analyses.
        self.prefix_cat_hash = {}
        self.suffix_cat_hash = {}
        self.lemma_hash = {}

        # Compatibility tables between prefix, stem, and suffix categories.
        self.prefix_stem_compat = {}
        self.stem_suffix_compat = {}
        self.prefix_suffix_compat = {}
        self.stem_prefix_compat = {}

        # Longest prefix/suffix observed; bounds the analyzer's
        # segmentation search.
        self.max_prefix_size = 0
        self.max_suffix_size = 0

        self._parse_dbfile(fpath)

    def _parse_analysis_line_toks(self, toks):
        """Parse a list of 'key:value' tokens into a feature dict.

        Raises:
            :obj:`~camel_tools.morphology.errors.DatabaseParseError`: If a
                token is not a valid 'key:value' pair.
        """

        res = {}

        for tok in toks:
            # Skip empty tokens caused by consecutive spaces.
            if len(tok) == 0:
                continue

            subtoks = tok.split(u':')
            if len(subtoks) < 2:
                raise DatabaseParseError(
                    'invalid key value pair {}'.format(repr(tok)))

            # Values may themselves contain ':', so rejoin the remainder.
            res[subtoks[0]] = u':'.join(subtoks[1:])

        return res

    def _parse_defaults_line_toks(self, toks):
        """Parse a DEFAULT line's 'feat:value' tokens into a dict, mapping
        the wildcard value '*' to None.

        Raises:
            :obj:`~camel_tools.morphology.errors.DatabaseParseError`: If a
                token is not a valid 'key:value' pair.
        """

        res = {}

        for tok in toks:
            subtoks = tok.split(u':')
            if len(subtoks) < 2:
                raise DatabaseParseError(
                    'invalid key value pair {} in DEFAULTS'.format(
                        repr(tok)))

            feat = subtoks[0]
            val = ':'.join(subtoks[1:])

            if val == '*':
                res[feat] = None
            else:
                res[feat] = val

        return res

    def _parse_dbfile(self, fpath):
        """Populate all indexes from the database file at **fpath**.

        The file is a sequence of sections delimited by '###...###' marker
        lines, parsed strictly in this order: DEFINES, DEFAULTS, ORDER,
        TOKENIZATIONS, STEMBACKOFF, PREFIXES, SUFFIXES, STEMS, TABLE AB
        (prefix-stem compatibility), TABLE BC (stem-suffix compatibility),
        and TABLE AC (prefix-suffix compatibility).
        """

        with open(fpath, 'r', encoding='utf-8') as dbfile:
            # Process DEFINES
            for line in dbfile:
                line = force_unicode(line).strip()

                if line == '###DEFINES###':
                    continue

                if line == '###DEFAULTS###':
                    break

                toks = line.split(u' ')

                # Check if line has the minimum viable format
                if len(toks) < 3 or toks[0] != 'DEFINE':
                    raise DatabaseParseError(
                        'invalid DEFINES line {}'.format(repr(line)))

                new_define = toks[1]
                val_set = set()

                # Parse values for defined keyword
                for tok in toks[2:]:
                    subtoks = tok.split(':')

                    # If it's a malformed entry, ignore it
                    # NOTE(review): this condition likely should use `or`;
                    # with `and`, a colon-less token whose text equals the
                    # feature name escapes this check and crashes on
                    # subtoks[1] below. Preserved as-is since existing
                    # databases never trigger it.
                    if len(subtoks) != 2 and subtoks[0] != toks[1]:
                        raise DatabaseParseError(
                            'invalid key value pair {} in DEFINES'.format(
                                repr(tok)))

                    # If it's an open class, we use None instead of a set
                    if len(toks) == 3 and subtoks[1] == '*open*':
                        val_set = None
                        break

                    val_set.add(subtoks[1])

                self.defines[new_define] = (
                    list(val_set) if val_set is not None else None)

            # Process DEFAULTS
            for line in dbfile:
                line = force_unicode(line).strip()

                if line == '###ORDER###':
                    break

                toks = line.split(u' ')

                if len(toks) < 2 or toks[0] != 'DEFAULT':
                    raise DatabaseParseError(
                        'invalid DEFAULTS line {}'.format(repr(line)))

                parsed_default = self._parse_defaults_line_toks(toks[1:])

                # Defaults are indexed by their 'pos' value.
                if self._defaultKey not in parsed_default:
                    raise DatabaseParseError(
                        'DEFAULTS line {} missing {} value'.format(
                            repr(line), self._defaultKey))

                dkey = parsed_default[self._defaultKey]
                self.defaults[dkey] = parsed_default

            # Process ORDER
            for line in dbfile:
                line = force_unicode(line).strip()

                if line == '###TOKENIZATIONS###':
                    self.compute_feats = frozenset(self.order)
                    break

                toks = line.split(u' ')

                # NOTE(review): with `and`, this check can never fire for
                # the single ORDER line of a well-formed database
                # (self.order is still None at that point); it likely
                # should use `or`. Preserved as-is.
                if (self.order is not None and len(toks) < 2 and
                        toks[0] != 'ORDER'):
                    raise DatabaseParseError(
                        'invalid ORDER line {}'.format(repr(line)))

                # Only the first listed feature is validated here.
                if toks[1] not in self.defines:
                    raise DatabaseParseError(
                        'invalid feature {} in ORDER line.'.format(
                            repr(toks[1])))

                self.order = toks[1:]

            # Process TOKENIZATIONS
            for line in dbfile:
                line = force_unicode(line).strip()

                if line == '###STEMBACKOFF###':
                    self.tokenizations = frozenset(self.tokenizations)
                    break

                toks = line.split(u' ')

                # NOTE(review): same copy-pasted `and` condition as in the
                # ORDER section above; preserved as-is.
                if (self.order is not None and len(toks) < 2 and
                        toks[0] != 'TOKENIZATION'):
                    raise DatabaseParseError(
                        'invalid TOKENIZATION line {}'.format(repr(line)))

                if toks[1] not in self.defines:
                    raise DatabaseParseError(
                        'invalid feature {} in TOKENIZATION line.'.format(
                            repr(toks[1])))

                self.tokenizations.update(toks[1:])

            # Process STEMBACKOFFS
            for line in dbfile:
                line = force_unicode(line).strip()

                if line == '###PREFIXES###':
                    break

                toks = line.split(u' ')

                if len(toks) < 3 or toks[0] != 'STEMBACKOFF':
                    raise DatabaseParseError(
                        'invalid STEMBACKOFFS line {}'.format(repr(line)))

                self.stem_backoffs[toks[1]] = toks[2:]

            # Process PREFIXES (tab-separated: surface, category, analysis)
            for line in dbfile:
                line = force_unicode(line)
                parts = line.split(u'\t')

                if len(parts) != 3:
                    if line.strip() == '###SUFFIXES###':
                        break
                    raise DatabaseParseError(
                        'invalid PREFIXES line {}'.format(repr(line)))

                prefix = parts[0].strip()
                category = parts[1]
                analysis = self._parse_analysis_line_toks(
                    parts[2].strip().split(u' '))

                if self._withAnalysis:
                    if prefix not in self.prefix_hash:
                        self.prefix_hash[prefix] = []
                    self.prefix_hash[prefix].append((category, analysis))

                if self._withGeneration:
                    # FIXME: Make sure analyses for category are unique?
                    if category not in self.prefix_cat_hash:
                        self.prefix_cat_hash[category] = []
                    self.prefix_cat_hash[category].append(analysis)

            # Process SUFFIXES (tab-separated: surface, category, analysis)
            for line in dbfile:
                line = force_unicode(line)
                parts = line.split(u'\t')

                if len(parts) != 3:
                    if line.strip() == '###STEMS###':
                        break
                    raise DatabaseParseError(
                        'invalid SUFFIXES line {}'.format(repr(line)))

                suffix = parts[0].strip()
                category = parts[1]
                analysis = self._parse_analysis_line_toks(
                    parts[2].strip().split(u' '))

                if self._withAnalysis:
                    if suffix not in self.suffix_hash:
                        self.suffix_hash[suffix] = []
                    self.suffix_hash[suffix].append((category, analysis))

                if self._withGeneration:
                    # FIXME: Make sure analyses for category are unique?
                    if category not in self.suffix_cat_hash:
                        self.suffix_cat_hash[category] = []
                    self.suffix_cat_hash[category].append(analysis)

            # Process STEMS (tab-separated: stem, category, analysis)
            for line in dbfile:
                line = force_unicode(line).strip()

                if line == '###TABLE AB###':
                    break

                parts = line.split(u'\t')

                if len(parts) != 3:
                    raise DatabaseParseError(
                        'invalid STEMS line {}'.format(repr(line)))

                stem = parts[0]
                category = parts[1]
                analysis = self._parse_analysis_line_toks(parts[2].split(u' '))

                # Lemmas are indexed without their homonym index.
                analysis['lex'] = strip_lex(analysis['lex'])

                if self._withAnalysis:
                    if stem not in self.stem_hash:
                        self.stem_hash[stem] = []
                    self.stem_hash[stem].append((category, analysis))

                if self._withGeneration:
                    # FIXME: Make sure analyses for category are unique?
                    lemma_key = analysis['lex']
                    analysis['stemcat'] = category
                    if lemma_key not in self.lemma_hash:
                        self.lemma_hash[lemma_key] = []
                    self.lemma_hash[lemma_key].append(analysis)

            # Process prefix_stem compatibility table (TABLE AB)
            for line in dbfile:
                line = force_unicode(line).strip()

                if line == '###TABLE BC###':
                    break

                toks = line.split()

                if len(toks) != 2:
                    raise DatabaseParseError(
                        'invalid TABLE AB line {}'.format(repr(line)))

                prefix_cat = toks[0]
                stem_cat = toks[1]

                if self._withAnalysis:
                    if prefix_cat not in self.prefix_stem_compat:
                        self.prefix_stem_compat[prefix_cat] = set()
                    self.prefix_stem_compat[prefix_cat].add(stem_cat)

                if self._withGeneration:
                    if stem_cat not in self.stem_prefix_compat:
                        self.stem_prefix_compat[stem_cat] = set()
                    self.stem_prefix_compat[stem_cat].add(prefix_cat)

            # Process stem_suffix compatibility table (TABLE BC)
            for line in dbfile:
                line = force_unicode(line).strip()

                if line == '###TABLE AC###':
                    break

                toks = line.split()

                if len(toks) != 2:
                    raise DatabaseParseError(
                        'invalid TABLE BC line {}'.format(repr(line)))

                stem_cat = toks[0]
                suffix_cat = toks[1]

                # Used by both analysis and generation, so no flag guard.
                if stem_cat not in self.stem_suffix_compat:
                    self.stem_suffix_compat[stem_cat] = set()
                self.stem_suffix_compat[stem_cat].add(suffix_cat)

            # Process prefix_suffix compatibility table (TABLE AC)
            for line in dbfile:
                line = force_unicode(line).strip()

                toks = line.split()

                if len(toks) != 2:
                    raise DatabaseParseError(
                        'invalid TABLE AC line {}'.format(repr(line)))

                prefix_cat = toks[0]
                suffix_cat = toks[1]

                # Used by both analysis and generation, so no flag guard.
                if prefix_cat not in self.prefix_suffix_compat:
                    self.prefix_suffix_compat[prefix_cat] = set()
                self.prefix_suffix_compat[prefix_cat].add(suffix_cat)

            if self._withAnalysis:
                # Cache the longest prefix/suffix lengths so the analyzer
                # can bound its segmentation search.
                for prefix in self.prefix_hash.keys():
                    self.max_prefix_size = max(self.max_prefix_size,
                                               len(prefix))
                for suffix in self.suffix_hash.keys():
                    self.max_suffix_size = max(self.max_suffix_size,
                                               len(suffix))

    def all_feats(self):
        """Return a set of all features provided by this database instance.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set of all features provided
            by this database instance.
        """

        return frozenset(self.defines.keys())

    def tok_feats(self):
        """Return a set of tokenization features provided by this database
        instance.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set of tokenization features
            provided by this database instance.
        """

        return self.tokenizations
| 17,140 | 34.415289 | 79 | py |
camel_tools | camel_tools-master/camel_tools/morphology/__init__.py | # MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This sub-module contains components for the CAMeL Tools morphological
anlyzer, generator, and reinflector systems based on
`CALIMA Star <http://www.aclweb.org/anthology/W18-5816>`_.
"""
| 1,315 | 47.740741 | 79 | py |
camel_tools | camel_tools-master/camel_tools/morphology/generator.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""The generator component of CAMeL Tools.
"""
from __future__ import absolute_import
import copy
import collections
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.errors import GeneratorError
from camel_tools.morphology.errors import InvalidGeneratorFeature
from camel_tools.morphology.errors import InvalidGeneratorFeatureValue
from camel_tools.morphology.utils import merge_features, strip_lex
class Generator(object):
    """Morphological generator component.

    Args:
        db (:obj:`~camel_tools.morphology.database.MorphologyDB`): Database to
            use for generation. Must be opened in generation or reinflection
            mode.

    Raises:
        :obj:`~camel_tools.morphology.errors.GeneratorError`: If **db** is not
            an instance of
            :obj:`~camel_tools.morphology.database.MorphologyDB` or if **db**
            does not support generation.
    """

    def __init__(self, db):
        if not isinstance(db, MorphologyDB):
            raise GeneratorError('DB is not an instance of MorphologyDB')
        if not db.flags.generation:
            raise GeneratorError('DB does not support generation')

        self._db = db

    def generate(self, lemma, feats):
        """Generate surface forms and their associated analyses for a given
        lemma and a given set of (possibly underspecified) features.
        The surface form is accessed through the `diac` feature.

        Args:
            lemma (:obj:`str`): Lemma to generate from.
            feats (:obj:`dict`): Dictionary of features. Must contain 'pos'
                feature.
                See :doc:`/reference/camel_morphology_features` for
                more information on features and their values.

        Returns:
            :obj:`list` of :obj:`dict`: List of generated analyses.
            See :doc:`/reference/camel_morphology_features` for more
            information on features and their values.

        Raises:
            :obj:`~camel_tools.morphology.errors.InvalidGeneratorFeature`: If
                a feature is given that is not defined in database.
            :obj:`~camel_tools.morphology.errors.InvalidGeneratorFeatureValue`:
                If an invalid value is given to a feature or if 'pos' feature
                is not defined.
        """

        # The DB's lemma index stores lemmas without their homonym index.
        lemma = strip_lex(lemma)
        if lemma not in self._db.lemma_hash:
            return []

        # Validate requested feature names and values against the DB's
        # DEFINES section (a None define means an open value class).
        for feat in feats:
            if feat not in self._db.defines:
                raise InvalidGeneratorFeature(feat)
            elif (self._db.defines[feat] is not None and
                    feats[feat] not in self._db.defines[feat]):
                raise InvalidGeneratorFeatureValue(feat, feats[feat])

        if 'pos' not in feats or feats['pos'] not in self._db.defines['pos']:
            raise InvalidGeneratorFeatureValue('pos', feats.get('pos', None))

        # Copy before mutating: defaults for clitic features are filled in
        # below without affecting the caller's dict.
        feats = copy.copy(feats)

        # Requested features must be a subset of those the DB declares as
        # defaults for this POS, otherwise nothing can be generated.
        default = self._db.defaults[feats['pos']]
        default_feat_set = frozenset(default.keys())
        feat_set = frozenset(feats.keys())
        if not feat_set.issubset(default_feat_set):
            return []

        # Set default values for undefined feats
        for feat in ['prc0', 'prc1', 'prc2', 'prc3', 'enc0', 'enc1', 'enc2']:
            if feat not in feats and feat in default:
                feats[feat] = default[feat]

        stem_feats_list = self._db.lemma_hash[lemma]
        analyses = collections.deque()

        # For every stem of the lemma, enumerate all compatible
        # prefix/suffix category combinations and keep the merged analyses
        # that agree with the requested features.
        for stem_feats in stem_feats_list:
            # Quick rejections on stem-level features.
            if 'vox' in feats and stem_feats['vox'] != feats['vox']:
                continue
            if 'rat' in feats and stem_feats['rat'] != feats['rat']:
                continue
            if 'pos' in feats and stem_feats['pos'] != feats['pos']:
                continue

            # Reject stems whose inherent clitic features contradict the
            # requested ones.
            ignore_stem = False
            for feat in ['prc0', 'prc1', 'prc2', 'prc3',
                         'enc0', 'enc1', 'enc2']:
                if feat not in feats:
                    continue
                if (feat in stem_feats and
                        stem_feats[feat] != '0' and
                        feats[feat] != stem_feats[feat]):
                    ignore_stem = True
                    break
            if ignore_stem:
                continue

            # Prefix/suffix categories compatible with this stem category.
            prefix_cats = self._db.stem_prefix_compat[stem_feats['stemcat']]
            suffix_cats = self._db.stem_suffix_compat[stem_feats['stemcat']]

            for prefix_cat in prefix_cats:
                if prefix_cat not in self._db.prefix_cat_hash:
                    continue

                prefix_feats_list = self._db.prefix_cat_hash[prefix_cat]

                for prefix_feats in prefix_feats_list:
                    # Reject prefixes that don't carry a requested proclitic
                    # (unless the stem itself does) or that carry a
                    # conflicting one.
                    ignore_prefix = False
                    for feat in ['prc0', 'prc1', 'prc2', 'prc3']:
                        if feat not in feats:
                            continue
                        if ((feats[feat] != '0' and
                                feat not in prefix_feats and
                                stem_feats.get(feat, '0') != feats[feat]) or
                                (feat in prefix_feats and
                                 feats[feat] != prefix_feats[feat])):
                            ignore_prefix = True
                            break
                    if ignore_prefix:
                        continue

                    for suffix_cat in suffix_cats:
                        if suffix_cat not in self._db.suffix_cat_hash:
                            continue

                        suffix_feats_list = (
                            self._db.suffix_cat_hash[suffix_cat])

                        for suffix_feats in suffix_feats_list:
                            # Prefix and suffix categories must also be
                            # compatible with each other (TABLE AC).
                            if ((prefix_cat not in
                                    self._db.prefix_suffix_compat) or
                                    (suffix_cat not in
                                     self._db.prefix_suffix_compat[prefix_cat])):
                                continue

                            # Same rejection logic as for proclitics above,
                            # but for enclitics on the suffix.
                            ignore_suffix = False
                            for feat in ['enc0', 'enc1', 'enc2']:
                                if feat not in feats:
                                    continue
                                if ((feats[feat] != '0' and
                                        feat not in suffix_feats and
                                        stem_feats.get(feat, '0') != feats[feat])
                                        or (feat in suffix_feats and
                                            feats[feat] != suffix_feats[feat])):
                                    ignore_suffix = True
                                    break
                            if ignore_suffix:
                                continue

                            # Combine prefix + stem + suffix into a full
                            # analysis (including the 'diac' surface form).
                            merged = merge_features(self._db, prefix_feats,
                                                    stem_feats, suffix_feats)

                            # Final filter: the merged analysis must agree
                            # with every explicitly requested feature.
                            ignore_analysis = False
                            for feat in feats.keys():
                                if (feat in merged and
                                        merged[feat] != feats[feat]):
                                    ignore_analysis = True
                                    break

                            if not ignore_analysis:
                                analyses.append(merged)

        return list(analyses)

    def all_feats(self):
        """Return a set of all features provided by the database used in this
        generator instance.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set of all features provided
            by the database used in this generator instance.
        """

        return self._db.all_feats()

    def tok_feats(self):
        """Return a set of tokenization features provided by the database used
        in this generator instance.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set of tokenization features
            provided by the database used in this generator instance.
        """

        return self._db.tok_feats()
| 9,177 | 39.078603 | 81 | py |
camel_tools | camel_tools-master/camel_tools/tokenizers/morphological.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains utilities for morphological tokenization.
"""
import re
from collections import deque
from camel_tools.utils.dediac import dediac_ar
# Reduce consecutive '+'s to one
_REMOVE_PLUSES = re.compile(r'(_\+|\+_)+')


def _default_dediac(tok):
    """Dediacritize a token using the standard Arabic dediacritizer."""
    return dediac_ar(tok)


def _bwtok_dediac(tok):
    """Dediacritize a 'bwtok' token: strip leading/trailing '+'/'_'
    separators left behind by removed diacritic-only morphemes and collapse
    runs of consecutive separators to a single one."""
    return _REMOVE_PLUSES.sub(r'\g<1>', dediac_ar(tok).strip('+_'))


# Maps each tokenization scheme name to its dediacritization function.
_DIAC_TYPE = {
    'atbtok': _default_dediac,
    'atbseg': _default_dediac,
    'bwtok': _bwtok_dediac,
    'd1tok': _default_dediac,
    'd1seg': _default_dediac,
    'd2tok': _default_dediac,
    'd2seg': _default_dediac,
    'd3tok': _default_dediac,
    'd3seg': _default_dediac
}
class MorphologicalTokenizer(object):
    """Class for morphologically tokenizing Arabic words.

    Args:
        disambiguator (:obj:`~camel_tools.disambig.common.Disambiguator`):
            The disambiguator used to pick an analysis for each word.
        scheme (:obj:`str`): The tokenization scheme to produce. Use the
            :meth:`~camel_tools.disambig.common.Disambiguator.tok_feats`
            method of the chosen disambiguator to list the tokenization
            schemes it supports.
        split (:obj:`bool`, optional): If True, morphological tokens are
            returned as separate strings; otherwise they remain joined by
            underscores. Defaults to False.
        diac (:obj:`bool`, optional): If True, output tokens keep their
            diacritics; otherwise they are dediacritized. Defaults to
            False.
            Note that with the 'bwtok' scheme the number of undiacritized
            tokens can be smaller than the number of diacritized tokens,
            because 'bwtok' may produce morphemes consisting solely of
            diacritics (e.g. case and mood).
    """

    def __init__(self, disambiguator, scheme, split=False,
                 diac=False):
        self._disambiguator = disambiguator
        self._scheme = scheme
        self._split = split

        # Pre-select the post-processing function: identity when diacritics
        # are kept, otherwise the scheme-specific dediacritizer.
        if diac:
            self._diacf = lambda w: w
        else:
            self._diacf = lambda w: _DIAC_TYPE[self._scheme](w)

    def tokenize(self, words):
        """Generate morphological tokens for a given list of words.

        Args:
            words (:obj:`list` of :obj:`str`): List of words to tokenize.

        Returns:
            :obj:`list` of :obj:`str`: List of morphologically tokenized
            words.
        """

        tokens = []

        for dword in self._disambiguator.disambiguate(words):
            scored = dword.analyses

            # No analysis at all: emit the raw word untouched.
            if not scored:
                tokens.append(dword.word)
                continue

            tok = scored[0].analysis.get(self._scheme, None)

            if tok is None or tok == 'NOAN':
                # Top analysis has no usable tokenization; fall back to the
                # input word itself.
                tokens.append(self._diacf(dword.word))
            elif self._split:
                tokens.extend(self._diacf(tok).split('_'))
            else:
                tokens.append(self._diacf(tok))

        return tokens
| 4,375 | 35.466667 | 79 | py |
camel_tools | camel_tools-master/camel_tools/tokenizers/__init__.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module provides utilities for tokenization.
"""
| 1,208 | 42.178571 | 79 | py |
camel_tools | camel_tools-master/camel_tools/tokenizers/word.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains utilities for word-boundary tokenization.
"""
import re
from camel_tools.utils.charsets import UNICODE_PUNCT_SYMBOL_CHARSET
from camel_tools.utils.charsets import UNICODE_LETTER_MARK_NUMBER_CHARSET
from camel_tools.utils.charsets import UNICODE_LETTER_CHARSET
from camel_tools.utils.charsets import UNICODE_MARK_CHARSET
from camel_tools.utils.charsets import UNICODE_NUMBER_CHARSET
from camel_tools.utils.charsets import EMOJI_MULTICHAR_CHARSET
__all__ = ['simple_word_tokenize']


# Punctuation/symbol characters plus multi-character emoji sequences,
# regex-escaped and sorted longest-first so that multi-character emoji
# sequences are matched before any of their single-character prefixes.
_ALL_PUNCT_SYMBOLS = (UNICODE_PUNCT_SYMBOL_CHARSET | EMOJI_MULTICHAR_CHARSET)
_ALL_PUNCT_SYMBOLS = [re.escape(x) for x in _ALL_PUNCT_SYMBOLS]
_ALL_PUNCT_SYMBOLS = sorted(_ALL_PUNCT_SYMBOLS, key=len, reverse=True)

# Character classes used to build the tokenization patterns below.
_ALL_NUMBER = u''.join(UNICODE_NUMBER_CHARSET)
_ALL_LETTER_MARK = u''.join((UNICODE_LETTER_CHARSET | UNICODE_MARK_CHARSET))
_ALL_LETTER_MARK_NUMBER = u''.join(UNICODE_LETTER_MARK_NUMBER_CHARSET)

# Default pattern: punctuation/symbol/emoji tokens, or runs of
# letters/marks/numbers (digits kept inside words).
_TOKENIZE_RE = re.compile(u'|'.join(_ALL_PUNCT_SYMBOLS) + r'|[' +
                          re.escape(_ALL_LETTER_MARK_NUMBER) + r']+')

# Digit-splitting pattern: same as above, but digit runs form their own
# tokens separate from letter/mark runs.
_TOKENIZE_NUMBER_RE = re.compile(u'|'.join(_ALL_PUNCT_SYMBOLS) + r'|[' +
                                 re.escape(_ALL_NUMBER) + r']+|[' +
                                 re.escape(_ALL_LETTER_MARK) + r']+')
def simple_word_tokenize(sentence, split_digits=False):
    """Split a sentence into tokens on whitespace and punctuation
    boundaries.

    Tokens are either runs of alpha-numeric characters, single
    punctuation/symbol/emoji characters, or multi-character emoji
    sequences. The function is language agnostic: anything Unicode marks
    as punctuation or a symbol acts as a boundary.

    For example, tokenizing ``'Hello, world!!!'`` yields
    ``['Hello', ',', 'world', '!', '!', '!']``.

    When **split_digits** is True, digit runs additionally form their own
    tokens, so ``'Hello, world123!!!'`` yields
    ``['Hello', ',', 'world', '123', '!', '!', '!']``.

    Args:
        sentence (:obj:`str`): Sentence to tokenize.
        split_digits (:obj:`bool`, optional): Whether to emit digit runs as
            separate tokens. Defaults to False.

    Returns:
        :obj:`list` of :obj:`str`: The list of tokens.
    """

    pattern = _TOKENIZE_NUMBER_RE if split_digits else _TOKENIZE_RE
    return pattern.findall(sentence)
| 3,526 | 41.493976 | 79 | py |
camel_tools | camel_tools-master/camel_tools/sentiment/__init__.py |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains the CAMeL Tools sentiment analyzer component.
"""
import torch
import torch.nn.functional as torch_fun
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification
from camel_tools.data import CATALOGUE
_LABELS = ('positive', 'negative', 'neutral')
class SentimentDataset(Dataset):
    """PyTorch dataset wrapping a batch of tokenized sentences for
    sentiment classification.

    Args:
        sentences (:obj:`list` of :obj:`str`): The input sentences.
        tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained tokenizer.
        max_seq_length (:obj:`int`): Maximum sentence length; longer inputs
            are truncated and shorter ones padded.
    """

    def __init__(self, sentences, tokenizer, max_seq_length):
        # Tokenize the whole batch up front; individual items are sliced
        # out of the resulting tensors in __getitem__.
        self.encoded_sents = tokenizer(sentences,
                                       add_special_tokens=True,
                                       padding=True,
                                       max_length=max_seq_length,
                                       truncation=True,
                                       return_tensors="pt")

    def __getitem__(self, idx):
        encoded = self.encoded_sents
        return {field: getattr(encoded, field)[idx]
                for field in ('input_ids', 'token_type_ids',
                              'attention_mask')}

    def __len__(self):
        return self.encoded_sents.input_ids.shape[0]
class SentimentAnalyzer:
    """CAMeL Tools sentiment analysis component.

    Args:
        model_path (:obj:`str`): The path to the fine-tuned model.
        use_gpu (:obj:`bool`, optional): Whether to run inference on a GPU
            when one is available. Defaults to True.
    """

    def __init__(self, model_path, use_gpu=True):
        self.model = BertForSequenceClassification.from_pretrained(model_path)
        self.tokenizer = BertTokenizer.from_pretrained(model_path)

        # Mapping from model output indices to label strings.
        self.labels_map = self.model.config.id2label
        self.use_gpu = use_gpu

    @staticmethod
    def pretrained(model_name=None, use_gpu=True):
        """Load a pre-trained model provided with camel_tools.

        Args:
            model_name (:obj:`str`, optional): Name of pre-trained model to
                load. Two models are available: 'arabert' and 'mbert'.
                If None, the default model ('arabert') is loaded.
                Defaults to None.
            use_gpu (:obj:`bool`, optional): Whether to run inference on a
                GPU when one is available. Defaults to True.

        Returns:
            :obj:`SentimentAnalyzer`: Instance with loaded pre-trained
            model.
        """

        component = CATALOGUE.components['SentimentAnalysis']
        if model_name is None:
            model_name = component.default
        model_info = component.datasets[model_name]

        return SentimentAnalyzer(str(model_info.path), use_gpu)

    @staticmethod
    def labels():
        """Get the list of possible sentiment labels returned by
        predictions.

        Returns:
            :obj:`list` of :obj:`str`: List of sentiment labels.
        """

        return list(_LABELS)

    def predict_sentence(self, sentence):
        """Predict the sentiment label of a single sentence.

        Args:
            sentence (:obj:`str`): Input sentence.

        Returns:
            :obj:`str`: The predicted sentiment label for the given
            sentence.
        """

        return self.predict([sentence])[0]

    def predict(self, sentences, batch_size=32):
        """Predict the sentiment labels of a list of sentences.

        Args:
            sentences (:obj:`list` of :obj:`str`): Input sentences.
            batch_size (:obj:`int`): The batch size.

        Returns:
            :obj:`list` of :obj:`str`: The predicted sentiment labels for
            the given sentences.
        """

        dataset = SentimentDataset(sentences, self.tokenizer,
                                   max_seq_length=512)
        loader = DataLoader(dataset, batch_size=batch_size, shuffle=False,
                            drop_last=False)

        if self.use_gpu and torch.cuda.is_available():
            device = 'cuda'
        else:
            device = 'cpu'

        self.model.to(device)
        self.model.eval()

        predicted = []

        # Inference only: no gradients needed.
        with torch.no_grad():
            for batch in loader:
                inputs = {key: tensor.to(device)
                          for key, tensor in batch.items()}
                logits = self.model(**inputs)[0]
                probs = torch_fun.softmax(logits, dim=-1)
                top = torch.argmax(probs, dim=-1)
                predicted.extend(self.labels_map[p.item()] for p in top)

        return predicted
| 6,028 | 34.464706 | 82 | py |
camel_tools | camel_tools-master/camel_tools/ner/__init__.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains the CAMeL Tools Named Entity Recognition component.
"""
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from transformers import BertForTokenClassification, BertTokenizer
from camel_tools.data import CATALOGUE
# BIO-style tag set returned by NERecognizer.labels(): B-/I- prefixed
# location, organization, person and miscellaneous tags, plus the
# outside tag 'O'.
_LABELS = ['B-LOC', 'B-ORG', 'B-PERS', 'B-MISC', 'I-LOC', 'I-ORG', 'I-PERS',
           'I-MISC', 'O']
class _PrepSentence:
    """A single input sentence prepared for token classification.

    Args:
        guid (:obj:`str`): Unique id for the sentence.
        words (:obj:`list` of :obj:`str`): list of words of the sentence.
        labels (:obj:`list` of :obj:`str`): The labels for each word
            of the sentence.
    """

    def __init__(self, guid, words, labels):
        # Plain data holder; no processing is done on the inputs.
        self.guid, self.words, self.labels = guid, words, labels
def _prepare_sentences(sentences):
    """Encapsulates the input sentences into :obj:`_PrepSentence` objects.

    Every word receives the placeholder label 'O' since labels are unknown
    at prediction time.

    Args:
        sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
            sentences.

    Returns:
        :obj:`list` of :obj:`_PrepSentence`: The list of prepared sentences.
    """
    prepared = []
    for index, words in enumerate(sentences, start=1):
        prepared.append(_PrepSentence(guid=f"{index}",
                                      words=words,
                                      labels=['O'] * len(words)))
    return prepared
class NERDataset(Dataset):
    """NER PyTorch Dataset

    Args:
        sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
            sentences.
        tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained tokenizer.
        labels (:obj:`list` of :obj:`str`): The labels which the model was
            trained to classify.
        max_seq_length (:obj:`int`): Maximum sentence length.
    """

    def __init__(self, sentences, tokenizer, labels, max_seq_length):
        prepared_sentences = _prepare_sentences(sentences)
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        self.pad_token_label_id = nn.CrossEntropyLoss().ignore_index
        # Featurization is done eagerly, once, for the whole input.
        self.features = self._featurize_input(
            prepared_sentences,
            labels,
            max_seq_length,
            tokenizer,
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token,
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )

    def _featurize_input(self, prepared_sentences, label_list, max_seq_length,
                         tokenizer, cls_token="[CLS]", cls_token_segment_id=0,
                         sep_token="[SEP]", pad_token=0, pad_token_segment_id=0,
                         pad_token_label_id=-100, sequence_a_segment_id=0,
                         mask_padding_with_zero=True):
        """Featurizes the input which will be fed to the fine-tuned BERT model.

        Sentences whose tokenized form exceeds ``max_seq_length - 2`` word
        pieces (leaving room for [CLS] and [SEP]) are split into multiple
        segments; each segment becomes its own feature dict carrying the
        originating ``sent_id`` so predictions can be re-joined later.

        Args:
            prepared_sentences (:obj:`list` of :obj:`_PrepSentence`): list of
                PrepSentence objects.
            label_list (:obj:`list` of :obj:`str`): The labels which the model
                was trained to classify.
            max_seq_length (:obj:`int`): Maximum sequence length.
            tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained
                tokenizer.
            cls_token (:obj:`str`): BERT's CLS token. Defaults to [CLS].
            cls_token_segment_id (:obj:`int`): BERT's CLS token segment id.
                Defaults to 0.
            sep_token (:obj:`str`): BERT's SEP token. Defaults to [SEP].
            pad_token (:obj:`int`): BERT's padding token. Defaults to 0.
            pad_token_segment_id (:obj:`int`): BERT's padding token segment id.
                Defaults to 0.
            pad_token_label_id (:obj:`int`): BERT's padding token label id.
                Defaults to -100.
            sequence_a_segment_id (:obj:`int`): BERT's segment id.
                Defaults to 0.
            mask_padding_with_zero (:obj:`bool`): Whether to mask the padding
                tokens with zero or not. Defaults to True.

        Returns:
            obj:`list` of :obj:`Dict`: list of dicts of the needed features.
        """
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for sent_id, sentence in enumerate(prepared_sentences):
            tokens = []
            label_ids = []
            for word, label in zip(sentence.words, sentence.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs nothing ([])
                # when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.append(word_tokens)
                    # Use the real label id for the first token of the word,
                    # and padding ids for the remaining tokens
                    label_ids.append([label_map[label]] +
                                     [pad_token_label_id] *
                                     (len(word_tokens) - 1))
            token_segments = []
            token_segment = []
            label_ids_segments = []
            label_ids_segment = []
            num_word_pieces = 0
            # Reserve two positions for the [CLS] and [SEP] special tokens.
            seg_seq_length = max_seq_length - 2
            # Dealing with empty sentences
            if len(tokens) == 0:
                data = self._add_special_tokens(token_segment,
                                                label_ids_segment,
                                                tokenizer,
                                                max_seq_length,
                                                cls_token,
                                                sep_token, pad_token,
                                                cls_token_segment_id,
                                                pad_token_segment_id,
                                                pad_token_label_id,
                                                sequence_a_segment_id,
                                                mask_padding_with_zero)
                # Adding sentence id
                data['sent_id'] = sent_id
                features.append(data)
            else:
                # Chunking the tokenized sentence into multiple segments
                # if it's longer than max_seq_length - 2
                for idx, word_pieces in enumerate(tokens):
                    # Words are never split across segments: if this word's
                    # pieces don't fit, flush the current segment first.
                    if num_word_pieces + len(word_pieces) > seg_seq_length:
                        data = self._add_special_tokens(token_segment,
                                                        label_ids_segment,
                                                        tokenizer,
                                                        max_seq_length,
                                                        cls_token,
                                                        sep_token, pad_token,
                                                        cls_token_segment_id,
                                                        pad_token_segment_id,
                                                        pad_token_label_id,
                                                        sequence_a_segment_id,
                                                        mask_padding_with_zero)
                        # Adding sentence id
                        data['sent_id'] = sent_id
                        features.append(data)
                        token_segments.append(token_segment)
                        label_ids_segments.append(label_ids_segment)
                        # Start the next segment with the word that overflowed.
                        token_segment = list(word_pieces)
                        label_ids_segment = list(label_ids[idx])
                        num_word_pieces = len(word_pieces)
                    else:
                        token_segment.extend(word_pieces)
                        label_ids_segment.extend(label_ids[idx])
                        num_word_pieces += len(word_pieces)
                # Adding the last segment
                if len(token_segment) > 0:
                    data = self._add_special_tokens(token_segment,
                                                    label_ids_segment,
                                                    tokenizer,
                                                    max_seq_length,
                                                    cls_token,
                                                    sep_token, pad_token,
                                                    cls_token_segment_id,
                                                    pad_token_segment_id,
                                                    pad_token_label_id,
                                                    sequence_a_segment_id,
                                                    mask_padding_with_zero)
                    # Adding sentence id
                    data['sent_id'] = sent_id
                    features.append(data)
                    token_segments.append(token_segment)
                    label_ids_segments.append(label_ids_segment)
        # DEBUG: Making sure we got all segments correctly
        # assert sum([len(_) for _ in label_ids_segments]) == \
        #     sum([len(_) for _ in label_ids])
        # assert sum([len(_) for _ in token_segments]) == \
        #     sum([len(_) for _ in tokens])
        return features

    def _add_special_tokens(self, tokens, label_ids, tokenizer, max_seq_length,
                            cls_token, sep_token, pad_token,
                            cls_token_segment_id, pad_token_segment_id,
                            pad_token_label_id, sequence_a_segment_id,
                            mask_padding_with_zero):
        """Wrap a token segment with [CLS]/[SEP], convert to ids and pad
        everything to ``max_seq_length``.

        Returns a dict of 'input_ids', 'attention_mask', 'token_type_ids'
        and 'label_ids' tensors, each of length ``max_seq_length``.
        """
        _tokens = list(tokens)
        _label_ids = list(label_ids)
        _tokens += [sep_token]
        _label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(_tokens)
        _tokens = [cls_token] + _tokens
        _label_ids = [pad_token_label_id] + _label_ids
        segment_ids = [cls_token_segment_id] + segment_ids
        input_ids = tokenizer.convert_tokens_to_ids(_tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only
        # real tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        input_ids += [pad_token] * padding_length
        input_mask += [0 if mask_padding_with_zero else 1] * padding_length
        segment_ids += [pad_token_segment_id] * padding_length
        _label_ids += [pad_token_label_id] * padding_length
        return {'input_ids': torch.tensor(input_ids),
                'attention_mask': torch.tensor(input_mask),
                'token_type_ids': torch.tensor(segment_ids),
                'label_ids': torch.tensor(_label_ids)}

    def __len__(self):
        # Number of segments (not sentences): long sentences contribute
        # multiple features.
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]
class NERecognizer():
    """CAMeL Tools NER component.

    Args:
        model_path (:obj:`str`): The path to the fine-tuned model.
        use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
            Defaults to True.
    """

    def __init__(self, model_path, use_gpu=True):
        self.model = BertForTokenClassification.from_pretrained(model_path)
        self.tokenizer = BertTokenizer.from_pretrained(model_path)
        # id -> label mapping stored in the fine-tuned model's config.
        self.labels_map = self.model.config.id2label
        self.use_gpu = use_gpu

    @staticmethod
    def pretrained(model_name=None, use_gpu=True):
        """Load a pre-trained model provided with camel_tools.

        Args:
            model_name (:obj:`str`, optional): Name of pre-trained model to
                load. One model is available: 'arabert'.
                If None, the default model ('arabert') will be loaded.
                Defaults to None.
            use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
                Defaults to True.

        Returns:
            :obj:`NERecognizer`: Instance with loaded pre-trained model.
        """
        if model_name is None:
            model_name = CATALOGUE.components['NamedEntityRecognition'].default
        model_info = (CATALOGUE.components['NamedEntityRecognition']
                      .datasets[model_name])
        model_path = str(model_info.path)
        return NERecognizer(model_path, use_gpu)

    @staticmethod
    def labels():
        """Get the list of NER labels returned by predictions.

        Returns:
            :obj:`list` of :obj:`str`: List of NER labels.
        """
        return list(_LABELS)

    def _align_predictions(self, predictions, label_ids, sent_ids):
        """Aligns the predictions of the model with the inputs and it takes
        care of getting rid of the padding token.

        Args:
            predictions (:obj:`np.ndarray`): The predictions of the model
            label_ids (:obj:`np.ndarray`): The label ids of the inputs.
                They will always be the ids of Os since we're dealing with a
                test dataset. Note that label_ids are also padded.
            sent_ids (:obj:`np.ndarray`): The sent ids of the inputs.

        Returns:
            :obj:`list` of :obj:`list` of :obj:`str`: The predicted labels for
            all the sentences in the batch
        """
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        # Hoisted out of the loops: previously a CrossEntropyLoss object was
        # constructed per token just to read its constant ignore_index.
        ignore_index = nn.CrossEntropyLoss().ignore_index
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                # Positions labeled with ignore_index are special/padding
                # tokens and carry no prediction.
                if label_ids[i, j] != ignore_index:
                    preds_list[i].append(self.labels_map[preds[i][j]])
        # Collating the predicted labels based on the sentence ids, since
        # long sentences were chunked into multiple segments.
        final_preds_list = [[] for _ in range(len(set(sent_ids)))]
        for i, sent_id in enumerate(sent_ids):
            final_preds_list[sent_id].extend(preds_list[i])
        return final_preds_list

    def predict(self, sentences, batch_size=32):
        """Predict the named entity labels of a list of sentences.

        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
            batch_size (:obj:`int`): The batch size.

        Returns:
            :obj:`list` of :obj:`list` of :obj:`str`: The predicted named
            entity labels for the given sentences.
        """
        if len(sentences) == 0:
            return []

        test_dataset = NERDataset(sentences=sentences,
                                  tokenizer=self.tokenizer,
                                  labels=list(self.labels_map.values()),
                                  max_seq_length=256)
        data_loader = DataLoader(test_dataset, batch_size=batch_size,
                                 shuffle=False, drop_last=False)

        label_ids = None
        preds = None
        sent_ids = None
        device = ('cuda' if self.use_gpu and torch.cuda.is_available()
                  else 'cpu')
        self.model.to(device)
        self.model.eval()

        with torch.no_grad():
            for batch in data_loader:
                batch = {k: v.to(device) for k, v in batch.items()}
                inputs = {'input_ids': batch['input_ids'],
                          'token_type_ids': batch['token_type_ids'],
                          'attention_mask': batch['attention_mask']}

                # Accumulate labels, sentence ids and logits over batches so
                # they can be aligned in one pass at the end.
                label_ids = (batch['label_ids'] if label_ids is None
                             else torch.cat((label_ids, batch['label_ids'])))
                sent_ids = (batch['sent_id'] if sent_ids is None
                            else torch.cat((sent_ids, batch['sent_id'])))
                logits = self.model(**inputs)[0]
                preds = logits if preds is None else torch.cat((preds, logits),
                                                               dim=0)

        predictions = self._align_predictions(preds.cpu().numpy(),
                                              label_ids.cpu().numpy(),
                                              sent_ids.cpu().numpy())
        return predictions

    def predict_sentence(self, sentence):
        """Predict the named entity labels of a single sentence.

        Args:
            sentence (:obj:`list` of :obj:`str`): The input sentence.

        Returns:
            :obj:`list` of :obj:`str`: The predicted named entity
            labels for the given sentence.
        """
        return self.predict([sentence])[0]
| 17,999 | 40.189931 | 79 | py |
camel_tools | camel_tools-master/camel_tools/dialectid/model6.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains the CAMeL Tools dialect identification component.
This Dialect Identification system can identify between 5 Arabic city dialects
as well as Modern Standard Arabic. It is based on the system described by
`Salameh, Bouamor and Habash <http://www.aclweb.org/anthology/C18-1113>`_.
"""
import collections
from pathlib import Path
import sys
if sys.platform == 'win32':
raise ModuleNotFoundError(
'camel_tools.dialectid is not available on Windows.')
else:
import kenlm
import numpy as np
import pandas as pd
import scipy as sp
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import normalize
from sklearn.metrics import accuracy_score, f1_score, recall_score
from sklearn.metrics import precision_score
import dill
from camel_tools.data import CATALOGUE
from camel_tools.tokenizers.word import simple_word_tokenize
from camel_tools.utils.dediac import dediac_ar
from camel_tools.dialectid.common import *
__all__ = ['DIDModel6']

# The six MADAR city labels Model-6 was trained on (incl. MSA).
_DEFAULT_LABELS = frozenset(['BEI', 'CAI', 'DOH', 'MSA', 'RAB', 'TUN'])

# Country / region groupings derived from the city labels below.
_DEFAULT_COUNTRIES = frozenset(['Egypt', 'Lebanon', 'Modern Standard Arabic',
                                'Morocco', 'Qatar', 'Tunisia'])
_DEFAULT_REGIONS = frozenset(['Gulf', 'Levant', 'Maghreb',
                              'Modern Standard Arabic', 'Nile Basin'])

# Mappings from the three-letter training labels to human-readable names.
_LABEL_TO_CITY_MAP = {
    'BEI': 'Beirut',
    'CAI': 'Cairo',
    'DOH': 'Doha',
    'MSA': 'Modern Standard Arabic',
    'RAB': 'Rabat',
    'TUN': 'Tunis'
}

_LABEL_TO_COUNTRY_MAP = {
    'BEI': 'Lebanon',
    'CAI': 'Egypt',
    'DOH': 'Qatar',
    'MSA': 'Modern Standard Arabic',
    'RAB': 'Morocco',
    'TUN': 'Tunisia'
}

_LABEL_TO_REGION_MAP = {
    'BEI': 'Levant',
    'CAI': 'Nile Basin',
    'DOH': 'Gulf',
    'MSA': 'Modern Standard Arabic',
    'RAB': 'Maghreb',
    'TUN': 'Maghreb'
}

# Paths to the packaged language models and MADAR train/dev/test splits.
_DATA_DIR = CATALOGUE.components['DialectID'].datasets['model6'].path
_CHAR_LM_DIR = Path(_DATA_DIR, 'lm', 'char')
_WORD_LM_DIR = Path(_DATA_DIR, 'lm', 'word')

_TRAIN_DATA_PATH = Path(_DATA_DIR, 'corpus_6_train.tsv')
_DEV_DATA_PATH = Path(_DATA_DIR, 'corpus_6_dev.tsv')
_TEST_DATA_PATH = Path(_DATA_DIR, 'corpus_6_test.tsv')
def _normalize_lm_scores(scores):
    """Exponentiate log LM scores and L2-normalize each row."""
    return normalize(np.exp(scores))
def _word_to_char(txt):
    """Convert a word-tokenized string into its character-LM form.

    Characters within a word are space-separated and word boundaries are
    marked with the '<SPACE>' token.
    """
    spaced_words = [' '.join(word) for word in txt.split()]
    return ' <SPACE> '.join(spaced_words)
def label_to_city(prediction):
    """Converts a dialect prediction using labels to use city names instead.

    Args:
        prediction (:obj:`DIDPred`): The prediction to convert.

    Returns:
        :obj:`DIDPred`: The converted prediction.
    """
    city_scores = {}
    for label, score in prediction.scores.items():
        city_scores[_LABEL_TO_CITY_MAP[label]] = score
    return DIDPred(_LABEL_TO_CITY_MAP[prediction.top], city_scores)
def label_to_country(prediction):
    """Converts a dialect prediction using labels to use country names instead.

    Args:
        prediction (:obj:`DIDPred`): The prediction to convert.

    Returns:
        :obj:`DIDPred`: The converted prediction.
    """
    # Several city labels may map to one country, so scores are summed.
    country_scores = dict.fromkeys(_DEFAULT_COUNTRIES, 0.0)
    for label, prob in prediction.scores.items():
        country_scores[_LABEL_TO_COUNTRY_MAP[label]] += prob
    best_country, _ = max(country_scores.items(), key=lambda item: item[1])
    return DIDPred(best_country, country_scores)
def label_to_region(prediction):
    """Converts a dialect prediction using labels to use region names instead.

    Args:
        prediction (:obj:`DIDPred`): The prediction to convert.

    Returns:
        :obj:`DIDPred`: The converted prediction.
    """
    # Several city labels may map to one region, so scores are summed.
    region_scores = dict.fromkeys(_DEFAULT_REGIONS, 0.0)
    for label, prob in prediction.scores.items():
        region_scores[_LABEL_TO_REGION_MAP[label]] += prob
    best_region, _ = max(region_scores.items(), key=lambda item: item[1])
    return DIDPred(best_region, region_scores)
class DIDModel6(object):
    """A class for training, evaluating and running the dialect identification
    model 'Model-6' described by Salameh et al. After initializing an instance,
    you must run the train method once before using it.

    Args:
        labels (:obj:`set` of :obj:`str`, optional): The set of dialect labels
            used in the training data in the main model.
            If None, the default labels are used.
            Defaults to None.
        char_lm_dir (:obj:`str`, optional): Path to the directory containing
            the character-based language models. If None, use the language
            models that come with this package. Defaults to None.
        word_lm_dir (:obj:`str`, optional): Path to the directory containing
            the word-based language models. If None, use the language models
            that come with this package. Defaults to None.
    """

    def __init__(self, labels=None,
                 char_lm_dir=None,
                 word_lm_dir=None):
        if labels is None:
            labels = _DEFAULT_LABELS
        if char_lm_dir is None:
            char_lm_dir = _CHAR_LM_DIR
        if word_lm_dir is None:
            word_lm_dir = _WORD_LM_DIR

        self._labels = labels
        # LM feature columns follow this sorted order everywhere below.
        self._labels_sorted = sorted(labels)
        self._char_lms = collections.defaultdict(kenlm.Model)
        self._word_lms = collections.defaultdict(kenlm.Model)
        self._load_lms(char_lm_dir, word_lm_dir)
        self._is_trained = False

    def _load_lms(self, char_lm_dir, word_lm_dir):
        # Load one character LM and one word LM (<label>.arpa) per label.
        config = kenlm.Config()
        config.show_progress = False
        config.arpa_complain = kenlm.ARPALoadComplain.NONE

        for label in self._labels:
            char_lm_path = Path(char_lm_dir, '{}.arpa'.format(label))
            word_lm_path = Path(word_lm_dir, '{}.arpa'.format(label))
            self._char_lms[label] = kenlm.Model(str(char_lm_path), config)
            self._word_lms[label] = kenlm.Model(str(word_lm_path), config)

    def _get_char_lm_scores(self, txt):
        # Score the character-level representation under every label's
        # character LM, in sorted-label order.
        chars = _word_to_char(txt)
        return np.array([self._char_lms[label].score(chars, bos=True, eos=True)
                         for label in self._labels_sorted])

    def _get_word_lm_scores(self, txt):
        # Score the sentence under every label's word LM, in sorted-label
        # order.
        return np.array([self._word_lms[label].score(txt, bos=True, eos=True)
                         for label in self._labels_sorted])

    def _get_lm_feats(self, txt):
        # Build a single feature row: normalized word-LM scores followed by
        # normalized char-LM scores.
        word_lm_scores = self._get_word_lm_scores(txt).reshape(1, -1)
        word_lm_scores = _normalize_lm_scores(word_lm_scores)
        char_lm_scores = self._get_char_lm_scores(txt).reshape(1, -1)
        char_lm_scores = _normalize_lm_scores(char_lm_scores)
        feats = np.concatenate((word_lm_scores, char_lm_scores), axis=1)
        return feats

    def _get_lm_feats_multi(self, sentences):
        feats_list = collections.deque()
        for sentence in sentences:
            feats_list.append(self._get_lm_feats(sentence))
        feats_matrix = np.array(feats_list)
        # NOTE(review): 12 = 2 feature groups x 6 labels; this hard-coded
        # width assumes the default six-label setup — confirm before using
        # a custom `labels` set.
        feats_matrix = feats_matrix.reshape((-1, 12))
        return feats_matrix

    def _prepare_sentences(self, sentences):
        # Dediacritize + tokenize, then combine tf-idf n-gram features with
        # the LM score features into one sparse matrix.
        tokenized = [' '.join(simple_word_tokenize(dediac_ar(s)))
                     for s in sentences]
        sent_array = np.array(tokenized)
        x_trans = self._feat_union.transform(sent_array)
        x_lm_feats = self._get_lm_feats_multi(sentences)
        x_final = sp.sparse.hstack((x_trans, x_lm_feats))
        return x_final

    def train(self, data_path=None,
              char_ngram_range=(1, 3),
              word_ngram_range=(1, 1),
              n_jobs=None):
        """Trains the model on a given data set.

        Args:
            data_path (:obj:`str`, optional): Path to main training data.
                If None, use the provided training data.
                Defaults to None.
            char_ngram_range (:obj:`tuple`, optional): The n-gram ranges to
                consider in the character-based language models.
                Defaults to (1, 3).
            word_ngram_range (:obj:`tuple`, optional): The n-gram ranges to
                consider in the word-based language models.
                Defaults to (1, 1).
            n_jobs (:obj:`int`, optional): The number of parallel jobs to use
                for computation. If None, then only 1 job is used.
                If -1 then all processors are used. Defaults to None.
        """
        if data_path is None:
            data_path = _TRAIN_DATA_PATH

        # Load training data and extract
        train_data = pd.read_csv(data_path, sep='\t')
        x = train_data['ar'].values
        y = train_data['dialect'].values

        # Build and train main classifier
        self._label_encoder = LabelEncoder()
        self._label_encoder.fit(y)
        y_trans = self._label_encoder.transform(y)

        word_vectorizer = TfidfVectorizer(lowercase=False,
                                          ngram_range=word_ngram_range,
                                          analyzer='word',
                                          tokenizer=lambda x: x.split(' '))
        char_vectorizer = TfidfVectorizer(lowercase=False,
                                          ngram_range=char_ngram_range,
                                          analyzer='char',
                                          tokenizer=lambda x: x.split(' '))
        self._feat_union = FeatureUnion([('wordgrams', word_vectorizer),
                                         ('chargrams', char_vectorizer)])
        self._feat_union.fit(x)

        x_prepared = self._prepare_sentences(x)

        self._classifier = OneVsRestClassifier(MultinomialNB(), n_jobs=n_jobs)
        self._classifier.fit(x_prepared, y_trans)

        self._is_trained = True

    def eval(self, data_path=None, data_set='DEV'):
        """Evaluate the trained model on a given data set.

        Args:
            data_path (:obj:`str`, optional): Path to an evaluation data set.
                If None, use one of the provided data sets instead.
                Defaults to None.
            data_set (:obj:`str`, optional): Name of the provided data set to
                use. This is ignored if data_path is not None. Can be either
                'DEV' or 'TEST'. Defaults to 'DEV'.

        Returns:
            :obj:`dict`: A dictionary mapping each prediction level ('city',
            'country', 'region') to a dictionary of metric scores (accuracy,
            f1_macro, recall_macro, precision_macro).
        """
        if not self._is_trained:
            raise UntrainedModelError(
                'Can\'t evaluate an untrained model.')

        if data_path is None:
            if data_set == 'DEV':
                data_path = _DEV_DATA_PATH
            elif data_set == 'TEST':
                data_path = _TEST_DATA_PATH
            else:
                raise InvalidDataSetError(data_set)

        # Load eval data
        eval_data = pd.read_csv(data_path, sep='\t')
        sentences = eval_data['ar'].values
        did_true_city = eval_data['dialect'].values
        did_true_country = [_LABEL_TO_COUNTRY_MAP[d] for d in did_true_city]
        did_true_region = [_LABEL_TO_REGION_MAP[d] for d in did_true_city]

        # Generate predictions
        did_pred = self.predict(sentences)
        did_pred_city = [d.top for d in did_pred]
        did_pred_country = [d.top for d in map(label_to_country, did_pred)]
        did_pred_region = [d.top for d in map(label_to_region, did_pred)]

        # Get scores
        scores = {
            'city': {
                'accuracy': accuracy_score(did_true_city, did_pred_city),
                'f1_macro': f1_score(did_true_city, did_pred_city,
                                     average='macro'),
                'recall_macro': recall_score(did_true_city, did_pred_city,
                                             average='macro'),
                'precision_macro': precision_score(did_true_city,
                                                   did_pred_city,
                                                   average='macro')
            },
            'country': {
                'accuracy': accuracy_score(did_true_country, did_pred_country),
                'f1_macro': f1_score(did_true_country, did_pred_country,
                                     average='macro'),
                'recall_macro': recall_score(did_true_country,
                                             did_pred_country,
                                             average='macro'),
                'precision_macro': precision_score(did_true_country,
                                                   did_pred_country,
                                                   average='macro')
            },
            'region': {
                'accuracy': accuracy_score(did_true_region, did_pred_region),
                'f1_macro': f1_score(did_true_region, did_pred_region,
                                     average='macro'),
                'recall_macro': recall_score(did_true_region, did_pred_region,
                                             average='macro'),
                'precision_macro': precision_score(did_true_region,
                                                   did_pred_region,
                                                   average='macro')
            },
        }

        return scores

    def predict(self, sentences, output='label'):
        """Predict the dialect probability scores for a given list of
        sentences.

        Args:
            sentences (:obj:`list` of :obj:`str`): The list of sentences.
            output (:obj:`str`): The output label type. Possible values are
                'label', 'city', 'country', or 'region'. Defaults to 'label'.

        Returns:
            :obj:`list` of :obj:`DIDPred`: A list of prediction results,
            each corresponding to its respective sentence.
        """
        if not self._is_trained:
            raise UntrainedModelError(
                'Can\'t predict with an untrained model.')

        if output == 'label':
            convert = lambda x: x
        elif output == 'city':
            convert = label_to_city
        elif output == 'country':
            convert = label_to_country
        elif output == 'region':
            convert = label_to_region
        else:
            # Unknown output values silently fall back to raw labels.
            convert = lambda x: x

        x_prepared = self._prepare_sentences(sentences)
        predicted_scores = self._classifier.predict_proba(x_prepared)

        result = collections.deque()
        for scores in predicted_scores:
            score_tups = list(zip(self._labels_sorted, scores))
            predicted_dialect = max(score_tups, key=lambda x: x[1])[0]
            dialect_scores = dict(score_tups)
            result.append(convert(DIDPred(predicted_dialect, dialect_scores)))

        return list(result)

    @staticmethod
    def pretrained():
        """Load the default pre-trained model provided with camel-tools.

        Raises:
            :obj:`PretrainedModelError`: When a pre-trained model compatible
                with the current Python version isn't available.

        Returns:
            :obj:`DialectIdentifier`: The loaded model.
        """
        # Models are serialized per Python version (e.g. 'did_pretrained_38').
        suffix = '{}{}'.format(sys.version_info.major, sys.version_info.minor)
        model_file_name = 'did_pretrained_{}.dill'.format(suffix)
        model_path = Path(_DATA_DIR, model_file_name)

        if not model_path.is_file():
            raise PretrainedModelError(
                'No pretrained model for current Python version found.')

        with model_path.open('rb') as model_fp:
            model = dill.load(model_fp)

            # We need to reload LMs since they were set to None when
            # serialized.
            model._char_lms = collections.defaultdict(kenlm.Model)
            model._word_lms = collections.defaultdict(kenlm.Model)
            model._load_lms(_CHAR_LM_DIR, _WORD_LM_DIR)

        return model
def train_default_model():
    """Train Model-6 on the packaged training data and serialize it to the
    data directory for the current Python version.
    """
    # Removed a stray debug print of _DATA_DIR that was left in this
    # library function.
    did = DIDModel6()
    did.train()

    # We don't want to serialize kenlm models as they will utilize the
    # absolute LM paths used in training. They will be reloaded when using
    # DIDModel6.pretrained().
    did._char_lms = None
    did._word_lms = None

    # Serialized file name is Python-version specific (see pretrained()).
    suffix = '{}{}'.format(sys.version_info.major, sys.version_info.minor)
    model_file_name = 'did_pretrained_{}.dill'.format(suffix)
    model_path = Path(_DATA_DIR, model_file_name)

    with model_path.open('wb') as model_fp:
        dill.dump(did, model_fp)
def label_city_pairs():
    """Returns the set of default label-city pairs.

    Returns:
        :obj:`frozenset` of :obj:`tuple`: The set of default label-dialect
        pairs.
    """
    pairs = tuple(_LABEL_TO_CITY_MAP.items())
    return frozenset(pairs)
def label_country_pairs():
    """Returns the set of default label-country pairs.

    Returns:
        :obj:`frozenset` of :obj:`tuple`: The set of default label-country
        pairs.
    """
    pairs = tuple(_LABEL_TO_COUNTRY_MAP.items())
    return frozenset(pairs)
def label_region_pairs():
    """Returns the set of default label-region pairs.

    Returns:
        :obj:`frozenset` of :obj:`tuple`: The set of default label-region
        pairs.
    """
    pairs = tuple(_LABEL_TO_REGION_MAP.items())
    return frozenset(pairs)
| 18,565 | 35.261719 | 79 | py |
camel_tools | camel_tools-master/camel_tools/dialectid/common.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains the CAMeL Tools dialect identification component.
This Dialect Identification system can identify between 25 Arabic city dialects
as well as Modern Standard Arabic. It is based on the system described by
`Salameh, Bouamor and Habash <http://www.aclweb.org/anthology/C18-1113>`_.
"""
import collections
__all__ = [
'DIDPred',
'DialectIdError',
'UntrainedModelError',
'InvalidDataSetError',
'PretrainedModelError',
]
class DIDPred(collections.namedtuple('DIDPred', ('top', 'scores'))):
    """Named tuple holding the result of a dialect ID prediction.

    Attributes:
        top (:obj:`str`): The dialect label that received the highest score.
            See :ref:`dialectid_labels` for a list of output labels.
        scores (:obj:`dict`): A dictionary mapping each dialect label to its
            computed score.
    """
class DialectIdError(Exception):
    """Base class for all CAMeL Dialect ID errors.

    Args:
        msg (:obj:`str`): The error message.
    """

    def __init__(self, msg):
        # Forward the message to Exception so that ``args`` is populated;
        # this keeps ``repr()`` informative and allows the exception to be
        # pickled/unpickled correctly (unpickling re-invokes __init__ with
        # ``args``). Previously ``args`` was left empty.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return str(self.msg)
class UntrainedModelError(DialectIdError):
    """Error thrown when attempting to use an untrained DialectIdentifier
    instance.
    """

    def __init__(self, msg):
        super().__init__(msg)
class InvalidDataSetError(DialectIdError, ValueError):
    """Error thrown when an invalid data set name is given to eval.
    """

    def __init__(self, dataset):
        # MRO routes super() through DialectIdError, matching the original
        # explicit DialectIdError.__init__ call.
        super().__init__('Invalid data set name {}. Valid names are "DEV" and '
                         '"TEST"'.format(repr(dataset)))
class PretrainedModelError(DialectIdError):
    """Error thrown when attempting to load a pretrained model provided with
    camel-tools.
    """

    def __init__(self, msg):
        super().__init__(msg)
| 2,979 | 31.043011 | 79 | py |
camel_tools | camel_tools-master/camel_tools/dialectid/model26.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains the CAMeL Tools dialect identification component.
This Dialect Identification system can identify between 25 Arabic city dialects
as well as Modern Standard Arabic. It is based on the system described by
`Salameh, Bouamor and Habash <http://www.aclweb.org/anthology/C18-1113>`_.
"""
import collections
from pathlib import Path
import sys
# This module is explicitly unsupported on Windows: its kenlm dependency is
# only imported on other platforms, so fail loudly at import time rather
# than with an obscure error later.
if sys.platform == 'win32':
    raise ModuleNotFoundError(
        'camel_tools.dialectid is not available on Windows.')
else:
    import kenlm
import numpy as np
import pandas as pd
import scipy as sp
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.preprocessing import normalize
from sklearn.metrics import accuracy_score, f1_score, recall_score
from sklearn.metrics import precision_score
import dill
from camel_tools.data import CATALOGUE
from camel_tools.tokenizers.word import simple_word_tokenize
from camel_tools.utils.dediac import dediac_ar
from camel_tools.dialectid.common import *
# Public API of this module.
__all__ = ['DIDModel26']
# The 26 labels of the main model: 25 city dialects plus Modern Standard
# Arabic (MSA).
_DEFAULT_LABELS = frozenset(['ALE', 'ALG', 'ALX', 'AMM', 'ASW', 'BAG', 'BAS',
                             'BEI', 'BEN', 'CAI', 'DAM', 'DOH', 'FES', 'JED',
                             'JER', 'KHA', 'MOS', 'MSA', 'MUS', 'RAB', 'RIY',
                             'SAL', 'SAN', 'SFX', 'TRI', 'TUN'])
# The 6 labels used by the auxiliary "extra features" model trained in
# DIDModel26.train.
_DEFAULT_LABELS_EXTRA = frozenset(['BEI', 'CAI', 'DOH', 'MSA', 'RAB', 'TUN'])
# Country- and region-level groupings used when aggregating city scores.
_DEFAULT_COUNTRIES = frozenset(['Algeria', 'Egypt', 'Iraq', 'Jordan',
                                'Lebanon', 'Libya', 'Modern Standard Arabic',
                                'Morocco', 'Oman', 'Palestine', 'Qatar',
                                'Saudi Arabia', 'Sudan', 'Syria', 'Tunisia',
                                'Yemen'])
_DEFAULT_REGIONS = frozenset(['Gulf', 'Gulf of Aden', 'Levant', 'Maghreb',
                              'Modern Standard Arabic', 'Nile Basin'])
# Mapping from each 3-letter label to its human-readable city name.
_LABEL_TO_CITY_MAP = {
    'ALE': 'Aleppo',
    'ALG': 'Algiers',
    'ALX': 'Alexandria',
    'AMM': 'Amman',
    'ASW': 'Aswan',
    'BAG': 'Baghdad',
    'BAS': 'Basra',
    'BEI': 'Beirut',
    'BEN': 'Benghazi',
    'CAI': 'Cairo',
    'DAM': 'Damascus',
    'DOH': 'Doha',
    'FES': 'Fes',
    'JED': 'Jeddha',
    'JER': 'Jerusalem',
    'KHA': 'Khartoum',
    'MOS': 'Mosul',
    'MSA': 'Modern Standard Arabic',
    'MUS': 'Muscat',
    'RAB': 'Rabat',
    'RIY': 'Riyadh',
    'SAL': 'Salt',
    'SAN': 'Sana\'a',
    'SFX': 'Sfax',
    'TRI': 'Tripoli',
    'TUN': 'Tunis'
}
# Mapping from each label to the country its city belongs to.
_LABEL_TO_COUNTRY_MAP = {
    'ALE': 'Syria',
    'ALG': 'Algeria',
    'ALX': 'Egypt',
    'AMM': 'Jordan',
    'ASW': 'Egypt',
    'BAG': 'Iraq',
    'BAS': 'Iraq',
    'BEI': 'Lebanon',
    'BEN': 'Libya',
    'CAI': 'Egypt',
    'DAM': 'Syria',
    'DOH': 'Qatar',
    'FES': 'Morocco',
    'JED': 'Saudi Arabia',
    'JER': 'Palestine',
    'KHA': 'Sudan',
    'MOS': 'Iraq',
    'MSA': 'Modern Standard Arabic',
    'MUS': 'Oman',
    'RAB': 'Morocco',
    'RIY': 'Saudi Arabia',
    'SAL': 'Jordan',
    'SAN': 'Yemen',
    'SFX': 'Tunisia',
    'TRI': 'Libya',
    'TUN': 'Tunisia'
}
# Mapping from each label to its dialectal region grouping.
_LABEL_TO_REGION_MAP = {
    'ALE': 'Levant',
    'ALG': 'Maghreb',
    'ALX': 'Nile Basin',
    'AMM': 'Levant',
    'ASW': 'Nile Basin',
    'BAG': 'Gulf',
    'BAS': 'Gulf',
    'BEI': 'Levant',
    'BEN': 'Maghreb',
    'CAI': 'Nile Basin',
    'DAM': 'Levant',
    'DOH': 'Gulf',
    'FES': 'Maghreb',
    'JED': 'Gulf',
    'JER': 'Levant',
    'KHA': 'Nile Basin',
    'MOS': 'Gulf',
    'MSA': 'Modern Standard Arabic',
    'MUS': 'Gulf',
    'RAB': 'Maghreb',
    'RIY': 'Gulf',
    'SAL': 'Levant',
    'SAN': 'Gulf of Aden',
    'SFX': 'Maghreb',
    'TRI': 'Maghreb',
    'TUN': 'Maghreb'
}
# Locations of the bundled language models and train/dev/test corpora,
# resolved through the camel_tools data catalogue.
_DATA_DIR = CATALOGUE.components['DialectID'].datasets['model26'].path
_CHAR_LM_DIR = Path(_DATA_DIR, 'lm', 'char')
_WORD_LM_DIR = Path(_DATA_DIR, 'lm', 'word')
_TRAIN_DATA_PATH = Path(_DATA_DIR, 'corpus_26_train.tsv')
_TRAIN_DATA_EXTRA_PATH = Path(_DATA_DIR, 'corpus_6_train.tsv')
_DEV_DATA_PATH = Path(_DATA_DIR, 'corpus_26_dev.tsv')
_TEST_DATA_PATH = Path(_DATA_DIR, 'corpus_26_test.tsv')
def _normalize_lm_scores(scores):
    """Exponentiate log-space LM scores and row-normalize the result."""
    return normalize(np.exp(scores))
def _word_to_char(txt):
tokens = txt.split()
tokens = [' '.join(t) for t in tokens]
return ' <SPACE> '.join(tokens)
def label_to_city(prediction):
    """Converts a dialect prediction using labels to use city names instead.

    Args:
        prediction (:obj:`DIDPred`): The prediction to convert.

    Returns:
        :obj:`DIDPred`: The converted prediction.
    """
    # Re-key every score by its city name; the top label maps directly.
    city_scores = {}
    for label, score in prediction.scores.items():
        city_scores[_LABEL_TO_CITY_MAP[label]] = score
    return DIDPred(_LABEL_TO_CITY_MAP[prediction.top], city_scores)
def label_to_country(prediction):
    """Converts a dialect prediction using labels to use country names
    instead.

    Args:
        prediction (:obj:`DIDPred`): The prediction to convert.

    Returns:
        :obj:`DIDPred`: The converted prediction.
    """
    # Sum the probability mass of all city labels belonging to each country.
    country_scores = dict.fromkeys(_DEFAULT_COUNTRIES, 0.0)
    for label, score in prediction.scores.items():
        country_scores[_LABEL_TO_COUNTRY_MAP[label]] += score
    best = max(country_scores, key=country_scores.get)
    return DIDPred(best, country_scores)
def label_to_region(prediction):
    """Converts a dialect prediction using labels to use region names
    instead.

    Args:
        prediction (:obj:`DIDPred`): The prediction to convert.

    Returns:
        :obj:`DIDPred`: The converted prediction.
    """
    # Sum the probability mass of all city labels belonging to each region.
    region_scores = dict.fromkeys(_DEFAULT_REGIONS, 0.0)
    for label, score in prediction.scores.items():
        region_scores[_LABEL_TO_REGION_MAP[label]] += score
    best = max(region_scores, key=region_scores.get)
    return DIDPred(best, region_scores)
class DIDModel26(object):
    """A class for training, evaluating and running the dialect identification
    model 'Model-26' described by Salameh et al. After initializing an
    instance, you must run the train method once before using it.

    Args:
        labels (:obj:`set` of :obj:`str`, optional): The set of dialect labels
            used in the training data in the main model.
            If None, the default labels are used.
            Defaults to None.
        labels_extra (:obj:`set` of :obj:`str`, optional): The set of dialect
            labels used in the training data in the extra features model.
            If None, the default labels are used.
            Defaults to None.
        char_lm_dir (:obj:`str`, optional): Path to the directory containing
            the character-based language models. If None, use the language
            models that come with this package. Defaults to None.
        word_lm_dir (:obj:`str`, optional): Path to the directory containing
            the word-based language models. If None, use the language models
            that come with this package. Defaults to None.
    """
    def __init__(self, labels=None,
                 labels_extra=None,
                 char_lm_dir=None,
                 word_lm_dir=None):
        # Fall back to the bundled label sets and language-model directories.
        if labels is None:
            labels = _DEFAULT_LABELS
        if labels_extra is None:
            labels_extra = _DEFAULT_LABELS_EXTRA
        if char_lm_dir is None:
            char_lm_dir = _CHAR_LM_DIR
        if word_lm_dir is None:
            word_lm_dir = _WORD_LM_DIR
        self._labels = labels
        self._labels_extra = labels_extra
        # Sorted label order fixes the column order of the LM feature
        # vectors and the label order used when reading classifier
        # probabilities in predict().
        self._labels_sorted = sorted(labels)
        self._labels_extra_sorted = sorted(labels_extra)
        self._char_lms = collections.defaultdict(kenlm.Model)
        self._word_lms = collections.defaultdict(kenlm.Model)
        self._load_lms(char_lm_dir, word_lm_dir)
        self._is_trained = False
    def _load_lms(self, char_lm_dir, word_lm_dir):
        """Load one character-level and one word-level KenLM model per label
        from '<dir>/<label>.arpa'."""
        config = kenlm.Config()
        # Silence progress output and ARPA-format complaints while loading.
        config.show_progress = False
        config.arpa_complain = kenlm.ARPALoadComplain.NONE
        for label in self._labels:
            char_lm_path = Path(char_lm_dir, '{}.arpa'.format(label))
            word_lm_path = Path(word_lm_dir, '{}.arpa'.format(label))
            self._char_lms[label] = kenlm.Model(str(char_lm_path), config)
            self._word_lms[label] = kenlm.Model(str(word_lm_path), config)
    def _get_char_lm_scores(self, txt):
        """Score txt against each label's character LM, in sorted label
        order."""
        chars = _word_to_char(txt)
        return np.array([self._char_lms[label].score(chars, bos=True, eos=True)
                         for label in self._labels_sorted])
    def _get_word_lm_scores(self, txt):
        """Score txt against each label's word LM, in sorted label order."""
        return np.array([self._word_lms[label].score(txt, bos=True, eos=True)
                         for label in self._labels_sorted])
    def _get_lm_feats(self, txt):
        """Build a single feature row of normalized word- and char-LM
        scores for one sentence."""
        word_lm_scores = self._get_word_lm_scores(txt).reshape(1, -1)
        word_lm_scores = _normalize_lm_scores(word_lm_scores)
        char_lm_scores = self._get_char_lm_scores(txt).reshape(1, -1)
        char_lm_scores = _normalize_lm_scores(char_lm_scores)
        feats = np.concatenate((word_lm_scores, char_lm_scores), axis=1)
        return feats
    def _get_lm_feats_multi(self, sentences):
        """Stack the per-sentence LM feature rows into one matrix."""
        feats_list = collections.deque()
        for sentence in sentences:
            feats_list.append(self._get_lm_feats(sentence))
        feats_matrix = np.array(feats_list)
        # NOTE(review): 52 == 2 * 26 assumes the default 26-label set; a
        # custom label set of a different size would break this reshape.
        feats_matrix = feats_matrix.reshape((-1, 52))
        return feats_matrix
    def _prepare_sentences(self, sentences):
        """Transform raw sentences into the final sparse feature matrix:
        tf-idf n-grams + LM scores + extra-model class probabilities."""
        # Dediacritize and tokenize before feature extraction.
        tokenized = [' '.join(simple_word_tokenize(dediac_ar(s)))
                     for s in sentences]
        sent_array = np.array(tokenized)
        x_trans = self._feat_union.transform(sent_array)
        x_trans_extra = self._feat_union_extra.transform(sent_array)
        x_predict_extra = self._classifier_extra.predict_proba(x_trans_extra)
        # Note: LM features are computed on the raw (untokenized) sentences.
        x_lm_feats = self._get_lm_feats_multi(sentences)
        x_final = sp.sparse.hstack((x_trans, x_lm_feats, x_predict_extra))
        return x_final
    def train(self, data_path=None,
              data_extra_path=None,
              char_ngram_range=(1, 3),
              word_ngram_range=(1, 1),
              n_jobs=None):
        """Trains the model on a given data set.

        Args:
            data_path (:obj:`str`, optional): Path to main training data.
                If None, use the provided training data.
                Defaults to None.
            data_extra_path (:obj:`str`, optional): Path to extra features
                training data. If None, use the provided training data.
                Defaults to None.
            char_ngram_range (:obj:`tuple`, optional): The n-gram ranges to
                consider in the character-based language models.
                Defaults to (1, 3).
            word_ngram_range (:obj:`tuple`, optional): The n-gram ranges to
                consider in the word-based language models.
                Defaults to (1, 1).
            n_jobs (:obj:`int`, optional): The number of parallel jobs to use
                for computation. If None, then only 1 job is used.
                If -1 then all processors are used. Defaults to None.
        """
        if data_path is None:
            data_path = _TRAIN_DATA_PATH
        if data_extra_path is None:
            data_extra_path = _TRAIN_DATA_EXTRA_PATH
        # Load training data and extract
        train_data = pd.read_csv(data_path, sep='\t')
        train_data_extra = pd.read_csv(data_extra_path, sep='\t')
        x = train_data['ar'].values
        y = train_data['dialect'].values
        x_extra = train_data_extra['ar'].values
        y_extra = train_data_extra['dialect'].values
        # Build and train extra classifier (the auxiliary 6-label model
        # whose class probabilities become features of the main model).
        self._label_encoder_extra = LabelEncoder()
        self._label_encoder_extra.fit(y_extra)
        y_trans = self._label_encoder_extra.transform(y_extra)
        word_vectorizer = TfidfVectorizer(lowercase=False,
                                          ngram_range=word_ngram_range,
                                          analyzer='word',
                                          tokenizer=lambda x: x.split(' '))
        char_vectorizer = TfidfVectorizer(lowercase=False,
                                          ngram_range=char_ngram_range,
                                          analyzer='char',
                                          tokenizer=lambda x: x.split(' '))
        self._feat_union_extra = FeatureUnion([('wordgrams', word_vectorizer),
                                               ('chargrams', char_vectorizer)])
        x_trans = self._feat_union_extra.fit_transform(x_extra)
        self._classifier_extra = OneVsRestClassifier(MultinomialNB(),
                                                     n_jobs=n_jobs)
        self._classifier_extra.fit(x_trans, y_trans)
        # Build and train main classifier
        self._label_encoder = LabelEncoder()
        self._label_encoder.fit(y)
        y_trans = self._label_encoder.transform(y)
        word_vectorizer = TfidfVectorizer(lowercase=False,
                                          ngram_range=word_ngram_range,
                                          analyzer='word',
                                          tokenizer=lambda x: x.split(' '))
        char_vectorizer = TfidfVectorizer(lowercase=False,
                                          ngram_range=char_ngram_range,
                                          analyzer='char',
                                          tokenizer=lambda x: x.split(' '))
        self._feat_union = FeatureUnion([('wordgrams', word_vectorizer),
                                         ('chargrams', char_vectorizer)])
        self._feat_union.fit(x)
        x_prepared = self._prepare_sentences(x)
        self._classifier = OneVsRestClassifier(MultinomialNB(), n_jobs=n_jobs)
        self._classifier.fit(x_prepared, y_trans)
        self._is_trained = True
    def eval(self, data_path=None, data_set='DEV'):
        """Evaluate the trained model on a given data set.

        Args:
            data_path (:obj:`str`, optional): Path to an evaluation data set.
                If None, use one of the provided data sets instead.
                Defaults to None.
            data_set (:obj:`str`, optional): Name of the provided data set to
                use. This is ignored if data_path is not None. Can be either
                'DEV' or 'TEST'. Defaults to 'DEV'.

        Returns:
            :obj:`dict`: A dictionary mapping an evaluation metric to its
            computed value. The metrics used are accuracy, f1_micro, f1_macro,
            recall_micro, recall_macro, precision_micro and precision_macro.
        """
        if not self._is_trained:
            raise UntrainedModelError(
                'Can\'t evaluate an untrained model.')
        if data_path is None:
            if data_set == 'DEV':
                data_path = _DEV_DATA_PATH
            elif data_set == 'TEST':
                data_path = _TEST_DATA_PATH
            else:
                raise InvalidDataSetError(data_set)
        # Load eval data
        eval_data = pd.read_csv(data_path, sep='\t')
        sentences = eval_data['ar'].values
        did_true_city = eval_data['dialect'].values
        # Derive gold country/region labels from the gold city labels.
        did_true_country = [_LABEL_TO_COUNTRY_MAP[d] for d in did_true_city]
        did_true_region = [_LABEL_TO_REGION_MAP[d] for d in did_true_city]
        # Generate predictions
        did_pred = self.predict(sentences)
        did_pred_city = [d.top for d in did_pred]
        did_pred_country = [d.top for d in map(label_to_country, did_pred)]
        did_pred_region = [d.top for d in map(label_to_region, did_pred)]
        # Get scores at city, country, and region granularity.
        scores = {
            'city': {
                'accuracy': accuracy_score(did_true_city, did_pred_city),
                'f1_macro': f1_score(did_true_city, did_pred_city,
                                     average='macro'),
                'recall_macro': recall_score(did_true_city, did_pred_city,
                                             average='macro'),
                'precision_macro': precision_score(did_true_city,
                                                   did_pred_city,
                                                   average='macro')
            },
            'country': {
                'accuracy': accuracy_score(did_true_country, did_pred_country),
                'f1_macro': f1_score(did_true_country, did_pred_country,
                                     average='macro'),
                'recall_macro': recall_score(did_true_country,
                                             did_pred_country,
                                             average='macro'),
                'precision_macro': precision_score(did_true_country,
                                                   did_pred_country,
                                                   average='macro')
            },
            'region': {
                'accuracy': accuracy_score(did_true_region, did_pred_region),
                'f1_macro': f1_score(did_true_region, did_pred_region,
                                     average='macro'),
                'recall_macro': recall_score(did_true_region, did_pred_region,
                                             average='macro'),
                'precision_macro': precision_score(did_true_region,
                                                   did_pred_region,
                                                   average='macro')
            },
        }
        return scores
    def predict(self, sentences, output='label'):
        """Predict the dialect probability scores for a given list of
        sentences.

        Args:
            sentences (:obj:`list` of :obj:`str`): The list of sentences.
            output (:obj:`str`): The output label type. Possible values are
                'label', 'city', 'country', or 'region'. Any other value
                behaves like 'label'. Defaults to 'label'.

        Returns:
            :obj:`list` of :obj:`DIDPred`: A list of prediction results,
            each corresponding to its respective sentence.
        """
        if not self._is_trained:
            raise UntrainedModelError(
                'Can\'t predict with an untrained model.')
        # Unknown output values silently fall back to raw labels.
        if output == 'label':
            convert = lambda x: x
        elif output == 'city':
            convert = label_to_city
        elif output == 'country':
            convert = label_to_country
        elif output == 'region':
            convert = label_to_region
        else:
            convert = lambda x: x
        x_prepared = self._prepare_sentences(sentences)
        predicted_scores = self._classifier.predict_proba(x_prepared)
        result = collections.deque()
        for scores in predicted_scores:
            # Probability columns are in sorted label order (see __init__).
            score_tups = list(zip(self._labels_sorted, scores))
            predicted_dialect = max(score_tups, key=lambda x: x[1])[0]
            dialect_scores = dict(score_tups)
            result.append(convert(DIDPred(predicted_dialect, dialect_scores)))
        return list(result)
    @staticmethod
    def pretrained():
        """Load the default pre-trained model provided with camel-tools.

        Raises:
            :obj:`PretrainedModelError`: When a pre-trained model compatible
                with the current Python version isn't available.

        Returns:
            :obj:`DIDModel26`: The loaded model.
        """
        # Pretrained models are serialized per Python version (e.g.
        # 'did_pretrained_39.dill' for Python 3.9).
        suffix = '{}{}'.format(sys.version_info.major, sys.version_info.minor)
        model_file_name = 'did_pretrained_{}.dill'.format(suffix)
        model_path = Path(_DATA_DIR, model_file_name)
        if not model_path.is_file():
            raise PretrainedModelError(
                'No pretrained model for current Python version found.')
        with model_path.open('rb') as model_fp:
            model = dill.load(model_fp)
            # We need to reload LMs since they were set to None when
            # serialized.
            model._char_lms = collections.defaultdict(kenlm.Model)
            model._word_lms = collections.defaultdict(kenlm.Model)
            model._load_lms(_CHAR_LM_DIR, _WORD_LM_DIR)
        return model
def train_default_model():
    """Train the default Model-26 classifier and serialize it to disk.

    The trained model is dumped with dill into the DialectID data directory
    under a file name keyed to the running Python version (e.g.
    ``did_pretrained_39.dill``) so that :meth:`DIDModel26.pretrained` can
    later load it.
    """
    did = DIDModel26()
    did.train()
    # We don't want to serialize kenlm models as they will utilize the
    # absolute LM paths used in training. They will be reloaded when using
    # DialectIdentifier.pretrained().
    did._char_lms = None
    did._word_lms = None
    did._char_extra_lms = None
    did._word_extra_lms = None
    suffix = '{}{}'.format(sys.version_info.major, sys.version_info.minor)
    model_file_name = 'did_pretrained_{}.dill'.format(suffix)
    model_path = Path(_DATA_DIR, model_file_name)
    with model_path.open('wb') as model_fp:
        dill.dump(did, model_fp)
def label_city_pairs():
    """Returns the set of default label-city pairs.

    Returns:
        :obj:`frozenset` of :obj:`tuple`: The set of default label-city
        pairs.
    """
    return frozenset((label, city)
                     for label, city in _LABEL_TO_CITY_MAP.items())
def label_country_pairs():
    """Returns the set of default label-country pairs.

    Returns:
        :obj:`frozenset` of :obj:`tuple`: The set of default label-country
        pairs.
    """
    return frozenset((label, country)
                     for label, country in _LABEL_TO_COUNTRY_MAP.items())
def label_region_pairs():
    """Returns the set of default label-region pairs.

    Returns:
        :obj:`frozenset` of :obj:`tuple`: The set of default label-region
        pairs.
    """
    return frozenset((label, region)
                     for label, region in _LABEL_TO_REGION_MAP.items())
| 22,736 | 35.495987 | 79 | py |
camel_tools | camel_tools-master/camel_tools/dialectid/__init__.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains the CAMeL Tools dialect identification component.
This Dialect Identification system can identify between 25 Arabic city dialects
as well as Modern Standard Arabic. It is based on the system described by
`Salameh, Bouamor and Habash <http://www.aclweb.org/anthology/C18-1113>`_.
"""
from camel_tools.dialectid.common import DIDPred, DialectIdError
from camel_tools.dialectid.common import UntrainedModelError
from camel_tools.dialectid.common import InvalidDataSetError
from camel_tools.dialectid.common import PretrainedModelError
from camel_tools.dialectid.model6 import DIDModel6
from camel_tools.dialectid.model26 import DIDModel26
# The package-level DialectIdentifier is an alias for the 26-label model.
DialectIdentifier = DIDModel26
# Public API of the dialectid package.
__all__ = [
    'DIDPred',
    'DialectIdError',
    'UntrainedModelError',
    'InvalidDataSetError',
    'PretrainedModelError',
    'DialectIdentifier',
    'DIDModel26',
    'DIDModel6',
]
| 2,040 | 38.25 | 79 | py |
camel_tools | camel_tools-master/camel_tools/utils/charsets.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Contains character sets for different encoding schemes as well as Unicode
characters marked as symbols and punctuation.
"""
import unicodedata
from emoji import EMOJI_DATA
# Public charset constants exported by this module.
__all__ = [
    'UNICODE_PUNCT_CHARSET', 'UNICODE_SYMBOL_CHARSET',
    'UNICODE_LETTER_CHARSET', 'UNICODE_MARK_CHARSET', 'UNICODE_NUMBER_CHARSET',
    'UNICODE_PUNCT_SYMBOL_CHARSET', 'UNICODE_LETTER_MARK_NUMBER_CHARSET',
    'EMOJI_ALL_CHARSET', 'EMOJI_SINGLECHAR_CHARSET', 'EMOJI_MULTICHAR_CHARSET',
    'AR_LETTERS_CHARSET', 'AR_DIAC_CHARSET', 'AR_CHARSET',
    'BW_LETTERS_CHARSET', 'BW_DIAC_CHARSET', 'BW_CHARSET',
    'SAFEBW_LETTERS_CHARSET', 'SAFEBW_DIAC_CHARSET', 'SAFEBW_CHARSET',
    'XMLBW_LETTERS_CHARSET', 'XMLBW_DIAC_CHARSET', 'XMLBW_CHARSET',
    'HSB_LETTERS_CHARSET', 'HSB_DIAC_CHARSET', 'HSB_CHARSET',
]
# Scan every Unicode code point once at import time and bucket it by the
# first letter of its general category: L (letters), M (marks),
# N (numbers), P (punctuation), S (symbols).
UNICODE_PUNCT_CHARSET = set()
UNICODE_SYMBOL_CHARSET = set()
UNICODE_LETTER_CHARSET = set()
UNICODE_MARK_CHARSET = set()
UNICODE_NUMBER_CHARSET = set()
for x in range(0x110000):
    x_chr = chr(x)
    x_cat = unicodedata.category(x_chr)
    if x_cat[0] == 'L':
        UNICODE_LETTER_CHARSET.add(x_chr)
    elif x_cat[0] == 'M':
        UNICODE_MARK_CHARSET.add(x_chr)
    elif x_cat[0] == 'N':
        UNICODE_NUMBER_CHARSET.add(x_chr)
    elif x_cat[0] == 'P':
        UNICODE_PUNCT_CHARSET.add(x_chr)
    elif x_cat[0] == 'S':
        UNICODE_SYMBOL_CHARSET.add(x_chr)
# Freeze the category sets so they are immutable (and hashable) for users.
UNICODE_PUNCT_CHARSET = frozenset(UNICODE_PUNCT_CHARSET)
UNICODE_SYMBOL_CHARSET = frozenset(UNICODE_SYMBOL_CHARSET)
UNICODE_LETTER_CHARSET = frozenset(UNICODE_LETTER_CHARSET)
UNICODE_MARK_CHARSET = frozenset(UNICODE_MARK_CHARSET)
UNICODE_NUMBER_CHARSET = frozenset(UNICODE_NUMBER_CHARSET)
# Convenience unions of the category sets above.
UNICODE_PUNCT_SYMBOL_CHARSET = UNICODE_PUNCT_CHARSET | UNICODE_SYMBOL_CHARSET
UNICODE_LETTER_MARK_NUMBER_CHARSET = (UNICODE_LETTER_CHARSET |
                                      UNICODE_MARK_CHARSET |
                                      UNICODE_NUMBER_CHARSET)
# Emoji sequences from the emoji package, split by whether they consist of
# a single code point or several (e.g. ZWJ sequences, flags).
EMOJI_ALL_CHARSET = frozenset(EMOJI_DATA.keys())
EMOJI_SINGLECHAR_CHARSET = frozenset([
    x for x in EMOJI_ALL_CHARSET if len(x) == 1])
EMOJI_MULTICHAR_CHARSET = frozenset([
    x for x in EMOJI_ALL_CHARSET if len(x) > 1])
# Arabic-script letters and diacritics (native Unicode Arabic encoding).
AR_LETTERS_CHARSET = frozenset(u'\u0621\u0622\u0623\u0624\u0625\u0626\u0627'
                               u'\u0628\u0629\u062a\u062b\u062c\u062d\u062e'
                               u'\u062f\u0630\u0631\u0632\u0633\u0634\u0635'
                               u'\u0636\u0637\u0638\u0639\u063a\u0640\u0641'
                               u'\u0642\u0643\u0644\u0645\u0646\u0647\u0648'
                               u'\u0649\u064a\u0671\u067e\u0686\u06a4\u06af')
AR_DIAC_CHARSET = frozenset(u'\u064b\u064c\u064d\u064e\u064f\u0650\u0651\u0652'
                            u'\u0670\u0640')
AR_CHARSET = AR_LETTERS_CHARSET | AR_DIAC_CHARSET
# Buckwalter transliteration letters and diacritics.
BW_LETTERS_CHARSET = frozenset(u'$&\'*<>ADEGHJPSTVYZ_bdfghjklmnpqrstvwxyz{|}')
BW_DIAC_CHARSET = frozenset(u'FKN`aiou~_')
BW_CHARSET = BW_LETTERS_CHARSET | BW_DIAC_CHARSET
# Safe Buckwalter transliteration letters and diacritics.
SAFEBW_LETTERS_CHARSET = frozenset(u'ABCDEGHIJLMOPQSTVWYZ_bcdefghjklmnpqrstvwx'
                                   u'yz')
SAFEBW_DIAC_CHARSET = frozenset(u'FKNaeiou~_')
SAFEBW_CHARSET = SAFEBW_LETTERS_CHARSET | SAFEBW_DIAC_CHARSET
# XML-safe Buckwalter transliteration letters and diacritics.
XMLBW_LETTERS_CHARSET = frozenset(u'$\'*ABDEGHIJOPSTWYZ_bdfghjklmnpqrstvwxyz{|'
                                  u'}')
XMLBW_DIAC_CHARSET = frozenset(u'FKN`aiou~_')
XMLBW_CHARSET = XMLBW_LETTERS_CHARSET | XMLBW_DIAC_CHARSET
# Habash-Soudi-Buckwalter transliteration letters and diacritics.
HSB_LETTERS_CHARSET = frozenset(u'\'ADHST_bcdfghjklmnpqrstvwxyz'
                                u'\u00c2\u00c4\u00e1\u00f0\u00fd\u0100\u0102'
                                u'\u010e\u0127\u0161\u0175\u0177\u03b3\u03b8'
                                u'\u03c2')
HSB_DIAC_CHARSET = frozenset(u'.aiu~\u00c4\u00e1\u00e3\u0129\u0169_')
HSB_CHARSET = HSB_LETTERS_CHARSET | HSB_DIAC_CHARSET
| 4,968 | 42.208696 | 80 | py |
camel_tools | camel_tools-master/camel_tools/utils/dediac.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This submodule contains functions for dediacritizing Arabic text in
different encodings. See :doc:`/reference/encoding_schemes` for more
information on encodings.
"""
from __future__ import absolute_import
import re
from camel_tools.utils.charsets import AR_DIAC_CHARSET, BW_DIAC_CHARSET
from camel_tools.utils.charsets import SAFEBW_DIAC_CHARSET, XMLBW_DIAC_CHARSET
from camel_tools.utils.charsets import HSB_DIAC_CHARSET
# Precompiled character-class patterns, one per encoding scheme, matching any
# single diacritic of that scheme. Compiled once at import time since they
# are used on every dediac_* call.
_DIAC_RE_BW = re.compile(u'[' +
                         re.escape(u''.join(BW_DIAC_CHARSET)) +
                         u']')
_DIAC_RE_SAFEBW = re.compile(u'[' +
                             re.escape(u''.join(SAFEBW_DIAC_CHARSET)) +
                             u']')
_DIAC_RE_XMLBW = re.compile(u'[' +
                            re.escape(u''.join(XMLBW_DIAC_CHARSET)) +
                            u']')
_DIAC_RE_HSB = re.compile(u'[' +
                          re.escape(u''.join(HSB_DIAC_CHARSET)) +
                          u']')
_DIAC_RE_AR = re.compile(u'[' +
                         re.escape(u''.join(AR_DIAC_CHARSET)) +
                         u']')
def dediac_bw(s):
    """Dediacritize a Buckwalter encoded string.

    Args:
        s (:obj:`str`): String to dediacritize.

    Returns:
        :obj:`str`: Dediacritized string.
    """
    # Delete every character belonging to the Buckwalter diacritic set.
    return _DIAC_RE_BW.sub('', s)
def dediac_safebw(s):
    """Dediacritize a Safe Buckwalter encoded string.

    Args:
        s (:obj:`str`): String to dediacritize.

    Returns:
        :obj:`str`: Dediacritized string.
    """
    # Delete every character belonging to the Safe Buckwalter diacritic set.
    return _DIAC_RE_SAFEBW.sub('', s)
def dediac_xmlbw(s):
    """Dediacritize an XML Buckwalter encoded string.

    Args:
        s (:obj:`str`): String to dediacritize.

    Returns:
        :obj:`str`: Dediacritized string.
    """
    # Delete every character belonging to the XML Buckwalter diacritic set.
    return _DIAC_RE_XMLBW.sub('', s)
def dediac_hsb(s):
    """Dediacritize a Habash-Soudi-Buckwalter encoded string.

    Args:
        s (:obj:`str`): String to dediacritize.

    Returns:
        :obj:`str`: Dediacritized string.
    """
    # Delete every character belonging to the HSB diacritic set.
    return _DIAC_RE_HSB.sub('', s)
def dediac_ar(s):
    """Dediacritize a Unicode Arabic string.

    Args:
        s (:obj:`str`): String to dediacritize.

    Returns:
        :obj:`str`: Dediacritized string.
    """
    # Delete every character belonging to the Arabic diacritic set.
    return _DIAC_RE_AR.sub('', s)
| 3,413 | 27.689076 | 79 | py |
camel_tools | camel_tools-master/camel_tools/utils/charmap.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Contains the CharMapper class (for mapping characters in a Unicode string to
other strings) and custom exceptions raised by CharMapper.
"""
from __future__ import absolute_import
from collections import deque
from collections.abc import Mapping
import os
import json
from .stringutils import isunicode
class InvalidCharMapKeyError(ValueError):
    """Raised when a charmap used to initialize :obj:`CharMapper` contains a
    key that is neither a single character nor a valid character range.
    """

    def __init__(self, key, message):
        super().__init__(message)
        # Keep the offending key around for callers that want to inspect it.
        self.key = key
        self.message = message

    def __repr__(self):
        return 'InvalidCharMapKeyError({}, {})'.format(
            repr(self.key), repr(self.message))

    def __str__(self):
        return self.message
class BuiltinCharMapNotFoundError(ValueError):
    """Raised when the map name passed to :func:`CharMapper.builtin_mapper`
    is not one of the builtin maps.
    """

    def __init__(self, map_name, message):
        super().__init__(message)
        # Keep the unknown map name around for callers that want to inspect
        # it.
        self.map_name = map_name
        self.message = message

    def __repr__(self):
        return 'BuiltinCharMapNotFoundError({}, {})'.format(
            repr(self.map_name), repr(self.message))

    def __str__(self):
        return self.message
class CharMapper(object):
"""A class for mapping characters in a Unicode string to other strings.
Args:
charmap (:obj:`dict`): A dictionary or any other dictionary-like
obeject (implementing collections.Mapping) mapping characters
or range of characters to a string. Keys in the dictionary
should be Unicode strings of length 1 or 3. Strings of length 1
indicate a single character to be mapped, while strings of
length 3 indicate a range. Range strings should have the format
'a-b' where is the starting character in the range and 'b' is
the last character in the range (inclusive). 'b' should have a
strictly larger ordinal number than 'a'. Dictionary values
should be either strings or `None`, where `None` indicates that
characters are mapped to themselves. Use an empty string to
indicate deletion.
default (:obj:`str`, optional): The default value to map characters
not in **charmap** to. `None` indicates that characters map to
themselves. Defaults to `None`.
Raises:
:obj:`InvalidCharMapKeyError`: If a key in charmap is not a Unicode
string containing either a single character or a valid
character range.
:obj:`TypeError`: If default or a value for a key in charmap is
neither `None` nor a Unicode string, or if **charmap** is not a
dictionary-like object.
"""
BUILTIN_CHARMAPS = frozenset((
'ar2bw',
'ar2safebw',
'ar2xmlbw',
'ar2hsb',
'bw2ar',
'bw2safebw',
'bw2xmlbw',
'bw2hsb',
'safebw2ar',
'safebw2bw',
'safebw2xmlbw',
'safebw2hsb',
'xmlbw2ar',
'xmlbw2bw',
'xmlbw2safebw',
'xmlbw2hsb',
'hsb2ar',
'hsb2bw',
'hsb2safebw',
'hsb2xmlbw',
'arclean',
))
    @staticmethod
    def _expand_char_map(charmap):
        """Creates a new dictionary from charmap where character ranges are
        expanded and given their own dictionary entry.

        Args:
            charmap (:obj:`dict`): The character map to be expanded.

        Returns:
            :obj:`dict`: A new map with one entry per single character.
                When keys overlap (e.g. a range and a single character),
                later keys in charmap's iteration order overwrite earlier
                ones.

        Raises:
            :obj:`InvalidCharMapKeyError`: If a key in **charmap** is not a
                Unicode string containing either a single character or a valid
                character range.
            :obj:`TypeError`: If a value for a key in **charmap** is neither
                `None` nor a Unicode string.
        """
        # TODO: Implement a space efficient character map data structure
        new_map = {}
        for key in charmap.keys():
            # Check that key is a string
            if not isunicode(key):
                raise TypeError('Expected string as key. '
                                'Got {} instead.'.format(type(key)))
            # If string is one character long we can directly add it to the map
            if len(key) == 1:
                if charmap[key] is not None and not isunicode(charmap[key]):
                    raise TypeError(
                        ('Expected a Unicode string or None value for key '
                         'value, got {} instead.').format(type(charmap[key])))
                else:
                    new_map[key] = charmap[key]
            # We check if it's a range with the following rules:
            # a) The string is 3 character long with a dash '-' in the
            #    middle.
            # b) The first character must have a strictly smaller ordinal
            #    than the last character.
            elif len(key) == 3 and key[1] == '-':
                if ord(key[0]) >= ord(key[2]):
                    raise InvalidCharMapKeyError(key, '')
                else:
                    if (charmap[key] is not None
                            and not isunicode(charmap[key])):
                        raise TypeError(
                            ('Expected a Unicode string or None value for key '
                             'value, got {} instead.').format(
                                 type(charmap[key]))
                        )
                    # Expand the inclusive range into one entry per code
                    # point, all sharing the same mapped value.
                    for char in range(ord(key[0]), ord(key[2]) + 1):
                        new_map[chr(char)] = charmap[key]
            # Otherwise, we have an invalid map key
            else:
                raise InvalidCharMapKeyError(
                    key, 'Invalid character or character range')
        return new_map
def __init__(self, charmap, default=None):
"""Class constructor.
"""
if isinstance(charmap, Mapping):
self._charmap = self._expand_char_map(charmap)
else:
raise TypeError(
('Expected a dictionary like object for charmap, got {} '
'instead').format(type(charmap)))
if default is None or isunicode(default):
self._default = default
else:
raise TypeError(
('Expected a Unicode string or None value for default, got {} '
'instead.').format(type(default)))
def __call__(self, s):
"""Alias for :func:`CharMapper.map_string`.
"""
return self.map_string(s)
@staticmethod
def mapper_from_json(fpath):
"""Creates a :obj:`CharMapper` instance from a JSON file.
Args:
fpath (:obj:`str`): Path to JSON file.
Returns:
:obj:`CharMapper`: A new :obj:`CharMapper` instance generated from
given JSON file.
Raises:
:obj:`InvalidCharMapKeyError`: If a key in charmap is not a Unicode
string containing either a single character or a valid
character range.
:obj:`TypeError`: If default or a value for a key in charmap is
neither `None` nor a Unicode string.
:obj:`FileNotFoundError`: If file at `fpath` doesn't exist.
:obj:`JSONDecodeError`: If `fpath` is not a valid JSON file.
"""
with open(fpath, 'r', encoding='utf-8') as charmap_fp:
jsonstr = charmap_fp.read()
json_dict = json.loads(jsonstr)
return CharMapper(
json_dict.get('charMap', {}),
default=json_dict.get('default', None)
)
@staticmethod
def builtin_mapper(map_name):
"""Creates a :obj:`CharMapper` instance from built-in mappings.
Args:
map_name (:obj:`str`): Name of built-in map.
Returns:
:obj:`CharMapper`: A new :obj:`CharMapper` instance of built-in
map.
Raises:
:obj:`BuiltinCharMapNotFound`: If `map_name` is not in the list of
built-in maps.
"""
if map_name not in CharMapper.BUILTIN_CHARMAPS:
raise BuiltinCharMapNotFoundError(
map_name,
'No built in mapping with name \'{}\' '
'was found.'.format(map_name))
try:
charmaps_dir = os.path.join(os.path.dirname(__file__), 'charmaps')
# This should never happen unless there something wrong with the
# system or the installation.
except Exception: # pragma: no coverage
raise BuiltinCharMapNotFoundError(
map_name,
'Could not create mapping with name \'{}\'.'.format(map_name))
map_path = os.path.join(charmaps_dir, '{}_map.json'.format(map_name))
return CharMapper.mapper_from_json(map_path)
def map_string(self, s):
"""Maps each character in a given string to its corresponding value in
the charmap.
Args:
s (:obj:`str`): A Unicode string to be mapped.
Returns:
:obj:`str`: A new Unicode string with the charmap applied.
Raises:
:obj:`TypeError`: If s is not a Unicode string.
"""
if not isunicode(s):
raise TypeError((
'Expected Unicode string as input, got {} instead.'
).format(type(s)))
buff = deque()
for char in s:
transliteration = self._charmap.get(char, self._default)
if transliteration is None:
buff.append(char)
else:
buff.append(transliteration)
return ''.join(buff)
| 10,962 | 34.478964 | 79 | py |
camel_tools | camel_tools-master/camel_tools/utils/stringutils.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This submodule contains a collection of useful helper functions when working
with strings.
"""
from __future__ import absolute_import
import six
def isunicode(obj):
    """Checks if an object is a Unicode (``str``) string.

    This previously distinguished ``unicode`` from ``bytes`` under Python 2
    through ``six.text_type``; on Python 3 (the only interpreter family this
    package supports) ``six.text_type`` is simply ``str``, so the ``six``
    indirection is dead weight and is removed.

    Args:
        obj (:obj:`object`): The object to check.

    Returns:
        :obj:`bool`: `True` if **obj** is a Unicode encoded string, `False`
        otherwise.
    """
    return isinstance(obj, str)
def force_unicode(s, encoding='utf-8'):
    """Convert a given string into a Unicode (decoded) string if it isn't
    already.

    ``six.text_type`` is ``str`` on Python 3, so the ``six`` indirection
    previously used here is removed.

    Args:
        s (:obj:`str` or :obj:`bytes` or `None`): String object to convert.
        encoding (:obj:`str`, optional): The encoding of **s** if it is
            encoded. Defaults to 'utf-8'.

    Returns:
        :obj:`str`: A Unicode (decoded) version of **s**. `None` is passed
        through unchanged.
    """
    # str (and None) pass through untouched; only bytes get decoded.
    if s is None or isinstance(s, str):
        return s
    return s.decode(encoding)
def force_encoding(s, encoding='utf-8'):
    """Convert a given string into an encoded string if it isn't already.

    ``six.binary_type`` is ``bytes`` on Python 3, so the ``six`` indirection
    previously used here is removed.

    Args:
        s (:obj:`str` or :obj:`bytes` or `None`): String object to convert.
        encoding (:obj:`str`): The encoding **s** should be encoded into.
            Note that if **s** is already encoded, it is returned as is,
            even though it is in a differnet encoding than what is passed to
            this parameter. Defaults to 'utf-8'.

    Returns:
        :obj:`bytes`: An encoded version of **s**. `None` is passed through
        unchanged.
    """
    # bytes (and None) pass through untouched; only str gets encoded.
    if s is None or isinstance(s, bytes):
        return s
    return s.encode(encoding)
| 2,753 | 30.295455 | 79 | py |
camel_tools | camel_tools-master/camel_tools/utils/__init__.py | # MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This sub-module provides low-level text processing utilities that are useful
in other sub-modules.
"""
| 1,232 | 46.423077 | 79 | py |
camel_tools | camel_tools-master/camel_tools/utils/normalize.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module provides functions for normalizing Arabic text.
"""
import re
import unicodedata
from camel_tools.utils.charmap import CharMapper
# Pre-compiled character classes matching the four Alef variants (Alef with
# Hamza above/below, Alef Wasla, Alef with Madda) in each supported
# transliteration scheme; see the corresponding Arabic code points in
# _ALEF_NORMALIZE_AR_RE.
_ALEF_NORMALIZE_BW_RE = re.compile(u'[<>{|]')
_ALEF_NORMALIZE_SAFEBW_RE = re.compile(u'[IOLM]')
_ALEF_NORMALIZE_XMLBW_RE = re.compile(u'[IO{|]')
_ALEF_NORMALIZE_HSB_RE = re.compile(u'[\u0102\u00c2\u00c4\u0100]')
_ALEF_NORMALIZE_AR_RE = re.compile(u'[\u0625\u0623\u0671\u0622]')
# Expands ligature code points (Rial sign, Basmala ligature) that NFKC does
# not decompose into the desired character sequence; applied before Unicode
# normalization in normalize_unicode().
_UNICODE_CHAR_FIX = CharMapper({
    '\ufdfc': 'ريال',
    '\ufdfd': 'بسم الله الرحمن الرحيم',
})
def normalize_unicode(s, compatibility=True):
    """Normalize a Unicode string into its canonically composed form
    (i.e. characters that can be written as a combination of Unicode
    characters are converted to their single character form).

    Note: This is essentially a call to :func:`unicodedata.normalize` with
    form 'NFC' if **compatibility** is False or 'NFKC' if it's True.

    Args:
        s (:obj:`str`): The string to be normalized.
        compatibility (:obj:`bool`, optional): Apply compatibility
            decomposition. Defaults to True.

    Returns:
        :obj:`str`: The normalized string.
    """
    if not compatibility:
        return unicodedata.normalize('NFC', s)
    # Expand special ligatures first; NFKC alone does not decompose them
    # the way we want.
    return unicodedata.normalize('NFKC', _UNICODE_CHAR_FIX(s))
def normalize_alef_maksura_bw(s):
    """Normalize all occurences of Alef Maksura ('Y') to Yeh ('y') in a
    Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({ord(u'Y'): u'y'})
def normalize_alef_maksura_safebw(s):
    """Normalize all occurences of Alef Maksura ('Y') to Yeh ('y') in a
    Safe Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({ord(u'Y'): u'y'})
def normalize_alef_maksura_xmlbw(s):
    """Normalize all occurences of Alef Maksura ('Y') to Yeh ('y') in a
    XML Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({ord(u'Y'): u'y'})
def normalize_alef_maksura_hsb(s):
    """Normalize all occurences of Alef Maksura ('\u00fd') to Yeh ('y') in a
    Habash-Soudi-Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({ord(u'\u00fd'): u'y'})
def normalize_alef_maksura_ar(s):
    """Normalize all occurences of Alef Maksura (U+0649) to Yeh (U+064A) in
    an Arabic string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({0x0649: u'\u064a'})
def normalize_teh_marbuta_bw(s):
    """Normalize all occurences of Teh Marbuta ('p') to Heh ('h') in a
    Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({ord(u'p'): u'h'})
def normalize_teh_marbuta_safebw(s):
    """Normalize all occurences of Teh Marbuta ('p') to Heh ('h') in a
    Safe Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({ord(u'p'): u'h'})
def normalize_teh_marbuta_xmlbw(s):
    """Normalize all occurences of Teh Marbuta ('p') to Heh ('h') in a
    XML Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({ord(u'p'): u'h'})
def normalize_teh_marbuta_hsb(s):
    """Normalize all occurences of Teh Marbuta ('\u0127') to Heh ('h') in a
    Habash-Soudi-Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({ord(u'\u0127'): u'h'})
def normalize_teh_marbuta_ar(s):
    """Normalize all occurences of Teh Marbuta (U+0629) to Heh (U+0647) in
    an Arabic string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return s.translate({0x0629: u'\u0647'})
def normalize_alef_bw(s):
    """Normalize the Alef variants (Alef with Hamza above/below, Alef Wasla,
    Alef with Madda) to a plain Alef ('A') in a Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return re.sub(_ALEF_NORMALIZE_BW_RE, u'A', s)
def normalize_alef_safebw(s):
    """Normalize the Alef variants to a plain Alef ('A') in a
    Safe Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return re.sub(_ALEF_NORMALIZE_SAFEBW_RE, u'A', s)
def normalize_alef_xmlbw(s):
    """Normalize the Alef variants to a plain Alef ('A') in a
    XML Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return re.sub(_ALEF_NORMALIZE_XMLBW_RE, u'A', s)
def normalize_alef_hsb(s):
    """Normalize the Alef variants to a plain Alef ('A') in a
    Habash-Soudi-Buckwalter encoded string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return re.sub(_ALEF_NORMALIZE_HSB_RE, u'A', s)
def normalize_alef_ar(s):
    """Normalize the Alef variants (Alef with Hamza above/below, Alef Wasla,
    Alef with Madda) to a plain Alef (U+0627) in an Arabic string.

    Args:
        s (:obj:`str`): The string to be normalized.

    Returns:
        :obj:`str`: The normalized string.
    """
    return re.sub(_ALEF_NORMALIZE_AR_RE, u'\u0627', s)
| 7,288 | 25.032143 | 79 | py |
camel_tools | camel_tools-master/camel_tools/utils/transliterate.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Contains the Transliterator class (for transliterating text using a
CharMapper).
"""
from __future__ import absolute_import
from collections import deque
import re
import six
from camel_tools.utils.charmap import CharMapper
_WHITESPACE_RE = re.compile(r'\s')
class Transliterator(object):
    """A class for transliterating text using a
    :obj:`~camel_tools.utils.charmap.CharMapper`. This class adds the extra
    utility of marking individual tokens to not be transliterated. It assumes
    that tokens are whitespace separated.

    Args:
        mapper (:obj:`~camel_tools.utils.charmap.CharMapper`): The
            :obj:`~camel_tools.utils.charmap.CharMapper` instance to be used
            for transliteration.
        marker (:obj:`str`, optional): A string that is prefixed to all
            tokens that shouldn't be transliterated. Should not contain any
            whitespace characters. Defaults to '@@IGNORE@@'.

    Raises:
        :obj:`TypeError`: If mapper is not a
            :obj:`~camel_tools.utils.charmap.CharMapper` instance or marker is
            not a string.
        :obj:`ValueError`: If marker contains whitespace or is an empty string.
    """
    def __init__(self, mapper, marker='@@IGNORE@@'):
        self._mapper = mapper
        if not isinstance(mapper, CharMapper):
            raise TypeError('Mapper is not a CharMapper instance.')
        if not isinstance(marker, six.string_types):
            raise TypeError('Marker is not a string.')
        if not marker:
            raise ValueError('Marker is empty.')
        elif _WHITESPACE_RE.search(marker) is None:
            self._marker = marker
        else:
            raise ValueError('Marker contains whitespace.')
        # Matches a marker immediately followed by a non-whitespace token.
        # The capturing group makes re.split() keep the marked tokens in the
        # resulting list so they can be handled separately below.
        self._markerre = re.compile(
            r'({}\S+)'.format(re.escape(marker)),
            re.UNICODE | re.MULTILINE
        )
    def transliterate(self, s, strip_markers=False, ignore_markers=False):
        """Transliterate a given string.

        Args:
            s (:obj:`str`): The string to transliterate.
            strip_markers (:obj:`bool`, optional): Output is stripped of
                markers if `True`, otherwise markers are kept in the output.
                Defaults to `False`.
            ignore_markers (:obj:`bool`, optional): If set to `True`, all text,
                including marked tokens are transliterated as well excluding
                the markers. If you would like to transliterate the markers as
                well, use :obj:`~camel_tools.utils.charmap.CharMapper`
                directly instead. Defaults to `False`.

        Returns:
            :obj:`str`: The transliteration of **s** with the exception of
            marked words.
        """
        buff = deque()
        # splits alternates between unmarked text spans and marked tokens.
        splits = self._markerre.split(s)
        for spl in splits:
            if spl.startswith(self._marker):
                # Marked token: transliterate only if markers are ignored.
                if ignore_markers:
                    if not strip_markers:
                        buff.append(self._marker)
                    buff.append(
                        self._mapper.map_string(spl[len(self._marker):])
                    )
                else:
                    if strip_markers:
                        buff.append(spl[len(self._marker):])
                    else:
                        buff.append(spl)
            else:
                # Unmarked text: always transliterated.
                buff.append(self._mapper.map_string(spl))
        return u''.join(buff)
| 4,578 | 36.532787 | 79 | py |
camel_tools | camel_tools-master/camel_tools/data/downloader.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from genericpath import exists
from pathlib import Path
from tempfile import TemporaryDirectory
from os import urandom, remove
from shutil import move, rmtree
import binascii
import zipfile
import requests
from requests.structures import CaseInsensitiveDict
_STREAM_CHUNK_SIZE = 32768
class DownloaderError(Exception):
    """Exception raised when an error occurs during data download.

    Args:
        msg: A human-readable description of the failure.
    """

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        # Show only the message when the exception is printed.
        return str(self.msg)
class HTTPDownloader:
    """Class to download shared files from a URL.

    All functionality is exposed through static methods; instances carry no
    state.
    """

    @staticmethod
    def download(url,
                 dst,
                 is_zip=False,
                 on_download_start=None,
                 on_download_update=None,
                 on_download_finish=None,
                 on_download_error=None,
                 on_unzip_start=None,
                 on_unzip_update=None,
                 on_unzip_finish=None,
                 on_unzip_error=None):
        """Download the file at **url** to **dst**, optionally extracting it.

        Args:
            url (:obj:`str`): URL to download from.
            dst (:obj:`pathlib.Path`): Destination file path, or destination
                directory when **is_zip** is `True`.
            is_zip (:obj:`bool`, optional): If `True`, treat the downloaded
                file as a zip archive and extract it into **dst**.
                Defaults to `False`.
            on_download_start: Callback invoked with the total download size
                in bytes before downloading begins. Defaults to `None`.
            on_download_update: Callback invoked with each downloaded chunk's
                size in bytes. Defaults to `None`.
            on_download_finish: Callback invoked when downloading completes.
                Defaults to `None`.
            on_download_error: Callback invoked if downloading fails.
                Defaults to `None`.
            on_unzip_start: Callback invoked with the total uncompressed size
                in bytes before extraction begins. Defaults to `None`.
            on_unzip_update: Callback invoked with each extracted file's size
                in bytes. Defaults to `None`.
            on_unzip_finish: Callback invoked when extraction completes.
                Defaults to `None`.
            on_unzip_error: Callback invoked if extraction fails.
                Defaults to `None`.

        Raises:
            :obj:`DownloaderError`: If downloading or extraction fails.
        """
        if is_zip:
            if dst.exists() and not dst.is_dir():
                raise DownloaderError(
                    'Destination directory {} is a pre-existing file.'.format(
                        repr(str(dst))))
            else:
                dst.mkdir(parents=True, exist_ok=True)
        with TemporaryDirectory() as tmp_dir:
            # Download data to a randomly named file in a temporary
            # directory so a failed download never clobbers existing data.
            fname = str(binascii.b2a_hex(urandom(15)), encoding='utf-8')
            tmp_data_path = Path(tmp_dir, fname)
            HTTPDownloader._save_content(url,
                                         tmp_data_path,
                                         on_start=on_download_start,
                                         on_update=on_download_update,
                                         on_finish=on_download_finish,
                                         on_error=on_download_error)
            if is_zip:
                # Replace any previous contents of the destination
                # directory wholesale.
                if dst.exists():
                    rmtree(dst)
                # Extract data to destination directory
                HTTPDownloader._extract_content(tmp_data_path,
                                                dst,
                                                on_start=on_unzip_start,
                                                on_update=on_unzip_update,
                                                on_finish=on_unzip_finish,
                                                on_error=on_unzip_error)
            else:
                if dst.exists():
                    remove(dst)
                move(tmp_data_path, dst)

    @staticmethod
    def _save_content(url,
                      destination,
                      on_start=None,
                      on_update=None,
                      on_finish=None,
                      on_error=None):
        """Stream the response body of **url** into the file **destination**.

        Raises:
            :obj:`DownloaderError`: If fewer bytes than advertised arrive or
                an OS error occurs while writing.
        """
        try:
            session = requests.Session()
            # Ask intermediaries not to serve a cached response so the
            # latest content is always fetched.
            headers = CaseInsensitiveDict()
            headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
            headers["Pragma"] = "no-cache"
            headers["Expires"] = "0"
            response = session.get(url, stream=True, headers=headers)
            curr_size = 0
            total_size = int(response.headers.get('content-length', 0))
            if on_start is not None:
                on_start(total_size)
            with open(destination, 'wb') as fp:
                for chunk in response.iter_content(_STREAM_CHUNK_SIZE):
                    if chunk:  # filter out keep-alive new chunks
                        fp.write(chunk)
                        chunk_size = len(chunk)
                        curr_size += chunk_size
                        if on_update is not None:
                            on_update(chunk_size)
            if curr_size < total_size:
                # Connection dropped mid-stream: fewer bytes arrived than
                # the server advertised.
                if on_error is not None:
                    on_error()
                raise DownloaderError(
                    'Download could not be completed.')
            if on_finish is not None:
                on_finish()
        except OSError as err:
            if on_error is not None:
                on_error()
            raise DownloaderError(
                'An error occured while downloading data.') from err

    @staticmethod
    def _extract_content(source,
                         destination,
                         on_start=None,
                         on_update=None,
                         on_finish=None,
                         on_error=None):
        """Extract the zip archive at **source** into **destination**.

        Raises:
            :obj:`DownloaderError`: If the archive cannot be read or
                extracted.
        """
        try:
            with zipfile.ZipFile(source, 'r') as zip_fp:
                uncompress_size = sum(
                    (file.file_size for file in zip_fp.infolist()))
                if on_start is not None:
                    on_start(uncompress_size)
                # Extract file by file so progress can be reported.
                for file in zip_fp.infolist():
                    file_size = file.file_size
                    zip_fp.extract(file, destination)
                    if on_update is not None:
                        on_update(file_size)
        # Previously a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit; catch Exception instead and chain the cause.
        except Exception as err:
            if on_error is not None:
                on_error()
            raise DownloaderError(
                'An error occured while extracting data.') from err
        if on_finish is not None:
            on_finish()
| 6,437 | 33.427807 | 79 | py |
camel_tools | camel_tools-master/camel_tools/data/post_install.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pathlib import Path
from typing import List
from muddler import unmuddle
__all__ = (
'post_install',
)
def parse_args(arg_types: List[str], args: List[str]) -> list:
    """Convert post-install argument strings into typed values.

    Args:
        arg_types: Declared type of each argument. Supported types are
            'string', 'path', 'int', 'float', and 'bool'.
        args: The raw string arguments to convert. Pairing stops at the
            shorter of the two lists.

    Returns:
        The converted arguments, in the same order as **args**.

    Raises:
        ValueError: If a declared type is unsupported, a 'bool' argument is
            not 'true'/'false' (case-insensitive), or an 'int'/'float'
            argument cannot be parsed.
    """
    result = []
    for arg_type, arg in zip(arg_types, args):
        if arg_type == 'string':
            result.append(arg)
        elif arg_type == 'path':
            result.append(Path(arg))
        elif arg_type == 'int':
            result.append(int(arg))
        elif arg_type == 'float':
            result.append(float(arg))
        elif arg_type == 'bool':
            arg_lower = arg.lower()
            if arg_lower == 'true':
                result.append(True)
            elif arg_lower == 'false':
                result.append(False)
            else:
                # Previously this silently appended False (with a TODO to
                # throw an error); an invalid boolean now fails loudly.
                raise ValueError(
                    'Invalid boolean argument {}.'.format(repr(arg)))
        else:
            # Previously unknown types were skipped silently, which shifted
            # all later positional indices (post-install steps index into
            # this list); fail loudly instead.
            raise ValueError(
                'Unknown argument type {}.'.format(repr(arg_type)))
    return result
def post_install(config: dict, package_path: Path, args: List[str]):
    """Run the post-installation steps declared in a package's config.

    Args:
        config: The package's post-install configuration containing an
            'args' type list and a 'steps' list.
        package_path: Root directory of the installed package.
        args: Raw string arguments parsed against config['args'].
    """
    parsed = parse_args(config['args'], args)
    for step in config['steps']:
        # Only the 'unmuddle' action is currently supported; all other
        # actions are skipped.
        if step['action'] != 'unmuddle':
            continue
        src = parsed[step['source_path_index']]
        muddled = Path(package_path, step['muddled_path'])
        output = Path(package_path, step['output_path'])
        unmuddle(str(src), str(muddled), str(output))
| 2,459 | 34.652174 | 79 | py |
camel_tools | camel_tools-master/camel_tools/data/__init__.py | # MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This sub-module contains utilities for locating datastes for the various
CAMeL Tools components.
"""
from camel_tools.data.catalogue import Catalogue, DatasetEntry, ComponentEntry
from camel_tools.data.catalogue import CatalogueError, FileEntry, PackageEntry
from camel_tools.data.catalogue import PackageType, CT_DATA_DIR
from camel_tools.data.downloader import DownloaderError
# Public API of the camel_tools.data package.
__all__ = [
    'Catalogue',
    'CatalogueError',
    'PackageEntry',
    'PackageType',
    'FileEntry',
    'ComponentEntry',
    'DatasetEntry',
    'DownloaderError',
    'CATALOGUE',
    'CT_DATA_DIR'
]
# Shared catalogue instance, loaded once when this package is imported.
CATALOGUE = Catalogue.load_catalogue()
| 1,763 | 34.28 | 79 | py |
camel_tools | camel_tools-master/camel_tools/data/catalogue.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from enum import Enum
from collections import deque
from genericpath import exists
import json
from pathlib import Path
from dataclasses import dataclass
from typing import Optional, Set, List, Mapping
import hashlib
import os
import sys
from pyrsistent import pvector, pmap
from tqdm import tqdm
import camel_tools
from camel_tools.data.downloader import HTTPDownloader
from camel_tools.data.post_install import post_install
# Files are read in 64 KiB chunks when computing hashes.
HASH_BLOCK_SIZE = 65536
# Location of the published catalogue listing all downloadable packages.
CATALOGUE_URL = "https://raw.githubusercontent.com/CAMeL-Lab/camel-tools-data/main/catalogue-1.5.json"
def _get_appdatadir():
home = Path.home()
# TODO: Make sure this works with OSs other than Windows, Linux and Mac.
if sys.platform == 'win32':
return Path(home, 'AppData/Roaming/camel_tools')
else:
return Path(home, '.camel_tools')
# Default data directory; created at import time if it doesn't exist.
CT_DATA_PATH_DEFAULT = _get_appdatadir()
CT_DATA_PATH_DEFAULT.mkdir(parents=True, exist_ok=True)
# The active data directory; can be overridden through the CAMELTOOLS_DATA
# environment variable.
CT_DATA_DIR = CT_DATA_PATH_DEFAULT
if os.environ.get('CAMELTOOLS_DATA') is not None:
    CT_DATA_DIR = Path(
        os.environ.get('CAMELTOOLS_DATA')).expanduser().absolute()
# Records the installed version of each package.
CT_VERSIONS_PATH = Path(CT_DATA_DIR, 'versions.json')
def _init_progress(progress_bar, desc, total):
    # Create the tqdm bar inside a single-element list so download/extract
    # callbacks can share and replace it in place.
    progress_bar[0] = tqdm(desc=desc,
                           total=total,
                           unit='B',
                           colour='green',
                           unit_scale=True)
def _update_progress(progress_bar, chunk_size):
    # Advance the shared progress bar by the number of bytes just processed.
    progress_bar[0].update(chunk_size)
def _finish_progress(progress_bar):
    # Close the shared progress bar once the operation completes.
    progress_bar[0].close()
def _hash_file(path):
    """Return the SHA-256 hex digest of the file at **path**."""
    digest = hashlib.sha256()
    with path.open('rb') as fp:
        # Stream the file in fixed-size chunks to bound memory usage;
        # iter() stops at the empty-bytes sentinel (end of file).
        for chunk in iter(lambda: fp.read(HASH_BLOCK_SIZE), b''):
            digest.update(chunk)
    return digest.hexdigest()
class PackageType(Enum):
    """Enum indicating how a package's contents are obtained."""
    META = 0
    """Indicates a package is a meta package (i.e. contains no files,
    only dependencies)."""
    HTTP = 1
    """Indicates a package is a zip file that can be downloaded via
    HTTP/HTTPS."""
@dataclass(frozen=True)
class FileEntry:
    """Data class containing information about a single file shipped in a
    package.
    """
    path: str
    """Relative path of this file within the package directory."""
    sha256: str
    """SHA256 hex digest of this file's contents."""
@dataclass(frozen=True)
class PackageEntry:
    """Data class containing information about a given package.
    """
    name: str
    """Name of this package."""
    description: str
    """Description of this package."""
    size: Optional[int]
    """Size of this package in bytes. Is `None` for meta packages."""
    version: Optional[str]
    """Package version. Is `None` for meta packages."""
    license: str
    """License this package is distributed under.
    Is `None` for meta packages.
    """
    package_type: PackageType
    """Type of this package (meta or HTTP-downloadable zip)."""
    url: Optional[str]
    """URL for downloading this package's zip file.
    Is `None` for meta packages."""
    destination: Optional[Path]
    """Installation path of package. Is `None` for meta packages."""
    dependencies: Optional[Set[str]]
    """Names of packages this package depends on."""
    files: Optional[List[FileEntry]]
    """List of files included in this package. Is `None` for meta packages."""
    private: bool
    """Indicates if this package should be hidden when being listed."""
    sha256: Optional[str]
    """SHA256 hash of package zip file. Is `None` for meta packages."""
    post_install: Optional[dict]
    """Post installation steps.
    Is `None` for packages that require no post-installation.
    """
@dataclass(frozen=True)
class DatasetEntry:
    """Immutable record describing an individual dataset.

    Attributes:
        name: Name of this dataset.
        component: Name of the component this dataset belongs to.
        path: Relative path of this dataset in the data directory.
    """

    name: str
    component: str
    path: str
@dataclass(frozen=True)
class ComponentEntry:
    """Immutable record holding the dataset information for one component.

    Attributes:
        name: Name of this component.
        default: The default dataset name for this component.
        datasets: A mapping of dataset names to their respective entries.
    """

    name: str
    default: str
    datasets: Mapping[str, DatasetEntry]
class CatalogueError(Exception):
    """Exception raised when an error occurs while querying or downloading
    catalogue data.
    """

    def __init__(self, msg: str):
        # Passing msg to Exception keeps `args` consistent with the default
        # behaviour while also exposing it as an attribute.
        super().__init__(msg)
        self.msg = msg

    def __str__(self):
        return str(self.msg)
@dataclass(frozen=True)
class Catalogue:
    """This class allows downloading and querying datasets provided by
    CAMeL Tools.
    """

    version: str
    """Catalogue version string."""

    packages: Mapping[str, PackageEntry]
    """Mapping of package names to their respective entries."""

    components: Mapping[str, ComponentEntry]
    """Mapping of component names with their respective entries."""

    @staticmethod
    def get_default_catalogue_path() -> Path:
        """Returns the default catalogue path, respecting the `CAMELTOOLS_DATA`
        environment variable if it is set.

        Returns:
            :obj:`Path`: Path to the catalogue file.
        """
        # The catalogue file name is version-independent. A truncated CAMeL
        # Tools version string used to be computed here but was never used.
        return Path(CT_DATA_DIR, 'catalogue.json')

    @staticmethod
    def update_catalogue():
        """Download latest catalogue for the current version of CAMeL Tools.

        Raises:
            :obj:`~camel_tools.data.DownloaderError`: When an error occurs
                while downloading catalogue.
        """
        cat_path = Catalogue.get_default_catalogue_path()
        HTTPDownloader.download(CATALOGUE_URL, cat_path)

    @staticmethod
    def load_catalogue(path: Optional[Path] = None) -> 'Catalogue':
        """Load catalogue file at a given path.

        Arguments:
            path (:obj:`Path`, optional): Path to catalogue file. If `None`,
                the default catalogue path is used (downloading the catalogue
                first if it is not present locally). Defaults to `None`.

        Returns:
            :obj:`Catalogue`: :obj:`~Catalogue` instance populated by the
            contents of the catalogue file.
        """
        if path is None:
            path = Catalogue.get_default_catalogue_path()

        # Check if catalogue is there; download it on first use otherwise.
        if not exists(path):
            Catalogue.update_catalogue()

        with path.open('r', encoding='utf-8') as cfp:
            catalogue_json = json.load(cfp)

        version = catalogue_json['version']
        packages = {}
        components = {}

        for pkg_name, pkg_json in catalogue_json['packages'].items():
            if pkg_json.get('files', None) is None:
                # Meta packages ship no files of their own.
                files = None
            else:
                files = pvector([FileEntry(file_json['path'],
                                           file_json['sha256'])
                                 for file_json in pkg_json['files']])

            destination = pkg_json.get('destination', None)
            if destination is not None:
                destination = Path(CT_DATA_DIR, 'data', destination)

            packages[pkg_name] = PackageEntry(
                name=pkg_name,
                description=pkg_json.get('description', ''),
                version=pkg_json.get('version', None),
                license=pkg_json.get('license', None),
                package_type=PackageType[pkg_json['package_type'].upper()],
                url=pkg_json.get('url', None),
                destination=destination,
                dependencies=frozenset(pkg_json.get('dependencies', [])),
                files=files,
                private=pkg_json['private'],
                sha256=pkg_json.get('sha256', None),
                size=pkg_json.get('size', None),
                post_install=pkg_json.get('post_install', None),
            )

        for cmp_name, cmp in catalogue_json['components'].items():
            datasets = {}
            for ds_name, ds in cmp['datasets'].items():
                ds_path = Path(CT_DATA_DIR, 'data', ds['path'])
                datasets[ds_name] = DatasetEntry(ds_name, cmp_name, ds_path)
            components[cmp_name] = ComponentEntry(cmp_name,
                                                  cmp['default'],
                                                  datasets)

        return Catalogue(version, packages, components)

    def get_package(self, package: str) -> PackageEntry:
        """Get a package entry for a given package name.

        Arguments:
            package (:obj:`str`): Name of package to query.

        Returns:
            :obj:`~camel_tools.data.PackageEntry`: Entry associated with
            given package name.

        Raises:
            :obj:`~camel_tools.data.CatalogueError`: When `package` is not
                a valid package name.
        """
        if package in self.packages:
            return self.packages[package]
        raise CatalogueError(f'Invalid package name {repr(package)}.')

    def get_component(self, component: str) -> ComponentEntry:
        """Get component entry for a given component name.

        Arguments:
            component (:obj:`str`): Name of component to query.

        Returns:
            :obj:`~camel_tools.data.ComponentEntry`: Entry associated with
            given component name.

        Raises:
            :obj:`~camel_tools.data.CatalogueError`: When `component` is not
                a valid component name.
        """
        if component in self.components:
            return self.components[component]
        # repr() for consistency with the other error messages in this class.
        raise CatalogueError(f'Invalid component name {repr(component)}.')

    def get_dataset(self, component: str, dataset: str = None) -> DatasetEntry:
        """Get dataset entry for a given component name and dataset name.

        Arguments:
            component (:obj:`str`): Name of component.
            dataset (:obj:`str`, Optional): Name of dataset for given component
                to query. If set to `None` then the entry for the default
                dataset will be returned. Defaults to `None`.

        Returns:
            :obj:`~camel_tools.data.DatasetEntry`: The dataset entry for the
            given component and dataset names.

        Raises:
            :obj:`~camel_tools.data.CatalogueError`: When `component` or
                `dataset` is not a valid name.
        """
        if component not in self.components:
            raise CatalogueError(f'Invalid component name {repr(component)}.')

        cmp = self.components[component]
        if dataset is None:
            dataset = cmp.default
        if dataset in cmp.datasets:
            return cmp.datasets[dataset]
        raise CatalogueError(f'Invalid dataset name {repr(dataset)}.')

    def _get_dependencies(self, package: str) -> "frozenset[str]":
        """Resolve the transitive closure of a package's dependencies,
        returning the names of all non-meta packages that must be installed.

        Raises:
            :obj:`CatalogueError`: When an encountered package name is
                invalid.
        """
        dep_set = set()
        # Track every visited package (including meta packages) so that
        # cyclic meta-package dependencies cannot cause an infinite loop.
        visited = set()
        dep_stack = deque([package])

        while len(dep_stack) > 0:
            pkg_name = dep_stack.pop()
            if pkg_name in visited:
                continue
            visited.add(pkg_name)

            pkg = self.packages.get(pkg_name, None)
            if pkg is None:
                raise CatalogueError(f'Invalid package name {repr(pkg_name)}.')

            # Only non-meta packages carry actual payloads to install.
            if pkg.package_type != PackageType.META:
                dep_set.add(pkg_name)

            if pkg.dependencies is None or len(pkg.dependencies) == 0:
                continue
            for pkg_dep in pkg.dependencies:
                if pkg_dep not in visited:
                    dep_stack.append(pkg_dep)

        return frozenset(dep_set)

    def get_public_packages(self) -> List[PackageEntry]:
        """Returns the entries of all packages marked as public in the
        catalogue, sorted by package name.

        Returns:
            :obj:`list` of :obj:`PackageEntry`: The entries of all packages
            marked as public, sorted by package name.
        """
        pkgs = [p for p in self.packages.values() if not p.private]
        pkgs.sort(key=lambda p: p.name)
        return pkgs

    def download_package(self,
                         package: str,
                         recursive: bool = True,
                         force: bool = False,
                         print_status: bool = False):
        """Download and install package with a given name.

        Arguments:
            package (:obj:`str`): Name of package to download and install.
            recursive (:obj:`bool`, Optional): If `True`, dependencies are
                recursively installed. Otherwise, only the package contents are
                installed. Defaults to `True`.
            force (:obj:`bool`, Optional): If `True`, packages that are
                already installed and up-to-date will be reinstalled, otherwise
                they are ignored. Defaults to `False`.
            print_status (:obj:`bool`, Optional): If `True`, prints out the
                download status to standard output.
                Defaults to `False`.

        Raises:
            :obj:`~camel_tools.data.CatalogueError`: When `package` is not
                a valid package name.
        """
        if package not in self.packages:
            raise CatalogueError(f'Invalid package name {repr(package)}')

        if recursive:
            deps = self._get_dependencies(package)
        else:
            deps = [package]

        # Load the record of locally installed package versions.
        if CT_VERSIONS_PATH.exists():
            with CT_VERSIONS_PATH.open('r', encoding='utf-8') as versions_fp:
                ct_versions = json.load(versions_fp)
        else:
            ct_versions = {}

        if not force:
            # Skip packages that are already installed and up-to-date.
            deps = [dep for dep in deps
                    if dep not in ct_versions
                    or self.packages[dep].version != ct_versions[dep]]

        if len(deps) == 0:
            if print_status:
                print('No new packages will be installed.')
            return

        if print_status:
            pkg_repr = ', '.join([repr(d) for d in deps])
            print(f'The following packages will be installed: {pkg_repr}')

        for dep in deps:
            dep_pkg = self.packages[dep]

            # Meta packages carry no payload of their own.
            if dep_pkg.package_type == PackageType.META:
                continue

            on_dl_start = None
            on_dl_update = None
            on_dl_finish = None
            on_uz_start = None
            on_uz_update = None
            on_uz_finish = None

            if print_status:
                # Bind loop variables as default arguments so the callbacks
                # don't depend on late-binding closure semantics.
                dl_progress_bar = [None]
                on_dl_start = (
                    lambda t, pb=dl_progress_bar, d=dep: _init_progress(
                        pb, f'Downloading package {repr(d)}', t))
                on_dl_update = (
                    lambda c, pb=dl_progress_bar: _update_progress(pb, c))
                on_dl_finish = (
                    lambda pb=dl_progress_bar: _finish_progress(pb))

                uz_progress_bar = [None]
                on_uz_start = (
                    lambda t, pb=uz_progress_bar, d=dep: _init_progress(
                        pb, f'Extracting package {repr(d)}', t))
                on_uz_update = (
                    lambda c, pb=uz_progress_bar: _update_progress(pb, c))
                on_uz_finish = (
                    lambda pb=uz_progress_bar: _finish_progress(pb))

            if dep_pkg.package_type == PackageType.HTTP:
                HTTPDownloader.download(dep_pkg.url,
                                        dep_pkg.destination,
                                        is_zip=True,
                                        on_download_start=on_dl_start,
                                        on_download_update=on_dl_update,
                                        on_download_finish=on_dl_finish,
                                        on_unzip_start=on_uz_start,
                                        on_unzip_update=on_uz_update,
                                        on_unzip_finish=on_uz_finish)

            # Update versions file after each successful install so partial
            # progress is not lost if a later package fails.
            ct_versions[dep] = dep_pkg.version
            with CT_VERSIONS_PATH.open('w', encoding='utf-8') as versions_fp:
                json.dump(ct_versions, versions_fp, indent=4)

    def post_install_package(self, package: str, args: List[str]):
        """Run the post-installation steps, if any, for a given package.

        Arguments:
            package (:obj:`str`): Name of the installed package.
            args (:obj:`list` of :obj:`str`): Extra arguments forwarded to
                the post-installation handler.

        Raises:
            :obj:`~camel_tools.data.CatalogueError`: When `package` is not
                a valid package name.
        """
        if package not in self.packages:
            raise CatalogueError(f'Invalid package name {repr(package)}')

        pkg = self.packages[package]
        config = pkg.post_install
        if config is None:
            return
        post_install(config, pkg.destination, args)
| 17,448 | 31.614953 | 102 | py |
camel_tools | camel_tools-master/camel_tools/tagger/common.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This sub-module contains common functions and classes used for tagging.
"""
from abc import ABC, abstractmethod
class Tagger(ABC):
    """Base class for taggers. A tagger maps each item in a list of tokens
    (i.e. a sentence) to some other value. Examples of taggers include POS
    taggers, diacritizers, etc.
    """

    @abstractmethod
    def tag(self, sentence):
        """Generate a tag for each token in a given sentence.

        Args:
            sentence (:obj:`list` of :obj:`str`): The sentence to be tagged.

        Returns:
            :obj:`list`: The list of tags corresponding to each token in
            `sentence`.

        Raises:
            :obj:`NotImplementedError`: If not overridden by a subclass.
        """

        raise NotImplementedError
| 1,846 | 34.519231 | 79 | py |
camel_tools | camel_tools-master/camel_tools/tagger/__init__.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| 1,151 | 45.08 | 79 | py |
camel_tools | camel_tools-master/camel_tools/tagger/default.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""contains the CAMeL Tools default tagger.
"""
from collections import OrderedDict
from camel_tools.tagger.common import Tagger
from camel_tools.disambig.common import Disambiguator
def _tag_passthrough(feat, word):
if len(word.analyses) == 0:
return word.word
feat_value = word.analyses[0].analysis.get(feat, None)
if feat_value is None or feat_value == 'NOAN':
return word.word
return feat_value
def _tag_none(feat, word):
if len(word.analyses) == 0:
return None
return word.analyses[0].analysis.get(feat, None)
def _tag_lex(feat, word):
if len(word.analyses) == 0:
return '{}_0'.format(word)
return word.analyses[0].analysis.get('lex', '{}_0'.format(word))
# Maps each feature name that DefaultTagger can produce to the strategy used
# to derive its tag from a disambiguated word:
#   _tag_passthrough - feature value, falling back to the raw word
#   _tag_none        - feature value, falling back to None
#   _tag_lex         - lemma, falling back to '<word>_0'
_FEAT_ACTIONS = {
    'diac': _tag_passthrough,
    'bw': _tag_none,
    'lex': _tag_lex,
    'gloss': _tag_none,
    'pos': _tag_none,
    'asp': _tag_none,
    'cas': _tag_none,
    'mod': _tag_none,
    'num': _tag_none,
    'gen': _tag_none,
    'form_num': _tag_none,
    'form_gen': _tag_none,
    'stt': _tag_none,
    'vox': _tag_none,
    'per': _tag_none,
    'enc0': _tag_none,
    'enc1': _tag_none,
    'enc2': _tag_none,
    'prc0': _tag_none,
    'prc1': _tag_none,
    'prc2': _tag_none,
    'prc3': _tag_none,
    'atbtok': _tag_passthrough,
    'atbseg': _tag_passthrough,
    'bwtok': _tag_passthrough,
    'd1tok': _tag_passthrough,
    'd1seg': _tag_passthrough,
    'd2tok': _tag_passthrough,
    'd2seg': _tag_passthrough,
    'd3tok': _tag_passthrough,
    'd3seg': _tag_passthrough,
    'catib6': _tag_none,
    'ud': _tag_none,
    'caphi': _tag_none
}
class DefaultTaggerError(Exception):
    """Base class for errors raised by :obj:`DefaultTagger`."""
class InvalidDefaultTaggerDisambiguator(DefaultTaggerError, ValueError):
    """Error raised when a DefaultTagger is initialized with an object that
    does not implement :obj:`~camel_tools.disambig.common.Disambiguator`.
    """

    def __str__(self):
        return 'Invalid disambiguator.'
class InvalidDefaultTaggerFeature(DefaultTaggerError, ValueError):
    """Error raised when a DefaultTagger is initialized with an invalid
    feature name.
    """

    def __init__(self, feature):
        # Keep the offending feature so __str__ can report it.
        self._feature = feature

    def __str__(self):
        return f'Invalid feature {self._feature!r}'
class DefaultTagger(Tagger):
    """The default camel_tools tagger. It generates tags for a given feature by
    first disambiguating a word using a given disambiguator and then returning
    the associated value for that feature. It also provides sensible default
    values for when no analyses are generated by the disambiguator or when a
    feature is not present in the disambiguation.

    Args:
        disambiguator (:obj:`~camel_tools.disambig.common.Disambiguator`): The
            disambiguator used for disambiguating input.
        feature (:obj:`str`): The feature to be produced.

    Raises:
        :obj:`InvalidDefaultTaggerDisambiguator`: If `disambiguator` is not an
            instance of :obj:`~camel_tools.disambig.common.Disambiguator`.
        :obj:`InvalidDefaultTaggerFeature`: If `feature` is not a valid feature
            name.
    """

    def __init__(self, disambiguator, feature):
        if not isinstance(disambiguator, Disambiguator):
            raise InvalidDefaultTaggerDisambiguator()
        if feature not in _FEAT_ACTIONS:
            raise InvalidDefaultTaggerFeature(feature)

        self._disambiguator = disambiguator
        self._feature = feature

    def _tag_disambiguated_word(self, word):
        # Dispatch to the per-feature tagging strategy table.
        return _FEAT_ACTIONS[self._feature](self._feature, word)

    def tag(self, sentence):
        """Generate a tag for each token in a given sentence.

        Args:
            sentence (:obj:`list` of :obj:`str`): The sentence to be tagged.

        Returns:
            :obj:`list`: The list of tags corresponding to each token in
            `sentence`.
        """
        disambig_words = self._disambiguator.disambiguate(sentence)
        return [self._tag_disambiguated_word(w) for w in disambig_words]

    @staticmethod
    def feature_list():
        """Returns the valid feature names producible by
        :obj:`DefaultTagger` (as a dictionary key view).
        """
        return _FEAT_ACTIONS.keys()
| 5,459 | 29.333333 | 79 | py |
camel_tools | camel_tools-master/camel_tools/disambig/mle.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Contains a disambiguator that uses a Maximum Likelihood Estimation model.
"""
import json
from cachetools import LFUCache, cached
import editdistance
from camel_tools.utils.dediac import dediac_ar
from camel_tools.disambig.common import Disambiguator, DisambiguatedWord
from camel_tools.disambig.common import ScoredAnalysis
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.analyzer import Analyzer
from camel_tools.morphology.utils import strip_lex
from camel_tools.data import CATALOGUE
def _calima_msa_r13_analyzer():
    """Build an analyzer over the built-in 'calima-msa-r13' database with
    proper-noun backoff."""

    return Analyzer(MorphologyDB.builtin_db('calima-msa-r13', 'a'),
                    'NOAN_PROP')
def _calima_egy_r13_analyzer():
    """Build an analyzer over the built-in 'calima-egy-r13' database with
    proper-noun backoff."""

    return Analyzer(MorphologyDB.builtin_db('calima-egy-r13', 'a'),
                    'NOAN_PROP')
# Maps pretrained model names to factory functions that build the analyzer
# used when a word is missing from the word-based MLE model.
_MLE_ANALYZER_MAP = {
    'calima-msa-r13': _calima_msa_r13_analyzer,
    'calima-egy-r13': _calima_egy_r13_analyzer
}
def _get_pos_lex_logprob(analysis):
logprob = analysis.get('pos_lex_logprob', -99.0)
if logprob is None:
return -99
return logprob
# Features compared by exact equality when scoring an analysis against the
# MLE reference analysis (one point per matching feature).
_EQ_FEATS = frozenset(['asp', 'cas', 'enc0', 'gen', 'mod', 'num', 'per', 'pos',
                       'prc0', 'prc1', 'prc2', 'prc3', 'form_num', 'form_gen',
                       'enc1', 'enc2', 'stt', 'vox'])
# String-valued features scored by normalized edit distance to the reference
# value (partial credit for near matches).
_DISTANCE_FEATS = frozenset(['diac', 'lex', 'bw'])
def _score_analysis(analysis, reference):
    """Score how closely *analysis* matches the MLE *reference* analysis.

    Equality features contribute one point each when they match exactly;
    distance features contribute partial credit in [0, 1] based on the
    normalized edit distance to the reference value.

    Arguments:
        analysis (:obj:`dict`): Candidate analysis to score.
        reference (:obj:`dict`): Reference (MLE) analysis.

    Returns:
        :obj:`float`: The match score (higher is a closer match).
    """
    score = 0.0

    for feat in _EQ_FEATS:
        if analysis.get(feat, '') == reference.get(feat, ''):
            score += 1

    for feat in _DISTANCE_FEATS:
        feat_r = reference.get(feat, '')
        feat_a = analysis.get(feat, '')

        # Guard against division by zero when the reference value is empty
        # (previously a ZeroDivisionError). An empty reference matches only
        # an empty analysis value exactly.
        if len(feat_r) == 0:
            if len(feat_a) == 0:
                score += 1.0
            continue

        distance = editdistance.eval(feat_r, feat_a)
        score += max(0.0, (len(feat_r) - distance) / len(feat_r))

    return score
class MLEDisambiguator(Disambiguator):
    """A disambiguator using a Maximum Likelihood Estimation (MLE) model.
    It first does a lookup in a given word-based MLE model. If none is provided
    or a word is not in the word-based model, then an analyzer is used to
    disambiguate words based on the pos-lex log probabilities of their analyses.

    Args:
        analyzer (:obj:`~camel_tools.morphology.analyzer.Analyzer`):
            Disambiguator to use if a word is not in the word-based MLE model.
            The analyzer should provide the pos-lex log probabilities for
            analyses to disambiguate analyses.
        mle_path (:obj:`str`, optional): Path to MLE JSON file. If `None`,
            then no word-based MLE lookup is performed skipping directly to
            using the pos-lex model. Defaults to `None`.
        top (:obj:`int`, optional): The maximum number of top analyses to
            return. Defaults to 1.
        cache_size (:obj:`int`, optional): The number of unique word
            disambiguations to cache. The cache uses a least-frequently-used
            eviction policy. Defaults to 100000.
    """

    def __init__(self, analyzer, mle_path=None, top=1, cache_size=100000):
        if not isinstance(analyzer, Analyzer):
            raise ValueError('Invalid analyzer instance.')
        if not isinstance(top, int):
            raise ValueError('Invalid value for top.')
        if not isinstance(cache_size, int):
            raise ValueError('Invalid value for cache_size.')

        if mle_path is not None:
            with open(mle_path, 'r', encoding='utf-8') as mle_fp:
                self._mle = json.load(mle_fp)

            # TODO: Remove this when MLE files are fixed
            for analysis in self._mle.values():
                analysis['lex'] = strip_lex(analysis['lex'])
        else:
            self._mle = None

        self._analyzer = analyzer

        # Clamp top to at least 1.
        if top < 1:
            top = 1
        self._top = top

        if cache_size <= 0:
            # Caching disabled; always score from scratch.
            self._cache = None
            self._score_fn = self._scored_analyses
        else:
            self._cache = LFUCache(cache_size)
            self._score_fn = self._scored_analyses_cached

    @staticmethod
    def pretrained(model_name=None, analyzer=None, top=1, cache_size=100000):
        """Load a pre-trained MLE disambiguator provided with CAMeL Tools.

        Args:
            model_name (:obj:`str`, optional): The name of the pretrained
                model. If none, the default model ('calima-msa-r13') is loaded.
                At the moment, the model names available are the same as those
                in :ref:`camel_morphology_dbs`.
                Defaults to None.
            analyzer (:obj:`Analyzer`, optional): Alternative
                analyzer to use. If None, an instance of the model's default
                analyzer is created. Defaults to None.
            top (:obj:`int`, optional): The maximum number of top analyses to
                return. Defaults to 1.
            cache_size (:obj:`int`, optional): The number of unique word
                disambiguations to cache. The cache uses a
                least-frequently-used eviction policy. Defaults to 100000.

        Returns:
            :obj:`MLEDisambiguator`: The loaded MLE disambiguator.
        """
        if model_name is None:
            model_name = CATALOGUE.components['DisambigMLE'].default

        model_info = CATALOGUE.components['DisambigMLE'].datasets[model_name]
        mle_path = model_info.path / 'model.json'

        if analyzer is None:
            analyzer = _MLE_ANALYZER_MAP[model_info.name]()

        return MLEDisambiguator(analyzer, str(mle_path), top, cache_size)

    def _scored_analyses(self, word_dd):
        """Analyze a dediacritized word and return its top-scored analyses,
        sorted from best to worst."""
        if self._mle is not None and word_dd in self._mle:
            # Score analyzer output against the word-based MLE reference.
            mle_analysis = self._mle[word_dd]
            analyses = self._analyzer.analyze(word_dd)

            if len(analyses) == 0:
                return []

            scored = [(_score_analysis(a, mle_analysis), a) for a in analyses]
            # Ties are broken by shorter Buckwalter tag, then by diac.
            scored.sort(key=lambda s: (-s[0], len(s[1]['bw']), s[1]['diac']))

            max_score = max([s[0] for s in scored])
            if max_score == 0:
                max_score = 1

            scored_analyses = [
                ScoredAnalysis(
                    s / max_score,                  # score
                    a,                              # analysis
                    a['diac'],                      # diac
                    a.get('pos_lex_logprob', -99),  # pos_lex_logprob
                    a.get('lex_logprob', -99),      # lex_logprob
                ) for s, a in scored]

            return scored_analyses[0:self._top]

        else:
            # Fall back to pos-lex log probabilities from the analyzer.
            analyses = self._analyzer.analyze(word_dd)

            if len(analyses) == 0:
                return []

            probabilities = [10 ** _get_pos_lex_logprob(a) for a in analyses]
            max_prob = max(probabilities)

            scored_analyses = [
                ScoredAnalysis(
                    p / max_prob,                   # score
                    a,                              # analysis
                    a['diac'],                      # diac
                    a.get('pos_lex_logprob', -99),  # pos_lex_logprob
                    a.get('lex_logprob', -99),      # lex_logprob
                ) for a, p in zip(analyses, probabilities)]
            scored_analyses.sort()

            return scored_analyses[0:self._top]

    def _scored_analyses_cached(self, word_dd):
        """Memoizing wrapper around :meth:`_scored_analyses`.

        Fix: the previous implementation used
        ``cache.get(word_dd, self._scored_analyses(word_dd))``, which both
        computed the fallback eagerly on every call and never stored the
        result, so the cache had no effect.
        """
        try:
            return self._cache[word_dd]
        except KeyError:
            scored = self._scored_analyses(word_dd)
            self._cache[word_dd] = scored
            return scored

    def _disambiguate_word(self, word):
        # Lookup/scoring is always done on the dediacritized form.
        word_dd = dediac_ar(word)
        scored_analyses = self._score_fn(word_dd)
        return DisambiguatedWord(word, scored_analyses)

    def disambiguate_word(self, sentence, word_ndx):
        """Disambiguates a single word in a sentence. Note, that while MLE
        disambiguation operates on each word out of context, we maintain this
        interface to be compatible with disambiguators that work in context
        of a sentence.

        Args:
            sentence (:obj:`list` of :obj:`str`): The list of space and
                punctuation seperated list of tokens comprising a given
                sentence.
            word_ndx (:obj:`int`): The index of the word token in `sentence` to
                disambiguate.

        Returns:
            :obj:`~camel_tools.disambig.common.DisambiguatedWord`: The
            disambiguation of the word token in `sentence` at `word_ndx`.
        """
        return self._disambiguate_word(sentence[word_ndx])

    def disambiguate(self, sentence):
        """Disambiguate all words in a given sentence.

        Args:
            sentence (:obj:`list` of :obj:`str`): The list of space and
                punctuation seperated list of tokens comprising a given
                sentence.

        Returns:
            :obj:`list` of :obj:`~camel_tools.disambig.common.DisambiguatedWord`:
            The list of disambiguations for each word in the given sentence.
        """
        return [self._disambiguate_word(w) for w in sentence]

    def all_feats(self):
        """Return a set of all features produced by this disambiguator.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set all features produced by
            this disambiguator.
        """
        return self._analyzer.all_feats()

    def tok_feats(self):
        """Return a set of tokenization features produced by this
        disambiguator.

        Returns:
            :obj:`frozenset` of :obj:`str`: The set tokenization features
            produced by this disambiguator.
        """
        return self._analyzer.tok_feats()
| 10,769 | 36.137931 | 81 | py |
camel_tools | camel_tools-master/camel_tools/disambig/score_function.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import reduce
import sys
__all__ = [
    'FEATS_10',
    'FEATS_14',
    'FEATS_16',
    'FEATURE_SET_MAP',
    'score_analysis_uniform'
]

# Tiny penalty (1e-<platform float decimal digits>) subtracted from backoff
# analyses in score_analysis_uniform so that they rank below otherwise
# equally-scored database analyses without affecting the integer match score.
_BACKOFF_PENALTY = float(f'1e-{sys.float_info.dig}')
# 10 features described in Khalifa et al., 2020
# Morphological Analysis and Disambiguation for Gulf Arabic: The Interplay
# between Resources and Methods
FEATS_10 = [
    'pos', 'per', 'form_gen', 'form_num', 'asp', 'prc0', 'prc1', 'prc2',
    'prc3', 'enc0'
]

# FEATS_10 plus mod, vox, stt and cas.
FEATS_14 = [
    'pos', 'per', 'form_gen', 'form_num', 'asp', 'mod', 'vox', 'stt', 'cas',
    'prc0', 'prc1', 'prc2', 'prc3', 'enc0'
]

# FEATS_14 plus the second and third enclitic slots (enc1, enc2).
FEATS_16 = [
    'pos', 'per', 'form_gen', 'form_num', 'asp', 'mod', 'vox', 'stt', 'cas',
    'prc0', 'prc1', 'prc2', 'prc3', 'enc0', 'enc1', 'enc2'
]

# Maps feature-set names to the feature lists above.
FEATURE_SET_MAP = {
    'feats_10': FEATS_10,
    'feats_14': FEATS_14,
    'feats_16': FEATS_16,
}
def score_analysis_uniform(analysis, reference, mle_model=None,
                           tie_breaker=None, features=FEATS_16):
    """Calculate the score of matches given the predictions from the classifier
    and the analyses from the morphological analyzer.

    Arguments:
        analysis (:obj:`dict`): Candidate analysis from the analyzer.
        reference (:obj:`dict`): Predicted feature values to match against.
        mle_model (:obj:`dict`, optional): MLE model used only when
            `tie_breaker` is 'tag'. Defaults to `None`.
        tie_breaker (:obj:`str`, optional): If 'tag', adds a tag-probability
            tie-breaker term to the score. Defaults to `None`.
        features (:obj:`list` of :obj:`str`, optional): Features compared
            between `analysis` and `reference`. Defaults to `FEATS_16`.

    Returns:
        :obj:`float`: Number of matching features, plus the optional
        tie-breaker term, minus a tiny penalty for backoff analyses.
    """
    # For GLF and LEV analyzers that use num for form_num, and gen for
    # form_gen. Work on a copy so the caller's dict is not mutated (the
    # previous implementation wrote the fallback keys into `analysis`).
    analysis_feats = dict(analysis)
    if 'form_num' not in analysis_feats:
        analysis_feats['form_num'] = analysis_feats['num']
    if 'form_gen' not in analysis_feats:
        analysis_feats['form_gen'] = analysis_feats['gen']

    score = sum(
        analysis_feats.get(feat, '') == reference.get(feat, '')
        for feat in features
    )

    if tie_breaker == 'tag':
        score += _tie_breaker_tag(analysis_feats, reference, mle_model)

    # Backoff analyses lose ties against database analyses.
    if analysis_feats['source'] == 'backoff':
        score -= _BACKOFF_PENALTY

    return score
def _tie_breaker_tag(analysis, reference, mle_model):
"""Calculate the tie breaker score using factored tag and unfactored tag
probabilities in the training data.
"""
ordered_feats = mle_model['info']['features']
score_factored_tag = reduce(lambda x, y: x*y,
[mle_model[x].get(analysis.get(x, ''), 0)
for x in ordered_feats])
unfactored_tag = ' '.join(
f'{x}:{analysis.get(x, "")}' for x in ordered_feats
)
score_unfactored_tag = mle_model['unfactored'].get(unfactored_tag, 0)
return (score_factored_tag + score_unfactored_tag) / 2
| 3,483 | 31.867925 | 79 | py |
camel_tools | camel_tools-master/camel_tools/disambig/common.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This sub-module contains common functions and classes used for
disambiguation.
"""
from abc import ABC, abstractmethod
from collections import namedtuple
class ScoredAnalysis(namedtuple('ScoredAnalysis',
                                ['score', 'analysis', 'diac',
                                 'pos_lex_logprob', 'lex_logprob'])):
    """A named tuple containing an analysis and its score.

    Sorting a list of :obj:`ScoredAnalysis` instances orders them from best
    to worst: higher score first, ties broken by higher pos-lex log
    probability, then higher lex log probability, then by diac ascending.

    Attributes:
        score (:obj:`float`): The overall score of the analysis.

        analysis (:obj:`dict`): The analysis dictionary.
            See :doc:`/reference/camel_morphology_features` for more
            information on features and their values.

        diac (:obj:`str`): The diactrized form of the associated analysis.
            Used for tie-breaking equally scored analyses.

        pos_lex_log_prob (:obj:`float`): The log (base 10) of the probability
            of the associated pos-lex pair values.
            Used for tie-breaking equally scored analyses.

        lex_log_prob (:obj:`float`): The log (base 10) of the probability of
            the associated lex value.
            Used for tie-breaking equally scored analyses.
    """

    def __lt__(self, other):
        # Compare tie-breaker fields one at a time; a strictly greater
        # numeric value sorts earlier, diac breaks final ties ascending.
        if self.score != other.score:
            return self.score > other.score
        if self.pos_lex_logprob != other.pos_lex_logprob:
            return self.pos_lex_logprob > other.pos_lex_logprob
        if self.lex_logprob != other.lex_logprob:
            return self.lex_logprob > other.lex_logprob
        return self.diac < other.diac
class DisambiguatedWord(namedtuple('DisambiguatedWord', ['word', 'analyses'])):
    """A named tuple containing a word and a sorted list (from high to low
    score) of scored analyses. An empty `analyses` list indicates that no
    analysis could be produced for `word`.

    Attributes:
        word (:obj:`str`): The word being disambiguated.

        analyses (:obj:`list` of \
        :obj:`~camel_tools.disambig.common.ScoredAnalysis`): List of scored
            analyses sorted from highest to lowest disambiguation score.
    """
class Disambiguator(ABC):
    """Abstract base class that all disambiguators should implement.
    """
    @abstractmethod
    def disambiguate(self, sentence, top=1):
        """Disambiguate words in a sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): list of words representing a
                sentence to be disambiguated.
            top (:obj:`int`, optional): The number of top analyses to return.
                If set to zero or less, then all analyses are returned.
                Defaults to 1.
        Returns:
            :obj:`list` of :obj:`~camel_tools.disambig.common.DisambiguatedWord`:
            List of disambiguated words in **sentence**.
        """
        raise NotImplementedError
    @abstractmethod
    def disambiguate_word(self, sentence, word_ndx, top=1):
        """Disambiguate a word at a given index in a sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): list of words representing a
                sentence.
            word_ndx (:obj:`int`): the index of the word in **sentence** to
                disambiguate.
            top (:obj:`int`, optional): The number of top analyses to return.
                If set to zero or less, then all analyses are returned.
                Defaults to 1.
        Returns:
            :obj:`~camel_tools.disambig.common.DisambiguatedWord`: The
            disambiguated word at index **word_ndx** in **sentence**.
        """
        raise NotImplementedError
    @abstractmethod
    def all_feats(self):
        """Return a set of all features produced by this disambiguator.
        Returns:
            :obj:`frozenset` of :obj:`str`: The set of all features produced
            by this disambiguator.
        """
        raise NotImplementedError
    @abstractmethod
    def tok_feats(self):
        """Return a set of tokenization features produced by this
        disambiguator.
        Returns:
            :obj:`frozenset` of :obj:`str`: The set of tokenization features
            produced by this disambiguator.
        """
        raise NotImplementedError
| 5,555 | 34.845161 | 79 | py |
camel_tools | camel_tools-master/camel_tools/disambig/__init__.py | # MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains classes and functions for morphological disambiguation.
"""
| 1,210 | 47.44 | 79 | py |
camel_tools | camel_tools-master/camel_tools/disambig/bert/unfactored.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from pathlib import Path
import pickle
from cachetools import LFUCache
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from transformers import BertForTokenClassification, BertTokenizer
from camel_tools.data import CATALOGUE
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.analyzer import Analyzer
from camel_tools.disambig.common import Disambiguator, DisambiguatedWord
from camel_tools.disambig.common import ScoredAnalysis
from camel_tools.disambig.bert._bert_morph_dataset import MorphDataset
from camel_tools.disambig.score_function import score_analysis_uniform
from camel_tools.disambig.score_function import FEATURE_SET_MAP
from camel_tools.utils.dediac import dediac_ar
# Maps a scorer name (as it appears in model config files) to the scoring
# function used to match analyzer output against BERT predictions.
# Only uniform feature weighting is currently available.
_SCORING_FUNCTION_MAP = {
    'uniform': score_analysis_uniform
}
def _read_json(f_path):
with open(f_path) as f:
return json.load(f)
def _dediac_sentence(sentence):
    """Strip Arabic diacritics from every word in a sentence.
    Words that become empty after dediacritization (e.g. tokens made up
    entirely of diacritic marks) are kept in their original form, so the
    output always has the same length as the input.
    Args:
        sentence (:obj:`list` of :obj:`str`): The input words.
    Returns:
        :obj:`list` of :obj:`str`: The dediacritized words.
    """
    # `dediac_ar(word) or word` falls back to the original token whenever
    # the dediacritized form is the empty string.
    return [dediac_ar(word) or word for word in sentence]
class _BERTFeatureTagger:
    """A feature tagger based on the fine-tuned BERT architecture.
    Args:
        model_path (:obj:`str`): The path to the fine-tuned model.
        use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
            Defaults to True.
    """
    def __init__(self, model_path, use_gpu=True):
        self._model = BertForTokenClassification.from_pretrained(model_path)
        self._tokenizer = BertTokenizer.from_pretrained(model_path)
        # Mapping from output class index to its label string, taken from
        # the fine-tuned model's configuration.
        self._labels_map = self._model.config.id2label
        self._use_gpu = use_gpu
    def labels(self):
        """Get the list of Morph labels returned by predictions.
        Returns:
            :obj:`list` of :obj:`str`: List of Morph labels.
        """
        return list(self._labels_map.values())
    def _align_predictions(self, predictions, label_ids, sent_ids):
        """Aligns the predictions of the model with the inputs and it takes
        care of getting rid of the padding token.
        Args:
            predictions (:obj:`np.ndarray`): The predictions of the model
            label_ids (:obj:`np.ndarray`): The label ids of the inputs.
                They will always be the ids of Os since we're dealing with a
                test dataset. Note that label_ids are also padded.
            sent_ids (:obj:`np.ndarray`): The sent ids of the inputs.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`str`: The predicted labels for
            all the sentences in the batch
        """
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                # Positions carrying the ignore index are padding or
                # word-piece continuations; they get no prediction.
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    preds_list[i].append(self._labels_map[preds[i][j]])
        # Collating the predicted labels based on the sentence ids
        # (long sentences may have been split into several segments that
        # share the same sent id; their labels are re-joined here).
        final_preds_list = [[] for _ in range(len(set(sent_ids)))]
        for i, id in enumerate(sent_ids):
            # Sent ids are absolute; rebase them to the start of the batch.
            id = id - sent_ids[0]
            final_preds_list[id].extend(preds_list[i])
        return final_preds_list
    def predict(self, sentences, batch_size=32, max_seq_length=512):
        """Predict the morphosyntactic labels of a list of sentences.
        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
            batch_size (:obj:`int`): The batch size.
            max_seq_length (:obj:`int`): The max sequence size.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`str`: The predicted
            morphosyntactic labels for the given sentences.
        """
        if len(sentences) == 0:
            return []
        # Sort sentences by length so each batch holds similarly sized
        # inputs (less padding); the original order is restored at the end
        # using the saved indices.
        sorted_sentences = list(enumerate(sentences))
        sorted_sentences = sorted(sorted_sentences, key=lambda x: len(x[1]))
        sorted_sentences_idx = [i[0] for i in sorted_sentences]
        sorted_sentences_text = [i[1] for i in sorted_sentences]
        test_dataset = MorphDataset(sentences=sorted_sentences_text,
                                    tokenizer=self._tokenizer,
                                    labels=list(self._labels_map.values()),
                                    max_seq_length=max_seq_length)
        data_loader = DataLoader(test_dataset, batch_size=batch_size,
                                 shuffle=False, drop_last=False,
                                 collate_fn=self._collate_fn)
        predictions = []
        # Fall back to CPU when no CUDA device is available.
        device = ('cuda' if self._use_gpu and torch.cuda.is_available()
                  else 'cpu')
        self._model.to(device)
        self._model.eval()
        with torch.no_grad():
            for batch in data_loader:
                batch = {k: v.to(device) for k, v in batch.items()}
                inputs = {'input_ids': batch['input_ids'],
                          'token_type_ids': batch['token_type_ids'],
                          'attention_mask': batch['attention_mask']}
                label_ids = batch['label_ids']
                sent_ids = batch['sent_id']
                logits = self._model(**inputs)[0]
                preds = logits
                prediction = self._align_predictions(preds.cpu().numpy(),
                                                     label_ids.cpu().numpy(),
                                                     sent_ids.cpu().numpy())
                predictions.extend(prediction)
        # Restore the caller's original sentence order.
        sorted_predictions_pair = zip(sorted_sentences_idx, predictions)
        sorted_predictions = sorted(sorted_predictions_pair,
                                    key=lambda x: x[0])
        return [i[1] for i in sorted_predictions]
    def _collate_fn(self, batch):
        """Collate featurized items into a batch, trimming the padding that
        is unnecessary given the longest item actually in the batch.
        Args:
            batch (:obj:`list` of :obj:`dict`): Featurized dataset items.
        Returns:
            :obj:`dict`: Batched tensors keyed like the dataset items.
        """
        input_ids = []
        token_type_ids = []
        attention_mask = []
        label_ids = []
        sent_id = []
        # Find max length within the batch
        max_seq_length = 0
        for sent in batch:
            l = len(sent['input_ids'][sent['input_ids'].nonzero()].squeeze())
            max_seq_length = max(max_seq_length, l)
        # Truncate the unnecessary paddings
        for sent in batch:
            for _, t in sent.items():
                if _ != 'sent_id':
                    sent[_] = t[:max_seq_length]
        for sent in batch:
            input_ids.append(sent['input_ids'])
            token_type_ids.append(sent['token_type_ids'])
            attention_mask.append(sent['attention_mask'])
            label_ids.append(sent['label_ids'])
            sent_id.append(sent['sent_id'])
        return {
            'input_ids': torch.stack(input_ids),
            'token_type_ids': torch.stack(token_type_ids),
            'attention_mask': torch.stack(attention_mask),
            'label_ids': torch.stack(label_ids),
            'sent_id': torch.tensor(sent_id, dtype=torch.int32),
        }
class BERTUnfactoredDisambiguator(Disambiguator):
    """A disambiguator using an unfactored BERT model. This model is based on
    *Morphosyntactic Tagging with Pre-trained Language Models for Arabic and
    its Dialects* by Inoue, Khalifa, and Habash. Findings of ACL 2022.
    (https://arxiv.org/abs/2110.06852)
    Args:
        model_path (:obj:`str`): The path to the fine-tuned model.
        analyzer (:obj:`~camel_tools.morphology.analyzer.Analyzer`): Analyzer
            to use for providing full morphological analysis of a word.
        features (:obj:`list`, optional): A list of morphological features
            used in the model. Defaults to 14 features.
        top (:obj:`int`, optional): The maximum number of top analyses to
            return. Defaults to 1.
        scorer (:obj:`str`, optional): The scoring function that computes
            matches between the predicted features from the model and the
            output from the analyzer. If `uniform`, the scoring based on the
            uniform weight is used. Defaults to `uniform`.
        tie_breaker (:obj:`str`, optional): The tie breaker used in the feature
            match function. If `tag`, tie breaking based on the unfactored tag
            MLE and factored tag MLE is used. Defaults to `tag`.
        use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
            Defaults to True.
        batch_size (:obj:`int`, optional): The batch size. Defaults to 32.
        ranking_cache (:obj:`LFUCache`, optional): The cache of pre-computed
            scored analyses. Defaults to `None`.
        ranking_cache_size (:obj:`int`, optional): The number of unique word
            disambiguations to cache. If 0, no ranked analyses will be cached.
            The cache uses a least-frequently-used eviction policy.
            Defaults to 100000.
    """
    def __init__(self, model_path, analyzer,
                 features=FEATURE_SET_MAP['feats_14'], top=1,
                 scorer='uniform', tie_breaker='tag', use_gpu=True,
                 batch_size=32, ranking_cache=None, ranking_cache_size=100000):
        self._model = {
            'unfactored': _BERTFeatureTagger(model_path, use_gpu=use_gpu)
        }
        self._analyzer = analyzer
        self._features = features
        # At least one analysis is always returned per word.
        self._top = max(top, 1)
        self._scorer = _SCORING_FUNCTION_MAP.get(scorer, None)
        self._tie_breaker = tie_breaker
        self._use_gpu = use_gpu
        self._batch_size = batch_size
        # MLE model consulted by the scorer for tie-breaking.
        self._mle = _read_json(f'{model_path}/mle_model.json')
        # Select the cached or uncached per-word disambiguation routine up
        # front so the hot path does not re-check the configuration.
        if ranking_cache is None:
            if ranking_cache_size <= 0:
                self._ranking_cache = None
                self._disambiguate_word_fn = self._disambiguate_word
            else:
                self._ranking_cache = LFUCache(ranking_cache_size)
                self._disambiguate_word_fn = self._disambiguate_word_cached
        else:
            self._ranking_cache = ranking_cache
            self._disambiguate_word_fn = self._disambiguate_word_cached
    @staticmethod
    def pretrained(model_name='msa', top=1, use_gpu=True, batch_size=32,
                   cache_size=10000, pretrained_cache=True,
                   ranking_cache_size=100000):
        """Load a pre-trained model provided with camel_tools.
        Args:
            model_name (:obj:`str`, optional): Name of pre-trained model to
                load. Three models are available: 'msa', 'egy', and 'glf'.
                Defaults to `msa`.
            top (:obj:`int`, optional): The maximum number of top analyses to
                return. Defaults to 1.
            use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
                Defaults to True.
            batch_size (:obj:`int`, optional): The batch size. Defaults to 32.
            cache_size (:obj:`int`, optional): If greater than zero, then
                the analyzer will cache the analyses for the cache_size most
                frequent words, otherwise no analyses will be cached.
                Defaults to 10000.
            pretrained_cache (:obj:`bool`, optional): The flag to use a
                pretrained cache that stores ranked analyses.
                Defaults to True.
            ranking_cache_size (:obj:`int`, optional): The number of unique
                word disambiguations to cache. If 0, no ranked analyses will be
                cached. The cache uses a least-frequently-used eviction policy.
                This argument is ignored if pretrained_cache is True.
                Defaults to 100000.
        Returns:
            :obj:`BERTUnfactoredDisambiguator`: Instance with loaded
            pre-trained model.
        """
        model_info = CATALOGUE.get_dataset('DisambigBertUnfactored',
                                           model_name)
        model_config = _read_json(Path(model_info.path, 'default_config.json'))
        model_path = str(model_info.path)
        features = FEATURE_SET_MAP[model_config['feature']]
        db = MorphologyDB.builtin_db(model_config['db_name'], 'a')
        analyzer = Analyzer(db, backoff=model_config['backoff'],
                            cache_size=cache_size)
        scorer = model_config['scorer']
        tie_breaker = model_config['tie_breaker']
        if pretrained_cache:
            # Load the shipped cache of pre-ranked analyses.
            cache_info = CATALOGUE.get_dataset('DisambigRankingCache',
                                               model_config['ranking_cache'])
            cache_path = Path(cache_info.path, 'default_cache.pickle')
            with open(cache_path, 'rb') as f:
                ranking_cache = pickle.load(f)
        else:
            ranking_cache = None
        return BERTUnfactoredDisambiguator(
            model_path,
            analyzer,
            top=top,
            features=features,
            scorer=scorer,
            tie_breaker=tie_breaker,
            use_gpu=use_gpu,
            batch_size=batch_size,
            ranking_cache=ranking_cache,
            ranking_cache_size=ranking_cache_size)
    @staticmethod
    def _pretrained_from_config(config, top=1, use_gpu=True, batch_size=32,
                                cache_size=10000, pretrained_cache=True,
                                ranking_cache_size=100000):
        """Load a pre-trained model from a config file.
        Args:
            config (:obj:`str`): Config file that defines the model details.
            top (:obj:`int`, optional): The maximum number of top analyses
                to return. Defaults to 1.
            use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
                Defaults to True.
            batch_size (:obj:`int`, optional): The batch size. Defaults to 32.
            cache_size (:obj:`int`, optional): If greater than zero, then
                the analyzer will cache the analyses for the cache_size
                most frequent words, otherwise no analyses will be cached.
                Defaults to 10000.
            pretrained_cache (:obj:`bool`, optional): The flag to use a
                pretrained cache that stores ranked analyses.
                Defaults to True.
            ranking_cache_size (:obj:`int`, optional): The number of unique
                word disambiguations to cache. If 0, no ranked analyses will be
                cached. The cache uses a least-frequently-used eviction policy.
                This argument is ignored if pretrained_cache is True.
                Defaults to 100000.
        Returns:
            :obj:`BERTUnfactoredDisambiguator`: Instance with loaded
            pre-trained model.
        """
        model_config = _read_json(config)
        model_path = model_config['model_path']
        features = FEATURE_SET_MAP[model_config['feature']]
        db = MorphologyDB(model_config['db_path'], 'a')
        analyzer = Analyzer(db,
                            backoff=model_config['backoff'],
                            cache_size=cache_size)
        scorer = model_config['scorer']
        tie_breaker = model_config['tie_breaker']
        if pretrained_cache:
            cache_path = model_config['ranking_cache']
            with open(cache_path, 'rb') as f:
                ranking_cache = pickle.load(f)
        else:
            ranking_cache = None
        return BERTUnfactoredDisambiguator(
            model_path,
            analyzer,
            top=top,
            features=features,
            scorer=scorer,
            tie_breaker=tie_breaker,
            use_gpu=use_gpu,
            batch_size=batch_size,
            ranking_cache=ranking_cache,
            ranking_cache_size=ranking_cache_size)
    def _predict_sentences(self, sentences):
        """Predict the morphosyntactic labels of a list of sentences.
        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`dict`: The predicted
            morphosyntactic labels for the given sentences.
        """
        preds = self._model['unfactored'].predict(sentences, self._batch_size)
        parsed_predictions = []
        for sent, pred in zip(sentences, preds):
            parsed_prediction = []
            # NOTE: the inner loop variable shadows the outer `pred`; each
            # item here is a single word's unfactored tag string.
            for word, pred in zip(sent, pred):
                d = {}
                # Unfactored tags look like 'feat:value__feat:value__...';
                # split them back into individual feature-value pairs.
                for feat in pred.split('__'):
                    f, v = feat.split(':')
                    d[f] = v
                d['lex'] = word # Copy the word when analyzer is not used
                d['diac'] = word # Copy the word when analyzer is not used
                parsed_prediction.append(d)
            parsed_predictions.append(parsed_prediction)
        return parsed_predictions
    def _predict_sentence(self, sentence):
        """Predict the morphosyntactic labels of a single sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): The input sentence.
        Returns:
            :obj:`list` of :obj:`dict`: The predicted morphosyntactic labels
            for the given sentence.
        """
        parsed_predictions = []
        model = self._model['unfactored']
        preds = model.predict([sentence], self._batch_size)[0]
        for word, pred in zip(sentence, preds):
            d = {}
            # Split the unfactored tag string into feature-value pairs.
            for feat in pred.split('__'):
                f, v = feat.split(':')
                d[f] = v
            d['lex'] = word # Copy the word when analyzer is not used
            d['diac'] = word # Copy the word when analyzer is not used
            parsed_predictions.append(d)
        return parsed_predictions
    def _scored_analyses(self, word_dd, prediction):
        """Score the analyzer's analyses of *word_dd* against the BERT
        prediction and return them sorted best-first.
        Args:
            word_dd (:obj:`str`): The dediacritized word.
            prediction (:obj:`dict`): The feature values predicted by BERT.
        Returns:
            :obj:`list` of :obj:`ScoredAnalysis`: The sorted scored analyses.
        """
        bert_analysis = prediction
        analyses = self._analyzer.analyze(word_dd)
        if len(analyses) == 0:
            # If the word is not found in the analyzer,
            # return the predictions from BERT
            # (-99 is a sentinel log probability for "unknown").
            return [ScoredAnalysis(0, # score
                                   bert_analysis, # analysis
                                   word_dd, # diac
                                   -99, # pos_lex_logprob
                                   -99, # lex_logprob
                                   )]
        scored = [(self._scorer(a,
                                bert_analysis,
                                self._mle,
                                tie_breaker=self._tie_breaker,
                                features=self._features), a)
                  for a in analyses]
        max_score = max(s[0] for s in scored)
        # Guard against division by zero when no analysis matched any
        # predicted feature.
        if max_score == 0:
            max_score = 1
        # Normalize scores to [0, 1] relative to the best match.
        scored_analyses = [
            ScoredAnalysis(
                s / max_score,  # score
                a,  # analysis
                a['diac'],  # diac
                a.get('pos_lex_logprob', -99),  # pos_lex_logprob
                a.get('lex_logprob', -99),  # lex_logprob
            ) for s, a in scored]
        # ScoredAnalysis.__lt__ orders best-first (highest score first).
        scored_analyses.sort()
        return scored_analyses
    def _disambiguate_word(self, word, pred):
        # Rank all analyses for a single word and keep the top ones.
        scored_analyses = self._scored_analyses(word, pred)
        return DisambiguatedWord(word, scored_analyses[:self._top])
    def _disambiguate_word_cached(self, word, pred):
        # Create a key for caching scored analysis given word and bert
        # predictions
        key = (word, tuple(pred[feat] for feat in self._features))
        if key in self._ranking_cache:
            scored_analyses = self._ranking_cache[key]
        else:
            scored_analyses = self._scored_analyses(word, pred)
            self._ranking_cache[key] = scored_analyses
        return DisambiguatedWord(word, scored_analyses[:self._top])
    def disambiguate_word(self, sentence, word_ndx):
        """Disambiguates a single word of a sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): The input sentence.
            word_ndx (:obj:`int`): The index of the word token in `sentence` to
                disambiguate.
        Returns:
            :obj:`~camel_tools.disambig.common.DisambiguatedWord`: The
            disambiguation of the word token in `sentence` at `word_ndx`.
        """
        # The whole sentence must be tagged for context anyway; pick out
        # the requested word afterwards.
        return self.disambiguate(sentence)[word_ndx]
    def disambiguate(self, sentence):
        """Disambiguate all words of a single sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): The input sentence.
        Returns:
            :obj:`list` of :obj:`~camel_tools.disambig.common.DisambiguatedWord`: The
            disambiguated analyses for the given sentence.
        """
        # BERT predictions are made on the dediacritized sentence, but the
        # original (possibly diacritized) words are kept in the output.
        dediaced_sentence = _dediac_sentence(sentence)
        predictions = self._predict_sentence(dediaced_sentence)
        return [self._disambiguate_word_fn(w, p)
                for (w, p) in zip(sentence, predictions)]
    def disambiguate_sentences(self, sentences):
        """Disambiguate all words of a list of sentences.
        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`~camel_tools.disambig.common.DisambiguatedWord`: The
            disambiguated analyses for the given sentences.
        """
        dediaced_sentences = [_dediac_sentence(s) for s in sentences]
        predictions = self._predict_sentences(dediaced_sentences)
        disambiguated_sentences = []
        for sentence, prediction in zip(sentences, predictions):
            disambiguated_sentence = [
                self._disambiguate_word_fn(w, p)
                for (w, p) in zip(sentence, prediction)
            ]
            disambiguated_sentences.append(disambiguated_sentence)
        return disambiguated_sentences
    def tag_sentences(self, sentences, use_analyzer=True):
        """Predict the morphosyntactic labels of a list of sentences.
        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
            use_analyzer (:obj:`bool`): The flag to use an analyzer or not.
                If set to False, we return the original input as diac and lex.
                Defaults to True.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`dict`: The list of feature
            tags for each word in the given sentences
        """
        if use_analyzer:
            tagged_sentences = []
            for prediction in self.disambiguate_sentences(sentences):
                # Keep only the single best analysis per word.
                tagged_sentence = [a.analyses[0].analysis for a in prediction]
                tagged_sentences.append(tagged_sentence)
            return tagged_sentences
        return self._predict_sentences(sentences)
    def tag_sentence(self, sentence, use_analyzer=True):
        """Predict the morphosyntactic labels of a single sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): The list of space and
                punctuation separated list of tokens comprising a given
                sentence.
            use_analyzer (:obj:`bool`): The flag to use an analyzer or not.
                If set to False, we return the original input as diac and lex.
                Defaults to True.
        Returns:
            :obj:`list` of :obj:`dict`: The list of feature tags for each word
            in the given sentence
        """
        if use_analyzer:
            return [a.analyses[0].analysis
                    for a in self.disambiguate(sentence)]
        return self._predict_sentence(sentence)
    def all_feats(self):
        """Return a set of all features produced by this disambiguator.
        Returns:
            :obj:`frozenset` of :obj:`str`: The set of all features produced
            by this disambiguator.
        """
        return self._analyzer.all_feats()
    def tok_feats(self):
        """Return a set of tokenization features produced by this
        disambiguator.
        Returns:
            :obj:`frozenset` of :obj:`str`: The set of tokenization features
            produced by this disambiguator.
        """
        return self._analyzer.tok_feats()
| 25,500 | 38.536434 | 100 | py |
camel_tools | camel_tools-master/camel_tools/disambig/bert/__init__.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from camel_tools.disambig.bert.unfactored import BERTUnfactoredDisambiguator
__all__ = [
'BERTUnfactoredDisambiguator',
]
| 1,279 | 40.290323 | 79 | py |
camel_tools | camel_tools-master/camel_tools/disambig/bert/_bert_morph_dataset.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
from torch.utils.data import Dataset
def _prepare_sentences(sentences, placeholder=''):
    """Encapsulate the input sentences into _PrepSentence objects.
    Args:
        sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
            sentences.
        placeholder (:obj:`str`, optional): The dummy label assigned to
            every word. Defaults to ''.
    Returns:
        :obj:`list` of :obj:`_PrepSentence`: The list of _PrepSentence
        objects.
    """
    # Sentence guids are 1-based, assigned in input order.
    return [
        _PrepSentence(guid=f"{idx}",
                      words=words,
                      labels=[placeholder] * len(words))
        for idx, words in enumerate(sentences, start=1)
    ]
class _PrepSentence:
    """A single input sentence for token classification.
    Args:
        guid (:obj:`str`): Unique id for the sentence.
        words (:obj:`list` of :obj:`str`): list of words of the sentence.
        labels (:obj:`list` of :obj:`str`): The labels for each word
            of the sentence.
    """
    def __init__(self, guid, words, labels):
        # Plain attribute container; no behavior beyond storage.
        self.guid = guid
        self.words = words
        self.labels = labels
class MorphDataset(Dataset):
"""Morph PyTorch Dataset
Args:
sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
sentences.
tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained tokenizer.
labels (:obj:`list` of :obj:`str`): The labels which the model was
trained to classify.
max_seq_length (:obj:`int`): Maximum sentence length.
"""
def __init__(self, sentences, tokenizer, labels, max_seq_length):
prepared_sentences = _prepare_sentences(sentences,
placeholder=labels[0])
# Use cross entropy ignore_index as padding label id so that only
# real label ids contribute to the loss later.
self.pad_token_label_id = nn.CrossEntropyLoss().ignore_index
self.features = self._featurize_input(
prepared_sentences,
labels,
max_seq_length,
tokenizer,
cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token,
pad_token=tokenizer.pad_token_id,
pad_token_segment_id=tokenizer.pad_token_type_id,
pad_token_label_id=self.pad_token_label_id,
)
def _featurize_input(self, prepared_sentences, label_list, max_seq_length,
tokenizer, cls_token="[CLS]", cls_token_segment_id=0,
sep_token="[SEP]", pad_token=0, pad_token_segment_id=0,
pad_token_label_id=-100, sequence_a_segment_id=0,
mask_padding_with_zero=True):
"""Featurizes the input which will be fed to the fine-tuned BERT model.
Args:
prepared_sentences (:obj:`list` of :obj:`PrepSentence`): list of
PrepSentence objects.
label_list (:obj:`list` of :obj:`str`): The labels which the model
was trained to classify.
max_seq_length (:obj:`int`): Maximum sequence length.
tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained
tokenizer.
cls_token (:obj:`str`): BERT's CLS token. Defaults to [CLS].
cls_token_segment_id (:obj:`int`): BERT's CLS token segment id.
Defaults to 0.
sep_token (:obj:`str`): BERT's CLS token. Defaults to [SEP].
pad_token (:obj:`int`): BERT's pading token. Defaults to 0.
pad_token_segment_id (:obj:`int`): BERT's pading token segment id.
Defaults to 0.
pad_token_label_id (:obj:`int`): BERT's pading token label id.
Defaults to -100.
sequence_a_segment_id (:obj:`int`): BERT's segment id.
Defaults to 0.
mask_padding_with_zero (:obj:`bool`): Whether to masks the padding
tokens with zero or not. Defaults to True.
Returns:
obj:`list` of :obj:`Dict`: list of dicts of the needed features.
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for sent_id, sentence in enumerate(prepared_sentences):
tokens = []
label_ids = []
for word, label in zip(sentence.words, sentence.labels):
word_tokens = tokenizer.tokenize(word)
# bert-base-multilingual-cased sometimes output "nothing ([])
# when calling tokenize with just a space.
if len(word_tokens) > 0:
tokens.append(word_tokens)
# Use the real label id for the first token of the word,
# and padding ids for the remaining tokens
label_ids.append([label_map[label]] +
[pad_token_label_id] *
(len(word_tokens) - 1))
token_segments = []
token_segment = []
label_ids_segments = []
label_ids_segment = []
num_word_pieces = 0
seg_seq_length = max_seq_length - 2
# Dealing with empty sentences
if len(tokens) == 0:
data = self._add_special_tokens(token_segment,
label_ids_segment,
tokenizer,
max_seq_length,
cls_token,
sep_token, pad_token,
cls_token_segment_id,
pad_token_segment_id,
pad_token_label_id,
sequence_a_segment_id,
mask_padding_with_zero)
# Adding sentence id
data['sent_id'] = sent_id
features.append(data)
else:
# Chunking the tokenized sentence into multiple segments
# if it's longer than max_seq_length - 2
for idx, word_pieces in enumerate(tokens):
if num_word_pieces + len(word_pieces) > seg_seq_length:
data = self._add_special_tokens(token_segment,
label_ids_segment,
tokenizer,
max_seq_length,
cls_token,
sep_token, pad_token,
cls_token_segment_id,
pad_token_segment_id,
pad_token_label_id,
sequence_a_segment_id,
mask_padding_with_zero)
# Adding sentence id
data['sent_id'] = sent_id
features.append(data)
token_segments.append(token_segment)
label_ids_segments.append(label_ids_segment)
token_segment = list(word_pieces)
label_ids_segment = list(label_ids[idx])
num_word_pieces = len(word_pieces)
else:
token_segment.extend(word_pieces)
label_ids_segment.extend(label_ids[idx])
num_word_pieces += len(word_pieces)
# Adding the last segment
if len(token_segment) > 0:
data = self._add_special_tokens(token_segment,
label_ids_segment,
tokenizer,
max_seq_length,
cls_token,
sep_token, pad_token,
cls_token_segment_id,
pad_token_segment_id,
pad_token_label_id,
sequence_a_segment_id,
mask_padding_with_zero)
# Adding sentence id
data['sent_id'] = sent_id
features.append(data)
token_segments.append(token_segment)
label_ids_segments.append(label_ids_segment)
# DEBUG: Making sure we got all segments correctly
# assert sum([len(_) for _ in label_ids_segments]) == \
# sum([len(_) for _ in label_ids])
# assert sum([len(_) for _ in token_segments]) == \
# sum([len(_) for _ in tokens])
return features
def _add_special_tokens(self, tokens, label_ids, tokenizer, max_seq_length,
cls_token, sep_token, pad_token,
cls_token_segment_id, pad_token_segment_id,
pad_token_label_id, sequence_a_segment_id,
mask_padding_with_zero):
_tokens = list(tokens)
_label_ids = list(label_ids)
_tokens += [sep_token]
_label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(_tokens)
_tokens = [cls_token] + _tokens
_label_ids = [pad_token_label_id] + _label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(_tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only
# real tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
_label_ids += [pad_token_label_id] * padding_length
return {'input_ids': torch.tensor(input_ids),
'attention_mask': torch.tensor(input_mask),
'token_type_ids': torch.tensor(segment_ids),
'label_ids': torch.tensor(_label_ids)}
    def __len__(self):
        # One item per pre-built (segmented, padded) feature dict.
        return len(self.features)
    def __getitem__(self, i):
        # Features are fully materialized up front; just index into them.
        return self.features[i]
| 12,226 | 43.300725 | 79 | py |
camel_tools | camel_tools-master/tests/test_stringutils.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for camel_tools.utils.stringutils
"""
from camel_tools.utils.stringutils import isunicode
class TestIsUnicodeString(object):
    """Test class for testing the isunicode function.
    """

    def test_isunicode_none(self):
        """Test that None is not a unicode string.
        """
        assert not isunicode(None)

    def test_isunicode_int(self):
        """Test that int (a primitive type) is not a unicode string.
        """
        assert not isunicode(0)

    def test_isunicode_list(self):
        """Test that a list (a Python object) is not a unicode string.
        """
        assert not isunicode(['hello', 'world'])

    def test_isunicode_byte(self):
        """Test that an explicit byte string is not a Unicode string.
        """
        assert not isunicode(b'Hello, world!')

    def test_isunicode_str(self):
        """Test that the default string type is a unicode string (true in
        Python 3, where str is unicode, but not in Python 2).
        """
        assert isunicode('Hello, world!')

    def test_isunicode_unicode(self):
        """Test that a unicode literal is a unicode string.
        """
        assert isunicode(u'Hello, world!')
| 2,321 | 31.25 | 79 | py |
camel_tools | camel_tools-master/tests/test_charmap.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for camel_tools.utils.charmap
"""
from __future__ import absolute_import, print_function
from collections.abc import Mapping
import pytest
from camel_tools.utils.charmap import CharMapper
from camel_tools.utils.charmap import InvalidCharMapKeyError
from camel_tools.utils.charmap import BuiltinCharMapNotFoundError
# A valid map used for testing CharMapper.map_string. Keys are either single
# characters or 'x-y' inclusive character ranges; the Arabic entries cover the
# Arabic-Indic digits (U+0660 - U+0669).
VALID_MAP = {
    'e': 'u',
    'h-m': '*',
    'a-d': 'm',
    '٠': '0',  # ARABIC-INDIC DIGIT ZERO (U+0660)
    '١': '1',  # ARABIC-INDIC DIGIT ONE (U+0661)
    '\u0662': '2',  # ARABIC-INDIC DIGIT TWO
    '\u0663-\u0665': '-',  # digits three through five
    '٦-٩': '+'  # digits six through nine
}
class AnotherMapping(Mapping):
    """Minimal dict-backed Mapping used to exercise CharMapper with a
    mapping type that is not a plain dict.
    """

    def __init__(self):
        self._store = {}

    def __getitem__(self, key):
        return self._store[key]

    def __setitem__(self, key, value):
        self._store[key] = value

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)
class TestCharMapperInit(object):
    """Test class for testing CharMapper initialization.
    """

    def test_init_none(self):
        """Test that init with None raises a TypeError.
        """
        with pytest.raises(TypeError):
            CharMapper(None)

    def test_init_empty_dict(self):
        """Test that init with an empty dict doesn't raise an exception.
        """
        assert CharMapper({})

    def test_init_dictlike_object(self):
        """Test that init with a dict-like object doesn't raise an exception.
        """
        assert CharMapper(AnotherMapping())

    def test_init_not_dict(self):
        """Test that a non-dict object (list) raises a TypeError.
        """
        with pytest.raises(TypeError):
            CharMapper([])

    def test_init_default_not_valid1(self):
        """Test that an invalid type (list) for default raises a TypeError.
        """
        with pytest.raises(TypeError):
            CharMapper({}, [])

    def test_init_default_not_valid2(self):
        """Test that an invalid type (byte string) for default raises a
        TypeError.
        """
        with pytest.raises(TypeError):
            CharMapper({}, b'Hello')

    def test_init_default_valid1(self):
        """Test that a None type for default doesn't raise an Exception.
        """
        assert CharMapper({}, None)

    def test_init_default_valid2(self):
        """Test that a Unicode string type for default doesn't raise an
        Exception.
        """
        assert CharMapper({}, 'Hello')

    def test_init_charmap_valid1(self):
        """Test that a valid charMap (single-character key) doesn't raise an
        Exception.
        """
        assert CharMapper({'a': 'Hello'})

    def test_init_charmap_valid2(self):
        """Test that a valid charMap (None value) doesn't raise an Exception.
        """
        assert CharMapper({'a': None})

    def test_init_charmap_valid3(self):
        """Test that a valid charMap (character-range key) doesn't raise an
        Exception.
        """
        assert CharMapper({'a-f': ''})

    def test_init_charmap_valid4(self):
        """Test that a valid charMap with a default doesn't raise an
        Exception.
        """
        assert CharMapper({'a-f': '', 'b': None}, 'Hello')

    def test_init_charmap_valid5(self):
        """Test that a valid charMap (range key starting with a literal '-')
        doesn't raise an Exception.
        """
        assert CharMapper({'--a': ''})

    def test_init_charmap_invalid1(self):
        """Test that an invalid key (byte string) type in charMap raises a
        TypeError.
        """
        with pytest.raises(TypeError):
            CharMapper({b'a': 'Hello'})

    def test_init_charmap_invalid2(self):
        """Test that an invalid value type (byte string) for a valid key
        (single Unicode character) in charMap raises a TypeError.
        """
        with pytest.raises(TypeError):
            CharMapper({'a': b'Hello'})

    def test_init_charmap_invalid3(self):
        """Test that an invalid key (Unicode character range with wrong
        order) in charMap raises an InvalidCharMapKeyError.
        """
        with pytest.raises(InvalidCharMapKeyError):
            CharMapper({'c-a': b'Hello'})

    def test_init_charmap_invalid4(self):
        """Test that an invalid key (neither a single Unicode character nor a
        Unicode character range) in charMap raises an InvalidCharMapKeyError.
        """
        with pytest.raises(InvalidCharMapKeyError):
            CharMapper({'cdsn': b'Hello'})

    def test_init_charmap_invalid5(self):
        """Test that an invalid key (neither a single Unicode character nor a
        Unicode character range) in charMap raises an InvalidCharMapKeyError.
        """
        with pytest.raises(InvalidCharMapKeyError):
            CharMapper({'a-': 'Hello'})

    def test_init_charmap_invalid6(self):
        """Test that an invalid key (neither a single Unicode character nor a
        Unicode character range) in charMap raises an InvalidCharMapKeyError.
        """
        with pytest.raises(InvalidCharMapKeyError):
            CharMapper({'a--': 'Hello'})

    def test_init_charmap_invalid7(self):
        """Test that an invalid value type (byte string) for a valid range
        key raises a TypeError.

        Fixes: method was misspelled 'test_init_charpap_invalid7' and its
        docstring wrongly claimed InvalidCharMapKeyError — the key '--a' is
        valid (see test_init_charmap_valid5); it is the byte-string value
        that triggers the TypeError asserted below.
        """
        with pytest.raises(TypeError):
            CharMapper({'--a': b'Hello'})
class TestCharMapperMapString(object):
    """Test class for testing CharMapper's map_string method.

    All tests use VALID_MAP, so 'e' maps to 'u', 'h'-'m' to '*', 'a'-'d' to
    'm', and the Arabic-Indic digits to '012---++++'; unmapped characters
    (e.g. upper-case letters, punctuation) pass through unchanged.
    """

    def test_mapstring_none(self):
        """Test that a None value causes the map_string method to raise a
        TypeError.
        """
        with pytest.raises(TypeError):
            mapper = CharMapper(VALID_MAP)
            mapper.map_string(None)

    def test_mapstring_empty_string(self):
        """Test that an empty string causes the map_string method to return an
        empty string.
        """
        mapper = CharMapper(VALID_MAP)
        assert mapper.map_string('') == ''

    def test_mapstring_not_unicode(self):
        """Test that a non-unicode (byte) string causes the map_string method
        to raise a TypeError.
        """
        with pytest.raises(TypeError):
            mapper = CharMapper(VALID_MAP)
            mapper.map_string(b'Hello, world!')

    def test_mapstring_english(self):
        """Test that map_string properly maps an English unicode string.
        """
        mapper = CharMapper(VALID_MAP)
        assert mapper.map_string('Hello, world!') == 'Hu**o, wor*m!'

    def test_mapstring_arabic(self):
        """Test that map_string properly maps an Arabic unicode string.
        """
        mapper = CharMapper(VALID_MAP)
        assert mapper.map_string('٠١٢٣٤٥٦٧٨٩') == '012---++++'
class TestCharMapperBuiltinMapper(object):
    """Test class for testing CharMapper's builtin_mapper method.

    There is one test per builtin transliteration scheme (every ordered pair
    of ar/bw/safebw/xmlbw/hsb, plus 'arclean'), each asserting that the
    scheme loads without errors.
    """

    def test_builtinmapper_ar2bw(self):
        """Test that the builtin 'ar2bw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('ar2bw')

    def test_builtinmapper_ar2safebw(self):
        """Test that the builtin 'ar2safebw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('ar2safebw')

    def test_builtinmapper_ar2xmlbw(self):
        """Test that the builtin 'ar2xmlbw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('ar2xmlbw')

    def test_builtinmapper_ar2hsb(self):
        """Test that the builtin 'ar2hsb' scheme is loaded without errors.
        """
        # Fixed copy-paste bug: this test previously loaded 'ar2bw', leaving
        # the 'ar2hsb' scheme untested.
        assert CharMapper.builtin_mapper('ar2hsb')

    def test_builtinmapper_bw2ar(self):
        """Test that the builtin 'bw2ar' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('bw2ar')

    def test_builtinmapper_bw2safebw(self):
        """Test that the builtin 'bw2safebw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('bw2safebw')

    def test_builtinmapper_bw2xmlbw(self):
        """Test that the builtin 'bw2xmlbw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('bw2xmlbw')

    def test_builtinmapper_bw2hsb(self):
        """Test that the builtin 'bw2hsb' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('bw2hsb')

    def test_builtinmapper_safebw2ar(self):
        """Test that the builtin 'safebw2ar' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('safebw2ar')

    def test_builtinmapper_safebw2bw(self):
        """Test that the builtin 'safebw2bw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('safebw2bw')

    def test_builtinmapper_safebw2xmlbw(self):
        """Test that the builtin 'safebw2xmlbw' scheme is loaded without
        errors.
        """
        assert CharMapper.builtin_mapper('safebw2xmlbw')

    def test_builtinmapper_safebw2hsb(self):
        """Test that the builtin 'safebw2hsb' scheme is loaded without
        errors.
        """
        assert CharMapper.builtin_mapper('safebw2hsb')

    def test_builtinmapper_xmlbw2ar(self):
        """Test that the builtin 'xmlbw2ar' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('xmlbw2ar')

    def test_builtinmapper_xmlbw2bw(self):
        """Test that the builtin 'xmlbw2bw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('xmlbw2bw')

    def test_builtinmapper_xmlbw2safebw(self):
        """Test that the builtin 'xmlbw2safebw' scheme is loaded without
        errors.
        """
        assert CharMapper.builtin_mapper('xmlbw2safebw')

    def test_builtinmapper_xmlbw2hsb(self):
        """Test that the builtin 'xmlbw2hsb' scheme is loaded without
        errors.
        """
        assert CharMapper.builtin_mapper('xmlbw2hsb')

    def test_builtinmapper_hsb2ar(self):
        """Test that the builtin 'hsb2ar' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('hsb2ar')

    def test_builtinmapper_hsb2bw(self):
        """Test that the builtin 'hsb2bw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('hsb2bw')

    def test_builtinmapper_hsb2safebw(self):
        """Test that the builtin 'hsb2safebw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('hsb2safebw')

    def test_builtinmapper_hsb2xmlbw(self):
        """Test that the builtin 'hsb2xmlbw' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('hsb2xmlbw')

    def test_builtinmapper_arclean(self):
        """Test that the builtin 'arclean' scheme is loaded without errors.
        """
        assert CharMapper.builtin_mapper('arclean')

    def test_builtinmapper_invalid(self):
        """Test that an invalid builtin scheme name raises a
        BuiltinCharMapNotFound exception.
        """
        with pytest.raises(BuiltinCharMapNotFoundError):
            CharMapper.builtin_mapper('hello')
| 12,276 | 29.46402 | 79 | py |
camel_tools | camel_tools-master/tests/test_meta.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This is a test module to make sure meta-data in all submodules are set
correctly.
"""
from __future__ import absolute_import, print_function
import os
import camel_tools as camelt
from camel_tools.cli import camel_calima_star
from camel_tools.cli import camel_transliterate
from camel_tools.cli import camel_arclean
# Read the canonical version string from the VERSION file shipped inside the
# installed camel_tools package; every __version__ below must match it.
VERSION_PATH = os.path.join(os.path.dirname(camelt.__file__), 'VERSION')
with open(VERSION_PATH, 'r', encoding='utf-8') as version_fp:
    VERSION = version_fp.read().strip()
def test_camel_tools_version():
    """Test that all module and CLI script versions are the same as the version
    file.
    """

    # A failure here means a release was tagged without bumping every
    # __version__ attribute to match the packaged VERSION file.
    assert(VERSION ==
           camelt.__version__ ==
           camel_calima_star.__version__ ==
           camel_transliterate.__version__ ==
           camel_arclean.__version__)
| 1,967 | 34.781818 | 79 | py |
camel_tools | camel_tools-master/tests/test_transliterate.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for camel_tools.transliterate.
"""
from __future__ import absolute_import
import pytest
from camel_tools.utils.charmap import CharMapper
from camel_tools.utils.transliterate import Transliterator
# A mapper that translates lower-case English characters to a lower-case x and
# upper-case English characters to an upper-case X. This makes it easy to
# predict what the transliteration should be. The default of None leaves
# unmapped characters (digits, punctuation, whitespace) unchanged, as the
# expected outputs in the tests below show.
TEST_MAP = {
    u'A-Z': u'X',
    u'a-z': u'x',
}

TEST_MAPPER = CharMapper(TEST_MAP, None)
class TestTransliteratorInit(object):
    """Test class for Transliterator.__init__.
    """

    def test_init_none_mapper(self):
        """Test that init raises a TypeError when given a mapper that is None.
        """
        with pytest.raises(TypeError):
            Transliterator(None)

    def test_init_invalid_type_mapper(self):
        """Test that init raises a TypeError when given a mapper that is not a
        CharMapper instance.
        """
        with pytest.raises(TypeError):
            Transliterator({})

    def test_init_valid_mapper(self):
        """Test that init doesn't raise an error when given a valid mapper.
        """
        assert Transliterator(TEST_MAPPER)

    def test_init_none_marker(self):
        """Test that init raises a TypeError when given a marker that is None.
        """
        with pytest.raises(TypeError):
            Transliterator(TEST_MAPPER, None)

    def test_init_invalid_type_marker(self):
        """Test that init raises a TypeError when given a marker that is not a
        string.
        """
        with pytest.raises(TypeError):
            Transliterator(TEST_MAPPER, [])

    def test_init_empty_marker(self):
        """Test that init raises a ValueError when given a marker that is an
        empty string.
        """
        with pytest.raises(ValueError):
            Transliterator(TEST_MAPPER, '')

    def test_init_invalid_marker1(self):
        """Test that init raises a ValueError when given an invalid marker
        (whitespace in the middle).
        """
        with pytest.raises(ValueError):
            Transliterator(TEST_MAPPER, '@@LAT @@')

    def test_init_invalid_marker2(self):
        """Test that init raises a ValueError when given an invalid marker
        (whitespace at the end).
        """
        with pytest.raises(ValueError):
            Transliterator(TEST_MAPPER, '@@LAT@@ ')

    def test_init_invalid_marker3(self):
        """Test that init raises a ValueError when given an invalid marker
        (whitespace at the beginning).
        """
        with pytest.raises(ValueError):
            Transliterator(TEST_MAPPER, ' @@LAT@@')

    def test_init_valid_marker1(self):
        """Test that init doesn't raise an error when given a valid marker.
        """
        assert Transliterator(TEST_MAPPER, '@@LAT@@')

    def test_init_valid_marker2(self):
        """Test that init doesn't raise an error when given a valid marker
        (unicode literal).
        """
        assert Transliterator(TEST_MAPPER, u'@@LAT@@')
class TestTransliteratorTranslate(object):
    """Test class for Transliterator.translate.

    '@@' is used as the marker throughout; tokens starting with it are
    excluded from transliteration unless ignore_markers is True.
    """

    def test_trans_empty(self):
        """Test that transliterating an empty string returns an empty string.
        """
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(u'') == u''

    def test_trans_single_no_markers(self):
        """Test that a single word with no markers gets transliterated.
        """
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(u'Hello') == u'Xxxxx'

    def test_trans_single_with_markers(self):
        """Test that a single word with markers does not get transliterated.
        """
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(u'@@Hello') == u'@@Hello'

    def test_trans_single_strip(self):
        """Test that a single word with markers does not get transliterated
        but markers do get stripped when strip_markers is set to True.
        """
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(u'@@Hello', True) == u'Hello'

    def test_trans_single_ignore(self):
        """Test that a single word with markers gets transliterated when
        ignore_markers is set to True.
        """
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(u'@@Hello', False, True) == u'@@Xxxxx'

    def test_trans_single_ignore_strip(self):
        """Test that a single word with markers gets transliterated with
        markers stripped when both strip_markers and ignore_markers are set to
        True.
        """
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(u'@@Hello', True, True) == u'Xxxxx'

    def test_trans_sent_no_markers(self):
        """Test that a sentence with no markers gets transliterated.
        """
        sent_orig = u'Hello World, this is a sentence!'
        sent_out = u'Xxxxx Xxxxx, xxxx xx x xxxxxxxx!'
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(sent_orig) == sent_out

    def test_trans_sent_with_markers(self):
        """Test that tokens with markers in a sentence do not get
        transliterated.
        """
        sent_orig = u'Hello @@World, this is a @@sentence!'
        sent_out = u'Xxxxx @@World, xxxx xx x @@sentence!'
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(sent_orig) == sent_out

    def test_trans_sent_strip(self):
        """Test that tokens with markers in a sentence do not get
        transliterated but markers do get stripped when strip_markers is set
        to True.
        """
        sent_orig = u'Hello @@World, this is a @@sentence!'
        sent_out = u'Xxxxx World, xxxx xx x sentence!'
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(sent_orig, True) == sent_out

    def test_trans_sent_ignore(self):
        """Test that tokens with markers in a sentence get transliterated
        when ignore_markers is set to True.
        """
        sent_orig = u'Hello @@World, this is a @@sentence!'
        sent_out = u'Xxxxx @@Xxxxx, xxxx xx x @@xxxxxxxx!'
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(sent_orig, False, True) == sent_out

    def test_trans_sent_ignore_strip(self):
        """Test that tokens with markers in a sentence get transliterated with
        markers stripped when both strip_markers and ignore_markers are set to
        True.
        """
        sent_orig = u'Hello @@World, this is a @@sentence!'
        sent_out = u'Xxxxx Xxxxx, xxxx xx x xxxxxxxx!'
        trans = Transliterator(TEST_MAPPER, '@@')
        assert trans.transliterate(sent_orig, True, True) == sent_out
| 7,987 | 32.704641 | 79 | py |
camel_tools | camel_tools-master/tests/__init__.py | # MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| 1,125 | 50.181818 | 79 | py |
camel_tools | camel_tools-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Extension Imports -------------------------------------------------------
from recommonmark.parser import CommonMarkParser
# -- Project information -----------------------------------------------------
project = 'camel_tools'
copyright = '2018-2022, New York University Abu Dhabi'
author = 'Ossama W. Obeid'

# The short X.Y version
version = '1.5'
# The full version, including alpha/beta/rc tags
release = '1.5.2'


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.napoleon',
    'sphinx.ext.autodoc',
    'sphinx.ext.githubpages',
    'sphinx.ext.intersphinx'
]

# Cross-reference targets in the Python standard library docs.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None)
}

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# Source parsers
# NOTE(review): the source_parsers mechanism is deprecated in newer Sphinx
# releases (recommonmark is registered via extensions instead) -- confirm
# against the Sphinx version pinned for the docs build.
source_parsers = {
    '.md': CommonMarkParser,
}

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on language = None and falls back to 'en';
# consider setting language = 'en' explicitly.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'camel_tools_doc'


# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'camel_tools.tex', 'CAMeL Tools Documentation',
     'Ossama W. Obeid', 'manual'),
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'camel_tools', 'camel_tools Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'camel_tools', 'camel_tools Documentation',
     author, 'camel_tools',
     'A suite of Arabic natural language processing tools developed by the '
     'CAMeL Lab at New York University Abu Dhabi.',
     'Miscellaneous'),
]


# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']


# -- Extension configuration -------------------------------------------------

# kenlm is an optional binary dependency; mocking it lets autodoc import
# modules that use it without the library being installed.
autodoc_mock_imports = [
    'kenlm'
]
| 5,775 | 28.025126 | 79 | py |
femocs | femocs-main/in/flat.py | #!/usr/bin/python
import numpy as np

# Half-width of the square patch and the number of grid points per side.
width = 30
Nwidth = 8

# Regular Nwidth x Nwidth grid over [-width, width]^2 at z = 0.
coords = np.linspace(-width, width, Nwidth)
grid_x, grid_y = np.meshgrid(coords, coords)
grid_z = np.zeros_like(grid_x)

flat_x = grid_x.reshape(-1)
flat_y = grid_y.reshape(-1)
flat_z = grid_z.reshape(-1)

# Emit the points in .ckx format: a count line, a properties line, then one
# "type x y z" line per point (type 2).
with open("flat.ckx", "w") as out:
    out.write("%d\nMedium properties=type:I:1:pos:R:3\n" % len(flat_x))
    for px, py, pz in zip(flat_x, flat_y, flat_z):
        out.write("2 %e %e %e\n" % (px, py, pz))
| 381 | 19.105263 | 59 | py |
cqr | cqr-master/nonconformist/nc.py | #!/usr/bin/env python
"""
Nonconformity functions.
"""
# Authors: Henrik Linusson
# Yaniv Romano modified RegressorNc class to include CQR
from __future__ import division
import abc
import numpy as np
import sklearn.base
from nonconformist.base import ClassifierAdapter, RegressorAdapter
from nonconformist.base import OobClassifierAdapter, OobRegressorAdapter
# -----------------------------------------------------------------------------
# Error functions
# -----------------------------------------------------------------------------
class ClassificationErrFunc(object, metaclass=abc.ABCMeta):
    """Base class for classification model error functions.

    Note: this previously set ``__metaclass__ = abc.ABCMeta`` in the class
    body, which is the Python 2 idiom and is silently ignored on Python 3,
    so ``@abc.abstractmethod`` was never enforced. Declaring the metaclass
    in the class header restores the intended abstract behavior; concrete
    subclasses must implement :meth:`apply`.
    """

    def __init__(self):
        super(ClassificationErrFunc, self).__init__()

    @abc.abstractmethod
    def apply(self, prediction, y):
        """Apply the nonconformity function.

        Parameters
        ----------
        prediction : numpy array of shape [n_samples, n_classes]
            Class probability estimates for each sample.

        y : numpy array of shape [n_samples]
            True output labels of each sample.

        Returns
        -------
        nc : numpy array of shape [n_samples]
            Nonconformity scores of the samples.
        """
        pass
class RegressionErrFunc(object, metaclass=abc.ABCMeta):
    """Base class for regression model error functions.

    Note: this previously set ``__metaclass__ = abc.ABCMeta`` in the class
    body, which is the Python 2 idiom and is silently ignored on Python 3,
    so ``@abc.abstractmethod`` was never enforced. Declaring the metaclass
    in the class header restores the intended abstract behavior; concrete
    subclasses must implement :meth:`apply` and :meth:`apply_inverse`.
    """

    def __init__(self):
        super(RegressionErrFunc, self).__init__()

    @abc.abstractmethod
    def apply(self, prediction, y):
        """Apply the nonconformity function.

        Parameters
        ----------
        prediction : numpy array of shape [n_samples, n_classes]
            Class probability estimates for each sample.

        y : numpy array of shape [n_samples]
            True output labels of each sample.

        Returns
        -------
        nc : numpy array of shape [n_samples]
            Nonconformity scores of the samples.
        """
        pass

    @abc.abstractmethod
    def apply_inverse(self, nc, significance):
        """Apply the inverse of the nonconformity function (i.e.,
        calculate prediction interval).

        Parameters
        ----------
        nc : numpy array of shape [n_calibration_samples]
            Nonconformity scores obtained for conformal predictor.

        significance : float
            Significance level (0, 1).

        Returns
        -------
        interval : numpy array of shape [n_samples, 2]
            Minimum and maximum interval boundaries for each prediction.
        """
        pass
class InverseProbabilityErrFunc(ClassificationErrFunc):
    """Calculates the probability of not predicting the correct class.

    For each correct output in ``y``, nonconformity is defined as

    .. math::
        1 - \hat{P}(y_i | x) \, .
    """

    def __init__(self):
        super(InverseProbabilityErrFunc, self).__init__()

    def apply(self, prediction, y):
        # Collect P(y_i | x_i) for every sample; labels outside the class
        # range contribute a probability of zero (prob is zero-initialized).
        n_classes = prediction.shape[1]
        prob = np.zeros(y.size, dtype=np.float32)
        for idx in range(y.size):
            label = y[idx]
            if label < n_classes:
                prob[idx] = prediction[idx, int(label)]
        return 1 - prob
class MarginErrFunc(ClassificationErrFunc):
    """
    Calculates the margin error.

    For each correct output in ``y``, nonconformity is defined as

    .. math::
        0.5 - \dfrac{\hat{P}(y_i | x) - max_{y \, != \, y_i} \hat{P}(y | x)}{2}
    """

    def __init__(self):
        super(MarginErrFunc, self).__init__()

    def apply(self, prediction, y):
        """Return the margin nonconformity of each sample.

        Bug fix: the original implementation wrote ``-inf`` over the
        true-class probability directly in the caller's ``prediction`` array,
        corrupting it as a side effect. We now operate on a copy; the
        returned scores are unchanged.
        """
        prediction = np.array(prediction, copy=True)
        prob = np.zeros(y.size, dtype=np.float32)
        for i, y_ in enumerate(y):
            if y_ >= prediction.shape[1]:
                prob[i] = 0
            else:
                prob[i] = prediction[i, int(y_)]
                # Mask the true class (on the copy) so max() below yields the
                # highest *other* class probability.
                prediction[i, int(y_)] = -np.inf
        return 0.5 - ((prob - prediction.max(axis=1)) / 2)
class AbsErrorErrFunc(RegressionErrFunc):
    r"""Absolute-error nonconformity for regression problems.

    For each correct output in ``y``, nonconformity is defined as

    .. math::
        | y_i - \hat{y}_i |
    """

    def __init__(self):
        super(AbsErrorErrFunc, self).__init__()

    def apply(self, prediction, y):
        # Symmetric residual magnitude.
        return np.abs(prediction - y)

    def apply_inverse(self, nc, significance):
        # Rank calibration scores largest-first and pick the score at the
        # requested significance level.
        ranked = np.sort(nc)[::-1]
        cutoff = int(np.floor(significance * (ranked.size + 1))) - 1
        # TODO: should probably warn against too few calibration examples
        cutoff = min(max(cutoff, 0), ranked.size - 1)
        # The same correction applies to both interval boundaries.
        return np.vstack([ranked[cutoff], ranked[cutoff]])
class SignErrorErrFunc(RegressionErrFunc):
    r"""Signed-error nonconformity for regression problems.

    For each correct output in ``y``, nonconformity is defined as

    .. math::
        y_i - \hat{y}_i

    References
    ----------
    .. [1] Linusson, Henrik, Ulf Johansson, and Tuve Lofstrom.
        Signed-error conformal regression. Pacific-Asia Conference on Knowledge
        Discovery and Data Mining. Springer International Publishing, 2014.
    """

    def __init__(self):
        super(SignErrorErrFunc, self).__init__()

    def apply(self, prediction, y):
        return prediction - y

    def apply_inverse(self, nc, significance):
        # Pair each score with its negation: column 0 drives the lower
        # boundary, column 1 the upper one; sort each column on its own.
        lower = np.reshape(nc, (nc.shape[0], 1))
        upper = np.reshape(-nc, (nc.shape[0], 1))
        columns = np.sort(np.concatenate((lower, upper), 1), 0)
        # Two-sided correction: the significance mass is split over tails.
        pos = int(np.ceil((1 - significance / 2) * (columns.shape[0] + 1))) - 1
        pos = min(max(pos, 0), columns.shape[0] - 1)
        return np.vstack([columns[pos, 0], columns[pos, 1]])
# CQR symmetric error function
class QuantileRegErrFunc(RegressionErrFunc):
    r"""Conformalized quantile regression (CQR) error, symmetric variant.

    For each correct output in ``y``, nonconformity is defined as

    .. math::
        max{\hat{q}_low - y, y - \hat{q}_high}
    """

    def __init__(self):
        super(QuantileRegErrFunc, self).__init__()

    def apply(self, prediction, y):
        # First column is the low quantile estimate, last is the high one;
        # the score is positive only when y falls outside the band.
        q_low = prediction[:, 0]
        q_high = prediction[:, -1]
        return np.maximum(y - q_high, q_low - y)

    def apply_inverse(self, nc, significance):
        ordered = np.sort(nc, 0)
        pos = int(np.ceil((1 - significance) * (ordered.shape[0] + 1))) - 1
        pos = min(max(pos, 0), ordered.shape[0] - 1)
        # A single correction widens both interval endpoints equally.
        return np.vstack([ordered[pos], ordered[pos]])
# CQR asymmetric error function
class QuantileRegAsymmetricErrFunc(RegressionErrFunc):
    r"""Conformalized quantile regression (CQR) error, asymmetric variant.

    For each correct output in ``y``, nonconformity is defined as

    .. math::
        E_low = \hat{q}_low - y
        E_high = y - \hat{q}_high
    """

    def __init__(self):
        super(QuantileRegAsymmetricErrFunc, self).__init__()

    def apply(self, prediction, y):
        # Keep the two tail errors separate so each boundary can be
        # calibrated independently.
        q_low = prediction[:, 0]
        q_high = prediction[:, -1]
        e_low = np.reshape(q_low - y, (q_low.shape[0], 1))
        e_high = np.reshape(y - q_high, (q_high.shape[0], 1))
        return np.concatenate((e_low, e_high), 1)

    def apply_inverse(self, nc, significance):
        ordered = np.sort(nc, 0)
        pos = int(np.ceil((1 - significance / 2) * (ordered.shape[0] + 1))) - 1
        pos = min(max(pos, 0), ordered.shape[0] - 1)
        # Column 0: lower-boundary correction; column 1: upper-boundary.
        return np.vstack([ordered[pos, 0], ordered[pos, 1]])
# -----------------------------------------------------------------------------
# Base nonconformity scorer
# -----------------------------------------------------------------------------
class BaseScorer(sklearn.base.BaseEstimator):
    """Abstract interface for nonconformity scorers.

    Concrete scorers implement ``fit`` (learn from examples) and
    ``score`` (compute per-sample nonconformity).
    """
    __metaclass__ = abc.ABCMeta  # Python-2 style ABC marker (inert on py3)

    def __init__(self):
        super(BaseScorer, self).__init__()

    @abc.abstractmethod
    def fit(self, x, y):
        pass

    @abc.abstractmethod
    def score(self, x, y=None):
        pass
class RegressorNormalizer(BaseScorer):
    """Difficulty estimator used for normalized conformal regression.

    Trains ``normalizer_model`` to predict the absolute error that
    ``base_model`` makes on each example; those predictions then scale
    the nonconformity scores.
    """

    def __init__(self, base_model, normalizer_model, err_func):
        super(RegressorNormalizer, self).__init__()
        self.base_model = base_model
        self.normalizer_model = normalizer_model
        self.err_func = err_func

    def fit(self, x, y):
        # The normalizer's training target is the magnitude of the base
        # model's error on each example.
        base_prediction = self.base_model.predict(x)
        target = np.abs(self.err_func.apply(base_prediction, y))
        ######################################################################
        # Optional: use logarithmic function as in the original implementation
        # available in https://github.com/donlnz/nonconformist
        #
        # CODE:
        # target += 0.00001  # Add small term to avoid log(0)
        # target = np.log(target)
        ######################################################################
        self.normalizer_model.fit(x, target)

    def score(self, x, y=None):
        ######################################################################
        # Optional: use logarithmic function as in the original implementation
        # available in https://github.com/donlnz/nonconformist
        #
        # CODE:
        # return np.exp(self.normalizer_model.predict(x))
        ######################################################################
        return np.abs(self.normalizer_model.predict(x))
class NcFactory(object):
    """Factory that wires a raw sklearn model into the matching
    nonconformity scorer (ClassifierNc or RegressorNc)."""
    @staticmethod
    def create_nc(model, err_func=None, normalizer_model=None, oob=False):
        """Create a nonconformity scorer appropriate for ``model``.

        Parameters
        ----------
        model : sklearn estimator
            Underlying model; dispatched on ClassifierMixin/RegressorMixin.
        err_func : ClassificationErrFunc or RegressionErrFunc, optional
            Error function; defaults to MarginErrFunc for classifiers and
            AbsErrorErrFunc for regressors.
        normalizer_model : sklearn regressor, optional
            If given, nonconformity scores are normalized by this model's
            predicted difficulty (via RegressorNormalizer).
        oob : bool
            If True, wrap the model in an out-of-bag adapter; raises
            AttributeError if the fitted model exposes no OOB attribute.

        Returns
        -------
        ClassifierNc or RegressorNc
            Note: returns None if ``model`` is neither a ClassifierMixin
            nor a RegressorMixin instance.
        """
        if normalizer_model is not None:
            normalizer_adapter = RegressorAdapter(normalizer_model)
        else:
            normalizer_adapter = None
        if isinstance(model, sklearn.base.ClassifierMixin):
            err_func = MarginErrFunc() if err_func is None else err_func
            if oob:
                # Fit a throwaway clone on a tiny two-point problem purely
                # to check whether OOB estimates appear after fitting.
                c = sklearn.base.clone(model)
                c.fit([[0], [1]], [0, 1])
                if hasattr(c, 'oob_decision_function_'):
                    adapter = OobClassifierAdapter(model)
                else:
                    raise AttributeError('Cannot use out-of-bag '
                                         'calibration with {}'.format(
                                             model.__class__.__name__
                                         ))
            else:
                adapter = ClassifierAdapter(model)
            if normalizer_adapter is not None:
                # NOTE(review): the classification err_func is also handed
                # to RegressorNormalizer here — confirm intended upstream.
                normalizer = RegressorNormalizer(adapter,
                                                 normalizer_adapter,
                                                 err_func)
                return ClassifierNc(adapter, err_func, normalizer)
            else:
                return ClassifierNc(adapter, err_func)
        elif isinstance(model, sklearn.base.RegressorMixin):
            err_func = AbsErrorErrFunc() if err_func is None else err_func
            if oob:
                # Same OOB capability probe as above, for regressors.
                c = sklearn.base.clone(model)
                c.fit([[0], [1]], [0, 1])
                if hasattr(c, 'oob_prediction_'):
                    adapter = OobRegressorAdapter(model)
                else:
                    raise AttributeError('Cannot use out-of-bag '
                                         'calibration with {}'.format(
                                             model.__class__.__name__
                                         ))
            else:
                adapter = RegressorAdapter(model)
            if normalizer_adapter is not None:
                normalizer = RegressorNormalizer(adapter,
                                                 normalizer_adapter,
                                                 err_func)
                return RegressorNc(adapter, err_func, normalizer)
            else:
                return RegressorNc(adapter, err_func)
class BaseModelNc(BaseScorer):
    """Base class for nonconformity scorers based on an underlying model.

    Parameters
    ----------
    model : ClassifierAdapter or RegressorAdapter
        Underlying classification model used for calculating nonconformity
        scores.
    err_func : ClassificationErrFunc or RegressionErrFunc
        Error function object.
    normalizer : BaseScorer
        Normalization model.
    beta : float
        Normalization smoothing parameter. As the beta-value increases,
        the normalized nonconformity function approaches a non-normalized
        equivalent.
    """
    def __init__(self, model, err_func, normalizer=None, beta=1e-6):
        super(BaseModelNc, self).__init__()
        self.err_func = err_func
        self.model = model
        self.normalizer = normalizer
        self.beta = beta
        # If we use sklearn.base.clone (e.g., during cross-validation),
        # object references get jumbled, so we need to make sure that the
        # normalizer has a reference to the proper model adapter, if applicable.
        if (self.normalizer is not None and
                hasattr(self.normalizer, 'base_model')):
            self.normalizer.base_model = self.model
        # Prediction cache slots (mirrors the adapter's caching scheme).
        self.last_x, self.last_y = None, None
        self.last_prediction = None
        self.clean = False

    def fit(self, x, y):
        """Fits the underlying model of the nonconformity scorer.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples for fitting the underlying model.
        y : numpy array of shape [n_samples]
            Outputs of examples for fitting the underlying model.

        Returns
        -------
        None
        """
        self.model.fit(x, y)
        if self.normalizer is not None:
            self.normalizer.fit(x, y)
        # Refit invalidates any cached prediction.
        self.clean = False

    def score(self, x, y=None):
        """Calculates the nonconformity score of a set of samples.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples for which to calculate a nonconformity score.
        y : numpy array of shape [n_samples]
            Outputs of examples for which to calculate a nonconformity score.

        Returns
        -------
        nc : numpy array of shape [n_samples]
            Nonconformity scores of samples.
        """
        prediction = self.model.predict(x)
        n_test = x.shape[0]
        if self.normalizer is not None:
            norm = self.normalizer.score(x) + self.beta
        else:
            norm = np.ones(n_test)
        if prediction.ndim > 1:
            # Multi-column predictions (e.g. CQR quantile pairs) are NOT
            # divided by norm — norm is computed but unused here.
            # NOTE(review): appears intentional for CQR; confirm upstream.
            ret_val = self.err_func.apply(prediction, y)
        else:
            ret_val = self.err_func.apply(prediction, y) / norm
        return ret_val
# -----------------------------------------------------------------------------
# Classification nonconformity scorers
# -----------------------------------------------------------------------------
class ClassifierNc(BaseModelNc):
    """Nonconformity scorer backed by a class-probability model.

    Parameters
    ----------
    model : ClassifierAdapter
        Underlying classification model used for calculating nonconformity
        scores.
    err_func : ClassificationErrFunc
        Error function object; defaults to the margin error.
    normalizer : BaseScorer
        Normalization model.
    beta : float
        Normalization smoothing parameter. Larger beta-values make the
        normalized nonconformity function approach its non-normalized
        equivalent.

    Attributes
    ----------
    model : ClassifierAdapter
        Underlying model object.
    err_func : ClassificationErrFunc
        Scorer function used to calculate nonconformity scores.

    See also
    --------
    RegressorNc, NormalizedRegressorNc
    """

    def __init__(self,
                 model,
                 err_func=MarginErrFunc(),
                 normalizer=None,
                 beta=1e-6):
        # Pure pass-through: all behavior lives in BaseModelNc.
        super(ClassifierNc, self).__init__(model, err_func, normalizer, beta)
# -----------------------------------------------------------------------------
# Regression nonconformity scorers
# -----------------------------------------------------------------------------
class RegressorNc(BaseModelNc):
    """Nonconformity scorer using an underlying regression model.

    Parameters
    ----------
    model : RegressorAdapter
        Underlying regression model used for calculating nonconformity scores.
    err_func : RegressionErrFunc
        Error function object.
    normalizer : BaseScorer
        Normalization model.
    beta : float
        Normalization smoothing parameter. As the beta-value increases,
        the normalized nonconformity function approaches a non-normalized
        equivalent.

    Attributes
    ----------
    model : RegressorAdapter
        Underlying model object.
    err_func : RegressionErrFunc
        Scorer function used to calculate nonconformity scores.

    See also
    --------
    ProbEstClassifierNc, NormalizedRegressorNc
    """
    def __init__(self,
                 model,
                 err_func=AbsErrorErrFunc(),
                 normalizer=None,
                 beta=1e-6):
        super(RegressorNc, self).__init__(model,
                                          err_func,
                                          normalizer,
                                          beta)

    def predict(self, x, nc, significance=None):
        """Constructs prediction intervals for a set of test examples.

        Predicts the output of each test pattern using the underlying model,
        and applies the (partial) inverse nonconformity function to each
        prediction, resulting in a prediction interval for each test pattern.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patters for which to predict output values.
        nc : numpy array of shape [n_calibration_samples]
            Nonconformity scores of the calibration set.
        significance : float
            Significance level (maximum allowed error rate) of predictions.
            Should be a float between 0 and 1. If ``None``, then intervals for
            all significance levels (0.01, 0.02, ..., 0.99) are output in a
            3d-matrix.

        Returns
        -------
        p : numpy array of shape [n_samples, 2] or [n_samples, 2, 99]
            If significance is ``None``, then p contains the interval (minimum
            and maximum boundaries) for each test pattern, and each significance
            level (0.01, 0.02, ..., 0.99). If significance is a float between
            0 and 1, then p contains the prediction intervals (minimum and
            maximum boundaries) for the set of test patterns at the chosen
            significance level.
        """
        n_test = x.shape[0]
        prediction = self.model.predict(x)
        if self.normalizer is not None:
            norm = self.normalizer.score(x) + self.beta
        else:
            norm = np.ones(n_test)
        # NOTE: `if significance:` also routes significance == 0 (falsy)
        # into the all-levels branch, matching historical behavior.
        if significance:
            intervals = np.zeros((x.shape[0], 2))
            err_dist = self.err_func.apply_inverse(nc, significance)
            err_dist = np.hstack([err_dist] * n_test)
            if prediction.ndim > 1:  # CQR: columns are (low, high) quantiles
                intervals[:, 0] = prediction[:, 0] - err_dist[0, :]
                intervals[:, 1] = prediction[:, -1] + err_dist[1, :]
            else:  # regular conformal prediction
                err_dist *= norm
                intervals[:, 0] = prediction - err_dist[0, :]
                intervals[:, 1] = prediction + err_dist[1, :]
            return intervals
        else:  # Not tested for CQR
            significance = np.arange(0.01, 1.0, 0.01)
            intervals = np.zeros((x.shape[0], 2, significance.size))
            for i, s in enumerate(significance):
                err_dist = self.err_func.apply_inverse(nc, s)
                err_dist = np.hstack([err_dist] * n_test)
                err_dist *= norm
                intervals[:, 0, i] = prediction - err_dist[0, :]
                # BUG FIX: the upper bound previously added err_dist[0, :].
                # For asymmetric error functions (e.g. SignErrorErrFunc) the
                # two rows differ and the upper boundary was wrong; symmetric
                # functions are unaffected since both rows are equal. This
                # now matches the single-significance branch above.
                intervals[:, 1, i] = prediction + err_dist[1, :]
            return intervals
| 17,678 | 27.79316 | 79 | py |
cqr | cqr-master/nonconformist/base.py | #!/usr/bin/env python
"""
docstring
"""
# Authors: Henrik Linusson
import abc
import numpy as np
from sklearn.base import BaseEstimator
class RegressorMixin(object):
    """Mixin tagging an adapter as solving a regression problem."""

    def __init__(self):
        super(RegressorMixin, self).__init__()

    @classmethod
    def get_problem_type(cls):
        """Return the problem-type tag for this mixin."""
        return 'regression'
class ClassifierMixin(object):
    """Mixin tagging an adapter as solving a classification problem."""

    def __init__(self):
        super(ClassifierMixin, self).__init__()

    @classmethod
    def get_problem_type(cls):
        """Return the problem-type tag for this mixin."""
        return 'classification'
class BaseModelAdapter(BaseEstimator):
    """Wraps an underlying model and memoizes its most recent prediction."""
    __metaclass__ = abc.ABCMeta  # Python-2 style ABC marker (inert on py3)

    def __init__(self, model, fit_params=None):
        super(BaseModelAdapter, self).__init__()
        self.model = model
        # Cache of the last prediction: inputs and corresponding outputs.
        self.last_x, self.last_y = None, None
        # True while the cached prediction is still valid for last_x.
        self.clean = False
        self.fit_params = {} if fit_params is None else fit_params

    def fit(self, x, y):
        """Fits the model.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples for fitting the model.
        y : numpy array of shape [n_samples]
            Outputs of examples for fitting the model.

        Returns
        -------
        None
        """
        self.model.fit(x, y, **self.fit_params)
        # Any refit invalidates the prediction cache.
        self.clean = False

    def predict(self, x):
        """Returns the prediction made by the underlying model.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of test examples.

        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted outputs of test examples.
        """
        # Recompute unless the cache is valid for exactly these inputs.
        if (
            not self.clean or
            self.last_x is None or
            self.last_y is None or
            not np.array_equal(self.last_x, x)
        ):
            self.last_x = x
            self.last_y = self._underlying_predict(x)
            self.clean = True
        # Return a copy so callers cannot mutate the cached prediction.
        return self.last_y.copy()

    @abc.abstractmethod
    def _underlying_predict(self, x):
        """Produces a prediction using the encapsulated model.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of test examples.

        Returns
        -------
        y : numpy array of shape [n_samples]
            Predicted outputs of test examples.
        """
        pass
class ClassifierAdapter(BaseModelAdapter):
    """Adapter exposing a classifier's ``predict_proba`` as its prediction."""

    def __init__(self, model, fit_params=None):
        super(ClassifierAdapter, self).__init__(model, fit_params)

    def _underlying_predict(self, x):
        # Conformal classification consumes class probabilities.
        return self.model.predict_proba(x)
class RegressorAdapter(BaseModelAdapter):
    """Adapter exposing a regressor's ``predict`` as its prediction."""

    def __init__(self, model, fit_params=None):
        super(RegressorAdapter, self).__init__(model, fit_params)

    def _underlying_predict(self, x):
        # Point (or quantile-matrix) predictions, passed through verbatim.
        return self.model.predict(x)
class OobMixin(object):
    # Mixin that answers predictions on the exact training inputs with the
    # model's out-of-bag estimates. Cooperative __init__/fit via super():
    # it must precede an adapter class in the bases list (MRO-dependent).
    def __init__(self, model, fit_params=None):
        super(OobMixin, self).__init__(model, fit_params)
        # Training inputs remembered so predict-time calls can be matched.
        self.train_x = None

    def fit(self, x, y):
        super(OobMixin, self).fit(x, y)
        self.train_x = x

    def _underlying_predict(self, x):
        # TODO: sub-sampling of ensemble for test patterns
        oob = x == self.train_x
        # Elementwise comparison of ndarrays yields an array; collapse it
        # to a single boolean ("is this exactly the training set?").
        if hasattr(oob, 'all'):
            oob = oob.all()
        if oob:
            # Same data as training: use OOB estimates to avoid leakage.
            return self._oob_prediction()
        else:
            return super(OobMixin, self)._underlying_predict(x)
class OobClassifierAdapter(OobMixin, ClassifierAdapter):
    """Classifier adapter that serves OOB estimates on the training set."""

    def __init__(self, model, fit_params=None):
        super(OobClassifierAdapter, self).__init__(model, fit_params)

    def _oob_prediction(self):
        # Out-of-bag class-probability estimates from the fitted ensemble.
        return self.model.oob_decision_function_
class OobRegressorAdapter(OobMixin, RegressorAdapter):
    """Regressor adapter that serves OOB estimates on the training set."""

    def __init__(self, model, fit_params=None):
        super(OobRegressorAdapter, self).__init__(model, fit_params)

    def _oob_prediction(self):
        # Out-of-bag point predictions from the fitted ensemble.
        return self.model.oob_prediction_
| 3,379 | 20.528662 | 63 | py |
cqr | cqr-master/nonconformist/icp.py | #!/usr/bin/env python
"""
Inductive conformal predictors.
"""
# Authors: Henrik Linusson
from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.base import BaseEstimator
from nonconformist.base import RegressorMixin, ClassifierMixin
from nonconformist.util import calc_p
# -----------------------------------------------------------------------------
# Base inductive conformal predictor
# -----------------------------------------------------------------------------
class BaseIcp(BaseEstimator):
    """Base class for inductive conformal predictors.
    """
    def __init__(self, nc_function, condition=None):
        self.cal_x, self.cal_y = None, None
        self.nc_function = nc_function
        # Check if condition-parameter is the default function (i.e.,
        # lambda x: 0). This is so we can safely clone the object without
        # the clone accidentally having self.conditional = True.
        default_condition = lambda x: 0
        is_default = (callable(condition) and
                      (condition.__code__.co_code ==
                       default_condition.__code__.co_code))
        if is_default:
            self.condition = condition
            self.conditional = False
        elif callable(condition):
            # User-supplied grouping function: calibration scores are kept
            # per category (Mondrian / conditional conformal prediction).
            self.condition = condition
            self.conditional = True
        else:
            self.condition = lambda x: 0
            self.conditional = False

    def fit(self, x, y):
        """Fit underlying nonconformity scorer.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples for fitting the nonconformity scorer.
        y : numpy array of shape [n_samples]
            Outputs of examples for fitting the nonconformity scorer.

        Returns
        -------
        None
        """
        # TODO: incremental?
        self.nc_function.fit(x, y)

    def calibrate(self, x, y, increment=False):
        """Calibrate conformal predictor based on underlying nonconformity
        scorer.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples for calibrating the conformal predictor.
        y : numpy array of shape [n_samples, n_features]
            Outputs of examples for calibrating the conformal predictor.
        increment : boolean
            If ``True``, performs an incremental recalibration of the conformal
            predictor. The supplied ``x`` and ``y`` are added to the set of
            previously existing calibration examples, and the conformal
            predictor is then calibrated on both the old and new calibration
            examples.

        Returns
        -------
        None
        """
        self._calibrate_hook(x, y, increment)
        self._update_calibration_set(x, y, increment)
        if self.conditional:
            # One descending-sorted score array per category.
            category_map = np.array([self.condition((x[i, :], y[i]))
                                     for i in range(y.size)])
            self.categories = np.unique(category_map)
            self.cal_scores = defaultdict(partial(np.ndarray, 0))
            for cond in self.categories:
                idx = category_map == cond
                cal_scores = self.nc_function.score(self.cal_x[idx, :],
                                                    self.cal_y[idx])
                self.cal_scores[cond] = np.sort(cal_scores, 0)[::-1]
        else:
            # Single (unconditional) category, keyed 0.
            self.categories = np.array([0])
            cal_scores = self.nc_function.score(self.cal_x, self.cal_y)
            self.cal_scores = {0: np.sort(cal_scores, 0)[::-1]}

    def _calibrate_hook(self, x, y, increment):
        # Subclass hook invoked before the calibration set is updated.
        pass

    def _update_calibration_set(self, x, y, increment):
        if increment and self.cal_x is not None and self.cal_y is not None:
            # Append the new examples to the existing calibration set.
            self.cal_x = np.vstack([self.cal_x, x])
            self.cal_y = np.hstack([self.cal_y, y])
        else:
            self.cal_x, self.cal_y = x, y
# -----------------------------------------------------------------------------
# Inductive conformal classifier
# -----------------------------------------------------------------------------
class IcpClassifier(BaseIcp, ClassifierMixin):
    """Inductive conformal classifier.

    Parameters
    ----------
    nc_function : BaseScorer
        Nonconformity scorer object used to calculate nonconformity of
        calibration examples and test patterns. Should implement ``fit(x, y)``
        and ``calc_nc(x, y)``.
    smoothing : boolean
        Decides whether to use stochastic smoothing of p-values.

    Attributes
    ----------
    cal_x : numpy array of shape [n_cal_examples, n_features]
        Inputs of calibration set.
    cal_y : numpy array of shape [n_cal_examples]
        Outputs of calibration set.
    nc_function : BaseScorer
        Nonconformity scorer object used to calculate nonconformity scores.
    classes : numpy array of shape [n_classes]
        List of class labels, with indices corresponding to output columns
        of IcpClassifier.predict()

    See also
    --------
    IcpRegressor

    References
    ----------
    .. [1] Papadopoulos, H., & Haralambous, H. (2011). Reliable prediction
        intervals with regression neural networks. Neural Networks, 24(8),
        842-851.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> from nonconformist.base import ClassifierAdapter
    >>> from nonconformist.icp import IcpClassifier
    >>> from nonconformist.nc import ClassifierNc, MarginErrFunc
    >>> iris = load_iris()
    >>> idx = np.random.permutation(iris.target.size)
    >>> train = idx[:int(idx.size / 3)]
    >>> cal = idx[int(idx.size / 3):int(2 * idx.size / 3)]
    >>> test = idx[int(2 * idx.size / 3):]
    >>> model = ClassifierAdapter(DecisionTreeClassifier())
    >>> nc = ClassifierNc(model, MarginErrFunc())
    >>> icp = IcpClassifier(nc)
    >>> icp.fit(iris.data[train, :], iris.target[train])
    >>> icp.calibrate(iris.data[cal, :], iris.target[cal])
    >>> icp.predict(iris.data[test, :], significance=0.10)
    ...             # doctest: +SKIP
    array([[ True, False, False],
        [False,  True, False],
        ...,
        [False,  True, False],
        [False,  True, False]], dtype=bool)
    """
    def __init__(self, nc_function, condition=None, smoothing=True):
        super(IcpClassifier, self).__init__(nc_function, condition)
        self.classes = None
        self.smoothing = smoothing

    def _calibrate_hook(self, x, y, increment=False):
        # Track the set of known class labels alongside calibration.
        self._update_classes(y, increment)

    def _update_classes(self, y, increment):
        if self.classes is None or not increment:
            self.classes = np.unique(y)
        else:
            # Incremental calibration may introduce new labels.
            self.classes = np.unique(np.hstack([self.classes, y]))

    def predict(self, x, significance=None):
        """Predict the output values for a set of input patterns.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patters for which to predict output values.
        significance : float or None
            Significance level (maximum allowed error rate) of predictions.
            Should be a float between 0 and 1. If ``None``, then the p-values
            are output rather than the predictions.

        Returns
        -------
        p : numpy array of shape [n_samples, n_classes]
            If significance is ``None``, then p contains the p-values for each
            sample-class pair; if significance is a float between 0 and 1, then
            p is a boolean array denoting which labels are included in the
            prediction sets.
        """
        # TODO: if x == self.last_x ...
        n_test_objects = x.shape[0]
        p = np.zeros((n_test_objects, self.classes.size))
        ncal_ngt_neq = self._get_stats(x)
        for i in range(len(self.classes)):
            for j in range(n_test_objects):
                p[j, i] = calc_p(ncal_ngt_neq[j, i, 0],
                                 ncal_ngt_neq[j, i, 1],
                                 ncal_ngt_neq[j, i, 2],
                                 self.smoothing)
        if significance is not None:
            return p > significance
        else:
            return p

    def _get_stats(self, x):
        # For each (test object, tentative label) pair, compute the counts
        # needed for a p-value: [n_cal, n_greater, n_equal].
        n_test_objects = x.shape[0]
        ncal_ngt_neq = np.zeros((n_test_objects, self.classes.size, 3))
        for i, c in enumerate(self.classes):
            # Score every test object as if its label were c.
            test_class = np.zeros(x.shape[0], dtype=self.classes.dtype)
            test_class.fill(c)
            # TODO: maybe calculate p-values using cython or similar
            # TODO: interpolated p-values
            # TODO: nc_function.calc_nc should take X * {y1, y2, ... ,yn}
            test_nc_scores = self.nc_function.score(x, test_class)
            for j, nc in enumerate(test_nc_scores):
                # Calibration scores are stored descending; reverse to
                # ascending order for searchsorted.
                cal_scores = self.cal_scores[self.condition((x[j, :], c))][::-1]
                n_cal = cal_scores.size
                idx_left = np.searchsorted(cal_scores, nc, 'left')
                idx_right = np.searchsorted(cal_scores, nc, 'right')
                ncal_ngt_neq[j, i, 0] = n_cal
                ncal_ngt_neq[j, i, 1] = n_cal - idx_right
                ncal_ngt_neq[j, i, 2] = idx_right - idx_left
        return ncal_ngt_neq

    def predict_conf(self, x):
        """Predict the output values for a set of input patterns, using
        the confidence-and-credibility output scheme.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patters for which to predict output values.

        Returns
        -------
        p : numpy array of shape [n_samples, 3]
            p contains three columns: the first column contains the most
            likely class for each test pattern; the second column contains
            the confidence in the predicted class label, and the third column
            contains the credibility of the prediction.
        """
        p = self.predict(x, significance=None)
        label = p.argmax(axis=1)
        credibility = p.max(axis=1)
        # Mask the winning label (p is a local array) so the next max()
        # finds the runner-up p-value, defining confidence.
        for i, idx in enumerate(label):
            p[i, idx] = -np.inf
        confidence = 1 - p.max(axis=1)
        return np.array([label, confidence, credibility]).T
# -----------------------------------------------------------------------------
# Inductive conformal regressor
# -----------------------------------------------------------------------------
class IcpRegressor(BaseIcp, RegressorMixin):
    """Inductive conformal regressor.

    Parameters
    ----------
    nc_function : BaseScorer
        Nonconformity scorer object used to calculate nonconformity of
        calibration examples and test patterns. Should implement ``fit(x, y)``,
        ``calc_nc(x, y)`` and ``predict(x, nc_scores, significance)``.

    Attributes
    ----------
    cal_x : numpy array of shape [n_cal_examples, n_features]
        Inputs of calibration set.
    cal_y : numpy array of shape [n_cal_examples]
        Outputs of calibration set.
    nc_function : BaseScorer
        Nonconformity scorer object used to calculate nonconformity scores.

    See also
    --------
    IcpClassifier

    References
    ----------
    .. [1] Papadopoulos, H., Proedrou, K., Vovk, V., & Gammerman, A. (2002).
        Inductive confidence machines for regression. In Machine Learning: ECML
        2002 (pp. 345-356). Springer Berlin Heidelberg.
    .. [2] Papadopoulos, H., & Haralambous, H. (2011). Reliable prediction
        intervals with regression neural networks. Neural Networks, 24(8),
        842-851.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> from nonconformist.base import RegressorAdapter
    >>> from nonconformist.icp import IcpRegressor
    >>> from nonconformist.nc import RegressorNc, AbsErrorErrFunc
    >>> boston = load_boston()
    >>> idx = np.random.permutation(boston.target.size)
    >>> train = idx[:int(idx.size / 3)]
    >>> cal = idx[int(idx.size / 3):int(2 * idx.size / 3)]
    >>> test = idx[int(2 * idx.size / 3):]
    >>> model = RegressorAdapter(DecisionTreeRegressor())
    >>> nc = RegressorNc(model, AbsErrorErrFunc())
    >>> icp = IcpRegressor(nc)
    >>> icp.fit(boston.data[train, :], boston.target[train])
    >>> icp.calibrate(boston.data[cal, :], boston.target[cal])
    >>> icp.predict(boston.data[test, :], significance=0.10)
    ...     # doctest: +SKIP
    array([[  5. ,  20.6],
        [ 15.5,  31.1],
        ...,
        [ 14.2,  29.8],
        [ 11.6,  27.2]])
    """
    def __init__(self, nc_function, condition=None):
        super(IcpRegressor, self).__init__(nc_function, condition)

    def predict(self, x, significance=None):
        """Predict the output values for a set of input patterns.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patters for which to predict output values.
        significance : float
            Significance level (maximum allowed error rate) of predictions.
            Should be a float between 0 and 1. If ``None``, then intervals for
            all significance levels (0.01, 0.02, ..., 0.99) are output in a
            3d-matrix.

        Returns
        -------
        p : numpy array of shape [n_samples, 2] or [n_samples, 2, 99}
            If significance is ``None``, then p contains the interval (minimum
            and maximum boundaries) for each test pattern, and each significance
            level (0.01, 0.02, ..., 0.99). If significance is a float between
            0 and 1, then p contains the prediction intervals (minimum and
            maximum boundaries) for the set of test patterns at the chosen
            significance level.
        """
        # TODO: interpolated p-values
        n_significance = (99 if significance is None
                          else np.array(significance).size)
        if n_significance > 1:
            prediction = np.zeros((x.shape[0], 2, n_significance))
        else:
            prediction = np.zeros((x.shape[0], 2))
        # Route each test object to its Mondrian category (y unknown here,
        # so the condition function receives None for the label).
        condition_map = np.array([self.condition((x[i, :], None))
                                  for i in range(x.shape[0])])
        for condition in self.categories:
            idx = condition_map == condition
            if np.sum(idx) > 0:
                # Intervals are computed per category, against that
                # category's own calibration scores.
                p = self.nc_function.predict(x[idx, :],
                                             self.cal_scores[condition],
                                             significance)
                if n_significance > 1:
                    prediction[idx, :, :] = p
                else:
                    prediction[idx, :] = p
        return prediction
class OobCpClassifier(IcpClassifier):
    """Out-of-bag conformal classifier.

    Fitting trains the model and immediately calibrates on the same data;
    the out-of-bag nonconformity scorer keeps the calibration unbiased.
    """

    def __init__(self,
                 nc_function,
                 condition=None,
                 smoothing=True):
        super(OobCpClassifier, self).__init__(nc_function,
                                              condition,
                                              smoothing)

    def fit(self, x, y):
        # Train, then calibrate on the very same examples (OOB scores).
        super(OobCpClassifier, self).fit(x, y)
        super(OobCpClassifier, self).calibrate(x, y, False)

    def calibrate(self, x, y, increment=False):
        # Intentional no-op: external calibration does not apply to the
        # OOB variant. (Should throw exception, per upstream note.)
        pass
class OobCpRegressor(IcpRegressor):
    """Out-of-bag conformal regressor.

    Fitting trains the model and immediately calibrates on the same data;
    the out-of-bag nonconformity scorer keeps the calibration unbiased.
    """

    def __init__(self,
                 nc_function,
                 condition=None):
        super(OobCpRegressor, self).__init__(nc_function,
                                             condition)

    def fit(self, x, y):
        # Train, then calibrate on the very same examples (OOB scores).
        super(OobCpRegressor, self).fit(x, y)
        super(OobCpRegressor, self).calibrate(x, y, False)

    def calibrate(self, x, y, increment=False):
        # Intentional no-op: external calibration does not apply to the
        # OOB variant. (Should throw exception, per upstream note.)
        pass
| 13,978 | 30.698413 | 79 | py |
cqr | cqr-master/nonconformist/cp.py | from nonconformist.icp import *
# TODO: move contents from nonconformist.icp here
# -----------------------------------------------------------------------------
# TcpClassifier
# -----------------------------------------------------------------------------
class TcpClassifier(BaseEstimator, ClassifierMixin):
"""Transductive conformal classifier.
Parameters
----------
nc_function : BaseScorer
Nonconformity scorer object used to calculate nonconformity of
calibration examples and test patterns. Should implement ``fit(x, y)``
and ``calc_nc(x, y)``.
smoothing : boolean
Decides whether to use stochastic smoothing of p-values.
Attributes
----------
train_x : numpy array of shape [n_cal_examples, n_features]
Inputs of training set.
train_y : numpy array of shape [n_cal_examples]
Outputs of calibration set.
nc_function : BaseScorer
Nonconformity scorer object used to calculate nonconformity scores.
classes : numpy array of shape [n_classes]
List of class labels, with indices corresponding to output columns
of TcpClassifier.predict()
See also
--------
IcpClassifier
References
----------
.. [1] Vovk, V., Gammerman, A., & Shafer, G. (2005). Algorithmic learning
in a random world. Springer Science & Business Media.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> from sklearn.svm import SVC
>>> from nonconformist.base import ClassifierAdapter
>>> from nonconformist.cp import TcpClassifier
>>> from nonconformist.nc import ClassifierNc, MarginErrFunc
>>> iris = load_iris()
>>> idx = np.random.permutation(iris.target.size)
>>> train = idx[:int(idx.size / 2)]
>>> test = idx[int(idx.size / 2):]
>>> model = ClassifierAdapter(SVC(probability=True))
>>> nc = ClassifierNc(model, MarginErrFunc())
>>> tcp = TcpClassifier(nc)
>>> tcp.fit(iris.data[train, :], iris.target[train])
>>> tcp.predict(iris.data[test, :], significance=0.10)
... # doctest: +SKIP
array([[ True, False, False],
[False, True, False],
...,
[False, True, False],
[False, True, False]], dtype=bool)
"""
def __init__(self, nc_function, condition=None, smoothing=True):
self.train_x, self.train_y = None, None
self.nc_function = nc_function
super(TcpClassifier, self).__init__()
# Check if condition-parameter is the default function (i.e.,
# lambda x: 0). This is so we can safely clone the object without
# the clone accidentally having self.conditional = True.
default_condition = lambda x: 0
is_default = (callable(condition) and
(condition.__code__.co_code ==
default_condition.__code__.co_code))
if is_default:
self.condition = condition
self.conditional = False
elif callable(condition):
self.condition = condition
self.conditional = True
else:
self.condition = lambda x: 0
self.conditional = False
self.smoothing = smoothing
self.base_icp = IcpClassifier(
self.nc_function,
self.condition,
self.smoothing
)
self.classes = None
def fit(self, x, y):
    """Store the training examples and record the observed class labels.

    Transductive conformal prediction defers all real work to
    ``predict``; fitting only memorizes the data.
    """
    self.train_x = x
    self.train_y = y
    self.classes = np.unique(y)
def predict(self, x, significance=None):
    """Predict the output values for a set of input patterns.

    Parameters
    ----------
    x : numpy array of shape [n_samples, n_features]
        Inputs of patterns for which to predict output values.
    significance : float or None
        Significance level (maximum allowed error rate) of predictions.
        Should be a float between 0 and 1. If ``None``, the p-values are
        returned rather than boolean prediction sets.

    Returns
    -------
    p : numpy array of shape [n_samples, n_classes]
        P-values (``significance is None``) or boolean membership of each
        class label in the prediction set.
    """
    n_cal = self.train_x.shape[0]
    p_values = np.zeros((x.shape[0], self.classes.size))
    for row in range(x.shape[0]):
        for col, label in enumerate(self.classes):
            # Tentatively append the test point with each candidate label
            # and re-score the augmented training set.
            aug_x = np.vstack([self.train_x, x[row, :]])
            aug_y = np.hstack([self.train_y, label])
            self.base_icp.fit(aug_x, aug_y)
            nc_scores = self.base_icp.nc_function.score(aug_x, aug_y)
            test_score = nc_scores[-1]
            n_greater = (nc_scores[:-1] > test_score).sum()
            n_equal = (nc_scores[:-1] == test_score).sum()
            p_values[row, col] = calc_p(n_cal, n_greater, n_equal,
                                        self.smoothing)
    if significance is not None:
        return p_values > significance
    return p_values
def predict_conf(self, x):
    """Predict with the confidence-and-credibility output scheme.

    Parameters
    ----------
    x : numpy array of shape [n_samples, n_features]
        Inputs of patterns for which to predict output values.

    Returns
    -------
    p : numpy array of shape [n_samples, 3]
        Columns: most likely class label, confidence in that label
        (one minus the second-largest p-value), and credibility
        (the largest p-value).
    """
    p = self.predict(x, significance=None)
    best = p.argmax(axis=1)
    credibility = p.max(axis=1)
    # Knock out the winning p-value so the next max() finds the runner-up.
    for row, col in enumerate(best):
        p[row, col] = -np.inf
    confidence = 1 - p.max(axis=1)
    return np.array([best, confidence, credibility]).T
| 5,299 | 29.813953 | 79 | py |
cqr | cqr-master/nonconformist/util.py | from __future__ import division
import numpy as np
def calc_p(ncal, ngt, neq, smoothing=False):
    """Return the conformal p-value for a single test example.

    Parameters
    ----------
    ncal : int
        Number of calibration scores.
    ngt : int
        Number of calibration scores strictly greater than the test score.
    neq : int
        Number of calibration scores equal to the test score.
    smoothing : bool
        If True, break ties uniformly at random (smoothed p-value).
    """
    denominator = ncal + 1
    if not smoothing:
        return (ngt + neq + 1) / denominator
    tie_break = (neq + 1) * np.random.uniform(0, 1)
    return (ngt + tie_break) / denominator
| 223 | 23.888889 | 65 | py |
cqr | cqr-master/nonconformist/__init__.py | #!/usr/bin/env python
"""
docstring
"""
# Authors: Henrik Linusson
# Yaniv Romano modified np.py file to include CQR
__version__ = '2.1.0'
__all__ = ['icp', 'nc', 'acp']
| 174 | 12.461538 | 49 | py |
cqr | cqr-master/nonconformist/evaluation.py | #!/usr/bin/env python
"""
Evaluation of conformal predictors.
"""
# Authors: Henrik Linusson
# TODO: cross_val_score/run_experiment should possibly allow multiple to be evaluated on identical folding
from __future__ import division
from nonconformist.base import RegressorMixin, ClassifierMixin
import sys
import numpy as np
import pandas as pd
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
from sklearn.base import clone, BaseEstimator
class BaseIcpCvHelper(BaseEstimator):
    """Base class for cross validation helpers.

    Wraps an inductive conformal predictor together with the fraction of
    training data reserved for calibration.
    """
    def __init__(self, icp, calibration_portion):
        super(BaseIcpCvHelper, self).__init__()
        self.icp = icp
        self.calibration_portion = calibration_portion

    def predict(self, x, significance=None):
        """Delegate prediction to the wrapped conformal predictor."""
        prediction = self.icp.predict(x, significance)
        return prediction
class ClassIcpCvHelper(BaseIcpCvHelper, ClassifierMixin):
    """Helper class for running the ``cross_val_score`` evaluation
    method on IcpClassifiers.
    See also
    --------
    IcpRegCrossValHelper
    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from nonconformist.icp import IcpClassifier
    >>> from nonconformist.nc import ClassifierNc, MarginErrFunc
    >>> from nonconformist.evaluation import ClassIcpCvHelper
    >>> from nonconformist.evaluation import class_mean_errors
    >>> from nonconformist.evaluation import cross_val_score
    >>> data = load_iris()
    >>> nc = ClassifierNc(RandomForestClassifier(), MarginErrFunc())
    >>> icp = IcpClassifier(nc)
    >>> icp_cv = ClassIcpCvHelper(icp)
    >>> cross_val_score(icp_cv,
    ...                 data.data,
    ...                 data.target,
    ...                 iterations=2,
    ...                 folds=2,
    ...                 scoring_funcs=[class_mean_errors],
    ...                 significance_levels=[0.1])
    ...     # doctest: +SKIP
        class_mean_errors fold iter significance
    0       0.013333        0    0      0.1
    1       0.080000        1    0      0.1
    2       0.053333        0    1      0.1
    3       0.080000        1    1      0.1
    """
    # Fix: the docstring example previously constructed
    # ``ProbEstClassifierNc``, a name it never imported; it now uses the
    # imported ``ClassifierNc``, consistent with the import line above.
    def __init__(self, icp, calibration_portion=0.25):
        super(ClassIcpCvHelper, self).__init__(icp, calibration_portion)

    def fit(self, x, y):
        """Fit on a stratified training split and calibrate on the rest.

        NOTE(review): uses the pre-0.18 ``sklearn.cross_validation`` API
        (``StratifiedShuffleSplit(y, n_iter=...)``), matching this file's
        imports -- confirm the installed scikit-learn still provides it.
        """
        split = StratifiedShuffleSplit(y, n_iter=1,
                                       test_size=self.calibration_portion)
        for train, cal in split:
            self.icp.fit(x[train, :], y[train])
            self.icp.calibrate(x[cal, :], y[cal])
class RegIcpCvHelper(BaseIcpCvHelper, RegressorMixin):
    """Helper class for running the ``cross_val_score`` evaluation
    method on IcpRegressors.
    See also
    --------
    IcpClassCrossValHelper
    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.ensemble import RandomForestRegressor
    >>> from nonconformist.icp import IcpRegressor
    >>> from nonconformist.nc import RegressorNc, AbsErrorErrFunc
    >>> from nonconformist.evaluation import RegIcpCvHelper
    >>> from nonconformist.evaluation import reg_mean_errors
    >>> from nonconformist.evaluation import cross_val_score
    >>> data = load_boston()
    >>> nc = RegressorNc(RandomForestRegressor(), AbsErrorErrFunc())
    >>> icp = IcpRegressor(nc)
    >>> icp_cv = RegIcpCvHelper(icp)
    >>> cross_val_score(icp_cv,
    ...                 data.data,
    ...                 data.target,
    ...                 iterations=2,
    ...                 folds=2,
    ...                 scoring_funcs=[reg_mean_errors],
    ...                 significance_levels=[0.1])
    ...     # doctest: +SKIP
        fold iter reg_mean_errors significance
    0     0    0        0.185771          0.1
    1     1    0        0.138340          0.1
    2     0    1        0.071146          0.1
    3     1    1        0.043478          0.1
    """
    def __init__(self, icp, calibration_portion=0.25):
        super(RegIcpCvHelper, self).__init__(icp, calibration_portion)

    def fit(self, x, y):
        """Fit on a random training split and calibrate on the held-out part."""
        x_tr, x_cal, y_tr, y_cal = train_test_split(
            x, y, test_size=self.calibration_portion)
        self.icp.fit(x_tr, y_tr)
        self.icp.calibrate(x_cal, y_cal)
# -----------------------------------------------------------------------------
#
# -----------------------------------------------------------------------------
def cross_val_score(model, x, y, iterations=10, folds=10, fit_params=None,
                    scoring_funcs=None, significance_levels=None,
                    verbose=False):
    """Evaluates a conformal predictor using cross-validation.
    Parameters
    ----------
    model : object
        Conformal predictor to evaluate.
    x : numpy array of shape [n_samples, n_features]
        Inputs of data to use for evaluation.
    y : numpy array of shape [n_samples]
        Outputs of data to use for evaluation.
    iterations : int
        Number of iterations to use for evaluation. The data set is randomly
        shuffled before each iteration.
    folds : int
        Number of folds to use for evaluation.
    fit_params : dictionary
        Parameters to supply to the conformal prediction object on training.
    scoring_funcs : iterable
        List of evaluation functions to apply to the conformal predictor in each
        fold. Each evaluation function should have a signature
        ``scorer(prediction, y, significance)``.
    significance_levels : iterable
        List of significance levels at which to evaluate the conformal
        predictor.
    verbose : boolean
        Indicates whether to output progress information during evaluation.
    Returns
    -------
    scores : pandas DataFrame
        Tabulated results for each iteration, fold and evaluation function.
    """
    fit_params = fit_params if fit_params else {}
    significance_levels = (significance_levels if significance_levels
                           is not None else np.arange(0.01, 1.0, 0.01))
    columns = ['iter',
               'fold',
               'significance',
               ] + [f.__name__ for f in scoring_funcs]
    # Collect plain rows and build the DataFrame once at the end.
    # ``DataFrame.append`` in a loop was quadratic and has been removed in
    # pandas 2.0.
    rows = []
    for i in range(iterations):
        # Reshuffle the data before every iteration.
        idx = np.random.permutation(y.size)
        x, y = x[idx, :], y[idx]
        cv = KFold(y.size, folds)
        for j, (train, test) in enumerate(cv):
            if verbose:
                sys.stdout.write('\riter {}/{} fold {}/{}'.format(
                    i + 1,
                    iterations,
                    j + 1,
                    folds
                ))
            # Fresh clone per fold so folds do not leak state into each other.
            m = clone(model)
            m.fit(x[train, :], y[train], **fit_params)
            prediction = m.predict(x[test, :], significance=None)
            for s in significance_levels:
                scores = [scoring_func(prediction, y[test], s)
                          for scoring_func in scoring_funcs]
                rows.append([i, j, s] + scores)
    return pd.DataFrame(rows, columns=columns)
def run_experiment(models, csv_files, iterations=10, folds=10, fit_params=None,
                   scoring_funcs=None, significance_levels=None,
                   normalize=False, verbose=False, header=0):
    """Performs a cross-validation evaluation of one or several conformal
    predictors on a collection of data sets in csv format.
    Parameters
    ----------
    models : object or iterable
        Conformal predictor(s) to evaluate.
    csv_files : iterable
        List of file names (with absolute paths) containing csv-data, used to
        evaluate the conformal predictor.
    iterations : int
        Number of iterations to use for evaluation. The data set is randomly
        shuffled before each iteration.
    folds : int
        Number of folds to use for evaluation.
    fit_params : dictionary
        Parameters to supply to the conformal prediction object on training.
    scoring_funcs : iterable
        List of evaluation functions to apply to the conformal predictor in each
        fold. Each evaluation function should have a signature
        ``scorer(prediction, y, significance)``.
    significance_levels : iterable
        List of significance levels at which to evaluate the conformal
        predictor.
    verbose : boolean
        Indicates whether to output progress information during evaluation.
    Returns
    -------
    scores : pandas DataFrame
        Tabulated results for each data set, iteration, fold and
        evaluation function.
    """
    per_dataset_frames = []
    if not hasattr(models, '__iter__'):
        models = [models]
    for model in models:
        is_regression = model.get_problem_type() == 'regression'
        n_data_sets = len(csv_files)
        for i, csv_file in enumerate(csv_files):
            if verbose:
                print('\n{} ({} / {})'.format(csv_file, i + 1, n_data_sets))
            data = pd.read_csv(csv_file, header=header)
            x, y = data.values[:, :-1], data.values[:, -1]
            x = np.array(x, dtype=np.float64)
            if normalize:
                if is_regression:
                    # Bug fix: min-max normalization requires the subtraction
                    # to be parenthesized; the original computed
                    # ``y - (y.min() / (y.max() - y.min()))``.
                    y = (y - y.min()) / (y.max() - y.min())
                else:
                    # Relabel classes as consecutive integers 0..k-1.
                    for j, y_ in enumerate(np.unique(y)):
                        y[y == y_] = j
            scores = cross_val_score(model, x, y, iterations, folds,
                                     fit_params, scoring_funcs,
                                     significance_levels, verbose)
            ds_df = pd.DataFrame(scores)
            ds_df['model'] = model.__class__.__name__
            try:
                ds_df['data_set'] = csv_file.split('/')[-1]
            except AttributeError:
                # csv_file is not a string (e.g. a Path-like); store it as-is.
                ds_df['data_set'] = csv_file
            per_dataset_frames.append(ds_df)
    # Single concat instead of repeated DataFrame.append (removed in
    # pandas 2.0 and quadratic in the number of data sets).
    if per_dataset_frames:
        return pd.concat(per_dataset_frames)
    return pd.DataFrame()
# -----------------------------------------------------------------------------
# Validity measures
# -----------------------------------------------------------------------------
def reg_n_correct(prediction, y, significance=None):
    """Calculates the number of correct predictions made by a conformal
    regression model.

    ``prediction`` is either an [n, 2] interval array, or an [n, 2, 99]
    array indexed by significance level when ``significance`` is given.
    """
    if significance is not None:
        level_idx = int(significance * 100 - 1)
        prediction = prediction[:, :, level_idx]
    within_lower = prediction[:, 0] <= y
    within_upper = y <= prediction[:, 1]
    return y[within_lower & within_upper].size
def reg_mean_errors(prediction, y, significance):
    """Calculates the average error rate of a conformal regression model."""
    coverage = reg_n_correct(prediction, y, significance) / y.size
    return 1 - coverage
def class_n_correct(prediction, y, significance):
    """Calculates the number of correct predictions made by a conformal
    classification model.

    A prediction counts as correct when the true label's p-value exceeds
    ``significance`` (i.e. the true label is in the prediction set).
    """
    labels, y = np.unique(y, return_inverse=True)
    included = prediction > significance
    hits = np.array([included[i, int(label)] for i, label in enumerate(y)],
                    dtype=bool)
    return np.sum(hits)
def class_mean_errors(prediction, y, significance=None):
    """Calculates the average error rate of a conformal classification model."""
    accuracy = class_n_correct(prediction, y, significance) / y.size
    return 1 - accuracy
def class_one_err(prediction, y, significance=None):
    """Calculates the error rate of conformal classifier predictions containing
    only a single output label.

    Bug fix: the original kept lazy ``filter`` objects and then called
    ``len()`` / ``np.size()`` on them, which raises TypeError on Python 3;
    the filtered index sets are now materialized as lists.
    """
    labels, y = np.unique(y, return_inverse=True)
    prediction = prediction > significance
    # Indices of test patterns whose prediction set is a singleton.
    idx = [i for i in range(y.size) if np.sum(prediction[i, :]) == 1]
    # Singleton predictions that do not contain the true label.
    errors = [i for i in idx if not prediction[i, int(y[i])]]
    if len(idx) > 0:
        return np.size(errors) / np.size(idx)
    else:
        return 0
def class_mean_errors_one_class(prediction, y, significance, c=0):
    """Calculates the average error rate of a conformal classification model,
    considering only test examples belonging to class ``c``. Use
    ``functools.partial`` in order to test other classes.
    """
    labels, y = np.unique(y, return_inverse=True)
    included = prediction > significance
    # Test patterns whose true (re-encoded) label is ``c``.
    class_members = np.arange(0, y.size, 1)[y == c]
    # Errors: class-c patterns whose prediction set omits class c.
    n_errors = sum(1 for i in class_members if not included[i, c])
    if class_members.size > 0:
        return n_errors / class_members.size
    else:
        return 0
def class_one_err_one_class(prediction, y, significance, c=0):
    """Calculates the error rate of conformal classifier predictions containing
    only a single output label. Considers only test examples belonging to
    class ``c``. Use ``functools.partial`` in order to test other classes.

    Bug fix: materialize the chained ``filter`` objects as lists; the
    original called ``len()`` / ``np.size()`` on lazy filters, which raises
    TypeError on Python 3.
    """
    labels, y = np.unique(y, return_inverse=True)
    prediction = prediction > significance
    # Singleton prediction sets whose single member is class ``c``.
    idx = [i for i in range(y.size)
           if prediction[i, c] and np.sum(prediction[i, :]) == 1]
    # Among those, the ones whose true label is not ``c``.
    errors = [i for i in idx if int(y[i]) != c]
    if len(idx) > 0:
        return np.size(errors) / np.size(idx)
    else:
        return 0
# -----------------------------------------------------------------------------
# Efficiency measures
# -----------------------------------------------------------------------------
def _reg_interval_size(prediction, y, significance):
idx = int(significance * 100 - 1)
prediction = prediction[:, :, idx]
return prediction[:, 1] - prediction[:, 0]
def reg_min_size(prediction, y, significance):
    """Smallest prediction interval size at the given significance level."""
    sizes = _reg_interval_size(prediction, y, significance)
    return np.min(sizes)
def reg_q1_size(prediction, y, significance):
    """First-quartile prediction interval size at the given significance level."""
    sizes = _reg_interval_size(prediction, y, significance)
    return np.percentile(sizes, 25)
def reg_median_size(prediction, y, significance):
    """Median prediction interval size at the given significance level."""
    sizes = _reg_interval_size(prediction, y, significance)
    return np.median(sizes)
def reg_q3_size(prediction, y, significance):
    """Third-quartile prediction interval size at the given significance level."""
    sizes = _reg_interval_size(prediction, y, significance)
    return np.percentile(sizes, 75)
def reg_max_size(prediction, y, significance):
    """Largest prediction interval size at the given significance level."""
    sizes = _reg_interval_size(prediction, y, significance)
    return np.max(sizes)
def reg_mean_size(prediction, y, significance):
    """Calculates the average prediction interval size of a conformal
    regression model.
    """
    sizes = _reg_interval_size(prediction, y, significance)
    return np.mean(sizes)
def class_avg_c(prediction, y, significance):
    """Calculates the average number of classes per prediction of a conformal
    classification model.
    """
    included = prediction > significance
    return np.sum(included) / included.shape[0]
def class_mean_p_val(prediction, y, significance):
    """Calculates the mean of the p-values output by a conformal classification
    model.
    """
    p_values = prediction
    return np.mean(p_values)
def class_one_c(prediction, y, significance):
    """Calculates the rate of singleton predictions (prediction sets containing
    only a single class label) of a conformal classification model.
    """
    included = prediction > significance
    # Rows whose prediction set has exactly one member.
    n_singletons = int((included.sum(axis=1) == 1).sum())
    return n_singletons / y.size
def class_empty(prediction, y, significance):
    """Calculates the rate of empty predictions (prediction sets containing
    no class labels) of a conformal classification model.

    Fixes the docstring, which was copy-pasted from ``class_one_c`` and
    wrongly described singleton predictions; also replaces the deprecated
    ``np.sum(generator)`` with the builtin ``sum``.
    """
    prediction = prediction > significance
    n_empty = sum(1 for row in prediction if np.sum(row) == 0)
    return n_empty / y.size
def n_test(prediction, y, significance):
    """Provides the number of test patterns used in the evaluation."""
    return np.size(y)
cqr | cqr-master/nonconformist/acp.py | #!/usr/bin/env python
"""
Aggregated conformal predictors
"""
# Authors: Henrik Linusson
import numpy as np
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.cross_validation import ShuffleSplit, StratifiedShuffleSplit
from sklearn.base import clone
from nonconformist.base import BaseEstimator
from nonconformist.util import calc_p
# -----------------------------------------------------------------------------
# Sampling strategies
# -----------------------------------------------------------------------------
class BootstrapSampler(object):
    """Bootstrap sampler.

    Draws bootstrap (with-replacement) training samples; the calibration
    set of each draw consists of the out-of-bag indices.
    See also
    --------
    CrossSampler, RandomSubSampler
    Examples
    --------
    """
    def gen_samples(self, y, n_samples, problem_type):
        """Yield ``n_samples`` pairs of (train, calibration) index arrays."""
        n = y.size
        for _ in range(n_samples):
            all_idx = np.array(range(n))
            train = np.random.choice(n, n, replace=True)
            # Out-of-bag mask: everything not drawn into the bootstrap sample.
            oob = np.ones(n, dtype=bool)
            oob[train] = False
            yield train, all_idx[oob]
class CrossSampler(object):
    """Cross-fold sampler.

    Produces k-fold (train, calibration) splits; stratified by class for
    classification problems.
    See also
    --------
    BootstrapSampler, RandomSubSampler
    Examples
    --------
    """
    def gen_samples(self, y, n_samples, problem_type):
        """Yield one (train, calibration) index pair per fold."""
        if problem_type == 'classification':
            folds = StratifiedKFold(y, n_folds=n_samples)
        else:
            folds = KFold(y.size, n_folds=n_samples)
        yield from folds
class RandomSubSampler(object):
    """Random subsample sampler.
    Parameters
    ----------
    calibration_portion : float
        Ratio (0-1) of examples to use for calibration.
    See also
    --------
    BootstrapSampler, CrossSampler
    Examples
    --------
    """
    def __init__(self, calibration_portion=0.3):
        self.cal_portion = calibration_portion

    def gen_samples(self, y, n_samples, problem_type):
        """Yield ``n_samples`` random (train, calibration) splits."""
        if problem_type == 'classification':
            # Keep class proportions intact in both parts.
            splits = StratifiedShuffleSplit(y,
                                            n_iter=n_samples,
                                            test_size=self.cal_portion)
        else:
            splits = ShuffleSplit(y.size,
                                  n_iter=n_samples,
                                  test_size=self.cal_portion)
        yield from splits
# -----------------------------------------------------------------------------
# Conformal ensemble
# -----------------------------------------------------------------------------
class AggregatedCp(BaseEstimator):
    """Aggregated conformal predictor.
    Combines multiple IcpClassifier or IcpRegressor predictors into an
    aggregated model.
    Parameters
    ----------
    predictor : object
        Prototype conformal predictor (e.g. IcpClassifier or IcpRegressor)
        used for defining conformal predictors included in the aggregate model.
    sampler : object
        Sampler object used to generate training and calibration examples
        for the underlying conformal predictors.
    aggregation_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors. Defaults to ``numpy.mean``.
    n_models : int
        Number of models to aggregate.
    Attributes
    ----------
    predictor : object
        Prototype conformal predictor.
    predictors : list
        List of underlying conformal predictors.
    sampler : object
        Sampler object used to generate training and calibration examples.
    agg_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors
    References
    ----------
    .. [1] Vovk, V. (2013). Cross-conformal predictors. Annals of Mathematics
        and Artificial Intelligence, 1-20.
    .. [2] Carlsson, L., Eklund, M., & Norinder, U. (2014). Aggregated
        Conformal Prediction. In Artificial Intelligence Applications and
        Innovations (pp. 231-240). Springer Berlin Heidelberg.
    Examples
    --------
    """
    # NOTE(review): the default ``sampler=BootstrapSampler()`` is evaluated
    # once at definition time, so all instances created without an explicit
    # sampler share one sampler object. BootstrapSampler keeps no state in
    # this file, so this looks benign -- confirm before adding state to it.
    def __init__(self,
                 predictor,
                 sampler=BootstrapSampler(),
                 aggregation_func=None,
                 n_models=10):
        self.predictors = []
        self.n_models = n_models
        self.predictor = predictor
        self.sampler = sampler
        if aggregation_func is not None:
            self.agg_func = aggregation_func
        else:
            # Default aggregation: average over axis 2, the model axis of
            # the dstack-ed prediction array built in predict().
            self.agg_func = lambda x: np.mean(x, axis=2)
    def fit(self, x, y):
        """Fit underlying conformal predictors.
        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples for fitting the underlying conformal predictors.
        y : numpy array of shape [n_samples]
            Outputs of examples for fitting the underlying conformal predictors.
        Returns
        -------
        None
        """
        self.n_train = y.size
        self.predictors = []
        # Shuffle once up front so samplers that split sequentially
        # (e.g. k-fold) still see a random ordering.
        idx = np.random.permutation(y.size)
        x, y = x[idx, :], y[idx]
        problem_type = self.predictor.__class__.get_problem_type()
        samples = self.sampler.gen_samples(y,
                                           self.n_models,
                                           problem_type)
        # One clone of the prototype per (train, cal) split: fit on the
        # training part, calibrate on the held-out part.
        for train, cal in samples:
            predictor = clone(self.predictor)
            predictor.fit(x[train, :], y[train])
            predictor.calibrate(x[cal, :], y[cal])
            self.predictors.append(predictor)
        if problem_type == 'classification':
            self.classes = self.predictors[0].classes
    def predict(self, x, significance=None):
        """Predict the output values for a set of input patterns.
        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patters for which to predict output values.
        significance : float or None
            Significance level (maximum allowed error rate) of predictions.
            Should be a float between 0 and 1. If ``None``, then the p-values
            are output rather than the predictions. Note: ``significance=None``
            is applicable to classification problems only.
        Returns
        -------
        p : numpy array of shape [n_samples, n_classes] or [n_samples, 2]
            For classification problems: If significance is ``None``, then p
            contains the p-values for each sample-class pair; if significance
            is a float between 0 and 1, then p is a boolean array denoting
            which labels are included in the prediction sets.
            For regression problems: Prediction interval (minimum and maximum
            boundaries) for the set of test patterns.
        """
        is_regression =\
            self.predictor.__class__.get_problem_type() == 'regression'
        n_examples = x.shape[0]
        if is_regression and significance is None:
            # Regression without a fixed significance level: build the full
            # interval curve, one [low, high] pair per significance level
            # in 0.01 .. 0.99.
            signs = np.arange(0.01, 1.0, 0.01)
            pred = np.zeros((n_examples, 2, signs.size))
            for i, s in enumerate(signs):
                predictions = np.dstack([p.predict(x, s)
                                         for p in self.predictors])
                predictions = self.agg_func(predictions)
                pred[:, :, i] = predictions
            return pred
        else:
            def f(p, x):
                return p.predict(x, significance if is_regression else None)
            # Stack each underlying model's prediction along axis 2 and let
            # agg_func (mean by default) collapse that axis.
            predictions = np.dstack([f(p, x) for p in self.predictors])
            predictions = self.agg_func(predictions)
            if significance and not is_regression:
                return predictions >= significance
            else:
                return predictions
class CrossConformalClassifier(AggregatedCp):
    """Cross-conformal classifier.
    Combines multiple IcpClassifiers into a cross-conformal classifier.
    Parameters
    ----------
    predictor : object
        Prototype conformal predictor (e.g. IcpClassifier or IcpRegressor)
        used for defining conformal predictors included in the aggregate model.
    aggregation_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors. Defaults to ``numpy.mean``.
    n_models : int
        Number of models to aggregate.
    Attributes
    ----------
    predictor : object
        Prototype conformal predictor.
    predictors : list
        List of underlying conformal predictors.
    sampler : object
        Sampler object used to generate training and calibration examples.
    agg_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors
    References
    ----------
    .. [1] Vovk, V. (2013). Cross-conformal predictors. Annals of Mathematics
        and Artificial Intelligence, 1-20.
    Examples
    --------
    """
    def __init__(self,
                 predictor,
                 n_models=10):
        # Bug fix: ``n_models`` was previously passed as the third
        # positional argument of AggregatedCp.__init__, which is
        # ``aggregation_func`` -- so the requested model count was silently
        # ignored (and ``agg_func`` became an int). Pass it by keyword.
        super(CrossConformalClassifier, self).__init__(predictor,
                                                       CrossSampler(),
                                                       n_models=n_models)
    def predict(self, x, significance=None):
        """Return cross-conformal p-values (or boolean prediction sets at
        ``significance``) by pooling the per-model (ncal, ngt, neq)
        statistics before computing a single p-value per sample/class.
        """
        ncal_ngt_neq = np.stack([p._get_stats(x) for p in self.predictors],
                                axis=3)
        # Sum the statistics over the model axis.
        ncal_ngt_neq = ncal_ngt_neq.sum(axis=3)
        p = calc_p(ncal_ngt_neq[:, :, 0],
                   ncal_ngt_neq[:, :, 1],
                   ncal_ngt_neq[:, :, 2],
                   smoothing=self.predictors[0].smoothing)
        if significance:
            return p > significance
        else:
            return p
class BootstrapConformalClassifier(AggregatedCp):
    """Bootstrap conformal classifier.
    Combines multiple IcpClassifiers into a bootstrap conformal classifier.
    Parameters
    ----------
    predictor : object
        Prototype conformal predictor (e.g. IcpClassifier or IcpRegressor)
        used for defining conformal predictors included in the aggregate model.
    aggregation_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors. Defaults to ``numpy.mean``.
    n_models : int
        Number of models to aggregate.
    Attributes
    ----------
    predictor : object
        Prototype conformal predictor.
    predictors : list
        List of underlying conformal predictors.
    sampler : object
        Sampler object used to generate training and calibration examples.
    agg_func : callable
        Function used to aggregate the predictions of the underlying
        conformal predictors
    References
    ----------
    .. [1] Vovk, V. (2013). Cross-conformal predictors. Annals of Mathematics
        and Artificial Intelligence, 1-20.
    Examples
    --------
    """
    def __init__(self,
                 predictor,
                 n_models=10):
        # Bug fix: ``n_models`` was previously passed as the third
        # positional argument of AggregatedCp.__init__, which is
        # ``aggregation_func`` -- so the requested model count was silently
        # ignored (and ``agg_func`` became an int). Pass it by keyword.
        super(BootstrapConformalClassifier, self).__init__(predictor,
                                                           BootstrapSampler(),
                                                           n_models=n_models)
    def predict(self, x, significance=None):
        """Return bootstrap-conformal p-values (or boolean prediction sets
        at ``significance``) from the pooled per-model statistics, with an
        ``ncal / n_train``-based correction applied to ncal and ngt.
        """
        ncal_ngt_neq = np.stack([p._get_stats(x) for p in self.predictors],
                                axis=3)
        # Sum the statistics over the model axis.
        ncal_ngt_neq = ncal_ngt_neq.sum(axis=3)
        p = calc_p(ncal_ngt_neq[:, :, 0] + ncal_ngt_neq[:, :, 0] / self.n_train,
                   ncal_ngt_neq[:, :, 1] + ncal_ngt_neq[:, :, 0] / self.n_train,
                   ncal_ngt_neq[:, :, 2],
                   smoothing=self.predictors[0].smoothing)
        if significance:
            return p > significance
        else:
            return p
| 10,138 | 26.402703 | 79 | py |
cqr | cqr-master/get_meps_data/meps_dataset_panel19_fy2015_reg.py | # This code is a variant of
# https://github.com/IBM/AIF360/blob/master/aif360/datasets/meps_dataset_panel19_fy2015.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pandas as pd
from save_dataset import SaveDataset
# Maps the binary protected-attribute encoding back to readable labels
# (1.0 -> 'White', 0.0 -> 'Non-White'); consumed via the ``metadata``
# default argument of MEPSDataset19Reg below.
default_mappings = {
    'protected_attribute_maps': [{1.0: 'White', 0.0: 'Non-White'}]
}
def default_preprocessing(df):
    """
    1.Create a new column, RACE that is 'White' if RACEV2X = 1 and HISPANX = 2 i.e. non Hispanic White
       and 'non-White' otherwise
    2. Restrict to Panel 19
    3. RENAME all columns that are PANEL/ROUND SPECIFIC
    4. Drop rows based on certain values of individual features that correspond to missing/unknown - generally < -1
    5. Compute UTILIZATION
    """
    # Step 1: derive the binary RACE column from RACEV2X/HISPANX.
    def race(row):
        if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)):  #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE
            return 'White'
        return 'Non-White'
    df['RACEV2X'] = df.apply(lambda row: race(row), axis=1)
    df = df.rename(columns = {'RACEV2X' : 'RACE'})
    # Step 2: keep Panel 19 respondents only.
    df = df[df['PANEL'] == 19]
    # Step 3: RENAME COLUMNS (strip the round-53 / year-15 suffixes).
    df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',
                              'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',
                              'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',
                              'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',
                              'POVCAT15' : 'POVCAT', 'INSCOV15' : 'INSCOV'})
    # Step 4: drop rows whose feature values encode missing/unknown data.
    df = df[df['REGION'] >= 0] # remove values -1
    df = df[df['AGE'] >= 0] # remove values -1
    df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9
    df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9
    df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',
                 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
                 'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1
    df = df[(df[['OBTOTV15', 'OPTOTV15', 'ERTOT15', 'IPNGTD15', 'HHTOTD15']]>=0).all(1)]
    # Step 5: UTILIZATION = sum of the five utilization-count columns
    # (office visits, outpatient visits, ER visits, inpatient nights,
    # home-health days -- per the column names; verify against the MEPS
    # codebook).
    def utilization(row):
        return row['OBTOTV15'] + row['OPTOTV15'] + row['ERTOT15'] + row['IPNGTD15'] + row['HHTOTD15']
    df['TOTEXP15'] = df.apply(lambda row: utilization(row), axis=1)
    # The summed count becomes the regression target column.
    df = df.rename(columns = {'TOTEXP15' : 'UTILIZATION_reg'})
    return df
class MEPSDataset19Reg(SaveDataset):
    """MEPS Dataset (Panel 19, regression variant).

    The label ``UTILIZATION_reg`` is the summed utilization count produced
    by ``default_preprocessing`` rather than a binarized class.
    """
    # NOTE(review): the list-valued defaults below are shared mutable
    # objects created once at class definition; they are only read here,
    # so this looks benign -- confirm before any caller mutates them.
    def __init__(self, label_name='UTILIZATION_reg', favorable_classes=[1.0],
                 protected_attribute_names=['RACE'],
                 privileged_classes=[['White']],
                 instance_weights_name='PERWT15F',
                 categorical_features=['REGION','SEX','MARRY',
                     'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
                     'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                     'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                     'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
                     'PHQ242','EMPST','POVCAT','INSCOV'],
                 features_to_keep=['REGION','AGE','SEX','RACE','MARRY',
                     'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
                     'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                     'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                     'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42','PCS42',
                     'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION_reg','PERWT15F'],
                 features_to_drop=[],
                 na_values=[], custom_preprocessing=default_preprocessing,
                 metadata=default_mappings):
        # Reads the raw MEPS release from the working directory
        # (presumably the HC-181 file -- confirm against the data docs).
        filepath = './h181.csv'
        df = pd.read_csv(filepath, sep=',', na_values=na_values)
        # Delegate all filtering/encoding to SaveDataset with the
        # preprocessing function defined above.
        super(MEPSDataset19Reg, self).__init__(df=df, label_name=label_name,
            favorable_classes=favorable_classes,
            protected_attribute_names=protected_attribute_names,
            privileged_classes=privileged_classes,
            instance_weights_name=instance_weights_name,
            categorical_features=categorical_features,
            features_to_keep=features_to_keep,
            features_to_drop=features_to_drop, na_values=na_values,
            custom_preprocessing=custom_preprocessing, metadata=metadata, dataset_name='meps_19_reg')
| 5,141 | 48.442308 | 137 | py |
cqr | cqr-master/get_meps_data/base_dataset.py | # Code copied from IBM's AIF360 package:
# https://github.com/IBM/AIF360/blob/master/aif360/datasets/dataset.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import copy
import sys
# Python 2/3 compatible abstract base: ``abc.ABC`` exists from 3.4 on;
# older interpreters get an equivalent empty class built via the metaclass.
ABC = (abc.ABC if sys.version_info >= (3, 4)
       else abc.ABCMeta(str('ABC'), (), {}))
class BaseDataset(ABC):
    """Abstract base class for datasets."""
    @abc.abstractmethod
    def __init__(self, **kwargs):
        """Record construction metadata and run validation."""
        meta = kwargs.pop('metadata', dict()) or dict()
        meta.update({
            'transformer': '{}.__init__'.format(type(self).__name__),
            'params': kwargs,
            'previous': []
        })
        self.metadata = meta
        self.validate_dataset()

    def validate_dataset(self):
        """Error checking and type validation."""
        pass

    def copy(self, deepcopy=False):
        """Convenience method to return a copy of this dataset.
        Args:
            deepcopy (bool, optional): :func:`~copy.deepcopy` this dataset if
                `True`, shallow copy otherwise.
        Returns:
            Dataset: A new dataset with fields copied from this object and
            metadata set accordingly.
        """
        duplicate = copy.deepcopy(self) if deepcopy else copy.copy(self)
        # preserve any user-created fields: give the copy its own metadata
        # dict before recording the provenance of this copy operation.
        duplicate.metadata = dict(duplicate.metadata)
        duplicate.metadata.update({
            'transformer': '{}.copy'.format(type(self).__name__),
            'params': {'deepcopy': deepcopy},
            'previous': [self]
        })
        return duplicate

    @abc.abstractmethod
    def export_dataset(self):
        """Save this Dataset to disk."""
        raise NotImplementedError

    @abc.abstractmethod
    def split(self, num_or_size_splits, shuffle=False):
        """Split this dataset into multiple partitions.
        Args:
            num_or_size_splits (array or int): If an int, *k*, the number of
                equal-sized folds to make (approximately equal-sized if *k*
                does not evenly divide the dataset). If an array of ints, the
                indices at which to split the dataset; float values (< 1.)
                are treated as fractional proportions of the dataset.
            shuffle (bool, optional): Randomly shuffle the dataset before
                splitting.
        Returns:
            list(Dataset): Splits. Contains *k* or `len(num_or_size_splits) + 1`
            datasets depending on `num_or_size_splits`.
        """
        raise NotImplementedError
| 2,782 | 32.53012 | 80 | py |
cqr | cqr-master/get_meps_data/meps_dataset_panel21_fy2016_reg.py | # This code is a variant of
# https://github.com/IBM/AIF360/blob/master/aif360/datasets/meps_dataset_panel21_fy2016.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pandas as pd
#from standard_dataset import StandardDataset
from save_dataset import SaveDataset
# Maps the binarized protected-attribute values (1.0/0.0) back to the
# human-readable race categories produced by default_preprocessing below.
default_mappings = {
    'protected_attribute_maps': [{1.0: 'White', 0.0: 'Non-White'}]
}
def default_preprocessing(df):
    """
    1. Create a new column, RACE, that is 'White' if RACEV2X = 1 and
       HISPANX = 2 (i.e. non-Hispanic White) and 'Non-White' otherwise
    2. Restrict to Panel 21
    3. Rename all columns that are panel/round specific
    4. Drop rows based on values of individual features that correspond
       to missing/unknown - generally < -1
    5. Compute UTILIZATION_reg as the sum of the five utilization counts
       (kept numeric as a regression target; unlike the classification
       variant, no binarization happens here)
    """
    def race(row):
        if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)):  #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE
            return 'White'
        return 'Non-White'
    df['RACEV2X'] = df.apply(lambda row: race(row), axis=1)
    df = df.rename(columns = {'RACEV2X' : 'RACE'})
    df = df[df['PANEL'] == 21]
    # RENAME COLUMNS (strip the round-53 / year-16 suffixes)
    df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',
                              'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',
                              'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',
                              'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',
                              'POVCAT16' : 'POVCAT', 'INSCOV16' : 'INSCOV'})
    df = df[df['REGION'] >= 0] # remove values -1
    df = df[df['AGE'] >= 0] # remove values -1
    df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9
    df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9
    df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',
                 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
                 'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1
    # Keep only rows where all five utilization counts are non-negative.
    df = df[(df[['OBTOTV16', 'OPTOTV16', 'ERTOT16', 'IPNGTD16', 'HHTOTD16']]>=0).all(1)]
    def utilization(row):
        # Total = office-based + outpatient + ER + inpatient nights + home-health days.
        return row['OBTOTV16'] + row['OPTOTV16'] + row['ERTOT16'] + row['IPNGTD16'] + row['HHTOTD16']
    df['TOTEXP16'] = df.apply(lambda row: utilization(row), axis=1)
    df = df.rename(columns = {'TOTEXP16' : 'UTILIZATION_reg'})
    return df
class MEPSDataset21Reg(SaveDataset):
    """MEPS Panel 21 (fiscal year 2016) dataset with a regression target.

    Loads the raw MEPS HC-192 file (``h192.csv``), applies
    :func:`default_preprocessing`, and hands the result to
    :class:`SaveDataset` under the name ``meps_21_reg``.  The label
    ``UTILIZATION_reg`` is the total utilization count; ``RACE`` is the
    protected attribute with 'White' as the privileged class.
    """
    def __init__(self, label_name='UTILIZATION_reg', favorable_classes=[1.0],
                 protected_attribute_names=['RACE'],
                 privileged_classes=[['White']],
                 instance_weights_name='PERWT16F',
                 categorical_features=['REGION','SEX','MARRY',
                     'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
                     'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                     'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                     'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42', 'ADSMOK42', 'PHQ242',
                     'EMPST','POVCAT','INSCOV'],
                 features_to_keep=['REGION','AGE','SEX','RACE','MARRY',
                     'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
                     'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                     'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                     'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
                     'PCS42',
                     'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION_reg', 'PERWT16F'],
                 features_to_drop=[],
                 na_values=[], custom_preprocessing=default_preprocessing,
                 metadata=default_mappings):
        # NOTE(review): assumes h192.csv is in the current working
        # directory -- confirm against the download scripts.
        filepath = './h192.csv'
        df = pd.read_csv(filepath, sep=',', na_values=na_values)
        super(MEPSDataset21Reg, self).__init__(df=df, label_name=label_name,
            favorable_classes=favorable_classes,
            protected_attribute_names=protected_attribute_names,
            privileged_classes=privileged_classes,
            instance_weights_name=instance_weights_name,
            categorical_features=categorical_features,
            features_to_keep=features_to_keep,
            features_to_drop=features_to_drop, na_values=na_values,
            custom_preprocessing=custom_preprocessing, metadata=metadata, dataset_name='meps_21_reg')
| 5,262 | 49.12381 | 137 | py |
cqr | cqr-master/get_meps_data/structured_dataset.py | # Code copied from IBM's AIF360 package
# https://github.com/IBM/AIF360/blob/master/aif360/datasets/structured_dataset.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
from contextlib import contextmanager
from copy import deepcopy
from logging import warning
import numpy as np
import pandas as pd
from base_dataset import BaseDataset
class StructuredDataset(BaseDataset):
"""Base class for all structured datasets.
A StructuredDataset requires data to be stored in :obj:`numpy.ndarray`
objects with :obj:`~numpy.dtype` as :obj:`~numpy.float64`.
Attributes:
features (numpy.ndarray): Dataset features for each instance.
labels (numpy.ndarray): Generic label corresponding to each instance
(could be ground-truth, predicted, cluster assignments, etc.).
scores (numpy.ndarray): Probability score associated with each label.
Same shape as `labels`. Only valid for binary labels (this includes
one-hot categorical labels as well).
protected_attributes (numpy.ndarray): A subset of `features` for which
fairness is desired.
feature_names (list(str)): Names describing each dataset feature.
label_names (list(str)): Names describing each label.
protected_attribute_names (list(str)): A subset of `feature_names`
corresponding to `protected_attributes`.
privileged_protected_attributes (list(numpy.ndarray)): A subset of
protected attribute values which are considered privileged from a
fairness perspective.
unprivileged_protected_attributes (list(numpy.ndarray)): The remaining
possible protected attribute values which are not included in
`privileged_protected_attributes`.
instance_names (list(str)): Indentifiers for each instance. Sequential
integers by default.
instance_weights (numpy.ndarray): Weighting for each instance. All
equal (ones) by default. Pursuant to standard practice in social
science data, 1 means one person or entity. These weights are hence
person or entity multipliers (see:
https://www.ibm.com/support/knowledgecenter/en/SS3RA7_15.0.0/com.ibm.spss.modeler.help/netezza_decisiontrees_weights.htm)
These weights *may not* be normalized to sum to 1 across the entire
dataset, rather the nominal (default) weight of each entity/record
in the data is 1. This is similar in spirit to the person weight in
census microdata samples.
https://www.census.gov/programs-surveys/acs/technical-documentation/pums/about.html
ignore_fields (set(str)): Attribute names to ignore when doing equality
comparisons. Always at least contains `'metadata'`.
metadata (dict): Details about the creation of this dataset. For
example::
{
'transformer': 'Dataset.__init__',
'params': kwargs,
'previous': None
}
"""
def __init__(self, df, label_names, protected_attribute_names,
instance_weights_name=None, scores_names=[],
unprivileged_protected_attributes=[],
privileged_protected_attributes=[], metadata=None):
"""
Args:
df (pandas.DataFrame): Input DataFrame with features, labels, and
protected attributes. Values should be preprocessed
to remove NAs and make all data numerical. Index values are
taken as instance names.
label_names (iterable): Names of the label columns in `df`.
protected_attribute_names (iterable): List of names corresponding to
protected attribute columns in `df`.
instance_weights_name (optional): Column name in `df` corresponding
to instance weights. If not provided, `instance_weights` will be
all set to 1.
unprivileged_protected_attributes (optional): If not provided, all
but the highest numerical value of each protected attribute will
be considered not privileged.
privileged_protected_attributes (optional): If not provided, the
highest numerical value of each protected attribute will be
considered privileged.
metadata (optional): Additional metadata to append.
Raises:
TypeError: Certain fields must be np.ndarrays as specified in the
class description.
ValueError: ndarray shapes must match.
"""
if df is None:
raise TypeError("Must provide a pandas DataFrame representing "
"the data (features, labels, protected attributes)")
if df.isna().any().any():
raise ValueError("Input DataFrames cannot contain NA values.")
try:
df = df.astype(np.float64)
except ValueError as e:
print("ValueError: {}".format(e))
raise ValueError("DataFrame values must be numerical.")
# Convert all column names to strings
df.columns = df.columns.astype(str).tolist()
label_names = list(map(str, label_names))
protected_attribute_names = list(map(str, protected_attribute_names))
self.feature_names = [n for n in df.columns if n not in label_names
and (not scores_names or n not in scores_names)
and n != instance_weights_name]
self.label_names = label_names
self.features = df[self.feature_names].values.copy()
self.labels = df[self.label_names].values.copy()
self.instance_names = df.index.astype(str).tolist()
if scores_names:
self.scores = df[scores_names].values.copy()
else:
self.scores = self.labels.copy()
df_prot = df.loc[:, protected_attribute_names]
self.protected_attribute_names = df_prot.columns.astype(str).tolist()
self.protected_attributes = df_prot.values.copy()
# Infer the privileged and unprivileged values in not provided
if unprivileged_protected_attributes and privileged_protected_attributes:
self.unprivileged_protected_attributes = unprivileged_protected_attributes
self.privileged_protected_attributes = privileged_protected_attributes
else:
self.unprivileged_protected_attributes = [
np.sort(np.unique(df_prot[attr].values))[:-1]
for attr in self.protected_attribute_names]
self.privileged_protected_attributes = [
np.sort(np.unique(df_prot[attr].values))[-1:]
for attr in self.protected_attribute_names]
if instance_weights_name:
self.instance_weights = df[instance_weights_name].values.copy()
else:
self.instance_weights = np.ones_like(self.instance_names,
dtype=np.float64)
# always ignore metadata and ignore_fields
self.ignore_fields = {'metadata', 'ignore_fields'}
# sets metadata
super(StructuredDataset, self).__init__(df=df, label_names=label_names,
protected_attribute_names=protected_attribute_names,
instance_weights_name=instance_weights_name,
unprivileged_protected_attributes=unprivileged_protected_attributes,
privileged_protected_attributes=privileged_protected_attributes,
metadata=metadata)
def __eq__(self, other):
"""Equality comparison for StructuredDatasets.
Note: Compares all fields other than those specified in `ignore_fields`.
"""
if not isinstance(other, StructuredDataset):
return False
def _eq(x, y):
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
return np.all(x == y)
elif isinstance(x, list) and isinstance(y, list):
return len(x) == len(y) and all(_eq(xi, yi) for xi, yi in zip(x, y))
return x == y
return all(_eq(self.__dict__[k], other.__dict__[k])
for k in self.__dict__.keys() if k not in self.ignore_fields)
def __ne__(self, other):
return not self == other
def __repr__(self):
# return repr(self.metadata)
return str(self)
def __str__(self):
df, _ = self.convert_to_dataframe()
df.insert(0, 'instance_weights', self.instance_weights)
highest_level = ['instance weights'] + \
['features']*len(self.feature_names) + \
['labels']*len(self.label_names)
middle_level = [''] + \
['protected attribute'
if f in self.protected_attribute_names else ''
for f in self.feature_names] + \
['']*len(self.label_names)
lowest_level = [''] + self.feature_names + ['']*len(self.label_names)
df.columns = pd.MultiIndex.from_arrays(
[highest_level, middle_level, lowest_level])
df.index.name = 'instance names'
return str(df)
# TODO: *_names checks
def validate_dataset(self):
"""Error checking and type validation.
Raises:
TypeError: Certain fields must be np.ndarrays as specified in the
class description.
ValueError: ndarray shapes must match.
"""
super(StructuredDataset, self).validate_dataset()
# =========================== TYPE CHECKING ============================
for f in [self.features, self.protected_attributes, self.labels,
self.scores, self.instance_weights]:
if not isinstance(f, np.ndarray):
raise TypeError("'{}' must be an np.ndarray.".format(f.__name__))
# convert ndarrays to float64
self.features = self.features.astype(np.float64)
self.protected_attributes = self.protected_attributes.astype(np.float64)
self.labels = self.labels.astype(np.float64)
self.instance_weights = self.instance_weights.astype(np.float64)
# =========================== SHAPE CHECKING ===========================
if len(self.labels.shape) == 1:
self.labels = self.labels.reshape((-1, 1))
try:
self.scores.reshape(self.labels.shape)
except ValueError as e:
print("ValueError: {}".format(e))
raise ValueError("'scores' should have the same shape as 'labels'.")
if not self.labels.shape[0] == self.features.shape[0]:
raise ValueError("Number of labels must match number of instances:"
"\n\tlabels.shape = {}\n\tfeatures.shape = {}".format(
self.labels.shape, self.features.shape))
if not self.instance_weights.shape[0] == self.features.shape[0]:
raise ValueError("Number of weights must match number of instances:"
"\n\tinstance_weights.shape = {}\n\tfeatures.shape = {}".format(
self.instance_weights.shape, self.features.shape))
# =========================== VALUE CHECKING ===========================
if np.any(np.logical_or(self.scores < 0., self.scores > 1.)):
warning("'scores' has no well-defined meaning out of range [0, 1].")
for i in range(len(self.privileged_protected_attributes)):
priv = set(self.privileged_protected_attributes[i])
unpriv = set(self.unprivileged_protected_attributes[i])
# check for duplicates
if priv & unpriv:
raise ValueError("'privileged_protected_attributes' and "
"'unprivileged_protected_attributes' should not share any "
"common elements:\n\tBoth contain {} for feature {}".format(
list(priv & unpriv), self.protected_attribute_names[i]))
# check for unclassified values
if not set(self.protected_attributes[:, i]) <= (priv | unpriv):
raise ValueError("All observed values for protected attributes "
"should be designated as either privileged or unprivileged:"
"\n\t{} not designated for feature {}".format(
list(set(self.protected_attributes[:, i])
- (priv | unpriv)),
self.protected_attribute_names[i]))
# warn for unobserved values
if not (priv | unpriv) <= set(self.protected_attributes[:, i]):
warning("{} listed but not observed for feature {}".format(
list((priv | unpriv) - set(self.protected_attributes[:, i])),
self.protected_attribute_names[i]))
@contextmanager
def temporarily_ignore(self, *fields):
"""Temporarily add the fields provided to `ignore_fields`.
To be used in a `with` statement. Upon completing the `with` block,
`ignore_fields` is restored to its original value.
Args:
*fields: Additional fields to ignore for equality comparison within
the scope of this context manager, e.g.
`temporarily_ignore('features', 'labels')`. The temporary
`ignore_fields` attribute is the union of the old attribute and
the set of these fields.
Examples:
>>> sd = StructuredDataset(...)
>>> modified = sd.copy()
>>> modified.labels = sd.labels + 1
>>> assert sd != modified
>>> with sd.temporarily_ignore('labels'):
>>> assert sd == modified
>>> assert 'labels' not in sd.ignore_fields
"""
old_ignore = deepcopy(self.ignore_fields)
self.ignore_fields |= set(fields)
try:
yield
finally:
self.ignore_fields = old_ignore
def align_datasets(self, other):
"""Align the other dataset features, labels and protected_attributes to
this dataset.
Args:
other (StructuredDataset): Other dataset that needs to be aligned
Returns:
StructuredDataset: New aligned dataset
"""
if (set(self.feature_names) != set(other.feature_names) or
set(self.label_names) != set(other.label_names) or
set(self.protected_attribute_names)
!= set(other.protected_attribute_names)):
raise ValueError(
"feature_names, label_names, and protected_attribute_names "
"should match between this and other dataset.")
# New dataset
new = other.copy()
# re-order the columns of the new dataset
feat_inds = [new.feature_names.index(f) for f in self.feature_names]
label_inds = [new.label_names.index(f) for f in self.label_names]
prot_inds = [new.protected_attribute_names.index(f)
for f in self.protected_attribute_names]
new.features = new.features[:, feat_inds]
new.labels = new.labels[:, label_inds]
new.scores = new.scores[:, label_inds]
new.protected_attributes = new.protected_attributes[:, prot_inds]
new.privileged_protected_attributes = [
new.privileged_protected_attributes[i] for i in prot_inds]
new.unprivileged_protected_attributes = [
new.unprivileged_protected_attributes[i] for i in prot_inds]
new.feature_names = deepcopy(self.feature_names)
new.label_names = deepcopy(self.label_names)
new.protected_attribute_names = deepcopy(self.protected_attribute_names)
return new
# TODO: Should we store the protected attributes as a separate dataframe
def convert_to_dataframe(self, de_dummy_code=False, sep='=',
set_category=True):
"""Convert the StructuredDataset to a :obj:`pandas.DataFrame`.
Args:
de_dummy_code (bool): Performs de_dummy_coding, converting dummy-
coded columns to categories. If `de_dummy_code` is `True` and
this dataset contains mappings for label and/or protected
attribute values to strings in the `metadata`, this method will
convert those as well.
set_category (bool): Set the de-dummy coded features to categorical
type.
Returns:
(pandas.DataFrame, dict):
* `pandas.DataFrame`: Equivalent dataframe for a dataset. All
columns will have only numeric values. The
`protected_attributes` field in the dataset will override the
values in the `features` field.
* `dict`: Attributes. Will contain additional information pulled
from the dataset such as `feature_names`, `label_names`,
`protected_attribute_names`, `instance_names`,
`instance_weights`, `privileged_protected_attributes`,
`unprivileged_protected_attributes`. The metadata will not be
returned.
"""
df = pd.DataFrame(np.hstack((self.features, self.labels)),
columns=self.feature_names+self.label_names,
index=self.instance_names)
df.loc[:, self.protected_attribute_names] = self.protected_attributes
# De-dummy code if necessary
if de_dummy_code:
df = self._de_dummy_code_df(df, sep=sep, set_category=set_category)
if 'label_maps' in self.metadata:
for i, label in enumerate(self.label_names):
df[label] = df[label].replace(self.metadata['label_maps'][i])
if 'protected_attribute_maps' in self.metadata:
for i, prot_attr in enumerate(self.protected_attribute_names):
df[prot_attr] = df[prot_attr].replace(
self.metadata['protected_attribute_maps'][i])
# Attributes
attributes = {
"feature_names": self.feature_names,
"label_names": self.label_names,
"protected_attribute_names": self.protected_attribute_names,
"instance_names": self.instance_names,
"instance_weights": self.instance_weights,
"privileged_protected_attributes": self.privileged_protected_attributes,
"unprivileged_protected_attributes": self.unprivileged_protected_attributes
}
return df, attributes
def export_dataset(self, export_metadata=False):
"""
Export the dataset and supporting attributes
TODO: The preferred file format is HDF
"""
if export_metadata:
raise NotImplementedError("The option to export metadata has not been implemented yet")
return None
def import_dataset(self, import_metadata=False):
""" Import the dataset and supporting attributes
TODO: The preferred file format is HDF
"""
if import_metadata:
raise NotImplementedError("The option to import metadata has not been implemented yet")
return None
def split(self, num_or_size_splits, shuffle=False, seed=None):
"""Split the dataset into multiple datasets
Args:
num_or_size_splits (list or int):
shuffle (bool):
seed (int or array_like): takes the same argument as `numpy.random.seed()`
function
Returns:
list: Each element of this list is a dataset obtained during the split
"""
# Set seed
if seed is not None:
np.random.seed(seed)
n = self.features.shape[0]
if isinstance(num_or_size_splits, list):
num_folds = len(num_or_size_splits) + 1
if num_folds > 1 and all(x <= 1. for x in num_or_size_splits):
num_or_size_splits = [int(x * n) for x in num_or_size_splits]
else:
num_folds = num_or_size_splits
order = list(np.random.permutation(n) if shuffle else range(n))
folds = [self.copy() for _ in range(num_folds)]
features = np.array_split(self.features[order], num_or_size_splits)
labels = np.array_split(self.labels[order], num_or_size_splits)
scores = np.array_split(self.scores[order], num_or_size_splits)
protected_attributes = np.array_split(self.protected_attributes[order],
num_or_size_splits)
instance_weights = np.array_split(self.instance_weights[order],
num_or_size_splits)
instance_names = np.array_split(np.array(self.instance_names)[order],
num_or_size_splits)
for fold, feats, labs, scors, prot_attrs, inst_wgts, inst_name in zip(
folds, features, labels, scores, protected_attributes, instance_weights,
instance_names):
fold.features = feats
fold.labels = labs
fold.scores = scors
fold.protected_attributes = prot_attrs
fold.instance_weights = inst_wgts
fold.instance_names = list(map(str, inst_name))
fold.metadata = fold.metadata.copy()
fold.metadata.update({
'transformer': '{}.split'.format(type(self).__name__),
'params': {'num_or_size_splits': num_or_size_splits,
'shuffle': shuffle},
'previous': [self]
})
return folds
@staticmethod
def _de_dummy_code_df(df, sep="=", set_category=False):
"""De-dummy code a dummy-coded dataframe obtained with pd.get_dummies().
After reversing dummy coding the corresponding fields will be converted
to categorical.
Args:
df (pandas.DataFrame): Input dummy coded dataframe
sep (char): Separator between base name and dummy code
set_category (bool): Set the de-dummy coded features
to categorical type
Examples:
>>> columns = ["Age", "Gender=Male", "Gender=Female"]
>>> df = pd.DataFrame([[10, 1, 0], [20, 0, 1]], columns=columns)
>>> _de_dummy_code_df(df, sep="=")
Age Gender
0 10 Male
1 20 Female
"""
feature_names_dum_d, feature_names_nodum = \
StructuredDataset._parse_feature_names(df.columns)
df_new = pd.DataFrame(index=df.index,
columns=feature_names_nodum + list(feature_names_dum_d.keys()))
for fname in feature_names_nodum:
df_new[fname] = df[fname].values.copy()
for fname, vl in feature_names_dum_d.items():
for v in vl:
df_new.loc[df[fname+sep+str(v)] == 1, fname] = str(v)
if set_category:
for fname in feature_names_dum_d.keys():
df_new[fname] = df_new[fname].astype('category')
return df_new
@staticmethod
def _parse_feature_names(feature_names, sep="="):
"""Parse feature names to ordinary and dummy coded candidates.
Args:
feature_names (list): Names of features
sep (char): Separator to designate the dummy coded category in the
feature name
Returns:
(dict, list):
* feature_names_dum_d (dict): Keys are the base feature names
and values are the categories.
* feature_names_nodum (list): Non-dummy coded feature names.
Examples:
>>> feature_names = ["Age", "Gender=Male", "Gender=Female"]
>>> StructuredDataset._parse_feature_names(feature_names, sep="=")
(defaultdict(<type 'list'>, {'Gender': ['Male', 'Female']}), ['Age'])
"""
feature_names_dum_d = defaultdict(list)
feature_names_nodum = list()
for fname in feature_names:
if sep in fname:
fname_dum, v = fname.split(sep, 1)
feature_names_dum_d[fname_dum].append(v)
else:
feature_names_nodum.append(fname)
return feature_names_dum_d, feature_names_nodum
| 24,495 | 43.70073 | 133 | py |
cqr | cqr-master/get_meps_data/save_dataset.py | # Code copied from IBM's AIF360 package
# https://github.com/IBM/AIF360/blob/master/aif360/datasets/standard_dataset.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from logging import warning
import numpy as np
import pandas as pd
from regression_dataset import RegressionDataset
class SaveDataset(RegressionDataset):
    """Preprocess a raw DataFrame and save the result to ``<dataset_name>.csv``.

    Follows the aif360 ``StandardDataset`` preprocessing recipe (custom
    preprocessing, column sub-setting, NA removal, one-hot encoding, mapping
    protected attributes to binary privileged/unprivileged values) but,
    instead of constructing an in-memory dataset object, writes the processed
    DataFrame to disk.

    It is not strictly necessary to inherit this class when adding custom
    datasets but it may be useful.
    This class is very loosely based on code from
    https://github.com/algofairness/fairness-comparison.
    """
    def __init__(self, df, label_name, favorable_classes,
                 protected_attribute_names, privileged_classes,
                 instance_weights_name='', scores_name='',
                 categorical_features=[], features_to_keep=[],
                 features_to_drop=[], na_values=[], custom_preprocessing=None,
                 metadata=None, dataset_name='my_data'):
        """
        Subclasses of StandardDataset should perform the following before
        calling `super().__init__`:

        1. Load the dataframe from a raw file.

        Then, this class will go through a standard preprocessing routine
        which:

        2. (optional) Performs some dataset-specific preprocessing (e.g.
           renaming columns/values, handling missing data).
        3. Drops unrequested columns (see `features_to_keep` and
           `features_to_drop` for details).
        4. Drops rows with NA values.
        5. Creates a one-hot encoding of the categorical variables.
        6. Maps protected attributes to binary privileged/unprivileged
           values (1/0).

        Args:
            df (pandas.DataFrame): DataFrame on which to perform standard
                processing.
            label_name: Name of the label column in `df`.
            favorable_classes (list or function): Label values which are
                considered favorable or a boolean function which returns
                `True` if favorable. All others are unfavorable. Label values
                are mapped to 1 (favorable) and 0 (unfavorable) if they are
                not already binary and numerical.
            protected_attribute_names (list): List of names corresponding to
                protected attribute columns in `df`.
            privileged_classes (list(list or function)): Each element is
                a list of values which are considered privileged or a boolean
                function which return `True` if privileged for the
                corresponding column in `protected_attribute_names`. All
                others are unprivileged. Values are mapped to 1 (privileged)
                and 0 (unprivileged) if they are not already numerical.
            instance_weights_name (optional): Name of the instance weights
                column in `df`.
            categorical_features (optional, list): List of column names in the
                DataFrame which are to be expanded into one-hot vectors.
            features_to_keep (optional, list): Column names to keep. All
                others are dropped except those present in
                `protected_attribute_names`, `categorical_features`,
                `label_name` or `instance_weights_name`. Defaults to all
                columns if not provided.
            features_to_drop (optional, list): Column names to drop. *Note:
                this overrides* `features_to_keep`.
            na_values (optional): Additional strings to recognize as NA. See
                :func:`pandas.read_csv` for details.
            custom_preprocessing (function): A function object which
                acts on and returns a DataFrame (f: DataFrame -> DataFrame).
                If `None`, no extra preprocessing is applied.
            metadata (optional): Additional metadata to append.
        """
        # 2. Perform dataset-specific preprocessing
        if custom_preprocessing:
            df = custom_preprocessing(df)

        # 3. Drop unrequested columns
        features_to_keep = features_to_keep or df.columns.tolist()
        keep = (set(features_to_keep) | set(protected_attribute_names)
                | set(categorical_features) | set([label_name]))
        if instance_weights_name:
            keep |= set([instance_weights_name])
        # Sorting by get_loc preserves the original column order.
        df = df[sorted(keep - set(features_to_drop), key=df.columns.get_loc)]
        categorical_features = sorted(set(categorical_features) - set(features_to_drop), key=df.columns.get_loc)

        # 4. Remove any rows that have missing data.
        dropped = df.dropna()
        count = df.shape[0] - dropped.shape[0]
        if count > 0:
            warning("Missing Data: {} rows removed from {}.".format(count,
                type(self).__name__))
        df = dropped

        # 5. Create a one-hot encoding of the categorical variables.
        df = pd.get_dummies(df, columns=categorical_features, prefix_sep='=')

        # 6. Map protected attributes to privileged/unprivileged
        privileged_protected_attributes = []
        unprivileged_protected_attributes = []
        for attr, vals in zip(protected_attribute_names, privileged_classes):
            privileged_values = [1.]
            unprivileged_values = [0.]
            if callable(vals):
                df[attr] = df[attr].apply(vals)
            elif np.issubdtype(df[attr].dtype, np.number):
                # this attribute is numeric; no remapping needed
                privileged_values = vals
                unprivileged_values = list(set(df[attr]).difference(vals))
            else:
                # find all instances which match any of the attribute values
                priv = np.array([ ( el in vals ) for el in df[attr] ])
                df.loc[priv, attr] = privileged_values[0]
                df.loc[~priv, attr] = unprivileged_values[0]
            privileged_protected_attributes.append(
                np.array(privileged_values, dtype=np.float64))
            unprivileged_protected_attributes.append(
                np.array(unprivileged_values, dtype=np.float64))

        # NOTE(review): unlike aif360's StandardDataset, this __init__ never
        # calls super().__init__ -- it only writes the processed frame to
        # disk, so none of the RegressionDataset fields are populated.
        full_name = dataset_name + ".csv"
        print("writing file: " + full_name)
        df.to_csv(full_name)
| 6,412 | 45.136691 | 112 | py |
cqr | cqr-master/get_meps_data/regression_dataset.py | # Code copied from IBM's AIF360 package
# https://github.com/IBM/AIF360/blob/master/aif360/datasets/binary_label_dataset.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from structured_dataset import StructuredDataset
class RegressionDataset(StructuredDataset):
    """Base class for structured datasets used as regression targets.

    Adapted from aif360's ``BinaryLabelDataset``; the favorable/unfavorable
    label machinery is retained for interface compatibility.
    """

    def __init__(self, favorable_label=1., unfavorable_label=0., **kwargs):
        """
        Args:
            favorable_label (float): Label value which is considered favorable
                (i.e. "positive").
            unfavorable_label (float): Label value which is considered
                unfavorable (i.e. "negative").
            **kwargs: StructuredDataset arguments.
        """
        self.favorable_label = float(favorable_label)
        self.unfavorable_label = float(unfavorable_label)

        super(RegressionDataset, self).__init__(**kwargs)

    def validate_dataset(self):
        """Error checking and type validation.

        Raises:
            ValueError: `labels` must be shape [n, 1].
            ValueError: `favorable_label` and `unfavorable_label` must be the
                only values present in `labels`.
        """
        super(RegressionDataset, self).validate_dataset()

        # =========================== SHAPE CHECKING ===========================
        # Verify if the labels are only 1 column
        if self.labels.shape[1] != 1:
            # BUG FIX: the message previously named 'BinaryLabelDataset',
            # the aif360 class this file was adapted from.
            raise ValueError("RegressionDataset only supports single-column "
                "labels:\n\tlabels.shape = {}".format(self.labels.shape))

        # =========================== VALUE CHECKING ===========================
        # Check if the favorable and unfavorable labels match those in the
        # dataset. NOTE(review): for a continuous regression target this check
        # would always fail; it appears to be retained from the binary-label
        # original -- confirm whether validate_dataset is ever invoked here.
        if (not set(self.labels.ravel()) <=
                set([self.favorable_label, self.unfavorable_label])):
            raise ValueError("The favorable and unfavorable labels provided do "
                "not match the labels in the dataset.")

        # If scores defaulted to a copy of labels, binarize them to {0, 1}
        # indicators of the favorable label.
        if np.all(self.scores == self.labels):
            self.scores = (self.scores == self.favorable_label).astype(np.float64)
| 2,259 | 39.357143 | 83 | py |
cqr | cqr-master/get_meps_data/meps_dataset_panel20_fy2015_reg.py | # This code is a variant of
# https://github.com/IBM/AIF360/blob/master/aif360/datasets/meps_dataset_panel20_fy2015.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pandas as pd
#from standard_datasets import StandardDataset
from save_dataset import SaveDataset
# Maps the binarized protected-attribute values (1.0/0.0) back to the
# human-readable race categories produced by default_preprocessing below.
default_mappings = {
    'protected_attribute_maps': [{1.0: 'White', 0.0: 'Non-White'}]
}
def default_preprocessing(df):
    """
    1. Create a new column, RACE, that is 'White' if RACEV2X = 1 and
       HISPANX = 2 (i.e. non-Hispanic White) and 'Non-White' otherwise
    2. Restrict to Panel 20
    3. Rename all columns that are panel/round specific
    4. Drop rows based on values of individual features that correspond
       to missing/unknown - generally < -1
    5. Compute UTILIZATION_reg as the sum of the five utilization counts
       (kept numeric as a regression target; unlike the classification
       variant, no binarization happens here)
    """
    def race(row):
        if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)):  #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE
            return 'White'
        return 'Non-White'
    df['RACEV2X'] = df.apply(lambda row: race(row), axis=1)
    df = df.rename(columns = {'RACEV2X' : 'RACE'})
    df = df[df['PANEL'] == 20]
    # RENAME COLUMNS (strip the round-53 / year-15 suffixes)
    df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',
                              'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',
                              'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',
                              'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',
                              'POVCAT15' : 'POVCAT', 'INSCOV15' : 'INSCOV'})
    df = df[df['REGION'] >= 0] # remove values -1
    df = df[df['AGE'] >= 0] # remove values -1
    df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9
    df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9
    df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',
                 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',
                 'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1
    # Keep only rows where all five utilization counts are non-negative.
    df = df[(df[['OBTOTV15', 'OPTOTV15', 'ERTOT15', 'IPNGTD15', 'HHTOTD15']]>=0).all(1)]
    def utilization(row):
        # Total = office-based + outpatient + ER + inpatient nights + home-health days.
        return row['OBTOTV15'] + row['OPTOTV15'] + row['ERTOT15'] + row['IPNGTD15'] + row['HHTOTD15']
    df['TOTEXP15'] = df.apply(lambda row: utilization(row), axis=1)
    df = df.rename(columns = {'TOTEXP15' : 'UTILIZATION_reg'})
    return df
class MEPSDataset20Reg(SaveDataset):
    """MEPS Dataset.

    Panel 20 of the Medical Expenditure Panel Survey, fiscal year 2015, with a
    real-valued 'UTILIZATION_reg' response (sum of utilization counts, not
    binarized). Instantiating the class loads './h181.csv', applies
    `default_preprocessing`, and (via SaveDataset) saves the cleaned data
    under the name 'meps_20_reg'.
    """
    def __init__(self, label_name='UTILIZATION_reg', favorable_classes=[1.0],
                 protected_attribute_names=['RACE'],
                 privileged_classes=[['White']],
                 instance_weights_name='PERWT15F',
                 categorical_features=['REGION','SEX','MARRY',
                     'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
                     'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                     'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                     'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42', 'ADSMOK42', 'PHQ242',
                     'EMPST','POVCAT','INSCOV'],
                 features_to_keep=['REGION','AGE','SEX','RACE','MARRY',
                     'FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX',
                     'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',
                     'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',
                     'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42', 'ADSMOK42',
                     'PCS42',
                     'MCS42','K6SUM42','PHQ242','EMPST','POVCAT','INSCOV','UTILIZATION_reg', 'PERWT15F'],
                 features_to_drop=[],
                 na_values=[], custom_preprocessing=default_preprocessing,
                 metadata=default_mappings):
        # Raw MEPS fiscal-year-2015 consolidated file; must be downloaded
        # separately and placed next to this script.
        filepath = './h181.csv'
        df = pd.read_csv(filepath, sep=',', na_values=na_values)
        # SaveDataset handles one-hot encoding of categorical_features and
        # persisting the cleaned frame to csv (dataset_name below).
        super(MEPSDataset20Reg, self).__init__(df=df, label_name=label_name,
            favorable_classes=favorable_classes,
            protected_attribute_names=protected_attribute_names,
            privileged_classes=privileged_classes,
            instance_weights_name=instance_weights_name,
            categorical_features=categorical_features,
            features_to_keep=features_to_keep,
            features_to_drop=features_to_drop, na_values=na_values,
            custom_preprocessing=custom_preprocessing, metadata=metadata, dataset_name='meps_20_reg')
| 5,265 | 48.679245 | 137 | py |
cqr | cqr-master/get_meps_data/main_clean_and_save_to_csv.py |
# Code based on IBM's AIF360 software package, suggesting a simple modification
# that accumulates the medical utilization variables without binarization

# Load packages
from meps_dataset_panel19_fy2015_reg import MEPSDataset19Reg
from meps_dataset_panel20_fy2015_reg import MEPSDataset20Reg
from meps_dataset_panel21_fy2016_reg import MEPSDataset21Reg
import numpy as np
import pandas as pd

print("Cleaning and saving MEPS 19, 20 and 21")

# Instantiating each dataset class loads the corresponding raw MEPS file,
# extracts and cleans the features, and saves meps_XX_reg.csv.
MEPSDataset19Reg()
MEPSDataset20Reg()
MEPSDataset21Reg()

print("Done.")

###############################################################################
###############################################################################
# We now show how to load the processed csv files

print("Loading processed data and printing the dimensions")

RESPONSE_NAME = "UTILIZATION_reg"


def _feature_columns(weight_col):
    """Return the feature column names shared by every MEPS panel.

    The panels differ only in the survey-weight column: 'PERWT15F' for
    panels 19/20 (fiscal year 2015) and 'PERWT16F' for panel 21 (fiscal
    year 2016); `weight_col` plugs in the right one.
    """
    return ['AGE', 'PCS42', 'MCS42', 'K6SUM42', weight_col, 'REGION=1',
            'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
            'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
            'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
            'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
            'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
            'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
            'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
            'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
            'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
            'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
            'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
            'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
            'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
            'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
            'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
            'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
            'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
            'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
            'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
            'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
            'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
            'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
            'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
            'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
            'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
            'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']


def _load_xy(csv_path, weight_col):
    """Load one processed MEPS csv; return the (X, y) numpy arrays."""
    df = pd.read_csv(csv_path)
    y = df[RESPONSE_NAME].values
    X = df[_feature_columns(weight_col)].values
    return X, y


# Previously this loop body was copy-pasted three times with an identical
# ~130-entry column list; only the weight column differs between panels.
for name, csv_path, weight_col in [("MEPS 19", 'meps_19_reg.csv', 'PERWT15F'),
                                   ("MEPS 20", 'meps_20_reg.csv', 'PERWT15F'),
                                   ("MEPS 21", 'meps_21_reg.csv', 'PERWT16F')]:
    X, y = _load_xy(csv_path, weight_col)
    print(name + ": n = " + str(X.shape[0]) + " p = " + str(X.shape[1]) + " response len = " + str(y.shape[0]))
| 8,836 | 50.678363 | 107 | py |
cqr | cqr-master/cqr/torch_models.py |
import sys
import copy
import torch
import numpy as np
import torch.nn as nn
from cqr import helper
from sklearn.model_selection import train_test_split
# Run on the GPU when one is visible to PyTorch, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
###############################################################################
# Helper functions
###############################################################################
def epoch_internal_train(model, loss_func, x_train, y_train, batch_size, optimizer, cnt=0, best_cnt=np.inf):
    """ Sweep once over the data and update the model's parameters in place.

    Parameters
    ----------
    model : class of neural net model
    loss_func : class of loss function
    x_train : pytorch tensor, n training features, each of dimension p (nXp)
    y_train : pytorch tensor, n training labels (n)
    batch_size : integer, size of the mini-batch
    optimizer : class of SGD solver
    cnt : integer, counting the gradient steps
    best_cnt: integer, stop the training if current cnt > best_cnt

    Returns
    -------
    epoch_loss : mean loss value
    cnt : integer, cumulative number of gradient steps
    """
    # Note: default changed from np.Inf to np.inf (np.Inf was removed in
    # NumPy 2.0); the value is identical.
    model.train()
    # Shuffle features and labels with one shared permutation.
    shuffle_idx = np.arange(x_train.shape[0])
    np.random.shuffle(shuffle_idx)
    x_train = x_train[shuffle_idx]
    y_train = y_train[shuffle_idx]
    epoch_losses = []
    for idx in range(0, x_train.shape[0], batch_size):
        cnt = cnt + 1
        optimizer.zero_grad()
        batch_x = x_train[idx : min(idx + batch_size, x_train.shape[0]),:]
        batch_y = y_train[idx : min(idx + batch_size, y_train.shape[0])]
        preds = model(batch_x)
        loss = loss_func(preds, batch_y)
        loss.backward()
        optimizer.step()
        epoch_losses.append(loss.cpu().detach().numpy())
        # Early stop: used when retraining on the full data for a fixed
        # number of gradient steps found by cross-validation.
        if cnt >= best_cnt:
            break
    epoch_loss = np.mean(epoch_losses)
    return epoch_loss, cnt
def rearrange(all_quantiles, quantile_low, quantile_high, test_preds):
    """ Produce monotonic quantiles

    Parameters
    ----------
    all_quantiles : numpy array (q), grid of quantile levels in the range (0,1)
    quantile_low : float, desired low quantile in the range (0,1)
    quantile_high : float, desired high quantile in the range (0,1)
    test_preds : numpy array of predicted quantile (nXq)

    Returns
    -------
    q_fixed : numpy array (nX2), containing the rearranged estimates of the
              desired low and high quantile

    References
    ----------
    .. [1] Chernozhukov, Victor, Iván Fernández‐Val, and Alfred Galichon.
           "Quantile and probability curves without crossing."
           Econometrica 78.3 (2010): 1093-1125.
    """
    # Map the desired levels onto [0,1] relative to the estimated grid.
    scaling = all_quantiles[-1] - all_quantiles[0]
    low_val = (quantile_low - all_quantiles[0])/scaling
    high_val = (quantile_high - all_quantiles[0])/scaling
    # Taking empirical quantiles of the per-row predictions sorts out any
    # crossing. 'method' replaces the deprecated 'interpolation' keyword
    # (renamed in NumPy 1.22, removed in NumPy 2.0).
    q_fixed = np.quantile(test_preds, (low_val, high_val), method='linear', axis=1)
    return q_fixed.T
###############################################################################
# Deep conditional mean regression
# Minimizing MSE loss
###############################################################################
# Define the network
class mse_model(nn.Module):
    """ Conditional mean estimator, formulated as neural net
    """
    def __init__(self,
                 in_shape=1,
                 hidden_size=64,
                 dropout=0.5):
        """ Initialization

        Parameters
        ----------
        in_shape : integer, input signal dimension (p)
        hidden_size : integer, hidden layer dimension
        dropout : float, dropout rate
        """
        super().__init__()
        self.in_shape = in_shape
        self.out_shape = 1
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.build_model()
        self.init_weights()

    def build_model(self):
        """ Assemble a two-hidden-layer ReLU network with dropout and a
        single (conditional mean) output unit.
        """
        width = self.hidden_size
        stack = [
            nn.Linear(self.in_shape, width),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(width, width),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(width, 1),
        ]
        self.base_model = nn.Sequential(*stack)

    def init_weights(self):
        """ Orthogonal weight matrices and zero biases for all linear layers.
        """
        for layer in self.base_model:
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight)
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """ Run forward pass; the trailing singleton dim is squeezed so the
        output matches the (n,) label shape.
        """
        return torch.squeeze(self.base_model(x))
# Define the training procedure
class LearnerOptimized:
    """ Fit a neural network (conditional mean) to training data
    """
    def __init__(self, model, optimizer_class, loss_func, device='cpu', test_ratio=0.2, random_state=0):
        """ Initialization

        Parameters
        ----------
        model : class of neural network model
        optimizer_class : class of SGD optimizer (e.g. Adam)
        loss_func : loss to minimize
        device : string, "cuda:0" or "cpu"
        test_ratio : float, test size used in cross-validation (CV)
        random_state : int, seed to be used in CV when splitting to train-test
        """
        self.model = model.to(device)
        self.optimizer_class = optimizer_class
        self.optimizer = optimizer_class(self.model.parameters())
        self.loss_func = loss_func.to(device)
        self.device = device
        self.test_ratio = test_ratio
        self.random_state = random_state
        self.loss_history = []
        self.test_loss_history = []
        self.full_loss_history = []

    def fit(self, x, y, epochs, batch_size, verbose=False):
        """ Fit the model to data

        Two phases: (1) cross-validate on a held-out split to find the best
        epoch / gradient-step count, (2) retrain on all data for that many
        steps.

        Parameters
        ----------
        x : numpy array, containing the training features (nXp)
        y : numpy array, containing the training labels (n)
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size for SGD
        """
        sys.stdout.flush()
        model = copy.deepcopy(self.model)
        # Bug fix: previously moved to the module-level global `device`,
        # silently ignoring the device requested in __init__.
        model = model.to(self.device)
        optimizer = self.optimizer_class(model.parameters())
        best_epoch = epochs

        x_train, xx, y_train, yy = train_test_split(x, y, test_size=self.test_ratio,random_state=self.random_state)

        x_train = torch.from_numpy(x_train).float().to(self.device).requires_grad_(False)
        xx = torch.from_numpy(xx).float().to(self.device).requires_grad_(False)
        y_train = torch.from_numpy(y_train).float().to(self.device).requires_grad_(False)
        yy = torch.from_numpy(yy).float().to(self.device).requires_grad_(False)

        best_cnt = 1e10
        best_test_epoch_loss = 1e10

        cnt = 0
        for e in range(epochs):
            epoch_loss, cnt = epoch_internal_train(model, self.loss_func, x_train, y_train, batch_size, optimizer, cnt)
            self.loss_history.append(epoch_loss)

            # test
            model.eval()
            preds = model(xx)
            test_preds = preds.cpu().detach().numpy()
            test_preds = np.squeeze(test_preds)
            test_epoch_loss = self.loss_func(preds, yy).cpu().detach().numpy()

            self.test_loss_history.append(test_epoch_loss)

            # Track the epoch / gradient-step count with the best held-out loss.
            if (test_epoch_loss <= best_test_epoch_loss):
                best_test_epoch_loss = test_epoch_loss
                best_epoch = e
                best_cnt = cnt

            if (e+1) % 100 == 0 and verbose:
                print("CV: Epoch {}: Train {}, Test {}, Best epoch {}, Best loss {}".format(e+1, epoch_loss, test_epoch_loss, best_epoch, best_test_epoch_loss))
                sys.stdout.flush()

        # use all the data to train the model, for best_cnt steps
        x = torch.from_numpy(x).float().to(self.device).requires_grad_(False)
        y = torch.from_numpy(y).float().to(self.device).requires_grad_(False)

        cnt = 0
        for e in range(best_epoch+1):
            if cnt > best_cnt:
                break
            epoch_loss, cnt = epoch_internal_train(self.model, self.loss_func, x, y, batch_size, self.optimizer, cnt, best_cnt)
            self.full_loss_history.append(epoch_loss)

            if (e+1) % 100 == 0 and verbose:
                print("Full: Epoch {}: {}, cnt {}".format(e+1, epoch_loss, cnt))
                sys.stdout.flush()

    def predict(self, x):
        """ Estimate the label given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        ret_val : numpy array of predicted labels (n)
        """
        self.model.eval()
        # Bug fix: cast to float32 to match the model parameters (fit() does
        # the same); previously a float64 input crashed the forward pass.
        x_tensor = torch.from_numpy(x).float().to(self.device).requires_grad_(False)
        ret_val = self.model(x_tensor).cpu().detach().numpy()
        return ret_val
##############################################################################
# Quantile regression
# Implementation inspired by:
# https://github.com/ceshine/quantile-regression-tensorflow
##############################################################################
class AllQuantileLoss(nn.Module):
    """ Pinball (quantile) loss, summed over a grid of quantile levels.
    """
    def __init__(self, quantiles):
        """ Initialize

        Parameters
        ----------
        quantiles : pytorch vector of quantile levels, each in the range (0,1)
        """
        super().__init__()
        self.quantiles = quantiles

    def forward(self, preds, target):
        """ Compute the pinball loss

        Parameters
        ----------
        preds : pytorch tensor of estimated labels (n)
        target : pytorch tensor of true labels (n)

        Returns
        -------
        loss : cost function value
        """
        assert not target.requires_grad
        assert preds.size(0) == target.size(0)
        per_quantile = []
        # Column i of preds holds the estimate for quantile level i.
        for col, q in enumerate(self.quantiles):
            diff = target - preds[:, col]
            pinball = torch.max((q - 1) * diff, q * diff)
            per_quantile.append(pinball.unsqueeze(1))
        # Sum over quantile levels, average over samples.
        loss = torch.mean(torch.sum(torch.cat(per_quantile, dim=1), dim=1))
        return loss
class all_q_model(nn.Module):
    """ Conditional quantile estimator, formulated as neural net
    """
    def __init__(self,
                 quantiles,
                 in_shape=1,
                 hidden_size=64,
                 dropout=0.5):
        """ Initialization

        Parameters
        ----------
        quantiles : numpy array of quantile levels (q), each in the range (0,1)
        in_shape : integer, input signal dimension (p)
        hidden_size : integer, hidden layer dimension
        dropout : float, dropout rate
        """
        super().__init__()
        self.quantiles = quantiles
        self.num_quantiles = len(quantiles)
        self.hidden_size = hidden_size
        self.in_shape = in_shape
        self.out_shape = len(quantiles)
        self.dropout = dropout
        self.build_model()
        self.init_weights()

    def build_model(self):
        """ Assemble a two-hidden-layer ReLU network with dropout and one
        output unit per quantile level.
        """
        width = self.hidden_size
        stack = [
            nn.Linear(self.in_shape, width),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(width, width),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(width, self.num_quantiles),
        ]
        self.base_model = nn.Sequential(*stack)

    def init_weights(self):
        """ Orthogonal weight matrices and zero biases for all linear layers.
        """
        for layer in self.base_model:
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight)
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """ Run forward pass; output has one column per quantile level. """
        return self.base_model(x)
class LearnerOptimizedCrossing:
    """ Fit a neural network (conditional quantile) to training data
    """
    def __init__(self, model, optimizer_class, loss_func, device='cpu', test_ratio=0.2, random_state=0,
                 qlow=0.05, qhigh=0.95, use_rearrangement=False):
        """ Initialization

        Parameters
        ----------
        model : class of neural network model
        optimizer_class : class of SGD optimizer (e.g. pytorch's Adam)
        loss_func : loss to minimize
        device : string, "cuda:0" or "cpu"
        test_ratio : float, test size used in cross-validation (CV)
        random_state : integer, seed used in CV when splitting to train-test
        qlow : float, low quantile level in the range (0,1)
        qhigh : float, high quantile level in the range (0,1)
        use_rearrangement : boolean, use the rearrangement algorithm (True)
                            of not (False)
        """
        self.model = model.to(device)
        self.use_rearrangement = use_rearrangement
        self.compute_coverage = True
        self.quantile_low = qlow
        self.quantile_high = qhigh
        self.target_coverage = 100.0*(self.quantile_high - self.quantile_low)
        self.all_quantiles = loss_func.quantiles
        self.optimizer_class = optimizer_class
        self.optimizer = optimizer_class(self.model.parameters())
        self.loss_func = loss_func.to(device)
        self.device = device
        self.test_ratio = test_ratio
        self.random_state = random_state
        self.loss_history = []
        self.test_loss_history = []
        self.full_loss_history = []

    def fit(self, x, y, epochs, batch_size, verbose=False):
        """ Fit the model to data

        Two phases: (1) cross-validate on a held-out split to find the
        epoch / gradient-step count with the shortest valid interval,
        (2) retrain on all data for that many steps.

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size used in SGD solver
        """
        sys.stdout.flush()
        model = copy.deepcopy(self.model)
        # Bug fix: previously moved to the module-level global `device`,
        # silently ignoring the device requested in __init__.
        model = model.to(self.device)
        optimizer = self.optimizer_class(model.parameters())
        best_epoch = epochs

        x_train, xx, y_train, yy = train_test_split(x,
                                                    y,
                                                    test_size=self.test_ratio,
                                                    random_state=self.random_state)

        x_train = torch.from_numpy(x_train).float().to(self.device).requires_grad_(False)
        xx = torch.from_numpy(xx).float().to(self.device).requires_grad_(False)
        y_train = torch.from_numpy(y_train).float().to(self.device).requires_grad_(False)
        yy_cpu = yy
        yy = torch.from_numpy(yy).float().to(self.device).requires_grad_(False)

        best_avg_length = 1e10
        best_coverage = 0
        best_cnt = 1e10

        cnt = 0
        for e in range(epochs):
            model.train()
            epoch_loss, cnt = epoch_internal_train(model, self.loss_func, x_train, y_train, batch_size, optimizer, cnt)
            self.loss_history.append(epoch_loss)

            model.eval()
            preds = model(xx)
            test_epoch_loss = self.loss_func(preds, yy).cpu().detach().numpy()
            self.test_loss_history.append(test_epoch_loss)

            test_preds = preds.cpu().detach().numpy()
            test_preds = np.squeeze(test_preds)

            if self.use_rearrangement:
                test_preds = rearrange(self.all_quantiles, self.quantile_low, self.quantile_high, test_preds)

            y_lower = test_preds[:,0]
            y_upper = test_preds[:,1]
            coverage, avg_length = helper.compute_coverage_len(yy_cpu, y_lower, y_upper)

            # Prefer the shortest interval among those hitting the target coverage.
            if (coverage >= self.target_coverage) and (avg_length < best_avg_length):
                best_avg_length = avg_length
                best_coverage = coverage
                best_epoch = e
                best_cnt = cnt

            if (e+1) % 100 == 0 and verbose:
                print("CV: Epoch {}: Train {}, Test {}, Best epoch {}, Best Coverage {} Best Length {} Cur Coverage {}".format(e+1, epoch_loss, test_epoch_loss, best_epoch, best_coverage, best_avg_length, coverage))
                sys.stdout.flush()

        x = torch.from_numpy(x).float().to(self.device).requires_grad_(False)
        y = torch.from_numpy(y).float().to(self.device).requires_grad_(False)

        cnt = 0
        for e in range(best_epoch+1):
            if cnt > best_cnt:
                break
            epoch_loss, cnt = epoch_internal_train(self.model, self.loss_func, x, y, batch_size, self.optimizer, cnt, best_cnt)
            self.full_loss_history.append(epoch_loss)

            if (e+1) % 100 == 0 and verbose:
                print("Full: Epoch {}: {}, cnt {}".format(e+1, epoch_loss, cnt))
                sys.stdout.flush()

    def predict(self, x):
        """ Estimate the conditional low and high quantile given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        test_preds : numpy array of predicted low and high quantiles (nX2)
        """
        self.model.eval()
        # Bug fix: cast to float32 (fit() trains in float32); previously a
        # float64 input crashed the forward pass.
        test_preds = self.model(torch.from_numpy(x).float().to(self.device).requires_grad_(False)).cpu().detach().numpy()
        if self.use_rearrangement:
            test_preds = rearrange(self.all_quantiles, self.quantile_low, self.quantile_high, test_preds)
        else:
            # Bug fix: compute both bounds BEFORE writing either column.
            # Writing the row-min into column 0 first corrupted the
            # subsequent row-max whenever the quantile estimates crossed
            # (the max was taken over the already-modified row).
            y_lower = np.min(test_preds, axis=1)
            y_upper = np.max(test_preds, axis=1)
            test_preds[:,0] = y_lower
            test_preds[:,1] = y_upper
        return test_preds
| 17,313 | 33.217391 | 215 | py |
cqr | cqr-master/cqr/tune_params_cv.py |
from cqr import helper
from skgarden import RandomForestQuantileRegressor
from sklearn.model_selection import train_test_split
def CV_quntiles_rf(params,
                   X,
                   y,
                   target_coverage,
                   grid_q,
                   test_ratio,
                   random_state,
                   coverage_factor=0.9):
    """ Tune the low and high quantile level parameters of quantile random
    forests method, using cross-validation

    (The misspelled name 'CV_quntiles_rf' is kept for backward compatibility
    with existing callers.)

    Parameters
    ----------
    params : dictionary of parameters
             params["random_state"] : integer, seed for splitting the data
                                      in cross-validation. Also used as the
                                      seed in quantile random forest (QRF)
             params["min_samples_leaf"] : integer, parameter of QRF
             params["n_estimators"] : integer, parameter of QRF
             params["max_features"] : integer, parameter of QRF
    X : numpy array, containing the training features (nXp)
    y : numpy array, containing the training labels (n)
    target_coverage : desired coverage of prediction band. The output coverage
                      may be smaller if coverage_factor <= 1, in this case the
                      target will be modified to target_coverage*coverage_factor
    grid_q : numpy array, of low and high quantile levels to test
    test_ratio : float, test size of the held-out data
    random_state : integer, seed for splitting the data in cross-validation.
                   Also used as the seed in QRF.
    coverage_factor : float, when tuning the two QRF quantile levels one may
                      ask for prediction band with smaller average coverage,
                      equal to coverage_factor*(q_high - q_low) to avoid too
                      conservative estimation of the prediction band

    Returns
    -------
    best_q : numpy array of low and high quantile levels (length 2)

    References
    ----------
    .. [1] Meinshausen, Nicolai. "Quantile regression forests."
           Journal of Machine Learning Research 7.Jun (2006): 983-999.
    """
    # Slightly shrink the target: accepting marginally lower coverage avoids
    # overly conservative (wide) prediction bands.
    target_coverage = coverage_factor*target_coverage
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio,random_state=random_state)

    best_avg_length = 1e10
    best_q = grid_q[0]

    # The forest is fitted once; only the quantile levels queried at predict
    # time vary across the grid.
    rf = RandomForestQuantileRegressor(random_state=params["random_state"],
                                       min_samples_leaf=params["min_samples_leaf"],
                                       n_estimators=params["n_estimators"],
                                       max_features=params["max_features"])
    rf.fit(X_train, y_train)

    for q in grid_q:
        y_lower = rf.predict(X_test, quantile=q[0])
        y_upper = rf.predict(X_test, quantile=q[1])
        coverage, avg_length = helper.compute_coverage_len(y_test, y_lower, y_upper)
        if (coverage >= target_coverage) and (avg_length < best_avg_length):
            best_avg_length = avg_length
            best_q = q
        else:
            # NOTE(review): this early break assumes grid_q is ordered so that
            # once a candidate pair fails, no later (narrower) pair can
            # succeed — confirm the ordering at the call sites.
            break
    return best_q
| 3,123 | 42.388889 | 109 | py |
cqr | cqr-master/cqr/helper.py |
import sys
import torch
import numpy as np
from cqr import torch_models
from functools import partial
from cqr import tune_params_cv
from nonconformist.cp import IcpRegressor
from nonconformist.base import RegressorAdapter
from skgarden import RandomForestQuantileRegressor
# Run on the GPU when one is visible to PyTorch, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
def compute_coverage_len(y_test, y_lower, y_upper):
    """ Compute average coverage and length of prediction intervals

    Parameters
    ----------
    y_test : numpy array, true labels (n)
    y_lower : numpy array, estimated lower bound for the labels (n)
    y_upper : numpy array, estimated upper bound for the labels (n)

    Returns
    -------
    coverage : float, average coverage (percentage of covered labels)
    avg_length : float, average interval length
    """
    covered = (y_test >= y_lower) & (y_test <= y_upper)
    coverage = np.sum(covered) / len(y_test) * 100
    avg_length = np.mean(np.abs(y_upper - y_lower))
    return coverage, avg_length
def run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance, condition=None):
    """ Run split conformal method

    Parameters
    ----------
    nc : class of nonconformist object
    X_train : numpy array, training features (n1Xp)
    y_train : numpy array, training labels (n1)
    X_test : numpy array, testing features (n2Xp)
    idx_train : numpy array, indices of proper training set examples
    idx_cal : numpy array, indices of calibration set examples
    significance : float, significance level (e.g. 0.1)
    condition : function, mapping feature vector to group id

    Returns
    -------
    y_lower : numpy array, estimated lower bound for the labels (n2)
    y_upper : numpy array, estimated upper bound for the labels (n2)
    """
    icp = IcpRegressor(nc, condition=condition)

    # Train the underlying regressor on the proper training subset only.
    icp.fit(X_train[idx_train, :], y_train[idx_train])

    # Calibrate the nonconformity scores on the held-out calibration subset.
    icp.calibrate(X_train[idx_cal, :], y_train[idx_cal])

    # Interval endpoints at the requested significance level.
    intervals = icp.predict(X_test, significance=significance)
    return intervals[:, 0], intervals[:, 1]
def run_icp_sep(nc, X_train, y_train, X_test, idx_train, idx_cal, significance, condition):
    """ Run split conformal method, train a seperate regressor for each group

    Parameters
    ----------
    nc : class of nonconformist object
    X_train : numpy array, training features (n1Xp)
    y_train : numpy array, training labels (n1)
    X_test : numpy array, testing features (n2Xp)
    idx_train : numpy array, indices of proper training set examples
    idx_cal : numpy array, indices of calibration set examples
    significance : float, significance level (e.g. 0.1)
    condition : function, mapping a feature vector to group id

    Returns
    -------
    y_lower : numpy array, estimated lower bound for the labels (n2)
    y_upper : numpy array, estimated upper bound for the labels (n2)
    """
    X_proper_train = X_train[idx_train, :]
    y_proper_train = y_train[idx_train]
    X_calibration = X_train[idx_cal, :]
    y_calibration = y_train[idx_cal]

    # Group id of every proper-training / calibration / test example.
    # (Test labels are unknown, hence the None placeholder.)
    groups_proper = np.array([condition((X_proper_train[i, :], y_proper_train[i]))
                              for i in range(y_proper_train.size)])
    groups_cal = np.array([condition((X_calibration[i, :], y_calibration[i]))
                           for i in range(y_calibration.size)])
    groups_test = np.array([condition((X_test[i, :], None))
                            for i in range(X_test.shape[0])])

    y_lower = np.zeros(X_test.shape[0])
    y_upper = np.zeros(X_test.shape[0])

    # One independent split-conformal regressor per group; nc[k] supplies the
    # k-th group's nonconformity object.
    for k, group in enumerate(np.unique(groups_proper)):
        icp = IcpRegressor(nc[k])

        train_mask = groups_proper == group
        icp.fit(X_proper_train[train_mask, :], y_proper_train[train_mask])

        cal_mask = groups_cal == group
        icp.calibrate(X_calibration[cal_mask, :], y_calibration[cal_mask])

        test_mask = groups_test == group
        preds = icp.predict(X_test[test_mask, :], significance=significance)
        y_lower[test_mask] = preds[:, 0]
        y_upper[test_mask] = preds[:, 1]

    return y_lower, y_upper
def compute_coverage(y_test,y_lower,y_upper,significance,name=""):
    """Report the empirical coverage and the average interval length.

    Parameters
    ----------
    y_test : numpy array, true labels (n)
    y_lower : numpy array, estimated lower bound for the labels (n)
    y_upper : numpy array, estimated upper bound for the labels (n)
    significance : float, desired significance level
    name : string, optional output string (e.g. the method name)

    Returns
    -------
    coverage : float, average coverage (in percent)
    avg_length : float, average length of the prediction interval
    """
    # Fraction of test points that fall inside [y_lower, y_upper]
    covered = (y_test >= y_lower) & (y_test <= y_upper)
    coverage = np.sum(covered) / len(y_test) * 100
    print("%s: Percentage in the range (expecting %.2f): %f" % (name, 100 - significance*100, coverage))
    sys.stdout.flush()
    # Mean width of the band (abs guards against lower > upper on average)
    avg_length = abs(np.mean(y_lower - y_upper))
    print("%s: Average length: %f" % (name, avg_length))
    sys.stdout.flush()
    return coverage, avg_length
def compute_coverage_per_sample(y_test,y_lower,y_upper,significance,name="",x_test=None,condition=None):
    """ Compute per-sample coverage and length, and print summary results.

    Parameters
    ----------
    y_test : numpy array, true labels (n)
    y_lower : numpy array, estimated lower bound for the labels (n)
    y_upper : numpy array, estimated upper bound for the labels (n)
    significance : float, desired significance level
    name : string, optional output string (e.g. the method name)
    x_test : numpy array, test features (only used when `condition` is given)
    condition : function, mapping a (feature vector, label) pair to a group id

    Returns
    -------
    coverage : boolean numpy array of per-sample coverage indicators; when
               `condition` is given, an object array with one boolean array
               per group
    length : numpy array of per-sample interval lengths; when `condition`
             is given, an object array with one array per group
    """
    if condition is not None:
        # Partition the test points into groups and report per-group stats
        category_map = np.array([condition((x_test[i, :], y_test[i])) for i in range(y_test.size)])
        categories = np.unique(category_map)
        # FIX: use the builtin `object` dtype; the `np.object` alias was
        # removed in NumPy 1.24 and raised AttributeError.
        coverage = np.empty(len(categories), dtype=object)
        length = np.empty(len(categories), dtype=object)
        cnt = 0
        for cond in categories:
            idx = category_map == cond
            coverage[cnt] = (y_test[idx] >= y_lower[idx]) & (y_test[idx] <= y_upper[idx])
            coverage_avg = np.sum( coverage[cnt] ) / len(y_test[idx]) * 100
            print("%s: Group %d : Percentage in the range (expecting %.2f): %f" % (name, cond, 100 - significance*100, coverage_avg))
            sys.stdout.flush()
            length[cnt] = abs(y_upper[idx] - y_lower[idx])
            print("%s: Group %d : Average length: %f" % (name, cond, np.mean(length[cnt])))
            sys.stdout.flush()
            cnt = cnt + 1
    else:
        # No grouping: one indicator / one length per test sample
        coverage = (y_test >= y_lower) & (y_test <= y_upper)
        coverage_avg = np.sum(coverage) / len(y_test) * 100
        print("%s: Percentage in the range (expecting %.2f): %f" % (name, 100 - significance*100, coverage_avg))
        sys.stdout.flush()
        length = abs(y_upper - y_lower)
        print("%s: Average length: %f" % (name, np.mean(length)))
        sys.stdout.flush()
    return coverage, length
def plot_func_data(y_test,y_lower,y_upper,name=""):
    """Visualize the test labels together with the prediction band.

    Two figures are shown: one with the samples ordered by interval length
    (centered so the interval midpoint is at zero), and one with the
    samples ordered by the response value.

    Parameters
    ----------
    y_test : numpy array, true labels (n)
    y_lower : numpy array, estimated lower bound for the labels (n)
    y_upper : numpy array, estimated upper bound for the labels (n)
    name : string, optional output string (e.g. the method name)
    """
    # allowed to import graphics
    import matplotlib.pyplot as plt

    def _band_plot(y_sorted, lo_sorted, hi_sorted, xlabel):
        # One scatter of labels plus the shaded prediction interval
        plt.plot(y_sorted, "ro")
        plt.fill_between(
            np.arange(len(hi_sorted)), lo_sorted, hi_sorted, alpha=0.2, color="r",
            label="Pred. interval")
        plt.xlabel(xlabel)
        plt.ylabel("Values and prediction intervals")
        plt.title(name)
        plt.show()

    # First figure: order by interval length, center at the band midpoint
    order = np.argsort(y_upper - y_lower)
    lo = y_lower[order]
    hi = y_upper[order]
    mid = (hi + lo) / 2
    _band_plot(y_test[order] - mid, lo - mid, hi - mid, "Ordered samples")

    # Second figure: order by the response value, no centering
    order = np.argsort(y_test)
    _band_plot(y_test[order], y_lower[order], y_upper[order],
               "Ordered samples by response")
###############################################################################
# Deep conditional mean regression
# Minimizing MSE loss
###############################################################################
class MSENet_RegressorAdapter(RegressorAdapter):
    """Conditional mean estimator, implemented as a Pytorch neural net.

    Adapts a torch_models.mse_model (trained with MSE loss) to the
    nonconformist RegressorAdapter interface.
    """
    def __init__(self,
                 model,
                 fit_params=None,
                 in_shape=1,
                 hidden_size=1,
                 learn_func=torch.optim.Adam,
                 epochs=1000,
                 batch_size=10,
                 dropout=0.1,
                 lr=0.01,
                 wd=1e-6,
                 test_ratio=0.2,
                 random_state=0):
        """Build the network, the loss, and the training helper.

        Parameters
        ----------
        model : unused parameter (for compatibility with nc class)
        fit_params : unused parameter (for compatibility with nc class)
        in_shape : integer, input signal dimension
        hidden_size : integer, hidden layer dimension
        learn_func : class of Pytorch's SGD optimizer
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size for SGD
        dropout : float, dropout rate
        lr : float, learning rate for SGD
        wd : float, weight decay
        test_ratio : float, ratio of held-out data, used in cross-validation
        random_state : integer, seed for splitting the data in cross-validation
        """
        super(MSENet_RegressorAdapter, self).__init__(model, fit_params)
        # Remember the optimization hyper-parameters
        self.epochs = epochs
        self.batch_size = batch_size
        self.dropout = dropout
        self.lr = lr
        self.wd = wd
        self.test_ratio = test_ratio
        self.random_state = random_state
        # Network, loss, and the optimized learner wrapper
        self.model = torch_models.mse_model(in_shape=in_shape,
                                            hidden_size=hidden_size,
                                            dropout=dropout)
        self.loss_func = torch.nn.MSELoss()
        optimizer_factory = partial(learn_func, lr=lr, weight_decay=wd)
        self.learner = torch_models.LearnerOptimized(self.model,
                                                     optimizer_factory,
                                                     self.loss_func,
                                                     device=device,
                                                     test_ratio=self.test_ratio,
                                                     random_state=self.random_state)

    def fit(self, x, y):
        """Train the network on the given data.

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        """
        self.learner.fit(x, y, self.epochs, batch_size=self.batch_size)

    def predict(self, x):
        """Predict the conditional mean for each row of x.

        Parameters
        ----------
        x : numpy array of features (nXp)

        Returns
        -------
        numpy array of predicted labels (n)
        """
        return self.learner.predict(x)
###############################################################################
# Deep neural network for conditional quantile regression
# Minimizing pinball loss
###############################################################################
class AllQNet_RegressorAdapter(RegressorAdapter):
    """Conditional quantile estimator, implemented as a Pytorch neural net.

    Trains with the pinball (quantile) loss. Optionally fits a dense grid
    of quantiles and applies the rearrangement algorithm of [1] to avoid
    quantile crossing.

    References
    ----------
    .. [1] Chernozhukov, Victor, Iván Fernández‐Val, and Alfred Galichon.
           "Quantile and probability curves without crossing."
           Econometrica 78.3 (2010): 1093-1125.
    """
    def __init__(self,
                 model,
                 fit_params=None,
                 in_shape=1,
                 hidden_size=1,
                 quantiles=[.05, .95],
                 learn_func=torch.optim.Adam,
                 epochs=1000,
                 batch_size=10,
                 dropout=0.1,
                 lr=0.01,
                 wd=1e-6,
                 test_ratio=0.2,
                 random_state=0,
                 use_rearrangement=False):
        """Build the quantile network and its training helper.

        Parameters
        ----------
        model : None, unused parameter (for compatibility with nc class)
        fit_params : None, unused parameter (for compatibility with nc class)
        in_shape : integer, input signal dimension
        hidden_size : integer, hidden layer dimension
        quantiles : numpy array, low and high quantile levels in range (0,1)
        learn_func : class of Pytorch's SGD optimizer
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size for SGD
        dropout : float, dropout rate
        lr : float, learning rate for SGD
        wd : float, weight decay
        test_ratio : float, ratio of held-out data, used in cross-validation
        random_state : integer, seed for splitting the data in cross-validation
        use_rearrangement : boolean, use the rearrangement algorithm (True)
                            or not (False)
        """
        super(AllQNet_RegressorAdapter, self).__init__(model, fit_params)
        self.quantiles = quantiles
        # With rearrangement we estimate the whole 1%..99% grid; otherwise
        # only the two requested levels.
        if use_rearrangement:
            self.all_quantiles = torch.from_numpy(np.linspace(0.01,0.99,99)).float()
        else:
            self.all_quantiles = self.quantiles
        # Remember the optimization hyper-parameters
        self.epochs = epochs
        self.batch_size = batch_size
        self.dropout = dropout
        self.lr = lr
        self.wd = wd
        self.test_ratio = test_ratio
        self.random_state = random_state
        # Network, pinball loss, and the crossing-aware learner wrapper
        self.model = torch_models.all_q_model(quantiles=self.all_quantiles,
                                              in_shape=in_shape,
                                              hidden_size=hidden_size,
                                              dropout=dropout)
        self.loss_func = torch_models.AllQuantileLoss(self.all_quantiles)
        optimizer_factory = partial(learn_func, lr=lr, weight_decay=wd)
        self.learner = torch_models.LearnerOptimizedCrossing(self.model,
                                                             optimizer_factory,
                                                             self.loss_func,
                                                             device=device,
                                                             test_ratio=self.test_ratio,
                                                             random_state=self.random_state,
                                                             qlow=self.quantiles[0],
                                                             qhigh=self.quantiles[1],
                                                             use_rearrangement=use_rearrangement)

    def fit(self, x, y):
        """Train the network on the given data.

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        """
        self.learner.fit(x, y, self.epochs, self.batch_size)

    def predict(self, x):
        """Estimate the conditional low and high quantiles for each row of x.

        Parameters
        ----------
        x : numpy array of features (nXp)

        Returns
        -------
        numpy array of estimated conditional quantiles (nX2)
        """
        return self.learner.predict(x)
###############################################################################
# Quantile random forests model
###############################################################################
class QuantileForestRegressorAdapter(RegressorAdapter):
    """Conditional quantile estimator based on quantile random forests (QRF).

    References
    ----------
    .. [1] Meinshausen, Nicolai. "Quantile regression forests."
           Journal of Machine Learning Research 7.Jun (2006): 983-999.
    """
    def __init__(self,
                 model,
                 fit_params=None,
                 quantiles=[5, 95],
                 params=None):
        """Instantiate the underlying QRF model.

        Parameters
        ----------
        model : None, unused parameter (for compatibility with nc class)
        fit_params : None, unused parameter (for compatibility with nc class)
        quantiles : numpy array, low and high quantile levels in range (0,100)
        params : dictionary of parameters
                 params["random_state"] : integer, seed for splitting the data
                                          in cross-validation; also the QRF seed
                 params["min_samples_leaf"] : integer, parameter of QRF
                 params["n_estimators"] : integer, parameter of QRF
                 params["max_features"] : integer, parameter of QRF
                 params["CV"] : boolean, tune the two QRF quantile levels by
                                cross-validation (True) or not (False)
                 params["test_ratio"] : float, ratio of held-out data, used
                                        in cross-validation
                 params["coverage_factor"] : float, ask for intervals with
                                             reduced average coverage, equal to
                                             coverage_factor*(q_high - q_low),
                                             to avoid conservative estimates
                 params["range_vals"] : float, the CV sweep runs from
                                        quantiles[0] - range_vals up to
                                        quantiles[1] + range_vals
                 params["num_vals"] : integer, number of grid points in the
                                      CV sweep
        """
        super(QuantileForestRegressorAdapter, self).__init__(model, fit_params)
        self.quantiles = quantiles
        # Until CV tuning runs, use the requested levels as-is
        self.cv_quantiles = self.quantiles
        self.params = params
        self.rfqr = RandomForestQuantileRegressor(random_state=params["random_state"],
                                                  min_samples_leaf=params["min_samples_leaf"],
                                                  n_estimators=params["n_estimators"],
                                                  max_features=params["max_features"])

    def fit(self, x, y):
        """Fit the QRF, optionally tuning the quantile levels by CV first.

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        """
        if self.params["CV"]:
            target_coverage = self.quantiles[1] - self.quantiles[0]
            span = self.params["range_vals"]
            n_grid = self.params["num_vals"]
            # Sweep the low level up and the high level down over n_grid points
            grid_low = np.linspace(self.quantiles[0],
                                   self.quantiles[0] + span,
                                   n_grid).reshape(-1, 1)
            grid_high = np.linspace(self.quantiles[1],
                                    self.quantiles[1] - span,
                                    n_grid).reshape(-1, 1)
            grid_q = np.concatenate((grid_low, grid_high), 1)
            self.cv_quantiles = tune_params_cv.CV_quntiles_rf(self.params,
                                                              x,
                                                              y,
                                                              target_coverage,
                                                              grid_q,
                                                              self.params["test_ratio"],
                                                              self.params["random_state"],
                                                              self.params["coverage_factor"])
        self.rfqr.fit(x, y)

    def predict(self, x):
        """Estimate the conditional low and high quantiles for each row of x.

        Parameters
        ----------
        x : numpy array of features (nXp)

        Returns
        -------
        numpy array of estimated conditional quantiles (nX2)
        """
        lo = self.rfqr.predict(x, quantile=self.cv_quantiles[0])
        hi = self.rfqr.predict(x, quantile=self.cv_quantiles[1])
        ret_val = np.zeros((len(lo), 2))
        ret_val[:, 0] = lo
        ret_val[:, 1] = hi
        return ret_val
| 22,414 | 36.927242 | 133 | py |
cqr | cqr-master/cqr/__init__.py | #!/usr/bin/env python
| 22 | 10.5 | 21 | py |
cqr | cqr-master/datasets/datasets.py |
import numpy as np
import pandas as pd
def GetDataset(name, base_path):
    """ Load a dataset

    Parameters
    ----------
    name : string, dataset name
    base_path : string, e.g. "path/to/datasets/directory/"

    Returns
    -------
    X : features (nXp)
    y : labels (n)

    Notes
    -----
    NOTE(review): if `name` matches none of the known datasets, X and y are
    never assigned and the final `return X, y` raises NameError.
    """
    # MEPS panel 19: medical expenditure data; target is the utilization score
    if name=="meps_19":
        df = pd.read_csv(base_path + 'meps_19_reg_fix.csv')
        column_names = df.columns
        response_name = "UTILIZATION_reg"
        column_names = column_names[column_names!=response_name]
        column_names = column_names[column_names!="Unnamed: 0"]
        # Explicit feature list (one-hot encoded categoricals); the last
        # column, RACE, is used downstream to define subgroups
        col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT15F', 'REGION=1',
                     'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
                     'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
                     'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
                     'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
                     'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
                     'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
                     'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
                     'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
                     'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
                     'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
                     'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
                     'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
                     'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
                     'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
                     'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
                     'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
                     'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
                     'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
                     'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
                     'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
                     'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
                     'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
                     'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
                     'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
                     'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
                     'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
        y = df[response_name].values
        X = df[col_names].values
    # MEPS panel 20: same schema as panel 19
    if name=="meps_20":
        df = pd.read_csv(base_path + 'meps_20_reg_fix.csv')
        column_names = df.columns
        response_name = "UTILIZATION_reg"
        column_names = column_names[column_names!=response_name]
        column_names = column_names[column_names!="Unnamed: 0"]
        col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT15F', 'REGION=1',
                     'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
                     'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
                     'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
                     'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
                     'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
                     'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
                     'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
                     'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
                     'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
                     'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
                     'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
                     'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
                     'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
                     'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
                     'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
                     'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
                     'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
                     'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
                     'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
                     'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
                     'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
                     'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
                     'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
                     'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
                     'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
                     'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
        y = df[response_name].values
        X = df[col_names].values
    # MEPS panel 21: same schema except the survey weight column (PERWT16F)
    if name=="meps_21":
        df = pd.read_csv(base_path + 'meps_21_reg_fix.csv')
        column_names = df.columns
        response_name = "UTILIZATION_reg"
        column_names = column_names[column_names!=response_name]
        column_names = column_names[column_names!="Unnamed: 0"]
        col_names = ['AGE', 'PCS42', 'MCS42', 'K6SUM42', 'PERWT16F', 'REGION=1',
                     'REGION=2', 'REGION=3', 'REGION=4', 'SEX=1', 'SEX=2', 'MARRY=1',
                     'MARRY=2', 'MARRY=3', 'MARRY=4', 'MARRY=5', 'MARRY=6', 'MARRY=7',
                     'MARRY=8', 'MARRY=9', 'MARRY=10', 'FTSTU=-1', 'FTSTU=1', 'FTSTU=2',
                     'FTSTU=3', 'ACTDTY=1', 'ACTDTY=2', 'ACTDTY=3', 'ACTDTY=4',
                     'HONRDC=1', 'HONRDC=2', 'HONRDC=3', 'HONRDC=4', 'RTHLTH=-1',
                     'RTHLTH=1', 'RTHLTH=2', 'RTHLTH=3', 'RTHLTH=4', 'RTHLTH=5',
                     'MNHLTH=-1', 'MNHLTH=1', 'MNHLTH=2', 'MNHLTH=3', 'MNHLTH=4',
                     'MNHLTH=5', 'HIBPDX=-1', 'HIBPDX=1', 'HIBPDX=2', 'CHDDX=-1',
                     'CHDDX=1', 'CHDDX=2', 'ANGIDX=-1', 'ANGIDX=1', 'ANGIDX=2',
                     'MIDX=-1', 'MIDX=1', 'MIDX=2', 'OHRTDX=-1', 'OHRTDX=1', 'OHRTDX=2',
                     'STRKDX=-1', 'STRKDX=1', 'STRKDX=2', 'EMPHDX=-1', 'EMPHDX=1',
                     'EMPHDX=2', 'CHBRON=-1', 'CHBRON=1', 'CHBRON=2', 'CHOLDX=-1',
                     'CHOLDX=1', 'CHOLDX=2', 'CANCERDX=-1', 'CANCERDX=1', 'CANCERDX=2',
                     'DIABDX=-1', 'DIABDX=1', 'DIABDX=2', 'JTPAIN=-1', 'JTPAIN=1',
                     'JTPAIN=2', 'ARTHDX=-1', 'ARTHDX=1', 'ARTHDX=2', 'ARTHTYPE=-1',
                     'ARTHTYPE=1', 'ARTHTYPE=2', 'ARTHTYPE=3', 'ASTHDX=1', 'ASTHDX=2',
                     'ADHDADDX=-1', 'ADHDADDX=1', 'ADHDADDX=2', 'PREGNT=-1', 'PREGNT=1',
                     'PREGNT=2', 'WLKLIM=-1', 'WLKLIM=1', 'WLKLIM=2', 'ACTLIM=-1',
                     'ACTLIM=1', 'ACTLIM=2', 'SOCLIM=-1', 'SOCLIM=1', 'SOCLIM=2',
                     'COGLIM=-1', 'COGLIM=1', 'COGLIM=2', 'DFHEAR42=-1', 'DFHEAR42=1',
                     'DFHEAR42=2', 'DFSEE42=-1', 'DFSEE42=1', 'DFSEE42=2',
                     'ADSMOK42=-1', 'ADSMOK42=1', 'ADSMOK42=2', 'PHQ242=-1', 'PHQ242=0',
                     'PHQ242=1', 'PHQ242=2', 'PHQ242=3', 'PHQ242=4', 'PHQ242=5',
                     'PHQ242=6', 'EMPST=-1', 'EMPST=1', 'EMPST=2', 'EMPST=3', 'EMPST=4',
                     'POVCAT=1', 'POVCAT=2', 'POVCAT=3', 'POVCAT=4', 'POVCAT=5',
                     'INSCOV=1', 'INSCOV=2', 'INSCOV=3', 'RACE']
        y = df[response_name].values
        X = df[col_names].values
    # STAR: project STAR education data; categorical columns are manually
    # label-encoded, and the target is the sum of all reading/math scores
    if name=="star":
        df = pd.read_csv(base_path + 'STAR.csv')
        df.loc[df['gender'] == 'female', 'gender'] = 0
        df.loc[df['gender'] == 'male', 'gender'] = 1
        df.loc[df['ethnicity'] == 'cauc', 'ethnicity'] = 0
        df.loc[df['ethnicity'] == 'afam', 'ethnicity'] = 1
        df.loc[df['ethnicity'] == 'asian', 'ethnicity'] = 2
        df.loc[df['ethnicity'] == 'hispanic', 'ethnicity'] = 3
        df.loc[df['ethnicity'] == 'amindian', 'ethnicity'] = 4
        df.loc[df['ethnicity'] == 'other', 'ethnicity'] = 5
        # Class type per grade (K..3)
        df.loc[df['stark'] == 'regular', 'stark'] = 0
        df.loc[df['stark'] == 'small', 'stark'] = 1
        df.loc[df['stark'] == 'regular+aide', 'stark'] = 2
        df.loc[df['star1'] == 'regular', 'star1'] = 0
        df.loc[df['star1'] == 'small', 'star1'] = 1
        df.loc[df['star1'] == 'regular+aide', 'star1'] = 2
        df.loc[df['star2'] == 'regular', 'star2'] = 0
        df.loc[df['star2'] == 'small', 'star2'] = 1
        df.loc[df['star2'] == 'regular+aide', 'star2'] = 2
        df.loc[df['star3'] == 'regular', 'star3'] = 0
        df.loc[df['star3'] == 'small', 'star3'] = 1
        df.loc[df['star3'] == 'regular+aide', 'star3'] = 2
        # Free-lunch status per grade
        df.loc[df['lunchk'] == 'free', 'lunchk'] = 0
        df.loc[df['lunchk'] == 'non-free', 'lunchk'] = 1
        df.loc[df['lunch1'] == 'free', 'lunch1'] = 0
        df.loc[df['lunch1'] == 'non-free', 'lunch1'] = 1
        df.loc[df['lunch2'] == 'free', 'lunch2'] = 0
        df.loc[df['lunch2'] == 'non-free', 'lunch2'] = 1
        df.loc[df['lunch3'] == 'free', 'lunch3'] = 0
        df.loc[df['lunch3'] == 'non-free', 'lunch3'] = 1
        # School location per grade
        df.loc[df['schoolk'] == 'inner-city', 'schoolk'] = 0
        df.loc[df['schoolk'] == 'suburban', 'schoolk'] = 1
        df.loc[df['schoolk'] == 'rural', 'schoolk'] = 2
        df.loc[df['schoolk'] == 'urban', 'schoolk'] = 3
        df.loc[df['school1'] == 'inner-city', 'school1'] = 0
        df.loc[df['school1'] == 'suburban', 'school1'] = 1
        df.loc[df['school1'] == 'rural', 'school1'] = 2
        df.loc[df['school1'] == 'urban', 'school1'] = 3
        df.loc[df['school2'] == 'inner-city', 'school2'] = 0
        df.loc[df['school2'] == 'suburban', 'school2'] = 1
        df.loc[df['school2'] == 'rural', 'school2'] = 2
        df.loc[df['school2'] == 'urban', 'school2'] = 3
        df.loc[df['school3'] == 'inner-city', 'school3'] = 0
        df.loc[df['school3'] == 'suburban', 'school3'] = 1
        df.loc[df['school3'] == 'rural', 'school3'] = 2
        df.loc[df['school3'] == 'urban', 'school3'] = 3
        # Teacher degree per grade (note: kindergarten uses 'master+',
        # the other grades use 'phd' for the highest level)
        df.loc[df['degreek'] == 'bachelor', 'degreek'] = 0
        df.loc[df['degreek'] == 'master', 'degreek'] = 1
        df.loc[df['degreek'] == 'specialist', 'degreek'] = 2
        df.loc[df['degreek'] == 'master+', 'degreek'] = 3
        df.loc[df['degree1'] == 'bachelor', 'degree1'] = 0
        df.loc[df['degree1'] == 'master', 'degree1'] = 1
        df.loc[df['degree1'] == 'specialist', 'degree1'] = 2
        df.loc[df['degree1'] == 'phd', 'degree1'] = 3
        df.loc[df['degree2'] == 'bachelor', 'degree2'] = 0
        df.loc[df['degree2'] == 'master', 'degree2'] = 1
        df.loc[df['degree2'] == 'specialist', 'degree2'] = 2
        df.loc[df['degree2'] == 'phd', 'degree2'] = 3
        df.loc[df['degree3'] == 'bachelor', 'degree3'] = 0
        df.loc[df['degree3'] == 'master', 'degree3'] = 1
        df.loc[df['degree3'] == 'specialist', 'degree3'] = 2
        df.loc[df['degree3'] == 'phd', 'degree3'] = 3
        # Teacher career-ladder level per grade (note: kindergarten has no
        # 'noladder' category; grades 1-3 map both 'noladder' and 'notladder')
        df.loc[df['ladderk'] == 'level1', 'ladderk'] = 0
        df.loc[df['ladderk'] == 'level2', 'ladderk'] = 1
        df.loc[df['ladderk'] == 'level3', 'ladderk'] = 2
        df.loc[df['ladderk'] == 'apprentice', 'ladderk'] = 3
        df.loc[df['ladderk'] == 'probation', 'ladderk'] = 4
        df.loc[df['ladderk'] == 'pending', 'ladderk'] = 5
        df.loc[df['ladderk'] == 'notladder', 'ladderk'] = 6
        df.loc[df['ladder1'] == 'level1', 'ladder1'] = 0
        df.loc[df['ladder1'] == 'level2', 'ladder1'] = 1
        df.loc[df['ladder1'] == 'level3', 'ladder1'] = 2
        df.loc[df['ladder1'] == 'apprentice', 'ladder1'] = 3
        df.loc[df['ladder1'] == 'probation', 'ladder1'] = 4
        df.loc[df['ladder1'] == 'noladder', 'ladder1'] = 5
        df.loc[df['ladder1'] == 'notladder', 'ladder1'] = 6
        df.loc[df['ladder2'] == 'level1', 'ladder2'] = 0
        df.loc[df['ladder2'] == 'level2', 'ladder2'] = 1
        df.loc[df['ladder2'] == 'level3', 'ladder2'] = 2
        df.loc[df['ladder2'] == 'apprentice', 'ladder2'] = 3
        df.loc[df['ladder2'] == 'probation', 'ladder2'] = 4
        df.loc[df['ladder2'] == 'noladder', 'ladder2'] = 5
        df.loc[df['ladder2'] == 'notladder', 'ladder2'] = 6
        df.loc[df['ladder3'] == 'level1', 'ladder3'] = 0
        df.loc[df['ladder3'] == 'level2', 'ladder3'] = 1
        df.loc[df['ladder3'] == 'level3', 'ladder3'] = 2
        df.loc[df['ladder3'] == 'apprentice', 'ladder3'] = 3
        df.loc[df['ladder3'] == 'probation', 'ladder3'] = 4
        df.loc[df['ladder3'] == 'noladder', 'ladder3'] = 5
        df.loc[df['ladder3'] == 'notladder', 'ladder3'] = 6
        # Teacher ethnicity per grade
        df.loc[df['tethnicityk'] == 'cauc', 'tethnicityk'] = 0
        df.loc[df['tethnicityk'] == 'afam', 'tethnicityk'] = 1
        df.loc[df['tethnicity1'] == 'cauc', 'tethnicity1'] = 0
        df.loc[df['tethnicity1'] == 'afam', 'tethnicity1'] = 1
        df.loc[df['tethnicity2'] == 'cauc', 'tethnicity2'] = 0
        df.loc[df['tethnicity2'] == 'afam', 'tethnicity2'] = 1
        df.loc[df['tethnicity3'] == 'cauc', 'tethnicity3'] = 0
        df.loc[df['tethnicity3'] == 'afam', 'tethnicity3'] = 1
        df.loc[df['tethnicity3'] == 'asian', 'tethnicity3'] = 2
        df = df.dropna()
        # Target: total reading + math score across all four grades
        grade = df["readk"] + df["read1"] + df["read2"] + df["read3"]
        grade += df["mathk"] + df["math1"] + df["math2"] + df["math3"]
        names = df.columns
        # Columns 8..15 are the per-grade scores folded into the target;
        # everything else (skipping column 16) is a feature
        target_names = names[8:16]
        data_names = np.concatenate((names[0:8],names[17:]))
        X = df.loc[:, data_names].values
        y = grade.values
    # Facebook comment volume, variant 1: column 53 is the target
    if name=="facebook_1":
        df = pd.read_csv(base_path + 'facebook/Features_Variant_1.csv')
        y = df.iloc[:,53].values
        X = df.iloc[:,0:53].values
    # Facebook comment volume, variant 2: same layout as variant 1
    if name=="facebook_2":
        df = pd.read_csv(base_path + 'facebook/Features_Variant_2.csv')
        y = df.iloc[:,53].values
        X = df.iloc[:,0:53].values
    # Protein tertiary structure (CASP): first column is the RMSD target
    if name=="bio":
        #https://github.com/joefavergel/TertiaryPhysicochemicalProperties/blob/master/RMSD-ProteinTertiaryStructures.ipynb
        df = pd.read_csv(base_path + 'CASP.csv')
        y = df.iloc[:,0].values
        X = df.iloc[:,1:].values
    # BlogFeedback: first 280 columns are features, last column is the target
    if name=='blog_data':
        # https://github.com/xinbinhuang/feature-selection_blogfeedback
        df = pd.read_csv(base_path + 'blogData_train.csv', header=None)
        X = df.iloc[:,0:280].values
        y = df.iloc[:,-1].values
    # Concrete compressive strength: last column is the target
    if name == "concrete":
        dataset = np.loadtxt(open(base_path + 'Concrete_Data.csv', "rb"), delimiter=",", skiprows=1)
        X = dataset[:, :-1]
        y = dataset[:, -1:]
    # Bike sharing demand: one-hot encode season/weather, expand the
    # datetime column into hour/day/month/year features
    if name=="bike":
        # https://www.kaggle.com/rajmehra03/bike-sharing-demand-rmsle-0-3194
        df=pd.read_csv(base_path + 'bike_train.csv')
        # One-hot encode 'season' to expand the feature set
        season=pd.get_dummies(df['season'],prefix='season')
        df=pd.concat([df,season],axis=1)
        # One-hot encode 'weather' likewise
        weather=pd.get_dummies(df['weather'],prefix='weather')
        df=pd.concat([df,weather],axis=1)
        # The raw categorical columns are now redundant
        df.drop(['season','weather'],inplace=True,axis=1)
        df.head()
        # Expand the timestamp into calendar features
        df["hour"] = [t.hour for t in pd.DatetimeIndex(df.datetime)]
        df["day"] = [t.dayofweek for t in pd.DatetimeIndex(df.datetime)]
        df["month"] = [t.month for t in pd.DatetimeIndex(df.datetime)]
        df['year'] = [t.year for t in pd.DatetimeIndex(df.datetime)]
        df['year'] = df['year'].map({2011:0, 2012:1})
        df.drop('datetime',axis=1,inplace=True)
        # 'casual' + 'registered' would leak the target ('count')
        df.drop(['casual','registered'],axis=1,inplace=True)
        df.columns.to_series().groupby(df.dtypes).groups
        X = df.drop('count',axis=1).values
        y = df['count'].values
    # Communities and crime: drop identifiers, impute the one sparse column
    if name=="community":
        # https://github.com/vbordalo/Communities-Crime/blob/master/Crime_v1.ipynb
        attrib = pd.read_csv(base_path + 'communities_attributes.csv', delim_whitespace = True)
        data = pd.read_csv(base_path + 'communities.data', names = attrib['attributes'])
        data = data.drop(columns=['state','county',
                                  'community','communityname',
                                  'fold'], axis=1)
        data = data.replace('?', np.nan)
        # Impute mean values for samples with missing values
        # NOTE(review): sklearn.preprocessing.Imputer was removed in
        # scikit-learn 0.22; this branch requires an older sklearn (or a
        # port to sklearn.impute.SimpleImputer).
        from sklearn.preprocessing import Imputer
        imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
        imputer = imputer.fit(data[['OtherPerCap']])
        data[['OtherPerCap']] = imputer.transform(data[['OtherPerCap']])
        data = data.dropna(axis=1)
        X = data.iloc[:, 0:100].values
        y = data.iloc[:, 100].values
    # Cast to float32 for the downstream Pytorch models
    X = X.astype(np.float32)
    y = y.astype(np.float32)
    return X, y
| 17,896 | 49.414085 | 122 | py |
cqr | cqr-master/reproducible_experiments/run_equalized_coverage_experiment.py | #!/usr/bin/env python
# coding: utf-8
import os
import torch
import random
import numpy as np
np.warnings.filterwarnings('ignore')
from datasets import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import pandas as pd
# for MEPS
def condition(x, y=None):
    """Group id for MEPS data: 1 when the last feature (RACE) is positive, else 0."""
    return 1 if x[0][-1] > 0 else 0
from cqr import helper
from nonconformist.nc import RegressorNc
from nonconformist.nc import SignErrorErrFunc
from nonconformist.nc import QuantileRegAsymmetricErrFunc
def append_statistics(coverage_sample,
                      length_sample,
                      method_name,
                      dataset_name_vec,
                      method_vec,
                      coverage_vec,
                      length_vec,
                      seed_vec,
                      test_ratio_vec,
                      seed,
                      test_ratio,
                      dataset_name_group_0,
                      dataset_name_group_1):
    """ Append per-sample coverage/length statistics of two groups to the
    running result lists (all accumulator lists are mutated in place).

    Parameters
    ----------
    coverage_sample : list of two boolean numpy arrays, per-sample coverage
                      indicators for group 0 and group 1
    length_sample : list of two numpy arrays, per-sample interval lengths
    method_name : string, name of the conformal method
    dataset_name_vec, method_vec, coverage_vec, length_vec, seed_vec,
    test_ratio_vec : lists, extended in place with one entry per test sample
    seed : int, random seed used in the experiment
    test_ratio : float, ratio of held-out test data
    dataset_name_group_0 : string, row label for group 0 samples
    dataset_name_group_1 : string, row label for group 1 samples
    """
    dataset_name_group = [dataset_name_group_0, dataset_name_group_1]
    for group_id in range(len(dataset_name_group)):
        # FIX: the `np.float` alias was removed in NumPy 1.24 and raised
        # AttributeError; the builtin `float` is the documented replacement.
        coverage = (coverage_sample[group_id]).astype(float)
        length = length_sample[group_id]
        for i in range(len(coverage)):
            dataset_name_vec.append(dataset_name_group[group_id])
            method_vec.append(method_name)
            coverage_vec.append(coverage[i])
            length_vec.append(length[i])
            seed_vec.append(seed)
            test_ratio_vec.append(test_ratio)
def run_equalized_coverage_experiment(dataset_name, method, seed, save_to_csv=True, test_ratio = 0.2):
    """Run marginal/conditional conformal experiments for equalized coverage.

    Fits three variants of the chosen base method -- a marginal conformal
    predictor, a joint conditional one (calibrated per group via the
    `condition` indicator), and a groupwise one (an independent model per
    group) -- then records per-test-sample coverage and interval length,
    split by group, via `append_statistics`.

    Parameters
    ----------
    dataset_name : string, base dataset name (e.g. "meps_21")
    method       : string, base estimator: "net" (conformal mean-regression
                   neural net) or "qnet" (CQR quantile neural net); any
                   other value records nothing
    seed         : integer, random seed (also seeds the train/test split)
    save_to_csv  : boolean, append per-sample rows to ./results/results.csv
    test_ratio   : float, fraction of the data held out as the test set
    """
    # make the experiment fully reproducible
    random_state_train_test = seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    # '/scratch' only exists on the cluster; otherwise assume the local machine
    if os.path.isdir('/scratch'):
        dataset_base_path = '/scratch/users/yromano/data/regression_data/'
    else:
        dataset_base_path = '/Users/romano/mydata/regression_data/'

    # desired miscoverage error
    alpha = 0.1

    # names under which the two groups are reported in the results
    dataset_name_group_0 = dataset_name + "_non_white"
    dataset_name_group_1 = dataset_name + "_white"

    # load the dataset and split into train/test
    X, y = datasets.GetDataset(dataset_name, dataset_base_path)
    x_train, x_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=test_ratio,
                                                        random_state=random_state_train_test)

    # compute input dimensions
    n_train = x_train.shape[0]
    in_shape = x_train.shape[1]

    # divide the data into proper training set and calibration set
    idx = np.random.permutation(n_train)
    n_half = int(np.floor(n_train / 2))
    idx_train, idx_cal = idx[:n_half], idx[n_half:2 * n_half]

    # zero mean and unit variance scaling, fitted on the proper training set
    scalerX = StandardScaler()
    scalerX = scalerX.fit(x_train[idx_train])
    x_train = scalerX.transform(x_train)
    x_test = scalerX.transform(x_test)

    # log-transform the response
    y_train = np.log(1.0 + y_train)
    y_test = np.log(1.0 + y_test)

    # reshape the data
    x_train = np.asarray(x_train)
    y_train = np.squeeze(np.asarray(y_train))
    x_test = np.asarray(x_test)
    y_test = np.squeeze(np.asarray(y_test))

    # display basic information
    print("Dataset: %s" % (dataset_name))
    print("Dimensions: train set (n=%d, p=%d) ; test set (n=%d, p=%d)" %
          (x_train.shape[0], x_train.shape[1], x_test.shape[0], x_test.shape[1]))

    # accumulators consumed by append_statistics and the output CSV
    dataset_name_vec = []
    method_vec = []
    coverage_vec = []
    length_vec = []
    seed_vec = []
    test_ratio_vec = []

    # shared network hyper-parameters
    nn_learn_func = torch.optim.Adam  # pytorch's optimizer object
    epochs = 1000                     # number of epochs
    lr = 0.0005                       # learning rate
    batch_size = 64                   # mini-batch size
    hidden_size = 64                  # hidden dimension of the network
    dropout = 0.1                     # dropout regularization rate
    wd = 1e-6                         # weight decay regularization
    cv_test_ratio = 0.1               # held-out ratio used in cross-validation
    cv_random_state = 1               # seed for the cross-validation split
    quantiles_net = [0.05, 0.95]      # desired quantiles (qnet only)

    def _record(method_name, y_lower, y_upper):
        # compute per-sample coverage/length, split by group, and log them
        coverage_sample, length_sample = helper.compute_coverage_per_sample(
            y_test, y_lower, y_upper, alpha, method_name, x_test, condition)
        append_statistics(coverage_sample, length_sample, method_name,
                          dataset_name_vec, method_vec, coverage_vec,
                          length_vec, seed_vec, test_ratio_vec,
                          seed, test_ratio,
                          dataset_name_group_0, dataset_name_group_1)

    if method in ("net", "qnet"):
        # build a fresh (model, conformity score) pair per run so that each
        # of the three variants below trains an independent network
        if method == "net":
            def _new_nc():
                # conformal mean regression: neural net point estimate with
                # the signed-error conformity score
                model = helper.MSENet_RegressorAdapter(model=None,
                                                       fit_params=None,
                                                       in_shape=in_shape,
                                                       hidden_size=hidden_size,
                                                       learn_func=nn_learn_func,
                                                       epochs=epochs,
                                                       batch_size=batch_size,
                                                       dropout=dropout,
                                                       lr=lr,
                                                       wd=wd,
                                                       test_ratio=cv_test_ratio,
                                                       random_state=cv_random_state)
                return RegressorNc(model, SignErrorErrFunc())
            base_name = "Conformal Neural Network"
        else:
            def _new_nc():
                # CQR: quantile net with the asymmetric conformity score
                model = helper.AllQNet_RegressorAdapter(model=None,
                                                        fit_params=None,
                                                        in_shape=in_shape,
                                                        hidden_size=hidden_size,
                                                        quantiles=quantiles_net,
                                                        learn_func=nn_learn_func,
                                                        epochs=epochs,
                                                        batch_size=batch_size,
                                                        dropout=dropout,
                                                        lr=lr,
                                                        wd=wd,
                                                        test_ratio=cv_test_ratio,
                                                        random_state=cv_random_state,
                                                        use_rearrangement=False)
                return RegressorNc(model, QuantileRegAsymmetricErrFunc())
            base_name = "CQR Neural Network"

        # 1) marginal conformal prediction: one calibration over all samples
        y_lower, y_upper = helper.run_icp(_new_nc(), x_train, y_train, x_test,
                                          idx_train, idx_cal, alpha)
        _record("Marginal " + base_name, y_lower, y_upper)

        # 2) joint conditional calibration: one model, per-group conformal
        #    quantiles via the `condition` indicator
        y_lower, y_upper = helper.run_icp(_new_nc(), x_train, y_train, x_test,
                                          idx_train, idx_cal, alpha, condition)
        _record("Conditional " + base_name + " (joint)", y_lower, y_upper)

        # 3) groupwise: an independent model and calibration per group
        category_map = np.array([condition((x_train[i, :], None))
                                 for i in range(x_train.shape[0])])
        categories = np.unique(category_map)
        nc_list = [_new_nc() for _ in categories]
        y_lower, y_upper = helper.run_icp_sep(nc_list, x_train, y_train, x_test,
                                              idx_train, idx_cal, alpha, condition)
        _record("Conditional " + base_name + " (groupwise)", y_lower, y_upper)

    ############### Summary
    coverage_str = 'Coverage (expected ' + str(100 - alpha * 100) + '%)'

    if save_to_csv:
        outdir = './results/'
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        out_name = outdir + 'results.csv'
        df = pd.DataFrame({'name': dataset_name_vec,
                           'method': method_vec,
                           coverage_str: coverage_vec,
                           'Avg. Length': length_vec,
                           'seed': seed_vec,
                           'train test ratio': test_ratio_vec})
        # append to any previously saved results
        if os.path.isfile(out_name):
            df2 = pd.read_csv(out_name)
            df = pd.concat([df2, df], ignore_index=True)
        df.to_csv(out_name, index=False)
cqr | cqr-master/reproducible_experiments/all_equalized_coverage_experiments.py | ###############################################################################
# Script for reproducing the results of CQR paper
###############################################################################
import numpy as np
from reproducible_experiments.run_equalized_coverage_experiment import run_equalized_coverage_experiment
#from run_equalized_coverage_experiment import run_equalized_coverage_experiment
# list methods to test
test_methods = ['net',
                'qnet']

dataset_names = ["meps_21"]

test_ratio_vec = [0.2]

# vector of random seeds
random_state_train_test = np.arange(40)

# Run every (method, seed, dataset, test-ratio) combination.
# Iterate the sequences directly instead of hard-coding their lengths
# (range(2)/range(40)/range(1)) so editing a list above cannot silently
# skip or double-run configurations.
for test_method in test_methods:
    for random_state in random_state_train_test:
        for dataset_name in dataset_names:
            for test_ratio in test_ratio_vec:
                # run an experiment and save average results to CSV file
                run_equalized_coverage_experiment(dataset_name,
                                                  test_method,
                                                  random_state,
                                                  True,
                                                  test_ratio)
| 1,442 | 40.228571 | 104 | py |
cqr | cqr-master/reproducible_experiments/all_cqr_experiments.py |
###############################################################################
# Script for reproducing the results of CQR paper
###############################################################################
import numpy as np
from reproducible_experiments.run_cqr_experiment import run_experiment
#from run_cqr_experiment import run_experiment
# list methods to test
test_methods = ['linear',
                'neural_net',
                'random_forest',
                'quantile_net',
                'cqr_quantile_net',
                'cqr_asymmetric_quantile_net',
                'rearrangement',
                'cqr_rearrangement',
                'cqr_asymmetric_rearrangement',
                'quantile_forest',
                'cqr_quantile_forest',
                'cqr_asymmetric_quantile_forest']

# list of datasets
dataset_names = ['meps_19',
                 'meps_20',
                 'meps_21',
                 'star',
                 'facebook_1',
                 'facebook_2',
                 'bio',
                 'blog_data',
                 'concrete',
                 'bike',
                 'community']

# vector of random seeds
random_state_train_test = np.arange(20)

# Run every (method, dataset, seed) combination. Iterate the sequences
# directly instead of hard-coding their lengths (range(12)/range(11)/
# range(20)) so editing a list above cannot silently skip configurations.
for test_method in test_methods:
    for dataset_name in dataset_names:
        for random_state in random_state_train_test:
            # run an experiment and save average results to CSV file
            run_experiment(dataset_name, test_method, random_state)
| 1,670 | 32.42 | 79 | py |
cqr | cqr-master/reproducible_experiments/run_cqr_experiment.py | import os
import sys
import torch
import random
import numpy as np
import pandas as pd
from cqr import helper
from datasets import datasets
from sklearn import linear_model
from nonconformist.nc import NcFactory
from nonconformist.nc import RegressorNc
from nonconformist.nc import AbsErrorErrFunc
from nonconformist.nc import QuantileRegErrFunc
from nonconformist.nc import RegressorNormalizer
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from nonconformist.nc import QuantileRegAsymmetricErrFunc
# Print pandas frames with 3 decimal places.
# NOTE: the bare 'precision' option alias was removed in pandas 2.0;
# 'display.precision' is the canonical key and works on all versions.
pd.set_option('display.precision', 3)

# Default dataset location; overridden below depending on the machine.
base_dataset_path = './datasets/'

# '/scratch' only exists on the cluster.
if os.path.isdir('/scratch'):
    local_machine = 0
else:
    local_machine = 1

if local_machine:
    base_dataset_path = '/Users/romano/mydata/regression_data/'
else:
    base_dataset_path = '/scratch/users/yromano/data/regression_data/'

# when True, plot the constructed prediction intervals for each method
plot_results = False
def run_experiment(dataset_name,
                   test_method,
                   random_state_train_test,
                   save_to_csv=True):
    """ Estimate prediction intervals and print the average length and coverage

    Parameters
    ----------
    dataset_name : array of strings, list of datasets
    test_method : string, method to be tested, estimating
                  the 90% prediction interval
    random_state_train_test : integer, random seed to be used
    save_to_csv : boolean, save average length and coverage to csv (True)
                  or not (False)
    """
    # rows appended per evaluated method, written to the output CSV
    dataset_name_vec = []
    method_vec = []
    coverage_vec = []
    length_vec = []
    seed_vec = []

    # make the experiment fully reproducible
    seed = random_state_train_test
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    # per-method (coverage, length), keyed by short label; methods that were
    # not run in this invocation are reported as (0, 0) in the summary table
    summary = {}

    # determines the size of test set
    test_ratio = 0.2

    # conformal prediction miscoverage level
    significance = 0.1

    # desired quantile levels, used by the quantile regression methods
    quantiles = [0.05, 0.95]

    # Random forests parameters (shared by conditional quantile random forests
    # and conditional mean random forests regression).
    n_estimators = 1000   # usual random forests n_estimators parameter
    min_samples_leaf = 1  # default parameter of sklearn

    # Quantile random forests parameters.
    # See QuantileForestRegressorAdapter class for more details
    quantiles_forest = [5, 95]
    CV_qforest = True
    coverage_factor = 0.85
    cv_test_ratio = 0.05
    cv_random_state = 1
    cv_range_vals = 30
    cv_num_vals = 10

    # Neural network parameters (shared by conditional quantile neural network
    # and conditional mean neural network regression).
    # See AllQNet_RegressorAdapter and MSENet_RegressorAdapter in helper.py
    nn_learn_func = torch.optim.Adam
    epochs = 1000
    lr = 0.0005
    hidden_size = 64
    batch_size = 64
    dropout = 0.1
    wd = 1e-6

    # Ask for a reduced coverage when tuning the network parameters by
    # cross-validation to avoid too conservative initial estimation of the
    # prediction interval. This estimation will be conformalized by CQR.
    quantiles_net = [0.1, 0.9]

    # local conformal prediction parameters. See RegressorNc for details.
    beta = 1
    beta_net = 1

    # the local ridge method uses k-NN regression as the MAD estimator;
    # number of neighbors used by nearest neighbor regression
    n_neighbors = 11

    print(dataset_name)
    sys.stdout.flush()

    try:
        # load the dataset
        X, y = datasets.GetDataset(dataset_name, base_dataset_path)
    except Exception:
        # narrow from a bare `except:` so KeyboardInterrupt/SystemExit pass
        print("CANNOT LOAD DATASET!")
        return

    # Dataset is divided into test and train data based on test_ratio parameter
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=test_ratio,
                                                        random_state=random_state_train_test)

    # fit a simple ridge regression model (sanity check)
    model = linear_model.RidgeCV()
    model = model.fit(X_train, np.squeeze(y_train))
    predicted_data = model.predict(X_test).astype(np.float32)

    # calculate the normalized mean squared error
    print("Ridge relative error: %f" % (np.sum((np.squeeze(y_test) - predicted_data) ** 2) / np.sum(np.squeeze(y_test) ** 2)))
    sys.stdout.flush()

    # reshape the data
    X_train = np.asarray(X_train)
    y_train = np.asarray(y_train)
    X_test = np.asarray(X_test)
    y_test = np.asarray(y_test)

    # input dimensions
    n_train = X_train.shape[0]
    in_shape = X_train.shape[1]

    print("Size: train (%d, %d), test (%d, %d)" % (X_train.shape[0], X_train.shape[1], X_test.shape[0], X_test.shape[1]))
    sys.stdout.flush()

    # set seed for splitting the data into proper train and calibration
    np.random.seed(seed)
    idx = np.random.permutation(n_train)

    # divide the data into proper training set and calibration set
    n_half = int(np.floor(n_train / 2))
    idx_train, idx_cal = idx[:n_half], idx[n_half:2 * n_half]

    # zero mean and unit variance scaling of the train and test features
    scalerX = StandardScaler()
    scalerX = scalerX.fit(X_train[idx_train])
    X_train = scalerX.transform(X_train)
    X_test = scalerX.transform(X_test)

    # scale the labels by dividing each by the mean absolute response
    mean_ytrain = np.mean(np.abs(y_train[idx_train]))
    y_train = np.squeeze(y_train) / mean_ytrain
    y_test = np.squeeze(y_test) / mean_ytrain

    # ----- private helpers (factor out the pattern repeated per method) -----

    def _record(label, y_lower, y_upper):
        # optionally plot, then compute average coverage/length and log them
        if plot_results:
            helper.plot_func_data(y_test, y_lower, y_upper, label)
        coverage, length = helper.compute_coverage(y_test, y_lower, y_upper,
                                                   significance, label)
        summary[label] = (coverage, length)
        dataset_name_vec.append(dataset_name)
        method_vec.append(label)
        coverage_vec.append(coverage)
        length_vec.append(length)
        seed_vec.append(seed)

    def _run_icp(nc, label):
        # split-conformal calibration of the given nonconformity object
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test,
                                          idx_train, idx_cal, significance)
        _record(label, y_lower, y_upper)

    def _run_full(model_full, label):
        # fit on the full training set; predictions are the two quantiles
        model_full.fit(X_train, y_train)
        tmp = model_full.predict(X_test)
        _record(label, tmp[:, 0], tmp[:, 1])

    def _mse_net():
        # fresh conditional-mean neural network adapter
        return helper.MSENet_RegressorAdapter(model=None,
                                              fit_params=None,
                                              in_shape=in_shape,
                                              hidden_size=hidden_size,
                                              learn_func=nn_learn_func,
                                              epochs=epochs,
                                              batch_size=batch_size,
                                              dropout=dropout,
                                              lr=lr,
                                              wd=wd,
                                              test_ratio=cv_test_ratio,
                                              random_state=cv_random_state)

    def _all_q_net(q, rearrange):
        # fresh quantile neural network adapter for the given quantile levels
        return helper.AllQNet_RegressorAdapter(model=None,
                                               fit_params=None,
                                               in_shape=in_shape,
                                               hidden_size=hidden_size,
                                               quantiles=q,
                                               learn_func=nn_learn_func,
                                               epochs=epochs,
                                               batch_size=batch_size,
                                               dropout=dropout,
                                               lr=lr,
                                               wd=wd,
                                               test_ratio=cv_test_ratio,
                                               random_state=cv_random_state,
                                               use_rearrangement=rearrange)

    def _rf():
        # fresh conditional-mean random forest
        return RandomForestRegressor(n_estimators=n_estimators,
                                     min_samples_leaf=min_samples_leaf,
                                     random_state=0)

    def _qforest_params(use_cv):
        # parameters for QuantileForestRegressorAdapter. The original code
        # assigned "random_state" = 0 and then immediately overwrote it with
        # cv_random_state; only the final value is kept here.
        return {"random_state": cv_random_state,
                "min_samples_leaf": min_samples_leaf,
                "n_estimators": n_estimators,
                "max_features": X_train.shape[1],
                "CV": use_cv,
                "coverage_factor": coverage_factor,
                "test_ratio": cv_test_ratio,
                "range_vals": cv_range_vals,
                "num_vals": cv_num_vals}

    ######################## Linear
    if 'linear' == test_method:
        _run_icp(RegressorNc(linear_model.RidgeCV()), "Ridge")

        # locally-weighted variant: k-NN regression estimates the MAD
        nc = NcFactory.create_nc(
            linear_model.RidgeCV(),
            normalizer_model=KNeighborsRegressor(n_neighbors=n_neighbors)
        )
        _run_icp(nc, "Ridge-L")

    ######################### Neural net
    if 'neural_net' == test_method:
        _run_icp(RegressorNc(_mse_net()), "Net")

        # locally-weighted variant: a second net estimates the MAD
        normalizer_adapter = _mse_net()
        adapter = _mse_net()
        normalizer = RegressorNormalizer(adapter,
                                         normalizer_adapter,
                                         AbsErrorErrFunc())
        nc = RegressorNc(adapter, AbsErrorErrFunc(), normalizer, beta=beta_net)
        _run_icp(nc, "Net-L")

    ################## Random Forest
    if 'random_forest' == test_method:
        _run_icp(RegressorNc(_rf(), AbsErrorErrFunc()), "RF")

        # locally-weighted variant: a second forest estimates the MAD
        normalizer_adapter = _rf()
        adapter = _rf()
        normalizer = RegressorNormalizer(adapter,
                                         normalizer_adapter,
                                         AbsErrorErrFunc())
        nc = RegressorNc(adapter, AbsErrorErrFunc(), normalizer, beta=beta)
        _run_icp(nc, "RF-L")

    ################## Quantile Net
    if 'quantile_net' == test_method:
        _run_full(_all_q_net(quantiles, False), "QNet")

    if 'cqr_quantile_net' == test_method:
        _run_icp(RegressorNc(_all_q_net(quantiles_net, False),
                             QuantileRegErrFunc()), "CQR Net")

    if 'cqr_asymmetric_quantile_net' == test_method:
        _run_icp(RegressorNc(_all_q_net(quantiles_net, False),
                             QuantileRegAsymmetricErrFunc()), "CQR Sign Net")

    ################### Rearrangement Quantile Net
    if 'rearrangement' == test_method:
        _run_full(_all_q_net(quantiles, True), "Rearrange QNet")

    if 'cqr_rearrangement' == test_method:
        _run_icp(RegressorNc(_all_q_net(quantiles_net, True),
                             QuantileRegErrFunc()), "Rearrange CQR Net")

    if 'cqr_asymmetric_rearrangement' == test_method:
        # FIX: the original passed the label "Rearrange CQR Net" to
        # compute_coverage in this branch; use the correct method name.
        _run_icp(RegressorNc(_all_q_net(quantiles_net, True),
                             QuantileRegAsymmetricErrFunc()),
                 "Rearrange CQR Sign Net")

    ################### Quantile Random Forest
    if 'quantile_forest' == test_method:
        model_full = helper.QuantileForestRegressorAdapter(
            model=None,
            fit_params=None,
            quantiles=np.asarray(quantiles) * 100,  # percent units
            params=_qforest_params(False))
        _run_full(model_full, "QRF")

    if 'cqr_quantile_forest' == test_method:
        model = helper.QuantileForestRegressorAdapter(
            model=None,
            fit_params=None,
            quantiles=quantiles_forest,
            params=_qforest_params(CV_qforest))
        _run_icp(RegressorNc(model, QuantileRegErrFunc()), "CQR RF")

    if 'cqr_asymmetric_quantile_forest' == test_method:
        model = helper.QuantileForestRegressorAdapter(
            model=None,
            fit_params=None,
            quantiles=quantiles_forest,
            params=_qforest_params(CV_qforest))
        _run_icp(RegressorNc(model, QuantileRegAsymmetricErrFunc()), "CQR Sign RF")

    ############### Summary
    coverage_str = 'Coverage (expected ' + str(100 - significance * 100) + '%)'

    # (display name in the summary table, short label recorded above)
    table_rows = [('CP Linear', 'Ridge'),
                  ('CP Linear Local', 'Ridge-L'),
                  ('CP Neural Net', 'Net'),
                  ('CP Neural Net Local', 'Net-L'),
                  ('CP Random Forest', 'RF'),
                  ('CP Random Forest Local', 'RF-L'),
                  ('CP Quantile Net', 'CQR Net'),
                  ('CP Asymmetric Quantile Net', 'CQR Sign Net'),
                  ('Quantile Net', 'QNet'),
                  ('CP Rearrange Quantile Net', 'Rearrange CQR Net'),
                  ('CP Asymmetric Rearrange Quantile Net', 'Rearrange CQR Sign Net'),
                  ('Rearrange Quantile Net', 'Rearrange QNet'),
                  ('CP Quantile Random Forest', 'CQR RF'),
                  ('CP Asymmetric Quantile Random Forest', 'CQR Sign RF'),
                  ('Quantile Random Forest', 'QRF')]
    results = np.array(
        [[dataset_name, coverage_str, 'Avg. Length', 'Seed']] +
        [[row_name] + list(summary.get(label, (0, 0))) + [seed]
         for row_name, label in table_rows])

    results_ = pd.DataFrame(data=results[1:, 1:],
                            index=results[1:, 0],
                            columns=results[0, 1:])

    print("== SUMMARY == ")
    print("dataset name: " + dataset_name)
    print(results_)
    sys.stdout.flush()

    if save_to_csv:
        outdir = './results/'
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        out_name = outdir + 'results.csv'
        df = pd.DataFrame({'name': dataset_name_vec,
                           'method': method_vec,
                           coverage_str: coverage_vec,
                           'Avg. Length': length_vec,
                           'seed': seed_vec})
        # append to any previously saved results
        if os.path.isfile(out_name):
            df2 = pd.read_csv(out_name)
            df = pd.concat([df2, df], ignore_index=True)
        df.to_csv(out_name, index=False)
| 30,512 | 41.915612 | 139 | py |
tmbidl | tmbidl-master/catalogs/generateContCat.py | # generateContCat.py - generate continuum catalog
#
# Usage:
#
# > python generateContCat.py <catalog name>
#
def generateContCat(proj='hii'):
    # Merge the observing catalogs of a GBT project into a single
    # 'source_catalog' continuum file (Python 2 code; uses print statements).
    # proj -- one of 'hii', 'te', 'he3', 'cii': selects the directory and
    #         the list of .cat files to concatenate.
    # set the output file
    outfile = 'source_catalog'
    fout = open(outfile, 'w')
    print " "
    print "Reading data from proj :", proj
    print " "
    # define project and catalog lists
    if proj == 'hii':
        path = '/home/groups/3helium/GBT/hii/obs/'
        infile = ['fluxcal.cat', 'pointing.cat', 'final_18-30_good.cat', 'final_50-65_good.cat', 'reobserve_30-50.cat', 'reobserve_30-50_continuum.cat', 'far_arm_nvss.cat', 'far_arm_2nd_tier.cat', 'cross_cal.cat', 'final_30-65.cat', 'final_18-30_fainter.cat']
    elif proj == 'te':
        path = '/home/groups/3helium/GBT/te/obs/'
        infile = ['fluxcal.cat', 'pointing.cat', 'FC72.cat', 'S83.cat', 'R97.cat', 'R96.cat', 'EC.cat', 'OG.cat', 'FJL96.cat', 'FJL89.cat', 'QRBBW06.cat', 'glimpse.cat']
    elif proj == 'he3':
        path = '/home/groups/3helium/GBT/he3/obs/'
        infile = ['fluxcal.cat', 'pointing.cat', 'pne.cat', 'hii.cat']
    elif proj == 'cii':
        path = '/home/groups/3helium/GBT/cii/obs/'
        infile = ['fluxcal.cat', 'pointing.cat', 'hii.cat']
    else:
        print 'No valid projects. Use: hii, te, he3, cii.'
        return
    # write out header
    fout.write("CONTINUUM SOURCE CATALOG for HII Region Survey ==============\n")
    fout.write("NOVEMBER 2008\n")
    fout.write(" \n")
    # loop through catalog list
    for icat in range(len(infile)):
        lines = open(path+infile[icat], 'r').readlines()
        print 'Processing catalog: ', infile[icat]
        # loop through each catalog
        # start flips to 1 once the HEAD line has been seen; source rows
        # are only parsed after that point.
        start = 0
        for i in range(len(lines)):
            # get the line for each table
            x = lines[i].split()
            # read sources
            if start == 1:
                # if no elements (e.g., blank line) break out of loop
                if len(x) == 0:
                    break
                source = x[0]
                # check that this is not a comment statement
                # NOTE(review): assumes every source token has length >= 2;
                # a one-character token would raise IndexError -- confirm.
                if (source[0] + source[1]) != '##':
                    # remove comments from source names
                    if source[0] == '#':
                        source = source[1:]
                    ra = x[1].split(':')
                    dec = x[2].split(':')
                    # output info
                    # NOTE(review): `epoch` is only bound by a COORDMODE line;
                    # if a catalog has HEAD before COORDMODE this raises
                    # NameError -- confirm catalogs always define COORDMODE first.
                    fout.write("%-12s %-2s %-2s %-7s %-3s %-2s %-6s %-5s\n" % (source, ra[0], ra[1], ra[2], dec[0], dec[1], dec[2], epoch))
            # check for the epoch
            if x[0] == 'COORDMODE' or x[0] == 'coordmode':
                epoch = x[2]
            # check when to begin reading sources
            if x[0] == 'HEAD' or x[0] == 'head':
                start = 1
    fout.close()
if __name__=="__main__":
    import sys
    import pdb
    # CLI entry: `python generateContCat.py <project>` where <project> is
    # one of hii/te/he3/cii.  (The pdb import appears to be a debugging leftover.)
    generateContCat(str(sys.argv[1]).strip())
| 2,910 | 34.5 | 259 | py |
MCEvidence | MCEvidence-master/examples.py | '''
Collection of codes that can be used to test the MCEvidence code.
The examples below demonstrate the validity of MCEvidence for
three MCMC samplers:
* Gibbs Sampling
* PyStan NUT sampler
* EMCEE sampler
Two types of likelihood surface is considered
* Gaussian Linear Model - 3 dimensions
* N-dimensional Gaussian - 10 dimensions
'''
from __future__ import print_function
import IPython
import pickle
#
import os, sys, math,glob
import pandas as pd
import time
import numpy as np
import sklearn as skl
import statistics
from sklearn.neighbors import NearestNeighbors, DistanceMetric
import scipy.special as sp
#
from MCEvidence import MCEvidence
#pretty plots if seaborn is installed
try:
import seaborn as sns
sns.set(style='ticks', palette='Set2',font_scale=1.5)
#sns.set()
except:
pass
class glm_eg(object):
    """Gaussian Linear Model toy example (quadratic polynomial).

    Simulates noisy data from y = theta0 + theta1*x + theta2*x**2 and
    provides a Gibbs sampler over the three coefficients plus the
    analytic Bayesian evidence of the linear-Gaussian model.
    """
    def __init__(self, x=None, theta=None,
                 rms=0.2, ptheta=None, verbose=1):
        # Default design: 200 points on [0, 4].
        if x is None:
            xmin = 0.0
            xmax = 4.0
            nDataPoints = 200
            x = np.linspace(xmin, xmax, nDataPoints)
        # data points
        self.x = x
        self.ndata = len(x)

        # Data simulation inputs (true parameter values)
        if theta is None:
            theta = np.array([1.0, 4.0, -1.0])
        # parameters
        self.theta = theta
        self.ndim = len(theta)

        # flat priors on parameters (width of the uniform prior per dim)
        if ptheta is None:
            ptheta = np.repeat(10.0, self.ndim)
        # BUG FIX: ptheta was previously never stored on self, so
        # evidence() crashed with AttributeError.
        self.ptheta = np.asarray(ptheta)

        # Generate quadratic data with noise
        self.y = self.quadratic(self.theta)
        self.noise_rms = np.ones(self.ndata) * rms
        self.y_sample = self.y + np.random.normal(0.0, self.noise_rms)

        # Noise-scaled design matrix D and data vector b.
        self.D = np.zeros(shape=(self.ndata, self.ndim))
        self.D[:, 0] = 1.0 / self.noise_rms
        self.D[:, 1] = self.x / self.noise_rms
        self.D[:, 2] = self.x**2 / self.noise_rms
        self.b = self.y_sample / self.noise_rms

        # Initial sampling point: least-squares solution (D^T D)^-1 D^T b.
        # BUG FIX: reduce() is not a builtin in Python 3; use chained dot.
        DtD_inv = np.linalg.inv(np.dot(self.D.T, self.D))
        self.theta_sample = DtD_inv.dot(self.D.T).dot(self.b)

    def quadratic(self, parameters):
        """Evaluate theta0 + theta1*x + theta2*x**2 on the stored grid."""
        return parameters[0] + parameters[1]*self.x + parameters[2]*self.x**2

    def evidence(self):
        """Return the analytic log-evidence of the Gaussian linear model."""
        b = self.b
        D = self.D
        DtD_inv = np.linalg.inv(np.dot(D.T, D))
        # BUG FIX: det() was an undefined name here; use np.linalg.det.
        num1 = np.log(np.linalg.det(2.0 * np.pi * DtD_inv))
        # BUG FIX: reduce() replaced by chained dot products.
        num2 = -0.5 * (np.dot(b.T, b) - b.T.dot(D).dot(DtD_inv).dot(D.T).dot(b))
        den1 = np.log(self.ptheta.prod())  # prior volume

        log_Evidence = num1 + num2 - den1  # (We have ignored k)

        print('\nThe log-Bayesian Evidence is equal to: {}'.format(log_Evidence))
        return log_Evidence

    def gibbs_dist(self, params, label):
        """Draw from the conditional distribution of one parameter.

        label -- 't0', 't1' or 't2', selecting which coefficient to update
        while the other two are held fixed at the values in `params`.
        """
        b = self.b
        sigmaNoise = self.noise_rms
        x = self.x
        ndata = self.ndata

        # Design matrices with the conditioned-on column removed.
        D0 = np.zeros(shape=(ndata, 2)); D0[:, 0] = x/sigmaNoise;  D0[:, 1] = x**2/sigmaNoise
        D1 = np.zeros(shape=(ndata, 2)); D1[:, 0] = 1./sigmaNoise; D1[:, 1] = x**2/sigmaNoise
        D2 = np.zeros(shape=(ndata, 2)); D2[:, 0] = 1./sigmaNoise; D2[:, 1] = x/sigmaNoise

        if label == 't0':
            theta_r = np.array([params[1], params[2]])
            v = 1.0/sigmaNoise
            A = np.dot(v.T, v)
            B = -2.0 * (np.dot(b.T, v) - theta_r.T.dot(D0.T).dot(v))
            mu = -B/(2.0 * A)
            sig = np.sqrt(1.0/A)
        if label == 't1':
            theta_r = np.array([params[0], params[2]])
            v = x/sigmaNoise
            A = np.dot(v.T, v)
            B = -2.0 * (np.dot(b.T, v) - theta_r.T.dot(D1.T).dot(v))
            mu = -B/(2.0 * A)
            sig = np.sqrt(1.0/A)
        if label == 't2':
            theta_r = np.array([params[0], params[1]])
            v = x**2/sigmaNoise
            A = np.dot(v.T, v)
            B = -2.0 * (np.dot(b.T, v) - theta_r.T.dot(D2.T).dot(v))
            mu = -B/(2.0 * A)
            sig = np.sqrt(1.0/A)

        return np.random.normal(mu, sig)

    def Sampler(self, nsamples=1000):
        """Run the Gibbs sampler; return (trace, logLikelihood) arrays."""
        b = self.b
        D = self.D

        Niters = int(nsamples)
        trace = np.zeros(shape=(Niters, 3))
        logLikelihood = np.zeros(Niters)

        # previous state
        params = self.theta_sample
        for i in range(Niters):
            params[0] = self.gibbs_dist(params, 't0')
            params[1] = self.gibbs_dist(params, 't1')
            params[2] = self.gibbs_dist(params, 't2')
            trace[i, :] = params
            resid = b - np.dot(D, trace[i, :])
            logLikelihood[i] = -0.5 * np.dot(resid.T, resid)

        # save the current state back to theta_sample
        self.theta_sample = params

        return trace, logLikelihood

    def info(self):
        return '''Example adabted from Harry's Jupyter notebook.
        \n{0}-dimensional Polynomial function.'''.format(self.ndim)
#===================================
# 2d likelihood for emcee sampler
#==================================
# Define the posterior PDF
# Reminder: post_pdf(theta, data) = likelihood(data, theta) * prior_pdf(theta)
# We take the logarithm since emcee needs it.
#---------------
class model_2d(object):
    """Two-parameter straight-line model y = m*x + b with synthetic data.

    Supplies the log-prior, log-likelihood and log-posterior callables
    required by emcee, plus a helper to draw walker starting positions
    uniformly from the prior box.
    """
    def __init__(self, p=[-0.9594, 4.294], pprior=None,
                 N=50, x=None, **kwargs):
        # Uniform prior limits: t -/+ s*|t| around each true value t.
        f = lambda t, s: np.array([t - s*abs(t), t + s*abs(t)])
        # BUG FIX: a caller-supplied pprior was previously discarded and
        # self.pprior then left undefined.
        if pprior is None:
            pprior = {'p'+str(i): f(t, 10) for i, t in enumerate(p)}
        self.pprior = pprior
        # BUG FIX: dict.keys() is a non-indexable view in Python 3 but
        # inprior() does self.label[i]; materialise as a list.
        self.label = list(self.pprior.keys())
        self.ndim = len(p)
        self.p = p

        if x is None:
            self.N = N
            self.x = np.sort(10*np.random.rand(N))
        else:
            self.N = len(x)
            self.x = x

        self.y, self.yerr = self.data(**kwargs)

    # As prior, we assume an 'uniform' prior (i.e. constant prob. density)
    def inprior(self, t, i):
        """Return 1.0 if t is strictly inside the prior range of param i."""
        prange = self.pprior[self.label[i]]
        if prange[0] < t < prange[1]:
            return 1.0
        else:
            return 0.0

    def lnprior(self, theta):
        """Flat prior: 0.0 inside the box, -inf outside."""
        for i, t in enumerate(theta):
            if self.inprior(t, i) != 1.0:
                return -np.inf
        return 0.0

    # As likelihood, we assume the chi-square.
    def lnlike(self, theta):
        m, b = theta
        model = m * self.x + b
        return -0.5*(np.sum(((self.y - model)/self.yerr)**2.))

    def lnprob(self, theta):
        """log-posterior = log-prior + log-likelihood."""
        lp = self.lnprior(theta)
        if not np.isfinite(lp):
            return -np.inf
        return lp + self.lnlike(theta)

    def data(self, sigma=0.5, aerr=0.2):
        # Generate synthetic data from a model.
        # For simplicity, let us assume a LINEAR model y = m*x + b
        # where we want to fit m and b
        yerr = aerr + sigma*np.random.rand(self.N)
        y = self.p[0]*self.x + self.p[1]
        y += sigma * np.random.randn(self.N)
        return y, yerr

    def pos(self, nwalkers):
        # Uniform sample over the prior box; used as the starting
        # positions for the emcee walkers.
        r = np.random.rand(nwalkers, self.ndim)
        pos = r
        for i, k in enumerate(self.label):
            prange = np.asarray(self.pprior[k])
            psize = prange.max() - prange.min()
            pos[:, i] = prange.min() + psize*r[:, i]
        return pos

    def vis(self, n=300, figsize=(10, 10), **kwargs):
        # Visualize prior samples (requires the optional corner package).
        try:
            import corner
            fig = corner.corner(self.pos(n),
                                labels=self.label,
                                truths=self.p, **kwargs)
            fig.set_size_inches(figsize)
        except:
            print('corner package not installed - no plot is produced.')
            pass
#
#============================================
class gaussian_eg(object):
    """N-dimensional Gaussian toy example with exact posterior samples.

    Draws `ndata` points from an `ndim`-dimensional unit-variance
    Gaussian with mean (1, 2, ..., ndim) and exposes the resulting
    Gaussian posterior on the mean.
    """
    def __init__(self, ndim=10, ndata=10000, verbose=1):
        # Generate data
        # Number of dimensions: up to 15 this seems to work OK.
        self.ndim = ndim
        # Number of data points (not actually very important)
        self.ndata = ndata

        # Some fairly arbitrary mean values for the data.
        # Standard deviation is unity in all parameter directions.
        std = 1.0
        # BUG FIX: np.float was removed in NumPy >= 1.24; use a float dtype.
        self.mean = np.arange(1, ndim + 1, dtype=float)

        # Generate random data all at once:
        self.d2d = np.random.normal(self.mean, std, size=(ndata, ndim))

        # Compute the sample mean and standard deviations, for each dimension
        # The s.d. should be ~1/sqrt(ndata))
        self.mean_sample = np.mean(self.d2d, axis=0)
        self.var_sample = np.var(self.d2d, axis=0)
        # 1sigma error on the mean values estimated from ndata points
        self.sigma_mean = np.std(self.d2d, axis=0)/np.sqrt(float(ndata))

        if verbose > 0:
            std_sample = np.sqrt(self.var_sample)
            print()
            print('mean_sample=', self.mean_sample)
            print('std_sample=', std_sample)
            print()

    # Compute ln(likelihood)
    def lnprob(self, theta):
        """Gaussian log-posterior for the mean, centred on the sample mean."""
        dM = (theta - self.mean_sample)/self.sigma_mean
        return (-0.5*np.dot(dM, dM) -
                 self.ndim*0.5*np.log(2.0*math.pi) -
                 np.sum(np.log(self.sigma_mean)))

    # Define a routine to generate samples in parameter space:
    def Sampler(self, nsamples=1000):
        """Draw `nsamples` exact posterior samples and their log-probs."""
        ndim = self.ndim
        mean = self.mean_sample
        sigma = self.sigma_mean

        # Generate samples from an ndim-dimension multivariate gaussian:
        theta = np.random.normal(mean, sigma, size=(nsamples, ndim))
        f = np.zeros(nsamples)
        for i in range(nsamples):
            f[i] = self.lnprob(theta[i, :])
        return theta, f

    def pos(self, n):
        # Generate samples over prior space volume
        return np.random.normal(self.mean_sample, 5*self.sigma_mean, size=(n, self.ndim))

    def info(self):
        print("Example adabted from Alan's Jupyter notebook")
        print('{0}-dimensional Multidimensional gaussian.'.format(self.ndim))
        print('ndata=', self.ndata)
        print()
#====================================
# PyStan chain example
#====================================
def glm_stan(iterations=10000, outdir='chains'):
    """Run the GLM example through PyStan and estimate its evidence.

    Samples the quadratic model with Stan's NUTS sampler (caching the
    chain as a pickle in `outdir`), then feeds the chain to MCEvidence.

    :param iterations: number of Stan iterations per chain
    :param outdir: directory for the chain cache file
    :return: output of MCEvidence(...).evidence()
    """
    import pystan

    stanmodel = '''
    data {
     int<lower=1> K;
     int<lower=0> N;
     real y[N];
     matrix[N,K] x;
    }
    parameters {
     vector[K] beta;
     real sigma;
    }
    model {
     real mu[N];
     vector[N] eta ;
     eta <- x*beta;
     for (i in 1:N)  {
       mu[i] <- (eta[i]);
     };
     increment_log_prob(normal_log(y,mu,sigma));
    }
    '''

    glmq = glm_eg()
    df = pd.DataFrame()
    df['x1'] = glmq.x
    df['x2'] = glmq.x**2
    df['y'] = glmq.y_sample
    data = {'N': glmq.ndata,
            'K': glmq.ndim,
            'x': df[['x1', 'x2']],
            'y': glmq.y_sample}

    # BUG FIX: condition was inverted -- create the directory only
    # when it does not exist yet.
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    cache_fname = '{}/glm2d_pystan_chain.pkl'.format(outdir)

    # Read chain from cache if possible.
    # BUG FIX: a stray bare `raise` previously made this branch unreachable.
    try:
        print('reading chain from: ' + cache_fname)
        with open(cache_fname, 'rb') as f:
            stan_chain = pickle.load(f)
    except Exception:
        # Initialize pystan -- this compiles the model to C++ and runs MCMC.
        # BUG FIX: honour the `iterations` argument (was hard-coded to 1000).
        fit = pystan.stan(model_code=stanmodel, data=data,
                          iter=iterations, chains=4)

        # Extract PyStan chain for the GLM example and normalise the keys
        # to the names MCEvidence expects.
        stan_chain = fit.extract(permuted=True)
        if 'beta' in stan_chain.keys(): stan_chain['samples'] = stan_chain.pop('beta')
        if 'lp__' in stan_chain.keys(): stan_chain['loglikes'] = stan_chain.pop('lp__')

        print('writing chain in: ' + cache_fname)
        with open(cache_fname, 'wb') as f:
            pickle.dump(stan_chain, f)

    # Check input parameter recovery.
    # BUG FIX: 'beta' was renamed to 'samples' above, and the object
    # holding the true values is `glmq` (previously `harry`, undefined).
    theta_means = stan_chain['samples'].mean(axis=0)
    print('GLM example input parameter values: ', glmq.theta)
    print('GLM example estimated parameter values: ', theta_means)

    # Given the samples and log probabilities, compute the evidence.
    mce = MCEvidence(stan_chain, verbose=2, ischain=True, brange=[3, 4.2]).evidence()
    return mce
#====================================
# Emcee chain example
#====================================
import emcee
class make_emcee_chain(object):
    """Thin wrapper around emcee's EnsembleSampler.

    `model` may be a model instance, a model class, or the name of a
    class defined in this module; it must provide `ndim`,
    `lnprob(theta)` and `pos(nwalkers)`.  A burn-in run of `nburn`
    steps is performed (and discarded) at construction time.
    """
    def __init__(self, model, nwalkers=500, nburn=300, arg={}):
        # check if model is string or not
        if isinstance(model, str):
            print('name of model: ', model)
            XClass = getattr(sys.modules[__name__], model)
        else:
            XClass = model

        # check if XClass is instance or not
        if hasattr(XClass, '__class__'):
            print('instance of a model class is passed')
            self.model = XClass  # it is instance
        else:
            print('class variable is passed .. instantiating class')
            self.model = XClass(*arg)

        self.ndim = self.model.ndim

        # init emcee sampler
        self.nwalkers = nwalkers
        self.emcee_sampler = emcee.EnsembleSampler(self.nwalkers,
                                                   self.model.ndim,
                                                   self.model.lnprob)
        # burn-in phase: start from prior draws
        pos0 = self.model.pos(self.nwalkers)
        pos, prob, state = self.emcee_sampler.run_mcmc(pos0, nburn)

        # save emcee state
        self.prob = prob
        self.pos = pos
        self.state = state

        # discard burn-in chain
        self.samples = self.emcee_sampler.flatchain
        self.emcee_sampler.reset()

    def mcmc(self, nmcmc=2000, **kwargs):
        """Advance the chain by nmcmc steps without resetting.

        The stored chain grows over successive calls.
        :return: (flat samples, flat log-probabilities)
        """
        time0 = time.time()

        # pos=self.pos makes the chain continue from the previous state
        self.pos, self.prob, self.state = self.emcee_sampler.run_mcmc(self.pos, nmcmc, **kwargs)
        self.samples = self.emcee_sampler.flatchain
        self.lnp = self.emcee_sampler.flatlnprobability

        time1 = time.time()

        print('emcee total time spent: ', time1 - time0)
        print('samples shape: ', self.samples.shape)

        return self.samples, self.lnp

    def Sampler(self, nsamples=2000):
        """Run MCMC and return exactly `nsamples` samples.

        The sampler is reset afterwards so chains do not grow.
        """
        # BUG FIX: use floor division -- '/' yields a float in Python 3
        # and breaks run_mcmc's step count.
        N = (nsamples + self.nwalkers - 1)//self.nwalkers  # ceil to next integer
        print('emcee: nsamples, nmcmc: ', nsamples, N*self.nwalkers)

        # continue from the previous state of the sampler
        self.pos, self.prob, self.state = self.emcee_sampler.run_mcmc(self.pos, N)
        self.samples = self.emcee_sampler.flatchain
        self.lnp = self.emcee_sampler.flatlnprobability
        self.emcee_sampler.reset()

        return self.samples[0:nsamples, :], self.lnp[0:nsamples]

    def vis(self, chain=None, figsize=(10, 10), **kwargs):
        """Corner plot of the chain (requires the corner package)."""
        # BUG FIX: corner was never imported at module level; import here.
        import corner
        if chain is None:
            chain = self.samples
        fig = corner.corner(chain, labels=self.model.label,
                            truths=self.model.p,
                            **kwargs)
        fig.set_size_inches(figsize)

    def info(self):
        print("Example using emcee sampling")
        # BUG FIX: the attribute is nwalkers, not walkers.
        print('nwalkers=', self.nwalkers)
        try:
            self.model.info()
        except:
            pass
        print()
def gaussian_emcee(nwalkers=300, thin=5, nmcmc=5000):
    """Sample the N-d Gaussian example with emcee and estimate its evidence."""
    toy_model = gaussian_eg()
    wrapper = make_emcee_chain(toy_model, nwalkers=nwalkers)
    theta, logpost = wrapper.mcmc(nmcmc=nmcmc, thin=thin)
    # Package the flattened chain for MCEvidence and return the estimate.
    chain_dict = {'samples': theta, 'loglikes': logpost}
    mce = MCEvidence(chain_dict, verbose=2, ischain=True,
                     brange=[3, 4.2]).evidence(rand=True)
    return mce
#===============================================
if __name__ == '__main__':
    # CLI: `python examples.py [method] [nsamples]`
    # method defaults to the N-d Gaussian example.
    if len(sys.argv) > 1:
        method = sys.argv[1]
    else:
        method = 'gaussian_eg'

    # BUG FIX: argv entries are strings; Sampler needs an integer count.
    if len(sys.argv) > 2:
        nsamples = int(sys.argv[2])
    else:
        nsamples = 10000

    if method in ['gaussian_eg', 'glm_eg']:
        print('Using example: ', method)
        # get class by name from this module
        XClass = getattr(sys.modules[__name__], method)

        # Now Generate samples.
        print('Calling sampler to get MCMC chain: nsamples=', nsamples)
        samples, logl = XClass(verbose=2).Sampler(nsamples=nsamples)
        print('samples and loglikes shape: ', samples.shape, logl.shape)

        chain = {'samples': samples, 'loglikes': logl}
        mce = MCEvidence(chain, thinlen=2, burnlen=0.1, verbose=2, ischain=True).evidence()
    else:
        # NOTE(review): eval on a CLI argument executes arbitrary code;
        # acceptable only for trusted local use.
        mce = eval(method + '()')
| 18,044 | 30.994681 | 104 | py |
MCEvidence | MCEvidence-master/setup.py | #!/usr/bin/env python
from __future__ import absolute_import
import io
import re
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def find_version(path=None):
    """Extract the __version__ string from a module file.

    :param path: file to scan; defaults to MCEvidence.py next to this
                 script (backward compatible with the zero-arg call).
    :return: the version string
    :raises RuntimeError: if no __version__ assignment is found
    """
    if path is None:
        path = os.path.join(os.path.dirname(__file__), 'MCEvidence.py')
    # BUG FIX: the file handle was previously opened and never closed.
    with io.open(path) as fh:
        version_file = fh.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
# Install configuration: exposes MCEvidence.py both as an importable
# top-level module (packages=['']) and as a command-line script.
setup(name='MCEvidence',
      version=find_version(),
      description='MCEvidence evidence estimation from MCMC chains',
      author='Yabebal Fantaye',
      author_email='yabi@aims.ac.za',
      url="https://github.com/yabebalFantaye/MCEvidence",
      packages=[''],
      scripts=['MCEvidence.py'],
      # NOTE(review): the repository file is examples.py -- 'example.py'
      # here looks like a typo; confirm before relying on `setup.py test`.
      test_suite='example.py',
      #package_data={'planck_fullgrid_R2': ['AllChains','SingleChains']}
      install_requires=[
          'numpy',
          'statistics',
          'sklearn',
          "scipy (>=0.11.0)",
          'pandas (>=0.14.0)',
      ],
      classifiers=[
          "Programming Language :: Python :: 2",
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
      ],
      keywords=['MCMC', 'Evidence', 'bayesian evidence', 'marginal likelihood']
      )
| 1,451 | 29.25 | 91 | py |
MCEvidence | MCEvidence-master/MCEvidence.py | #!usr/bin/env python
"""
Authors : Yabebal Fantaye
Email : yabi@aims.ac.za
Affiliation : African Institute for Mathematical Sciences - South Africa
Stellenbosch University - South Africa
License : MIT
Status : Under Development
Description :
Python implementation of the evidence estimation from MCMC chains
as presented in A. Heavens et. al. 2017
(paper can be found here : https://arxiv.org/abs/1704.03472 ).
This code is tested in Python 2 version 2.7.12 and Python 3 version 3.5.2
"""
from __future__ import absolute_import
from __future__ import print_function
import importlib
import itertools
from functools import reduce
from collections import namedtuple
import io
import tempfile
import os
import glob
import sys
import math
import numpy as np
import pandas as pd
import sklearn as skl
import statistics
from sklearn.neighbors import NearestNeighbors, DistanceMetric
import scipy.special as sp
from numpy.linalg import inv
from numpy.linalg import det
import logging
from argparse import ArgumentParser
#====================================
try:
'''
If getdist is installed, use that to reach chains.
Otherwise, use the minimal chain reader class implemented below.
'''
from getdist import MCSamples, chains
from getdist import plots, IniFile
import getdist as gd
use_getdist=True
except:
'''
getdist is not installed
use a simple chain reader
'''
use_getdist=False
#====================================
FORMAT = "%(levelname)s:%(filename)s.%(funcName)s():%(lineno)-8s %(message)s"
logging.basicConfig(level=logging.INFO,format=FORMAT)
logger = logging.getLogger(__name__)
#logger.setLevel(logging.INFO)
__author__ = "Yabebal Fantaye"
__email__ = "yabi@aims.ac.za"
__license__ = "MIT"
__version_info__ = ('17','04','2018')
__version__ = '-'.join(__version_info__)
__status__ = "Development"
desc='Planck Chains MCEvidence. Returns the log Bayesian Evidence computed using the kth NN'
cite='''
**
When using this code in published work, please cite the following paper: **
Heavens et. al. (2017)
Marginal Likelihoods from Monte Carlo Markov Chains
https://arxiv.org/abs/1704.03472
'''
#list of cosmology parameters
cosmo_params_list=['omegabh2','omegach2','theta','tau','omegak','mnu','meffsterile','w','wa',
'nnu','yhe','alpha1','deltazrei','Alens','Alensf','fdm','logA','ns','nrun',
'nrunrun','r','nt','ntrun','Aphiphi']
#np.random.seed(1)
# Create a base class
class LoggingHandler(object):
    # Mixin that equips subclasses with a `logger` attribute whose name
    # is derived from the caller's source context.
    def set_logger(self):
        # Logger is named after the source line two frames up the stack
        # (see log_message), not after the class name.
        self.logger = logging.getLogger(self.log_message()) #self.__class__.__name__
    def log_message(self):
        # NOTE(review): stack[2][4] is the code-context list of the
        # caller's caller; calling set_logger with fewer than three
        # frames on the stack would raise IndexError -- confirm usage.
        import inspect
        stack = inspect.stack()
        return str(stack[2][4])
class data_set(object):
    """Plain attribute container for one partition of an MCMC chain.

    Unpacks the dict produced by SamplesMIXIN.chain_split into the four
    arrays the evidence code needs.
    """
    def __init__(self, d):
        # Note: the 'aweights' key maps to the longer attribute name.
        for attr, key in (('samples', 'samples'),
                          ('weights', 'weights'),
                          ('loglikes', 'loglikes'),
                          ('adjusted_weights', 'aweights')):
            setattr(self, attr, d[key])
class SamplesMIXIN(object):
    '''
    Shared chain-handling machinery (burn-in, thinning, splitting).

    The following routines must be defined to use this class:
       __init__: where certain variables are defined
       load_from_file: where data is read from file and
                       returned as python dict
    '''
    def __init__(self):
        raise NotImplementedError()

    def load_from_file(self):
        raise NotImplementedError()

    def setup(self,str_or_dict,**kwargs):
        # Normalise the input (file root, list/tuple of arrays, or dict of
        # arrays) into self.data = {'s1': data_set, 's2': data_set}.
        #Get the getdist MCSamples objects for the samples, specifying same parameter
        #names and labels; if not specified weights are assumed to all be unity
        #
        #TODO expose this
        # Column layout of a raw chain row: weight, -loglike, parameters...
        self.iw=kwargs.pop('iw',0)
        self.ilike=kwargs.pop('ilike',1)
        self.itheta=kwargs.pop('itheta',2)
        #
        level=kwargs.pop('log_level',logging.INFO)
        logging.basicConfig(level=level,format=FORMAT)
        #
        self.logger = logging.getLogger(__name__) #+self.__class__.__name__)
        #self.logger.addHandler(handler)
        if self.debug:
            self.logger.setLevel(logging.DEBUG)

        #read MCMC samples from file
        if isinstance(str_or_dict,str):
            fileroot=str_or_dict
            self.logger.info('Loading chain from '+fileroot)
            self.data = self.load_from_file(fileroot,**kwargs)

        #MCMC chains are passed as dict, list or tuple
        elif isinstance(str_or_dict,(dict,list,tuple)):
            if isinstance(str_or_dict,(list,tuple)):
                self.chains=str_or_dict
            else:
                self.chains=str_or_dict.values()
            self.data=self.chains2samples()
        #MCMC chains passed in unsupported format
        else:
            self.logger.info('Passed first argument type is: %s'%type(str_or_dict))
            self.logger.error('first argument to samples2getdist should be a file name string, list, tuple or dict.')
            # NOTE(review): bare `raise` with no active exception raises
            # RuntimeError rather than a descriptive TypeError.
            raise

        ndim=self.get_shape()[1]

        # Generate default parameter names/labels if not already set.
        if hasattr(self, 'names'):
            if self.names is None:
                self.names = ["%s%s"%('p',i) for i in range(ndim)]

        if hasattr(self, 'labels'):
            if self.labels is None:
                self.labels = ["%s_%s"%(self.px,i) for i in range(ndim)]

        if not hasattr(self, 'trueval'):
            self.trueval=None

        self.nparamMC=self.get_shape()[1]

    def chains2samples(self,**kwargs):
        """
        Combines separate chains into one samples array, so self.samples has all the samples
        and this instance can then be used as a general :class:`~.chains.WeightedSamples` instance.
        #
        ACTIONS:
              does burn-in if kwargs contains burnlen>0
              does thinning if kwargs contains thinlen>0
        :return: dict of data_set partitions {'s1': ..., 's2': ...}
        """
        if self.chains is None:
            self.logger.error('The chains array is empty!')
            raise
        #
        burnlen = kwargs.pop('burnlen',0)
        thinlen = kwargs.pop('thinlen',0)
        nchains=len(self.chains)
        #
        #store labels of original chain
        self.nchains = nchains
        self.logger.debug('Chain2Sample: nchain=%s'%nchains)
        # ichain marks which (1-based) chain each row came from
        self.ichain=np.concatenate([(i+1)*np.ones(len(c)) for i, c in enumerate(self.chains)])
        #
        #before concatenating do burn-in
        if burnlen>0:
            self.logger.debug('Chain2Sample: applying burn-in with burn length=%s'%burnlen)
            self.chains = [self.removeBurn(burnlen, chain=c) for c in self.chains]

        #keep chain index offsets
        self.chain_offsets = np.cumsum(np.array([0] + [chain.shape[0] for chain in self.chains]))

        #concatenate burned chains into single array
        self.samples=np.concatenate(self.chains)

        #before splitting chain do thinning
        if np.abs(thinlen)>0:
            self.logger.debug('Chain2Sample: applying weighted thinning with thin length=%s'%thinlen)
            # NOTE(review): thin() returns None for nthin==1, which would
            # clobber self.samples -- confirm thinlen==1 is never passed.
            self.samples=self.thin(nthin=thinlen,chain=self.samples)

        #free array
        self.chains = None

        #split chains if necessary
        return self.chain_split(self.samples)

    def chain_split(self,s):
        # Optionally split the combined samples into two random row
        # partitions 's1'/'s2'; otherwise everything goes into 's1' and
        # 's2' holds None placeholders.
        if self.split:
            nrow=len(s)
            rowid=range(nrow)
            ix=np.random.choice(rowid,size=int(nrow*self.s1frac),replace=False)
            not_ix = np.setxor1d(rowid, ix)
            #now split
            text='{} chain with nrow={} split to ns1={}, ns2={}'
            self.logger.info(text.format(self.nchains, nrow, len(ix),len(not_ix)))
            s1=s[ix,:]
            s2=s[not_ix,:]
            #change to dict
            s1_dict = {'weights':s1[:,self.iw], 'loglikes':s1[:,self.ilike],
                       'samples':s1[:,self.itheta:],'ichain':ix}
            s2_dict = {'weights':s2[:,self.iw], 'loglikes':s2[:,self.ilike],
                       'samples':s2[:,self.itheta:],'ichain':not_ix}
        else:
            #no split, so just assign s1 and s2 to same array
            s1_dict = {'weights':s[:,self.iw], 'loglikes':s[:,self.ilike],
                       'samples':s[:,self.itheta:],'ichain':range(len(s))}
            #s1_dict = {'weights':s[:,0],'loglikes':s[:,1],'samples':s[:,2:],'ichain':}
            s2_dict = {'weights':None,'loglikes':None,'samples':None,'ichain':None}

        # a copy of the weights that can be altered
        # independently of the original weights
        s1_dict['aweights']=np.copy(s1_dict['weights'])
        s2_dict['aweights']=np.copy(s2_dict['weights'])

        return {'s1':data_set(s1_dict),'s2':data_set(s2_dict)}

    def get_shape(self,name='s1'):
        """Shape of one partition ('s1'/'s2') or of both combined."""
        def gsape(s):
            if not s is None:
                return s.shape
            else:
                return (0,0)

        if name in ['s1','s2']:
            return gsape(self.data[name].samples)
        else:
            s1 = gsape(self.data['s1'].samples)
            s2 = gsape(self.data['s2'].samples)
            return (s1[0]+s2[0],s1[1])

    def importance_sample(self,func,name='s1'):
        #importance sample with external function
        # func(samples) must return -ln(L) per sample; adjusted weights
        # are multiplied by exp(-negLogLikes).
        # NOTE(review): the log call below has no '{}' placeholder, so
        # .format(name) has no effect on the message.
        self.logger.info('Importance sampling partition: '.format(name))
        negLogLikes=func(self.data[name].samples)
        scale=0 #negLogLikes.min()
        self.data[name].adjusted_weights *= np.exp(-(negLogLikes-scale))

    def get_thin_index(self,nthin,weights):
        '''
        Get the thinning indexes and adjusted weights.

        nthin<1 is treated as a retain fraction (Poisson thinning);
        nthin>=1 as a thinning factor (integer-weight algorithm with a
        weighted-binning fallback for non-integer weights).
        '''
        if nthin<1:
            thin_ix,new_weights = self.poisson_thin(nthin,weights=weights)
        else:
            #call weighted thinning
            try:
                #if weights are integers, use getdist algorithm
                thin_ix,new_weights = self.thin_indices(nthin,weights=weights)
            except:
                #if weights are not integers, use internal algorithm
                thin_ix,new_weights = self.weighted_thin(nthin,weights=weights)

        return new_weights, thin_ix

    def thin(self,nthin=1,name=None,chain=None):
        '''
        Thin samples according to nthin and weights type
        Returns:
           output
        '''
        # NOTE(review): for nthin==1 this returns None; callers that do
        # `samples = self.thin(...)` then lose the chain -- confirm.
        if nthin==1:
            return

        try:
            if not chain is None:
                self.logger.info('Thinning input sample chain ')
                weights = chain[:,self.iw]
                norig = len(weights)
                #
                new_weights, thin_ix = self.get_thin_index(nthin,weights)
                #now thin samples and related quantities
                output = chain[thin_ix, :]
                output[:,self.iw] = new_weights

            elif name is None:
                self.logger.info('Thinning concatenated samples ')
                weights = self.samples[:,self.iw]
                norig = len(weights)
                #
                new_weights, thin_ix = self.get_thin_index(nthin,weights)
                #now thin samples and related quantities
                self.samples = self.samples[thin_ix, :]
                self.samples[:,self.iw] = new_weights
                output = self.samples
            else:
                self.logger.info('Thinning sample partition: '.format(name))
                #now thin samples and related quantities
                weights = self.data[name].weights
                norig = len(weights)
                #
                new_weights, thin_ix = self.get_thin_index(nthin,weights)
                #now thin samples and related quantities
                self.data[name].weights = new_weights
                self.data[name].samples=self.data[name].samples[thin_ix, :]
                self.data[name].loglikes=self.data[name].loglikes[thin_ix]
                self.data[name].adjusted_weights=self.data[name].weights.copy()
                output = self.data[name]

            nnew=len(new_weights)
            self.logger.info('''Thinning with thin length={}
                   #old_chain={},#new_chain={}'''.format(nthin,norig,nnew))
        except:
            self.logger.info('Thinning not possible.')
            raise

        return output

    def removeBurn(self,remove,chain=None,name=None):
        '''
        given either name or chain samples, perform burn-in

        remove<1 is interpreted as a fraction of the chain length,
        otherwise as an absolute number of leading rows to drop.
        '''
        nstart = remove
        #no need to do anything if neither name nor chain is given
        if chain is None and name is None:
            return nstart

        #chain or name is given
        if remove<1:
            if not chain is None:
                self.logger.debug('burning passed chain sample')
                nstart=int(chain.shape[0]*remove)
            if not name is None:
                self.logger.debug('burning for sample partition={}'.format(name))
                nstart=int(len(self.data[name].loglikes)*remove)
        else:
            pass
        #
        self.logger.info('Removing %s lines as burn in' % nstart)
        #
        if not chain is None:
            try:
                return chain[nstart:,:]
            except:
                nsamples = chain.shape[0]
                self.logger.info('burn-in failed: burn length %s > sample length %s' % (nstart,nsamples))
                raise

        if not name is None:
            try:
                self.data[name].samples=self.data[name].samples[nstart:, :]
                self.data[name].loglikes=self.data[name].loglikes[nstart:]
                self.data[name].weights=self.data[name].weights[nstart:]
            except:
                nsamples=len(self.data[name].loglikes)
                self.logger.info('burn-in failed: burn length %s > sample length %s' % (nstart,nsamples))
                raise

    def arrays(self,name='s1'):
        """Return (samples, lnprob, weights) for one partition, or for all."""
        self.logger.debug('extracting arrays for sample partition: '.format(name))
        if name in ['s1','s2']:
            s=self.data[name].samples
            if not s is None:
                # stored loglikes are -ln(L); negate to get ln(P)
                lnp=-self.data[name].loglikes
                w=self.data[name].weights
                return s, lnp, w
            else:
                return None,None,None
        else:
            return self.all_sample_arrays()

    def all_sample_arrays(self):
        # Concatenate both partitions (s2 may be empty when not split).
        s,lnp,w=self.arrays('s1')
        s2,lnp2,w2=self.arrays('s2')
        if s2 is None:
            return s,lnp,w
        else:
            return (np.concatenate((s,s2)),
                    np.concatenate((lnp,lnp2)),
                    np.concatenate((w,w2)))

    def poisson_thin(self,thin_retain_frac,name='s1',weights=None):
        '''
        Given a weight array and thinning retain fraction, perform thinning.
        The algorithm works by randomly sampling from a Poisson distribution
        with mean equal to the weight.
        '''
        if weights is None:
            weights=self.data[name].weights.copy()

        w = weights*thin_retain_frac
        new_w = np.array([float(np.random.poisson(x)) for x in w])
        # keep only rows whose Poisson draw is positive
        thin_ix = np.where(new_w>0)[0]
        new_w = new_w[thin_ix]

        text='''Thinning with Poisson Sampling: thinfrac={}.
                 new_nsamples={},old_nsamples={}'''
        self.logger.debug(text.format(thin_retain_frac,len(thin_ix),len(w)))

        if self.debug:
            print('Poisson thinned chain:', len(thin_ix),
                  '<w>', '{:5.2f}'.format(np.mean(weights)),
                  '{:5.2f}'.format(np.mean(new_w)))
            print('Sum of old weights:',np.sum(weights))
            print('Sum of new weights:',np.sum(new_w))
            print('Thinned:','{:5.3f}'.format(np.sum(new_w)/np.sum(weights)))

        # return {'ix':thin_ix, 'w':weights[thin_ix]}
        return thin_ix, new_w

    def weighted_thin(self,thin_unit,name='s1',weights=None):
        '''
        Given a weight array, perform thinning.
        If all the weights are equal, this should
        be equivalent to selecting every N/((thinfrac*N)
        where N=len(weights).
        '''
        if weights is None:
            weights=self.data[name].weights.copy()

        N=len(weights)
        if thin_unit==0: return range(N),weights

        # NOTE(review): np.int was removed in NumPy >= 1.24; this line
        # raises AttributeError there -- should be plain int().
        if thin_unit<1:
            N2=np.int(N*thin_unit)
        else:
            N2=N//thin_unit

        #bin the weight index to have the desired length
        #this defines the bin edges
        bins = np.linspace(-1, N, N2+1)
        #this collects the indices of the weight array in each bin
        ind = np.digitize(np.arange(N), bins)
        #this gets the maximum weight in each bin
        thin_ix=pd.Series(weights).groupby(ind).idxmax().tolist()
        thin_ix=np.array(thin_ix,dtype=np.intp)
        new_w = weights[thin_ix]

        text='''Thinning with weighted binning: thinfrac={}.
                 new_nsamples={},old_nsamples={}'''
        self.logger.info(text.format(thin_unit,len(thin_ix),len(new_w)))

        return thin_ix, new_w

    def thin_indices(self, factor,name='s1',weights=None):
        """
        Ref:
        http://getdist.readthedocs.io/en/latest/_modules/getdist/chains.html#WeightedSamples.thin
        Indices to make single weight 1 samples. Assumes integer weights.

        :param factor: The factor to thin by, should be int.
        :param weights: The weights to thin,
        :return: array of indices of samples to keep
        """
        if weights is None:
            weights=self.data[name].weights.copy()

        numrows = len(weights)
        norm1 = np.sum(weights)
        # NOTE(review): np.int was removed in NumPy >= 1.24 (use int);
        # the two uses below will fail there.
        weights = weights.astype(np.int)
        norm = np.sum(weights)

        # the fallback to weighted_thin in get_thin_index relies on these
        # bare raises for non-integer weights / factors
        if abs(norm - norm1) > 1e-4:
            print('Can only thin with integer weights')
            raise
        if factor != int(factor):
            print('Thin factor must be integer')
            raise
        factor = int(factor)
        if factor >= np.max(weights):
            cumsum = np.cumsum(weights) // factor
            # noinspection PyTupleAssignmentBalance
            _, thin_ix = np.unique(cumsum, return_index=True)
        else:
            tot = 0
            i = 0
            thin_ix = np.empty(norm // factor, dtype=np.int)
            ix = 0
            mult = weights[i]
            while i < numrows:
                if mult + tot < factor:
                    tot += mult
                    i += 1
                    if i < numrows: mult = weights[i]
                else:
                    thin_ix[ix] = i
                    ix += 1
                    if mult == factor - tot:
                        i += 1
                        if i < numrows: mult = weights[i]
                    else:
                        mult -= (factor - tot)
                        tot = 0

        return thin_ix,weights[thin_ix]
#==================
class MCSamples(SamplesMIXIN):
    # Minimal chain reader used by the evidence code.
    # NOTE(review): when getdist is installed, this definition shadows the
    # getdist.MCSamples name imported at the top of the module -- confirm
    # that is intentional.
    def __init__(self,str_or_dict,trueval=None,
                     debug=False,csplit=None,
                     names=None,labels=None,px='x',
                     **kwargs):
        """Build a sample container from a file root/pattern, arrays, or a dict.

        csplit -- optional object with .split/.frac/.shuffle controlling the
                  two-partition split performed by chain_split()
        """
        self.debug=debug
        # NOTE(review): the `names`/`labels` arguments are accepted but
        # immediately discarded (reset to None); setup() then generates
        # default names -- confirm this is intended.
        self.names=None
        self.labels=None
        self.trueval=trueval
        self.px=px

        if csplit is None:
            self.split=False
            self.s1frac=0.5
            self.shuffle=True
        else:
            self.split=csplit.split
            self.s1frac=csplit.frac
            self.shuffle=csplit.shuffle

        self.setup(str_or_dict,**kwargs)

    def read_list_to_array(self,flist):
        # Load each chain text file into a numpy array.
        chains=[]
        for f in flist:
            self.logger.info('loading: '+f)
            chains.append(np.loadtxt(f))
        return chains

    def load_from_file(self,fname,**kwargs):
        # Resolve fname into a list of chain files and load them;
        # assumes CosmoMC column order.
        f = 'weight loglike param1 param2 ...'
        self.logger.debug('Loading file assuming CosmoMC columns order: '+f)

        #fname can be (a list of) string filename, or filename with wildcard
        #to handle those possibilities, we use try..except case
        try:
            #make fname file name list if it is not already
            if not isinstance(fname,(list,tuple)):
                flist=[fname]
            else:
                flist=fname
            # bare raise deliberately jumps to the glob fallback below
            if not os.path.isfile(flist[0]):
                raise
        except:
            #get file names from matching pattern
            if '*' in fname or '?' in fname:
                flist=glob.glob(fname)
            else:
                idchain=kwargs.pop('idchain', 0)
                if idchain>0:
                    flist=[fname+'_{}.txt'.format(idchain)]
                else:
                    idpattern=kwargs.pop('idpattern', '_?.txt')
                    self.logger.info(' loading files: '+fname+idpattern)
                    flist=glob.glob(fname+idpattern)
        try:
            #load files
            self.logger.debug('Reading from files: ' + ', '.join(flist))
            self.chains=self.read_list_to_array(flist)
        except:
            print('Can not read chain from the following list of files: ',flist)
            raise

        #
        return self.chains2samples(**kwargs)
#============================================================
#====== Here starts the main Evidence calculation code =====
#============================================================
class MCEvidence(object):
    """Estimate the Bayesian evidence (marginal likelihood) from MCMC
    chains using the k-nearest-neighbour method of Heavens et al. (2017)."""

    def __init__(self,method,ischain=True,isfunc=None,
                     thinlen=0.0,burnlen=0.0,
                     split=False,s1frac=0.5,shuffle=True,
                     ndim=None, kmax= 5,
                     priorvolume=1,debug=False,
                     nsample=None,covtype='single',
                     nbatch=1,
                     brange=None,
                     bscale='',
                     verbose=1,args={},
                     **gdkwargs):
        """Evidence estimation from MCMC chains

        :param method: chain names (str or list of strings) or list/tuple/dict of arrays (np.ndarray) or python class
                If string or numpy array, it is interpreted as MCMC chain.
                Otherwise, it is interpreted as a python class with at least
                a single method sampler and will be used to generate chain.
        :param ischain (bool): True indicates the passed method is to be interpreted as a chain.
                This is important as a string name can be passed for to
                refer to a class or chain name
        :param isfunc: optional importance-sampling function applied to the chain(s)
        :param split (bool): split the chain in two and compute cross evidence
        :param covtype (str): 'all' or 'single' - which samples are used for the covariance
        :param nbatch (int): the number of batchs to divide the chain (default=1)
               The evidence can be estimated by dividing the whole chain
               in n batches. In the case nbatch>1, the batch range (brange)
               and batch scaling (bscale) should also be set
        :param brange (int or list): the minimum and maximum size of batches in linear or log10 scale
               e.g. [3,4] with bscale='logscale' means minimum and maximum batch size
               of 10^3 and 10^4. The range is divided nbatch times.
        :param bscale (str): the scaling in batch size. Allowed values are 'log','linear','constant'/
        :param kmax (int): kth-nearest-neighbours, with k between 1 and kmax-1
        :param priorvolume: prior volume used to normalise the evidence
        :param args (dict): argument to be passed to method. Only valid if method is a class.
        :param gdkwargs (dict): arguments to be passed to getdist.
        :param verbose: chattiness of the run
        """
        # NOTE: args={} is a mutable default; it is never mutated here
        # (only unpacked via XClass(*args)), so it is kept for interface
        # compatibility.
        logging.basicConfig(level=logging.DEBUG,format=FORMAT)
        self.logger = logging.getLogger(__name__)

        self.verbose=verbose
        self.debug=False
        # robustness fix: initialise log_level so that an out-of-range
        # verbose value (e.g. negative) cannot leave it unbound below
        log_level = logging.WARNING
        if debug or verbose>1:
            self.debug=True
            log_level = logging.DEBUG
        if verbose==1:
            log_level = logging.INFO
        if verbose==0:
            log_level = logging.WARNING
        #
        self.logger.setLevel(log_level)

        self.info={}
        #
        self.split=split
        self.covtype=covtype

        self.nbatch=nbatch
        self.brange=brange #todo: check for [N]
        # an integer brange means a fixed (constant) batch size
        self.bscale=bscale if not isinstance(self.brange,int) else 'constant'

        # partition names: 's1' always, 's2' only for cross evidence
        self.snames=['s1']
        if self.split:
            self.snames.append('s2')

        # The arrays of powers and nchain record the number of samples
        # that will be analysed at each iteration.
        #idtrial is just an index
        self.idbatch=np.arange(self.nbatch,dtype=int)
        self.powers  = np.zeros((self.nbatch,len(self.snames)))
        self.bsize  = np.zeros((self.nbatch,len(self.snames)),dtype=int)
        self.nchain  = np.zeros((self.nbatch,len(self.snames)),dtype=int)
        #
        self.kmax=max(2,kmax)
        self.priorvolume=priorvolume
        #
        self.ischain=ischain
        #
        self.fname=None
        #
        if ischain:
            if isinstance(method,str):
                self.fname=method
                self.logger.debug('Using chain: %s'%method)
            else:
                if not isinstance(method,dict):
                    if isinstance(method[0],str):
                        self.logger.debug('Using file name list: %s'%method)
                    else:
                        self.logger.debug('list/tuple of MCMC sample arrays')
                else:
                    self.logger.debug('dict of MCMC sample arrays')
        else: #python class which includes a method called sampler
            if nsample is None:
                self.nsample=100000
            else:
                self.nsample=nsample

            #given a class name, get an instance
            if isinstance(method,str):
                XClass = getattr(sys.modules[__name__], method)
            else:
                XClass=method

            # Output should be file name(s) or list/tuple/dict of chains
            if hasattr(XClass, '__class__'):
                self.logger.debug('method is an instance of a class')
                self.method=XClass
            else:
                self.logger.debug('method is class variable .. instantiating class')
                self.method=XClass(*args)
                #if passed class has some info, display it
                try:
                    print()
                    msg=self.method.info()
                    print()
                except:
                    pass
            # Now Generate samples.
            # bug fix: the attribute is self.nsample (set above); the
            # original read the undefined self.nsamples and raised
            # AttributeError on this path.
            method=self.method.Sampler(nsamples=self.nsample)

        #======== By this line we expect only chains either in file or dict ====
        gdkwargs.setdefault('thinlen', thinlen)
        gdkwargs.setdefault('burnlen', burnlen)
        gdkwargs.setdefault('log_level', log_level)

        # bundle the chain-splitting options for MCSamples
        split_var = namedtuple('split_var','split frac shuffle')
        csplit = split_var(split=self.split,frac=s1frac,shuffle=shuffle)

        self.gd = MCSamples(method,csplit=csplit,debug=self.debug,**gdkwargs)

        if isfunc:
            # importance re-weight the chain(s) with the user function
            self.gd.importance_sample(isfunc,name='s1')
            if self.split: self.gd.importance_sample(isfunc,name='s2')

        self.info['NparamsMC']=self.gd.nparamMC
        self.info['Nsamples_read']=self.gd.get_shape()[0]
        self.info['Nparams_read']=self.gd.get_shape()[1]

        #after burn-in and thinning
        self.nsample = [self.gd.get_shape(name=s)[0] for s in self.snames]
        if ndim is None: ndim=self.gd.nparamMC
        self.ndim=ndim
        self.logger.debug('using ndim=%s'%ndim)
        #
        self.info['NparamsCosmo']=self.ndim
        self.info['Nsamples']=', '.join([str(x) for x in self.nsample])

        if self.debug:
            print('partition s1.shape',self.gd.get_shape(name='s1'))
            if split:
                print('partition s2.shape',self.gd.get_shape(name='s2'))
        #
        self.logger.info('chain array dimensions: %s x %s ='%(self.nsample,self.ndim))
        #
        self.set_batch()
def summary(self):
print()
print('ndim={}'.format(self.ndim))
print('nsample={}'.format(self.nsample))
print('kmax={}'.format(self.kmax))
print('brange={}'.format(self.brange))
print('bsize'.format(self.bsize))
print('powers={}'.format(self.powers))
print('nchain={}'.format(self.nchain))
print()
def get_batch_range(self):
if self.brange is None:
powmin,powmax=None,None
else:
powmin=np.array(self.brange).min()
powmax=np.array(self.brange).max()
if powmin==powmax and self.nbatch>1:
self.logger.error('nbatch>1 but batch range is set to zero.')
raise
return powmin,powmax
def set_batch(self,bscale=None):
if bscale is None:
bscale=self.bscale
else:
self.bscale=bscale
#
if self.brange is None:
self.bsize=self.brange #check
powmin,powmax=None,None
for ix, nn in enumerate(self.nsample):
self.nchain[0,ix]=nn
self.powers[0,ix]=np.log10(nn)
else:
if bscale=='logpower':
powmin,powmax=self.get_batch_range()
for ix, nn in enumerate(self.nsample):
self.powers[:,ix]=np.linspace(powmin,powmax,self.nbatch)
self.bsize[:,ix] = np.array([int(pow(10.0,x)) for x in self.powers])
self.nchain=self.bsize
elif bscale=='linear':
powmin,powmax=self.get_batch_range()
for ix, nn in enumerate(self.nsample):
self.bsize[:,ix]=np.linspace(powmin,powmax,self.nbatch,dtype=np.int)
self.powers[:,ix]=np.array([int(log10(x)) for x in self.nchain])
self.nchain=self.bsize
else: #constant
self.bsize[:,:]=self.brange #check
self.powers[:,:]=self.idbatch
for ix, nn in enumerate(self.nsample):
self.nchain[:,ix]=np.array([x for x in self.bsize[:,ix].cumsum()])
def diagonalise_chain(self,s,eigenVec,eigenVal):
# Prewhiten: First diagonalise:
s = np.dot(s,eigenVec);
# And renormalise new parameters to have unit covariance matrix:
for i in range(self.ndim):
s[:,i]= s[:,i]/math.sqrt(eigenVal[i])
return s
def get_covariance(self,s=None):
'''
Estimate samples covariance matrix and eigenvectors
and eigenvalues using all samples from all chains
'''
#
if s is None:
self.logger.info('Estimating covariance matrix using all chains')
s,lnp,w=self.gd.all_sample_arrays()
s = s[:,0:self.ndim]
self.logger.info('covariance matrix estimated using nsample=%s'%len(s))
ChainCov = np.cov(s.T)
eigenVal,eigenVec = np.linalg.eig(ChainCov)
if (eigenVal<0).any():
self.logger.warn('''Some of the eigenvalues of the
covariance matrix are negative and/or complex:''')
for i,e in enumerate(eigenVal):
print("Eigenvalue Param_{} = {}".format(i,e))
#no diagonalisation
Jacobian=1
diag=False
else:
#all eigenvalues are positive
Jacobian = math.sqrt(np.linalg.det(ChainCov))
diag=True
return {'cov':ChainCov,'posdef':diag,
'J':Jacobian,'eVec':eigenVec,
'eVal':eigenVal}
    def get_samples(self,nsamples,istart=0,
                        rand=False,name='s1',
                        prewhiten=True):
        """Return (samples, lnp, weights, stats) for chain partition *name*.

        :param nsamples: number of rows to return; 0 means return all rows
        :param istart: first row index when sampling sequentially
        :param rand: draw rows at random (only honoured when self.brange is set)
        :param name: which chain partition to read ('s1' or 's2')
        :param prewhiten: if True, rotate/rescale the samples to unit variance
        :return: samples (nsamples x ndim), log-likelihoods, weights, and a
                 dict with the Jacobian and eigen decomposition (eVec/eVal
                 are None when prewhiten=False)
        """
        # If we are reading chain, it will be handled here
        # istart - will set row index to start getting the samples
        ntot=self.gd.get_shape(name)[0]

        if rand and not self.brange is None:
            if nsamples>ntot:
                self.logger.error('partition %s nsamples=%s, ntotal_chian=%s'%(name,nsamples,ntot))
                raise
            # random subsample (with replacement) of the partition
            idx=np.random.randint(0,high=ntot,size=nsamples)
        else:
            # contiguous slice starting at istart
            idx=np.arange(istart,nsamples+istart)

        s,lnp,w=self.gd.arrays(name)
        # keep only the first ndim (cosmological) parameters
        s = s[:,0:self.ndim]

        #if nsamples is 0, return everything
        if nsamples>0:
            s,lnp,w = s[idx,:],lnp[idx],w[idx]
        else:
            nsamples=ntot

        self.logger.info('getting samples for partition %s: nsamples=%s'%(name,nsamples))

        if prewhiten:
            self.logger.debug('Prewhitenning chain partition: %s '%name)
            try:
                # Covariance matrix of the samples, and eigenvalues (in w) and eigenvectors (in v):
                ChainCov = np.cov(s.T)
                eigenVal,eigenVec = np.linalg.eig(ChainCov)
                #check for negative eigenvalues
                if (eigenVal<0).any():
                    self.logger.warn("Some of the eigenvalues of the covariance matrix are negative and/or complex:")
                    for i,e in enumerate(eigenVal):
                        print("Eigenvalue Param_{} = {}".format(i,e))
                    print("")
                    print("=================================================================================")
                    print("     Chain is not diagonalized! Estimated Evidence may not be accurate!          ")
                    print("     Consider using smaller set of parameters using --ndim                       ")
                    print("=================================================================================")
                    print("")
                    #no diagonalisation
                    Jacobian=1
                else:
                    #all eigenvalues are positive
                    Jacobian = math.sqrt(np.linalg.det(ChainCov))
                    #diagonalise chain
                    s = self.diagonalise_chain(s,eigenVec,eigenVal)
            except:
                self.logger.error("Unknown error during diagonalizing the chain with its covariance matrix.")
                raise
        else:
            #no diagonalisation
            Jacobian=1
            eigenVal=None
            eigenVec=None

        return s,lnp,w,{'J':Jacobian,'eVec':eigenVec,'eVal':eigenVal}
    def evidence(self,verbose=None,rand=False,info=False,covtype='all',
                      profile=False,pvolume=None,pos_lnp=False,
                      nproc=-1,prewhiten=True):
        '''
        MARGINAL LIKELIHOODS FROM MONTE CARLO MARKOV CHAINS algorithm described in Heavens et. al. (2017)

        If SPLIT=TRUE:
           EVIDENCE IS COMPUTED USING TWO INDEPENDENT CHAINS. THIS MEANS
           NEAREST NEIGHBOUR OF POINT "A" IN AN MCMC SAMPLE MC1 IS SEARCHED IN MCMC SAMPLE MC2.
           THE ERROR ON THE EVIDENCE FROM (AUTO) EVIDENCE IS LARGER THAN THE CROSS EVIDENCE BY ~SQRT(2)
           OWING TO:
              if the nearest neighbour of A is B, then the NN to B is LIKELY to be A

        case covtype:
             all: use all MCMC samples to compute covariance matrix
             single: the samples MC1 are diagonalized by covariance matrix
                     estimated using MC1 samples. same for MC2

        Parameters
        ---------

        :param verbose - controls the amount of information outputted during run time
        :param rand - randomised sub sampling of the MCMC chains
        :param info - if True information about the analysis will be returned to the caller
        :param pvolume - prior volume
        :param pos_lnp - if input log likelihood is multiplied by negative or not
        :param nproc - determined how many processors the scikit package should use or not
        :param prewhiten  - if True chains will be normalised to have unit variance

        Returns
        ---------

        MLE - maximum likelihood estimate of evidence:
        self.info (optional) - returned if info=True. Contains useful information about the chain analysed

        Notes
        ---------

        The MCEvidence algorithm is implemented using scikit nearest neighbour code.

        Examples
        ---------

        To run the evidence estimation from an ipython terminal or notebook

        >> from MCEvidence import MCEvidence
        >> MLE = MCEvidence('/path/to/chain').evidence()

        To run MCEvidence from shell

        $ python MCEvidence.py </path/to/chain>

        References
        -----------

        .. [1] Heavens etl. al. (2017)
        '''
        if verbose is None:
            verbose=self.verbose

        #get prior volume
        if pvolume is None:
            logPriorVolume=math.log(self.priorvolume)
        else:
            logPriorVolume=math.log(pvolume)

        self.logger.debug('log prior volume: %s'%logPriorVolume)

        kmax=self.kmax
        ndim=self.ndim

        # MLE[ipow, k] holds the log-evidence for batch ipow and k-th NN
        MLE = np.zeros((self.nbatch,kmax))

        self.logger.debug('covtype=%s'%covtype)
        if covtype is None:
            covtype=self.covtype

        #get covariance matrix of the total chain
        # NOTE(review): Jacobian is only assigned for covtype in
        # {'all','single'}; any other value would hit an unbound name
        # below - confirm allowed covtype values with callers.
        if covtype=='all':
            covstat = self.get_covariance()
            #we will need the Jacobian to adjust Number of samples in MLE
            Jacobian=covstat['J']

        # Loop over different numbers of MCMC samples (=S):
        itot=0
        for ipow,nsample in zip(self.idbatch,self.nchain):
            S=int(nsample[0])
            DkNN    = np.zeros((S,kmax))
            indices = np.zeros((S,kmax))
            volume  = np.zeros((S,kmax))

            #get samples only without diagonalisation - prewhiten=False
            samples,logL,weight,jvv=self.get_samples(S,istart=itot,
                                                        rand=rand,
                                                        prewhiten=False,
                                                        name='s1')
            if covtype=='single':
                covstat = self.get_covariance(s=samples)
                Jacobian=covstat['J']
                #diagonalise samples to have unit variance
                samples = self.diagonalise_chain(samples, covstat['eVec'], covstat['eVal'])

            #We need the logarithm of the likelihood - not the negative log
            if pos_lnp: logL=-logL

            # Renormalise loglikelihood (temporarily) to avoid underflows:
            logLmax = np.amax(logL)
            fs    = logL-logLmax

            # Use sklearn nearest neightbour routine, which chooses the 'best' algorithm.
            # This is where the hard work is done:
            if self.split:
                #MCMC samples to estimate NN distances nsample[1]
                samples2,logL2,weight2,jvv2=self.get_samples(0,istart=itot,
                                                        rand=rand,
                                                        prewhiten=False,
                                                        name='s2')
                if covtype=='single':
                    covstat = self.get_covariance(s=samples2)
                    #diag sample2 by eigen vec and values of sample1
                    samples2 = self.diagonalise_chain(samples2, covstat['eVec'], covstat['eVal'])
                #
                txt='using XMCEvidence. NN distance is estimated using nsamples=(%s, %s)'
                self.logger.info(txt%(S,samples2.shape[0]))

                #indexing for knn is done using a different MCMC sample
                nbrs = NearestNeighbors(n_neighbors=kmax+1,metric='euclidean',leaf_size=20,
                                        algorithm='auto',n_jobs=nproc).fit(samples2)
                k0=0 #k0 is the first knn
            else:
                #indexing for knn is done with the same MCMC samples
                k0=1 #avoid nn which is the point itself
                nbrs = NearestNeighbors(n_neighbors=kmax+1,metric='euclidean',leaf_size=20,
                                        algorithm='auto',n_jobs=nproc).fit(samples)

            #compute knn distance. If indexed in same samples, DkNN(k=1)=0
            DkNN, indices = nbrs.kneighbors(samples)

            # Create the posterior for 'a' from the distances (volumes) to nearest neighbour:
            for k in range(k0,self.kmax):
                for j in range(0,S):
                    # Use analytic formula for the volume of ndim-sphere:
                    volume[j,k] = math.pow(math.pi,ndim/2)*math.pow(DkNN[j,k],ndim)/sp.gamma(1+ndim/2)

                # dotp is the summation term in the notes:
                dotp = np.dot(volume[:,k]/weight[:],np.exp(fs))

                # The MAP value of 'a' is obtained analytically from the expression for the posterior:
                k_nn=k
                if k0==0:
                    k_nn=k+1
                amax = dotp/(S*k_nn+1.0)

                # Maximum likelihood estimator for the evidence
                SumW     = np.sum(self.gd.data['s1'].adjusted_weights)
                #
                txt='********SumW={:0.2f},amax={:0.2f},Jacobian={:0.2f},logLmax={:0.2f}'
                self.logger.debug(txt.format(SumW,amax,Jacobian,logLmax))
                #
                MLE[ipow,k] = math.log(SumW*amax*Jacobian) + logLmax - logPriorVolume

                self.logger.debug('SumW={} \t S={} '.format(SumW,S))
                self.logger.debug('amax={} \t Jacobian={}'.format(amax,Jacobian))
                self.logger.debug('logLmax={} \t logPriorVolume={}'.format(logLmax,logPriorVolume))
                self.logger.debug('MLE={}:'.format(MLE[ipow,k]))

                # Output is: for each sample size (S), compute the evidence for kmax-1 different values of k.
                # Final columm gives the evidence in units of the analytic value.
                # The values for different k are clearly not independent. If ndim is large, k=1 does best.
                if self.brange is None:
                    if verbose>1:
                        self.logger.debug('k={},nsample={}, dotp={}, median_volume={}, a_max={}, MLE={}'.format(
                            k,S,dotp,statistics.median(volume[:,k]),amax,MLE[ipow,k]))
                else:
                    if verbose>1:
                        if ipow==0:
                            self.logger.debug('(iter,mean,min,max) of LogLikelihood: ',ipow,fs.mean(),fs.min(),fs.max())
                            self.logger.debug('-------------------- useful intermediate parameter values ------- ')
                            self.logger.debug('nsample, dotp, median volume, amax, MLE')
                        self.logger.debug(S,k,dotp,statistics.median(volume[:,k]),amax,MLE[ipow,k])

        #MLE[:,0] is zero - return only from k=1
        if self.brange is None:
            MLE=MLE[0,1:]
        else:
            MLE=MLE[:,1:]

        if verbose>0:
            for k in range(1,self.kmax):
                self.logger.info(' ln(B)[k={}] = {}'.format(k,MLE[k-1]))

        if info:
            return MLE, self.info
        else:
            return MLE
#===============================================
# The next two functions are directly taken from montepythons analyze.py
def extract_array(line):
    """Parse an assignment line of the form ``name = [a, b, ...]`` and
    return the right-hand side as a list, converting each element to int
    or float when possible (anything else is kept as a string)."""
    rhs = line.split('=')[-1].strip().strip(']').lstrip('[')
    sequence = []
    for raw in rhs.split(','):
        token = raw.strip().strip('"').strip("'")
        # try int first, then float; fall back to the bare string
        for cast in (int, float):
            try:
                token = cast(token)
                break
            except ValueError:
                continue
        sequence.append(token)
    return sequence
def extract_dict(line):
    """Parse a montepython ``data.parameters['name'] = [...]`` line and
    return the tuple (parameter name, list of parsed values)."""
    values = extract_array(line)
    # the parameter name is the quoted [...] index on the left-hand side
    left = line.split('=')[0].strip()
    name = left.split('[')[-1].strip(']').strip('"').strip("'")
    return name, values
def iscosmo_param(p,cosmo_params=None):
    '''
    check if parameter 'p' is cosmological or nuisance

    :param p: parameter name to test
    :param cosmo_params: optional extra names to accept as cosmological
    :return: True if p is a known cosmological parameter name
    '''
    # bug fix: the original extended the module-level cosmo_params_list in
    # place on every call, accumulating duplicates and leaking the extras
    # into later calls. Build a local lookup instead.
    known = list(cosmo_params_list)
    if cosmo_params is not None:
        known.extend(cosmo_params)
    return p in known
def params_info(fname,cosmo=False, volumes={}):
    '''
    Extract parameter names, ranges, and prior space volume
    from CosmoMC *.ranges or montepython log.param file

    :param fname: chain root name (CosmoMC) or run directory (montepython)
    :param cosmo: if True, keep only recognised cosmological parameters
    :param volumes: unused; kept for backward compatibility of the signature
    :return: dict with keys name/min/max/range/str/ndim/nr_of_params/volume
    :raises Exception: when neither file type can be found, or when a
                       montepython prior is unbounded
    '''
    parMC={'name':[],'min':[],'max':[],'range':[]}
    nr_of_cosmo_params = 0

    #CosmoMC
    if glob.glob('{}*.ranges'.format(fname)):
        logger.info('getting params info from COSMOMC file %s.ranges'%fname)
        # encoding=None makes genfromtxt return str (not bytes) fields on
        # python3, so the "N" comparison and float() below work
        par=np.genfromtxt(fname+'.ranges',dtype=None,names=('name','min','max'),encoding=None)
        parName=par['name']
        parMin=par['min']
        # np.Infinity was removed in numpy 2.0; np.inf is the canonical name
        parMax=[float(p) if p != "N" else np.inf for p in par['max']]

        for p,pmin,pmax in zip(parName, parMin,parMax):
            #if parameter info is to be computed only for cosmological parameters
            pcond=iscosmo_param(p) if cosmo else True
            #now get info: skip fixed parameters (zero-width range)
            if not np.isclose(pmax,pmin) and pcond:
                parMC['name'].append(p)
                parMC['min'].append(pmin)
                parMC['max'].append(pmax)
                parMC['range'].append(np.abs(pmax-pmin))
                nr_of_cosmo_params += 1
    #MontePython
    elif glob.glob('{}/log.param'.format(fname)):
        logger.info('getting params info from montepython log.params file')
        with open('{}/log.param'.format(fname), 'r') as param:
            for line in param:
                if line.find('#') == -1:
                    if line.find('data.parameters') != -1:
                        name, array = extract_dict(line)
                        # array[5] is the parameter role: 'cosmo', 'nuisance' or 'derived'
                        pcond = array[5] == 'cosmo' if cosmo else True
                        if pcond and not array[5] == 'derived':
                            nr_of_cosmo_params += 1
                            if array[1] == 'None' or array[2] == 'None':
                                raise Exception('Unbounded priors are not supported - please specify priors')
                            vmin=float(array[1]); vmax=float(array[2])
                            parMC['name'].append(name)
                            parMC['min'].append(vmin)
                            parMC['max'].append(vmax)
                            parMC['range'].append(vmax - vmin)
    else:
        raise Exception('Could not read parameter volume from COSMOMC .ranges file or montepython log.param file')
    #
    parMC['str']=','.join(parMC['name'])
    parMC['ndim']=len(parMC['name'])
    parMC['nr_of_params'] = nr_of_cosmo_params
    # prior volume = product of all parameter ranges
    parMC['volume']=np.array(parMC['range']).prod()

    return parMC
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via raw_input() and return their answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning
        an answer is required of the user).

    The "answer" return value is True for "yes" or False for "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    while True:
        sys.stdout.write(question + prompt)
        try:
            choice = raw_input().lower() #python 2.X
        except NameError:
            # bug fix: only fall back to input() when raw_input is absent
            # (python 3); the original bare except also swallowed
            # KeyboardInterrupt/EOFError raised while reading
            choice = input().lower() #python 3.X
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
def get_prior_volume(args, **kwargs):
    """Return the prior volume for the evidence calculation.

    Preference order:
      1. an explicit --pvolume command line value (args.priorvolume),
      2. the volume computed from cosmomc *.ranges / montepython log.param,
      3. interactive fallback to prior_volume=1.

    :param args: parsed command line namespace (root_name, priorvolume, verbose, ...)
    :param kwargs: forwarded to params_info (e.g. cosmo=True)
    """
    # an explicitly supplied prior volume always wins
    if args.priorvolume is not None:
        return args.priorvolume
    try:
        parMC = params_info(args.root_name, **kwargs)
        if args.verbose > 1: print(parMC)
        prior_volume = parMC['volume']
        args.ndim = parMC['ndim']
        logger.info('getting prior volume using cosmomc *.ranges or montepython log.param outputs')
        logger.info('prior_volume=%s' % prior_volume)
        logger.info('Number of params to use: ndim=%s' % parMC['ndim'])
        return prior_volume
    except Exception:
        # bug fix: the original re-raised here, which made the interactive
        # prior_volume=1 fallback below unreachable; it also prompted even
        # when the volume had been read successfully.
        logger.info('''Error in reading cosmomc *.ranges or montepython log.param files.
        These files are needed to compute prior volume''')
        logger.info('''If you choose to proceed with prior_volume=1,
        using the estimated evidence for model comparison will be incrporate the prior ratio''')
        if query_yes_no("Do you want to proceed by setting prior_volume=1?", default='yes'):
            print('setting prior_volume=1')
            return 1
        raise
#==============================================
if __name__ == '__main__':
    #---------------------------------------
    #---- Extract command line arguments ---
    #---------------------------------------
    # NOTE(review): `desc` and `cite` here refer to module-level globals
    # defined earlier in the file; `desc` is re-bound further below for
    # the --cross help text.
    parser = ArgumentParser(prog=sys.argv[0],add_help=True,
                                description=desc,
                                epilog=cite)

    # positional args
    parser.add_argument("root_name",help='Root filename for MCMC chains or python class filename')

    vstring=">>> %(prog)s :: {0} version date: {1} <<<"
    parser.add_argument('--version', action='version',
                        version=vstring.format(__status__,__version__))
    # optional args
    parser.add_argument("-k","--kmax",
                        dest="kmax",
                        default=2,
                        type=int,
                        help="scikit maximum K-NN ")
    parser.add_argument("-ic","--idchain",
                        dest="idchain",
                        default=0,
                        type=int,
                        help="Which chains to use - the id e.g 1 means read only *_1.txt (default=None - use all available) ")
    parser.add_argument("-np", "--ndim",
                        dest="ndim",
                        default=None,
                        type=int,
                        help="How many parameters to use (default=None - use all params) ")
    parser.add_argument("--paramsfile",
                        dest="paramsfile",
                        default="",
                        type=str,
                        help="text file name containing additional parameter names to consider as cosmological parameters")
    parser.add_argument("--burn","--burnlen",
                        dest="burnlen",
                        default=0,
                        type=float,
                        help="Burn-in length or fraction. burnlen<1 is interpreted as fraction e.g. 0.3 - 30%%")
    parser.add_argument("--thin", "--thinlen",
                        dest="thinlen",
                        default=0,
                        type=float,
                        help='''Thinning fraction.
                             If 0<thinlen<1, MCMC weights are adjusted based on Poisson sampling
                             If thinlen>1, weighted thinning based on getdist algorithm
                             If thinlen<0, thinning length will be the autocorrelation length of the chain
                             ''')
    parser.add_argument("-vb", "--verbose",
                        dest="verbose",
                        default=1,
                        type=int,
                        help="Increase output verbosity: 0: WARNNINGS, 1: INFO, 2: DEBUG, >2: EVERYTHING")
    parser.add_argument("-pv", "--pvolume",
                        dest="priorvolume",
                        default=None,
                        type=float,
                        help='prior volume to use. If *.range exist, prior_volume estimated internally is used.')
    parser.add_argument('--allparams', help='flag to use all params and not use iscosmo_params condition',
                        action='store_true')

    desc = '''
        Cross EVIDENCE IS COMPUTED USING TWO INDEPENDENT CHAINS. THIS MEANS
        NEAREST NEIGHBOUR OF POINT "A" IN AN MCMC SAMPLE MC1 IS SEARCHED IN MCMC SAMPLE MC2.
        THE ERROR ON THE EVIDENCE FROM (AUTO) EVIDENCE IS LARGER THAN THE CROSS EVIDENCE BY ~SQRT(2)
        OWING TO:
            if the nearest neighbour of A is B, then the NN to B is LIKELY to be A
        '''
    parser.add_argument('--cross', help='''flag to split chain (s) to estimate cross Evidence.
                        Otherwise auto Evidence is calculated. ''' + desc,
                        action='store_true')

    args = parser.parse_args()

    # optionally extend the cosmological parameter name list from a file
    # (one name per line; blank lines and lines containing '#' are skipped)
    newCosmoParams=[]
    if args.paramsfile!="":
        with open(args.paramsfile,'r') as fp:
            for OneLine in fp:
                line = OneLine.strip()
                if line!="" and line.find('#') == -1:
                    newCosmoParams.append(line)
    #add new parameter names
    print('Adding additional parameter names to cosmo_params list from %s..'%args.paramsfile)
    print('adding the following names:',newCosmoParams)
    cosmo_params_list.extend(newCosmoParams)
    #get unique name
    cosmo_params_list = list(set(cosmo_params_list))

    #get prior volume
    cosmo = not args.allparams
    prior_volume = get_prior_volume(args,cosmo=cosmo)

    #-----------------------------
    #------ control parameters----
    #-----------------------------
    method=args.root_name
    kmax=args.kmax
    idchain=args.idchain
    ndim=args.ndim
    burnlen=args.burnlen
    thinlen=args.thinlen
    verbose=args.verbose
    split = args.cross

    # NOTE(review): this logger is (re)bound after get_prior_volume above
    # already used the module-level `logger` - presumably one exists at
    # import time; verify the ordering if log output looks inconsistent.
    logger = logging.getLogger(__name__)
    if verbose>1:
        logger.setLevel(logging.DEBUG)
    if verbose==1:
        logger.setLevel(logging.INFO)
    if verbose==0:
        logger.setLevel(logging.WARNING)

    print()
    print('Using file: ',method)
    mce=MCEvidence(method,split=split, ndim=ndim,priorvolume=prior_volume,
                       idchain=idchain,
                       kmax=kmax,verbose=verbose,burnlen=burnlen,
                       thinlen=thinlen)
    mce.evidence()

    print('* ln(B)[k] is the natural logarithm of the Baysian evidence estimated using the kth Nearest Neighbour.')
    print('')
| 58,030 | 38.343051 | 126 | py |
# NOTE: removed trailing web-page residue ("Subsets and Splits", etc.) that
# was accidentally appended after the script and is not valid Python.