repo_name | path | text |
|---|---|---|
czechflek/leaguedirector_evolved | leaguedirector/settings.py | import os
import json
from leaguedirector.widgets import userpath
class Settings(object):
def __init__(self):
self.data = {}
self.path = userpath('config.json')
self.loadFile()
def value(self, key, default=None):
return self.data.get(key, default)
def setValue(self, key, value):
self.data[key] = value
self.saveFile()
def saveFile(self):
with open(self.path, 'w') as f:
json.dump(self.data, f, sort_keys=True, indent=4)
def loadFile(self):
if os.path.isfile(self.path):
with open(self.path, 'r') as f:
self.data = json.load(f)
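# Illustrative usage sketch (not part of the original module); assumes
# userpath('config.json') points at a writable location:
# >>> settings = Settings()
# >>> settings.setValue('theme', 'dark')   # written to config.json immediately
# >>> settings.value('theme', 'light')
# 'dark'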
|
czechflek/leaguedirector_evolved | leaguedirector/api.py | import os
import time
import json
import copy
import logging
import functools
from leaguedirector.widgets import userpath
from PySide2.QtCore import *
from PySide2.QtNetwork import *
class Resource(QObject):
"""
Base class for remote API resources.
"""
updated = Signal()
host = 'https://127.0.0.1:2999'
url = ''
fields = {}
connected = False
readonly = False
writeonly = False
network = None
def __init__(self):
object.__setattr__(self, 'timestamp', time.time())
for name, default in self.fields.items():
object.__setattr__(self, name, default)
QObject.__init__(self)
def __setattr__(self, name, value):
if name in self.fields:
if self.readonly:
raise AttributeError("Resource is readonly")
if getattr(self, name) != value:
object.__setattr__(self, name, value)
self.update({name: value})
else:
object.__setattr__(self, name, value)
def sslErrors(self, response, errors):
allowed = [QSslError.CertificateUntrusted, QSslError.HostNameMismatch]
response.ignoreSslErrors([e for e in errors if e.error() in allowed])
def manager(self):
if Resource.network is None:
# Qt does not ship SSL binaries so we have to bundle them in our resources directory
os.environ['PATH'] = os.path.abspath('resources') + os.pathsep + os.environ['PATH']
# Then set up our certificate for the LoL game client
QSslSocket.addDefaultCaCertificates(os.path.abspath('resources/riotgames.pem'))
Resource.network = QNetworkAccessManager(QCoreApplication.instance())
Resource.network.sslErrors.connect(self.sslErrors)
return Resource.network
def set(self, name, value):
self.__setattr__(name, value)
def get(self, name):
return getattr(self, name)
def shutdown(self):
pass
def data(self):
return {name: getattr(self, name) for name in self.fields}
def keys(self):
return self.fields.keys()
def update(self, data=None):
request = QNetworkRequest(QUrl(self.host + self.url))
if data is not None:
request.setHeader(QNetworkRequest.ContentTypeHeader, "application/json")
response = self.manager().post(request, QByteArray(json.dumps(data).encode()))
else:
response = self.manager().get(request)
response.finished.connect(functools.partial(self.finished, response))
def finished(self, response):
error = response.error()
if error == QNetworkReply.NoError:
Resource.connected = True
self.apply(json.loads(response.readAll().data().decode()))
self.timestamp = time.time()
elif error in (QNetworkReply.ConnectionRefusedError, QNetworkReply.TimeoutError):
Resource.connected = False
else:
logging.error("Request Failed: {} {}".format(self.url, response.errorString()))
self.updated.emit()
def apply(self, data):
if not self.writeonly:
for key, value in data.items():
if key in self.fields:
object.__setattr__(self, key, value)
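# Subclassing sketch (illustrative, not part of the original module): a concrete
# resource only needs a url and a fields dict. Assigning to a declared field POSTs
# the change to the replay API, update() with no data GETs the current state, and
# apply() copies matching keys back onto the instance.
# >>> class Example(Resource):
# ...     url = '/replay/example'
# ...     fields = {'enabled': False}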
class Game(Resource):
url = '/replay/game'
fields = {'processID': 0}
readonly = True
class Recording(Resource):
url = '/replay/recording'
fields = {
'recording': False,
'path': '',
'codec': '',
'startTime': 0,
'endTime': 0,
'currentTime': 0,
'width': 0,
'height': 0,
'framesPerSecond': 0,
'enforceFrameRate': False,
'replaySpeed': 0,
}
class Render(Resource):
url = '/replay/render'
fields = {
'cameraMode' : '',
'cameraPosition' : {'x': 0, 'y': 0, 'z': 0},
'cameraRotation' : {'x': 0, 'y': 0, 'z': 0},
'cameraAttached' : False,
'cameraMoveSpeed' : 0,
'cameraLookSpeed' : 0,
'fieldOfView' : 0,
'nearClip' : 0,
'farClip' : 0,
'fogOfWar' : True,
'outlineSelect' : True,
'outlineHover' : True,
'floatingText' : True,
'navGridOffset' : 0,
'interfaceAll' : True,
'interfaceReplay' : True,
'interfaceScore' : True,
'interfaceScoreboard' : True,
'interfaceFrames' : True,
'interfaceMinimap' : True,
'interfaceTimeline' : True,
'interfaceChat' : True,
'interfaceTarget' : True,
'interfaceQuests' : True,
'interfaceAnnounce' : True,
'healthBarChampions' : True,
'healthBarStructures' : True,
'healthBarWards' : True,
'healthBarPets' : True,
'healthBarMinions' : True,
'environment' : True,
'characters' : True,
'particles' : True,
'skyboxPath' : '',
'skyboxRotation' : 0,
'skyboxRadius' : 0,
'skyboxOffset' : 0,
'sunDirection' : {'x': 0, 'y': 0, 'z': 0},
'depthFogEnabled' : False,
'depthFogStart' : 0,
'depthFogEnd' : 0,
'depthFogIntensity' : 1,
'depthFogColor' : {'r': 0, 'g': 0, 'b': 0, 'a': 0},
'heightFogEnabled' : False,
'heightFogStart' : 0,
'heightFogEnd' : 0,
'heightFogIntensity' : 1,
'heightFogColor' : {'r': 0, 'g': 0, 'b': 0, 'a': 0},
'depthOfFieldEnabled' : False,
'depthOfFieldDebug' : False,
'depthOfFieldCircle' : 0,
'depthOfFieldWidth' : 0,
'depthOfFieldNear' : 0,
'depthOfFieldMid' : 0,
'depthOfFieldFar' : 0,
}
def __init__(self):
Resource.__init__(self)
self.cameraLockX = None
self.cameraLockY = None
self.cameraLockZ = None
self.cameraLockLast = None
self.timer = QTimer()
self.timer.timeout.connect(self.updateCameraLock)
self.timer.start(600)
def updateCameraLock(self, *args):
# Wait until the camera stops moving before snapping it
if self.cameraLockLast != self.cameraPosition:
self.cameraLockLast = self.cameraPosition
else:
copy = dict(self.cameraPosition)
if self.cameraLockX is not None:
copy['x'] = self.cameraLockX
if self.cameraLockY is not None:
copy['y'] = self.cameraLockY
if self.cameraLockZ is not None:
copy['z'] = self.cameraLockZ
self.cameraPosition = copy
def toggleCameraLockX(self):
self.cameraLockX = self.cameraPosition['x'] if self.cameraLockX is None else None
def toggleCameraLockY(self):
self.cameraLockY = self.cameraPosition['y'] if self.cameraLockY is None else None
def toggleCameraLockZ(self):
self.cameraLockZ = self.cameraPosition['z'] if self.cameraLockZ is None else None
def moveCamera(self, x=0, y=0, z=0):
copy = dict(self.cameraPosition)
copy['x'] += x
copy['y'] += y
copy['z'] += z
self.cameraPosition = copy
def rotateCamera(self, x=0, y=0, z=0):
copy = dict(self.cameraRotation)
copy['x'] += x
copy['y'] += y
copy['z'] += z
self.cameraRotation = copy
class Particles(Resource):
url = '/replay/particles'
fields = {}
particles = {}
def apply(self, data):
self.particles = data
def items(self):
return self.particles.items()
def hasParticle(self, particle):
return particle in self.particles
def setParticle(self, particle, enabled):
if particle in self.particles:
self.update({particle:enabled})
def getParticle(self, particle):
return self.particles.get(particle, True)
class Playback(Resource):
url = '/replay/playback'
fields = {
'paused': False,
'seeking': False,
'time': 0.0,
'speed': 0.0,
'length': 1.0,
}
@property
def currentTime(self):
if self.paused:
return self.time
else:
return min(self.time + (time.time() - self.timestamp) * self.speed, self.length)
@property
def currentTimeFormatted(self):
minutes, seconds = divmod(self.currentTime, 60)
return '{0:02}:{1:05.2f}'.format(int(minutes), seconds)
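# Worked example (illustrative): at currentTime == 75.5 seconds,
# divmod(75.5, 60) == (1.0, 15.5), so the property renders '01:15.50'
# (minutes zero-padded to two digits, seconds to five characters with two decimals).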
def togglePlay(self):
self.paused = not self.paused
def setSpeed(self, speed):
self.speed = speed
def adjustTime(self, delta):
self.time = self.currentTime + delta
def play(self, time=None):
if not self.seeking:
data = {'paused': False}
if time is not None:
data['time'] = time
self.update(data)
def pause(self, time=None):
if not self.seeking:
data = {'paused': True}
if time is not None:
data['time'] = time
self.update(data)
class Sequence(Resource):
dataLoaded = Signal()
namesLoaded = Signal()
url = '/replay/sequence'
writeonly = True
history = []
history_index = 0
fields = {
'playbackSpeed': [],
'cameraPosition': [],
'cameraRotation': [],
'fieldOfView': [],
'nearClip': [],
'farClip': [],
'navGridOffset': [],
'skyboxRotation': [],
'skyboxRadius': [],
'skyboxOffset': [],
'sunDirection': [],
'depthFogEnabled': [],
'depthFogStart': [],
'depthFogEnd': [],
'depthFogIntensity': [],
'depthFogColor': [],
'heightFogEnabled': [],
'heightFogStart': [],
'heightFogEnd': [],
'heightFogIntensity': [],
'heightFogColor': [],
'depthOfFieldEnabled': [],
'depthOfFieldCircle': [],
'depthOfFieldWidth': [],
'depthOfFieldNear': [],
'depthOfFieldMid': [],
'depthOfFieldFar': [],
}
blendOptions = [
'linear',
'snap',
'smoothStep',
'smootherStep',
'quadraticEaseIn',
'quadraticEaseOut',
'quadraticEaseInOut',
'cubicEaseIn',
'cubicEaseOut',
'cubicEaseInOut',
'quarticEaseIn',
'quarticEaseOut',
'quarticEaseInOut',
'quinticEaseIn',
'quinticEaseOut',
'quinticEaseInOut',
'sineEaseIn',
'sineEaseOut',
'sineEaseInOut',
'circularEaseIn',
'circularEaseOut',
'circularEaseInOut',
'exponentialEaseIn',
'exponentialEaseOut',
'exponentialEaseInOut',
'elasticEaseIn',
'elasticEaseOut',
'elasticEaseInOut',
'backEaseIn',
'backEaseOut',
'backEaseInOut',
'bounceEaseIn',
'bounceEaseOut',
'bounceEaseInOut',
]
def __init__(self, render, playback):
Resource.__init__(self)
self.render = render
self.playback = playback
self.name = ''
self.names = []
self.directory = None
self.sequencing = False
self.saveRemoteTimer = QTimer()
self.saveRemoteTimer.timeout.connect(self.saveRemoteNow)
self.saveRemoteTimer.setSingleShot(True)
self.saveHistoryTimer = QTimer()
self.saveHistoryTimer.timeout.connect(self.saveHistoryNow)
self.saveHistoryTimer.setSingleShot(True)
self.saveFileTimer = QTimer()
self.saveFileTimer.timeout.connect(self.saveFileNow)
self.saveFileTimer.setSingleShot(True)
def update(self, *args):
self.saveRemote()
self.saveFile()
self.saveHistory()
def data(self):
return {key:getattr(self, key) for key in self.fields}
@property
def startTime(self):
keyframes = self.cameraPosition + self.cameraRotation
if len(keyframes):
return min(keyframe['time'] for keyframe in keyframes)
@property
def endTime(self):
keyframes = self.cameraPosition + self.cameraRotation
if len(keyframes):
return max(keyframe['time'] for keyframe in keyframes)
def path(self):
return os.path.join(self.directory, self.name + '.json')
def load(self, name):
self.saveFileNow()
self.loadFile(name)
def create(self, name):
self.saveFileNow()
self.clearData()
self.resetHistory()
self.saveFileNow(name)
self.reloadNames()
def save(self, name=None):
self.saveFile(name)
def copy(self, name):
oldName = self.name
self.saveFileNow(name)
self.saveFileNow(oldName)
self.reloadNames()
def undo(self):
self.loadHistory(self.history_index - 1)
def redo(self):
self.loadHistory(self.history_index + 1)
def setDirectory(self, path):
if os.path.exists(path) and os.path.isdir(path):
self.directory = path
self.clearData()
self.loadFile('default')
self.saveFileNow()
self.reloadNames()
def saveRemoteNow(self):
self.sortData()
if self.sequencing:
Resource.update(self, self.data())
else:
Resource.update(self, {})
def saveRemote(self):
self.saveRemoteTimer.start(0)
def saveHistoryNow(self):
self.history = self.history[0:self.history_index + 1]
self.history_index = len(self.history)
self.history.append(copy.deepcopy(self.data()))
def saveHistory(self):
self.saveHistoryTimer.start(500)
def loadHistory(self, index):
if len(self.history):
self.history_index = max(min(index, len(self.history) - 1), 0)
self.loadData(copy.deepcopy(self.history[self.history_index]))
self.saveRemote()
self.saveFileNow()
def resetHistory(self):
self.history = []
self.history_index = 0
def loadFile(self, name):
self.name = name
if os.path.exists(self.path()):
with open(self.path(), 'r') as f:
self.resetHistory()
self.loadData(json.load(f))
self.saveRemote()
self.saveHistory()
def saveFileNow(self, name=None):
self.name = name or self.name
if self.name:
path = self.path()
exists = os.path.exists(path)
with open(path, 'w') as f:
json.dump(self.data(), f, sort_keys=True, indent=4)
if not exists:
self.reloadNames()
def saveFile(self, name=None):
self.name = name or self.name
self.saveFileTimer.start(1000)
def clearData(self):
for track in self.fields:
getattr(self, track, []).clear()
self.dataLoaded.emit()
def loadData(self, data):
if isinstance(data, dict):
for key, value in data.items():
if value is not None:
object.__setattr__(self, key, value)
self.dataLoaded.emit()
def sortData(self):
for track in self.fields:
if getattr(self, track):
getattr(self, track).sort(key = lambda item: item['time'])
def reloadNames(self):
self.names = sorted([f.replace('.json', '') for f in os.listdir(self.directory) if f.endswith('.json')], key=str.lower)
self.namesLoaded.emit()
@property
def index(self):
try:
return self.names.index(self.name)
except ValueError:
return 0
def setSequencing(self, value):
self.sequencing = value
self.update()
def getKeyframes(self, name):
return getattr(self, name)
def createKeyframe(self, name):
keyframe = {
'time': self.playback.time,
'value': self.getValue(name),
'blend': 'linear',
}
self.appendKeyframe(name, keyframe)
return keyframe
def appendKeyframe(self, name, keyframe):
getattr(self, name).append(keyframe)
self.update()
def removeKeyframe(self, name, item):
getattr(self, name).remove(item)
self.update()
def getLabel(self, name):
if name == 'cameraPosition':
return 'Camera Position'
if name == 'cameraRotation':
return 'Camera Rotation'
if name == 'playbackSpeed':
return 'Playback Speed'
if name == 'fieldOfView':
return 'Field Of View'
if name == 'nearClip':
return 'Near Clip'
if name == 'farClip':
return 'Far Clip'
if name == 'navGridOffset':
return 'Nav Grid Offset'
if name == 'skyboxRotation':
return 'Skybox Rotation'
if name == 'skyboxRadius':
return 'Skybox Radius'
if name == 'skyboxOffset':
return 'Skybox Offset'
if name == 'sunDirection':
return 'Sun Direction'
if name == 'depthFogEnabled':
return 'Depth Fog Enabled'
if name == 'depthFogStart':
return 'Depth Fog Start'
if name == 'depthFogEnd':
return 'Depth Fog End'
if name == 'depthFogIntensity':
return 'Depth Fog Intensity'
if name == 'depthFogColor':
return 'Depth Fog Color'
if name == 'heightFogEnabled':
return 'Height Fog Enabled'
if name == 'heightFogStart':
return 'Height Fog Start'
if name == 'heightFogEnd':
return 'Height Fog End'
if name == 'heightFogIntensity':
return 'Height Fog Intensity'
if name == 'heightFogColor':
return 'Height Fog Color'
if name == 'depthOfFieldEnabled':
return 'DOF Enabled'
if name == 'depthOfFieldCircle':
return 'DOF Circle'
if name == 'depthOfFieldWidth':
return 'DOF Width'
if name == 'depthOfFieldNear':
return 'DOF Near'
if name == 'depthOfFieldMid':
return 'DOF Mid'
if name == 'depthOfFieldFar':
return 'DOF Far'
return name
def getValue(self, name):
if name == 'cameraPosition':
return self.render.cameraPosition
if name == 'cameraRotation':
return self.render.cameraRotation
if name == 'playbackSpeed':
return self.playback.speed
if name == 'fieldOfView':
return self.render.fieldOfView
if name == 'nearClip':
return self.render.nearClip
if name == 'farClip':
return self.render.farClip
if name == 'navGridOffset':
return self.render.navGridOffset
if name == 'skyboxRotation':
return self.render.skyboxRotation
if name == 'skyboxRadius':
return self.render.skyboxRadius
if name == 'skyboxOffset':
return self.render.skyboxOffset
if name == 'sunDirection':
return self.render.sunDirection
if name == 'depthFogEnabled':
return self.render.depthFogEnabled
if name == 'depthFogStart':
return self.render.depthFogStart
if name == 'depthFogEnd':
return self.render.depthFogEnd
if name == 'depthFogIntensity':
return self.render.depthFogIntensity
if name == 'depthFogColor':
return self.render.depthFogColor
if name == 'heightFogEnabled':
return self.render.heightFogEnabled
if name == 'heightFogStart':
return self.render.heightFogStart
if name == 'heightFogEnd':
return self.render.heightFogEnd
if name == 'heightFogIntensity':
return self.render.heightFogIntensity
if name == 'heightFogColor':
return self.render.heightFogColor
if name == 'depthOfFieldEnabled':
return self.render.depthOfFieldEnabled
if name == 'depthOfFieldCircle':
return self.render.depthOfFieldCircle
if name == 'depthOfFieldWidth':
return self.render.depthOfFieldWidth
if name == 'depthOfFieldNear':
return self.render.depthOfFieldNear
if name == 'depthOfFieldMid':
return self.render.depthOfFieldMid
if name == 'depthOfFieldFar':
return self.render.depthOfFieldFar
|
projectmesapackages/SIR | mesa_SIR/calculations_and_plots.py |
import numpy as np
from datetime import date as datemethod
from datetime import datetime
import matplotlib.pyplot as plt
import os
'''
Example of a data collection function:
m is the model variable
c_p is the imported calculations_and_plots module
Example DataCollector function
self.datacollector = DataCollector(model_reporters={"infected": lambda m: c_p.compute(m,'infected'),
"recovered": lambda m: c_p.compute(m,'recovered'),
"susceptible": lambda m: c_p.compute(m,"susceptible"),
"dead": lambda m: c_p.compute(m, "dead"),
"R0": lambda m: c_p.compute(m, "R0"),
"severe_cases": lambda m: c_p.compute(m,"severe")})
Plots:
plot_SIR parameters: data collection dataframe from get_model_vars_dataframe(); plot title; output path
plot_R0 parameters: data collection dataframe from get_model_vars_dataframe(); plot title; output path
plot_severe parameters: data collection dataframe from get_model_vars_dataframe(); plot title; output path
'''
# Compute SIR and dead at any point in time
def compute(model, report):
total = 0
if report == "R0":
induced_infections = [a.induced_infections for a in model.schedule.agents if a.infected_others == True]
if len(induced_infections) == 0:
induced_infections = [0]
# induced_infections_ = [value for value in induced_infections if value != 0]
infection_array = np.array(induced_infections)
R0 = np.average(infection_array)
return R0
elif report == "dead":
total_dead = float(model.population - len(model.schedule.agents))
return total_dead
else:
for a in model.schedule.agents:
if getattr(a, report) == True:
total += 1
return total
# Save output data to CSV
def save_data(output_data, output_path = None, filename = "SIR_datafile.csv"):
if output_path != None:
output_file = os.path.join(output_path, filename)
output_data.to_csv(output_file, encoding = "UTF8")
else:
output_data.to_csv(filename, encoding="UTF8")
# Plot output
def plot_SIR(df_out, title, output_path = None):
today = datemethod.strftime(datetime.utcnow(), '%Y%m%dZ%H%M%S')
plot_name = 'SIR_' + today + '_.png'
ax = plt.subplot(111)
for column in list(df_out.columns):
if (column != 'R0') and (column != 'severe_cases'):
ax.plot(df_out[column], label=column)
plt.title(title +' - SIR')
plt.xlabel('Day')
plt.ylabel('Population')
ax.legend()
if output_path != None:
plt.savefig(os.path.join(output_path, plot_name), dpi=300)
else:
plt.savefig(plot_name, dpi=300)
plt.close()
def plot_R0(df_out, title, output_path = None):
today = datemethod.strftime(datetime.utcnow(), '%Y%m%dZ%H%M%S')
plot_name = 'R0_' + today + '_.png'
ax = plt.subplot(111)
for column in list(df_out.columns):
if column == 'R0':
ax.plot(df_out[column], label=column)
plt.title(title + ' - R0')
plt.xlabel('Day')
plt.ylabel('R0')
ax.legend()
if output_path != None:
plt.savefig(os.path.join(output_path, plot_name), dpi=300)
else:
plt.savefig(plot_name, dpi = 300)
plt.close()
def plot_severe(df_out, title, output_path =None):
today = datemethod.strftime(datetime.utcnow(), '%Y%m%dZ%H%M%S')
plot_name = 'Severe_Cases_' + today + '_.png'
ax = plt.subplot(111)
for column in list(df_out.columns):
if column == 'severe_cases':
ax.plot(df_out[column], label=column)
plt.title(title + ' - Severe Cases')
plt.xlabel('Day')
plt.ylabel('Number of Severe Cases')
ax.legend()
if output_path != None:
plt.savefig(os.path.join(output_path, plot_name), dpi=300)
else:
plt.savefig(plot_name, dpi = 300)
plt.close()
|
projectmesapackages/SIR | mesa_SIR/SIR.py |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 14:36:25 2020
@author: metalcorebear
"""
import random
import networkx as nx
import numpy as np
#Random output generator
def coin_flip(ptrue):
test = random.uniform(0.0,1.0)
if ptrue == 0:
out = False
elif test < ptrue:
out = True
else:
out = False
return out
#####################################################################
# #
# Initial Build #
# #
#####################################################################
#Instantiate social network
#Chaos parameter allows for variability in following social distancing recommendations.
def build_network(interactions, population, chaos = 0.001):
G = nx.Graph()
G.add_nodes_from(range(population))
nodes_list = list(G.nodes())
edge_set = set()
top_row = 0
for node_1 in nodes_list:
top_row += 1
for node_2 in range(top_row):
if (G.degree(node_2) < interactions) and (G.degree(node_1) < interactions):
edge = (node_1, node_2)
if (edge[0], edge[1]) not in edge_set and (edge[1], edge[0]) not in edge_set:
if not coin_flip(chaos):
G.add_edge(*edge)
edge_set.add(edge)
else:
if coin_flip(chaos):
edge = (node_1, node_2)
if (edge[0], edge[1]) not in edge_set and (edge[1], edge[0]) not in edge_set:
G.add_edge(*edge)
edge_set.add(edge)
return G
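# Usage sketch (illustrative, not part of the original module): build a contact
# network for 100 agents where each node accumulates at most `interactions` edges;
# a small chaos value occasionally drops or adds edges to model imperfect
# adherence to social distancing.
# >>> G = build_network(interactions=12, population=100)
# >>> G.number_of_nodes()
# 100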
##########################################################################################
# #
# Infection #
# #
# #
#########################################################################################
class Infection:
def __init__(self, model, ptrans = 0.25, reinfection_rate = 0.00, I0 =0.10, severe =0.18,
progression_period = 3, progression_sd = 2, death_rate = 0.0193, recovery_days = 21,
recovery_sd = 7):
self.model = model
self.ptrans = ptrans
self.reinfection_rate = reinfection_rate
self.I0 = I0
self.severe = severe
self.progression_period = progression_period
self.progression_sd = progression_sd
self.death_rate = death_rate
self.recovery_days = recovery_days
self.recovery_sd = recovery_sd
self.dead_agents = []
def initial_infection(self):
infected = coin_flip(self.I0)
if infected == False:
susceptible = True
severe = False
else:
susceptible = False
severe = coin_flip(self.severe)
return infected, susceptible, severe
# Determine if infection is transmitted
def infect(self, agent_1, agent_2):
was_infected = agent_2.was_infected
if (agent_2.infected == False) and (agent_2.susceptible == True):
if agent_1.infected == True:
if agent_2.was_infected == False:
agent_2.infected = coin_flip(self.ptrans)
if agent_2.infected == True:
infected_bool = True
agent_2.severe = coin_flip(self.severe)
else:
infected_bool = False
agent_2.severe = False
else:
#agent_2.infected = coin_flip(reinfection_rate)
agent_2.infected = coin_flip(self.ptrans)
if agent_2.infected == True:
infected_bool = True
agent_2.severe = coin_flip(self.severe)
else:
infected_bool = False
agent_2.severe = False
else:
agent_2.infected = agent_2.infected
infected_bool = False
else:
infected_bool = False
return infected_bool, was_infected
def interact(self, agent):
neighbors = self.model.grid.get_neighbors(agent.pos)
if len(neighbors)== 0:
print(str(agent.unique_id) + " is lonely")
else:
for neighbor in neighbors:
if neighbor in self.dead_agents:
pass
else:
neighbor_obj = self.model.schedule._agents[neighbor]
infected_bool, was_infected = self.infect(agent, neighbor_obj)
if infected_bool == True:
neighbor_obj.susceptible = False
neighbor_obj.day = 0
agent.induced_infections += 1
agent.infected_others = True
if was_infected == True:
agent.recovered = False
if agent.infected == True:
agent.susceptible = False
progression_threshold = int(np.random.normal(self.progression_period, self.progression_sd))
if agent.day >= progression_threshold:
if agent.severe == True:
agent.alive = coin_flip((1 - self.death_rate))
#Agent Dies sequence
if agent.alive == False:
agent.susceptible = False
agent.severe = False
agent.was_infected = True
self.dead_agents.append(agent.unique_id)
self.model.schedule.remove(agent)
if agent.alive == True:
recovery_threshold = int(np.random.normal(self.recovery_days, self.recovery_sd))
if agent.day >= recovery_threshold:
agent.infected = False
agent.severe = False
if coin_flip(self.reinfection_rate):
agent.susceptible = True
else:
agent.susceptible = False
agent.was_infected = True
agent.recovered = True
agent.day = 0
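# Per-step sketch (illustrative, not part of the original module): a model's agent
# step would typically call interact() once per tick, with agent.day presumably
# incremented elsewhere by the agent each step:
# >>> infection = Infection(model, ptrans=0.25, death_rate=0.0193)
# >>> infection.interact(agent)   # may infect neighbors, then progress or resolve this agent's illness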
|
projectmesapackages/SIR | mesa_SIR/__init__.py | """
Mesa Agent-Based Modeling Framework Extension
Core Objects: Model, and Agent.
"""
import datetime
from mesa_SIR import calculations_and_plots
from mesa_SIR import SIR
__all__ = ["Infection"]
__title__ = 'Mesa_SIR'
__version__ = '0.0.1'
__license__ = 'MIT'
__copyright__ = 'Copyright %s <NAME>' % datetime.date.today().year |
dashan-emr/dsslpy | code/example.py | import numpy as np
from dssl import DSSL
###########################
# Gen data
###########################
N = 100
X = np.random.randn(N,2)
T = np.arange(N)
w = np.random.randn(2)
scores = np.dot(X,w)
ranked_pairs = []
for i in range(X.shape[0]):
for j in range(X.shape[0]):
if scores[i] < scores[j] and np.random.rand() < 2.0/N:
ranked_pairs.append([i,j])
ranked_pairs = np.array(ranked_pairs)
smoothed_pairs = np.vstack([np.random.choice(100,N,replace=True),np.random.choice(100,N,replace=True)]).T
smoothed_pairs = smoothed_pairs[smoothed_pairs[:,0] != smoothed_pairs[:,1]]
###########################
# Fit model
###########################
smoothness_reg = 1.0
l2_reg = 1.0
dssl = DSSL(l2_reg=l2_reg,smoothness_reg=smoothness_reg,disp=True)
dssl.fit(X,T,ranked_pairs,smoothed_pairs)
###########################
# Get scores
###########################
scores = dssl.predict(X) |
dashan-emr/dsslpy | code/test_dssl.py | import numpy as np
from dssl import huber,DSSL
import unittest
class TestDSSL(unittest.TestCase):
def test_toy(self):
"""
Test the loss on a couple of toy examples
"""
l2_reg = 0.75
smoothness_reg = 0.0
h = 0.01
X = np.array([[1,2],
[3,4],
[5,6]])
ranked_pairs = np.array([[0,1],
[2,1]])
smoothed_pairs = None
w = np.array([1.0,-1.0])
T = np.arange(3)
l_true = (huber(1-np.dot(X[0]-X[1],w),h) + huber(1-np.dot(X[2]-X[1],w),h))/2.0
l_true += l2_reg * np.sum(w*w)/2.0
dssl = DSSL(l2_reg=l2_reg)
obj = dssl.get_obj(X,T,ranked_pairs,smoothed_pairs)
l_dssl = obj(w)
self.assertAlmostEqual(l_true,l_dssl)
l2_reg = 0.75
smoothness_reg = 1.0
h = 0.01
X = np.random.randn(4,2)
ranked_pairs = np.array([[0,1],
[2,1]])
smoothed_pairs = np.array([[1,0],
[0,2]])
w = np.random.randn(2)
T = np.arange(4)
l_true = (huber(1-np.dot(X[0]-X[1],w),h) + huber(1-np.dot(X[2]-X[1],w),h))/2.0
l_true += smoothness_reg*(np.dot(X[1]-X[0],w)**2 + np.dot(X[0]-X[2],w)**2/4.0)/2.0
l_true += l2_reg * np.sum(w*w)/2.0
dssl = DSSL(l2_reg=l2_reg,smoothness_reg=smoothness_reg)
obj = dssl.get_obj(X,T,ranked_pairs,smoothed_pairs)
l_dssl = obj(w)
self.assertAlmostEqual(l_true,l_dssl)
def test_fit_toy(self):
"""
Ranked pairs is created such that X is perfectly ordered and
the final loss should be zero.
"""
N = 10
X = np.random.randn(N,2)
T = np.arange(N)
w = np.random.randn(2)
scores = np.dot(X,w)
ranked_pairs = []
for i in range(X.shape[0]):
for j in range(X.shape[0]):
if scores[i] < scores[j]:
ranked_pairs.append([i,j])
ranked_pairs = np.array(ranked_pairs)
smoothed_pairs = None
smoothness_reg = 0.0
l2_reg = 0.0
dssl = DSSL(disp=False)
dssl.fit(X,T,ranked_pairs,smoothed_pairs=None)
self.assertAlmostEqual(0.0,dssl.res.fun)
def test_fit_weighted(self):
"""
Ranked pairs is created such that X is perfectly ordered and
the final loss should be zero.
"""
N = 10
X = np.random.randn(N,2)
T = np.arange(N)
w = np.random.randn(2)
scores = np.dot(X,w)
ranked_pairs = []
weights = []
for i in range(X.shape[0]):
for j in range(X.shape[0]):
if i != j:
if scores[i] < scores[j]:
ranked_pairs.append([i,j])
weights.append(1.0)
else:
ranked_pairs.append([i,j])
weights.append(0.0)
ranked_pairs = np.array(ranked_pairs)
weights = np.array(weights)
smoothed_pairs = None
smoothness_reg = 0.0
l2_reg = 0.0
dssl = DSSL(disp=False)
dssl.fit(X,T,ranked_pairs,smoothed_pairs=None,ranked_pair_weights=weights)
# print(dssl.res.fun)
self.assertAlmostEqual(0.0,dssl.res.fun)
def test_fit(self):
"""
Test ability to run on reasonable data
"""
N = 1000
X = np.random.randn(N,2)
T = np.arange(N)
w = np.random.randn(2)
scores = np.dot(X,w)
ranked_pairs = []
for i in range(X.shape[0]):
for j in range(X.shape[0]):
if scores[i] < scores[j] and np.random.rand() < 2.0/N:
ranked_pairs.append([i,j])
ranked_pairs = np.array(ranked_pairs)
smoothed_pairs = np.vstack([np.random.choice(100,N,replace=True),np.random.choice(100,N,replace=True)]).T
smoothed_pairs = smoothed_pairs[smoothed_pairs[:,0] != smoothed_pairs[:,1]]
smoothness_reg = 1.0
l2_reg = 1.0
dssl = DSSL(l2_reg=l2_reg,smoothness_reg=smoothness_reg,disp=False)
dssl.fit(X,T,ranked_pairs,smoothed_pairs)
# print(dssl.w)
if __name__ == "__main__":
unittest.main() |
dashan-emr/dsslpy | code/dssl.py | import autograd.numpy as np
from autograd import grad
from sklearn.base import BaseEstimator
from scipy.optimize import minimize
def huber(x,h):
"""
Huber loss
"""
assert 0 < h and h < 1
condlist = [x < -h, np.abs(x) <= h, x > h]
choicelist = [0.0, (h+x)**2/(4*h), x]
return np.select(condlist,choicelist)
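# Illustrative values (not part of the original module): with h = 0.01 the loss is
# 0 for x below -h, quadratic near zero, and linear (~x) for x above h, e.g.
# >>> huber(np.array([-1.0, 0.0, 1.0]), 0.01)
# evaluates to approximately [0.0, 0.0025, 1.0].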
class DSSL(BaseEstimator):
"""
DSSL class
"""
def __init__(self,l2_reg=0.0,smoothness_reg=0.0,h=0.01,maxiter=1000,tol=1e-8,gtol=1e-6,disp=False):
"""
Args:
l2_reg - float:
L2 regularization parameter
smoothness_reg - float:
Weight given to the temporal smoothness objective
h - float:
Huber loss parameter. Must be between 0 and 1. Values closer
to zero give a closer approximation to the hinge loss.
maxiter - int:
Maximum number of optimization iterations
tol - float:
Loss tolerance (see scipy.optimize.minimize doc for details)
gtol - float:
Gradient magnitude tolerance (see scipy.optimize.minimize
doc for details)
disp - bool:
Set to True to display optimization iterations.
"""
self.__dict__.update(locals())
def loss(self,ranked_pair_diffs,smoothed_pair_diffs,T_diffs,ranked_pair_weights=None,smoothed_pair_weights=None):
"""
DSSL loss
"""
if ranked_pair_weights is None:
ranked_pair_weights = np.ones(ranked_pair_diffs.shape[0])
if smoothed_pair_weights is None:
smoothed_pair_weights = np.ones(smoothed_pair_diffs.shape[0])
f = 0.0
# mean huber loss of the difference in scores between ranked pairs
ranking_loss = np.dot(ranked_pair_weights,huber(1.0-np.dot(ranked_pair_diffs,self.w),self.h))/(np.sum(ranked_pair_weights) + 1e-12)
# mean squared normalized difference in scores between smoothed pairs
smoothed_pair_score_diffs = np.dot(smoothed_pair_diffs,self.w)
smoothing_loss = self.smoothness_reg*np.dot(smoothed_pair_weights,(smoothed_pair_score_diffs/T_diffs)**2)/(np.sum(smoothed_pair_weights) + 1e-12)
# \ell_2 regularization
regularization = self.l2_reg*np.dot(self.w,self.w)/2.0
return ranking_loss + smoothing_loss + regularization
def set_params(self,w):
self.w = w
def get_obj(self,X,T,ranked_pairs,smoothed_pairs,ranked_pair_weights=None,smoothed_pair_weights=None):
# precalculate differences between pairs
if ranked_pairs is None:
ranked_pair_diffs = np.zeros((0,X.shape[1]))
else:
ranked_pair_diffs = X[ranked_pairs[:,0]] - X[ranked_pairs[:,1]]
if smoothed_pairs is None:
smoothed_pair_diffs = np.zeros((0,X.shape[1]))
T_diffs = np.array([])
else:
smoothed_pair_diffs = X[smoothed_pairs[:,0]] - X[smoothed_pairs[:,1]]
T_diffs = T[smoothed_pairs[:,0]] - T[smoothed_pairs[:,1]]
def obj(w):
self.set_params(w)
return self.loss(ranked_pair_diffs,smoothed_pair_diffs,T_diffs,ranked_pair_weights,smoothed_pair_weights)
return obj
def fit(self,X,T,ranked_pairs,smoothed_pairs,ranked_pair_weights=None,smoothed_pair_weights=None):
"""
Fit the DSSL loss
Args:
X - (n_samples,n_features) ndarray:
Design matrix
T - (n_samples,) ndarray of:
Vector of continuous timestamps
ranked_pairs - (n_ranked_pairs,2) integer ndarray:
Contains ranked pairs of samples. Model will try to find
parameters such that score(ranked_pairs[i,0]) > score(ranked_pairs[i,1])
for all i.
smoothed_pairs - (n_smoothed_pairs,2) integer ndarray:
Contains pairs of samples that are close in time. Model will
try to find parameters that minimize
(score(smoothed_pairs[i,0]) - score(smoothed_pairs[i,1]))**2/(T(smoothed_pairs[i,0]) - T(smoothed_pairs[i,1]))**2
for all i.
ranked_pair_weights - (n_ranked_pairs,) float ndarray:
Contains sample weights for each of the ranked pairs.
smoothed_pair_weights - (n_smoothed_pairs,) float ndarray:
Contains sample weights for each of the smoothed pairs.
"""
assert X.shape[0] > 0
assert T.shape == (X.shape[0],)
assert ranked_pairs is None or np.issubdtype(ranked_pairs.dtype, np.dtype(int).type)
assert smoothed_pairs is None or np.issubdtype(smoothed_pairs.dtype, np.dtype(int).type)
assert ranked_pairs is None or np.all(np.logical_and(ranked_pairs >= 0,ranked_pairs <= X.shape[0]))
assert smoothed_pairs is None or np.all(np.logical_and(smoothed_pairs >= 0,smoothed_pairs <= X.shape[0]))
assert ranked_pairs is None or np.all(ranked_pairs[:,0] != ranked_pairs[:,1])
assert smoothed_pairs is None or np.all(smoothed_pairs[:,0] != smoothed_pairs[:,1])
# get obj
obj = self.get_obj(X,T,ranked_pairs,smoothed_pairs,ranked_pair_weights,smoothed_pair_weights)
# get the gradient function using autograd
gfun = grad(obj)
# init params
w0 = np.zeros(X.shape[1])
# optimize objective
self.res = minimize(obj,w0,method="L-BFGS-B",jac=gfun,options={"gtol":self.gtol,"maxiter":self.maxiter,"disp":self.disp},tol=self.tol)
self.set_params(self.res.x)
return self
def predict(self,X):
"""
Calculate scores
"""
return np.dot(X,self.w) |
willemkokke/MayaLauncher | Tests/create_test_files.py | import os
from maya import standalone
standalone.initialize(name='python')
from maya import cmds
from maya.OpenMaya import MGlobal
folder = R'C:\repositories\MayaLauncher\Tests\Files'
types = [
('.ma', 'mayaAscii'),
('.mb', 'mayaBinary')
]
apiVersion = str(MGlobal.apiVersion())
for type in types:
extension = type[0]
format = type[1]
filename = os.path.join(folder, apiVersion + extension)
filename_ref = os.path.join(folder, apiVersion + '_ref' + extension)
cmds.file(new=True, force=True)
cmds.file(rename=filename_ref)
cmds.polyCube(name='cube')
cmds.file(type=format, save=True, force=True)
cmds.file(new=True, force=True)
cmds.file(rename=filename)
cmds.file(filename_ref, r=True, ns='ref' )
cmds.loadPlugin( "stereoCamera", qt=True )
from maya.app.stereo import stereoCameraRig
rig = stereoCameraRig.createStereoCameraRig('StereoCamera')
cmds.file(type=format, save=True, force=True)
|
s-ariga/NRAJ_Report | scripts/NRAJ_Results.py |
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import NRAJ_Config as nc
class NRAJ_Results():
def __init__(self):
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Output the NRAJ report format')
parser.add_argument('--version', '-v', action='version',
version=os.path.basename(__file__) + ' ver.0.1')
args = parser.parse_args()
nraj = NRAJ_Results()
|
s-ariga/NRAJ_Report | scripts/ReadSIUS.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class ReadSIUS():
def __init__(self, input:str):
self.data_file = input
|
s-ariga/NRAJ_Report | scripts/NRAJ_Config.py | # -*- coding : utf-8 -*-
import os
HOME = "../"
DATA_DIR = HOME + "data/"
DATA_10M = DATA_DIR + "AllAJapanClubChampionship2019-10m.srk"
DATA_50M = DATA_DIR + "AllAJapanClubChampionship2019-50m.srk"
OUTPUT_DIR = HOME + "outputs/"
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/scripts/backups/parseDBIdAndUNIIsByInChikey.py |
'''
Created 03/20/2014
@authors: <NAME>
@summary: parse InChIKey and drugbank_id from drugbank.xml, then parse
InChIKey, FDA Preferred Term and UNII from the UNIIs records and
match the DrugBank results against the UNIIs records.
output terms: FDA Preferred Term, UNII, Drugbank URI
output file: PT-UNIIs-Drugbank-03202014.txt
'''
from lxml import etree
from lxml.etree import XMLParser, parse
import os, sys
DRUGBANK_XML = "drugbank.xml"
UNIIS_RECORDS = "UNIIs 27Jun2014 Records.txt"
NS = "{http://www.drugbank.ca}"
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
dict_ickey_dbid = {}
'''
data structure of drugbank.xml
</drug><drug type="small molecule" created="2005-06-13 07:24:05 -0600"
updated="2013-09-16 17:11:29 -0600" version="4.0">
<drugbank-id>DB00641</drugbank-id>
<name>Simvastatin</name>
<calculated-properties>
<property>
<kind>InChIKey</kind>
<value>InChIKey=RYMZZMVNJRMUDD-HGQWONQESA-N</value>
<source>ChemAxon</source>
</property>
'''
#get mappings of inchikey and drugbank id
def parseDbIdAndInChiKey(root):
for childDrug in root.iter(tag=NS + "drug"):
subId = childDrug.find(NS + "drugbank-id")
if subId == None:
continue
else:
drugbankid = subId.text
drugbankName = unicode(childDrug.find(NS + "name").text)
for subProp in childDrug.iter(NS + "property"):
subKind = subProp.find(NS + "kind")
if subKind == None:
continue
elif subKind.text == "InChIKey":
subValue = subProp.find(NS + "value")
if subValue == None:
continue
else:
#print drugbankid + '\t' + subValue.text[9:]
ikey = subValue.text[9:]
dict_ickey_dbid [ikey] = (drugbankid,drugbankName)
p = XMLParser(huge_tree=True)
tree = parse(DRUGBANK_XML,parser=p)
root = tree.getroot()
parseDbIdAndInChiKey(root)
#print dict_ickey_dbid
#read mapping file that contains UNII PT INCHIKEY
for line in open(UNIIS_RECORDS,'r').readlines():
row = line.split('\t')
inchikey = row[4]
if len(inchikey) == 0:
continue
#print "mapping inchikey:" + inchikey
if dict_ickey_dbid.has_key(inchikey):
drugbankid = dict_ickey_dbid[inchikey][0]
drugbankName = dict_ickey_dbid[inchikey][1]
output = row[1] +'\t'+ row[0] +'\t'+ drugbankName +'\t'+ DRUGBANK_CA + drugbankid +'\t'+ DRUGBANK_BIO2RDF + drugbankid
print output.encode('utf-8').strip()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/scripts/backups/addRxTermAndRxcui.py |
'''
Created 10/27/2014
@authors: <NAME>
@summary: using the RxNorm RESTful API, get the RxNorm term type by RxCUI and merge the term type and RxCUI with the preferred substance and DrugBank URI
input: PS-to-RxNorm-mapping.txt and INCHI-OR-Syns-OR-Name.txt
output: preferred substance, UNII, drugbank name, drug bank id, rxnorm uri and rxnorm term
'''
import urllib2
from xml.dom.minidom import parseString
import os, sys
import codecs
RXNORM_BASE="http://rxnav.nlm.nih.gov/REST/"
HEADERS = { 'Accept' : 'application/json'}
PS_DRUGBANK = "../UNII-data/INCHI-OR-Syns-OR-Name-09162014.txt"
PS_DRUGBANK_AND = "../UNII-data/INCHI-AND-Syns-OR-Name-09162014.txt"
PS_RxCUI = "../../RxNORM-mapping/PS-to-Rxcui-mapping-10272014.txt"
OUTPUTS = "merge-error.log"
OUTPUTS_OR = "../UNII-data/PT-UNII-Name-DrugBank-Rxnorm-OR-mappings-10272014.tsv"
OUTPUTS_AND = "../UNII-data/PT-UNII-Name-DrugBank-Rxnorm-AND-mappings-10272014.tsv"
dict_RxCUIs = {}
if len(sys.argv) > 1:
mapping_mode = str(sys.argv[1])
else:
print "Usage: addRxTermAndRxcui.py <merge mode>(0: merge inchi-name-synomyns or mappings with rxcui and rxnorm type), 1: merge Inchi-name-synomyns and mappings with rxcui and rxnorm type))"
sys.exit(1)
if mapping_mode == "0":
OUTPUTS = OUTPUTS_OR
elif mapping_mode == "1":
PS_DRUGBANK = PS_DRUGBANK_AND
OUTPUTS = OUTPUTS_AND
else:
print "unknown mapping mode - please check if the mapping mode is 0 or 1"
for line in codecs.open(PS_RxCUI, 'r', encoding='utf-8').readlines():
row = line.split('|')
if len(row) == 2:
dict_RxCUIs[row[0]] = row[1]
# add header
f = codecs.open(OUTPUTS, 'w', encoding='utf-8')
header = "FDA preferred term" + '\t' + "UNII" + '\t' + "Drugbank Name" + '\t' + "Drugbank Id" + '\t' + "Rxcui" + '\t' + "RxTerm" + '\n'
f.write(header)
for line in codecs.open(PS_DRUGBANK, 'r', encoding='utf-8').readlines():
rxcui = ""
RxTerm = ""
row = line.split('\t')
# match by FDA preferred term to get Rxcui
# request Rxterm by rxcui
if len(row)==4 and dict_RxCUIs.has_key(row[0]):
rxcui = dict_RxCUIs[row[0]].replace('http://purl.bioontology.org/ontology/RXNORM/', '')
requestURL = RXNORM_BASE+'rxcui/'+ rxcui.strip('\n') + '/properties'
file = urllib2.urlopen(requestURL)
data = file.read()
file.close()
dom = parseString(data)
termXmlTag = dom.getElementsByTagName('tty')
if termXmlTag:
RxTerm = unicode(termXmlTag[0].toxml().strip('<tty>').strip('</tty>'))
output = line.strip('\n') + '\t' + rxcui.strip('\n') + '\t' + RxTerm + '\n'
f.write(output)
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/scripts/parseDBIdBySynsInchiName.py | '''
Created 09/04/2014
@authors: <NAME> and <NAME>
@summary: parse drug synonyms, dbid, name from drugbank.xml then parse synonyms
from UNIIs records and match the results.
Output: FDA PreferredTerm, FDA synonymn, UNII, Drugbank drug, drugbank id, matchedByKey
'''
from lxml import etree
from lxml.etree import XMLParser, parse
import os, sys
from sets import Set
DRUGBANK_XML = "../drugbank.xml"
UNIIS_NAMES = "../UNII-data/UNIIs 27Jun2014 Names.txt"
PT_INCHI_RECORDS = "../UNII-data/UNIIs 27Jun2014 Records.txt"
NS = "{http://www.drugbank.ca}"
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
'''
data structure of drugbank.xml
</drug><drug type="small molecule" created="2005-06-13 07:24:05 -0600"
updated="2013-09-16 17:11:29 -0600" version="4.0">
<drugbank-id>DB00641</drugbank-id>
<name>Simvastatin</name>
<property>
<kind>InChIKey</kind>
<value>InChIKey=RYMZZMVNJRMUDD-HGQWONQESA-N</value>
<source>ChemAxon</source>
</property>
<synonyms>
<synonym>...</synonym>
</synonyms>
<external-identifiers>
<external-identifier>
<resource>ChEBI</resource>
<identifier>6427</identifier>
</external-identifier>
</external-identifiers>
'''
if len(sys.argv) > 4:
validate_mode = str(sys.argv[1])
DRUGBANK_XML = str(sys.argv[2])
UNIIS_NAMES = str(sys.argv[3])
PT_INCHI_RECORDS = str(sys.argv[4])
else:
print "Usage: parseDBIdAndUNIIsBySynonymns.py <match mode>(0: (Inchi | name | synomyns) matched, 1: (Inchi && (name | synomyns matched)) <drugbank.xml> <FDA_UNII_Names> <FDA_UNII_Records>)"
sys.exit(1)
## get dict of mappings of drugbank id, name, inchikeys and synonyms
def parseDbIdAndSynonymns(root):
dict_name_inchi_syns = {}
for childDrug in root.iter(tag=NS + "drug"):
subId = childDrug.find(NS + "drugbank-id")
if subId == None:
continue
else:
drugbankid = subId.text
drugbankName = unicode(childDrug.find(NS + "name").text.upper())
dict_name_inchi_syns[drugbankName]={}
dict_name_inchi_syns[drugbankName]["dbid"] = drugbankid
## get inchikey
ikey = ""
for subProp in childDrug.iter(NS + "property"):
subKind = subProp.find(NS + "kind")
if subKind == None:
continue
elif subKind.text == "InChIKey":
subValue = subProp.find(NS + "value")
if subValue is not None:
ikey = subValue.text[9:]
dict_name_inchi_syns[drugbankName]["inchi"] = ikey
## get synonyms
set_syns = set()
syns = childDrug.find(NS + "synonyms")
if syns is not None:
for subProp in syns.iter():
if subProp == None or subProp.text == None:
continue
if subProp.text.strip().replace('\n',"") != "":
set_syns.add(subProp.text.upper())
dict_name_inchi_syns[drugbankName]["syns"] = set_syns
return dict_name_inchi_syns
## get dict of unii with inchi from PT_INCHI_RECORDS
## UNII PT RN MF INCHIKEY EINECS NCIt ITIS NCBI PLANTS SMILES
def parsePTAndInchi(path):
dict_inchi = {}
for line in open(path,'r').readlines():
row = line.split('\t')
if len(row) == 0:
continue
unii = row[0]
inchi = row[4].strip().upper()
if unii and inchi:
dict_inchi[unii]=inchi
return dict_inchi
def validates(dict_unii_inchi, dict_xml, validate_mode):
#print "mode:" + validate_mode
#read mapping file that contains Name TYPE UNII PT
(NAME, TYPE, UNII, PT) = range(0,4)
for line in open(UNIIS_NAMES,'r').readlines():
row = line.split('\t')
if len(row) == 0:
continue
name = row[NAME].strip().upper()
unii = row[UNII]
inchi=""
if dict_unii_inchi.has_key(unii):
inchi = dict_unii_inchi[unii]
if inchi == "":
continue
drug_type = row[TYPE]
if (drug_type == "PT") or (drug_type == "SY") or (drug_type == "SN"):
if validate_mode == "0":
for k,v in dict_xml.items():
matchedBy = ""
if k == name:
matchedBy = "name"
if name in v["syns"]:
if matchedBy == "":
matchedBy = "synonyms"
else:
matchedBy += "ANDsynonyms"
if inchi == v["inchi"]:
if matchedBy == "":
matchedBy = "inchi"
else:
matchedBy += "ANDinchi"
if matchedBy != "":
#print "MATCHED:" + matchedBy
#print "NAMES:" + name + "|" + unii + "|" + inchi
#print "DICT_XML:" + str(k) + "|" + str(v)
drugbankid = v["dbid"]
drugbankName = k
output = row[PT].strip() +'\t' + row[NAME].strip() +'\t' + row[UNII].strip() +'\t'+ drugbankName +'\t'+ drugbankid + '\t' + matchedBy
print output.encode('utf-8').strip()
break
elif validate_mode == "1":
for k,v in dict_xml.items():
#print str(k) + "|" + str(v)
matchedBy = ""
if inchi == v["inchi"]:
if k == name:
matchedBy = "nameANDinchi"
if name in v["syns"]:
if matchedBy == "":
matchedBy = "synonymsANDinchi"
else:
matchedBy = "nameANDsynonymsANDinchi"
if matchedBy != "":
drugbankid = v["dbid"]
drugbankName = k
output = row[PT].strip() +'\t' + row[NAME].strip() +'\t' + row[UNII].strip() +'\t'+ drugbankName +'\t'+ drugbankid+ '\t' + matchedBy
print output.encode('utf-8').strip()
break
def main():
p = XMLParser(huge_tree=True)
tree = parse(DRUGBANK_XML,parser=p)
root = tree.getroot()
## get name, syns and inchi from drugbank.xml
dict_xml = parseDbIdAndSynonymns(root)
#print str(dict_xml)
dict_unii_inchi = parsePTAndInchi(PT_INCHI_RECORDS)
#print str(dict_unii_inchi)
validates(dict_unii_inchi, dict_xml, validate_mode)
if __name__ == "__main__":
main()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/load-dailymed-spls/dailymed_rss.py | '''
Created 03/15/2012
@author: gag30
@summary: Download and extract all of the xml files for drugs updated on Dailymed within the past 7 days.
Extracted xml files are saved to ./spls and ./spls/updates, both of which are created if they
don't exist.
'''
import feedparser
from lxml.html import fromstring
import os
import shutil
import string
import sys
import time
import urllib
import urllib2
from zipfile import ZipFile, is_zipfile
import pdb
## Remove the zip files that were downloaded from Dailymed/any
## contents of tmpdir
def clean(tmpdir):
files = [tmpdir + f for f in os.listdir(tmpdir)]
for f in files:
try:
os.remove(f)
except OSError, err:
print "Couldn't delete " + f + ": " + err
try:
os.rmdir(tmpdir)
except OSError, err:
print "Couldn't delete " + tmpdir + ": " + err
## After the zip files have been downloaded and extracted to
## the temp folder, copy them to ./updateDir
## (./spls/updates by default)
def copy_xml(tmpdir, spldir):
tmpFiles = [tmpdir + f for f in os.listdir(tmpdir)]
updateDir = os.path.join(spldir, "updates")
for f in os.listdir(updateDir):
os.remove(os.path.join(updateDir, f))
for tmpFile in tmpFiles:
if tmpFile.endswith(".xml"):
shutil.copy(tmpFile, updateDir)
## Display download progress, updated on a single line
def dl_progress(current, total):
percent = int(float(current) / float(total) * 100)
message = " ".join([str(percent) + "%", "downloaded (" + str(current), "of", str(total) + ")"])
if percent == 100:
message += "...done\n"
sys.stdout.write("\r\x1b[K" + message)
sys.stdout.flush()
## No longer used
def get_download_name(title):
name = ""
for char in title:
if char in string.uppercase:
name += char
elif char == " ":
name += "%20"
else:
return name.strip("%20")
## Parse html for a link to xml file for a single drug
def get_xml_url(url):
usock = urllib2.urlopen(url)
html = usock.read()
usock.close()
baseurl = "http://dailymed.nlm.nih.gov"
root = fromstring(html)
for div in root.iter("div"):
if div.get("id") == "options":
for link in div.iter("a"):
href = link.get("href")
if "getFile.cfm?id" in href and "type=zip" in href:
return baseurl + href
return None
##Try to get the xml file url from url num
##times before failing
def get_xml_url_retry(url, num):
cnt = 0
while cnt < num:
cnt += 1
xmlUrl = get_xml_url(url)
if xmlUrl:
return xmlUrl
time.sleep(1)
return None
## Create a directory if one doesn't exist, else continue
def make_dir(name):
try:
os.mkdir(name)
except OSError:
pass
## Extract xml files from downloaded zip files
## to a temp dir
def unzip(tmpdir):
files = [tmpdir + f for f in os.listdir(tmpdir)]
for f in files:
try:
zipfile = ZipFile(f)
contents = zipfile.infolist()
for c in contents:
if c.filename[-4:] == ".xml":
zipfile.extract(c, tmpdir)
except:
print "Downloaded file {0} does not appear to be a zip file!".format(f)
sys.exit(1)
## Get the Dailymed rss feed for drugs updated within the past 7 days.
## Download the files for each, extract the spl and copy it to a
## master directory of spls and directory for spls contained in the
## current update. Delete downloaded files when finished.
def run():
TMPDIR = "tmp_spls/"
SPLDIR = "spls/"
rssparser = feedparser.parse('http://dailymed.nlm.nih.gov/dailymed/rss.cfm')
make_dir(TMPDIR)
make_dir(SPLDIR)
make_dir(os.path.join(SPLDIR, "updates"))
for ctr, entry in enumerate(rssparser['entries']):
#downloadURL = get_xml_url_retry(entry['link'], 3)
#downloadURL = downloadURL[:downloadURL.index("name=")]
setid = entry['id'].split('setid=')[1]
downloadURL = "http://dailymed.nlm.nih.gov/dailymed/downloadzipfile.cfm?setId={0}".format(setid)
#dailymedid = downloadURL.split("id=")[1].split("&")[0]
filename = os.path.join(TMPDIR, setid + ".zip")
urllib.urlretrieve(downloadURL, filename)
dl_progress(ctr+1, len(rssparser['entries']))
unzip(TMPDIR)
copy_xml(TMPDIR, SPLDIR)
clean(TMPDIR)
if __name__=="__main__":
run()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/scripts/backups/parseDBIdAndUNIIsByMF.py | '''
Created 03/20/2014
@authors: <NAME>
@summary: parse Molecular Formula and drugbank_id from drugbank.xml
then parse MF (Molecular Formula), FDA Preferred Term and UNII from the UNIIs records and
match the DrugBank results against the UNIIs records
output terms: FDA Preferred Term, UNII, Drugbank URI
output file: PT-UNIIs-Drugbank-byMF-03202014.txt
'''
import xml.etree.ElementTree as ET
import os, sys
DRUGBANK_XML = "drugbank.xml"
UNIIS_RECORDS = "UNIIs 25Jan2014 Records.txt"
NS = "{http://drugbank.ca}"
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
dict_ickey_dbid = {}
'''
<property>
<kind>Molecular Formula</kind>
<value>C4H6N4O3S2</value>
<source>ChemAxon</source>
</property>
'''
def parseDbIdAndMF(root):
for drug in root.iter(tag=NS + "drug"):
dbid = drug.find(NS + "drugbank-id")
if dbid == None:
continue
else:
drugbankid = dbid.text
for subProp in drug.iter(NS + "property"):
msKind = subProp.find(NS + "kind")
if msKind == None:
continue
elif msKind.text == "Molecular Formula":
msValue = subProp.find(NS + "value")
if msValue == None:
continue
else:
#print drugbankid + '\t' + subValue.text[9:]
ms = msValue.text
dict_ickey_dbid [ms] = drugbankid
tree = ET.parse(DRUGBANK_XML)
root = tree.getroot()
parseDbIdAndMF(root)
#read mapping file that contains UNII PT MF
for line in open(UNIIS_RECORDS,'r').readlines():
row = line.split('\t')
mf = row[2]
if len(mf) == 0:
continue
if dict_ickey_dbid.has_key(mf):
drugbankid = dict_ickey_dbid[mf]
output = row[1] +'\t'+ row[0] +'\t'+ DRUGBANK_CA + drugbankid +'\t'+ DRUGBANK_BIO2RDF + drugbankid
print output.encode('utf-8').strip()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/scripts/addBio2rdf_UNII_to_DrugBank.py | '''
Created 09/04/2014
@authors: <NAME>
@summary: add drugbank base uri to mappings of preferred term and drugbank uri
'''
import os, sys
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
PT_DRUGBANK = "../UNII-data/INCHI-OR-Syns-OR-Name.txt"
if len(sys.argv) > 1:
PT_DRUGBANK = str(sys.argv[1])
else:
print "Usage: addBio2rdf_UNII_to_DrugBank.py <UNII-data/INCHI-OR-Syns-OR-Name.txt>"
sys.exit(1)
for line in open(PT_DRUGBANK,'r').readlines():
columns = line.split('\t')
if len(columns):
PT = columns[0]
DBid = columns[3]
DBbio2rdf = DRUGBANK_BIO2RDF + DBid
DBca = DRUGBANK_CA + DBid
print (PT + '\t' + DBca + '\t' + DBbio2rdf).replace("\n","").replace("\r","")
#out = (line+bio2rdf).replace("\r\n","")
#print out.replace("|","\t")
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/FDA-pharmacogenetic-info-mapping/createFDAPharmgxDBTable.py | # createFDAPharmgxDBTable.py
#
# load data from a cleaned-up version of the FDA's pharmacogenomic
# biomarker table
# <http://www.fda.gov/Drugs/ScienceResearch/ResearchAreas/Pharmacogenetics/ucm083378.htm>
# into a table format that can be used to link the data and SPL
# section date within linkedSPLs
import sys
sys.path = sys.path + ['.']
import re
import string
import pprint
import pickle
## import Sparql-related
from SPARQLWrapper import SPARQLWrapper, JSON
import simplejson as json
reload(sys)
sys.setdefaultencoding("utf-8")
RAW_DATA_FILE = "genetic-biomarker-table-raw-import.csv"
OUTFILE = "FDAPharmgxTable.csv"
PT_RXCUI = "../RxNORM-mapping/PreferredTermRxcui-mapping.txt"
LINKED_SPL_SPARQL = SPARQLWrapper("http://dbmi-icode-01.dbmi.pitt.edu/sparql")
#LINKED_SPL_SPARQL.addDefaultGraph("http://dbmi-icode-01.dbmi.pitt.edu/linkedSPLs/")
#LINKED_SPL_SPARQL = SPARQLWrapper("http://172.16.31.10:8890/sparql")
RXNORM_BASE = "http://purl.bioontology.org/ontology/RXNORM/"
########################################################################################################################
if len(sys.argv) > 3:
RAW_DATA_FILE = str(sys.argv[1])
PT_RXCUI = str(sys.argv[2])
OUTFILE = str(sys.argv[3])
else:
print "Usage: createFDAPharmgxDBTable.py <path to raw data file> <PreferredTermRxcui-mapping.txt> <path to output file>"
sys.exit(1)
'''
SELECT DISTINCT ?setId WHERE {
?splId dailymed:activeMoietyRxCUI <%s>.
?splId dailymed:setId ?setId.
}
'''
def getSPLSetIdsForMoiety(sparql, rxcuiMoiety):
qry = '''
PREFIX linkedspls_vocabulary: <http://bio2rdf.org/linkedspls_vocabulary:>
SELECT DISTINCT ?setId WHERE {
?s rdf:type linkedspls_vocabulary:structuredProductLabelMetadata;
linkedspls_vocabulary:setId ?setId;
linkedspls_vocabulary:activeMoiety ?activeMoiety.
?activeMoiety linkedspls_vocabulary:RxCUI <%s>.
}
''' % rxcuiMoiety
#print "QUERY: %s" % qry
sparql.setQuery(qry)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
if len(results["results"]["bindings"]) == 0:
print "ERROR: no results from query"
return {}
setIds = []
for elt in results["results"]["bindings"]:
setIds.append(elt["setId"]["value"])
return setIds
"""
qry = '''
PREFIX dailymed: <http://dbmi-icode-01.dbmi.pitt.edu/linkedSPLs/vocab/resource/>
SELECT DISTINCT ?setId WHERE {
%s
?splId dailymed:setId ?setId.
}
''' % "\n".join(["?splId dailymed:activeMoietyRxCUI <%s>." % x for x in rxcuiMoietyL])
print "QUERY: %s" % qry
"""
def getSPLSetIdsForMultipleMoieties(sparql, rxcuiMoietyL):
qry = '''
PREFIX linkedspls_vocabulary: <http://bio2rdf.org/linkedspls_vocabulary:>
SELECT DISTINCT ?setId WHERE {
%s
?setId linkedspls_vocabulary:setId ?setId.
}
''' % "\n".join(["?s rdf:type linkedspls_vocabulary:structuredProductLabelMetadata; linkedspls_vocabulary:setId ?setId; linkedspls_vocabulary:activeMoiety ?activeMoiety. ?activeMoiety linkedspls_vocabulary:RxCUI <%s>." % x for x in rxcuiMoietyL]
)
#print "QUERY: %s" % qry
sparql.setQuery(qry)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
if len(results["results"]["bindings"]) == 0:
print "ERROR: no results from query"
return {}
setIds = []
for elt in results["results"]["bindings"]:
setIds.append(elt["setId"]["value"])
return setIds
##########################################################################################
# load genetic biomarker data from FDA
f = open(RAW_DATA_FILE,"r")
lines = f.readlines()
f.close()
# get RXCUI to active moiety mappings
rxcuis = []
f = open(PT_RXCUI, "r")
rxcuiLines = f.readlines()
f.close()
for l in rxcuiLines:
if not l:
break
l = l.strip()
(activeIngred, rxcui) = l.split("\t")
rxcuis.append((activeIngred, RXNORM_BASE + rxcui))
#print rxcuis
newLines = []
ingredRxcuiToAdd = ""
idx = 1
for l in lines:
# if idx >5:
# break
# idx = idx + 1
l = l.strip()
elts = l.split("|")
#print "***" + str(elts)
rxcuiL = filter(lambda x: x[0].upper() == elts[0].strip('"').upper(), rxcuis)
if len(rxcuiL) == 0:
print "ERROR: no active moiety match found for %s; testing if this is a multiple active moiety case " % elts[0].strip('"').upper()
sL = elts[0].strip('"').upper().split(" AND ")
mL = []
for ingr in sL:
rxcuiL2 = filter(lambda x: x[0].upper() == ingr, rxcuis)
if len(rxcuiL2) == 0:
print "ERROR: no active moiety match found for %s; skipping case %s " % (ingr, elts[0].strip('"').upper())
break
elif len(rxcuiL2) > 1:
print "WARNING: more than one active moiety/rxcui match %s:%s" % (ingr, rxcuiL2)
mL += rxcuiL2
if len(sL) != len(mL):
print "ERROR: possible multiple active moiety case but could not acquire at least one rxcui per moiety"
else:
rxcuiL.append(mL)
elif len(rxcuiL) > 1:
print "WARNING: more than one active moiety/rxcui match (single active moiety case) %s:%s" % (elts[0].strip('"').upper(), rxcuiL)
for rxcui in rxcuiL:
sects = elts[-1].strip('"').split(",")
# get all setids for spls containing the active moieties
setids = None
if type(rxcui[0]) == type(()): # multiple active moieties
print "multi %s" % rxcui
ingredRxcuiToAdd += "\n" +"\n".join([elts[0].strip('"').upper() + "\t" + x[1] for x in rxcui])
setids = getSPLSetIdsForMultipleMoieties(LINKED_SPL_SPARQL, [x[1] for x in rxcui])
for setid in setids:
for sect in sects:
newLines.append("%s %s %s %s %s" % (elts[0].strip('"').upper(), elts[1].strip('"'), elts[2].strip('"'), setid, sect.strip()))
else: # single active moiety
print "single"
setids = getSPLSetIdsForMoiety(LINKED_SPL_SPARQL, rxcui[1])
for setid in setids:
for sect in sects:
newLines.append("%s %s %s %s %s" % (elts[0].strip('"').upper(), elts[1].strip('"'), elts[2].strip('"'), setid, sect.strip()))
print "TODO: ADD THE FOLLOWING LINES TO THE FILE THAT MAPS FDA ACTIVE INGREDIENTS TO RXCUIS"
print ingredRxcuiToAdd
f = open(OUTFILE,"w")
for ln in newLines:
f.write(ln + "\n")
f.close()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/scripts/backups/combineMappings.py | # combineMappings.py
#
# Combine RxNorm, FDA, DrugBank, and ChEBI identifiers into one file
#
# Author: <NAME>
#
FDA_DRUGBANK = "FDA_UNII_to_DrugBank_03132014.txt"
FDA_CHEBI = "FDA_UNII_to_ChEBI_03132014.txt"
FDA_RXNORM = "PreferredSubstance-to-Rxcui-mapping_03132014.txt"
FDA_UNII = "FDAPreferredSubstanceToUNII_03132014.txt"
FDA_RXNORM_DRUGBANK_CHEBI = "FDA_RXNORM_DRUGBANK_CHEBI_COMBINED_MAPPING_04042014_TRIADS.tsv"
f = open(FDA_RXNORM,'r')
buf = f.read()
f.close()
l = buf.split("\n")
fdaD = {}
for elt in l:
if not elt.strip():
break
(pt,rxpurl) = elt.split("\t")
fdaD[pt] = ["None",rxpurl.replace("http://purl.bioontology.org/ontology/RXNORM/","").strip(),"None","None","None"]
f = open(FDA_UNII,'r')
buf = f.read()
f.close()
l = buf.split("\n")
for elt in l:
if not elt.strip():
break
(pt,unii) = elt.split("\t")
if fdaD.get(pt):
fdaD[pt][0] = unii.strip()
f = open(FDA_DRUGBANK,'r')
buf = f.read()
f.close()
l = buf.split("\n")
for elt in l:
if not elt.strip():
break
(pt,dbname,dbpurl) = elt.split("|")
if fdaD.get(pt):
fdaD[pt][2] = dbname
fdaD[pt][3] = dbpurl.replace("http://www.drugbank.ca/drugs/","").strip()
f = open(FDA_CHEBI,'r')
buf = f.read()
f.close()
l = buf.split("\n")
for elt in l:
if not elt.strip():
break
(pt,dbname,chebipurl) = elt.split("|")
if fdaD.get(pt):
fdaD[pt][4] = chebipurl.replace("http://purl.obolibrary.org/obo/","").strip()
f = open(FDA_RXNORM_DRUGBANK_CHEBI,'w')
f.write("FDA_PreferredTerm\tFDA_UNII\tRxNorm_CUI\tDrugBank_Name\tDrugBank_CUI\tChEBI_CUI\n")
for k,v in fdaD.iteritems():
ln = "%s\t%s\n" % (k ,"\t".join(v))
f.write(ln)
f.close()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-activeMoiety/mergeToActiveMoiety.py | <reponame>kqingcan/bio2rdf-scripts<gh_stars>10-100
'''
Created 08/15/2014
@authors: <NAME>
@summary: merge preferred term, UNII, NUI, preferredNameAndRole, DrugBank URI, ChEBI URI, RxNorm URI, OMOP id, and DrOn id for each active moiety into one file.
'''
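# The merge chain below is expected to produce mergedActiveMoiety.csv with (tab-separated)
# columns roughly: unii, pt, rxcui, db_uri1, db_uri2, chebi, dron, omopid, nui, nameAndRole.
# This is a sketch of the intended output (consumed by createActiveMoietySubInRDF.py),
# not a guarantee of exact column order.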
import os, sys
import pandas as pd
from StringIO import StringIO
import numpy as np
## Define namespaces
CHEBI_BASE_URI = "http://purl.obolibrary.org/obo/"
RXNORM_BASE_URI = "http://purl.bioontology.org/ontology/RXNORM/"
## Define data inputs
PT_UNII = "../LinkedSPLs-update/data/FDA/FDAPreferredSubstanceToUNII.txt"
UNII_RXCUI = "../LinkedSPLs-update/data/UMLS/UNIIs-Rxcuis-from-UMLS.txt"
PT_CHEBI = "../LinkedSPLs-update/mappings/PT-UNII-ChEBI-mapping/UNIIToChEBI.txt"
PT_DRUGBANK = "../LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/fda-substance-preferred-name-to-drugbank.txt"
UNII_NUI_PREFERRED_NAME_ROLE = "../LinkedSPLs-update/mappings/pharmacologic_class_indexing/EPC_extraction_most_recent.txt"
DRON_CHEBI_RXCUI = "../LinkedSPLs-update/mappings/DrOn-to-RxNorm/cleaned-dron-chebi-rxcui-ingredient.txt"
OMOP_RXCUI = "mappings/active-ingredient-omopid-rxcui.dsv"
## Get UNII - PT - RXCUI
unii_pt_cols = ['unii','pt']
unii_pt_DF = pd.read_csv(PT_UNII, sep='\t', names=unii_pt_cols)
rxcui_unii_cols = ['rxcui','unii']
rxcui_unii_DF = pd.read_csv(UNII_RXCUI, sep='|', names=rxcui_unii_cols)
rxcui_unii_DF['rxcui'] = rxcui_unii_DF['rxcui'].astype('str')
unii_pt_rxcui_DF = unii_pt_DF.merge(rxcui_unii_DF, on=['unii'], how='left')
print unii_pt_rxcui_DF.info()
## read mappings of pt and drugbank uri
pt_drugbank_cols = ['pt','db_uri1','db_uri2']
pt_drugbank_DF = pd.read_csv(PT_DRUGBANK, sep='\t', names=pt_drugbank_cols)
## read mappings of pt and chebi
pt_chebi_cols = ['pt','chebi']
pt_chebi_DF = pd.read_csv(PT_CHEBI, sep='\t', names=pt_chebi_cols)
## read mappings of dron and rxcui
dron_chebi_rxcui_cols = ['dron','chebi','rxcui']
dron_chebi_rxcui_DF = pd.read_csv(DRON_CHEBI_RXCUI, sep='|', names=dron_chebi_rxcui_cols, usecols=[0,2])
dron_chebi_rxcui_DF['rxcui'] = dron_chebi_rxcui_DF['rxcui'].astype('str')
## read mappings of unii, nui and preferredNameAndRole
unii_nui_namerole_cols = ['setid', 'unii','nui','nameAndRole']
unii_nui_namerole_DF = pd.read_csv(UNII_NUI_PREFERRED_NAME_ROLE, sep='\t', names=unii_nui_namerole_cols)[['unii','nui','nameAndRole']]
## read mappings of omopid and rxcui
omop_rxcui_cols = ['omopid','rxcui']
omop_rxcui_DF = pd.read_csv(OMOP_RXCUI, sep='|', names=omop_rxcui_cols)
omop_rxcui_DF['rxcui'] = omop_rxcui_DF['rxcui'].astype('str')
## merge pt, unii, rxcui and drugbank uri
unii_pt_rxcui_db_DF = unii_pt_rxcui_DF.merge(pt_drugbank_DF, on=['pt'], how='left')
unii_pt_rxcui_db_DF.to_csv('PT-RXCUI-UNII-DB.csv', sep='\t', index=False)
## merge chebi
merged_chebi_DF = unii_pt_rxcui_db_DF.merge(pt_chebi_DF, on=['pt'], how='left')
## merge dron id
merged_dron_DF = merged_chebi_DF.merge(dron_chebi_rxcui_DF, on=['rxcui'], how = 'left')
## merge omop id
merged_omop_DF = merged_dron_DF.merge(omop_rxcui_DF, on=['rxcui'], how = 'left')
## merge <nui> and <preferred name and role>
merged_epc_DF = merged_omop_DF.merge(unii_nui_namerole_DF, on=['unii'], how='left')
print merged_epc_DF.info()
merged_epc_DF.to_csv('mergedActiveMoiety.csv', sep='\t', index=False)
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/scripts/parseDBIdAndChEBI.py | '''
Created 09/04/2014
@authors: <NAME>
@summary: parse drugbank id and chebi id from drugbank xml
'''
from lxml import etree
from lxml.etree import XMLParser, parse
import os, sys
from sets import Set
#DRUGBANK_XML = "../drugbank.xml"
NS = "{http://www.drugbank.ca}"
CHEBI_OBO = "http://purl.obolibrary.org/obo/CHEBI_"
CHEBI_BIO2RDF = "http://bio2rdf.org/chebi:"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
'''
data structure of drugbank.xml
</drug><drug type="small molecule" created="2005-06-13 07:24:05 -0600"
updated="2013-09-16 17:11:29 -0600" version="4.0">
<drugbank-id>DB00007</drugbank-id>
<name>Simvastatin</name>
<property>
<kind>InChIKey</kind>
<value>InChIKey=RYMZZMVNJRMUDD-HGQWONQESA-N</value>
<source>ChemAxon</source>
</property>
<synonyms>
<synonym>...</synonym>
</synonyms>
<external-identifiers>
<external-identifier>
<resource>ChEBI</resource>
<identifier>6427</identifier>
</external-identifier>
</external-identifiers>
'''
if len(sys.argv) > 1:
DRUGBANK_XML = str(sys.argv[1])
else:
print "Usage: parseDBIdAndChEBI.py <drugbank.xml>"
sys.exit(1)
## print mappings of ChEBI ids and DrugBank ids parsed from drugbank.xml
def parseDbIdAndChEBI(root):
dbidchebiD = {}
for childDrug in root.iter(tag=NS + "drug"):
subId = childDrug.find(NS + "drugbank-id")
if subId == None:
continue
else:
drugbankid = subId.text
externalIds = childDrug.find(NS + "external-identifiers")
if externalIds is not None:
#print "[INFO] external-identifiers:"
for subProp in externalIds.iter(NS + "external-identifier"):
resource = subProp.find(NS + "resource").text
#print "resource: " + resource
if "ChEBI" == resource:
ChEBIId = subProp.find(NS + "identifier").text
#print "[INFO] drugbankId: %s - chebiId: %s" % (drugbankid, ChEBIId)
print "%s\t%s\t%s\t%s" % (CHEBI_OBO + ChEBIId, CHEBI_BIO2RDF + ChEBIId, DRUGBANK_CA + drugbankid, DRUGBANK_BIO2RDF + drugbankid)
def main():
p = XMLParser(huge_tree=True)
tree = parse(DRUGBANK_XML,parser=p)
root = tree.getroot()
## mappings of drugbank Id and ChEBI id from drugbank.xml
parseDbIdAndChEBI(root)
if __name__ == "__main__":
main()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-clinicalDrug/createClinicalDrugSubInRDF.py | <filename>linkedSPLs/LinkedSPLs-clinicalDrug/createClinicalDrugSubInRDF.py<gh_stars>10-100
'''
Created 08/18/2014
@authors: <NAME>
@summary: Read csv file
Inputs: mappings of dron, rxcui, omop concept id
Outputs: RDF/XML graph for clinical drug
'''
import sys
import time
sys.path = sys.path + ['.']
import re, codecs, uuid, datetime
import urllib2
import urllib
import traceback
import csv
import difflib
from rdflib import Graph, BNode, Literal, Namespace, URIRef, RDF, RDFS
from sets import Set
OUT_FILE = "clinicalDrugSub-in-rdf.xml"
CLINICALDRUG_BASE = "http://bio2rdf.org/linkedspls_resource:"
RXNORM_BASE = "http://purl.bioontology.org/ontology/RXNORM/"
OHDSI_BASE = "http://purl.org/net/ohdsi#"
DRON_BASE = "http://purl.obolibrary.org/obo/"
class DictItem:
def __init__(self, setid, dron, rxcui, omop, fullname):
self.dron = str(dron).strip()
self.rxcui = str(rxcui).strip()
self.omop = str(omop).strip()
self.fullname = str(fullname).strip()
self.setid = str(setid).strip()
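# mergedClinicalDrug.tsv is assumed to be the tab-separated output of mergeToClinicalDrug.py,
# providing at least the columns used below: setid, rxcui, fullname, dron, omop.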
data_set = csv.DictReader(open("mergedClinicalDrug.tsv","rb"), delimiter='\t')
drugsL = []
## convert data from csv to dict
for item in data_set:
#if item["setid"] and item["fullname"] and item["rxcui"] and item["dron"] and item["omop"]:
if item["setid"]:
drugRow = DictItem(item["setid"], item["dron"], item["rxcui"], item["omop"], item["fullname"])
drugsL.append(drugRow)
## set up RDF graph
# identify namespaces for other ontologies to be used
dcterms = Namespace("http://purl.org/dc/terms/")
pav = Namespace("http://purl.org/pav")
dctypes = Namespace("http://purl.org/dc/dcmitype/")
linkedspls_vocabulary = Namespace('http://bio2rdf.org/linkedspls_vocabulary:')
sio = Namespace('http://semanticscience.org/resource/')
oa = Namespace('http://www.w3.org/ns/oa#')
cnt = Namespace('http://www.w3.org/2011/content#')
gcds = Namespace('http://www.genomic-cds.org/ont/genomic-cds.owl#')
siocns = Namespace('http://rdfs.org/sioc/ns#')
swande = Namespace('http://purl.org/swan/1.2/discourse-elements#')
dikbD2R = Namespace('http://dbmi-icode-01.dbmi.pitt.edu/dikb/vocab/resource/')
linkedspls = Namespace('file:///home/rdb20/Downloads/d2rq-0.8.1/linkedspls-dump.nt#structuredProductLabelMetadata/')
ncbit = Namespace('http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#')
dikbEvidence = Namespace('http://dbmi-icode-01.dbmi.pitt.edu/dikb-evidence/DIKB_evidence_ontology_v1.3.owl#')
ndfrt = Namespace('http://purl.bioontology.org/ontology/NDFRT/')
activemoiety = Namespace('http://purl.org/net/nlprepository/spl-active-moiety')
graph = Graph()
graph.namespace_manager.reset()
graph.namespace_manager.bind("dcterms", "http://purl.org/dc/terms/")
graph.namespace_manager.bind("pav", "http://purl.org/pav");
graph.namespace_manager.bind("dctypes", "http://purl.org/dc/dcmitype/")
graph.namespace_manager.bind('linkedspls_vocabulary', 'http://bio2rdf.org/linkedspls_vocabulary:')
graph.namespace_manager.bind('sio', 'http://semanticscience.org/resource/')
graph.namespace_manager.bind('oa', 'http://www.w3.org/ns/oa#')
graph.namespace_manager.bind('cnt', 'http://www.w3.org/2011/content#')
graph.namespace_manager.bind('gcds','http://www.genomic-cds.org/ont/genomic-cds.owl#')
graph.namespace_manager.bind('siocns','http://rdfs.org/sioc/ns#')
graph.namespace_manager.bind('swande','http://purl.org/swan/1.2/discourse-elements#')
graph.namespace_manager.bind('dikbD2R','http://dbmi-icode-01.dbmi.pitt.edu/dikb/vocab/resource/')
graph.namespace_manager.bind('linkedspls','file:///home/rdb20/Downloads/d2rq-0.8.1/linkedspls-dump.nt#structuredProductLabelMetadata/')
graph.namespace_manager.bind('ncbit','http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#')
graph.namespace_manager.bind('dikbEvidence','http://dbmi-icode-01.dbmi.pitt.edu/dikb-evidence/DIKB_evidence_ontology_v1.3.owl#')
graph.namespace_manager.bind('ndfrt','http://purl.bioontology.org/ontology/NDFRT/')
graph.namespace_manager.bind('activemoiety','http://purl.org/net/nlprepository/spl-active-moiety')
## metadata
graph.add((URIRef(activemoiety), pav["createdBy"], Literal('<NAME>, PhD')))
graph.add((URIRef(activemoiety), pav["contributedBy"], Literal('<NAME>, MS')))
graph.add((URIRef(activemoiety), pav["createdOn"], Literal(time.strftime("%m/%d/%Y-%H:%M"))))
graph.add((URIRef(activemoiety), dcterms['publisher'], Literal("Department of Biomedical Informatics, University of Pittsburgh")))
graph.add((URIRef(activemoiety), dcterms['license'], URIRef("http://www.opendatacommons.org/licenses/by/1.0")))
index =1
for drug in drugsL:
clinicalDrug = CLINICALDRUG_BASE + drug.setid
graph.add((URIRef(clinicalDrug), RDF.type, linkedspls_vocabulary["clinicalDrug"]))
if drug.rxcui:
graph.add((URIRef(clinicalDrug), linkedspls_vocabulary["RxCUI"], URIRef(RXNORM_BASE + str(drug.rxcui))))
if drug.fullname.strip():
graph.add((URIRef(clinicalDrug), RDFS.label, Literal(drug.fullname.strip())))
if drug.omop:
graph.add((URIRef(clinicalDrug), linkedspls_vocabulary["OMOPConceptId"], URIRef((OHDSI_BASE + str(drug.omop)))))
if drug.dron:
graph.add((URIRef(clinicalDrug), linkedspls_vocabulary["DrOnId"], URIRef(DRON_BASE + drug.dron)))
##display the graph
f = codecs.open(OUT_FILE,"w","utf8")
s = graph.serialize(format="xml",encoding="utf8")
f.write(unicode(s,errors='replace'))
#print graph.serialize(format="xml")
f.close()
graph.close()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/test/test-example-queries.py | <gh_stars>10-100
import urllib2
qryUrlsL = ["https://goo.gl/Z3CAuz","https://goo.gl/dYHEQe","https://goo.gl/UsnWRI","https://goo.gl/z7HJdM","https://goo.gl/YHjIcB ","https://goo.gl/0Ogeka","https://goo.gl/64JQlU","https://goo.gl/XmphPo","https://goo.gl/gOZbHF","https://goo.gl/VgSfw6","https://goo.gl/kS8agv","https://goo.gl/Q4RISO","https://goo.gl/ujJL5s","https://goo.gl/eY2ljk","http://tinyurl.com/zauxa8b","https://goo.gl/AsbpBQ","https://goo.gl/al1r0y","https://goo.gl/fp3syi","https://goo.gl/eQi5pf","https://goo.gl/nocS8m","http://tinyurl.com/z3wrh3j","http://tinyurl.com/z3wrh3j","http://tinyurl.com/hx32ty3","http://tinyurl.com/jcty9qn","http://tinyurl.com/gpxglqf","http://tinyurl.com/hrulbmn","https://goo.gl/WYIzYz","https://goo.gl/Hr1HMX","https://goo.gl/WhpOFO","https://goo.gl/jZ0jpi","https://goo.gl/bH6gA1","https://goo.gl/fr1WoQ","https://goo.gl/fFFLwN","https://goo.gl/YyIbec","https://goo.gl/F9DF3N","https://goo.gl/hoKbw6","https://goo.gl/yIzjo1","https://goo.gl/nMjTjE"]
for qryUrlStr in qryUrlsL:
response = urllib2.urlopen(qryUrlStr)
html = response.read()
if html:
print "[TESTING] query %s is OK" % (qryUrlStr)
else:
print "[WARNING] query %s not return any results" % (qryUrlStr)
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/scripts/backups/parseDBIdAndChEBI.py | '''
Created 03/20/2014
@authors: <NAME>
@summary: parse chebi and drugbank_id from drugbank.xml
output terms: FDA Preferred Term, ChEBI URI, Drugbank URI
output file: PT-ChEBI-Drugbank-03202014.txt
'''
import xml.etree.ElementTree as ET
import os, sys
DRUGBANK_XML = "drugbank.xml"
UNIIS_RECORDS = "FDA_UNII_to_ChEBI_09042014.txt"
CHEBI_URL = "http://purl.obolibrary.org/obo/CHEBI_"
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
NS = "{http://www.drugbank.ca}"
dict_chebi_dbid = {}
'''
data structure of drugbank.xml
<drug>...
<drugbank-id>...
<name>Simvastatin</name>
<external-identifiers>
<external-identifier>
<resource>ChEBI</resource>
<identifier>6427</identifier>
</external-identifier>
'''
#print out mappings of chebi and drugbank id
def parseDbIdAndChebi(root):
for drug in root.iter(tag=NS + "drug"):
subId = drug.find(NS + "drugbank-id")
if subId == None:
continue
else:
drugbankid = unicode(subId.text)
drugbankName = unicode(drug.find(NS + "name").text)
for exIdens in drug.iter(NS + "external-identifiers"):
for exIden in exIdens.iter(NS + "external-identifier"):
resource = exIden.find(NS + "resource")
if resource == None:
continue
elif resource.text == "ChEBI":
childIdenti = exIden.find(NS + "identifier")
if childIdenti == None:
continue
else:
chebiId = unicode(CHEBI_URL + childIdenti.text)
dict_chebi_dbid[chebiId] = (drugbankid,drugbankName)
tree = ET.parse(DRUGBANK_XML)
root = tree.getroot()
parseDbIdAndChebi(root)
for line in open(UNIIS_RECORDS,'r').readlines():
row = line.split('\t')
chebi = row[1].strip()
if dict_chebi_dbid.has_key(chebi):
drugbankid = dict_chebi_dbid[chebi][0]
drugbankName = dict_chebi_dbid[chebi][1]
output = row[0] +'\t'+ drugbankName +'\t'+ chebi +'\t'+ DRUGBANK_CA+drugbankid +'\t'+ DRUGBANK_BIO2RDF+drugbankid
print output.encode('utf-8').strip()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/DrOn-to-RxNorm/cleanData.py | <gh_stars>10-100
'''
Created 07/23/2014
@authors: <NAME>
@summary: convert mappings that mix DrOn-to-RxCUI and ChEBI-to-RxCUI entries
into a three-column (DrOn | ChEBI | RxCUI) text file
'''
import os, sys, re
if len(sys.argv) > 1:
INPUT = str(sys.argv[1])
else:
print "Usage: cleanData.py <PATH TO dron-to-chebi-and-rxnorm.txt>"
sys.exit(1)
f = open(INPUT,"r")
buf = f.read()
f.close()
for line in buf.split("\n"):
if not line:
continue
if "CHEBI_" in line:
chebi = re.findall(r'CHEBI_[0-9]+', line)[0]
else:
chebi = ""
if "DRON_" in line:
dron = re.findall(r'DRON_[0-9]+',line)[0]
else:
dron = ""
if "rxcui=" in line:
rxcui = re.findall(r'rxcui=\"[0-9]+',line)[0]
else:
rxcui = ""
print str(dron) + "|" + str(chebi) + "|" + str(rxcui).replace("rxcui=\"","")
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-activeMoiety/createActiveMoietySubInRDF.py | '''
Created 08/18/2014
@authors: <NAME>
@summary: Read a csv file containing preferred term, UNII, NUI, preferredNameAndRole, DrugBank URI, ChEBI URI, RxNorm URI, OMOP id, and DrOn id, and create an RDF graph that represents active moieties.
'''
import sys
import time
sys.path = sys.path + ['.']
import re, codecs, uuid, datetime
import urllib2
import urllib
import traceback
import csv
import difflib
from rdflib import Graph, BNode, Literal, Namespace, URIRef, RDF, RDFS
from sets import Set
OUT_FILE = "activeMoietySub-in-rdf.xml"
ACTIVEMOIETY_BASE = "http://bio2rdf.org/linkedspls:"
CHEBI_BASE = "http://purl.obolibrary.org/obo/"
RXNORM_BASE = "http://purl.bioontology.org/ontology/RXNORM/"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
NDFRT_BASE = "http://purl.bioontology.org/ontology/NDFRT/"
DRON_BASE = "http://purl.obolibrary.org/obo/"
OHDSI_BASE = "http://purl.org/net/ohdsi#"
class DictItem:
def __init__(self, pt, db_uri1, db_uri2, rxcui, chebi, nui, dron, omopid, nameAndRole):
self.pt = str(pt)
self.db_uri1 = str(db_uri1)
self.db_uri2 = str(db_uri2)
self.rxcui = str(rxcui)
self.chebi = str(chebi)
self.dron = str(dron)
self.omopid = str(omopid)
if nui and nameAndRole:
self.drugClass = Set([str(nui)+'|'+str(nameAndRole)])
else:
self.drugClass = Set()
data_set = csv.DictReader(open("mergedActiveMoiety.csv","rb"), delimiter='\t')
dict_moieties = {}
## convert data from csv to dict (unii, items-object)
for item in data_set:
if item["unii"] not in dict_moieties:
moiety = DictItem(item["pt"], item["db_uri1"], item["db_uri2"], item["rxcui"], item["chebi"], item["nui"], item["dron"], item["omopid"] ,item["nameAndRole"])
dict_moieties[item["unii"]]=moiety
else:
if not dict_moieties[item["unii"]].pt and item["pt"]:
dict_moieties[item["unii"]].pt = item["pt"]
if not dict_moieties[item["unii"]].db_uri1 and item["db_uri1"]:
dict_moieties[item["unii"]].db_uri1 = item["db_uri1"]
if not dict_moieties[item["unii"]].db_uri2 and item["db_uri2"]:
dict_moieties[item["unii"]].db_uri2 = item["db_uri2"]
if not dict_moieties[item["unii"]].rxcui:
dict_moieties[item["unii"]].rxcui = item["rxcui"]
if not dict_moieties[item["unii"]].dron:
dict_moieties[item["unii"]].dron = item["dron"]
if not dict_moieties[item["unii"]].chebi:
dict_moieties[item["unii"]].chebi = item["chebi"]
if not dict_moieties[item["unii"]].omopid:
dict_moieties[item["unii"]].omopid = item["omopid"]
if item['nui'] and item['nameAndRole']:
if dict_moieties[item["unii"]].drugClass:
dict_moieties[item["unii"]].drugClass.add(item['nui']+'|'+item['nameAndRole'])
else:
dict_moieties[item["unii"]].drugClass = Set(item['nui']+'|'+item['nameAndRole'])
## set up RDF graph
# identify namespaces for other ontologies to be used
dcterms = Namespace("http://purl.org/dc/terms/")
pav = Namespace("http://purl.org/pav")
dctypes = Namespace("http://purl.org/dc/dcmitype/")
linkedspls_vocabulary = Namespace('http://bio2rdf.org/linkedspls_vocabulary:')
sio = Namespace('http://semanticscience.org/resource/')
oa = Namespace('http://www.w3.org/ns/oa#')
cnt = Namespace('http://www.w3.org/2011/content#')
gcds = Namespace('http://www.genomic-cds.org/ont/genomic-cds.owl#')
siocns = Namespace('http://rdfs.org/sioc/ns#')
swande = Namespace('http://purl.org/swan/1.2/discourse-elements#')
dikbD2R = Namespace('http://dbmi-icode-01.dbmi.pitt.edu/dikb/vocab/resource/')
linkedspls = Namespace('file:///home/rdb20/Downloads/d2rq-0.8.1/linkedspls-dump.nt#structuredProductLabelMetadata/')
ncbit = Namespace('http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#')
dikbEvidence = Namespace('http://dbmi-icode-01.dbmi.pitt.edu/dikb-evidence/DIKB_evidence_ontology_v1.3.owl#')
ndfrt = Namespace('http://purl.bioontology.org/ontology/NDFRT/')
activemoiety = Namespace('http://purl.org/net/nlprepository/spl-active-moiety')
graph = Graph()
graph.namespace_manager.reset()
graph.namespace_manager.bind("dcterms", "http://purl.org/dc/terms/")
graph.namespace_manager.bind("pav", "http://purl.org/pav");
graph.namespace_manager.bind("dctypes", "http://purl.org/dc/dcmitype/")
graph.namespace_manager.bind('linkedspls_vocabulary', 'http://bio2rdf.org/linkedspls_vocabulary:')
graph.namespace_manager.bind('sio', 'http://semanticscience.org/resource/')
graph.namespace_manager.bind('oa', 'http://www.w3.org/ns/oa#')
graph.namespace_manager.bind('cnt', 'http://www.w3.org/2011/content#')
graph.namespace_manager.bind('gcds','http://www.genomic-cds.org/ont/genomic-cds.owl#')
graph.namespace_manager.bind('siocns','http://rdfs.org/sioc/ns#')
graph.namespace_manager.bind('swande','http://purl.org/swan/1.2/discourse-elements#')
graph.namespace_manager.bind('dikbD2R','http://dbmi-icode-01.dbmi.pitt.edu/dikb/vocab/resource/')
graph.namespace_manager.bind('linkedspls','file:///home/rdb20/Downloads/d2rq-0.8.1/linkedspls-dump.nt#structuredProductLabelMetadata/')
graph.namespace_manager.bind('ncbit','http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#')
graph.namespace_manager.bind('dikbEvidence','http://dbmi-icode-01.dbmi.pitt.edu/dikb-evidence/DIKB_evidence_ontology_v1.3.owl#')
graph.namespace_manager.bind('ndfrt','http://purl.bioontology.org/ontology/NDFRT/')
graph.namespace_manager.bind('activemoiety','http://purl.org/net/nlprepository/spl-active-moiety')
## metadata
graph.add((URIRef(activemoiety), pav["createdBy"], Literal('<NAME>, PhD')))
graph.add((URIRef(activemoiety), pav["contributedBy"], Literal('<NAME>, MS')))
graph.add((URIRef(activemoiety), pav["createdOn"], Literal(time.strftime("%m/%d/%Y-%H:%M"))))
graph.add((URIRef(activemoiety), dcterms['publisher'], Literal("Department of Biomedical Informatics, University of Pittsburgh")))
graph.add((URIRef(activemoiety), dcterms['license'], URIRef("http://www.opendatacommons.org/licenses/by/1.0")))
index =1
for k,v in dict_moieties.items():
# pt, unii, db_uri1, db_uri2, rxcui, omopid, chebi, dron, nui, nameAndRole
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), linkedspls_vocabulary["UNII"], Literal(k)))
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), RDFS.label, Literal(v.pt.strip())))
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), RDF.type, linkedspls_vocabulary["ActiveMoietyUNII"]))
if v.rxcui:
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), linkedspls_vocabulary["RxCUI"], URIRef(RXNORM_BASE + str(int(float(v.rxcui))))))
if v.chebi:
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), linkedspls_vocabulary["ChEBI"], URIRef(v.chebi)))
if v.db_uri1:
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), linkedspls_vocabulary["x-drugbank"], URIRef(v.db_uri1)))
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), linkedspls_vocabulary["x-drugbank"], URIRef(v.db_uri2)))
if v.dron:
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), linkedspls_vocabulary["DrOnId"], URIRef(DRON_BASE + v.dron)))
if v.omopid:
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), linkedspls_vocabulary["OMOPConceptId"], Literal(OHDSI_BASE + str((v.omopid)))))
if v.drugClass:
for dc in v.drugClass:
idx = dc.find('|')
nui = dc[0:idx]
dcStr = dc[idx+1:]
dcGroup = None
if '[PE]' in dcStr:
dcGroup = "N0000009802"
elif '[MoA]' in dcStr:
dcGroup = "N0000000223"
elif '[Chemical/Ingredient]' in dcStr:
dcGroup = "N0000000002"
elif '[EPC]' in dcStr:
dcGroup = "N0000182631"
if dcGroup:
graph.add((URIRef(ACTIVEMOIETY_BASE + str(k)), ndfrt[dcGroup], ndfrt[nui]))
##display the graph
f = codecs.open(OUT_FILE,"w","utf8")
s = graph.serialize(format="xml",encoding="utf8")
f.write(unicode(s,errors='replace'))
#print graph.serialize(format="xml")
f.close()
graph.close()
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/load-dailymed-spls/get_spl_sections.py | <gh_stars>10-100
from lxml import etree
import os
import MySQLdb as mysql
import pdb
from dailymed_rss import unzip
NS = "{urn:hl7-org:v3}" #namespace for dailymed spls
def check_loinc_table(loincs):
excluded = set()
conn = mysql.connect(user="root", passwd="<PASSWORD>", db="linkedSPLs")
cursor = conn.cursor()
for loinc in loincs:
cursor.execute("SELECT * FROM loinc WHERE loinc='{0}'".format(loinc))
results = cursor.fetchall()
if not results:
excluded.add(loinc)
conn.close()
return excluded
def get_sections(xmlRoot):
sections = {}
for sectionTag in xmlRoot.getiterator(tag=NS+"section"):
for codeTag in sectionTag.getiterator(tag=NS+"code"):
code = codeTag.get("code")
name = codeTag.get("displayName")
if code and name:
if is_loinc(code):
sections[code] = name
return sections
def is_loinc(code):
if len(code) == 7:
for i in xrange(7):
if i == 5 and code[i] != "-":
return False
if i!=5 and not code[i].isdigit():
return False
return True
return False
def print_spl_sections(spls):
allSections = {}
for spl in spls:
parser = etree.XMLParser(huge_tree=True)
tree = etree.parse(spl, parser=parser)
document = tree.getroot()
sections = get_sections(document)
allSections.update(sections)
excluded = check_loinc_table(allSections)
print "Unique LOINC Sections:\n\n"
for loinc, name in allSections.items():
print "{0}\t{1}".format(loinc, name)
print "\n\nUnrecognized LOINC Sections:\n\n"
for loinc in excluded:
print "{0}\t{1}".format(loinc, allSections[loinc])
if __name__ == "__main__":
splDir = "/home/PITT/gag30/spls"
splFiles = [os.path.join(splDir, f) for f in os.listdir(splDir)]
print_spl_sections(splFiles)
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/FDA-pharmacogenetic-info-mapping/fda.py | <reponame>kqingcan/bio2rdf-scripts<gh_stars>10-100
##########------------------------------------------------------------------------------------------##########
########## Short script to grab FDA PGx Table and convert to pipe delimited csv with utf-8 encoding ##########
##########------------------------------------------------------------------------------------------##########
## Author: <NAME> ##
## University of Pittsburgh ##
import urllib.request
import csv
import re
from bs4 import BeautifulSoup
import codecs
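# Flow (as implemented below): gettable() scrapes and cleans the FDA pharmacogenomic
# biomarker table from the page above, then writetable() writes it out as a
# pipe-delimited, UTF-8 encoded CSV.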
def gettable(): ## Pulls and cleans the raw FDA table ##
fdapgx = "http://www.fda.gov/drugs/scienceresearch/researchareas/pharmacogenetics/ucm083378.htm"
rawtable = urllib.request.urlopen(fdapgx).read()
soup = BeautifulSoup(rawtable, "lxml")
table = soup.find("table", attrs={"class":"table table-striped tablesorter table-bordered"})
rows = table.findAll("tr")
cols = []
raw = []
for tr in rows:
col = tr.findAll("td")
col = [ele.text.strip() for ele in col]
raw.append([ele for ele in col if ele])
raw.remove(raw[0])
for item in raw:
item = str(item)
cols.append(item.replace("\\r\\n", ""))
writetable(cols)
def writetable(cols): ## Writes the table to CSV as pipe delimited utf-8 ##
file = codecs.open("FDA_PGx_Table.csv", "w", "utf-8-sig")
out = csv.writer(file, delimiter = "|", quotechar = '"', quoting = csv.QUOTE_ALL)
for item in cols:
itemlist = []
writelist = item.split("',")
for line in writelist: ## These look messy, I'm sure there is a better way to get rid of the extra characters ##
line = line.strip("['")
line = line.strip("'")
line = line.strip(" '")
line = line.strip("' ")
line = line.strip("']")
line = line.replace(" (1)", "")
line = line.replace(" (2)", "")
line = line.replace(" (3)", "")
line = line.replace(" (4)", "")
itemlist.append(line)
labellist = itemlist[4:len(itemlist)]
out.writerow([itemlist[0], itemlist[1], itemlist[2], itemlist[3],(", ".join([str(x) for x in labellist]))])
if __name__ == "__main__":
gettable() |
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/load-dailymed-spls/rxnorm.py | <filename>linkedSPLs/LinkedSPLs-update/load-dailymed-spls/rxnorm.py
import urllib2
import json
import os
from lxml import etree
RXNORM_BASE="http://rxnav.nlm.nih.gov/REST/"
HEADERS = { 'Accept' : 'application/json'}
def setid_in_rxnorm(setId):
req = urllib2.Request(RXNORM_BASE + "rxcui?idtype=SPL_SET_ID&id=" + setId, None, HEADERS)
#print "Request: %s" % req.get_full_url()
response = urllib2.urlopen(req)
result = json.loads(response.read())
if "idGroup" in result:
if "rxnormId" in result["idGroup"]:
return True
return False
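# setid_in_rxnorm() only inspects the keys checked above; the RxNav response is assumed
# to look roughly like (values hypothetical):
#   {"idGroup": {"idType": "SPL_SET_ID", "id": "<setId>", "rxnormId": ["197361"]}}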
def test_spls(splDir):
ns = "{urn:hl7-org:v3}" #namespace for dailymed spls
spls = [os.path.join(splDir, f) for f in os.listdir(splDir)]
for spl in spls:
parser = etree.XMLParser(huge_tree=True)
tree = etree.parse(spl, parser=parser)
document = tree.getroot()
for idTag in document.getiterator(tag=ns+"id"):
setid = idTag.get("root")
if setid_in_rxnorm(setid):
print "{0} IN RxNORM".format(setid)
else:
print "{0} NOT IN RxNORM".format(setid)
if __name__ == "__main__":
# setId = "1C5BC1DD-E9EC-44C1-9281-67AD482315D9"
# print setid_in_rxnorm(setId)
test_spls("/home/PITT/gag30/spls/")
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/RxNORM-mapping/mergePT-UNII-RXCUI.py | '''
Created 11/25/2014
@authors: <NAME>
@summary: merge preferred term, UNII, and RxCUI by joining the UNII-to-RxCUI mappings from UMLS with the FDA preferred term-to-UNII mappings.
'''
import os, sys
import pandas as pd
from StringIO import StringIO
import numpy as np
PT_UNII = "../../data/FDA/FDAPreferredSubstanceToUNII.txt"
UNII_RXCUI = "../../data/UMLS/UNIIs-Rxcuis-from-UMLS.txt"
PT_UNII_RXCUI_OUT = "pt_unii_rxcui_test.txt"
PT_RXCUI_OUT = "pt_rxcui_test.txt"
if len(sys.argv) > 4:
PT_UNII = str(sys.argv[1])
UNII_RXCUI = str(sys.argv[2])
PT_UNII_RXCUI_OUT = str(sys.argv[3])
PT_RXCUI_OUT = str(sys.argv[4])
else:
print "Usage: mergePT-UNII-RXCUI.py <mapppings of UNII and PT> <mappings of UNII and RXCUI> <OUTPUT PT-UNII-RXCUI> <OUTPUT PT-RXCUI>"
sys.exit(1)
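# Assumed input formats (matching the read_csv calls below):
#   PT_UNII:    tab-separated lines of  <UNII><TAB><preferred term>
#   UNII_RXCUI: pipe-separated lines of <RxCUI>|<UNII>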
unii_pt_cols = ['unii','pt']
unii_pt_DF = pd.read_csv(PT_UNII, sep='\t', names=unii_pt_cols)
rxcui_unii_cols = ['rxcui','unii']
rxcui_unii_DF = pd.read_csv(UNII_RXCUI, sep='|', names=rxcui_unii_cols)
pt_unii_rxcui_DF = unii_pt_DF.merge(rxcui_unii_DF, on=['unii'], how='left')
#print pt_unii_rxcui_DF.info()
pt_rxcui_DF = unii_pt_DF.merge(rxcui_unii_DF, on=['unii'], how='right')
pt_rxcui_DF = pt_rxcui_DF[pt_rxcui_DF.pt.notnull()]
#pt_rxcui_DF.dropna(how="all", inplace=True)
#print pt_rxcui_DF.info()
pt_unii_rxcui_DF.to_csv(PT_UNII_RXCUI_OUT, sep='\t',index=False)
pt_rxcui_DF[pt_rxcui_DF.pt.notnull()].ix[:,[1,2]].to_csv(PT_RXCUI_OUT, sep='\t', index=False)
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-clinicalDrug/mergeToClinicalDrug.py | '''
Created 08/15/2014
@authors: <NAME>
@summary: merge rxnorm URI, OMOP id, DrOn id for clinical drug.
'''
import os, sys
import pandas as pd
from StringIO import StringIO
import numpy as np
#from numpy import nan
CHEBI_BASE_URI = "http://purl.obolibrary.org/obo/"
RXNORM_BASE_URI = "http://purl.bioontology.org/ontology/RXNORM/"
## Define data inputs
DRON_RXCUI = "../LinkedSPLs-update/mappings/DrOn-to-RxNorm/cleaned-dron-to-rxcui-drug.txt"
SETID_RXCUI = "mappings/setid_rxcui.txt"
FULLNAME_SETID = "mappings/setid_fullname.txt"
OMOP_RXCUI = "mappings/clinical-drug-omopid-rxcui.dsv"
## read mappings of dron and rxcui
dron_rxcui_cols = ['dron', 'chebi', 'rxcui']
dron_rxcui_DF = pd.DataFrame({'dron': ['string'],'chebi': ['string'],'rxcui': ['string']})
dron_rxcui_DF = pd.read_csv(DRON_RXCUI, sep='|', names=dron_rxcui_cols, usecols=["dron", "rxcui"],)
## read mappings of omopid and rxcui
omop_rxcui_cols = ['omop','rxcui']
omop_rxcui_DF = pd.DataFrame({'omop': ['string'],'rxcui': ['string']})
omop_rxcui_DF = pd.read_csv(OMOP_RXCUI, sep='|', names=omop_rxcui_cols, skiprows=[0])
## merge dron, omop, rxcui
dron_omop_rxcui_DF = pd.DataFrame({'dron': ['string'],'rxcui': ['string'],'omop': ['string']})
dron_omop_rxcui_DF = dron_rxcui_DF.merge(omop_rxcui_DF, on=['rxcui'], how='inner')
print dron_omop_rxcui_DF.info()
## read mappings of setid and rxcui
setid_rxcui_cols = ['setid','rxcui']
setid_rxcui_DF = pd.DataFrame({'setid': ['string'],'rxcui': ['string']})
setid_rxcui_DF = pd.read_csv(SETID_RXCUI, sep='|', names=setid_rxcui_cols)
## read mappings of setid and fullname
fullname_setid_cols = ['setid','fullname']
fullname_setid_DF = pd.DataFrame({'setid': ['string'],'fullname': ['string']})
fullname_setid_DF = pd.read_csv(FULLNAME_SETID, sep=',', names=fullname_setid_cols)
## merge fullname, setid, rxcui
fullname_rxcui_setid_DF = pd.DataFrame({'rxcui': ['string'],'setid': ['string'],'fullname': ['string']})
fullname_rxcui_setid_DF = setid_rxcui_DF.merge(fullname_setid_DF, on=['setid'], how='right')
print fullname_rxcui_setid_DF.info()
## merge fullname, rxcui, dron, omopid
output_DF = fullname_rxcui_setid_DF.merge(dron_omop_rxcui_DF, on=['rxcui'], how='left')
print output_DF.info()
#print output_DF
output_DF.to_csv('mergedClinicalDrug.tsv', sep='\t', index=False)
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/NDFRT-to-RxNorm/convertNDFRTandRXNORM_UUIDsToPurls.py | <filename>linkedSPLs/LinkedSPLs-update/mappings/NDFRT-to-RxNorm/convertNDFRTandRXNORM_UUIDsToPurls.py
'''
Created 03/20/2014
@authors: <NAME>
@summary: add rxnorm and NDFRT base URI to rxcui from UMLS RXNCONSO.rrf
'''
import sys
RXNOEM_NDFRT = "rxnorm-to-ndfrt-chemical-ingredient-mapping-10292014.txt"
if len(sys.argv) > 1:
RXNORM_NDFRT = str(sys.argv[1])
else:
print "Usage: convertNDFRTandRXNORM_UUIDsToPurls.py <rxnorm-to-ndfrt-chemical-ingredient-mapping.txt>"
sys.exit(1)
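# Each input line (after the header) is assumed to be "<rxcui>|<nui>|<label>"; for a
# hypothetical line "1234|N0000000001|example ingredient" the script prints:
#   http://purl.bioontology.org/ontology/RXNORM/1234 http://purl.bioontology.org/ontology/NDFRT/N0000000001 example ingredient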
f = open(RXNOEM_NDFRT,"r")
buf = f.read()
f.close()
l = buf.split("\n")
for elt in l[1:]:
if elt == "":
break
(rxcui,ncui,label) = elt.split("|")
print "http://purl.bioontology.org/ontology/RXNORM/%s http://purl.bioontology.org/ontology/NDFRT/%s %s" % (rxcui,ncui,label)
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/load-dailymed-spls/loadDailymedToSql.py | <filename>linkedSPLs/LinkedSPLs-update/load-dailymed-spls/loadDailymedToSql.py
'''
Created 1/24/2012
@authors: <NAME>, <NAME>
@summary: Iterate over a set of SPLs and load the text into a SQL database (schema created by db_import_dailymed.php).
Script is run from med-info-corpora/phase-I-package-inserts/lodd-dailymed-subset.
Requires a comma-delimited file with SPL file names.
Mapping of sections to tables is done using the following section heading map from http://www.fda.gov/ForIndustry/DataStandards/StructuredProductLabeling/ucm162057.htm:
LOINC OID: 2.16.840.1.113883.6.1
LOINC Code
LOINC Name
34086-9 ABUSE SECTION
60555-0 ACCESSORIES
34084-4 ADVERSE REACTIONS SECTION
34091-9 ANIMAL PHARMACOLOGY & OR TOXICOLOGY SECTION
60556-8 ASSEMBLY OR INSTALLATION INSTRUCTIONS
34066-1 BOXED WARNING SECTION
60557-6 CALIBRATION INSTRUCTIONS
34083-6 CARCINOGENESIS & MUTAGENESIS & IMPAIRMENT OF FERTILITY SECTION
34090-1 CLINICAL PHARMACOLOGY SECTION
60558-4 CLEANING, DISINFECTING, AND STERILIZATION INSTRUCTIONS
34092-7 CLINICAL STUDIES SECTION
60559-2 COMPONENTS
34070-3 CONTRAINDICATIONS SECTION
34085-1 CONTROLLED SUBSTANCE SECTION
34087-7 DEPENDENCE SECTION
34089-3 DESCRIPTION SECTION
34068-7 DOSAGE & ADMINISTRATION SECTION
43678-2 DOSAGE FORMS & STRENGTHS SECTION
34074-5 DRUG & OR LABORATORY TEST INTERACTIONS SECTION
42227-9 DRUG ABUSE AND DEPENDENCE SECTION
34073-7 DRUG INTERACTIONS SECTION
50742-6 ENVIRONMENTAL WARNING SECTION
50743-4 FOOD SAFETY WARNING SECTION
34072-9 GENERAL PRECAUTIONS SECTION
34082-8 GERIATRIC USE SECTION
50740-0 GUARANTEED ANALYSIS OF FEED SECTION
69719-3 HEALTH CLAIM SECTION
34069-5 HOW SUPPLIED SECTION
51727-6 INACTIVE INGREDIENT SECTION
34067-9 INDICATIONS & USAGE SECTION
50744-2 INFORMATION FOR OWNERS/CAREGIVERS SECTION
34076-0 INFORMATION FOR PATIENTS SECTION
59845-8 INSTRUCTIONS FOR USE SECTION
60560-0 INTENDED USE OF THE DEVICE
34079-4 LABOR & DELIVERY SECTION
34075-2 LABORATORY TESTS SECTION
43679-0 MECHANISM OF ACTION SECTION
49489-8 MICROBIOLOGY SECTION
43680-8 NONCLINICAL TOXICOLOGY SECTION
34078-6 NONTERATOGENIC EFFECTS SECTION
34080-2 NURSING MOTHERS SECTION
60561-8 OTHER SAFETY INFORMATION
34088-5 OVERDOSAGE SECTION
55106-9 OTC - ACTIVE INGREDIENT SECTION
50569-3 OTC - ASK DOCTOR SECTION
50568-5 OTC - ASK DOCTOR/PHARMACIST SECTION
50570-1 OTC - DO NOT USE SECTION
50565-1 OTC - KEEP OUT OF REACH OF CHILDREN SECTION
53414-9 OTC - PREGNANCY OR BREAST FEEDING SECTION
55105-1 OTC - PURPOSE SECTION
53413-1 OTC - QUESTIONS SECTION
50566-9 OTC - STOP USE SECTION
50567-7 OTC - WHEN USING SECTION
51945-4 PACKAGE LABEL.PRINCIPAL DISPLAY PANEL
68498-5 PATIENT MEDICATION INFORMATION SECTION
34081-0 PEDIATRIC USE SECTION
43681-6 PHARMACODYNAMICS SECTION
66106-6 PHARMACOGENOMICS SECTION
43682-4 PHARMACOKINETICS SECTION
42232-9 PRECAUTIONS SECTION
42228-7 PREGNANCY SECTION
43683-2 RECENT MAJOR CHANGES SECTION
34093-5 REFERENCES SECTION
53412-3 RESIDUE WARNING SECTION
60562-6 ROUTE, METHOD AND FREQUENCY OF ADMINISTRATION
50741-8 SAFE HANDLING WARNING SECTION
48779-3 SPL INDEXING DATA ELEMENTS SECTION
48780-1 SPL PRODUCT DATA ELEMENTS SECTION
42231-1 SPL MEDGUIDE SECTION
42230-3 SPL PATIENT PACKAGE INSERT SECTION
42229-5 SPL UNCLASSIFIED SECTION
69718-5 STATEMENT OF IDENTITY SECTION
44425-7 STORAGE AND HANDLING SECTION
60563-4 SUMMARY OF SAFETY AND EFFECTIVENESS
34077-8 TERATOGENIC EFFECTS SECTION
43684-0 USE IN SPECIFIC POPULATIONS SECTION
54433-8 USER SAFETY WARNINGS SECTION
50745-9 VETERINARY INDICATIONS SECTION
43685-7 WARNINGS AND PRECAUTIONS SECTION
34071-1 WARNINGS SECTION
71744-7 HEALTH CARE PROVIDER LETTER SECTION
38056-8 SUPPLEMENTAL PATIENT MATERIAL SECTION
69763-1 DISPOSAL AND WASTE HANDLING
'''
import logging
import os, sys
from lxml import etree
from lxml.etree import XMLParser, parse
import MySQLdb as mdb
import shutil
import pdb
from rxnorm import setid_in_rxnorm
from get_spl_sections import get_sections
SPL_DIR = "spls/"
UPDATE_DIR = "spls/updates/"
NS = "{urn:hl7-org:v3}" #namespace for dailymed spls
## database connection
con = None
try:
con = mdb.connect('localhost', 'root', '5bboys', 'linkedSPLs')
con.set_character_set('utf8')
cursor = con.cursor()
except mdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
sys.exit(1)
# several to add from the above list
tableToSectionMap = [
("34070-3", "contraindications"),
("34084-4", "adverse_reactions"),
("34088-5", "overdosage"),
("51727-6", "inactiveIngredient"),
("34068-7", "dosage_and_administration"),
("34067-9", "indications_and_usage"),
("42232-9", "precautions"),
("34069-5", "how_supplied"),
("34089-3", "description"),
("34090-1", "clinical_pharmacology"),
("34066-1", "boxed_warning"),
("34073-7", "drug_interactions"),
("43684-0", "specific_populations"),
("34092-7", "clinical_studies")
]
##gag30: Tables that the script currently inserts into
ENABLED_TABLES = ["spl_has_active_moiety", "structuredProductLabelMetadata", "active_moiety"]
##gag: At some point it might be nice to set up a nicer create database script
## using sqlalchemy...for now, let's just empty the tables we're using
## rather than deleting them, avoiding the need to recreate them.
def clear_tables():
cursor.execute("SELECT * FROM loinc")
tables = cursor.fetchall()
for table in tables:
cursor.execute("DELETE FROM `{0}`".format(table[2]))
cursor.execute("ALTER TABLE `{0}` AUTO_INCREMENT=1".format(table[2]))
for table in ENABLED_TABLES:
cursor.execute("DELETE FROM `{0}`".format(table))
cursor.execute("ALTER TABLE `{0}` AUTO_INCREMENT=1".format(table))
##Copy a spl to the master spl directory, making sure
##to delete the previous file with setid if it exists
def copy_to_master_dir(splFile, setid=None):
if setid:
cursor.execute("SELECT filename FROM structuredProductLabelMetadata WHERE setId=%s",setid)
oldFilename = cursor.fetchone()[0]
try:
os.remove(os.path.join(SPL_DIR, oldFilename))
except OSError as e:
print "\nWARNING: Attempt to remove %s yielded OSError %s, SPL file does not exist?" % (oldFilename, e.strerror)
shutil.copy(splFile, SPL_DIR)
##Get section with code = code
def get_section(root, code):
for secTag in root.getiterator(tag=NS + "section"):
secIter = secTag.getiterator(tag=NS + "code")
for childElt in secIter:
codeTagCode = childElt.get("code")
if codeTagCode == code:
# print "codeTagCode: %s" % codeTagCode
# section are sometimes embedded so, if necessary,
# find the section that is the parent of this tag
subSections = [el for el in secTag.iter(tag=NS+"section")]
if not subSections:
return (secTag, childElt)
for childSec in secTag.iter(tag=NS + "section"):
subcode = childSec.find(NS + "code")
if subcode == None:
continue
if subcode.get("code") == codeTagCode:
return (childSec, childElt)
return (None, None)
##gag: Let's generalize getting a value for a tag
##so we don't load up on various get_tag functions.
##This covers most of the tags we get.
def get_tag_value(root, tag, value, attr=None):
for tag in root.getiterator(tag=NS + tag):
if tag.find(NS + value) is not None:
if attr:
return tag.find(NS + value).get(attr)
else:
return tag.find(NS + value).text
return None
##Get the values for all occurrences of tag
def get_tag_values(root, tag, value, attr=None):
values = []
for tag in root.iter(tag=NS + tag):
subTag = tag.find(NS+value)
if subTag is None:
continue
if attr:
values.append(subTag.get(attr))
else:
values.append(subTag.text)
return values
##gag: This will cover some more
def get_tag_attr(root, tag, attr):
for tag in root.getiterator(tag=NS + tag):
if tag.get(attr) is not None:
return tag.get(attr)
return None
##Get the tags for the structuredProductLabelMetaData table
def get_tags(root, splF, logger):
tags = {}
tags['setId'] = get_tag_attr(root, "setId", "root")
check_var(tags['setId'], "setId", splF, logger)
tags['versionNumber'] = get_tag_attr(root, "versionNumber", "value")
check_var(tags['versionNumber'], "versionNumber", splF, logger)
tags['activeMoieties'] = get_tag_values(root, "activeMoiety", "name")
tags['activeMoieties'] = [x.upper() for x in tags['activeMoieties']] # upper case to simplify ontology and linked data mappings
check_var(tags['activeMoieties'], "activeMoiety", splF, logger)
# The UNIIs for each active moiety should be retrieved in the same
# order as the active moieties themselves
tags['activeMoietyUNIIs'] = get_tag_values(root, "activeMoiety", "code", "code")
check_var(tags['activeMoietyUNIIs'], "activeMoietyUNIIs", splF, logger)
# if there is not the same number of moiety names and UNIIs then
# something is wrong so return an empty dictionary
if len(tags['activeMoieties']) != len(tags['activeMoietyUNIIs']):
return {}
tags['fullName'] = get_tag_value(root, "manufacturedProduct", "name")
if not tags['fullName']:
tags['fullName'] = get_tag_value(root, "manufacturedMedicine", "name")
check_var(tags['fullName'], "fullName", splF, logger)
tags['routeOfAdministration'] = get_tag_value(root, "substanceAdministration", "routeCode", attr="displayName")
check_var(tags['routeOfAdministration'], "substanceAdministration", splF, logger)
tags['genericMedicine'] = get_tag_value(root, "genericMedicine", "name")
check_var(tags['genericMedicine'], "genericMedicine", splF, logger)
tags['representedOrganization'] = get_tag_value(root, "representedOrganization", "name")
check_var(tags['representedOrganization'], "representedOrganization", splF, logger)
##gag: Get insert date
tags['effectiveTime'] = get_effective_time(root)
check_var(tags['effectiveTime'], "effectiveTime", splF, logger)
tags['filename'] = splF.split("/")[-1]
return tags
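# On success get_tags() returns a dict with the keys used by run()/update():
# setId, versionNumber, activeMoieties, activeMoietyUNIIs, fullName,
# routeOfAdministration, genericMedicine, representedOrganization, effectiveTime, filename.
# An empty dict signals that the moiety names and UNIIs could not be paired up.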
##gag: Help clean up the main code a little
def check_var(var, name, filename, logger):
if not var:
logger.warning(name + " not found for " + filename)
##gag: Get the effective time of the insert itself, which is
## under <document> <effectiveTime value=yyyymmdd>
def get_effective_time(root):
if root.find(NS + "effectiveTime") is not None:
return root.find(NS + "effectiveTime").get("value")
return None
##gag: We can solve the text/tail problem using recursion (lxml.etree actually has a
## recursive function for this purpose, but since we want a mix of text and html
## it works out well that I recreated it anyway).
## This function traverses the xml tree in a depth first fashion (i.e. we're
## still using Element.getiterator()). However, the Element.text is added
## when the element is reached, but Element.tail is only added after all of the
## text and tail attributes of the element's children.
def get_section_text(section, sectionText):
for elem in section.getiterator():
if elem.tag.lower() == NS + "table":
return "".join([sectionText, "\n\n", etree.tostring(elem, pretty_print=True),"\n\n"])
elif has_parent(elem, "table"):
return sectionText
if elem.text:
sectionText += elem.text
if elem.tag.lower() == NS + "br":
sectionText += "\n"
children = list(elem)
for child in children:
sectionText = get_section_text(child, sectionText)
if elem.tail:
return sectionText + elem.tail
return sectionText
##Determine if a section table contains an entry
##for the spl being updated
def has_entry(table, key, rowid):
cursor.execute("SELECT * FROM `{0}` WHERE `{1}`={2}".format(table, key, rowid))
row = cursor.fetchone()
if row:
return True
return False
##Determine if <lxml.etree.Element elem> has a parent element with tag=tag.
def has_parent(elem, tag):
tableAncestors = [el for el in elem.iterancestors(tag = NS+tag)]
if tableAncestors:
return True
return False
##Try to insert an active moiety and its UNII into the active_moiety table
def insert_active_moieties(activeMoieties, activeMoietyUNIIs):
#print "%s\n%s" % (activeMoieties, activeMoietyUNIIs)
for i,v in enumerate(activeMoieties):
query = "INSERT INTO active_moiety (name, UNII) VALUES (%s, %s) ON DUPLICATE KEY UPDATE id=id"
values = [v, activeMoietyUNIIs[i]]
cursor.execute(query, values)
##Insert a new section entry if an updated spl contains
##a new section
def insert_section_entry(table, splid, fieldText):
query = "INSERT INTO `{0}` (`splId`,`field`) VALUES (%s, %s)".format(table)
values = [splid, fieldText]
cursor.execute(query, values)
##Insert values for the many to many relationship
##between structuredProductLabelMetaData and active_moiety
##into the join table spl_has_active_moiety
def link_spl_to_active_moieties(setId, activeMoieties):
cursor.execute("SELECT id FROM structuredProductLabelMetadata WHERE setId=%s",[setId])
splid = cursor.fetchone()[0]
for am in activeMoieties:
cursor.execute("SELECT id FROM active_moiety WHERE name=%s",[am])
try:
amid = cursor.fetchone()[0]
except:
pdb.set_trace()
cursor.execute("INSERT IGNORE INTO spl_has_active_moiety VALUES (%s,%s)",(splid, amid))
##Display db loading progress
def print_progress(current, total, filename):
percent = int(float(current) / float(total) * 100)
message = " ".join([str(percent) + "%", "loaded (" + str(current), "of", str(total) + ")", filename])
if percent == 100:
message += "...done\n"
sys.stdout.write("\r\x1b[K" + message)
sys.stdout.flush()
##Insert new spls into database
def run(logger, spls, limit=None):
count = 0
for splF in spls:
print "\n Start parsing: {0}".format(splF)
#tree = etree.ElementTree(file=splF)
p = XMLParser(huge_tree=True)
tree = parse(splF, parser=p)
root = tree.getroot()
tags = get_tags(root, splF, logger)
#print "[DEBUG] tags: " + str(tags)
if len(tags.keys()) == 0:
print "\nERROR: get_tags failed, most likely because a UNII could not be retrieved for all active moities. Please check the following spl: %s" % splF
continue
if not setid_in_rxnorm(tags['setId']):
logger.info("SetId {0} from file {1} not found in rxnorm".format(tags['setId'],splF))
continue
## check if there is a duplicated setId
cursor.execute("SELECT id FROM structuredProductLabelMetadata WHERE setId=%s",[tags['setId']])
idExists = cursor.fetchall()
if idExists:
print "\n duplicated setId %s in file %s" % (tags['setId'], splF)
continue
try:
insert_active_moieties(tags['activeMoieties'], tags['activeMoietyUNIIs'])
insertQuery = "INSERT INTO structuredProductLabelMetadata(setId, versionNumber, fullName, routeOfAdministration, genericMedicine, representedOrganization, effectiveTime, filename) VALUES(%s,%s,%s,%s,%s,%s,%s,%s)"
values = (tags['setId'], tags['versionNumber'], tags['fullName'], tags['routeOfAdministration'], tags['genericMedicine'], tags['representedOrganization'], tags['effectiveTime'], tags['filename'])
cursor.execute(insertQuery, values)
link_spl_to_active_moieties(tags['setId'], tags['activeMoieties'])
cursor.execute("SELECT id FROM structuredProductLabelMetadata WHERE setId=%s",[tags['setId']])
splId = cursor.fetchone()[0]
#print "[DEBUG] insert active moieties - done"
splSections = get_sections(root)
#print "[DEBUG] splSections: " + str(splSections)
for code in splSections:
cursor.execute("SELECT table_name FROM loinc WHERE loinc='{0}'".format(code))
res = cursor.fetchone()
if res:
table = res[0]
else:
logger.info("Filename: {0}\tSetId: {1}\t no table name from loinc".format(tags['filename'],tags['setId']))
continue
(sectElt, codeElt) = get_section(root, code)
#gag: Recursive function to retrieve text from a section
allText = get_section_text(sectElt, "")
cursor.execute("INSERT INTO `{0}`(splId, field) VALUES({1}, '{2}')".format(table, splId, allText.encode('utf8').replace("'","\\'")))
logger.info("Filename: {0}\tSetId: {1}\tadded".format(tags['filename'],tags['setId']))
count +=1
if limit is not None and count == limit:
break
if len(spls) > 1:
print_progress(spls.index(splF)+1, len(spls), splF)
con.commit()
except mdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
con.rollback()
con.commit()
os.rename ("spls/{0}".format(splF),"problematic-spls/{0}".format(splF))
continue
except:
print "Unexpected error:", sys.exc_info()[0]
con.rollback()
con.commit()
continue
##Create custom logger
def get_logger(filename, loggername, level):
logging.basicConfig(
filename=filename,
format="%(asctime)-6s: %(levelname)s - %(message)s")
logger = logging.getLogger(loggername)
logger.setLevel(level)
return logger
##Update the database with spls contained in UPDATE_DIR.
##Will insert new values into database if spl setid is not found in the database.
def update(logger):
spls = [os.path.join(UPDATE_DIR, f) for f in os.listdir(UPDATE_DIR) if f.endswith(".xml")]
for cnt, spl in enumerate(spls):
p = XMLParser(huge_tree=True)
tree = parse(spl, parser=p)
root = tree.getroot()
tags = get_tags(root, spl, logger)
if len(tags.keys()) == 0:
print "\nERROR: get_tags failed, most likely because a UNII could not be retrieved for all active moities. Please check the following spl: %s" % spl
continue
if not setid_in_rxnorm(tags['setId']):
logger.info("SetId {0} from file {1} not found in rxnorm".format(tags['setId'],spl))
continue
cursor.execute("SELECT id FROM structuredProductLabelMetadata WHERE setId=%s",tags['setId'])
rowid = cursor.fetchone()
if rowid:
rowid = rowid[0]
copy_to_master_dir(spl, setid=tags['setId'])
for name, value in tags.items():
if name == 'activeMoieties':
insert_active_moieties(value, tags["activeMoietyUNIIs"])
link_spl_to_active_moieties(tags['setId'], value)
elif name == "activeMoietyUNIIs":
continue
else:
update_db("structuredProductLabelMetadata", name, value, "id", rowid)
splSections = get_sections(root)
for code in splSections:
try:
cursor.execute("SELECT table_name FROM loinc WHERE loinc={0}".format(code))
table = cursor.fetchone()[0]
except TypeError:
logger.debug("LOINC code not for SPL section found in the database: %s. This section will not be loaded for spl. Try updating the LOINC codes and re-loading this SPL.")
continue
#for sM in tableToSectionMap:
# (code, table) = (sM[0], sM[1])
(sectElt, codeElt) = get_section(root, code)
# if sectElt is None:
# logger.info("No section: %s, %s" % sM)
# continue
##gag: Recursive function to retrieve text from a section
allText = get_section_text(sectElt, "")
##If the section already existed in the spl, update it's entry,
##else add a new entry for the section for that spl
if has_entry(table, "splId", rowid):
update_db(table, "field", allText, "splId", rowid)
else:
insert_section_entry(table, rowid, allText)
logger.info(tags['setId'] + " updated")
else:
copy_to_master_dir(spl)
run(logger, [spl])
print_progress(cnt+1, len(spls), spl)
con.commit()
con.close()
##Wrapper around the UPDATE SQL syntax.
def update_db(tablename, field, value, key, rowid):
names = [tablename, field, key]
values = [value, rowid]
query = "UPDATE `{0}` SET `{1}`= %s WHERE `{2}` = %s".format(*names)
cursor.execute(query, values)
if __name__ == "__main__":
##If run as its own script, clear and repopulate the database tables with the
##spls in SPL_DIR
logger = get_logger('loadDailymedToSql.log', 'main', logging.INFO)
spls = [SPL_DIR + f for f in os.listdir(SPL_DIR) if f.endswith(".xml")]
#clear_tables()
#run(logger, spls, limit=100)
run(logger, spls)
if con:
con.commit()
con.close()
|
kqingcan/bio2rdf-scripts | common/python/parargs.py | <filename>common/python/parargs.py
#!/usr/bin/python
# Parallel xargs.
# <NAME> <<EMAIL>>
import sys, os, signal, getopt
def usage():
print """parargs by <NAME>
Usage: parargs [OPTION] COMMAND [ARGS ...]
For each word read from standard input, run COMMAND with ARGS and the
word as arguments.
-n PROCESSES Run PROCESSES commands in parallel (default 3)
-v, --verbose Print progress reports to stderr
-h, --help Show usage
Report bugs to <<EMAIL>>."""
def main(args):
try:
opts, args = getopt.getopt(args, "n:vh", ["verbose", "help"])
except getopt.GetoptError:
usage()
sys.exit(1)
processes = 3
verbose = False
for (o, a) in opts:
if o in ("-h", "--help"):
usage()
sys.exit(0)
elif o in ("-n"):
try:
processes = int(a)
except ValueError:
processes = -1
elif o in ("-v", "--verbose"):
verbose = True
if args == [] or processes < 1:
usage()
sys.exit(1)
def get_arg():
buf = []
while 1:
while buf == []:
l = sys.stdin.readline()
if l == "":
yield None
return
buf = l.split()
arg, buf = buf[0], buf[1:]
yield arg
reader = get_arg()
at_eof = False
running = {}
while len(running) > 0 or not at_eof:
while len(running) < processes and not at_eof:
arg = reader.next()
if arg is None:
at_eof = True
break
cmd = args + [arg]
if verbose:
print >>sys.stderr, "Starting: " + " ".join(cmd)
pid = os.fork()
if pid == 0:
os.execvp(cmd[0], cmd)
print >>sys.stderr, "Failed to exec:", cmd
sys.exit(20)
running[pid] = cmd
if len(running) > 0:
(pid, status) = os.wait()
if verbose:
print >>sys.stderr, "Finished: " + " ".join(running[pid])
del running[pid]
if __name__ == "__main__":
main(sys.argv[1:])
|
kqingcan/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/mappings/ChEBI-DrugBank-bio2rdf-mapping/scripts/backups/combineDrugBankMappings.py | <gh_stars>10-100
# combineDrugBankMappings.py
#
# Combine FDA UNII to DrugBank mappings created by InChI key and synonym matching
#
# Author: <NAME>
#
FDA_DRUGBANK_BY_INCHI = "PT-UNIIs-Drugbank-09042014.txt"
FDA_DRUGBANK_BY_SYN = "SYNONYMNS-UNIIs-Drugbank-09042014.txt"
FDA_DRUGBANK_COMBINED = "FDA_DRUGBANK_INCHI_AND_SYNONYMNS_09042014.tsv"
f = open(FDA_DRUGBANK_BY_INCHI, 'r')
buf = f.read()
f.close()
ptDic = {}
for ln in buf.split("\n"):
ln = ln.strip()
if ln == "":
break
(uniiPT, unii, drugbankPT, drugbankID) = ln.split("\t")[0:4]
drugbankID = drugbankID.replace("http://www.drugbank.ca/drugs/","")
ptDic[uniiPT] = [uniiPT, unii, drugbankPT, drugbankID]
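# ptDic maps an FDA preferred term to [preferred term, UNII, DrugBank name, DrugBank id]
# for the InChI-based mappings; synonym-based mappings not already covered are collected
# separately in newMappings below.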
newMappings = {}
f = open(FDA_DRUGBANK_BY_SYN, 'r')
buf = f.read()
f.close()
l = buf.split("\n")
for ln in l[1:]:
ln = ln.strip()
if ln == "":
break
(FDA_Preferred_Term, FDA_synonymn, UNII, Drugbank_drug, drugbank_id) = ln.split("\t")
if not ptDic.has_key(FDA_Preferred_Term):
if newMappings.get(FDA_Preferred_Term):
newMappings[FDA_Preferred_Term].append((FDA_synonymn, UNII, Drugbank_drug, drugbank_id))
else:
newMappings[FDA_Preferred_Term] = [(FDA_synonymn, UNII, Drugbank_drug, drugbank_id)]
f = open(FDA_DRUGBANK_COMBINED, 'w')
f.write("\t".join(["FDA_Preferred_Term","UNII","Drugbank_drug","drugbank_id","mapping_type","synonymns_used"]) + "\n")
for k,v in ptDic.iteritems():
f.write("\t".join(v + ["INCHI","N/A"]) + "\n")
(FDA_SYNONYMN, UNII, DRUGBANK_DRUG, DRUGBANK_ID) = range(0,4)
for k,v in newMappings.iteritems():
f.write("\t".join([k, v[0][UNII], v[0][DRUGBANK_DRUG], v[0][DRUGBANK_ID]] + ["SYN","|".join([x[FDA_SYNONYMN] for x in v])]) + "\n")
f.close()
|
rafaelschlatter/strava-heatmap | app/contact/contact.py | from flask import render_template, flash, request, current_app
from flask_mail import Message
from app.contact import contact_bp, mail
from app.forms import ContactForm
@contact_bp.route("/contact", methods=["GET", "POST"])
def contact():
form = ContactForm(request.form)
if request.method == "POST":
name = request.form["name"]
email = request.form["email"]
message = request.form["message"]
if form.validate():
msg = Message(
subject="Heatmap feedback from {}".format(name),
sender=email,
recipients=[current_app.config["MAIL_USERNAME"]],
)
msg.body = message
mail.send(msg)
flash("Thank you, {}. Your message has been sent.".format(name), "info")
return render_template("contact.html", form=form)
|
rafaelschlatter/strava-heatmap | app/index/__init__.py | <gh_stars>1-10
from flask import Blueprint
index_bp = Blueprint("index", __name__)
from app.index import index
|
rafaelschlatter/strava-heatmap | app/contact/__init__.py | from flask import Blueprint
from flask_mail import Mail
contact_bp = Blueprint("contact", __name__)
mail = Mail()
from app.contact import contact
|
rafaelschlatter/strava-heatmap | app/forms.py | <gh_stars>1-10
from wtforms import Form, StringField
from flask_wtf import RecaptchaField
from wtforms.validators import InputRequired, Email
class ContactForm(Form):
name = StringField("Name", validators=[InputRequired("Please enter your name.")])
email = StringField(
"Email",
validators=[
InputRequired("Please enter your email address."),
Email("Please enter a valid email address."),
],
)
message = StringField(
"Message", validators=[InputRequired("Please enter a message.")]
)
recaptcha = RecaptchaField("ReCaptcha")
|
rafaelschlatter/strava-heatmap | app/config.py | <gh_stars>1-10
import os
class Config(object):
SECRET_KEY = os.environ.get("SECRET_KEY")
ENV = os.environ.get("ENV")
MAIL_SERVER = "smtp.gmail.com"
MAIL_PORT = 465
MAIL_USE_TLS = False
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get("GMAIL_ADRESS")
MAIL_PASSWORD = <PASSWORD>("<PASSWORD>")
RECAPTCHA_USE_SSL = True
RECAPTCHA_PUBLIC_KEY = os.environ.get("RECAPTCHA_PUBLIC_KEY")
RECAPTCHA_PRIVATE_KEY = os.environ.get("RECAPTCHA_PRIVATE_KEY")
|
rafaelschlatter/strava-heatmap | application.py | <filename>application.py
from app import create_app
app = create_app()
# Only for debugging purposes
if __name__ == "__main__":
host = "localhost"
port = 5000
debug = True
app.run(host=host, port=port, debug=debug)
|
rafaelschlatter/strava-heatmap | app/index/index.py | <gh_stars>1-10
from flask import render_template
from app.index import index_bp
@index_bp.route("/")
def index():
return render_template("index.html")
|
rafaelschlatter/strava-heatmap | app/heatmap/__init__.py | <gh_stars>1-10
from flask import Blueprint
heatmap_bp = Blueprint("heatmap", __name__)
from app.heatmap import heatmap
|
rafaelschlatter/strava-heatmap | app/heatmap/heatmap.py | from flask import render_template
from app.heatmap import heatmap_bp
@heatmap_bp.route("/heatmap")
def heatmap():
return render_template("heatmap.html")
|
rafaelschlatter/strava-heatmap | app/__init__.py | from flask import Flask
from app.config import Config
from app.index import index_bp
from app.contact import contact_bp, mail
from app.heatmap import heatmap_bp
def create_app():
app = Flask(__name__)
app.config.from_object(Config)
app.register_blueprint(index_bp)
app.register_blueprint(contact_bp)
app.register_blueprint(heatmap_bp)
mail.init_app(app)
return app
|
rafaelschlatter/strava-heatmap | scripts/create_heatmap.py | <filename>scripts/create_heatmap.py<gh_stars>1-10
import os
import requests
import logging
import json
import time
import folium
from folium.plugins import HeatMap, LocateControl
from stravaio import StravaIO
from branca.element import Template, MacroElement
def refresh_token():
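# Use the stored refresh token to obtain a new Strava access token once the current one has expired.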
if time.time() > int(os.environ["STRAVA_TOKEN_EXPIRES_AT"]):
logging.critical("Access token expired. Need to refresh token.")
payload = {
"client_id": os.environ["STRAVA_CLIENT_ID"],
"client_secret": os.environ["STRAVA_CLIENT_SECRET"],
"grant_type": "refresh_token",
"refresh_token": os.environ["STRAVA_REFRESH_TOKEN"],
}
response = requests.request(
"POST", "https://www.strava.com/api/v3/oauth/token", data=payload
)
response.raise_for_status()
response_dict = json.loads(response.text)
os.environ["STRAVA_ACCESS_TOKEN"] = str(response_dict["access_token"])
os.environ["STRAVA_REFRESH_TOKEN"] = str(response_dict["refresh_token"])
os.environ["STRAVA_TOKEN_EXPIRES_AT"] = str(response_dict["expires_at"])
else:
logging.critical("Access token still valid. Can use existing token.")
def add_html(m):
template = """
{% macro html(this, kwargs) %}
<!doctype html>
<html lang="en">
<body>
<div id='maplegend' class='maplegend'
style='position: absolute; z-index:9999; border:0px solid grey;; background-color:rgba(255, 255, 255, 0.9);
border-radius:6px; padding: 0px; left: 50px; top: 11px;'>
<div class='home-button'><a href="https://my-heatmap.azurewebsites.net/" class="btn btn-outline-dark">Back to home</a></div>
</div>
<div id='maplegend' class='maplegend'
style='position: absolute; z-index:9999; border:1px solid grey; background-color:rgba(255, 255, 255, 0.8);
border-radius:6px; padding: 10px; font-size:15px; right: 11px; top: 150px;'>
<div class='legend-title'>Activity type</div>
<div class='legend-scale'>
<ul class='legend-labels'>
<li><span style='background:#ff9933;opacity:0.7;'></span>Run</li>
<li><span style='background:#f6ff00;opacity:0.7;'></span>Ice Skate</li>
<li><span style='background:#00ff55;opacity:0.7;'></span>Canoe</li>
<li><span style='background:#00ffff;opacity:0.7;'></span>Nordic Ski</li>
<li><span style='background:#00ccff;opacity:0.7;'></span>Alpine Ski</li>
<li><span style='background:#0066ff;opacity:0.7;'></span>Ride</li>
<li><span style='background:#cc00ff;opacity:0.7;'></span>Other</li>
</ul>
</div>
</div>
</body>
</html>
<style type='text/css'>
.maplegend .legend-title {
text-align: left;
margin-bottom: 5px;
font-weight: bold;
font-size: 90%;
}
.maplegend .legend-scale ul {
margin: 0;
margin-bottom: 5px;
padding: 0;
float: left;
list-style: none;
}
.maplegend .legend-scale ul li {
font-size: 80%;
list-style: none;
margin-left: 0;
line-height: 18px;
margin-bottom: 2px;
}
.maplegend ul.legend-labels li span {
display: block;
float: left;
height: 16px;
width: 30px;
margin-right: 5px;
margin-left: 0;
border: 1px solid #999;
}
.maplegend .legend-source {
font-size: 80%;
color: #777;
clear: both;
}
.maplegend a {
color: #777;
}
</style>
{% endmacro %}"""
macro = MacroElement()
macro._template = Template(template)
m.get_root().add_child(macro)
return m
def downsample(l, n):
"""Returns every nth element from list l. Returns the
original list if n is set to 1.
Used to reduce the number of GPS points per activity,
to improve performance of the website.
"""
return l[0::n]
def download_data(activities, reduce_sample=True):
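# Fetch the lat/lng streams of each activity (skipping "Workout" entries) and optionally downsample the GPS points.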
data = []
for a in activities:
if a.type == "Workout":
continue
streams = client.get_activity_streams(a.id, athlete.id)
try:
points = list(zip(streams.lat, streams.lng))
if reduce_sample:
points = downsample(l=points, n=4)
activity = {"id": a.id, "type": a.type, "coordinates": points}
data.append(activity)
logging.critical("Downloaded activity with id: {}".format(a.id))
except Exception:
logging.error("Failed to download activity with id: {}".format(a.id))
return data
def create_activity_layer(activities, opacity=0.5, weight=1):
activity_layer = folium.FeatureGroup(name="Activities", show=True, overlay=True)
if len(activities) == 0:
logging.info("No activities found, returning empty folium map.")
return None
data = []
for a in activities:
if a["type"] == "Workout":
continue
if a["type"] == "Run":
folium.PolyLine(
locations=a["coordinates"],
color="#ff9933",
opacity=opacity,
weight=weight,
).add_to(activity_layer)
elif a["type"] == "Ride":
folium.PolyLine(
locations=a["coordinates"],
color="#0066ff",
opacity=opacity,
weight=weight,
).add_to(activity_layer)
elif a["type"] == "NordicSki":
folium.PolyLine(
locations=a["coordinates"],
color="#00ffff",
opacity=opacity,
weight=weight,
).add_to(activity_layer)
elif a["type"] == "AlpineSki":
folium.PolyLine(
locations=a["coordinates"],
color="#00ccff",
opacity=opacity,
weight=weight,
).add_to(activity_layer)
elif a["type"] == "Canoeing":
folium.PolyLine(
locations=a["coordinates"],
color="#00ff55",
opacity=opacity,
weight=weight,
).add_to(activity_layer)
elif a["type"] == "IceSkate":
folium.PolyLine(
locations=a["coordinates"],
color="#f6ff00",
opacity=opacity,
weight=weight,
).add_to(activity_layer)
else:
folium.PolyLine(
locations=a["coordinates"],
color="#cc00ff",
opacity=opacity,
weight=weight,
).add_to(activity_layer)
logging.critical("Successfully created activity layer.")
return activity_layer
def create_heatmap_layer(data, radius=5, blur=5):
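# Flatten the coordinates of all activities into one point list and render it as a single folium HeatMap layer.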
heatmap_layer = folium.FeatureGroup(name="Heatmap", show=False, overlay=True)
heatmap_data = []
for activity in data:
heatmap_data.append(activity["coordinates"])
flat_list = [item for sublist in heatmap_data for item in sublist]
HeatMap(data=flat_list, radius=radius, blur=blur,).add_to(heatmap_layer)
logging.critical("Successfully created heatmap layer.")
return heatmap_layer
if __name__ == "__main__":
refresh_token()
client = StravaIO(access_token=os.environ["STRAVA_ACCESS_TOKEN"])
athlete = client.get_logged_in_athlete()
activities = client.get_logged_in_athlete_activities(after=20170101)
data = download_data(activities=activities, reduce_sample=True)
activity_layer = create_activity_layer(activities=data, opacity=0.5, weight=2)
heatmap_layer = create_heatmap_layer(data=data, radius=5, blur=5)
m = folium.Map(
name="Strava Heatmap",
tiles="cartodbpositron",
location=[59.925, 10.728123],
zoom_start=11.5,
control_scale=True,
)
m = add_html(m)
folium.TileLayer("cartodbpositron").add_to(m)
folium.TileLayer("cartodbdark_matter").add_to(m)
# heatmap_layer.add_to(m)
activity_layer.add_to(m)
LocateControl().add_to(m)
folium.LayerControl().add_to(m)
m.save("app/templates/heatmap.html")
|
MartinGoldfinger/TicTacToe | TicTacToe.py | <gh_stars>0
"""
Tic Tac Toe game found on /r/Python
Modified from:
https://www.reddit.com/r/Python/comments/6qvu38/my_code_for_tictactoe_beginner_compared_to_my/
"""
import sys
class Player:
"""
Functions:
__init__ : assign the name, symbol, initialize variables
winner_check : Checks the board for a winner
player_move : user enters board position, will check for valid location
clear_board : reset self.board list
Attributes:
name (str) - player name
symbol (char) - player symbol, X or O
board (list) - stores user moves
win_count (int) - Number of times player has won a round
"""
def __init__(self, symbol):
self.name = input("Enter your name: ")
self.symbol = symbol
self.board = []
self.win_count = 0
def player_move(self):
"""
Prompt the player for a board position, validate it, and record the move
"""
while True:
move = input("{0.name}'s ( {0.symbol} ) turn, please choose placement (1-9): ".format(self))
if move in ('1', '2', '3', '4', '5', '6', '7', '8', '9') and (int(move) not in game_list):
self.board.append(int(move))
game_list.append(int(move))
position[int(move)-1] = (self.symbol)
print_board()
break #When a valid move is made get out of the loop and function
elif move not in ('1', '2', '3', '4', '5', '6', '7', '8', '9'):
print('That is not a valid move! Try again')
else:
print('That move is taken!, Try again')
def winner_check(self):
"""
Returns Boolean - Checks player's board list for 3 in a row, column or diagonal
"""
if (1 in self.board) and (2 in self.board) and (3 in self.board) or \
(4 in self.board) and (5 in self.board) and (6 in self.board) or \
(7 in self.board) and (8 in self.board) and (9 in self.board) or \
(1 in self.board) and (4 in self.board) and (7 in self.board) or \
(2 in self.board) and (5 in self.board) and (8 in self.board) or \
(3 in self.board) and (6 in self.board) and (9 in self.board) or \
(1 in self.board) and (5 in self.board) and (9 in self.board) or \
(3 in self.board) and (5 in self.board) and (7 in self.board):
print('{0.name} wins as {0.symbol}.'.format(self))
self.win_count += 1
return True
else:
return False
def clear_board(self):
"""
Clear the board, used when resetting rounds
"""
self.board = []
#End of Player class
def print_board():
"""
Prints current board and position guide
"""
print('')
print(' '+ position[0] +' | '+ position[1] +' | '+ position[2] + ' ' * 10 + '1' +' | '+ '2' +' | '+ '3')
print('-' * 11 + ' ' * 8 + '-' * 11)
print(' '+ position[3] +' | '+ position[4] +' | '+ position[5] + ' ' * 10 + '4' +' | '+ '5' +' | '+ '6')
print('-' * 11 + ' ' * 8 + '-' * 11)
print(' '+ position[6] +' | '+ position[7] +' | '+ position[8] + ' ' * 10 + '7' +' | '+ '8' +' | '+ '9')
print('')
#End of print_board function
"""
Start main game
"""
play_game = True
print ('Welcome to Tic Tac Toe.')
print("Player 1", end=' ')
player_one = Player('X')
print("Player 2", end=' ')
player_two = Player('O')
print("Player 1 is {0.name} using {0.symbol} and Player 2 is {1.name} using {1.symbol}".format(player_one, player_two))
input("Press Enter to continue...")
while play_game:
position = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']
game_list = []
move_count = 0
winner = False
print_board()
#Game round loop, alternates based on move_count
while (move_count < 9) and (winner is False):
if move_count % 2 == 0:
player_one.player_move()
winner = player_one.winner_check()
else:
player_two.player_move()
winner = player_two.winner_check()
move_count += 1
if winner:
print('Congrats!')
else:
print("It's a tie!")
print('')
print('Current score: {0.name} has {0.win_count} and {1.name} has {1.win_count}'.format(player_one, player_two))
print('')
#play again loop, validates user input
while True:
play_again = input('Play again? (y/n)')
if play_again == 'y':
print('Resetting...')
player_one.clear_board()
player_two.clear_board()
break #breaking play again input loop
elif play_again == 'n':
print('Thanks for playing!')
sys.exit()
else:
print('answer not valid, please use y or n')
|
chaturanand/Voice-based-gender-recognition | Code/DataManager.py | <filename>Code/DataManager.py<gh_stars>0
import os
import sys
import math
import tarfile
class DataManager:
def __init__(self, dataset_path):
self.dataset_path = dataset_path
def extract_dataset(self, compressed_dataset_file_name, dataset_directory):
try:
# extract files to dataset folder
tar = tarfile.open(compressed_dataset_file_name, "r:gz")
tar.extractall(dataset_directory)
tar.close()
print("Files extraction was successfull ...")
except:
print("Ecxception raised: No extraction was done ...")
def make_folder(self, folder_path):
try:
os.mkdir(folder_path)
print(folder_path, "was created ...")
except:
print("Ecxception raised: ", folder_path, "could not be created ...")
def move_files(self, src, dst, group):
for fname in group:
os.rename(src + '/' + fname, dst + '/' + fname)
def get_fnames_from_dict(self, dataset_dict, f_or_m):
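# Split each speaker's files roughly 2/3 for training and 1/3 for testing.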
training_data, testing_data = [], []
for i in range(1,5):
length_data = len(dataset_dict[f_or_m +"000" + str(i)])
length_separator = math.trunc(length_data*2/3)
training_data += dataset_dict[f_or_m + "000" + str(i)][:length_separator]
testing_data += dataset_dict[f_or_m + "000" + str(i)][length_separator:]
return training_data, testing_data
def manage(self):
# read config file and get path to compressed dataset
compressed_dataset_file_name = self.dataset_path
dataset_directory = compressed_dataset_file_name.split(".")[0]
# create a folder for the data
try:
os.mkdir(dataset_directory)
except:
pass
# extract dataset
self.extract_dataset(compressed_dataset_file_name, dataset_directory)
# select females files and males files
file_names = [fname for fname in os.listdir(dataset_directory) if ("f0" in fname or "m0" in fname)]
dataset_dict = {"f0001": [], "f0002": [], "f0003": [], "f0004": [], "f0005": [],
"m0001": [], "m0002": [], "m0003": [], "m0004": [], "m0005": [], }
# fill in dictionary
for fname in file_names:
dataset_dict[fname.split('_')[0]].append(fname)
# divide and group file names
training_set, testing_set = {},{}
training_set["females"], testing_set["females"] = self.get_fnames_from_dict(dataset_dict, "f")
training_set["males" ], testing_set["males" ] = self.get_fnames_from_dict(dataset_dict, "m")
# make training and testing folders
self.make_folder("TrainingData")
self.make_folder("TestingData")
self.make_folder("TrainingData/females")
self.make_folder("TrainingData/males")
self.make_folder("TestingData/females")
self.make_folder("TestingData/males")
# move files
self.move_files(dataset_directory, "TrainingData/females", training_set["females"])
self.move_files(dataset_directory, "TrainingData/males", training_set["males"])
self.move_files(dataset_directory, "TestingData/females", testing_set["females"])
self.move_files(dataset_directory, "TestingData/males", testing_set["males"])
if __name__== "__main__":
data_manager = DataManager("SLR45.tgz")
data_manager.manage()
|
chaturanand/Voice-based-gender-recognition | Code/GenderIdentifier.py | <reponame>chaturanand/Voice-based-gender-recognition
import os
import pickle
import warnings
import numpy as np
from FeaturesExtractor import FeaturesExtractor
warnings.filterwarnings("ignore")
class GenderIdentifier:
def __init__(self, females_files_path, males_files_path, females_model_path, males_model_path):
self.females_training_path = females_files_path
self.males_training_path = males_files_path
self.error = 0
self.total_sample = 0
self.features_extractor = FeaturesExtractor()
# load models
self.females_gmm = pickle.load(open(females_model_path, 'rb'))
self.males_gmm = pickle.load(open(males_model_path, 'rb'))
def process(self):
files = self.get_file_paths(self.females_training_path, self.males_training_path)
# read the test directory and get the list of test audio files
for file in files:
self.total_sample += 1
print("%10s %8s %1s" % ("--> TESTING", ":", os.path.basename(file)))
vector = self.features_extractor.extract_features(file)
winner = self.identify_gender(vector)
expected_gender = file.split("/")[1][:-1]
print("%10s %6s %1s" % ("+ EXPECTATION",":", expected_gender))
print("%10s %3s %1s" % ("+ IDENTIFICATION", ":", winner))
if winner != expected_gender: self.error += 1
print("----------------------------------------------------")
accuracy = ( float(self.total_sample - self.error) / float(self.total_sample) ) * 100
accuracy_msg = "*** Accuracy = " + str(round(accuracy, 3)) + "% ***"
print(accuracy_msg)
def get_file_paths(self, females_training_path, males_training_path):
# get file paths
females = [ os.path.join(females_training_path, f) for f in os.listdir(females_training_path) ]
males = [ os.path.join(males_training_path, f) for f in os.listdir(males_training_path) ]
files = females + males
return files
def identify_gender(self, vector):
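# Score the feature vector under the female and male GMMs and return the gender with the higher total log-likelihood.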
# female hypothesis scoring
is_female_scores = np.array(self.females_gmm.score(vector))
is_female_log_likelihood = is_female_scores.sum()
# male hypothesis scoring
is_male_scores = np.array(self.males_gmm.score(vector))
is_male_log_likelihood = is_male_scores.sum()
print("%10s %5s %1s" % ("+ FEMALE SCORE",":", str(round(is_female_log_likelihood, 3))))
print("%10s %7s %1s" % ("+ MALE SCORE", ":", str(round(is_male_log_likelihood,3))))
if is_male_log_likelihood > is_female_log_likelihood: winner = "male"
else : winner = "female"
return winner
if __name__== "__main__":
gender_identifier = GenderIdentifier("TestingData/females", "TestingData/males", "females.gmm", "males.gmm")
gender_identifier.process()
|
masashi-y/myccg | depccg/parsing.py | <filename>depccg/parsing.py
from typing import Callable, List, Dict, Union, Tuple
from multiprocessing import Pool
import numpy
import depccg._parsing
import time
import math
from depccg.types import Token, CombinatorResult, ScoringResult
from depccg.tree import ScoredTree
from depccg.cat import Category
def _chunks(list_, num_chunks):
splits = math.ceil(len(list_) / max(num_chunks, 1))
for i in range(0, len(list_), splits):
yield list_[i:i + splits]
def _binarize(indices, length):
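# Boolean mask of the given length: False at `indices`, True everywhere else (marks the categories to be suppressed).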
result = numpy.ones(length, dtype=bool)
result[indices] = 0
return result
def _type_check(doc, score_results, categories):
many_sentences = (
isinstance(doc, list)
and isinstance(doc[0], list)
and isinstance(doc[0][0], Token)
)
many_scores = (
isinstance(score_results, list)
and isinstance(score_results[0], ScoringResult)
)
if (
many_sentences != many_scores
or many_sentences and len(doc) != len(score_results)
):
raise RuntimeError(
'doc and score_results are not compatible types.')
if not many_sentences:
score_results = [score_results]
doc = [doc]
num_tags = len(categories)
for tokens, (tag_scores, dep_scores) in zip(doc, score_results):
if num_tags != tag_scores.shape[1]:
raise RuntimeError(
("all inputs to depccg.parsing.run must contain scores for"
" the equal number of categories as the `categories` list.")
)
num_tokens = len(tokens)
expected_tag_score = (num_tokens, num_tags)
expected_dep_score = (num_tokens, num_tokens + 1)
if (
expected_tag_score != tag_scores.shape
or expected_dep_score != dep_scores.shape
):
raise RuntimeError(
('invalid shape of input matrices:\n'
f'Expected P_tag: {expected_tag_score}, P_dep: {expected_dep_score}\n'
f'Actual P_tag: {tag_scores.shape}, P_dep: {dep_scores.shape}')
)
return doc, score_results
def apply_category_filters(
doc: Union[Token, List[List[Token]]],
score_results: Union[ScoringResult, List[ScoringResult]],
categories: List[Category],
category_dict: Dict[str, List[Category]],
large_negative_value: float = -10e+32,
) -> Union[Tuple[List[Token], ScoringResult], Tuple[List[List[Token]], List[ScoringResult]]]:
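# For each token listed in category_dict, push the scores of all supertags outside its allowed category list down to a large negative value.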
doc, score_results = _type_check(doc, score_results, categories)
category_ids = {
cat: index for index, cat in enumerate(categories)
}
category_dict = {
word: _binarize(
[category_ids[cat] for cat in cats],
score_results[0].tag_scores.shape[1]
)
for word, cats in category_dict.items()
}
for tokens, (tag_scores, _) in zip(doc, score_results):
for index, token in enumerate(tokens):
if token.word in category_dict:
tag_scores[index, category_dict[token.word]
] = large_negative_value
return doc, score_results
def run(
doc: Union[Token, List[List[Token]]],
score_results: Union[ScoringResult, List[ScoringResult]],
categories: List[Category],
root_categories: List[Category],
binary_fun: Callable[[Category, Category], List[CombinatorResult]],
unary_fun: Callable[[Category], List[CombinatorResult]],
unary_penalty: float = 0.1,
beta: float = 0.00001,
use_beta: bool = True,
pruning_size: int = 50,
nbest: int = 1,
max_step: int = 10000000,
max_length: int = 250,
processes: int = 2,
max_chunk_size: int = 20,
) -> List[List[ScoredTree]]:
doc, score_results = _type_check(doc, score_results, categories)
args = (
categories,
binary_fun,
unary_fun,
root_categories
)
kwargs = {
'num_tags': score_results[0][0].shape[1],
'unary_penalty': unary_penalty,
'beta': beta,
'use_beta': use_beta,
'pruning_size': pruning_size,
'nbest': nbest,
'max_step': max_step,
'max_length': max_length
}
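# Parse small batches in-process; split larger batches into chunks handled by a multiprocessing Pool.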
if len(doc) <= max_chunk_size:
results = depccg._parsing.run(
doc,
score_results,
*args,
**kwargs,
)
else:
chunks = _chunks(list(zip(doc, score_results)), processes)
with Pool(processes) as pool:
tasks = []
for chunk_index, chunk in enumerate(chunks):
doc_, score_results_ = zip(*chunk)
task = pool.apply_async(
depccg._parsing.run,
args=(list(doc_), list(score_results_)) + args,
kwds={**kwargs, 'process_id': chunk_index},
)
tasks.append(task)
while not all(task.ready() for task in tasks):
time.sleep(1)
results = [
result
for task in tasks
for result in task.get()
]
return results
|
masashi-y/myccg | depccg/types.py | <gh_stars>10-100
from typing import Optional, NamedTuple, Callable, List
from pathlib import Path
import re
import numpy
from depccg.cat import Category
dunder_pattern = re.compile("__.*__")
protected_pattern = re.compile("_.*")
class Token(dict):
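# A dict whose keys are also readable as attributes (token.word == token["word"]).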
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __getattr__(self, item):
if dunder_pattern.match(item) or protected_pattern.match(item):
return super().__getattr__(item)
return self[item]
def __repr__(self):
res = super().__repr__()
return f'Token({res})'
@classmethod
def of_piped(cls, string: str) -> 'Token':
# WORD|POS|NER or WORD|LEMMA|POS|NER
# or WORD|LEMMA|POS|NER|CHUNK
items = string.split('|')
if len(items) == 5:
word, lemma, pos, entity, chunk = items
elif len(items) == 4:
word, lemma, pos, entity = items
chunk = 'XX'
else:
assert len(items) == 3
word, pos, entity = items
lemma = 'XX'
chunk = 'XX'
return Token(
word=word,
lemma=lemma,
pos=pos,
entity=entity,
chunk=chunk
)
@classmethod
def of_word(cls, word: str) -> 'Token':
return Token(
word=word,
lemma='XX',
pos='XX',
entity='XX',
chunk='XX'
)
class CombinatorResult(NamedTuple):
cat: Category
op_string: str
op_symbol: str
head_is_left: bool
class ScoringResult(NamedTuple):
tag_scores: numpy.ndarray
dep_scores: numpy.ndarray
Combinator = Callable[[Category, Category], Optional[CombinatorResult]]
ApplyBinaryRules = Callable[..., List[CombinatorResult]]
ApplyUnaryRules = Callable[..., List[CombinatorResult]]
class GrammarConfig(NamedTuple):
apply_binary_rules: ApplyBinaryRules
apply_unary_rules: ApplyUnaryRules
class ModelConfig(NamedTuple):
framework: str
name: str
url: str
config: Path
semantic_templates: Path
|
masashi-y/myccg | depccg/grammar/ja.py | <reponame>masashi-y/myccg
from typing import Optional, List, Dict, Set, TypeVar, Tuple
from depccg.cat import Category
from depccg.unification import Unification
from depccg.types import Combinator, CombinatorResult
X = TypeVar('X')
Pair = Tuple[X, X]
def _is_modifier(x: Category) -> bool:
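# A modifier category has the form X/X or X\X (its argument equals its result).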
return x.is_functor and x.left == x.right
def forward_application(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("a/b", "b")
if uni(x, y):
result = y if _is_modifier(x) else uni['a']
return CombinatorResult(
cat=result,
op_string="fa",
op_symbol=">",
head_is_left=False,
)
return None
def backward_application(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("b", "a\\b")
if uni(x, y):
result = x if _is_modifier(y) else uni['a']
return CombinatorResult(
cat=result,
op_string="ba",
op_symbol="<",
head_is_left=False,
)
return None
def forward_composition(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("a/b", "b/c")
if uni(x, y):
result = y if _is_modifier(x) else uni['a'] / uni['c']
return CombinatorResult(
cat=result,
op_string="fc",
op_symbol=">B",
head_is_left=False,
)
return None
def generalized_backward_composition1(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("b\\c", "a\\b")
if uni(x, y):
result = x if _is_modifier(y) else uni['a'] | uni['c']
return CombinatorResult(
cat=result,
op_string="bx",
op_symbol="<B1",
head_is_left=False,
)
return None
def generalized_backward_composition2(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("(b\\c)|d", "a\\b")
if uni(x, y):
result = x if _is_modifier(y) else x.functor(
uni['a'] | uni['c'], uni['d'])
return CombinatorResult(
cat=result,
op_string="bx",
op_symbol="<B2",
head_is_left=False,
)
return None
def generalized_backward_composition3(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("((b\\c)|d)|e", "a\\b")
if uni(x, y):
result = x if _is_modifier(y) else x.functor(
x.left.functor(uni['a'] | uni['c'], uni['d']), uni['e']
)
return CombinatorResult(
cat=result,
op_string="bx",
op_symbol="<B3",
head_is_left=False,
)
return None
def generalized_backward_composition4(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("(((b\\c)|d)|e)|f", "a\\b")
if uni(x, y):
result = x if _is_modifier(y) else x.functor(
x.left.functor(
x.left.left.functor(uni['a'] | uni['c'], uni['d']),
uni['e']
),
uni['f']
)
return CombinatorResult(
cat=result,
op_string="bx",
op_symbol="<B4",
head_is_left=False,
)
return None
def generalized_forward_composition1(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("a/b", "b\\c")
if uni(x, y):
result = y if _is_modifier(x) else uni['a'] / uni['c']
return CombinatorResult(
cat=result,
op_string="fx",
op_symbol=">Bx1",
head_is_left=False,
)
return None
def generalized_forward_composition2(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("a/b", "(b\\c)|d")
if uni(x, y):
result = y if _is_modifier(x) else y.functor(
uni['a'] | uni['c'], uni['d'])
return CombinatorResult(
cat=result,
op_string="fx",
op_symbol=">Bx2",
head_is_left=False,
)
return None
def generalized_forward_composition3(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("a/b", "((b\\c)|d)|e")
if uni(x, y):
result = y if _is_modifier(x) else y.functor(y.left.functor(
uni['a'] | uni['c'], uni['d']), uni['e']
)
return CombinatorResult(
cat=result,
op_string="fx",
op_symbol=">Bx3",
head_is_left=False,
)
return None
_possible_root_categories = [
Category.parse("NP[case=nc,mod=nm,fin=f]"),
Category.parse("NP[case=nc,mod=nm,fin=t]"),
Category.parse("S[mod=nm,form=attr,fin=t]"),
Category.parse("S[mod=nm,form=base,fin=f]"),
Category.parse("S[mod=nm,form=base,fin=t]"),
Category.parse("S[mod=nm,form=cont,fin=f]"),
Category.parse("S[mod=nm,form=cont,fin=t]"),
Category.parse("S[mod=nm,form=da,fin=f]"),
Category.parse("S[mod=nm,form=da,fin=t]"),
Category.parse("S[mod=nm,form=hyp,fin=t]"),
Category.parse("S[mod=nm,form=imp,fin=f]"),
Category.parse("S[mod=nm,form=imp,fin=t]"),
Category.parse("S[mod=nm,form=r,fin=t]"),
Category.parse("S[mod=nm,form=s,fin=t]"),
Category.parse("S[mod=nm,form=stem,fin=f]"),
Category.parse("S[mod=nm,form=stem,fin=t]")
]
def conjoin(x: Category, y: Category) -> Optional[CombinatorResult]:
if (
x in _possible_root_categories
and y in _possible_root_categories
):
# if x == y and x in _possible_root_categories:
result = y
return CombinatorResult(
cat=result,
op_string="other",
op_symbol="SSEQ",
head_is_left=False,
)
combinators: List[Combinator] = [
forward_application,
backward_application,
forward_composition,
generalized_backward_composition1,
generalized_backward_composition2,
generalized_backward_composition3,
generalized_backward_composition4,
generalized_forward_composition1,
generalized_forward_composition2,
generalized_forward_composition3,
conjoin,
]
def apply_binary_rules(
x: Category,
y: Category,
seen_rules: Optional[Set[Pair[Category]]] = None,
) -> List[CombinatorResult]:
key = (x, y)
results = []
if seen_rules is None or key in seen_rules:
for combinator in combinators:
result = combinator(*key)
if result is not None:
results.append(result)
return results
def _unary_rule_symbol(x: Category) -> str:
features = set(x.arg(0).feature.items())
if ('mod', 'adn') in features:
if x.clear_features == 'S':
return 'ADNext'
return 'ADNint'
elif ('mod', 'adv') in features:
if x.clear_features == 'S\\NP':
return 'ADV1'
elif x.clear_features == '(S\\NP)\\NP':
return 'ADV2'
return 'ADV0'
return 'OTHER'
def apply_unary_rules(
x: Category,
unary_rules: Dict[Category, List[Category]]
) -> List[CombinatorResult]:
if x not in unary_rules:
return []
results = []
for result in unary_rules[x]:
op_string = _unary_rule_symbol(x)
results.append(
CombinatorResult(
cat=result,
op_string=op_string,
op_symbol=op_string,
head_is_left=True,
)
)
return results
|
masashi-y/myccg | depccg/tools/tagger.py | import argparse
from depccg.tools.reader import read_trees_guess_extension
from depccg.printer import print_
from depccg.lang import set_global_language_to
from depccg.annotator import english_annotator
from depccg.instance_models import SEMANTIC_TEMPLATES
from depccg.types import ScoringResult
LANG = 'en'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'PATH',
help='path to either of *.auto, *.xml, *.jigg.xml, *.ptb')
parser.add_argument(
'--annotator',
default='spacy',
choices=english_annotator.keys(),
help='annotate POS, named entity, and lemmas using this library')
parser.add_argument(
'-f',
'--format',
default='xml',
choices=[
'auto', 'xml', 'prolog', 'jigg_xml',
'jigg_xml_ccg2lambda', 'json'],
help='output format')
parser.add_argument(
'--semantic-templates',
help='semantic templates used in "ccg2lambda" format output')
args = parser.parse_args()
trees = [
[ScoringResult(tree, 0.0)]
for _, _, tree in read_trees_guess_extension(args.PATH)
]
set_global_language_to(LANG)
semantic_templates = args.semantic_templates or SEMANTIC_TEMPLATES[LANG]
print_(
trees,
format=args.format,
semantic_templates=semantic_templates
)
|
masashi-y/myccg | depccg/lang.py |
import logging
logger = logging.getLogger(__name__)
GLOBAL_LANG_NAME = 'en'
def set_global_language_to(lang: str) -> None:
global GLOBAL_LANG_NAME
logger.info('Setting the global language config to: %s', lang)
GLOBAL_LANG_NAME = lang
def get_global_language() -> str:
return GLOBAL_LANG_NAME
|
masashi-y/myccg | tests/test_annotator.py | <reponame>masashi-y/myccg
from depccg.annotator import annotate_using_janome, annotate_using_spacy
from depccg.types import Token
def test_spacy():
sentences = [
["The Penn Treebank has recently implemented a new syntactic annotation scheme,",
"designed to highlight aspects of predicate-argument structure."],
["This paper discusses the implementation of crucial aspects of this new annotation scheme."],
]
tokens = [
[
Token(**{'word': 'The', 'pos': 'DT', 'entity': 'B-ORG',
'lemma': 'the', 'chunk': 'XX'}),
Token(**{'word': 'Penn', 'pos': 'NNP', 'entity': 'I-ORG',
'lemma': 'penn', 'chunk': 'XX'}),
Token(**{'word': 'Treebank', 'pos': 'NNP', 'entity': 'I-ORG',
'lemma': 'treebank', 'chunk': 'XX'}),
Token(**{'word': 'has', 'pos': 'VBZ', 'entity': 'O',
'lemma': 'have', 'chunk': 'XX'}),
Token(**{'word': 'recently', 'pos': 'RB', 'entity': 'O',
'lemma': 'recently', 'chunk': 'XX'}),
Token(**{'word': 'implemented', 'pos': 'VBN', 'entity': 'O',
'lemma': 'implement', 'chunk': 'XX'}),
Token(**{'word': 'a', 'pos': 'DT', 'entity': 'O',
'lemma': 'a', 'chunk': 'XX'}),
Token(**{'word': 'new', 'pos': 'JJ', 'entity': 'O',
'lemma': 'new', 'chunk': 'XX'}),
Token(**{'word': 'syntactic', 'pos': 'JJ', 'entity': 'O',
'lemma': 'syntactic', 'chunk': 'XX'}),
Token(**{'word': 'annotation', 'pos': 'NN',
'entity': 'O', 'lemma': 'annotation', 'chunk': 'XX'}),
Token(**{'word': 'scheme', 'pos': 'NN', 'entity': 'O',
'lemma': 'scheme', 'chunk': 'XX'}),
Token(**{'word': ',', 'pos': ',', 'entity': 'O',
'lemma': ',', 'chunk': 'XX'}),
Token(**{'word': 'designed', 'pos': 'VBN', 'entity': 'O',
'lemma': 'design', 'chunk': 'XX'}),
Token(**{'word': 'to', 'pos': 'TO', 'entity': 'O',
'lemma': 'to', 'chunk': 'XX'}),
Token(**{'word': 'highlight', 'pos': 'VB', 'entity': 'O',
'lemma': 'highlight', 'chunk': 'XX'}),
Token(**{'word': 'aspects', 'pos': 'NNS', 'entity': 'O',
'lemma': 'aspect', 'chunk': 'XX'}),
Token(**{'word': 'of', 'pos': 'IN', 'entity': 'O',
'lemma': 'of', 'chunk': 'XX'}),
Token(**{'word': 'predicate', 'pos': 'NN', 'entity': 'O',
'lemma': 'predicate', 'chunk': 'XX'}),
Token(**{'word': '-', 'pos': 'HYPH', 'entity': 'O',
'lemma': '-', 'chunk': 'XX'}),
Token(**{'word': 'argument', 'pos': 'NN', 'entity': 'O', 'lemma': 'argument',
'chunk': 'XX'}),
Token(**{'word': 'structure', 'pos': 'NN', 'entity': 'O',
'lemma': 'structure', 'chunk': 'XX'}),
Token(**{'word': '.', 'pos': '.', 'entity': 'O',
'lemma': '.', 'chunk': 'XX'})
],
[
Token(**{'word': 'This', 'pos': 'DT', 'entity': 'O',
'lemma': 'this', 'chunk': 'XX'}),
Token(**{'word': 'paper', 'pos': 'NN', 'entity': 'O',
'lemma': 'paper', 'chunk': 'XX'}),
Token(**{'word': 'discusses', 'pos': 'VBZ', 'entity': 'O',
'lemma': 'discuss', 'chunk': 'XX'}),
Token(**{'word': 'the', 'pos': 'DT', 'entity': 'O',
'lemma': 'the', 'chunk': 'XX'}),
Token(**{'word': 'implementation', 'pos': 'NN', 'entity': 'O',
'lemma': 'implementation', 'chunk': 'XX'}),
Token(**{'word': 'of', 'pos': 'IN', 'entity': 'O',
'lemma': 'of', 'chunk': 'XX'}),
Token(**{'word': 'crucial', 'pos': 'JJ', 'entity': 'O',
'lemma': 'crucial', 'chunk': 'XX'}),
Token(**{'word': 'aspects', 'pos': 'NNS', 'entity': 'O',
'lemma': 'aspect', 'chunk': 'XX'}),
Token(**{'word': 'of', 'pos': 'IN', 'entity': 'O',
'lemma': 'of', 'chunk': 'XX'}),
Token(**{'word': 'this', 'pos': 'DT', 'entity': 'O',
'lemma': 'this', 'chunk': 'XX'}),
Token(**{'word': 'new', 'pos': 'JJ', 'entity': 'O',
'lemma': 'new', 'chunk': 'XX'}),
Token(**{'word': 'annotation', 'pos': 'NN', 'entity': 'O',
'lemma': 'annotation', 'chunk': 'XX'}),
Token(**{'word': 'scheme', 'pos': 'NN', 'entity': 'O',
'lemma': 'scheme', 'chunk': 'XX'}),
Token(**{'word': '.', 'pos': '.', 'entity': 'O',
'lemma': '.', 'chunk': 'XX'})
]
]
# raw_sentences = [
# ['The', 'Penn', 'Treebank', 'has', 'recently', 'implemented', 'a', 'new', 'syntactic', 'annotation', 'scheme',
# ',', 'designed', 'to', 'highlight', 'aspects', 'of', 'predicate', '-', 'argument', 'structure', '.'],
# ['This', 'paper', 'discusses', 'the', 'implementation', 'of', 'crucial',
# 'aspects', 'of', 'this', 'new', 'annotation', 'scheme', '.']
# ]
assert tokens == annotate_using_spacy(sentences, tokenize=True)
test_spacy()
def test_janome():
sentences = [
["メロスは激怒した。"],
["メロスには政治がわからぬ。"],
]
tokens = [
[
Token(**{'word': 'メロス', 'surf': 'メロス', 'pos': '名詞', 'pos1': '一般', 'pos2': '*', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': '*', 'base': 'メロス'}),
Token(**{'word': 'は', 'surf': 'は', 'pos': '助詞', 'pos1': '係助詞', 'pos2': '*', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': 'ハ', 'base': 'は'}),
Token(**{'word': '激怒', 'surf': '激怒', 'pos': '名詞', 'pos1': 'サ変接続', 'pos2': '*', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': 'ゲキド', 'base': '激怒'}),
Token(**{'word': 'し', 'surf': 'し', 'pos': '動詞', 'pos1': '自立', 'pos2': '*', 'pos3': '*',
'inflectionForm': '連用形', 'inflectionType': 'サ変・スル', 'reading': 'シ', 'base': 'する'}),
Token(**{'word': 'た', 'surf': 'た', 'pos': '助動詞', 'pos1': '*', 'pos2': '*', 'pos3': '*',
'inflectionForm': '基本形', 'inflectionType': '特殊・タ', 'reading': 'タ', 'base': 'た'}),
Token(**{'word': '。', 'surf': '。', 'pos': '記号', 'pos1': '句点', 'pos2': '*', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': '。', 'base': '。'})
],
[
Token(**{'word': 'メロス', 'surf': 'メロス', 'pos': '名詞', 'pos1': '一般', 'pos2': '*', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': '*', 'base': 'メロス'}),
Token(**{'word': 'に', 'surf': 'に', 'pos': '助詞', 'pos1': '格助詞', 'pos2': '一般', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': 'ニ', 'base': 'に'}),
Token(**{'word': 'は', 'surf': 'は', 'pos': '助詞', 'pos1': '係助詞', 'pos2': '*', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': 'ハ', 'base': 'は'}),
Token(**{'word': '政治', 'surf': '政治', 'pos': '名詞', 'pos1': '一般', 'pos2': '*', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': 'セイジ', 'base': '政治'}),
Token(**{'word': 'が', 'surf': 'が', 'pos': '助詞', 'pos1': '格助詞', 'pos2': '一般', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': 'ガ', 'base': 'が'}),
Token(**{'word': 'わから', 'surf': 'わから', 'pos': '動詞', 'pos1': '自立', 'pos2': '*', 'pos3': '*',
'inflectionForm': '未然形', 'inflectionType': '五段・ラ行', 'reading': 'ワカラ', 'base': 'わかる'}),
Token(**{'word': 'ぬ', 'surf': 'ぬ', 'pos': '助動詞', 'pos1': '*', 'pos2': '*', 'pos3': '*',
'inflectionForm': '基本形', 'inflectionType': '特殊・ヌ', 'reading': 'ヌ', 'base': 'ぬ'}),
Token(**{'word': '。', 'surf': '。', 'pos': '記号', 'pos1': '句点', 'pos2': '*', 'pos3': '*',
'inflectionForm': '*', 'inflectionType': '*', 'reading': '。', 'base': '。'})
]
]
# raw_sentences = [
# ['メロス', 'は', '激怒', 'し', 'た', '。'],
# ['メロス', 'に', 'は', '政治', 'が', 'わから', 'ぬ', '。']
# ]
assert tokens == annotate_using_janome(sentences, tokenize=True)
|
masashi-y/myccg | depccg/printer/html.py | <filename>depccg/printer/html.py
from typing import List, Union, Optional
import html
import re
from depccg.tree import Tree, ScoredTree
_MATHML_SUBTREE_NONTERMINAL = '''\
<mrow>
<mfrac {3} linethickness='2px'>
<mrow>{0}</mrow>
<mstyle mathcolor='Red'>{1}</mstyle>
</mfrac>
<mtext mathsize='0.8' mathcolor='Black'>{2}</mtext>
</mrow>
'''
_MATHML_SUBTREE_TERMINAL = '''\
<mrow>
<mfrac linethickness='2px'>
<mtext mathsize='1.0' mathcolor='Black'>{0}</mtext>
<mstyle mathcolor='Red'>{1}</mstyle>
</mfrac>
<mtext mathsize='0.8' mathcolor='Black'>lex</mtext>
</mrow>
'''
_MATHML_MAIN = '''\
<!doctype html>
<html lang='en'>
<head>
<meta charset='UTF-8'>
<style>
body {{
font-size: 1em;
}}
</style>
<script type="text/javascript"
src="http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML">
</script>
</head>
<body>
{0}
</body>
</html>
'''
def _mathml_subtree(tree: Tree, bgcolor: Optional[str] = None) -> str:
bgcolor = f'mathbackground={bgcolor}' if bgcolor else ''
cat_str = _mathml_cat(str(tree.cat))
if not tree.is_leaf:
children_str = ''.join(map(_mathml_subtree, tree.children))
return _MATHML_SUBTREE_NONTERMINAL.format(
children_str, cat_str, html.escape(tree.op_string), bgcolor
)
else:
return _MATHML_SUBTREE_TERMINAL.format(html.escape(tree.word), cat_str)
def _mathml_cat(cat: str) -> str:
cats_feats = re.findall(r'([\w\\/()]+)(\[.+?\])*', cat)
mathml_str = ''
for cat, feat in cats_feats:
cat = html.escape(cat)
feat = html.escape(feat)
cat_mathml = f'''\
<mi mathvariant='italic'
mathsize='1.0' mathcolor='Red'>{cat}</mi>'''
if feat != '':
mathml_str += f'''\
<msub>{cat_mathml}
<mrow>
<mi mathvariant='italic'
mathsize='0.8' mathcolor='Purple'>{feat}</mi>
</mrow>
</msub>'''
else:
mathml_str += cat_mathml
return mathml_str
def to_mathml(nbest_trees: List[List[Union[Tree, ScoredTree]]]) -> str:
"""convert parsing results to a html string containing all the trees.
Args:
nbest_trees (List[List[Union[Tree, ScoredTree]]]): parsing results
Returns:
str: html string
"""
result = ''
for sentence_index, trees in enumerate(nbest_trees, 1):
if isinstance(trees[0], ScoredTree):
words = trees[0].tree.word
else:
words = trees[0].word
result += f'<p>ID={sentence_index}: {words}</p>'
for tree in trees:
if isinstance(tree, ScoredTree):
tree, prob = tree
result += f'<p>Log prob={prob:.5e}</p>'
tree_str = tree if isinstance(tree, str) else _mathml_subtree(tree)
result += f'<math xmlns="http://www.w3.org/1998/Math/MathML">{tree_str}</math>'
return _MATHML_MAIN.format(result)
|
masashi-y/myccg | depccg/printer/prolog.py | <reponame>masashi-y/myccg
from typing import List
from io import StringIO
from depccg.cat import Category
from depccg.tree import ScoredTree, Tree
def _prolog_category_string(cat: Category) -> str:
def rec(this_cat: Category) -> str:
if this_cat.is_atomic:
base = this_cat.base.lower()
if base == '.':
return "period"
elif base == ",":
return "comma"
elif base == ":":
return "colon"
elif base == ";":
return "semicolon"
elif str(this_cat.feature) == "":
return base
else:
return f'{base}:{str(this_cat.feature)}'
else:
left = rec(this_cat.left)
right = rec(this_cat.right)
return f"({left}{this_cat.slash}{right})"
return rec(cat)
def _escape_prolog(text: str) -> str:
return text.replace("'", "\\'")
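# Mapping from depccg combinator rule names to the Prolog functors emitted in the LangPro-style output.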
_op_mapping = {
'fa': "fa(",
'ba': "ba(",
'fx': "fc(",
'fc': "fc(",
'bx': "bxc(",
'gfc': "gfc(",
'gbx': "gbx(",
'rp': "rp(",
'lp': "lx(",
'conj': "conj(",
'conj2': "conj(",
}
def _prolog_string(tree: Tree, sentence_index: int) -> str:
position = 0
depth = 1
def indent(output):
output.write(" " * depth)
def rec(node: Tree, output):
nonlocal depth, position
indent(output)
if node.is_leaf:
token = node.token
result_str = (
f"t({_prolog_category_string(node.cat)}, "
f"\'{_escape_prolog(node.word)}\', "
f"\'{_escape_prolog(token.lemma)}\', "
f"\'{token.pos}\', "
f"\'{token.chunk}\', "
f"\'{token.entity}\')"
)
output.write(result_str)
position += 1
elif node.is_unary:
this_cat = _prolog_category_string(node.cat)
child_cat = _prolog_category_string(node.left_child.cat)
output.write(f"lx({this_cat}, {child_cat},\n")
depth += 1
rec(node.child, output)
depth -= 1
output.write(")")
else:
output.write(_op_mapping[node.op_string])
output.write(_prolog_category_string(node.cat))
output.write(",")
if node.op_string == 'conj2':
cat_str = _prolog_category_string(node.right_child.cat)
output.write(f" {cat_str}\\{cat_str},\n")
depth += 1
indent(output)
output.write(f"conj({cat_str}\\{cat_str}, {cat_str},")
if node.op_string == 'conj':
cat_str = _prolog_category_string(node.cat.left)
output.write(f" {cat_str},")
if node.op_string == 'lp':
cat_str = _prolog_category_string(node.right_child.cat)
output.write(f" {cat_str},\n")
depth += 1
indent(output)
output.write(f"lp({cat_str},")
output.write("\n")
depth += 1
rec(node.left_child, output)
if not node.is_unary:
output.write(",\n")
rec(node.right_child, output)
output.write(")")
depth -= 1
if node.op_string in ('conj2', 'lp'):
output.write(")")
depth -= 1
with StringIO() as output:
output.write(f"ccg({sentence_index},\n")
rec(tree, output)
output.write(").\n")
return output.getvalue()
_prolog_header = (
':- op(601, xfx, (/)).\n'
':- op(601, xfx, (\\)).\n'
':- multifile ccg/2, id/2.\n'
':- discontiguous ccg/2, id/2.\n'
)
def to_prolog_en(
nbest_trees: List[List[ScoredTree]],
) -> str:
"""convert parsing results to Prolog format used by LangPro.
Args:
nbest_trees (List[List[ScoredTree]]): parsing results
Returns:
str: Prolog string
"""
with StringIO() as output:
print(_prolog_header, file=output)
for sentence_index, trees in enumerate(nbest_trees, 1):
for tree, _ in trees:
print(_prolog_string(tree, sentence_index), file=output)
result = output.getvalue()
return result
_ja_combinators = {
'SSEQ': 'sseq',
'>': 'fa',
'<': 'ba',
'>B': 'fc',
'<B1': 'bc1',
'<B2': 'bc2',
'<B3': 'bc3',
'<B4': 'bc4',
'>Bx1': 'fx1',
'>Bx2': 'fx2',
'>Bx3': 'fx3',
"ADNext": 'adnext',
"ADNint": 'adnint',
"ADV0": 'adv0',
"ADV1": 'adv1',
}
def to_prolog_ja(
nbest_trees: List[List[ScoredTree]],
) -> str:
"""convert parsing results to Prolog format used by LangPro.
This is specifically used for Japanese sentences.
Args:
nbest_trees (List[List[ScoredTree]]): parsing results
Returns:
str: Prolog string
"""
def traverse_cat(node):
if node.is_functor:
left = traverse_cat(node.left)
right = traverse_cat(node.right)
return f'({left}{node.slash}{right})'
else:
feature = dict(node.feature.items())
base = node.base.lower()
if 'case' not in feature:
return base
else:
feature_case = feature["case"].lower()
return f'{base}:{feature_case}'
def traverse_tree(node, depth=1):
whitespace = ' ' * depth
if node.is_leaf:
cat = traverse_cat(node.cat)
token = node.token
surf = _escape_prolog(token.get('surf', node.word))
base = _escape_prolog(token.get('base', '*'))
tags = [
token.get(key, '*')
for key in ('pos', 'pos1', 'pos2', 'pos3')
]
if all(tag == '*' for tag in tags):
pos = '*'
else:
pos = '/'.join(_escape_prolog(tag) for tag in tags)
infl_form = _escape_prolog(token.get('inflectionForm', '*'))
infl_type = _escape_prolog(token.get('inflectionType', '*'))
output.write(
f"\n{whitespace}t({cat}, '{surf}', '{base}', '{pos}', '{infl_form}', '{infl_type}')"
)
else:
cat = traverse_cat(node.cat)
rule = _ja_combinators[node.op_symbol]
output.write(f"\n{whitespace}{rule}({cat}")
for i, child in enumerate(node.children):
if i < len(node.children):
output.write(',')
traverse_tree(child, depth=depth + 1)
output.write(')')
output = StringIO()
print(_prolog_header, file=output)
for sentence_index, trees in enumerate(nbest_trees, 1):
for tree, _ in trees:
output.write(f'ccg({sentence_index},')
traverse_tree(tree)
output.write(').\n\n')
result = output.getvalue()
output.close()
return result
|
masashi-y/myccg | depccg/printer/jigg_xml.py | <reponame>masashi-y/myccg
from typing import List
from lxml import etree
from depccg.tree import ScoredTree, Tree
from depccg.cat import Category, TernaryFeature, UnaryFeature
def _cat_multi_valued(cat: Category) -> str:
def rec(x: Category):
if x.is_atomic:
if isinstance(x.feature, UnaryFeature):
if x.feature.value is None:
return x.base
else:
return f'{x.base}[{x.feature}=true]'
elif isinstance(x.feature, TernaryFeature):
return str(x)
else:
raise RuntimeError(
f'unsupported feature type: {type(x.feature)}')
else:
return f'({_cat_multi_valued(x)})'
if cat.is_atomic:
return rec(cat)
return f'{rec(cat.left)}{cat.slash}{rec(cat.right)}'
class _ConvertToJiggXML(object):
def __init__(self, sid: int, use_symbol: bool) -> None:
self.sid = sid
self._spid = -1
self.processed = 0
self.use_symbol = use_symbol
@property
def spid(self) -> int:
self._spid += 1
return self._spid
def process(self, tree: Tree, score: float = None) -> None:
counter = 0
def traverse(node: Tree) -> None:
nonlocal counter
id = f's{self.sid}_sp{self.spid}'
xml_node = etree.SubElement(res, 'span')
xml_node.set('category', _cat_multi_valued(node.cat))
xml_node.set('id', id)
if node.is_leaf:
start_of_span = counter
counter += 1
xml_node.set('terminal', f's{self.sid}_{start_of_span}')
else:
childid, start_of_span = traverse(node.left_child)
if not node.is_unary:
tmp, _ = traverse(node.right_child)
childid += ' ' + tmp
xml_node.set('child', childid)
xml_node.set(
'rule', node.op_symbol if self.use_symbol else node.op_string
)
xml_node.set('begin', str(start_of_span))
xml_node.set('end', str(start_of_span + len(node)))
return id, start_of_span
res = etree.Element('ccg')
res.set('id', f's{self.sid}_ccg{self.processed}')
id, _ = traverse(tree)
res.set('root', str(id))
res[0].set('root', 'true')
if score is not None:
res.set('score', str(score))
self.processed += 1
return res
def to_jigg_xml(
trees: List[List[ScoredTree]],
use_symbol: bool = False
) -> etree.Element:
"""generate etree.Element XML object in jigg format
containing all the parse results
Args:
trees (List[List[ScoredTree]]): parsing result
use_symbol (bool, optional): [description]. Defaults to False.
Returns:
etree.Element: jigg format etree.Element tree
"""
root_node = etree.Element('root')
document_node = etree.SubElement(root_node, 'document')
sentences_node = etree.SubElement(document_node, 'sentences')
for sentence_index, parsed in enumerate(trees):
sentence_node = etree.SubElement(sentences_node, 'sentence')
tokens_node = etree.SubElement(sentence_node, 'tokens')
cats = [leaf.cat for leaf in parsed[0].tree.leaves]
tokens = parsed[0].tree.tokens
for token_index, (token, cat) in enumerate(zip(tokens, cats)):
token_node = etree.SubElement(tokens_node, 'token')
token_node.set('start', str(token_index))
token_node.set('cat', str(cat))
token_node.set('id', f's{sentence_index}_{token_index}')
if 'word' in token:
token['surf'] = token.pop('word')
if 'lemma' in token:
token['base'] = token.pop('lemma')
for k, v in token.items():
token_node.set(k, v)
converter = _ConvertToJiggXML(sentence_index, use_symbol)
for tree, score in parsed:
sentence_node.append(converter.process(tree, score))
return root_node
|
masashi-y/myccg | tests/test_cat.py | <gh_stars>10-100
import pytest
from depccg.cat import Category, Atom, Functor, TernaryFeature, UnaryFeature
en_categories = [
(Category.parse(text.strip()), text.strip())
for text in open('tests/cats.txt')
]
ja_categories = [
(Category.parse(text.strip()), text.strip())
for text in open('tests/cats.ja.txt')
]
@pytest.mark.parametrize("result, expect", en_categories)
def test_parse_many_en(result, expect):
assert str(result) == expect
@pytest.mark.parametrize("result, expect", en_categories)
def test_parse_many_ja(result, expect):
assert str(result) == expect
def test_parse():
assert Category.parse("NP") == Atom("NP")
assert Category.parse("(NP)") == Atom("NP")
assert Category.parse("S/NP") == Functor(Atom("S"), '/', Atom("NP"))
assert Category.parse("(S/NP)") == Functor(Atom("S"), '/', Atom("NP"))
assert Category.parse(
"S[dcl]/NP") == Functor(Atom("S", UnaryFeature("dcl")), '/', Atom("NP"))
# japanese categories
assert Category.parse("NP[case=nc,mod=nm,fin=t]") == Atom(
"NP", TernaryFeature(("case", "nc"), ("mod", "nm"), ("fin", "t")))
assert Category.parse("S[mod=nm,form=attr,fin=t]") == Atom(
"S", TernaryFeature(("mod", "nm"), ("form", "attr"), ("fin", "t")))
# punctuations
assert Category.parse(',') == Atom(',')
assert Category.parse('.') == Atom('.')
assert Category.parse(';') == Atom(';')
assert Category.parse(':') == Atom(':')
assert Category.parse('LRB') == Atom('LRB')
assert Category.parse('RRB') == Atom('RRB')
assert Category.parse('conj') == Atom('conj')
assert Category.parse('*START*') == Atom('*START*')
assert Category.parse('*END*') == Atom('*END*')
def test_binop():
S = Category.parse("S")
NP = Category.parse("NP")
assert S / NP == Category.parse("S/NP")
assert S | NP == Category.parse("S\\NP")
assert S | (S / NP) == Category.parse("S\\(S/NP)")
assert (S | NP) / NP == Category.parse("(S\\NP)/NP")
assert S / NP == "S/NP"
assert S | NP == "S\\NP"
assert S | (S / NP) == "S\\(S/NP)"
assert (S | NP) / NP == "(S\\NP)/NP"
assert "S/NP" == S / NP
assert "S\\NP" == S | NP
assert "S\\(S/NP)" == S | (S / NP)
assert "(S\\NP)/NP" == (S | NP) / NP
|
masashi-y/myccg | depccg/annotator.py | from typing import List, Tuple, Optional
import os
import subprocess
import tempfile
import logging
from pathlib import Path
from lxml import etree
from depccg.instance_models import MODEL_DIRECTORY
from depccg.morpha import MorphaStemmer
from depccg.types import Token
logger = logging.getLogger(__name__)
candc_cmd = "cat \"{0}\" | {1}/bin/pos --model {2} | {1}/bin/ner --model {3}"
def annotate_XX(sentences: List[List[str]], tokenize: bool = False) -> List[List[Token]]:
if tokenize:
raise NotImplementedError('no tokenizer implemented')
return [
[Token.of_word(word) for word in sentence]
for sentence in sentences
]
def try_annotate_using_candc(sentences: List[List[str]], tokenize: bool = False) -> List[List[Token]]:
if tokenize:
raise NotImplementedError(
'no tokenizer implemented in the C&C pipeline')
candc_dir = os.environ.get('CANDC', None)
candc_model_pos = None
candc_model_ner = None
fail = False
if candc_dir:
candc_dir = Path(candc_dir)
candc_model_pos = Path(
os.environ.get(
'CANDC_MODEL_POS', str(candc_dir / 'models' / 'pos'))
)
candc_model_ner = Path(
os.environ.get(
'CANDC_MODEL_NER', str(candc_dir / 'models' / 'ner'))
)
if (
(candc_dir / 'bin' / 'pos').exists()
and (candc_dir / 'bin' / 'ner').exists()
and candc_model_pos.exists()
and candc_model_ner.exists()
):
pass
else:
logger.info(
'CANDC environmental variable may not be configured correctly.')
logger.info(
'$CANDC/bin/{pos,ner} and $CANDC/models/{pos,ner} are expected to exist.')
fail = True
else:
fail = True
if fail:
logger.info('did not find C&C parser at CANDC environmental variable.')
logger.info('fill POS tag etc. using XX tag.')
return annotate_XX(sentences)
logger.info('find C&C parser at CANDC environmental variable.')
logger.info('use C&C pipeline to annotate POS and NER infos.')
logger.info(f'C&C models: [{candc_model_pos}, {candc_model_ner}]')
stemmer = MorphaStemmer(str(MODEL_DIRECTORY / 'verbstem.list'))
tmpfile = tempfile.mktemp()
with open(tmpfile, 'w') as f:
for sentence in sentences:
print(' '.join(sentence), file=f)
command = candc_cmd.format(
tmpfile,
candc_dir,
candc_model_pos,
candc_model_ner
)
proc = subprocess.Popen(
command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
res, error = proc.communicate()
try:
tagged_sentences = res.decode('utf-8').strip().split('\n')
tagged_sentences = [
[
tuple(token.split('|'))
for token in sentence.strip().split(' ')
]
for sentence in tagged_sentences
]
except RuntimeError:
raise RuntimeError(
'failed to process C&C output. there might have been some problem '
'during running C&C pipeline?\n'
f'stderr:\n {error}'
)
results = []
for sentence in tagged_sentences:
words, poss = zip(*[(word, pos) for word, pos, _ in sentence])
lemmas = stemmer.analyze(list(words), list(poss))
tokens = [
Token(
word=word,
pos=pos,
entity=ner,
lemma=lemma.lower(),
chunk='XX'
)
for (word, pos, ner), lemma in zip(sentence, lemmas)
]
results.append(tokens)
return results
def annotate_using_spacy(
sentences: List[List[str]],
tokenize: bool = False,
model_name: str = 'en_core_web_sm',
n_threads: int = 2,
batch_size: int = 10000
) -> Tuple[List[List[Token]], Optional[List[List[str]]]]:
try:
import spacy
from spacy.tokens import Doc
except ImportError:
logger.error(
'failed to import spacy. please install it by "pip install spacy".'
)
exit(1)
nlp = spacy.load(model_name, disable=['parser'])
logger.info('use spacy to annotate POS and NER infos.')
if tokenize:
docs = [nlp.tokenizer(' '.join(sentence)) for sentence in sentences]
else:
docs = [Doc(nlp.vocab, sentence) for sentence in sentences]
for _, proc in nlp.pipeline:
docs = proc.pipe(
docs,
batch_size=batch_size
)
results = []
for sentence in docs:
tokens = []
for token in sentence:
if token.ent_iob_ == 'O':
ner = token.ent_iob_
else:
ner = token.ent_iob_ + '-' + token.ent_type_
            # spaCy may lemmatize pronouns to the placeholder '-PRON-';
            # use the lowercased surface form in that case
if token.lemma_ == '-PRON-':
lemma = str(token).lower()
else:
lemma = token.lemma_.lower()
tokens.append(
Token(
word=str(token),
pos=token.tag_,
entity=ner,
lemma=lemma,
chunk='XX'
)
)
results.append(tokens)
return results
def annotate_using_janome(
sentences: List[List[str]],
tokenize: bool = False
) -> List[List[Token]]:
assert tokenize, 'no support for using janome with pre-tokenized inputs'
try:
from janome.tokenizer import Tokenizer
except ImportError:
logger.error(
'failed to import janome. please install it by "pip install janome".')
exit(1)
    logger.info('using Janome to tokenize and annotate POS info.')
tokenizer = Tokenizer()
results = []
for sentence in sentences:
sentence = ''.join(sentence)
tokenized = list(tokenizer.tokenize(sentence))
tokens = []
for token in tokenized:
pos, pos1, pos2, pos3 = token.part_of_speech.split(',')
token = Token(
word=token.surface,
surf=token.surface,
pos=pos,
pos1=pos1,
pos2=pos2,
pos3=pos3,
inflectionForm=token.infl_form,
inflectionType=token.infl_type,
reading=token.reading,
base=token.base_form
)
tokens.append(token)
results.append(tokens)
return results
jigg_cmd = "java -Xmx2g -cp \"{0}/jar/*\" jigg.pipeline.Pipeline -annotators {1} -file {2} -output {3}"
def annotate_using_jigg(
sentences: List[List[str]],
tokenize: bool = False,
pipeline: str = 'ssplit,kuromoji'
) -> List[List[Token]]:
assert tokenize, 'no support for using jigg with pre-tokenized inputs'
    logger.info('using Jigg to tokenize and annotate POS info.')
jigg_dir = os.environ.get('JIGG', None)
if not jigg_dir:
        logger.error(
            'did not find Jigg via the JIGG environment variable. exiting...')
exit(1)
tmpfile = tempfile.mktemp()
with open(tmpfile, 'w') as f:
for sentence in sentences:
print(' '.join(sentence), file=f)
outfile = tempfile.mktemp()
command = jigg_cmd.format(
jigg_dir,
pipeline,
tmpfile,
outfile
)
proc = subprocess.Popen(
command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
proc.communicate()
results = []
for sentence in etree.parse(outfile).getroot().xpath('*//sentence'):
tokens = []
for token in sentence.xpath('*//token'):
attrib = token.attrib
token = Token(
word=attrib['surf'],
surf=attrib['surf'],
pos=attrib['pos'],
pos1=attrib['pos1'],
pos2=attrib['pos2'],
pos3=attrib['pos3'],
inflectionForm=attrib['inflectionForm'],
inflectionType=attrib['inflectionType'],
reading=attrib['reading'],
base=attrib['base']
)
tokens.append(token)
results.append(tokens)
return results
english_annotator = {
'candc': try_annotate_using_candc,
'spacy': annotate_using_spacy,
}
japanese_annotator = {
'janome': annotate_using_janome,
'jigg': annotate_using_jigg,
}
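# minimal usage sketch (hypothetical input; the spaCy English model must be installed):
#   tagged = english_annotator['spacy']([['This', 'is', 'a', 'test', '.']])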
|
masashi-y/myccg | depccg/utils.py | from typing import List, Tuple, Dict, NamedTuple, Optional
import json
import logging
import numpy
from depccg.types import ScoringResult
from depccg.cat import Category
logger = logging.getLogger(__name__)
def is_json(file_path: str) -> bool:
try:
with open(file_path, 'r') as data_file:
json.load(data_file)
return True
except json.JSONDecodeError:
return False
def normalize(word: str) -> str:
if word == "-LRB-":
return "("
elif word == "-RRB-":
return ")"
elif word == "-LCB-":
return "{"
elif word == "-RCB-":
return "}"
elif word == "-LSB-":
return "["
elif word == "-RSB-":
return "]"
else:
return word
def denormalize(word: str) -> str:
if word == "(":
return "-LRB-"
elif word == ")":
return "-RRB-"
elif word == "{":
return "-LCB-"
elif word == "}":
return "-RCB-"
elif word == "[":
return "-LSB-"
elif word == "]":
return "-RSB-"
word = word.replace(">", "-RAB-")
word = word.replace("<", "-LAB-")
return word
def read_pretrained_embeddings(filepath: str) -> numpy.ndarray:
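    # infer the embedding dimensionality from the first line, count the number of rows,
    # then fill a (nvocab, dim) float32 matrix line by line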
nvocab = 0
io = open(filepath)
dim = len(io.readline().split())
io.seek(0)
for _ in io:
nvocab += 1
io.seek(0)
res = numpy.empty((nvocab, dim), dtype=numpy.float32)
for i, line in enumerate(io):
line = line.strip()
if len(line) == 0:
continue
res[i] = line.split()
io.close()
return res
def read_model_defs(filepath: str) -> Dict[str, int]:
return {
line.strip().split(' ')[0]: i
for i, line in enumerate(open(filepath, encoding='utf-8'))
}
def remove_comment(line: str) -> str:
comment = line.find('#')
if comment != -1:
line = line[:comment]
return line.strip()
class SpanInfo(NamedTuple):
cat: Category
idx: int
end_idx: Optional[int] = None
def read_partial_tree(string: str) -> Tuple[List[str], List[SpanInfo]]:
stack = []
spans = []
words = []
buf = list(reversed(string.split()))
counter = 0
while buf:
item = buf.pop()
if item.startswith('<'):
cat = item[1:]
cat = None if cat == 'X' else Category.parse(cat)
stack.append(cat)
stack.append(counter)
elif item == '>':
start = stack.pop()
cat = stack.pop()
spans.append(SpanInfo(cat, start, counter - start))
else:
items = item.split('|')
if len(items) == 1:
words.append(items[0])
elif len(items) == 2:
cat, word = items
assert len(cat) > 0 and len(word) > 0, \
'failed to parse partially annotated sentence.'
words.append(word)
spans.append(SpanInfo(Category.parse(cat), counter))
counter += 1
assert len(stack) == 0, 'failed to parse partially annotated sentence.'
return words, spans
def maybe_split_and_join(string):
if isinstance(string, list):
split = string
join = ' '.join(string)
else:
assert isinstance(string, str)
split = string.split(' ')
join = string
return split, join
def read_weights(filename, file_type='json'):
assert file_type == 'json'
categories = None
scores = []
for line in open(filename):
json_dict = json.loads(line.strip())
if categories is None:
categories = [
Category.parse(cat)
for cat in json_dict['categories']
]
dep_scores = numpy.array(json_dict['heads']) \
.reshape(json_dict['heads_shape']) \
.astype(numpy.float32)
tag_scores = numpy.array(json_dict['head_tags']) \
.reshape(json_dict['head_tags_shape']) \
.astype(numpy.float32)
scores.append(
ScoringResult(
tag_scores,
dep_scores
)
)
return scores, categories
|
masashi-y/myccg | depccg/printer/xml.py | from typing import List
from lxml import etree
from depccg.tree import Tree, ScoredTree
def _process_tree(tree: Tree) -> etree.Element:
def rec(node, parent):
if node.is_leaf:
leaf_node = etree.SubElement(parent, 'lf')
start, token = tokens.pop(0)
leaf_node.set('start', str(start))
leaf_node.set('span', '1')
leaf_node.set('cat', str(node.cat))
for k, v in token.items():
leaf_node.set(k, v)
else:
rule_node = etree.SubElement(parent, 'rule')
rule_node.set('type', node.op_string)
rule_node.set('cat', str(node.cat))
for child in node.children:
rec(child, rule_node)
tokens = list(enumerate(tree.tokens))
result = etree.Element("ccg")
rec(tree, result)
return result
def xml_of(
nbest_trees: List[List[ScoredTree]],
) -> etree.Element:
"""convert parsing results to a XML etree.Element in a format commonly used by C&C.
Args:
nbest_trees (List[List[ScoredTree]]): parsing results
Returns:
etree.Element: XML object
"""
candc_node = etree.Element('candc')
for sentence_index, trees in enumerate(nbest_trees, 1):
for tree_index, (tree, _) in enumerate(trees, 1):
out = _process_tree(tree)
out.set('sentence', str(sentence_index))
out.set('id', str(tree_index))
candc_node.append(out)
return candc_node
|
masashi-y/myccg | depccg/chainer/biaffine.py | import math
import numpy as np
from chainer import cuda
from chainer import link
from chainer import functions as F
from chainer import initializer
from chainer.initializers.normal import HeNormal
from chainer.initializers.constant import Constant
from chainer.initializers.constant import Identity
def _get_initializer(initializer, scale=1.0):
if initializer is None:
return HeNormal(scale / np.sqrt(2))
if np.isscalar(initializer):
return Constant(initializer * scale)
if isinstance(initializer, np.ndarray):
return Constant(initializer * scale)
assert callable(initializer)
if scale == 1.0:
return initializer
return _ScaledInitializer(initializer, scale)
class _ScaledInitializer(initializer.Initializer):
def __init__(self, initializer, scale=1.0):
self.initializer = initializer
self.scale = scale
dtype = getattr(initializer, 'dtype', None)
        super(_ScaledInitializer, self).__init__(dtype)
def __call__(self, array):
self.initializer(array)
array *= self.scale
class Biaffine(link.Link):
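    # biaffine attention layer: scores a pair of vectors (x1_i, x2_j) as [x1_i; 1]^T W x2_j,
    # producing an (slen, slen) score matrix per sentence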
def __init__(self, in_size, wscale=1,
initialW=None, initial_bias=None):
super(Biaffine, self).__init__()
self._W_initializer = _get_initializer(
initialW, math.sqrt(wscale))
self._initialize_params(in_size)
def _initialize_params(self, in_size):
self.add_param('W', (in_size + 1, in_size),
initializer=self._W_initializer)
def forward_one(self, x1, x2):
xp = cuda.get_array_module(x1.data)
return F.matmul(
F.concat([x1, xp.ones((x1.shape[0], 1), 'f')]), # (slen, hidden+1)
F.matmul(self.W, x2, transb=True)) # (hidden+1, hidden) * (slen, hidden)^T
def forward_batch(self, x1, x2):
xp = cuda.get_array_module(x1.data)
batch, slen, hidden = x2.shape
return F.batch_matmul(
# (batch, slen, hidden+1)
F.concat([x1, xp.ones((batch, slen, 1), 'f')], 2),
F.reshape(F.linear(F.reshape(x2, (batch * slen, -1)), self.W),
(batch, slen, -1)), transb=True)
def __call__(self, x1, x2):
dim = len(x1.shape)
if dim == 3:
return self.forward_batch(x1, x2)
elif dim == 2:
return self.forward_one(x1, x2)
else:
raise RuntimeError()
class Bilinear(link.Link):
    # chainer.links.Bilinear may have a problem on GPU
    # and can produce NaNs with large batches
def __init__(self, in_size1, in_size2, out_size, wscale=1,
initialW=None, initial_bias=None, bias=0):
super(Bilinear, self).__init__()
self._W_initializer = _get_initializer(
initialW, math.sqrt(wscale))
if initial_bias is None:
initial_bias = bias
self.bias_initializer = _get_initializer(initial_bias)
# same parameters as chainer.links.Bilinear
# so that both can use serialized parameters of the other
self.add_param('W', (in_size1, in_size2, out_size),
initializer=self._W_initializer)
self.add_param('V1', (in_size1, out_size),
initializer=self._W_initializer)
self.add_param('V2', (in_size2, out_size),
initializer=self._W_initializer)
self.add_param('b', out_size,
initializer=self.bias_initializer)
self.in_size1 = in_size1
self.in_size2 = in_size2
self.out_size = out_size
def __call__(self, e1, e2):
ele2 = F.reshape(
F.batch_matmul(e1[:, :, None], e2[:, None, :]), (-1, self.in_size1 * self.in_size2))
res = F.matmul(ele2,
F.reshape(self.W, (self.in_size1 * self.in_size2, self.out_size))) + \
F.matmul(e1, self.V1) + \
F.matmul(e2, self.V2)
res, bias = F.broadcast(res, self.b)
return res + bias
|
masashi-y/myccg | depccg/chainer/ja_lstm_parser_bi.py | import os
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from depccg.utils import read_model_defs
from depccg.chainer.biaffine import Biaffine, Bilinear
from depccg.chainer.param import Param
from depccg.types import ScoringResult
UNK = "*UNKNOWN*"
START = "*START*"
END = "*END*"
IGNORE = -1
class FeatureExtractor(object):
def __init__(self, model_path):
self.model_path = model_path
self.words = read_model_defs(os.path.join(model_path, 'words.txt'))
self.chars = read_model_defs(os.path.join(model_path, 'chars.txt'))
self.unk_word = self.words[UNK]
self.start_word = self.words[START]
self.end_word = self.words[END]
self.unk_char = self.chars[UNK]
self.start_char = self.chars[START]
self.end_char = self.chars[END]
def process(self, words, xp=np):
"""
words: list of unicode tokens
"""
w = xp.array([self.start_word] + [self.words.get(
x, self.unk_word) for x in words] + [self.end_word], 'i')
length = max(len(x) for x in words)
c = -xp.ones((len(words) + 2, length), 'i')
c[0, 0] = self.start_char
c[-1, 0] = self.end_char
for i, word in enumerate(words, 1):
for j in range(len(word)):
c[i, j] = self.chars.get(word[j], self.unk_char)
return w, c, xp.array([length], 'i')
class BiaffineJaLSTMParser(chainer.Chain):
def __init__(self, model_path):
Param.load(self, os.path.join(model_path, 'tagger_defs.txt'))
self.extractor = FeatureExtractor(model_path)
self.in_dim = self.word_dim + self.char_dim
super(BiaffineJaLSTMParser, self).__init__(
emb_word=L.EmbedID(self.n_words, self.word_dim),
emb_char=L.EmbedID(self.n_chars, 50, ignore_label=IGNORE),
conv_char=L.Convolution2D(
1, self.char_dim, (3, 50), stride=1, pad=(1, 0)),
lstm_f=L.NStepLSTM(self.nlayers, self.in_dim,
self.hidden_dim, 0.32),
lstm_b=L.NStepLSTM(self.nlayers, self.in_dim,
self.hidden_dim, 0.32),
arc_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
arc_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
rel_dep=L.Linear(2 * self.hidden_dim, self.dep_dim),
rel_head=L.Linear(2 * self.hidden_dim, self.dep_dim),
biaffine_arc=Biaffine(self.dep_dim),
biaffine_tag=Bilinear(self.dep_dim, self.dep_dim, len(self.targets)))
def forward(self, ws, cs, ls, dep_ts=None):
ws = map(self.emb_word, ws)
cs = [F.squeeze(
F.max_pooling_2d(
self.conv_char(
F.expand_dims(
self.emb_char(c), 1)), (int(l[0]), 1)))
for c, l in zip(cs, ls)]
xs_f = [F.dropout(F.concat([w, c]), 0.5) for w, c in zip(ws, cs)]
xs_b = [x[::-1] for x in xs_f]
_, _, hs_f = self.lstm_f(None, None, xs_f)
_, _, hs_b = self.lstm_b(None, None, xs_b)
hs_b = [x[::-1] for x in hs_b]
hs = [F.concat([h_f, h_b]) for h_f, h_b in zip(hs_f, hs_b)]
dep_ys = [self.biaffine_arc(
F.elu(F.dropout(self.arc_dep(h), 0.32)),
F.elu(F.dropout(self.arc_head(h), 0.32))) for h in hs]
if dep_ts is not None:
heads = dep_ts
else:
heads = [F.argmax(y, axis=1) for y in dep_ys]
cat_ys = [self.biaffine_tag(
F.elu(F.dropout(self.rel_dep(h), 0.32)),
F.elu(F.dropout(self.rel_head(
F.embed_id(t, h, ignore_label=IGNORE)), 0.32)))
for h, t in zip(hs, heads)]
return cat_ys, dep_ys
def _predict(self, xs):
xs = [self.extractor.process(x, self.xp) for x in xs]
ws, ss, ps = zip(*xs)
with chainer.no_backprop_mode(), chainer.using_config('train', False):
cat_ys, dep_ys = self.forward(ws, ss, ps)
cat_ys = [F.log_softmax(y[1:-1]).data for y in cat_ys]
cat_ys = [chainer.cuda.to_cpu(y) for y in cat_ys]
dep_ys = [F.log_softmax(y[1:-1, :-1]).data for y in dep_ys]
dep_ys = [chainer.cuda.to_cpu(y) for y in dep_ys]
return list(ScoringResult(*result) for result in zip(cat_ys, dep_ys))
def predict_doc(self, doc, batchsize=16, gpu=-1):
res = []
if gpu >= 0:
chainer.cuda.get_device_from_id(gpu).use()
for i in range(0, len(doc), batchsize):
res.extend(self._predict(doc[i:i + batchsize]))
return res, self.cats
@property
def cats(self):
return list(zip(*sorted(self.targets.items(), key=lambda x: x[1])))[0]
|
masashi-y/myccg | depccg/chainer/param.py |
import json
class Param:
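    # mirrors every hyper-parameter both in this Param object (the "ps" dict) and in the
    # wrapped model instance, so that settings can be dumped to / loaded from a JSON file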
def __init__(self, obj):
self.__dict__["ps"] = {"model": obj.__class__.__name__}
self.__dict__["obj"] = obj
def __setattr__(self, key, value):
self.__dict__["ps"][key] = value
self.__dict__["obj"].__dict__[key] = value
def dump(self, out):
json.dump(self.ps, open(out, "w"))
@staticmethod
def load(obj, paramfile):
p = Param(obj)
params = json.load(open(paramfile))
p.__dict__["ps"].update(params)
obj.__dict__.update(params)
return p
|
masashi-y/myccg | depccg/printer/ja.py | from depccg.tree import Tree
from depccg.utils import normalize
def ja_of(tree: Tree) -> str:
"""tree string in the Japanese CCGBank's format
Args:
tree (Tree): tree object
Returns:
str: tree string in Japanese CCGBank's format
"""
def rec(node):
if node.is_leaf:
cat = node.cat
word = normalize(node.word)
token = node.token
poss = [
token.get(pos, '*')
for pos in ('pos', 'pos1', 'pos2', 'pos3')
]
poss = [pos for pos in poss if pos != '*']
pos = '-'.join(poss) if len(poss) else '_'
inflections = [
token.get(i, '*')
for i in ('inflectionForm', 'inflectionType')
]
inflections = [i for i in inflections if i != '*']
inflection = '-'.join(inflections) if len(inflections) else '_'
return f'{{{cat} {word}/{word}/{pos}/{inflection}}}'
else:
children = ' '.join(rec(child) for child in node.children)
return f'{{{node.op_symbol} {node.cat} {children}}}'
return rec(tree)
|
masashi-y/myccg | depccg/cat.py | from typing import Optional, Callable, Tuple, TypeVar, Iterator
from dataclasses import dataclass
import re
X = TypeVar('X')
Pair = Tuple[X, X]
cat_split = re.compile(r'([\[\]\(\)/\\|<>])')
punctuations = [',', '.', ';', ':', 'LRB', 'RRB', 'conj', '*START*', '*END*']
class Feature(object):
def __repr__(self) -> str:
return str(self)
@classmethod
def parse(cls, text: str) -> 'Feature':
if '=' in text and ',' in text:
return TernaryFeature(*[tuple(kv.split('=')) for kv in text.split(',')])
return UnaryFeature(text)
@dataclass(frozen=True, repr=False)
class UnaryFeature(Feature):
"""Common feature type widely used in many CCGBanks.
This assumes None or "X" values as representing a variable feature.
    As is commonly done in the parsing literature, the 'nb' feature is sometimes
    treated as if it were absent, i.e., NP[conj] and NP[nb] can match.
"""
value: Optional[str] = None
def __str__(self) -> str:
return self.value if self.value is not None else ''
def __eq__(self, other: object) -> bool:
if isinstance(other, str):
return self == Feature.parse(other)
elif not isinstance(other, UnaryFeature):
return False
return self.value == other.value
def unifies(self, other: 'UnaryFeature') -> bool:
return (
self.is_variable
or self.is_ignorable
or self == other
)
@property
def is_variable(self) -> bool:
return self.value == "X"
@property
def is_ignorable(self) -> bool:
return self.value is None or self.value == "nb"
@dataclass(frozen=True, repr=False)
class TernaryFeature(Feature):
"""Feature type used in the Japanese version of CCGBank.
This assumes a feature with values (X1, X2, X3) as representing a variable.
"""
kv1: Pair[str]
kv2: Pair[str]
kv3: Pair[str]
def items(self) -> Iterator[Pair[str]]:
return (self.kv1, self.kv2, self.kv3)
def values(self) -> Iterator[str]:
return (v for _, v in self.items())
def keys(self) -> Iterator[str]:
return (k for k, _ in self.items())
def __str__(self) -> str:
return ','.join(f'{k}={v}' for k, v in self.items())
def __eq__(self, other: object) -> bool:
if isinstance(other, str):
return self == Feature.parse(other)
elif not isinstance(other, TernaryFeature):
return False
return (
self.kv1 == other.kv1
and self.kv2 == other.kv2
and self.kv3 == other.kv3
)
    def unifies(self, other: 'TernaryFeature') -> bool:
if self == other:
return True
if list(self.keys()) != list(other.keys()):
return False
return all(
v1 == v2 or v1.startswith('X')
for v1, v2 in zip(self.values(), other.values())
)
@property
def is_variable(self) -> bool:
return any(v.startswith('X') for v in self.values())
class Category(object):
@property
def is_functor(self):
return not self.is_atomic
@property
def is_atomic(self):
return not self.is_functor
def __repr__(self) -> str:
return str(self)
def __truediv__(self, other: 'Category') -> 'Category':
return Functor(self, '/', other)
def __or__(self, other: 'Category') -> 'Category':
return Functor(self, '\\', other)
@classmethod
def parse(cls, text: str) -> 'Category':
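        # shunting-yard-like parse: split the category string on brackets and slashes,
        # then assemble Atom / Functor nodes with an explicit stack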
tokens = cat_split.sub(r' \1 ', text)
buffer = list(reversed([i for i in tokens.split(' ') if i != '']))
stack = []
while len(buffer):
item = buffer.pop()
if item in punctuations:
stack.append(Atom(item))
elif item in '(<':
pass
elif item in ')>':
y = stack.pop()
if len(stack) == 0:
return y
f = stack.pop()
x = stack.pop()
stack.append(Functor(x, f, y))
elif item in '/\\|':
stack.append(item)
else:
if len(buffer) >= 3 and buffer[-1] == '[':
buffer.pop()
feature = Feature.parse(buffer.pop())
assert buffer.pop() == ']'
stack.append(Atom(item, feature))
else:
stack.append(Atom(item))
if len(stack) == 1:
return stack[0]
try:
x, f, y = stack
return Functor(x, f, y)
except ValueError:
            raise RuntimeError(f'failed to parse category: {text}')
@dataclass(frozen=True, repr=False)
class Atom(Category):
base: str
feature: Feature = UnaryFeature()
def __str__(self) -> str:
feature = str(self.feature)
if len(feature) == 0:
return self.base
return f'{self.base}[{feature}]'
def __eq__(self, other: object) -> bool:
if isinstance(other, str):
return str(self) == other
elif not isinstance(other, Atom):
return False
return (
self.base == other.base
and self.feature == other.feature
)
def __xor__(self, other: object) -> bool:
if not isinstance(other, Atom):
return False
return self.base == other.base
@property
def is_atomic(self):
return True
@property
def nargs(self) -> int:
return 0
def arg(self, index: int) -> Optional[Category]:
if index == 0:
return self
return None
def clear_features(self, *args) -> 'Atom':
if self.feature in args:
return Atom(self.base)
return self
@dataclass(frozen=True, repr=False)
class Functor(Category):
left: Category
slash: str
right: Category
def __str__(self) -> str:
def _str(cat):
if isinstance(cat, Functor):
return f'({cat})'
return str(cat)
return _str(self.left) + self.slash + _str(self.right)
def __eq__(self, other: object) -> bool:
if isinstance(other, str):
return str(self) == other
elif not isinstance(other, Functor):
return False
return (
self.left == other.left
and self.slash == other.slash
and self.right == other.right
)
def __xor__(self, other: object) -> bool:
if not isinstance(other, Functor):
return False
return (
self.left ^ other.left
and self.slash == other.slash
and self.right ^ other.right
)
@property
def functor(self) -> Callable[[Category, Category], Category]:
return lambda x, y: Functor(x, self.slash, y)
@property
def is_functor(self):
return True
@property
def nargs(self) -> int:
return 1 + self.left.nargs
def arg(self, index: int) -> Optional[Category]:
if self.nargs == index:
return self
else:
return self.left.arg(index)
def clear_features(self, *args) -> 'Functor':
return self.functor(
self.left.clear_features(*args),
self.right.clear_features(*args)
)
|
masashi-y/myccg | depccg/grammar/__init__.py | from typing import Callable, List, TypeVar, Tuple, Set, Dict
from depccg.types import Combinator, CombinatorResult
from depccg.cat import Category
X = TypeVar('X')
Pair = Tuple[X, X]
def apply_rules(
left: Category,
right: Category,
seen_rules: Set[Pair[Category]],
combinators: List[Combinator],
cache: Dict[Pair[Category], List[CombinatorResult]],
) -> List[CombinatorResult]:
cats = (left, right)
if cats in cache:
return cache[cats]
results = []
if cats in seen_rules:
for combinator in combinators:
result = combinator(*cats)
if result is not None:
results.append(result)
cache[cats] = results
return results
def guess_combinator_by_triplet(
    binary_rules: Callable[[Category, Category], List[CombinatorResult]],
target: Category,
x: Category,
y: Category,
) -> CombinatorResult:
for rule in binary_rules(x, y):
if rule.cat == target:
            return rule
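    # fall back to a generic "unk" combinator result when no binary rule derives the target category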
return CombinatorResult(
cat=target,
op_string="unk",
op_symbol="<unk>",
head_is_left=True
)
|
masashi-y/myccg | depccg/unification.py | from typing import Dict, Union
from depccg.cat import Category, Atom, Feature
class Unification(object):
"""This performs unification on category variables.
Usage:
>>> uni = Unification("a/b", "b")
>>> x = Category.parse("S[X]/NP[X]")
>>> y = Category.parse("NP[mod]")
>>> uni(x, y)
True
>>> uni["a"]
S[mod]
Args:
meta_x: a string pattern (e.g., "a/b") to match against the first argument (x above).
        meta_y: a string pattern ("b") to match against the second argument (y above).
"""
def __init__(
self,
meta_x: Union[str, Category],
meta_y: Union[str, Category],
) -> None:
self.cats: Dict[str, Category] = {}
# mapping of variable feature to its instantiation
self.mapping: Dict[Feature, Feature] = {}
self.meta_x = (
Category.parse(meta_x) if isinstance(meta_x, str) else meta_x
)
self.meta_y = (
Category.parse(meta_y) if isinstance(meta_y, str) else meta_y
)
# meta variables to feature values
self.x_features: Dict[str, Feature] = {}
self.y_features: Dict[str, Feature] = {}
self.success = False
self.done = False
def __call__(self, x: Category, y: Category) -> bool:
if self.done:
raise RuntimeError(
"cannot use the same Unification object more than once."
)
self.done = True
def scan_deep(s: Category, v: str, index: int, results: Dict[str, Feature]):
if s.is_functor:
index = scan_deep(s.left, v, index, results)
index = scan_deep(s.right, v, index, results)
return index
results[f'{v}{index}'] = s.feature
return index + 1
def scan(s: Category, t: Category, results: Dict[str, Feature]) -> bool:
# collect categories corresponding to meta variables
if s.is_atomic:
if s.base in self.cats and not (t ^ self.cats[s.base]):
return False
self.cats[s.base] = t
if (
s.is_functor and t.is_functor and (
s.slash == t.slash or '|' in (s.slash, t.slash))
):
return (
scan(s.left, t.left, results) and scan(
s.right, t.right, results)
)
elif s.is_atomic and t.is_functor:
scan_deep(t, s.base, 0, results)
return True
elif s.is_atomic and t.is_atomic:
if t.feature is not None:
results[s.base] = t.feature
return True
return False # s.is_functor and t.is_atomic
self.success = (
scan(self.meta_x, x, self.x_features) and scan(
self.meta_y, y, self.y_features)
)
if not self.success:
return False
# meta variables such as a, b, c, d, etc.
meta_vars = set(self.x_features.keys()) & set(self.y_features.keys())
if len(meta_vars) == 0:
return True
for var in meta_vars:
x_feature = self.x_features[var]
y_feature = self.y_features[var]
# these pairs can match: (NP[nb], NP[conj]), (NP, NP[conj]), (NP[X], NP[conj])
if x_feature.unifies(y_feature):
# if (NP[X], NP[conj]), further memorize the matching `X := conj`
# if there're two of `X := conj` and `X := nb`, choose one arbitrarily
if x_feature.is_variable:
self.mapping[x_feature] = y_feature
elif y_feature.unifies(x_feature):
if y_feature.is_variable:
self.mapping[y_feature] = x_feature
else:
self.success = False
return False
return True
def __getitem__(self, key: str) -> Category:
def rec(x: Category) -> Category:
if x.is_functor:
return x.functor(rec(x.left), rec(x.right))
else:
if x.feature in self.mapping:
return Atom(x.base, self.mapping[x.feature])
else:
return x
assert self.success, \
("the unification has not been successful. "
"Unification.__getitem__ is not callable in that case.")
if key not in self.cats:
raise KeyError(f'meta category `{key}` has not been observed.')
return rec(self.cats[key])
|
masashi-y/myccg | depccg/chainer/supertagger.py | from typing import Union
import os
import json
import logging
import chainer
from depccg.chainer.lstm_parser_bi_fast import FastBiaffineLSTMParser
from depccg.chainer.ja_lstm_parser_bi import BiaffineJaLSTMParser
logger = logging.getLogger(__name__)
def load_chainer_tagger(
model_path: str,
device: int = -1
) -> Union[FastBiaffineLSTMParser, BiaffineJaLSTMParser]:
model_file = os.path.join(model_path, 'tagger_model')
def_file = os.path.join(model_path, 'tagger_defs.txt')
assert os.path.exists(model_file) and os.path.exists(def_file), \
        (f'Failed in initialization. Directory "{model_path}" must contain both '
         '"tagger_model" and "tagger_defs.txt" files')
with open(def_file) as f:
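        # 'model' in tagger_defs.txt names the parser class (FastBiaffineLSTMParser or
        # BiaffineJaLSTMParser imported above); eval() resolves that name and instantiates it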
tagger = eval(json.load(f)['model'])(model_path)
logger.info(f'initializing supertagger with parameters at {model_file}')
chainer.serializers.load_npz(model_file, tagger)
if device >= 0:
logger.info(f'sending the supertagger to gpu: {device}')
tagger.to_gpu(device)
return tagger
|
masashi-y/myccg | depccg/tools/ja/reader.py | from typing import Iterator, List, Tuple
import re
from depccg.cat import Category
from depccg.tree import Tree
from depccg.types import Token
from depccg.tools.reader import ReaderResult
combinators = {
'SSEQ', '>', '<', '>B', '<B1', '<B2', '<B3',
'<B4', '>Bx1', '>Bx2', '>Bx3',
'ADNext', 'ADNint', 'ADV0', 'ADV1', 'ADV2'
}
DEPENDENCY = re.compile(r'{.+?}')
def read_ccgbank(filepath: str) -> Iterator[ReaderResult]:
"""read Japanase CCGBank file.
Args:
filename (str): file name string
Yields:
Iterator[ReaderResult]: iterator object containing parse results
"""
for i, line in enumerate(open(filepath)):
line = line.strip()
if len(line) == 0:
continue
tree, tokens = _JaCCGLineReader(line).parse()
yield ReaderResult(str(i), tokens, tree)
class _JaCCGLineReader(object):
def __init__(self, line: str) -> None:
self.line = line
self.index = 0
self.word_id = -1
self.tokens = []
def next(self, target: str) -> str:
end = self.line.find(target, self.index)
result = self.line[self.index:end]
self.index = end + 1
return result
def check(self, text: str, offset: int = 0) -> None:
if self.line[self.index + offset] != text:
            raise RuntimeError(f'failed to parse: {self.line}')
def peek(self) -> str:
return self.line[self.index]
def parse(self) -> Tuple[Tree, List[Token]]:
result = self.next_node()
return result, self.tokens
@property
def next_node(self):
end = self.line.find(' ', self.index)
if self.line[self.index + 1:end] in combinators:
return self.parse_tree
else:
return self.parse_leaf
def parse_leaf(self) -> Tree:
self.word_id += 1
self.check('{')
cat = self.next(' ')[1:]
cat = cat[:cat.find('_')]
cat = DEPENDENCY.sub('', cat)
cat = Category.parse(cat)
surf, base, pos1, pos2 = self.next('}')[:-1].split('/')
token = Token(surf=surf, base=base, pos1=pos1, pos2=pos2)
self.tokens.append(token)
return Tree.make_terminal(surf, cat)
def parse_tree(self) -> Tree:
self.check('{')
op_string = self.next(' ')
cat = DEPENDENCY.sub('', self.next(' '))
cat = Category.parse(cat)
self.check('{')
children = []
while self.peek() != '}':
children.append(self.next_node())
if self.peek() == ' ':
self.next(' ')
self.next('}')
if len(children) == 1:
return Tree.make_unary(cat, children[0], op_string, op_string)
else:
assert len(
children) == 2, f'failed to parse, invalid number of children: {self.line}'
left, right = children
return Tree.make_binary(cat, left, right, op_string, op_string)
|
masashi-y/myccg | depccg/tools/reader.py | from typing import Tuple, List, Iterator, NamedTuple
from depccg.tree import Tree
from depccg.cat import Category
from depccg.lang import get_global_language
from depccg.types import Token
from depccg.grammar import guess_combinator_by_triplet
from depccg.grammar import en, ja
from lxml import etree
import logging
logger = logging.getLogger(__name__)
BINARY_RULES = {
'en': en.apply_binary_rules,
'ja': ja.apply_binary_rules,
}
class ReaderResult(NamedTuple):
name: str
tokens: List[Token]
tree: Tree
class _AutoLineReader(object):
def __init__(self, line):
self.line = line
self.index = 0
self.word_id = -1
self.binary_rules = BINARY_RULES[get_global_language()]
self.tokens = []
def next(self):
end = self.line.find(' ', self.index)
res = self.line[self.index:end]
self.index = end + 1
return res
def check(self, text, offset=0):
if self.line[self.index + offset] != text:
raise RuntimeError(f'failed to parse: {self.line}')
def peek(self):
return self.line[self.index]
def parse(self):
tree = self.next_node()
return tree, self.tokens
@property
def next_node(self):
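        # in the AUTO format, leaves look like "(<L ...>)" and internal nodes like "(<T ...> ... )"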
if self.line[self.index + 2] == 'L':
return self.parse_leaf
elif self.line[self.index + 2] == 'T':
return self.parse_tree
else:
raise RuntimeError(f'failed to parse: {self.line}')
def parse_leaf(self):
self.word_id += 1
self.check('(')
self.check('<', 1)
self.check('L', 2)
self.next()
cat = Category.parse(self.next())
tag1 = self.next() # modified POS tag
tag2 = self.next() # original POS
word = self.next().replace('\\', '')
token = Token(
word=word,
pos=tag1,
tag1=tag1,
tag2=tag2
)
self.tokens.append(token)
if word == '-LRB-':
word = "("
elif word == '-RRB-':
word = ')'
self.next()
return Tree.make_terminal(token, cat)
def parse_tree(self):
self.check('(')
self.check('<', 1)
self.check('T', 2)
self.next()
cat = Category.parse(self.next())
head_is_left = self.next() == '0'
self.next()
children = []
while self.peek() != ')':
children.append(self.next_node())
self.next()
if len(children) == 2:
left, right = children
rule = guess_combinator_by_triplet(
self.binary_rules, cat, left.cat, right.cat
)
return Tree.make_binary(
cat, left, right, rule.op_string, rule.op_symbol, head_is_left
)
elif len(children) == 1:
return Tree.make_unary(cat, children[0])
else:
raise RuntimeError(f'failed to parse: {self.line}')
def read_auto(filename: str) -> Iterator[ReaderResult]:
"""read traditional AUTO file used for CCGBank
English CCGbank contains some unwanted categories such as (S\\NP)\\(S\\NP)[conj].
This reads the treebank while taking care of those categories.
Args:
filename (str): file name string
Yields:
Iterator[ReaderResult]: iterator object containing parse results
"""
__fix = {
'((S[b]\\NP)/NP)/': '(S[b]\\NP)/NP',
}
def _fix(cat):
if cat in __fix:
return __fix[cat]
if cat.endswith(')[conj]') or cat.endswith('][conj]'):
return cat[:-6]
return cat
for line in open(filename):
line = line.strip()
if len(line) == 0:
continue
if line.startswith("ID"):
name = line
else:
line = ' '.join(
_fix(token) for token in line.split(' ')
)
tree, tokens = _AutoLineReader(line).parse()
yield ReaderResult(name, tokens, tree)
def read_xml(filename: str) -> Iterator[ReaderResult]:
"""read XML format file commonly used by C&C.
Args:
filename (str): file name string
Yields:
Iterator[ReaderResult]: iterator object containing parse results
"""
binary_rules = BINARY_RULES[get_global_language()]
def parse(tree):
def rec(node):
attrib = node.attrib
if node.tag == 'rule':
cat = Category.parse(attrib['cat'])
children = [rec(child) for child in node.getchildren()]
if len(children) == 1:
return Tree.make_unary(cat, children[0])
else:
assert len(children) == 2
left, right = children
rule = guess_combinator_by_triplet(
binary_rules, cat, left.cat, right.cat
)
return Tree.make_binary(
cat, left, right, rule.op_string, rule.op_symbol, rule.head_is_left
)
else:
assert node.tag == 'lf'
cat = Category.parse(attrib['cat'])
token = Token(
word=attrib['word'],
pos=attrib['pos'],
entity=attrib['entity'],
lemma=attrib['lemma'],
chunk=attrib['chunk']
)
tokens.append(token)
return Tree.make_terminal(token, cat)
tokens = []
tree = rec(tree)
return tokens, tree
trees = etree.parse(filename).getroot().xpath('ccg')
for tree in trees:
name = '_'.join(f'{k}={v}' for k, v in tree.items())
yield ReaderResult(name, *parse(tree[0]))
def read_jigg_xml(filename: str) -> Iterator[ReaderResult]:
"""read XML format file used by Jigg.
Args:
filename (str): file name string
Yields:
Iterator[ReaderResult]: iterator object containing parse results
"""
binary_rules = BINARY_RULES[get_global_language()]
# TODO
def try_get_surface(token):
if 'word' in token:
return token.word
elif 'surf' in token:
return token.surf
else:
raise RuntimeError(
'the attribute for the token\'s surface form is unknown'
)
def parse(tree, tokens):
def rec(node):
attrib = node.attrib
if 'terminal' not in attrib:
cat = Category.parse(attrib['category'])
children = [
rec(spans[child])
for child in attrib['child'].split(' ')
]
if len(children) == 1:
return Tree.make_unary(cat, children[0])
else:
assert len(children) == 2
left, right = children
rule = guess_combinator_by_triplet(
binary_rules, cat, left.cat, right.cat
)
return Tree.make_binary(
cat, left, right, rule.op_string, rule.op_symbol, rule.head_is_left
)
else:
cat = Category.parse(attrib['category'])
word = try_get_surface(tokens[attrib['terminal']])
return Tree.make_terminal(word, cat)
spans = {span.attrib['id']: span for span in tree.xpath('./span')}
return rec(spans[tree.attrib['root']])
trees = etree.parse(filename).getroot()
sentences = trees[0][0].xpath('sentence')
for sentence in sentences:
token_and_ids = []
for token in sentence.xpath('.//token'):
token_attribs = dict(token.attrib)
token_id = token_attribs['id']
for no_need in ['id', 'start', 'cat']:
if no_need in token_attribs:
del token_attribs[no_need]
token_and_ids.append((token_id, Token(**token_attribs)))
tokens = [token for _, token in token_and_ids]
for ccg in sentence.xpath('./ccg'):
tree = parse(ccg, dict(token_and_ids))
yield ReaderResult(ccg.attrib['id'], tokens, tree)
def _parse_ptb(tree_string: str) -> Tuple[Tree, List[Token]]:
"""parse a S-expression like PTB-format tree
Args:
tree_string (str): S-expression
Raises:
RuntimeError: when parsing fails.
Returns:
Tuple[Tree, List[Token]]: Tree object and tokens
"""
binary_rules = BINARY_RULES[get_global_language()]
assert tree_string.startswith('(ROOT ')
buf = list(reversed(tree_string[6:-1].split(' ')))
stack = []
tokens = []
position = 0
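    # reduce() pops finished constituents off the stack whenever an item ends with ')',
    # building a terminal for each word and a unary/binary node for each completed span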
def reduce(item: str) -> None:
nonlocal position
if item[-1] != ')':
token = Token(word=item)
tokens.append(token)
stack.append(item)
return
reduce(item[:-1])
if isinstance(stack[-1], str):
word = stack.pop()
category = stack.pop()
tree = Tree.make_terminal(word, category)
position += 1
else:
assert isinstance(stack[-1], Tree)
children = []
while isinstance(stack[-1], Tree):
tree = stack.pop()
children.append(tree)
category = stack.pop()
if len(children) == 1:
tree = Tree.make_unary(category, children[0])
elif len(children) == 2:
right, left = children
combinator = guess_combinator_by_triplet(
binary_rules, category, left.cat, right.cat
)
            tree = Tree.make_binary(
                category, left, right,
                combinator.op_string, combinator.op_symbol, combinator.head_is_left
            )
else:
assert False
stack.append(tree)
def rec() -> None:
if len(buf) == 0:
return
item = buf.pop()
assert item[0] == '(' or item[-1] == ')'
if item[0] == '(':
stack.append(Category.parse(item[1:]))
elif item[-1] == ')':
reduce(item)
rec()
try:
rec()
assert len(stack) == 1 and isinstance(stack[0], Tree)
except AssertionError:
raise RuntimeError('Parse failed on an invalid CCG tree')
return stack[0], tokens
def read_ptb(filename: str) -> Iterator[ReaderResult]:
"""parse PTB-formatted file
Args:
filename (str): file name string
Yields:
Iterator[ReaderResult]: iterator object containing parse results
"""
name0 = None
for i, line in enumerate(open(filename)):
line = line.strip()
if len(line) == 0:
continue
if line.startswith("ID"):
name0 = line
else:
tree, tokens = _parse_ptb(line)
name = name0 or f'ID={i}'
yield ReaderResult(name, tokens, tree)
def read_trees_guess_extension(filename: str) -> Iterator[ReaderResult]:
"""guess the file format based on the extension and parse it
Args:
filename (str): file name string
Yields:
Iterator[ReaderResult]: iterator object containing parse results
"""
logger.info(f'reading trees from: {filename}')
if filename.endswith('.jigg.xml'):
logger.info('read it as jigg XML file')
yield from read_jigg_xml(filename)
elif filename.endswith('.xml'):
logger.info('read it as C&C XML file')
yield from read_xml(filename)
elif filename.endswith('.ptb'):
logger.info('read it as PTB format file')
yield from read_ptb(filename)
else:
logger.info('read it as AUTO file')
yield from read_auto(filename)
|
masashi-y/myccg | depccg/tree.py | from depccg.lang import get_global_language
from typing import NamedTuple, List, Iterator, Union
from depccg.cat import Category
from depccg.grammar import guess_combinator_by_triplet, en, ja
from depccg.types import Token
BINARY_RULES = {
'en': en.apply_binary_rules,
'ja': ja.apply_binary_rules,
}
class Tree(object):
def __init__(
self,
cat: Category,
children: Union[List['Tree'], List[Token]],
op_string: str,
op_symbol: str,
head_is_left: bool = True,
) -> None:
assert len({type(child) for child in children}) == 1, \
"children must contain elements of a unique type"
assert not isinstance(children[0], Tree) or len(children) in (1, 2), \
"a tree cannot contain more than two children"
assert not isinstance(children[0], Token) or len(children) == 1, \
"a leaf node cannot contain more than one token object"
self.cat = cat
self.children = children
self.op_string = op_string
self.op_symbol = op_symbol
self.head_is_left = head_is_left
@staticmethod
def make_terminal(
word: Union[str, Token],
cat: Category,
op_string: str = 'lex',
op_symbol: str = '<lex>',
) -> 'Tree':
if isinstance(word, Token):
token = word
else:
token = Token(word=word)
return Tree(cat, [token], op_string, op_symbol)
@staticmethod
def make_binary(
cat: Category,
left: 'Tree',
right: 'Tree',
op_string: str,
op_symbol: str,
head_is_left: bool = True,
) -> 'Tree':
return Tree(cat, [left, right], op_string, op_symbol, head_is_left)
@staticmethod
def make_unary(
cat: Category,
child: 'Tree',
op_string: str = 'lex',
op_symbol: str = '<un>'
) -> 'Tree':
return Tree(cat, [child], op_string, op_symbol)
@staticmethod
def of_nltk_tree(tree) -> 'Tree':
def rec(node):
cat = Category.parse(node.label())
if isinstance(node[0], str):
word = node[0]
return Tree.make_terminal(word, cat)
else:
children = [rec(child) for child in node]
if len(children) == 1:
return Tree.make_unary(cat, children[0])
else:
assert len(children) == 2
left, right = children
rule = guess_combinator_by_triplet(
BINARY_RULES[get_global_language()],
cat, left.cat, right.cat
)
return Tree.make_binary(
cat, left, right, rule.op_string, rule.op_symbol, rule.head_is_left
)
return rec(tree)
def __len__(self):
return len(self.leaves)
@property
def leaves(self) -> List['Tree']:
def rec(node):
if node.is_leaf:
result.append(node)
else:
for child in node.children:
rec(child)
result = []
rec(self)
return result
@property
def tokens(self) -> List[Token]:
return [leaf.children[0] for leaf in self.leaves]
@property
def token(self) -> Token:
assert self.is_leaf, "Tree.token must be called on leaf objects"
return self.children[0]
@property
def child(self):
assert self.is_unary, "This node is not unary node! Please use `Tree.children`"
return self.left_child
@property
def left_child(self):
assert not self.is_leaf, "This node is leaf and does not have any child!"
return self.children[0]
@property
def right_child(self):
assert not self.is_leaf, "This node is leaf and does not have any child!"
assert not self.is_unary, "This node does not have right child!"
return self.children[1]
@property
def is_leaf(self):
return (
self.is_unary and isinstance(self.children[0], Token)
)
@property
def word(self, token_key='word'):
return ' '.join(token[token_key] for token in self.tokens)
@property
def is_unary(self) -> bool:
return len(self.children) == 1
# def __str__(self):
# return auto_of(self)
# def __repr__(self):
# return auto_of(self)
def nltk_tree(self):
from nltk.tree import Tree
def rec(node):
if node.is_leaf:
cat = node.cat
children = [node.word]
else:
cat = node.cat
children = [rec(child) for child in node.children]
return Tree(str(cat), children)
return rec(self)
class ScoredTree(NamedTuple):
tree: Tree
score: float
class ParseResult(NamedTuple):
sentence_index: int
tree_index: int
tree: Tree
tokens: List[Token]
score: float
def iter_parse_results(
nbest_trees: List[List[ScoredTree]],
tagged_doc: List[List[Token]]
) -> Iterator[ParseResult]:
for sentence_index, (trees, tokens) in enumerate(zip(nbest_trees, tagged_doc), 1):
for tree_index, (tree, log_prob) in enumerate(trees, 1):
yield ParseResult(
sentence_index,
tree_index,
tree,
tokens,
log_prob,
)
|
masashi-y/myccg | depccg/allennlp/supertagger.py | from typing import Tuple, List
import numpy
import logging
from itertools import islice
from depccg.allennlp.predictor.supertagger_predictor import SupertaggerPredictor
from depccg.allennlp.dataset.ja_supertagging_dataset import JaSupertaggingDatasetReader
from depccg.allennlp.dataset.supertagging_dataset import TritrainSupertaggingDatasetReader
from depccg.allennlp.dataset.supertagging_dataset import SupertaggingDatasetReader
from depccg.allennlp.models.supertagger import Supertagger
from allennlp.models.archival import load_archive
from depccg.types import ScoringResult
logger = logging.getLogger(__name__)
def lazy_groups_of(iterator, group_size):
return iter(lambda: list(islice(iterator, 0, group_size)), [])
class AllennlpSupertagger(object):
def __init__(self, predictor):
self.predictor = predictor
self.dataset_reader = predictor._dataset_reader
def predict_doc(
self,
splitted,
batchsize=32,
) -> Tuple[List[ScoringResult], List[str]]:
instances = (
self.dataset_reader.text_to_instance(' '.join(sentence))
for sentence in splitted
)
categories = None
scores = []
for batch in lazy_groups_of(instances, batchsize):
for json_dict in self.predictor.predict_batch_instance(batch):
if categories is None:
categories = list(json_dict['categories'])
dep_scores = numpy.array(json_dict['heads']) \
.reshape(json_dict['heads_shape']) \
.astype(numpy.float32)
tag_scores = numpy.array(json_dict['head_tags']) \
.reshape(json_dict['head_tags_shape']) \
.astype(numpy.float32)
scores.append(ScoringResult(tag_scores, dep_scores))
return scores, categories
def load_allennlp_tagger(
model_path: str,
device: int = -1
) -> AllennlpSupertagger:
if device >= 0:
logger.info(f'sending the supertagger to gpu: {device}')
archive = load_archive(model_path, cuda_device=device)
predictor = SupertaggerPredictor.from_archive(
archive, 'supertagger-predictor'
)
return AllennlpSupertagger(predictor)
|
masashi-y/myccg | depccg/grammar/en.py | from typing import Optional, List, TypeVar, Tuple, Set, Dict
from string import ascii_letters
from depccg.cat import Category
from depccg.unification import Unification
from depccg.types import Combinator, CombinatorResult
X = TypeVar('X')
Pair = Tuple[X, X]
def _match(x: Category, y: Category) -> bool:
if x.is_functor and y.is_functor:
return _match(x.left, y.left) and _match(x.right, y.right)
elif x.is_atomic and y.is_atomic:
return (
x.base == y.base
and x.feature.unifies(y.feature)
and y.feature.unifies(x.feature)
)
return False
def _is_modifier(x: Category) -> bool:
return x.is_functor and x.left == x.right
def _is_punct(x: Category) -> bool:
if x.is_functor:
return False
return (
not x.base[0] in ascii_letters
or x.base in ("LRB", "RRB", "LQU", "RQU")
)
def _is_type_raised(x: Category) -> bool:
if x.is_atomic:
return False
return (
x.right.is_functor and x.right.left == x.left
)
def forward_application(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("a/b", "b")
if uni(x, y):
result = y if _is_modifier(x) else uni['a']
return CombinatorResult(
cat=result,
op_string="fa",
op_symbol=">",
head_is_left=True,
)
return None
def backward_application(x: Category, y: Category) -> Optional[CombinatorResult]:
if x == 'S[dcl]' and y == 'S[em]\\S[em]':
result = x
else:
uni = Unification("b", "a\\b")
if uni(x, y):
result = x if _is_modifier(y) else uni['a']
else:
return None
return CombinatorResult(
cat=result,
op_string="ba",
op_symbol="<",
head_is_left=True,
)
def forward_composition(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("a/b", "b/c")
if uni(x, y):
result = y if _is_modifier(x) else uni['a'] / uni['c']
return CombinatorResult(
cat=result,
op_string="fc",
op_symbol=">B",
head_is_left=True,
)
return None
def backward_composition(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("b/c", "a\\b")
if uni(x, y):
if str(uni["b"]) in ("N", "NP"):
return None
result = x if _is_modifier(y) else uni['a'] / uni['c']
return CombinatorResult(
cat=result,
op_string="bx",
op_symbol="<B",
head_is_left=True,
)
return None
def generalized_forward_composition(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("a/b", "(b/c)|d")
if uni(x, y):
result = y if _is_modifier(x) else y.functor(
(uni['a'] / uni['c']), uni['d'])
return CombinatorResult(
cat=result,
op_string="gfc",
op_symbol=">B",
head_is_left=True,
)
return None
def generalized_backward_composition(x: Category, y: Category) -> Optional[CombinatorResult]:
uni = Unification("(b/c)|d", "a/b")
if uni(x, y):
if str(uni["b"]) in ("N", "NP"):
return None
result = x if _is_modifier(y) else x.functor(
(uni['a'] / uni['c']), uni['d'])
return CombinatorResult(
cat=result,
op_string="gbx",
op_symbol="<B",
head_is_left=True,
)
return None
def conjunction(x: Category, y: Category) -> Optional[CombinatorResult]:
if (
not _is_punct(y)
and not _is_type_raised(y)
and x in (",", ";", "conj")
and not (y ^ "NP\\NP")
):
result = y | y
return CombinatorResult(
cat=result,
op_string="conj",
op_symbol="<Φ>",
head_is_left=True,
)
return None
def conjunction2(x: Category, y: Category) -> Optional[CombinatorResult]:
if x == "conj" and y == "NP\\NP":
result = y
return CombinatorResult(
cat=result,
op_string="conj",
op_symbol="<Φ>",
head_is_left=True,
)
return None
def remove_punctuation1(x: Category, y: Category) -> Optional[CombinatorResult]:
if _is_punct(x):
result = y
return CombinatorResult(
cat=result,
op_string="lp",
op_symbol="<lp>",
head_is_left=True,
)
return None
def remove_punctuation2(x: Category, y: Category) -> Optional[CombinatorResult]:
if _is_punct(y):
result = x
return CombinatorResult(
cat=result,
op_string="rp",
op_symbol="<rp>",
head_is_left=True,
)
return None
def remove_punctuation_left(x: Category, y: Category) -> Optional[CombinatorResult]:
if x in ("LQU", "LRB"):
result = y | y
return CombinatorResult(
cat=result,
op_string="lp",
op_symbol="<lp>",
head_is_left=True,
)
return None
def comma_vp_to_adv(x: Category, y: Category) -> Optional[CombinatorResult]:
if x == "," and y in ("S[ng]\\NP", "S[pss]\\NP"):
result = Category.parse("(S\\NP)\\(S\\NP)")
return CombinatorResult(
cat=result,
op_string="lp",
op_symbol="<*>",
head_is_left=True
)
return None
def parenthetical_direct_speech(x: Category, y: Category) -> Optional[CombinatorResult]:
if x == "," and y == "S[dcl]/S[dcl]":
result = Category.parse("(S\\NP)/(S\\NP)")
return CombinatorResult(
cat=result,
op_string="lp",
op_symbol="<*>",
head_is_left=True
)
return None
combinators: List[Combinator] = [
forward_application,
backward_application,
forward_composition,
backward_composition,
generalized_forward_composition,
generalized_backward_composition,
conjunction,
conjunction2,
remove_punctuation1,
remove_punctuation2,
remove_punctuation_left,
comma_vp_to_adv,
parenthetical_direct_speech,
]
def apply_binary_rules(
x: Category,
y: Category,
seen_rules: Optional[Set[Pair[Category]]] = None,
) -> List[CombinatorResult]:
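    # categories are matched with the ignorable 'nb' feature cleared; entries in
    # seen_rules are assumed to be stored with both 'X' and 'nb' cleared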
key = (x.clear_features('nb'), y.clear_features('nb'))
seen_key = (
x.clear_features('X', 'nb'), y.clear_features('X', 'nb')
)
results = []
if seen_rules is None or seen_key in seen_rules:
for combinator in combinators:
result = combinator(*key)
if result is not None:
results.append(result)
return results
def apply_unary_rules(
x: Category,
unary_rules: Dict[Category, List[Category]]
) -> List[CombinatorResult]:
if x not in unary_rules:
return []
results = []
for result in unary_rules[x]:
type_raised = (
x.is_atomic
and x.base in ('NP', 'PP')
and _is_type_raised(result)
)
results.append(
CombinatorResult(
cat=result,
op_string='tr' if type_raised else 'lex',
op_symbol='<un>',
head_is_left=True,
)
)
return results
|
masashi-y/myccg | depccg/printer/ptb.py | from depccg.tree import Tree
def ptb_of(tree: Tree) -> str:
"""PTB-style string of a CCG tree
Args:
tree (Tree): tree object
Returns:
str: tree string in the PTB style
"""
def rec(node):
if node.is_leaf:
cat = node.cat
word = node.word
return f'({cat} {word})'
else:
cat = node.cat
children = ' '.join(rec(child) for child in node.children)
return f'({cat} {children})'
return f'(ROOT {rec(tree)})'
|
masashi-y/myccg | depccg/printer/my_json.py | from typing import Dict, Any
from depccg.tree import Tree
from depccg.cat import Category
def _json_of_category(category: Category) -> Dict[str, Any]:
def rec(node):
if node.is_functor:
return {
'slash': node.slash,
'left': rec(node.left),
'right': rec(node.right)
}
else:
            feature = str(node.feature)
return {
'base': node.base,
'feature': feature if len(feature) > 0 else None
}
return rec(category)
def json_of(
tree: Tree,
full: bool = False
) -> Dict[str, Any]:
"""a tree in Python dict object.
Args:
tree (Tree): tree object
        full (bool): whether to decompose categories into their components, i.e.,
{
'slash': '/',
'left': {'base': 'S', 'feature': 'adj'},
'right': {'base': 'NP', 'feature': None},
},
or just as a string "S[adj]/NP".
Returns:
        Dict[str, Any]: the tree represented as a nested Python dict
"""
def rec(node: Tree) -> Dict[str, Any]:
if node.is_leaf:
res = dict(node.token)
res['cat'] = _json_of_category(node.cat) if full else str(node.cat)
return res
else:
return {
'type': node.op_string,
'cat': _json_of_category(node.cat) if full else str(node.cat),
'children': [rec(child) for child in node.children]
}
return rec(tree)
|
masashi-y/myccg | depccg/semantics/ccg2lambda/parse.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from lxml import etree
from multiprocessing import Pool
from multiprocessing import Lock
import os
import sys
from nltk.sem.logic import LogicalExpressionException
from .ccg2lambda_tools import assign_semantics_to_ccg
from .semantic_index import SemanticIndex
from .logic_parser import lexpr
from .nltk2normal import remove_true
logger = logging.getLogger(__name__)
SEMANTIC_INDEX = None
GOLD_TREES = True
NBEST = 0
SENTENCES = None
kMaxTasksPerChild = None
lock = Lock()
def parse(ccg, templates, nbest=0, ncores=3):
global SEMANTIC_INDEX
global SENTENCES
global NBEST
NBEST = nbest
if not os.path.exists(templates):
print('File does not exist: {0}'.format(templates))
sys.exit(1)
logger.info(templates)
SEMANTIC_INDEX = SemanticIndex(templates)
SENTENCES = ccg.findall('.//sentence')
print(SENTENCES)
sentence_inds = range(len(SENTENCES))
sem_nodes_lists = semantic_parse_sentences(sentence_inds, ncores)
assert len(sem_nodes_lists) == len(SENTENCES), \
'Element mismatch: {0} vs {1}'.format(
len(sem_nodes_lists), len(SENTENCES))
logging.info('Adding XML semantic nodes to sentences...')
formulas_list = []
for sentence, (sem_nodes, orig_formulas) in zip(SENTENCES, sem_nodes_lists):
formulas = []
for formula in orig_formulas:
try:
formulas.append(str(remove_true(lexpr(formula))))
except LogicalExpressionException:
formulas.append(formula)
formulas_list.append(formulas)
sentence.extend(sem_nodes)
logging.info('Finished adding XML semantic nodes to sentences.')
root_xml_str = serialize_tree(ccg)
return root_xml_str, formulas_list
def semantic_parse_sentences(sentence_inds, ncores=1):
if ncores <= 1:
sem_nodes_lists = semantic_parse_sentences_seq(sentence_inds)
else:
sem_nodes_lists = semantic_parse_sentences_par(sentence_inds, ncores)
results = [([etree.fromstring(s) for s in sem_nodes], formulas)
for sem_nodes, formulas in sem_nodes_lists]
return results
def semantic_parse_sentences_par(sentence_inds, ncores=3):
pool = Pool(processes=ncores, maxtasksperchild=kMaxTasksPerChild)
results = pool.map(semantic_parse_sentence, sentence_inds)
pool.close()
pool.join()
return results
def semantic_parse_sentences_seq(sentence_inds):
results = []
for sentence_ind in sentence_inds:
result = semantic_parse_sentence(sentence_ind)
results.append(result)
return results
def semantic_parse_sentence(sentence_ind):
"""
`sentence` is an lxml tree with tokens and ccg nodes.
It returns an lxml semantics node.
"""
global lock
sentence = SENTENCES[sentence_ind]
sem_nodes = []
formulas = []
tree_indices = [int(sentence.get('gold_tree', '0')) + 1]
if NBEST != 1:
tree_indices = get_tree_indices(sentence, NBEST)
for tree_index in tree_indices:
sem_node = etree.Element('semantics')
try:
sem_tree = assign_semantics_to_ccg(
sentence, SEMANTIC_INDEX, tree_index)
filter_attributes(sem_tree)
sem_node.extend(sem_tree.xpath('.//descendant-or-self::span'))
sem_node.set('status', 'success')
sem_node.set('ccg_id',
sentence.xpath('./ccg[{0}]/@id'.format(tree_index))[0])
sem_node.set('root',
sentence.xpath('./ccg[{0}]/@root'.format(tree_index))[0])
formulas.append(sem_tree.attrib['sem'])
except Exception as e:
sem_node.set('status', 'failed')
# from pudb import set_trace; set_trace()
sentence_surf = ' '.join(sentence.xpath('tokens/token/@surf'))
lock.acquire()
logging.error('An error occurred: {0}\nSentence: {1}\nTree XML:\n{2}'.format(
e, sentence_surf,
etree.tostring(sentence, encoding='utf-8', pretty_print=True).decode('utf-8')))
lock.release()
# print('x', end='', file=sys.stdout)
formulas.append('FAILED!')
sem_nodes.append(sem_node)
sem_nodes = [etree.tostring(sem_node) for sem_node in sem_nodes]
return sem_nodes, formulas
def get_tree_indices(sentence, nbest):
num_ccg_trees = int(sentence.xpath('count(./ccg)'))
if nbest < 1:
nbest = num_ccg_trees
return list(range(1, min(nbest, num_ccg_trees) + 1))
keep_attributes = set(['id', 'child', 'sem', 'type'])
def filter_attributes(tree):
if 'coq_type' in tree.attrib and 'child' not in tree.attrib:
sem_type = \
tree.attrib['coq_type'].lstrip('["Parameter ').rstrip('."]')
if sem_type:
tree.attrib['type'] = sem_type
attrib_to_delete = [
a for a in tree.attrib.keys() if a not in keep_attributes]
for a in attrib_to_delete:
del tree.attrib[a]
for child in tree:
filter_attributes(child)
return
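# Illustrative (the value is hypothetical but follows the ccg2lambda convention
# seen elsewhere in this repository): a node carrying
#   coq_type='["Parameter _paper : Entity -> Prop."]'
# and no 'child' attribute ends up with type='_paper : Entity -> Prop', after
# which every attribute other than id/child/sem/type is removed from the subtree.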
def serialize_tree(tree):
tree_str = etree.tostring(
tree, xml_declaration=True, encoding='utf-8', pretty_print=True)
return tree_str
|
masashi-y/myccg | depccg/printer/auto.py | from depccg.tree import Tree
from depccg.utils import normalize, denormalize
def auto_of(tree: Tree) -> str:
"""tree string in auto format commonly used in English CCGBank.
Args:
tree (Tree): tree object
Returns:
str: tree string in the auto format
"""
def rec(node):
if node.is_leaf:
cat = node.cat
word = denormalize(node.word)
pos = node.token.get('pos', 'POS')
return f'(<L {cat} {pos} {pos} {word} {cat}>)'
else:
cat = node.cat
children = ' '.join(rec(child) for child in node.children)
num_children = len(node.children)
head_is_left = 0 if node.head_is_left else 1
return f'(<T {cat} {head_is_left} {num_children}> {children} )'
return rec(tree)
def auto_flattened_of(tree: Tree) -> str:
"""tree string in flattened version of auto format.
This is mainly used for evaluation only.
Args:
tree (Tree): tree object
Returns:
str: tree string in the flattened auto format
"""
def rec(node):
if node.is_leaf:
cat = node.cat
word = normalize(node.word).replace('/', '\\/')
pos = node.token.get('pos', 'POS')
return f'(<L *** {cat} {pos} {word}>\n)'
else:
cat = node.cat
children = '\n'.join(rec(child) for child in node.children)
num_children = len(node.children)
head_is_left = 0 if node.head_is_left else 1
return f'(<T *** {cat} * {head_is_left} {num_children}>\n{children}\n)'
return f'###\n{rec(tree)}\n'
def auto_extended_of(tree: Tree) -> str:
"""tree string in extended version of auto format, used by C&C.
Args:
tree (Tree): tree object
Returns:
str: tree string in the extended auto format
"""
def rec(node):
if node.is_leaf:
cat = node.cat
word = denormalize(node.word)
token = node.token
lemma = token.get('lemma', 'XX')
pos = token.get('pos', 'XX')
entity = token.get('entity', 'XX')
chunk = token.get('chunk', 'XX')
return f'(<L {cat} {word} {lemma} {pos} {entity} {chunk} {cat}>)'
else:
cat = node.cat
children = ' '.join(rec(child) for child in node.children)
num_children = len(node.children)
head_is_left = 0 if node.head_is_left else 1
rule = node.op_string
return f'(<T {cat} {rule} {head_is_left} {num_children}> {children} )'
return rec(tree)
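# --- Hedged usage sketch (illustrative, not part of the original module) -----
# The Tree/Token/Category constructors below mirror tests/test_printer.py; the
# token values are made up for illustration.
if __name__ == '__main__':
    from depccg.cat import Category
    from depccg.types import Token

    leaf = Tree.make_terminal(
        Token(word='paper', pos='NN', entity='O', lemma='paper', chunk='XX'),
        Category.parse('N'),
    )
    # A single leaf renders as one "<L ...>" node in each variant:
    print(auto_of(leaf))           # (<L N NN NN paper N>)
    print(auto_extended_of(leaf))  # (<L N paper paper NN O XX N>)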
|
masashi-y/myccg | depccg/printer/deriv.py | <reponame>masashi-y/myccg
from io import StringIO
from depccg.tree import Tree
def deriv_of(tree: Tree) -> str:
"""ascii art-like derivation tree string.
Args:
tree (Tree): tree object
Returns:
str: derivation tree string
"""
catstr = ''
wordstr = ''
for leaf in tree.leaves:
str_cat = str(leaf.cat)
str_word = leaf.word
nextlen = 2 + max(len(str_word), len(str_cat))
lcatlen = (nextlen - len(str_cat)) // 2
rcatlen = lcatlen + (nextlen - len(str_cat)) % 2
catstr += ' ' * lcatlen + str_cat + ' ' * rcatlen
lwordlen = (nextlen - len(str_word)) // 2
rwordlen = lwordlen + (nextlen - len(str_word)) % 2
wordstr += ' ' * lwordlen + str_word + ' ' * rwordlen
def rec(lwidth, node):
rwidth = lwidth
if node.is_leaf:
return max(
rwidth,
2 + lwidth + len(str(node.cat)),
2 + lwidth + len(node.word)
)
else:
for child in node.children:
rwidth = max(rwidth, rec(rwidth, child))
print(
lwidth * ' ' + (rwidth - lwidth) * '-' + str(node.op_symbol),
file=output
)
result = str(node.cat)
pad_len = (rwidth - lwidth - len(result)) // 2 + lwidth
print(pad_len * ' ' + result, file=output)
return rwidth
with StringIO() as output:
print(catstr.rstrip(), file=output)
print(wordstr.rstrip(), file=output)
rec(0, tree)
return output.getvalue()
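# Illustrative: deriv_of prints the leaf categories and words on the first two
# lines, then draws one dashed rule line (labelled with the combinator symbol)
# per application, ending with the root category; see test_en_deriv in
# tests/test_printer.py for a full expected rendering.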
|
masashi-y/myccg | tests/test_unification.py | <reponame>masashi-y/myccg
from depccg.cat import Category
from depccg.unification import Unification
import pytest
def test_basic():
uni = Unification("(((a/b)/c)/d)/e", "f")
x = Category.parse("(((a/b)/c)/d)/e")
y = Category.parse("f")
assert uni(x, y)
assert uni["a"] == "a"
assert uni["b"] == "b"
assert uni["c"] == "c"
assert uni["d"] == "d"
assert uni["e"] == "e"
assert uni["f"] == "f"
with pytest.raises(RuntimeError, match="cannot use the same *"):
uni(x, y)
def test_deep():
uni = Unification("a/b", "c")
x = Category.parse("(((a/b)/c)/d)/e")
y = Category.parse("f")
assert uni(x, y)
assert uni["a"] == "((a/b)/c)/d"
assert uni["b"] == "e"
assert uni["c"] == "f"
def test_english():
uni = Unification("a/b", "b")
x = Category.parse("S[X]/NP[X]")
y = Category.parse("NP[mod]")
assert uni(x, y)
assert uni["a"] == Category.parse('S[mod]')
def test_japanese():
uni = Unification("(a\\b)/c", "c")
x = Category.parse(
"(S[mod=nm,form=base,fin=f]\\S[mod=nm,form=base,fin=f])/S[mod=nm,form=base,fin=f]")
y = Category.parse("S[mod=nm,form=base,fin=f]")
assert uni(x, y)
assert uni["a"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["b"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["c"] == Category.parse("S[mod=nm,form=base,fin=f]")
# three variables
uni = Unification("(a\\b)/c", "c")
x = Category.parse(
"(S[mod=X1,form=X2,fin=X3]\\S[mod=X1,form=X2,fin=X3])/S[mod=X1,form=X2,fin=X3]")
y = Category.parse("S[mod=nm,form=base,fin=f]")
assert uni(x, y)
assert uni["a"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["b"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["c"] == Category.parse("S[mod=nm,form=base,fin=f]")
# only two variables
uni = Unification("(a\\b)/c", "c")
x = Category.parse(
"(S[mod=X1,form=X2,fin=f]\\S[mod=X1,form=X2,fin=f])/S[mod=X1,form=X2,fin=f]")
y = Category.parse("S[mod=nm,form=base,fin=f]")
assert uni(x, y)
assert uni["a"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["b"] == Category.parse("S[mod=nm,form=base,fin=f]")
assert uni["c"] == Category.parse("S[mod=nm,form=base,fin=f]")
|
masashi-y/myccg | depccg/tools/diff.py | <filename>depccg/tools/diff.py
import argparse
import logging
from .reader import read_trees_guess_extension
from depccg.printer.html import (
_mathml_subtree, _MATHML_SUBTREE_TERMINAL, _mathml_cat, _MATHML_SUBTREE_NONTERMINAL, _MATHML_MAIN
)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def diff(tree1, tree2):
def rec(node1, node2):
nodes_are_different = False
if node1.cat != node2.cat:
nodes_are_different = True
elif len(node1.children) != len(node2.children):
nodes_are_different = True
elif any(child1.word != child2.word
for child1, child2 in zip(node1.children, node2.children)):
nodes_are_different = True
if nodes_are_different:
return (nodes_are_different,
_mathml_subtree(node1, bgcolor='peachpuff'),
_mathml_subtree(node2, bgcolor='peachpuff'))
elif node1.is_leaf:
assert node2.is_leaf
node1_str = _MATHML_SUBTREE_TERMINAL.format(
node1.word, _mathml_cat(str(node1.cat)))
node2_str = _MATHML_SUBTREE_TERMINAL.format(
node2.word, _mathml_cat(str(node2.cat)))
return nodes_are_different, node1_str, node2_str
else:
children_are_different, node1_children, node2_children = \
zip(*[rec(child1, child2)
for child1, child2 in zip(node1.children, node2.children)])
node1_children = ''.join(node1_children)
node2_children = ''.join(node2_children)
node1_str = _MATHML_SUBTREE_NONTERMINAL.format(
node1_children, _mathml_cat(str(node1.cat)), node1.op_string, '')
node2_str = _MATHML_SUBTREE_NONTERMINAL.format(
node2_children, _mathml_cat(str(node2.cat)), node2.op_string, '')
nodes_are_different = any(children_are_different)
return nodes_are_different, node1_str, node2_str
return rec(tree1, tree2)
def to_diffs(tree_pairs, max_output_num, sampling):
diffs = [(i, name1, name2, tree1, tree2) + diff(tree1, tree2)
for i, ((name1, tree1), (name2, tree2)) in enumerate(tree_pairs)]
diffs = [(i, name1, name2, tree1, tree2, tree1_str, tree2_str)
for (i, name1, name2, tree1, tree2, trees_are_different, tree1_str, tree2_str) in diffs
if trees_are_different]
logger.info(f'The number of input tree pairs: {len(tree_pairs)}')
logger.info(f'The number of different tree pairs: {len(diffs)}')
if len(diffs) > max_output_num:
logger.info(
f'The number of different tree pairs exceeds --max-output-num: {max_output_num}.')
logger.info(f'sample the subset of pairs using "{sampling}" method.')
if sampling == 'head':
diffs = diffs[:max_output_num]
elif sampling == 'tail':
diffs = diffs[-max_output_num:]
elif sampling == 'random':
import random
indices = list(range(len(diffs)))
indices = sorted(random.sample(indices, max_output_num))
diffs = [diffs[i] for i in indices]
else:
assert False
result = ''
for (i, name1, name2, tree1, tree2, tree1_str, tree2_str) in diffs:
if i > 0:
result += '<hr>'
result += f'<p><strong>{name1}</strong> ID={i}: {tree1.word}</p>'
result += f'<math xmlns="http://www.w3.org/1998/Math/MathML">{tree1_str}</math>'
result += f'<p><strong>{name2}</strong> ID={i}: {tree2.word}</p>'
result += f'<math xmlns="http://www.w3.org/1998/Math/MathML">{tree2_str}</math>'
return _MATHML_MAIN.format(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
'show diffs between the structures of two CCG trees')
parser.add_argument('file1')
parser.add_argument('file2', nargs='?', default=None)
parser.add_argument('--max-output-num', default=50, type=int)
parser.add_argument('--sampling', default='head',
choices=['head', 'tail', 'random'])
args = parser.parse_args()
if args.file2:
file1_trees = [(args.file1, tree)
for _, _, tree in read_trees_guess_extension(args.file1)]
file2_trees = [(args.file2, tree)
for _, _, tree in read_trees_guess_extension(args.file2)]
assert len(file1_trees) == len(file2_trees)
tree_pairs = list(zip(file1_trees, file2_trees))
else:
file1_trees = ((args.file1, tree)
for _, tree in read_trees_guess_extension(args.file1))
tree_pairs = list(zip(file1_trees, file1_trees))
print(to_diffs(tree_pairs, args.max_output_num, args.sampling))
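# Illustrative invocation (file names are hypothetical):
#   python -m depccg.tools.diff gold.auto predicted.auto > diff.html
# The resulting HTML highlights (in peachpuff) the subtrees where the two
# parses diverge.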
|
masashi-y/myccg | depccg/allennlp/predictor/parser_predictor.py | from typing import Dict, Any, List, Optional
import numpy
from allennlp.common.util import JsonDict
from allennlp.data import DatasetReader
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
import depccg.parsing
from depccg.types import ScoringResult, Token
from depccg.allennlp.predictor.supertagger_predictor import SupertaggerPredictor
from depccg.allennlp.utils import read_params
from depccg.cat import Category
from depccg.printer.my_json import json_of
@Predictor.register('parser-predictor')
class ParserPredictor(SupertaggerPredictor):
def __init__(
self,
model: Model,
dataset_reader: DatasetReader,
grammar_json_path: str,
disable_category_dictionary: bool = False,
disable_seen_rules: bool = False,
parsing_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__(model, dataset_reader)
(
self.apply_binary_rules,
self.apply_unary_rules,
self.category_dict,
self.root_categories,
) = read_params(
grammar_json_path,
disable_category_dictionary,
disable_seen_rules
)
self.parsing_kwargs = parsing_kwargs or {}
def _make_json(self, output_dicts: List[Dict[str, Any]]) -> List[JsonDict]:
categories = None
score_results = []
doc = []
for output_dict in super()._make_json(output_dicts):
if categories is None:
categories = [
Category.parse(category)
for category in output_dict['categories']
]
tokens = [
Token.of_word(word)
for word in output_dict['words'].split(' ')
]
doc.append(tokens)
dep_scores = numpy.array(output_dict['heads']) \
.reshape(output_dict['heads_shape']) \
.astype(numpy.float32)
tag_scores = numpy.array(output_dict['head_tags']) \
.reshape(output_dict['head_tags_shape']) \
.astype(numpy.float32)
score_results.append(ScoringResult(tag_scores, dep_scores))
if self.category_dict is not None:
doc, score_results = depccg.parsing.apply_category_filters(
doc,
score_results,
categories,
self.category_dict,
)
results = depccg.parsing.run(
doc,
score_results,
categories,
self.root_categories,
self.apply_binary_rules,
self.apply_unary_rules,
**self.parsing_kwargs,
)
for output_dict, trees in zip(output_dicts, results):
output_dict['trees'] = []
for tree, log_prob in trees:
tree_dict = json_of(tree)
tree_dict['log_prob'] = log_prob
output_dict['trees'].append(tree_dict)
return output_dicts
|
masashi-y/myccg | depccg/argparse.py | <filename>depccg/argparse.py
import argparse
from depccg.instance_models import download, AVAILABLE_MODEL_VARIANTS
from depccg.annotator import (
english_annotator, japanese_annotator
)
def add_common_parser_arguments(parser, main_fun):
parser.add_argument(
'-c',
'--config',
help='json config file specifying the set of unary rules used, etc.')
parser.add_argument(
'-m',
'--model',
help='path to model directory')
parser.add_argument(
'-p',
'--num-processes',
default=4,
type=int,
help='number of processes used for parsing')
parser.add_argument(
'-i',
'--input',
default=None,
help='a file with tokenized sentences in each line')
# parser.add_argument(
# '-w',
# '--weights',
# default=None,
# help='a file that contains weights (p_tag, p_dep)')
parser.add_argument(
'--gpu',
type=int,
default=-1,
help='specify gpu id')
parser.add_argument(
'--batchsize',
type=int,
default=32,
help='batchsize in supertagger')
parser.add_argument(
'--nbest',
type=int,
default=1,
help='output N best parses')
parser.add_argument(
'-I',
'--input-format',
default='raw',
choices=['raw', 'POSandNERtagged'],
# choices=['raw', 'POSandNERtagged', 'json', 'partial'],
help='input format')
parser.add_argument(
'--unary-penalty',
default=0.1,
type=float,
help='penalty to use a unary rule')
parser.add_argument(
'--beta',
default=0.00001,
type=float,
help='parameter used to filter categories with lower probabilities')
parser.add_argument(
'--pruning-size',
default=50,
type=int,
help='use only the most probable supertags per word')
parser.add_argument(
'--disable-beta',
action='store_true',
help='disable the use of the beta value')
parser.add_argument(
'--disable-category-dictionary',
action='store_true',
help=('disable a category dictionary that maps'
' words to most likely supertags'))
parser.add_argument(
'--disable-seen-rules',
action='store_true',
        help='disable the filter restricting binary rule applications to category pairs seen in the training data')
parser.add_argument(
'--max-length',
default=250,
type=int,
help=('give up parsing a sentence that contains'
' more words than this value'))
parser.add_argument(
'--max-step',
default=10000000,
type=int,
help=('give up parsing when the number of times'
' of popping agenda items exceeds this value'))
parser.add_argument(
'--semantic-templates',
help='semantic templates used in "ccg2lambda" format output')
parser.add_argument(
'--silent',
action='store_true')
parser.set_defaults(func=main_fun)
subparsers = parser.add_subparsers()
download_parser = subparsers.add_parser('download')
download_parser.add_argument(
'VARIANT',
nargs='?',
default=None,
choices=AVAILABLE_MODEL_VARIANTS[parser.get_default('lang')])
download_parser.set_defaults(
func=lambda args: download(args.lang, args.VARIANT)
)
def parse_args(main_fun):
parser = argparse.ArgumentParser('depccg')
parser.set_defaults(func=lambda _: parser.print_help())
subparsers = parser.add_subparsers()
english_parser = subparsers.add_parser('en')
english_parser.set_defaults(lang='en')
add_common_parser_arguments(english_parser, main_fun)
english_parser.add_argument(
'-a',
'--annotator',
default=None,
help='annotate POS, named entity, and lemmas using this library',
choices=english_annotator.keys())
english_parser.add_argument(
'-f',
'--format',
default='auto',
choices=[
'auto', 'auto_extended', 'deriv', 'xml',
'conll', 'html', 'prolog', 'jigg_xml', 'ptb',
'ccg2lambda', 'jigg_xml_ccg2lambda', 'json'
],
help='output format')
english_parser.add_argument(
'--root-cats',
default='S[dcl]|S[wq]|S[q]|S[qem]|NP',
help=('"|" separated list of categories '
'allowed to be at the root of a tree.')
    )
english_parser.add_argument(
'--tokenize',
action='store_true',
help='tokenize input sentences')
japanese_parser = subparsers.add_parser('ja')
japanese_parser.set_defaults(lang='ja')
add_common_parser_arguments(japanese_parser, main_fun)
japanese_parser.add_argument(
'-a',
'--annotator',
default='janome',
help=('annotate POS, named entity,'
' and lemmas using this library'),
choices=japanese_annotator.keys())
japanese_parser.add_argument(
'-f',
'--format',
default='ja',
choices=[
'auto', 'deriv', 'ja', 'conll',
'html', 'jigg_xml', 'ptb', 'ccg2lambda',
'jigg_xml_ccg2lambda', 'json', 'prolog'
],
help='output format')
japanese_parser.add_argument(
'--root-cats',
default=(
'NP[case=nc,mod=nm,fin=f]|'
'NP[case=nc,mod=nm,fin=t]|'
'S[mod=nm,form=attr,fin=t]|'
'S[mod=nm,form=base,fin=f]|'
'S[mod=nm,form=base,fin=t]|'
'S[mod=nm,form=cont,fin=f]|'
'S[mod=nm,form=cont,fin=t]|'
'S[mod=nm,form=da,fin=f]|'
'S[mod=nm,form=da,fin=t]|'
'S[mod=nm,form=hyp,fin=t]|'
'S[mod=nm,form=imp,fin=f]|'
'S[mod=nm,form=imp,fin=t]|'
'S[mod=nm,form=r,fin=t]|'
'S[mod=nm,form=s,fin=t]|'
'S[mod=nm,form=stem,fin=f]|'
'S[mod=nm,form=stem,fin=t]'
),
help=('"|" separated list of categories '
'allowed to be at the root of a tree.')
)
japanese_parser.add_argument(
'--pre-tokenized',
dest='tokenize',
action='store_false',
help=('the input is pre-tokenized'
' (for running parsing experiments etc.)'))
args = parser.parse_args()
args.func(args)
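# Illustrative command lines (run through depccg/__main__.py, which passes its
# main function to parse_args; the input file name is hypothetical):
#   python -m depccg en --format deriv --input sentences.txt
#   python -m depccg en download elmo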
|
masashi-y/myccg | tests/grammar/test_en.py | <reponame>masashi-y/myccg
import pytest
from depccg.cat import Category
from depccg.grammar import en
observed_binary_rules = [
tuple(Category.parse(category) for category in text.strip().split(' '))
for text in open('tests/grammar/rules.txt')
]
@pytest.mark.parametrize("x, y, expect", observed_binary_rules)
def test_binary_rule(x, y, expect):
assert expect in [result.cat for result in en.apply_binary_rules(x, y)]
|
masashi-y/myccg | depccg/printer/__init__.py | <gh_stars>10-100
from typing import List, Optional, Union
import json
from io import StringIO
from lxml import etree
from depccg.tree import ScoredTree
from depccg.instance_models import SEMANTIC_TEMPLATES
from depccg.semantics.ccg2lambda import parse as ccg2lambda
from depccg.lang import get_global_language
from depccg.printer.html import to_mathml
from depccg.printer.jigg_xml import to_jigg_xml
from depccg.printer.prolog import to_prolog_en, to_prolog_ja
from depccg.printer.xml import xml_of
from depccg.printer.ja import ja_of
from depccg.printer.conll import conll_of
from depccg.printer.my_json import json_of
from depccg.printer.deriv import deriv_of
from depccg.printer.ptb import ptb_of
from depccg.printer.auto import auto_of, auto_extended_of
def _process_xml(xml_node):
return etree \
.tostring(xml_node, encoding='utf-8', pretty_print=True) \
.decode('utf-8')
_formatters = {
'conll': conll_of,
'auto': auto_of,
'auto_extended': auto_extended_of,
'ja': ja_of,
'deriv': deriv_of,
'ptb': ptb_of,
}
def to_string(
nbest_trees: List[Union[List[ScoredTree], ScoredTree]],
format: str = 'auto',
semantic_templates: Optional[str] = None,
) -> str:
"""convert parsing results into one string representation
Args:
nbest_trees (List[Union[List[ScoredTree], ScoredTree]]):
parsed results for multiple sentences
format (str, optional): format type. Defaults to 'auto'.
available options are: 'auto', 'auto_extended', 'conll', 'deriv', 'html', 'ja',
'json', 'ptb', 'jigg_xml', 'jigg_xml_ccg2lambda', 'ccg2lambda', 'prolog'.
semantic_templates (Optional[str], optional): semantic template used for
obtaining semantic formula using ccg2lambda. Defaults to None.
Raises:
KeyError: if the format option is not supported, this error occurs.
Returns:
str: string in the target format
"""
if isinstance(nbest_trees[0], ScoredTree):
nbest_trees = [nbest_trees]
elif not (
isinstance(nbest_trees[0], list)
and isinstance(nbest_trees[0][0], ScoredTree)
):
raise RuntimeError('invalid argument type for stringifying trees')
if format in ('jigg_xml_ccg2lambda', 'ccg2lambda'):
lang = get_global_language()
templates = semantic_templates or SEMANTIC_TEMPLATES.get(lang)
assert templates is not None, \
f'semantic_templates must be specified for language: {lang}'
if format == 'conll':
header = '# ID={}\n# log probability={:.8f}'
else:
header = 'ID={}, log probability={:.8f}'
if format == 'xml':
return _process_xml(xml_of(nbest_trees))
elif format == 'jigg_xml':
return _process_xml(
to_jigg_xml(
nbest_trees,
use_symbol=get_global_language() == 'ja',
)
)
elif format == 'jigg_xml_ccg2lambda':
jigg_xml = to_jigg_xml(nbest_trees)
result_xml_str, _ = ccg2lambda.parse(
jigg_xml, str(templates), ncores=1
)
return result_xml_str.decode('utf-8')
elif format == 'prolog': # print end=''
lang = get_global_language()
if lang == 'en':
return to_prolog_en(nbest_trees)
elif lang == 'ja':
return to_prolog_ja(nbest_trees)
else:
raise KeyError(
f'prolog format is not supported for language {lang}'
)
elif format == 'html':
return to_mathml(nbest_trees)
elif format == 'json':
results = {}
for sentence_index, trees in enumerate(nbest_trees, 1):
results[sentence_index] = []
for tree, log_prob in trees:
tree_dict = json_of(tree)
tree_dict['log_prob'] = log_prob
results[sentence_index].append(tree_dict)
return json.dumps(results, indent=4)
elif format == 'ccg2lambda':
with StringIO() as file:
jigg_xml = to_jigg_xml(nbest_trees)
_, formulas_list = ccg2lambda.parse(
jigg_xml, str(templates), ncores=1
)
for sentence_index, (trees, formulas) in enumerate(zip(nbest_trees, formulas_list), 1):
for (tree, log_prob), formula in zip(trees, formulas):
print(header.format(sentence_index, log_prob), file=file)
print(formula, file=file)
return file.getvalue()
try:
formatter = _formatters[format]
except KeyError:
raise KeyError(
f'unsupported format type: {format}'
)
with StringIO() as file:
for sentence_index, trees in enumerate(nbest_trees, 1):
for tree, log_prob in trees:
print(header.format(sentence_index, log_prob), file=file)
print(formatter(tree), file=file)
return file.getvalue()
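# Illustrative call (tree construction elided; see tests/test_printer.py for a
# complete Tree/ScoredTree example):
#   to_string([ScoredTree(tree, -0.05)], format='deriv')
# returns a single string covering all sentences, each tree preceded by an
# "ID=<n>, log probability=..." header ('#'-prefixed comment lines for 'conll';
# the XML-, HTML-, JSON- and Prolog-style formats are emitted without it).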
def print_(
nbest_trees: List[Union[List[ScoredTree], ScoredTree]],
format: str = 'auto',
semantic_templates: Optional[str] = None,
**kwargs,
) -> None:
"""print parsing results into one string representation
Args:
nbest_trees (List[Union[List[ScoredTree], ScoredTree]]):
parsed results for multiple sentences
format (str, optional): format type. Defaults to 'auto'.
available options are: 'auto', 'auto_extended', 'conll', 'deriv', 'html', 'ja',
'json', 'ptb', 'jigg_xml', 'jigg_xml_ccg2lambda', 'ccg2lambda', 'prolog'.
semantic_templates (Optional[str], optional): semantic template used for
obtaining semantic formula using ccg2lambda. Defaults to None.
other keyword arguments for Python 'print' function are also available.
Raises:
KeyError: if the format option is not supported, this error occurs.
"""
print(
to_string(
nbest_trees,
format=format,
semantic_templates=semantic_templates,
),
**kwargs,
)
|
masashi-y/myccg | depccg/instance_models.py | <reponame>masashi-y/myccg
from typing import Dict, Tuple, Optional
import tarfile
import logging
from pathlib import Path
from collections import defaultdict
from depccg.types import GrammarConfig, ModelConfig
from depccg.chainer.supertagger import load_chainer_tagger
from depccg.allennlp.supertagger import load_allennlp_tagger
from depccg.lang import get_global_language
from depccg.grammar import en, ja
logger = logging.getLogger(__name__)
MODEL_DIRECTORY = Path(__file__).parent / 'models'
SEMANTIC_TEMPLATES: Dict[str, Path] = {
'en': MODEL_DIRECTORY / 'semantic_templates_en_event.yaml',
'ja': MODEL_DIRECTORY / 'semantic_templates_ja_event.yaml'
}
GRAMMARS: Dict[str, GrammarConfig] = {
'en': GrammarConfig(
en.apply_binary_rules,
en.apply_unary_rules,
),
'ja': GrammarConfig(
ja.apply_binary_rules,
ja.apply_unary_rules,
)
}
MODELS: Dict[str, ModelConfig] = {
'en': ModelConfig(
'chainer',
'tri_headfirst',
'1mxl1HU99iEQcUYhWhvkowbE4WOH0UKxv',
MODEL_DIRECTORY / 'config_en.jsonnet',
SEMANTIC_TEMPLATES['en'],
),
'en[elmo]': ModelConfig(
'allennlp',
'lstm_parser_elmo',
'1r2EsAtg47gFXDwMjmDdIw69akRo8oBXh',
MODEL_DIRECTORY / 'config_en.jsonnet',
SEMANTIC_TEMPLATES['en'],
),
'en[rebank]': ModelConfig(
'allennlp',
'lstm_parser_char_rebanking',
'1N5B4t40OEUxPyWZWwpO02MEqDyWQVYUa',
MODEL_DIRECTORY / 'config_rebank.jsonnet',
SEMANTIC_TEMPLATES['en'],
),
'en[elmo_rebank]': ModelConfig(
'allennlp',
'lstm_parser_elmo_rebanking',
'1deyCjSgCuD16WkEhOL3IXEfQBfARh_ll',
MODEL_DIRECTORY / 'config_rebank.jsonnet',
SEMANTIC_TEMPLATES['en'],
),
'ja': ModelConfig(
'chainer',
'ja_headfinal',
'1bblQ6FYugXtgNNKnbCYgNfnQRkBATSY3',
MODEL_DIRECTORY / 'config_ja.jsonnet',
SEMANTIC_TEMPLATES['ja'],
)
}
def _lang_and_variant(model: str):
if '[' in model and ']' in model:
assert model[-1] == ']'
return model[:-1].split('[')
return model, None
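# Illustrative: _lang_and_variant('en[elmo]') -> ['en', 'elmo'] and
# _lang_and_variant('ja') -> ('ja', None); both unpack into (lang, variant)
# at the call site below.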
def _get_model_name(variant: Optional[str]) -> str:
lang = get_global_language()
if variant is None:
return lang
return f'{lang}[{variant}]'
AVAILABLE_MODEL_VARIANTS = defaultdict(list)
for model in MODELS:
lang, variant = _lang_and_variant(model)
AVAILABLE_MODEL_VARIANTS[lang].append(variant)
def download(lang: str, variant: Optional[str]) -> None:
config = MODELS[f'{lang}[{variant}]' if variant else lang]
from google_drive_downloader import GoogleDriveDownloader as gdd
logging.info(f'start downloading from {config.url}')
filename = (MODEL_DIRECTORY / config.name).with_suffix('.tar.gz')
gdd.download_file_from_google_drive(
file_id=config.url,
dest_path=filename,
unzip=False,
overwrite=True
)
if config.framework == 'chainer':
logging.info('extracting files')
        with tarfile.open(filename) as archive:
            archive.extractall(MODEL_DIRECTORY)
logging.info('finished')
def load_model_directory(
variant: Optional[str]
) -> Tuple[Path, ModelConfig]:
config = MODELS[_get_model_name(variant)]
model_path = MODEL_DIRECTORY / config.name
if config.framework == 'allennlp':
model_path = model_path.with_suffix('.tar.gz')
if not model_path.exists():
if variant is None:
variant = ''
lang = get_global_language()
raise RuntimeError(
('please download the model by doing '
f'\'depccg_{lang} download {variant}\'.')
)
return model_path, config
def model_is_available(model_name: str) -> bool:
return model_name in MODELS.keys()
def load_model(variant: Optional[str], device: int = -1):
model_path, config = load_model_directory(variant)
if config.framework == 'allennlp':
supertagger = load_allennlp_tagger(model_path, device)
elif config.framework == 'chainer':
supertagger = load_chainer_tagger(model_path, device)
else:
lang = get_global_language()
raise KeyError(
('unsupported model for language '
f'({lang}): {variant}')
)
return supertagger, config
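# Illustrative usage (assumes the model files have already been downloaded and
# that the global language has been set, e.g. via
# depccg.lang.set_global_language_to('en')):
#   supertagger, config = load_model(None)      # default 'en' model (chainer)
#   supertagger, config = load_model('elmo')    # the 'en[elmo]' allennlp model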
|
masashi-y/myccg | depccg/printer/conll.py | <gh_stars>10-100
from typing import List
from depccg.utils import denormalize
from depccg.tree import Tree
def _resolve_dependencies(tree: Tree) -> List[int]:
results = []
def rec(node: Tree) -> int:
if node.is_leaf:
index = len(results)
results.append(-1)
return index
else:
if node.is_unary:
return rec(node.child)
else:
left_head = rec(node.left_child)
right_head = rec(node.right_child)
if node.head_is_left:
results[right_head] = left_head
return left_head
else:
results[left_head] = right_head
return right_head
rec(tree)
assert len(
[dependency for dependency in results if dependency == -1]
) == 1
return results
def conll_of(tree: Tree) -> str:
"""CoNLL-like format string where dependency relations are constructed by
nodes' head_is_left property.
Args:
tree (Tree): tree object
tokens (Optional[List[Token]], optional): list of token objects. Defaults to None.
Returns:
str: tree string in the CoNLL format
"""
stack = []
counter = 1
def rec(node):
nonlocal stack, counter
if node.is_leaf:
cat = node.cat
word = denormalize(node.word)
token = node.token
lemma = token.get('lemma', '_')
pos = token.get('pos', '_')
stack.append(f'(<L {cat} {pos} {pos} {word} {cat}>)')
subtree = ' '.join(stack)
line = '\t'.join(
(
str(counter),
word,
lemma,
pos,
pos,
'_',
str(dependencies[counter - 1] + 1),
str(cat),
'_',
subtree
)
)
stack = []
counter += 1
return line
else:
cat = node.cat
num_children = len(node.children)
head_is_left = 0 if node.head_is_left else 1
stack.append(f'(<T {cat} {head_is_left} {num_children}>')
children = '\n'.join(rec(child) for child in node.children) + ' )'
return children
dependencies = _resolve_dependencies(tree)
return rec(tree)
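# Illustrative row layout (one tab-separated line per token):
#   index  word  lemma  pos  pos  _  head  category  _  partial AUTO subtree
# where head is 1-based and 0 marks the root; see test_en_conll in
# tests/test_printer.py for a full example.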
|
masashi-y/myccg | depccg/allennlp/utils.py | <reponame>masashi-y/myccg
from functools import partial
from collections import defaultdict
from allennlp.common.params import Params
from depccg.lang import get_global_language
from depccg.instance_models import GRAMMARS
from depccg.cat import Category
def read_params(
param_path: str,
disable_category_dictionary: bool = False,
disable_seen_rules: bool = False,
):
lang = get_global_language()
params = Params.from_file(param_path)
unary_rules = defaultdict(list)
for key, value in params.pop('unary_rules'):
unary_rules[Category.parse(key)].append(Category.parse(value))
if disable_category_dictionary:
category_dict = None
else:
category_dict = {
word: [Category.parse(cat) for cat in cats]
for word, cats in params.pop('cat_dict').items()
}
if disable_seen_rules:
seen_rules = None
else:
seen_rules = {
(Category.parse(x).clear_features('X', 'nb'),
Category.parse(y).clear_features('X', 'nb'))
for x, y in params.pop('seen_rules')
}
if len(seen_rules) == 0:
seen_rules = None
try:
apply_binary_rules = partial(
GRAMMARS[lang].apply_binary_rules,
seen_rules=seen_rules
)
apply_unary_rules = partial(
GRAMMARS[lang].apply_unary_rules,
unary_rules=unary_rules
)
except KeyError:
        raise KeyError(f'unsupported language: {lang}')
root_categories = [
Category.parse(category)
for category in params.pop('targets')
]
return (
apply_binary_rules,
apply_unary_rules,
category_dict,
root_categories
)
|
masashi-y/myccg | depccg/__main__.py | <reponame>masashi-y/myccg
import sys
import logging
import depccg.parsing
from depccg.types import Token
from depccg.cat import Category
from depccg.printer import print_
from depccg.instance_models import load_model
from depccg.argparse import parse_args
from depccg.lang import set_global_language_to
from depccg.annotator import (
english_annotator, japanese_annotator, annotate_XX
)
from depccg.allennlp.utils import read_params
logger = logging.getLogger(__name__)
def get_annotator(args):
if args.lang == 'en':
if (
args.format in ['ccg2lambda', 'jigg_xml_ccg2lambda']
and args.annotator is None
):
raise RuntimeError(
                ('Specify --annotator argument when '
f'using "{args.format}" output format')
)
return english_annotator.get(args.annotator, annotate_XX)
elif args.lang == 'ja':
if (
args.format in ['ccg2lambda', 'jigg_xml_ccg2lambda']
and not args.tokenize
):
raise RuntimeError(
('Cannot specify --pre-tokenized '
                 f'argument when using "{args.format}" output format')
)
if args.tokenize:
return japanese_annotator[args.annotator]
return annotate_XX
raise RuntimeError(f'unsupported language: {args.lang}')
def main(args):
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.CRITICAL if args.silent else logging.INFO
)
set_global_language_to(args.lang)
annotator_fun = get_annotator(args)
supertagger, config = load_model(args.model, args.gpu)
(
apply_binary_rules,
apply_unary_rules,
category_dict,
_
    ) = read_params(
        config.config,
        disable_category_dictionary=args.disable_category_dictionary,
        disable_seen_rules=args.disable_seen_rules,
    )
root_categories = [
Category.parse(category)
for category in args.root_cats.split('|')
]
semantic_templates = (
args.semantic_templates or config.semantic_templates
)
kwargs = dict(
unary_penalty=args.unary_penalty,
nbest=args.nbest,
pruning_size=args.pruning_size,
beta=args.beta,
use_beta=not args.disable_beta,
max_length=args.max_length,
max_step=args.max_step,
processes=args.num_processes,
)
if args.input is not None:
input_type = open(args.input)
elif not sys.stdin.isatty():
input_type = sys.stdin
else:
# reading from keyboard
input_type = None
sys.stdout.flush()
sys.stderr.flush()
logging.getLogger().setLevel(logging.CRITICAL)
categories = None
while True:
fin = [
line for line in map(str.strip, input_type or [input()])
if len(line) > 0
]
if len(fin) == 0:
break
if args.input_format == 'POSandNERtagged':
doc = [
[
Token.of_piped(token)
for token in sent.split(' ')
] for sent in fin
]
else:
doc = annotator_fun(
[
[word for word in sentence.split(' ')]
for sentence in fin
if len(sentence) > 0
],
tokenize=args.tokenize,
)
logger.info("supertagging")
score_result, categories_ = supertagger.predict_doc(
[[token.word for token in sentence] for sentence in doc]
)
if categories is None:
categories = [
Category.parse(category) for category in categories_
]
if category_dict is not None:
doc, score_result = depccg.parsing.apply_category_filters(
doc,
score_result,
categories,
category_dict,
)
logger.info("parsing")
results = depccg.parsing.run(
doc,
score_result,
categories,
root_categories,
apply_binary_rules,
apply_unary_rules,
**kwargs,
)
print_(
results,
format=args.format,
semantic_templates=semantic_templates
)
if input_type is None:
sys.stdout.flush()
else:
break
if __name__ == '__main__':
# disable lengthy allennlp logs
logging.getLogger('filelock').setLevel(logging.ERROR)
logging.getLogger('allennlp').setLevel(logging.ERROR)
parse_args(main)
|
masashi-y/myccg | tests/grammar/test_ja.py | import pytest
from depccg.cat import Category
from depccg.grammar import ja
observed_binary_rules = [
tuple(Category.parse(category) for category in text.strip().split(' '))
for text in open('tests/grammar/rules.ja.txt')
]
@pytest.mark.parametrize("expect, x, y", observed_binary_rules)
def test_binary_rule(expect, x, y):
assert expect in [result.cat for result in ja.apply_binary_rules(x, y)]
|
masashi-y/myccg | tests/test_printer.py | import pytest
from depccg.cat import Category
from depccg.types import Token
from depccg.tree import Tree, ScoredTree
from depccg.printer import to_string
from depccg.instance_models import SEMANTIC_TEMPLATES
from depccg.lang import set_global_language_to
@pytest.fixture()
def en_tree():
return Tree.make_binary(
Category.parse("S[dcl]"),
Tree.make_binary(
Category.parse("S[dcl]"),
Tree.make_binary(
Category.parse('NP'),
Tree.make_terminal(
Token(word="This", pos="DT", entity="O",
lemma="this", chunk="XX"),
Category.parse("NP[nb]/N"),
),
Tree.make_terminal(
Token(word="paper", pos="NN", entity="O",
lemma="paper", chunk="XX"),
Category.parse("N"),
),
'fa',
'>'
),
Tree.make_binary(
Category.parse("S[dcl]\\NP"),
Tree.make_terminal(
Token(word="discusses", pos="VBZ",
entity="O", lemma="discuss", chunk="XX"),
Category.parse("(S[dcl]\\NP)/NP"),
),
Tree.make_binary(
Category.parse("NP"),
Tree.make_unary(
Category.parse("NP"),
Tree.make_binary(
Category.parse("N"),
Tree.make_terminal(
Token(word="crucial", pos="JJ", entity="O",
lemma="crucial", chunk="XX"),
Category.parse("N/N"),
),
Tree.make_terminal(
Token(word="aspects", pos="NNS", entity="O",
lemma="aspect", chunk="XX"),
Category.parse("N"),
),
"fa",
">"
),
),
Tree.make_binary(
Category.parse("NP\\NP"),
Tree.make_terminal(
Token(word="of", pos="IN", entity="O",
lemma="of", chunk="XX"),
Category.parse("(NP\\NP)/NP"),
),
Tree.make_binary(
Category.parse("NP"),
Tree.make_terminal(
Token(word="this", pos="DT", entity="O",
lemma="this", chunk="XX"),
Category.parse("NP[nb]/N"),
),
Tree.make_binary(
Category.parse('N'),
Tree.make_terminal(
Token(word="new", pos="JJ", entity="O",
lemma="new", chunk="XX"),
Category.parse("N/N"),
),
Tree.make_binary(
Category.parse('N'),
Tree.make_terminal(
Token(word="annotation", pos="NN", entity="O",
lemma="annotation", chunk="XX"),
Category.parse("N/N"),
),
Tree.make_terminal(
Token(word="scheme", pos="NN", entity="O",
lemma="scheme", chunk="XX"),
Category.parse("N"),
),
'fa',
'>'
),
'fa',
'>'
),
'fa',
'>'
),
'fa',
'>'
),
"ba",
"<"
),
'fa',
'>'
),
"ba",
"<"
),
Tree.make_terminal(
Token(word=".", pos=".", entity="O", lemma=".", chunk="XX"),
Category.parse("."),
),
"rp",
"<rp>"
)
@pytest.fixture()
def scored_en_tree(en_tree):
return ScoredTree(en_tree, -0.05)
@pytest.fixture()
def ja_tree():
return Tree.make_binary(
Category.parse("S[mod=nm,form=base,fin=t]"),
Tree.make_binary(
Category.parse("S[mod=nm,form=base,fin=f]"),
Tree.make_binary(
Category.parse(
"S[mod=X1,form=X2,fin=f]/S[mod=X1,form=X2,fin=f]"),
Tree.make_binary(
Category.parse("NP[case=nc,mod=nm,fin=f]"),
Tree.make_terminal(
Token(word='メロス', surf='メロス', pos='名詞', pos1='一般', pos2='*', pos3='*',
                              inflectionForm='*', inflectionType='*', reading='*', base='メロス'),
Category.parse("NP[case=nc,mod=nm,fin=f]")),
Tree.make_terminal(
Token(word='に', surf='に', pos='助詞', pos1='格助詞', pos2='一般', pos3='*',
inflectionForm='*', inflectionType='*', reading='ニ', base='に'),
Category.parse("NP[case=nc,mod=nm,fin=f]\\NP[case=nc,mod=nm,fin=f]")),
"<",
"<",
),
Tree.make_terminal(
Token(word='は', surf='は', pos='助詞', pos1='係助詞', pos2='*', pos3='*',
                          inflectionForm='*', inflectionType='*', reading='ハ', base='は'),
Category.parse("(S[mod=X1,form=X2,fin=f]/S[mod=X1,form=X2,fin=f])\\NP[case=nc,mod=nm,fin=f]")),
"<",
"<",
),
Tree.make_binary(
Category.parse("S[mod=nm,form=base,fin=f]"),
Tree.make_binary(
Category.parse("NP[case=ga,mod=nm,fin=f]"),
Tree.make_terminal(
Token(word='政治', surf='政治', pos='名詞', pos1='一般', pos2='*', pos3='*',
inflectionForm='*', inflectionType='*', reading='セイジ', base='政治'),
Category.parse("NP[case=nc,mod=nm,fin=f]")),
Tree.make_terminal(
Token(word='が', surf='が', pos='助詞', pos1='格助詞', pos2='一般', pos3='*',
                              inflectionForm='*', inflectionType='*', reading='ガ', base='が'),
Category.parse("NP[case=ga,mod=nm,fin=f]\\NP[case=nc,mod=nm,fin=f]")),
"<",
"<",
),
Tree.make_binary(
Category.parse(
"S[mod=nm,form=base,fin=f]\\NP[case=ga,mod=nm,fin=f]"),
Tree.make_terminal(
Token(word='わから', surf='わから', pos='動詞', pos1='自立', pos2='*', pos3='*',
inflectionForm='未然形', inflectionType='五段・ラ行', reading='ワカラ', base='わかる'),
Category.parse("S[mod=nm,form=neg,fin=f]\\NP[case=ga,mod=nm,fin=f]")),
Tree.make_terminal(
Token(word='ぬ', surf='ぬ', pos='助動詞', pos1='*', pos2='*', pos3='*',
inflectionForm='基本形', inflectionType='特殊・ヌ', reading='ヌ', base='ぬ'),
Category.parse("S[mod=nm,form=base,fin=f]\\S[mod=nm,form=neg,fin=f]")),
"<B1",
"<B1",
),
"<",
"<",
),
">",
">",
),
Tree.make_terminal(
Token(word='。', surf='。', pos='記号', pos1='句点', pos2='*', pos3='*',
inflectionForm='*', inflectionType='*', reading='。', base='。'),
Category.parse("S[mod=nm,form=base,fin=t]\\S[mod=nm,form=base,fin=f]")),
"<",
"<",
)
@pytest.fixture()
def scored_ja_tree(ja_tree):
return ScoredTree(ja_tree, -0.05)
def test_en_auto(scored_en_tree):
expected = (
'ID=1, log probability=-0.05000000\n'
'(<T S[dcl] 0 2> (<T S[dcl] 0 2> (<T NP 0 2> (<L NP[nb]/N DT DT This NP[nb]/N>)'
' (<L N NN NN paper N>) ) (<T S[dcl]\\NP 0 2> (<L (S[dcl]\\NP)/NP VBZ VBZ discusses'
' (S[dcl]\\NP)/NP>) (<T NP 0 2> (<T NP 0 1> (<T N 0 2> (<L N/N JJ JJ crucial N/N>)'
' (<L N NNS NNS aspects N>) ) ) (<T NP\\NP 0 2> (<L (NP\\NP)/NP IN IN of (NP\\NP)/NP>)'
' (<T NP 0 2> (<L NP[nb]/N DT DT this NP[nb]/N>) (<T N 0 2> (<L N/N JJ JJ new N/N>)'
' (<T N 0 2> (<L N/N NN NN annotation N/N>) (<L N NN NN scheme N>) ) ) ) ) ) ) ) (<L . . . . .>) )\n'
)
assert to_string([scored_en_tree], format='auto') == expected
def test_en_auto_extended(scored_en_tree):
expected = (
'ID=1, log probability=-0.05000000\n'
'(<T S[dcl] rp 0 2> (<T S[dcl] ba 0 2> (<T NP fa 0 2> (<L NP[nb]/N This this DT O XX NP[nb]/N>)'
' (<L N paper paper NN O XX N>) ) (<T S[dcl]\\NP fa 0 2> (<L (S[dcl]\\NP)/NP discusses discuss'
' VBZ O XX (S[dcl]\\NP)/NP>) (<T NP ba 0 2> (<T NP lex 0 1> (<T N fa 0 2> (<L N/N crucial crucial'
' JJ O XX N/N>) (<L N aspects aspect NNS O XX N>) ) ) (<T NP\\NP fa 0 2> (<L (NP\\NP)/NP of of IN O'
' XX (NP\\NP)/NP>) (<T NP fa 0 2> (<L NP[nb]/N this this DT O XX NP[nb]/N>) (<T N fa 0 2> (<L N/N'
' new new JJ O XX N/N>) (<T N fa 0 2> (<L N/N annotation annotation NN O XX N/N>) (<L N scheme'
' scheme NN O XX N>) ) ) ) ) ) ) ) (<L . . . . O XX .>) )\n'
)
assert to_string([scored_en_tree], format='auto_extended') == expected
def test_en_xml(scored_en_tree):
expected = (
'<candc>\n'
' <ccg sentence="1" id="1">\n'
' <rule type="rp" cat="S[dcl]">\n'
' <rule type="ba" cat="S[dcl]">\n'
' <rule type="fa" cat="NP">\n'
' <lf start="0" span="1" cat="NP[nb]/N" word="This" pos="DT" entity="O" lemma="this" chunk="XX"/>\n'
' <lf start="1" span="1" cat="N" word="paper" pos="NN" entity="O" lemma="paper" chunk="XX"/>\n'
' </rule>\n'
' <rule type="fa" cat="S[dcl]\\NP">\n'
' <lf start="2" span="1" cat="(S[dcl]\\NP)/NP" word="discusses" pos="VBZ" entity="O" lemma="discuss" chunk="XX"/>\n'
' <rule type="ba" cat="NP">\n'
' <rule type="lex" cat="NP">\n'
' <rule type="fa" cat="N">\n'
' <lf start="3" span="1" cat="N/N" word="crucial" pos="JJ" entity="O" lemma="crucial" chunk="XX"/>\n'
' <lf start="4" span="1" cat="N" word="aspects" pos="NNS" entity="O" lemma="aspect" chunk="XX"/>\n'
' </rule>\n'
' </rule>\n'
' <rule type="fa" cat="NP\\NP">\n'
' <lf start="5" span="1" cat="(NP\\NP)/NP" word="of" pos="IN" entity="O" lemma="of" chunk="XX"/>\n'
' <rule type="fa" cat="NP">\n'
' <lf start="6" span="1" cat="NP[nb]/N" word="this" pos="DT" entity="O" lemma="this" chunk="XX"/>\n'
' <rule type="fa" cat="N">\n'
' <lf start="7" span="1" cat="N/N" word="new" pos="JJ" entity="O" lemma="new" chunk="XX"/>\n'
' <rule type="fa" cat="N">\n'
' <lf start="8" span="1" cat="N/N" word="annotation" pos="NN" entity="O" lemma="annotation" chunk="XX"/>\n'
' <lf start="9" span="1" cat="N" word="scheme" pos="NN" entity="O" lemma="scheme" chunk="XX"/>\n'
' </rule>\n'
' </rule>\n'
' </rule>\n'
' </rule>\n'
' </rule>\n'
' </rule>\n'
' </rule>\n'
' <lf start="10" span="1" cat="." word="." pos="." entity="O" lemma="." chunk="XX"/>\n'
' </rule>\n'
' </ccg>\n'
'</candc>\n'
)
assert to_string([scored_en_tree], format='xml') == expected
def test_en_deriv(scored_en_tree):
expected = (
'ID=1, log probability=-0.05000000\n'
' NP[nb]/N N (S[dcl]\\NP)/NP N/N N (NP\\NP)/NP NP[nb]/N N/N N/N N .\n'
' This paper discusses crucial aspects of this new annotation scheme .\n'
'----------------->\n'
' NP\n'
' ------------------>\n'
' N\n'
' ------------------<un>\n'
' NP\n'
' -------------------->\n'
' N\n'
' ------------------------->\n'
' N\n'
' ----------------------------------->\n'
' NP\n'
' ----------------------------------------------->\n'
' NP\\NP\n'
' -----------------------------------------------------------------<\n'
' NP\n'
' --------------------------------------------------------------------------------->\n'
' S[dcl]\\NP\n'
'--------------------------------------------------------------------------------------------------<\n'
' S[dcl]\n'
'-----------------------------------------------------------------------------------------------------<rp>\n'
' S[dcl]\n\n'
)
assert to_string([scored_en_tree], format='deriv') == expected
def test_en_prolog(scored_en_tree):
expected = (
":- op(601, xfx, (/)).\n"
":- op(601, xfx, (\\)).\n"
":- multifile ccg/2, id/2.\n"
":- discontiguous ccg/2, id/2.\n"
"\n"
"ccg(1,\n"
" rp(s:dcl,\n"
" ba(s:dcl,\n"
" fa(np,\n"
" t((np:nb/n), 'This', 'this', 'DT', 'XX', 'O'),\n"
" t(n, 'paper', 'paper', 'NN', 'XX', 'O')),\n"
" fa((s:dcl\\np),\n"
" t(((s:dcl\\np)/np), 'discusses', 'discuss', 'VBZ', 'XX', 'O'),\n"
" ba(np,\n"
" lx(np, n,\n"
" fa(n,\n"
" t((n/n), 'crucial', 'crucial', 'JJ', 'XX', 'O'),\n"
" t(n, 'aspects', 'aspect', 'NNS', 'XX', 'O'))),\n"
" fa((np\\np),\n"
" t(((np\\np)/np), 'of', 'of', 'IN', 'XX', 'O'),\n"
" fa(np,\n"
" t((np:nb/n), 'this', 'this', 'DT', 'XX', 'O'),\n"
" fa(n,\n"
" t((n/n), 'new', 'new', 'JJ', 'XX', 'O'),\n"
" fa(n,\n"
" t((n/n), 'annotation', 'annotation', 'NN', 'XX', 'O'),\n"
" t(n, 'scheme', 'scheme', 'NN', 'XX', 'O')))))))),\n"
" t(period, '.', '.', '.', 'XX', 'O'))).\n\n"
)
assert to_string([scored_en_tree], format='prolog') == expected
def test_en_jigg_xml(scored_en_tree):
expected = (
'<root>\n'
' <document>\n'
' <sentences>\n'
' <sentence>\n'
' <tokens>\n'
' <token start="0" cat="NP[nb]/N" id="s0_0" pos="DT" entity="O" chunk="XX" surf="This" base="this"/>\n'
' <token start="1" cat="N" id="s0_1" pos="NN" entity="O" chunk="XX" surf="paper" base="paper"/>\n'
' <token start="2" cat="(S[dcl]\\NP)/NP" id="s0_2" pos="VBZ" entity="O" chunk="XX" surf="discusses" base="discuss"/>\n'
' <token start="3" cat="N/N" id="s0_3" pos="JJ" entity="O" chunk="XX" surf="crucial" base="crucial"/>\n'
' <token start="4" cat="N" id="s0_4" pos="NNS" entity="O" chunk="XX" surf="aspects" base="aspect"/>\n'
' <token start="5" cat="(NP\\NP)/NP" id="s0_5" pos="IN" entity="O" chunk="XX" surf="of" base="of"/>\n'
' <token start="6" cat="NP[nb]/N" id="s0_6" pos="DT" entity="O" chunk="XX" surf="this" base="this"/>\n'
' <token start="7" cat="N/N" id="s0_7" pos="JJ" entity="O" chunk="XX" surf="new" base="new"/>\n'
' <token start="8" cat="N/N" id="s0_8" pos="NN" entity="O" chunk="XX" surf="annotation" base="annotation"/>\n'
' <token start="9" cat="N" id="s0_9" pos="NN" entity="O" chunk="XX" surf="scheme" base="scheme"/>\n'
' <token start="10" cat="." id="s0_10" pos="." entity="O" chunk="XX" surf="." base="."/>\n'
' </tokens>\n'
' <ccg id="s0_ccg0" root="s0_sp0" score="-0.05">\n'
' <span category="S[dcl=true]" id="s0_sp0" child="s0_sp1 s0_sp21" rule="rp" begin="0" end="11" root="true"/>\n'
' <span category="S[dcl=true]" id="s0_sp1" child="s0_sp2 s0_sp5" rule="ba" begin="0" end="10"/>\n'
' <span category="NP" id="s0_sp2" child="s0_sp3 s0_sp4" rule="fa" begin="0" end="2"/>\n'
' <span category="NP[nb=true]/N" id="s0_sp3" terminal="s0_0" begin="0" end="1"/>\n'
' <span category="N" id="s0_sp4" terminal="s0_1" begin="1" end="2"/>\n'
' <span category="S[dcl=true]\\NP" id="s0_sp5" child="s0_sp6 s0_sp7" rule="fa" begin="2" end="10"/>\n'
' <span category="(S[dcl=true]\\NP)/NP" id="s0_sp6" terminal="s0_2" begin="2" end="3"/>\n'
' <span category="NP" id="s0_sp7" child="s0_sp8 s0_sp12" rule="ba" begin="3" end="10"/>\n'
' <span category="NP" id="s0_sp8" child="s0_sp9" rule="lex" begin="3" end="5"/>\n'
' <span category="N" id="s0_sp9" child="s0_sp10 s0_sp11" rule="fa" begin="3" end="5"/>\n'
' <span category="N/N" id="s0_sp10" terminal="s0_3" begin="3" end="4"/>\n'
' <span category="N" id="s0_sp11" terminal="s0_4" begin="4" end="5"/>\n'
' <span category="NP\\NP" id="s0_sp12" child="s0_sp13 s0_sp14" rule="fa" begin="5" end="10"/>\n'
' <span category="(NP\\NP)/NP" id="s0_sp13" terminal="s0_5" begin="5" end="6"/>\n'
' <span category="NP" id="s0_sp14" child="s0_sp15 s0_sp16" rule="fa" begin="6" end="10"/>\n'
' <span category="NP[nb=true]/N" id="s0_sp15" terminal="s0_6" begin="6" end="7"/>\n'
' <span category="N" id="s0_sp16" child="s0_sp17 s0_sp18" rule="fa" begin="7" end="10"/>\n'
' <span category="N/N" id="s0_sp17" terminal="s0_7" begin="7" end="8"/>\n'
' <span category="N" id="s0_sp18" child="s0_sp19 s0_sp20" rule="fa" begin="8" end="10"/>\n'
' <span category="N/N" id="s0_sp19" terminal="s0_8" begin="8" end="9"/>\n'
' <span category="N" id="s0_sp20" terminal="s0_9" begin="9" end="10"/>\n'
' <span category="." id="s0_sp21" terminal="s0_10" begin="10" end="11"/>\n'
' </ccg>\n'
' </sentence>\n'
' </sentences>\n'
' </document>\n'
'</root>\n'
)
assert to_string([scored_en_tree], format='jigg_xml') == expected
def test_en_jigg_xml_ccg2lambda(scored_en_tree):
expected = (
'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n'
'<root>\n'
' <document>\n'
' <sentences>\n'
' <sentence>\n'
' <tokens>\n'
' <token start="0" cat="NP[nb]/N" id="s0_0" pos="DT" entity="O" chunk="XX" surf="This" base="this"/>\n'
' <token start="1" cat="N" id="s0_1" pos="NN" entity="O" chunk="XX" surf="paper" base="paper"/>\n'
' <token start="2" cat="(S[dcl]\\NP)/NP" id="s0_2" pos="VBZ" entity="O" chunk="XX" surf="discusses" base="discuss"/>\n'
' <token start="3" cat="N/N" id="s0_3" pos="JJ" entity="O" chunk="XX" surf="crucial" base="crucial"/>\n'
' <token start="4" cat="N" id="s0_4" pos="NNS" entity="O" chunk="XX" surf="aspects" base="aspect"/>\n'
' <token start="5" cat="(NP\\NP)/NP" id="s0_5" pos="IN" entity="O" chunk="XX" surf="of" base="of"/>\n'
' <token start="6" cat="NP[nb]/N" id="s0_6" pos="DT" entity="O" chunk="XX" surf="this" base="this"/>\n'
' <token start="7" cat="N/N" id="s0_7" pos="JJ" entity="O" chunk="XX" surf="new" base="new"/>\n'
' <token start="8" cat="N/N" id="s0_8" pos="NN" entity="O" chunk="XX" surf="annotation" base="annotation"/>\n'
' <token start="9" cat="N" id="s0_9" pos="NN" entity="O" chunk="XX" surf="scheme" base="scheme"/>\n'
' <token start="10" cat="." id="s0_10" pos="." entity="O" chunk="XX" surf="." base="."/>\n'
' </tokens>\n'
' <ccg id="s0_ccg0" root="s0_sp0" score="-0.05">\n'
' <span category="S[dcl=true]" id="s0_sp0" child="s0_sp1 s0_sp21" rule="rp" begin="0" end="11" root="true"/>\n'
' <span category="S[dcl=true]" id="s0_sp1" child="s0_sp2 s0_sp5" rule="ba" begin="0" end="10"/>\n'
' <span category="NP" id="s0_sp2" child="s0_sp3 s0_sp4" rule="fa" begin="0" end="2"/>\n'
' <span category="NP[nb=true]/N" id="s0_sp3" terminal="s0_0" begin="0" end="1"/>\n'
' <span category="N" id="s0_sp4" terminal="s0_1" begin="1" end="2"/>\n'
' <span category="S[dcl=true]\\NP" id="s0_sp5" child="s0_sp6 s0_sp7" rule="fa" begin="2" end="10"/>\n'
' <span category="(S[dcl=true]\\NP)/NP" id="s0_sp6" terminal="s0_2" begin="2" end="3"/>\n'
' <span category="NP" id="s0_sp7" child="s0_sp8 s0_sp12" rule="ba" begin="3" end="10"/>\n'
' <span category="NP" id="s0_sp8" child="s0_sp9" rule="lex" begin="3" end="5"/>\n'
' <span category="N" id="s0_sp9" child="s0_sp10 s0_sp11" rule="fa" begin="3" end="5"/>\n'
' <span category="N/N" id="s0_sp10" terminal="s0_3" begin="3" end="4"/>\n'
' <span category="N" id="s0_sp11" terminal="s0_4" begin="4" end="5"/>\n'
' <span category="NP\\NP" id="s0_sp12" child="s0_sp13 s0_sp14" rule="fa" begin="5" end="10"/>\n'
' <span category="(NP\\NP)/NP" id="s0_sp13" terminal="s0_5" begin="5" end="6"/>\n'
' <span category="NP" id="s0_sp14" child="s0_sp15 s0_sp16" rule="fa" begin="6" end="10"/>\n'
' <span category="NP[nb=true]/N" id="s0_sp15" terminal="s0_6" begin="6" end="7"/>\n'
' <span category="N" id="s0_sp16" child="s0_sp17 s0_sp18" rule="fa" begin="7" end="10"/>\n'
' <span category="N/N" id="s0_sp17" terminal="s0_7" begin="7" end="8"/>\n'
' <span category="N" id="s0_sp18" child="s0_sp19 s0_sp20" rule="fa" begin="8" end="10"/>\n'
' <span category="N/N" id="s0_sp19" terminal="s0_8" begin="8" end="9"/>\n'
' <span category="N" id="s0_sp20" terminal="s0_9" begin="9" end="10"/>\n'
' <span category="." id="s0_sp21" terminal="s0_10" begin="10" end="11"/>\n'
' </ccg>\n'
' <semantics status="success" ccg_id="s0_ccg0" root="s0_sp0">\n'
' <span id="s0_sp0" child="s0_sp1 s0_sp21" sem="exists x.(_paper(x) & True & exists z2.(_aspect(z2) & _crucial(z2) & exists z1.(_scheme(z1) & _annotation(z1) & _new(z1) & True & (z2 = z1)) & True & exists e.(_discuss(e) & (Subj(e) = x) & (Acc(e) = z2) & True)))"/>\n'
' <span id="s0_sp1" child="s0_sp2 s0_sp5" sem="\\K.exists x.(_paper(x) & True & exists z2.(_aspect(z2) & _crucial(z2) & exists z1.(_scheme(z1) & _annotation(z1) & _new(z1) & True & (z2 = z1)) & True & exists e.(_discuss(e) & (Subj(e) = x) & (Acc(e) = z2) & K(e))))"/>\n'
' <span id="s0_sp2" child="s0_sp3 s0_sp4" sem="\\F2 F3.exists x.(_paper(x) & F2(x) & F3(x))"/>\n'
' <span id="s0_sp3" sem="\\F1 F2 F3.exists x.(F1(x) & F2(x) & F3(x))"/>\n'
' <span id="s0_sp4" sem="\\x._paper(x)" type="_paper : Entity -> Prop"/>\n'
' <span id="s0_sp5" child="s0_sp6 s0_sp7" sem="\\Q2 K.Q2(\\x.True,\\x.exists z2.(_aspect(z2) & _crucial(z2) & exists z1.(_scheme(z1) & _annotation(z1) & _new(z1) & True & (z2 = z1)) & True & exists e.(_discuss(e) & (Subj(e) = x) & (Acc(e) = z2) & K(e))))"/>\n'
' <span id="s0_sp6" sem="\\Q1 Q2 K.Q2(\\x.True,\\x.Q1(\\y.True,\\y.exists e.(_discuss(e) & (Subj(e) = x) & (Acc(e) = y) & K(e))))" type="_discuss : Event -> Prop"/>\n'
' <span id="s0_sp7" child="s0_sp8 s0_sp12" sem="\\F1 F2.exists x.(_aspect(x) & _crucial(x) & exists z1.(_scheme(z1) & _annotation(z1) & _new(z1) & True & (x = z1)) & F1(x) & F2(x))"/>\n'
' <span id="s0_sp8" child="s0_sp9" sem="\\F1 F2.exists x.(_aspect(x) & _crucial(x) & F1(x) & F2(x))"/>\n'
' <span id="s0_sp9" child="s0_sp10 s0_sp11" sem="\\x.(_aspect(x) & _crucial(x))"/>\n'
' <span id="s0_sp10" sem="\\F x.(F(x) & _crucial(x))" type="_crucial : Entity -> Prop"/>\n'
' <span id="s0_sp11" sem="\\x._aspect(x)" type="_aspect : Entity -> Prop"/>\n'
' <span id="s0_sp12" child="s0_sp13 s0_sp14" sem="\\Q2 F1 F2.Q2(\\x.(exists z1.(_scheme(z1) & _annotation(z1) & _new(z1) & True & (x = z1)) & F1(x)),F2)"/>\n'
' <span id="s0_sp13" sem="\\Q1 Q2 F1 F2.Q2(\\x.(Q1(\\w.True,\\y.(x = y)) & F1(x)),F2)"/>\n'
' <span id="s0_sp14" child="s0_sp15 s0_sp16" sem="\\F2 F3.exists x.(_scheme(x) & _annotation(x) & _new(x) & F2(x) & F3(x))"/>\n'
' <span id="s0_sp15" sem="\\F1 F2 F3.exists x.(F1(x) & F2(x) & F3(x))"/>\n'
' <span id="s0_sp16" child="s0_sp17 s0_sp18" sem="\\x.(_scheme(x) & _annotation(x) & _new(x))"/>\n'
' <span id="s0_sp17" sem="\\F x.(F(x) & _new(x))" type="_new : Entity -> Prop"/>\n'
' <span id="s0_sp18" child="s0_sp19 s0_sp20" sem="\\x.(_scheme(x) & _annotation(x))"/>\n'
' <span id="s0_sp19" sem="\\F x.(F(x) & _annotation(x))" type="_annotation : Entity -> Prop"/>\n'
' <span id="s0_sp20" sem="\\x._scheme(x)" type="_scheme : Entity -> Prop"/>\n'
' <span id="s0_sp21" sem="\\X.X"/>\n'
' </semantics>\n'
' </sentence>\n'
' </sentences>\n'
' </document>\n'
'</root>\n'
)
assert (
to_string(
[scored_en_tree],
format='jigg_xml_ccg2lambda',
semantic_templates=SEMANTIC_TEMPLATES['en'],
) == expected
)
def test_en_conll(scored_en_tree):
expected = (
'# ID=1\n'
'# log probability=-0.05000000\n'
'1 This this DT DT _ 0 NP[nb]/N _ (<T S[dcl] 0 2> (<T S[dcl] 0 2> (<T NP 0 2> (<L NP[nb]/N DT DT This NP[nb]/N>)\n'
'2 paper paper NN NN _ 1 N _ (<L N NN NN paper N>) )\n'
'3 discusses discuss VBZ VBZ _ 1 (S[dcl]\\NP)/NP _ (<T S[dcl]\\NP 0 2> (<L (S[dcl]\\NP)/NP VBZ VBZ discusses (S[dcl]\\NP)/NP>)\n'
'4 crucial crucial JJ JJ _ 3 N/N _ (<T NP 0 2> (<T NP 0 1> (<T N 0 2> (<L N/N JJ JJ crucial N/N>)\n'
'5 aspects aspect NNS NNS _ 4 N _ (<L N NNS NNS aspects N>) ) )\n'
'6 of of IN IN _ 4 (NP\\NP)/NP _ (<T NP\\NP 0 2> (<L (NP\\NP)/NP IN IN of (NP\\NP)/NP>)\n'
'7 this this DT DT _ 6 NP[nb]/N _ (<T NP 0 2> (<L NP[nb]/N DT DT this NP[nb]/N>)\n'
'8 new new JJ JJ _ 7 N/N _ (<T N 0 2> (<L N/N JJ JJ new N/N>)\n'
'9 annotation annotation NN NN _ 8 N/N _ (<T N 0 2> (<L N/N NN NN annotation N/N>)\n'
'10 scheme scheme NN NN _ 9 N _ (<L N NN NN scheme N>) ) ) ) ) ) ) )\n'
'11 . . . . _ 1 . _ (<L . . . . .>) )\n'
)
assert to_string([scored_en_tree], format='conll') == expected
def test_en_json(scored_en_tree):
expected = (
'{\n'
' "1": [\n'
' {\n'
' "type": "rp",\n'
' "cat": "S[dcl]",\n'
' "children": [\n'
' {\n'
' "type": "ba",\n'
' "cat": "S[dcl]",\n'
' "children": [\n'
' {\n'
' "type": "fa",\n'
' "cat": "NP",\n'
' "children": [\n'
' {\n'
' "word": "This",\n'
' "pos": "DT",\n'
' "entity": "O",\n'
' "lemma": "this",\n'
' "chunk": "XX",\n'
' "cat": "NP[nb]/N"\n'
' },\n'
' {\n'
' "word": "paper",\n'
' "pos": "NN",\n'
' "entity": "O",\n'
' "lemma": "paper",\n'
' "chunk": "XX",\n'
' "cat": "N"\n'
' }\n'
' ]\n'
' },\n'
' {\n'
' "type": "fa",\n'
' "cat": "S[dcl]\\\\NP",\n'
' "children": [\n'
' {\n'
' "word": "discusses",\n'
' "pos": "VBZ",\n'
' "entity": "O",\n'
' "lemma": "discuss",\n'
' "chunk": "XX",\n'
' "cat": "(S[dcl]\\\\NP)/NP"\n'
' },\n'
' {\n'
' "type": "ba",\n'
' "cat": "NP",\n'
' "children": [\n'
' {\n'
' "type": "lex",\n'
' "cat": "NP",\n'
' "children": [\n'
' {\n'
' "type": "fa",\n'
' "cat": "N",\n'
' "children": [\n'
' {\n'
' "word": "crucial",\n'
' "pos": "JJ",\n'
' "entity": "O",\n'
' "lemma": "crucial",\n'
' "chunk": "XX",\n'
' "cat": "N/N"\n'
' },\n'
' {\n'
' "word": "aspects",\n'
' "pos": "NNS",\n'
' "entity": "O",\n'
' "lemma": "aspect",\n'
' "chunk": "XX",\n'
' "cat": "N"\n'
' }\n'
' ]\n'
' }\n'
' ]\n'
' },\n'
' {\n'
' "type": "fa",\n'
' "cat": "NP\\\\NP",\n'
' "children": [\n'
' {\n'
' "word": "of",\n'
' "pos": "IN",\n'
' "entity": "O",\n'
' "lemma": "of",\n'
' "chunk": "XX",\n'
' "cat": "(NP\\\\NP)/NP"\n'
' },\n'
' {\n'
' "type": "fa",\n'
' "cat": "NP",\n'
' "children": [\n'
' {\n'
' "word": "this",\n'
' "pos": "DT",\n'
' "entity": "O",\n'
' "lemma": "this",\n'
' "chunk": "XX",\n'
' "cat": "NP[nb]/N"\n'
' },\n'
' {\n'
' "type": "fa",\n'
' "cat": "N",\n'
' "children": [\n'
' {\n'
' "word": "new",\n'
' "pos": "JJ",\n'
' "entity": "O",\n'
' "lemma": "new",\n'
' "chunk": "XX",\n'
' "cat": "N/N"\n'
' },\n'
' {\n'
' "type": "fa",\n'
' "cat": "N",\n'
' "children": [\n'
' {\n'
' "word": "annotation",\n'
' "pos": "NN",\n'
' "entity": "O",\n'
' "lemma": "annotation",\n'
' "chunk": "XX",\n'
' "cat": "N/N"\n'
' },\n'
' {\n'
' "word": "scheme",\n'
' "pos": "NN",\n'
' "entity": "O",\n'
' "lemma": "scheme",\n'
' "chunk": "XX",\n'
' "cat": "N"\n'
' }\n'
' ]\n'
' }\n'
' ]\n'
' }\n'
' ]\n'
' }\n'
' ]\n'
' }\n'
' ]\n'
' }\n'
' ]\n'
' }\n'
' ]\n'
' },\n'
' {\n'
' "word": ".",\n'
' "pos": ".",\n'
' "entity": "O",\n'
' "lemma": ".",\n'
' "chunk": "XX",\n'
' "cat": "."\n'
' }\n'
' ],\n'
' "log_prob": -0.05\n'
' }\n'
' ]\n'
'}'
)
assert to_string([scored_en_tree], format='json') == expected
def test_en_ptb(scored_en_tree):
expected = (
'ID=1, log probability=-0.05000000\n'
'(ROOT (S[dcl] (S[dcl] (NP (NP[nb]/N This) (N paper)) (S[dcl]\\NP ((S[dcl]\\NP)/NP discusses)'
' (NP (NP (N (N/N crucial) (N aspects))) (NP\\NP ((NP\\NP)/NP of) (NP (NP[nb]/N this) (N (N/N new)'
' (N (N/N annotation) (N scheme)))))))) (. .)))\n'
)
assert to_string([scored_en_tree], format='ptb') == expected
def test_ja_ja(scored_ja_tree):
expected = (
'ID=1, log probability=-0.05000000\n'
'{< S[mod=nm,form=base,fin=t] {> S[mod=nm,form=base,fin=f] {< S[mod=X1,form=X2,fin=f]/S[mod=X1,form=X2,fin=f]'
' {< NP[case=nc,mod=nm,fin=f] {NP[case=nc,mod=nm,fin=f] メロス/メロス/名詞-一般/_}'
' {NP[case=nc,mod=nm,fin=f]\\NP[case=nc,mod=nm,fin=f] に/に/助詞-格助詞-一般/_}}'
' {(S[mod=X1,form=X2,fin=f]/S[mod=X1,form=X2,fin=f])\\NP[case=nc,mod=nm,fin=f] は/は/助詞-係助詞/_}}'
' {< S[mod=nm,form=base,fin=f] {< NP[case=ga,mod=nm,fin=f] {NP[case=nc,mod=nm,fin=f] 政治/政治/名詞-一般/_}'
' {NP[case=ga,mod=nm,fin=f]\\NP[case=nc,mod=nm,fin=f] が/が/助詞-格助詞-一般/_}}'
' {<B1 S[mod=nm,form=base,fin=f]\\NP[case=ga,mod=nm,fin=f] {S[mod=nm,form=neg,fin=f]\\NP[case=ga,mod=nm,fin=f]'
' わから/わから/動詞-自立/未然形-五段・ラ行} {S[mod=nm,form=base,fin=f]\\S[mod=nm,form=neg,fin=f]'
' ぬ/ぬ/助動詞/基本形-特殊・ヌ}}}} {S[mod=nm,form=base,fin=t]\\S[mod=nm,form=base,fin=f] 。/。/記号-句点/_}}\n'
)
assert to_string([scored_ja_tree], format='ja') == expected
def test_ja_prolog(scored_ja_tree):
expected = (
":- op(601, xfx, (/)).\n"
":- op(601, xfx, (\\)).\n"
":- multifile ccg/2, id/2.\n"
":- discontiguous ccg/2, id/2.\n"
"\n"
"ccg(1,\n"
" ba(s,\n"
" fa(s,\n"
" ba((s/s),\n"
" ba(np:nc,\n"
" t(np:nc, 'メロス', 'メロス', '名詞/一般/*/*', '*', '*'),\n"
" t((np:nc\\np:nc), 'に', 'に', '助詞/格助詞/一般/*', '*', '*')),\n"
" t(((s/s)\\np:nc), 'は', 'は', '助詞/係助詞/*/*', '*', '*')),\n"
" ba(s,\n"
" ba(np:ga,\n"
" t(np:nc, '政治', '政治', '名詞/一般/*/*', '*', '*'),\n"
" t((np:ga\\np:nc), 'が', 'が', '助詞/格助詞/一般/*', '*', '*')),\n"
" bc1((s\\np:ga),\n"
" t((s\\np:ga), 'わから', 'わかる', '動詞/自立/*/*', '未然形', '五段・ラ行'),\n"
" t((s\\s), 'ぬ', 'ぬ', '助動詞/*/*/*', '基本形', '特殊・ヌ')))),\n"
" t((s\\s), '。', '。', '記号/句点/*/*', '*', '*'))).\n\n"
)
set_global_language_to('ja')
assert to_string([scored_ja_tree], format='prolog') == expected
|
masashi-y/myccg | depccg/allennlp/models/supertagger.py | import copy
import logging
from typing import Any, Dict, List, Optional, Tuple
import numpy
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, TextFieldEmbedder
from allennlp.modules.matrix_attention.bilinear_matrix_attention import (
BilinearMatrixAttention,
)
from allennlp.nn import Activation, InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import (
get_device_of,
get_range_vector,
get_text_field_mask,
masked_log_softmax,
)
from allennlp.training.metrics import CategoricalAccuracy
from depccg.allennlp.nn.bilinear import BilinearWithBias
from torch.nn.modules import Dropout
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def _apply_head_mask(attended_arcs: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
# Mask the diagonal, because the head of a word can't be itself.
attended_arcs = attended_arcs + torch.diag(
attended_arcs.new(mask.size(1)).fill_(-numpy.inf)
)
# Mask padded tokens, because we only want to consider actual words as heads.
attended_arcs.masked_fill_((~mask).unsqueeze(1), -numpy.inf)
attended_arcs.masked_fill_((~mask).unsqueeze(2), -numpy.inf)
return attended_arcs
@Model.register("supertagger")
class Supertagger(Model):
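"""Joint head-selection and supertagging model.
Token embeddings from `text_field_embedder` are contextualised by a Seq2Seq
`encoder`; separate head/child feedforwards feed a bilinear attention that
scores head attachments, and a bilinear layer with bias scores the head tags
(here, CCG supertags) of each word given its head, broadly following the
biaffine dependency parser architecture.
"""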
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
tag_representation_dim: int,
arc_representation_dim: int,
tag_feedforward: FeedForward = None,
arc_feedforward: FeedForward = None,
dropout: float = 0.5,
input_dropout: float = 0.5,
head_tag_temperature: Optional[float] = None,
head_temperature: Optional[float] = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super(Supertagger, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.encoder = encoder
encoder_dim = encoder.get_output_dim()
self.head_arc_feedforward = arc_feedforward or FeedForward(
encoder_dim,
1,
arc_representation_dim,
Activation.by_name("elu")(),
dropout=dropout,
)
self.child_arc_feedforward = copy.deepcopy(self.head_arc_feedforward)
self.arc_attention = BilinearMatrixAttention(
arc_representation_dim, arc_representation_dim, use_input_biases=True
)
num_labels = self.vocab.get_vocab_size("head_tags")
self.head_tag_feedforward = tag_feedforward or FeedForward(
encoder_dim,
1,
tag_representation_dim,
Activation.by_name("elu")(),
dropout=dropout,
)
self.child_tag_feedforward = copy.deepcopy(self.head_tag_feedforward)
self.tag_bilinear = BilinearWithBias(
tag_representation_dim, tag_representation_dim, num_labels
)
self._head_sentinel = torch.nn.Parameter(
torch.randn([1, 1, encoder.get_output_dim()])
)
representation_dim = text_field_embedder.get_output_dim()
check_dimensions_match(
representation_dim,
encoder.get_input_dim(),
"text field embedding dim",
"encoder input dim",
)
check_dimensions_match(
tag_representation_dim,
self.head_tag_feedforward.get_output_dim(),
"tag representation dim",
"tag feedforward output dim",
)
check_dimensions_match(
arc_representation_dim,
self.head_arc_feedforward.get_output_dim(),
"arc representation dim",
"arc feedforward output dim",
)
self._input_dropout = Dropout(input_dropout)
self._attachment_scores = CategoricalAccuracy()
self._tagging_accuracy = CategoricalAccuracy()
self.head_tag_temperature = head_tag_temperature
self.head_temperature = head_temperature
initializer(self)
def forward(
self, # type: ignore
words: Dict[str, torch.LongTensor],
weight: torch.Tensor,
metadata: List[Dict[str, Any]],
head_tags: torch.LongTensor = None,
head_indices: torch.LongTensor = None,
) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
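"""Score head attachments and head tags for a batch and compute the loss.
`words` holds the indexed tokens, `weight` per-token loss weights, and
`metadata` the original "words" of each sentence. Gold `head_tags` and
`head_indices`, when given, drive the loss and the accuracy metrics; at
evaluation time heads and head tags are additionally predicted greedily.
The returned dictionary (after `decode`) contains numpy arrays of the
normalised arc and head-tag scores, plus predictions when available.
"""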
embedded_text_input = self.text_field_embedder(words)
embedded_text_input = self._input_dropout(embedded_text_input)
mask = get_text_field_mask(words)
encoded_text = self.encoder(embedded_text_input, mask)
batch_size, _, encoding_dim = encoded_text.size()
head_sentinel = self._head_sentinel.expand(batch_size, 1, encoding_dim)
# Concatenate the head sentinel onto the sentence representation.
encoded_text = torch.cat([head_sentinel, encoded_text], 1)
mask = torch.cat([mask.new_ones(batch_size, 1), mask], 1)
if head_indices is not None:
head_indices = torch.cat(
[head_indices.new_zeros(batch_size, 1), head_indices], 1
)
if head_tags is not None:
head_tags = torch.cat([head_tags.new_zeros(batch_size, 1), head_tags], 1)
# shape (batch_size, sequence_length, arc_representation_dim)
head_arc_representation = self.head_arc_feedforward(encoded_text)
child_arc_representation = self.child_arc_feedforward(encoded_text)
# shape (batch_size, sequence_length, tag_representation_dim)
head_tag_representation = self.head_tag_feedforward(encoded_text)
child_tag_representation = self.child_tag_feedforward(encoded_text)
# shape (batch_size, sequence_length, sequence_length)
attended_arcs = self.arc_attention(
head_arc_representation, child_arc_representation
)
if head_indices is not None and head_tags is not None:
(
loss,
normalised_arc_logits,
normalised_head_tag_logits,
) = self._construct_loss(
head_tag_representation=head_tag_representation,
child_tag_representation=child_tag_representation,
attended_arcs=attended_arcs,
head_indices=head_indices,
head_tags=head_tags,
mask=mask,
weight=weight,
)
normalised_arc_logits = _apply_head_mask(normalised_arc_logits, mask)
tag_mask = self._get_unknown_tag_mask(mask, head_tags)
self._attachment_scores(
normalised_arc_logits[:, 1:].contiguous(),
head_indices[:, 1:].contiguous(),
mask[:, 1:].contiguous(),
)
self._tagging_accuracy(
normalised_head_tag_logits[:, 1:].contiguous(),
head_tags[:, 1:].contiguous(),
tag_mask[:, 1:].contiguous(),
)
predicted_heads, predicted_head_tags = None, None
if not self.training:
attended_arcs = _apply_head_mask(attended_arcs, mask)
# Compute the heads greedily.
# shape (batch_size, sequence_length)
_, predicted_heads = attended_arcs.max(dim=2)
# Given the greedily predicted heads, decode their dependency tags.
# shape (batch_size, sequence_length, num_head_tags)
head_tag_logits = self._get_head_tags(
head_tag_representation, child_tag_representation, predicted_heads
)
_, predicted_head_tags = head_tag_logits.max(dim=2)
(
loss,
normalised_arc_logits,
normalised_head_tag_logits,
) = self._construct_loss(
head_tag_representation=head_tag_representation,
child_tag_representation=child_tag_representation,
attended_arcs=attended_arcs,
head_indices=predicted_heads.long(),
head_tags=predicted_head_tags.long(),
mask=mask,
weight=weight,
)
normalised_arc_logits = _apply_head_mask(normalised_arc_logits, mask)
output_dict = {
"heads": normalised_arc_logits,
"head_tags": normalised_head_tag_logits,
"loss": loss,
"mask": mask,
"words": [meta["words"] for meta in metadata],
}
if predicted_heads is not None and predicted_head_tags is not None:
output_dict["predicted_heads"] = predicted_heads[:, 1:]
output_dict["predicted_head_tags"] = predicted_head_tags[:, 1:]
output_dict = self.decode(output_dict)
return output_dict
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
head_tags = output_dict.pop("head_tags")
# discard a sentinel token and padding and unknown tags
head_tags = head_tags[:, 1:, 2:]
heads = output_dict.pop("heads")
heads = heads[:, 1:]
output_dict["head_tags"] = head_tags.cpu().detach().numpy()
output_dict["heads"] = heads.cpu().detach().numpy()
if "predicted_heads" in output_dict:
output_dict["predicted_heads"] = (
output_dict["predicted_heads"].cpu().detach().numpy()
)
if "predicted_head_tags" in output_dict:
output_dict["predicted_head_tags"] = (
output_dict["predicted_head_tags"].cpu().detach().numpy()
)
output_dict.pop("loss")
return output_dict
def _construct_loss(
self,
head_tag_representation: torch.Tensor,
child_tag_representation: torch.Tensor,
attended_arcs: torch.Tensor,
head_indices: torch.Tensor,
head_tags: torch.Tensor,
mask: torch.Tensor,
weight: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
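"""Compute masked arc and head-tag negative log-likelihoods.
Arc scores and head-tag logits are normalised with a masked log-softmax,
the entries for the given `head_indices` / `head_tags` are gathered,
weighted per token by `weight`, the root sentinel position is dropped,
and both terms are averaged over the valid positions. Returns the total
loss together with the normalised arc and head-tag log-probabilities.
"""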
float_mask = mask.float()
tag_mask = self._get_unknown_tag_mask(mask, head_tags)
batch_size, sequence_length, _ = attended_arcs.size()
# shape (batch_size, 1)
range_vector = get_range_vector(
batch_size, get_device_of(attended_arcs)
).unsqueeze(1)
# shape (batch_size, sequence_length, sequence_length)
if self.head_temperature:
attended_arcs /= self.head_temperature
normalised_arc_logits = (
masked_log_softmax(attended_arcs, mask)
* float_mask.unsqueeze(2)
* float_mask.unsqueeze(1)
)
# shape (batch_size, sequence_length, num_head_tags)
head_tag_logits = self._get_head_tags(
head_tag_representation, child_tag_representation, head_indices
)
if self.head_tag_temperature:
head_tag_logits /= self.head_tag_temperature
normalised_head_tag_logits = masked_log_softmax(
head_tag_logits, tag_mask.unsqueeze(-1)
) * tag_mask.float().unsqueeze(-1)
# index matrix with shape (batch, sequence_length)
timestep_index = get_range_vector(sequence_length, get_device_of(attended_arcs))
child_index = (
timestep_index.view(1, sequence_length)
.expand(batch_size, sequence_length)
.long()
)
# shape (batch_size, sequence_length)
arc_loss = normalised_arc_logits[range_vector, child_index, head_indices]
tag_loss = normalised_head_tag_logits[range_vector, child_index, head_tags]
tag_loss *= (head_tags > 1).float()
# We don't care about predictions for the symbolic ROOT token's head,
# so we remove it from the loss.
arc_loss = arc_loss[:, 1:] * weight
tag_loss = tag_loss[:, 1:] * weight
# The number of valid positions is equal to the number of unmasked elements minus
# 1 per sequence in the batch, to account for the symbolic HEAD token.
valid_positions = mask.sum() - batch_size
arc_nll = -arc_loss.sum() / valid_positions.float()
tag_nll = -tag_loss.sum() / valid_positions.float()
loss = arc_nll + tag_nll
return loss, normalised_arc_logits, normalised_head_tag_logits
def _get_head_tags(
self,
head_tag_representation: torch.Tensor,
child_tag_representation: torch.Tensor,
head_indices: torch.Tensor,
) -> torch.Tensor:
batch_size = head_tag_representation.size(0)
# shape (batch_size,)
range_vector = get_range_vector(
batch_size, get_device_of(head_tag_representation)
).unsqueeze(1)
# This next statement is quite a complex piece of indexing, which you really
# need to read the docs to understand. See here:
# https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#advanced-indexing
# In effect, we are selecting the indices corresponding to the heads of each word from the
# sequence length dimension for each element in the batch.
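# Concretely, entry [b, i] of the selected tensor is
# head_tag_representation[b, head_indices[b, i]], i.e. the representation
# of word i's head in sentence b.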
# shape (batch_size, sequence_length, tag_representation_dim)
selected_head_tag_representations = head_tag_representation[
range_vector, head_indices
]
selected_head_tag_representations = (
selected_head_tag_representations.contiguous()
)
# shape (batch_size, sequence_length, num_head_tags)
head_tag_logits = self.tag_bilinear(
selected_head_tag_representations, child_tag_representation
)
return head_tag_logits
def _get_unknown_tag_mask(
self, mask: torch.LongTensor, head_tags: torch.LongTensor
) -> torch.LongTensor:
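"""Return `mask` with positions whose head tag is the OOV label zeroed out."""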
oov = self.vocab.get_token_index(DEFAULT_OOV_TOKEN, "head_tags")
new_mask = mask.detach()
oov_mask = head_tags.eq(oov).long()
new_mask = new_mask * (1 - oov_mask)
return new_mask
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
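"""Report attachment accuracy, tagging accuracy, and their harmonic mean."""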
dependency = self._attachment_scores.get_metric(reset)
tagging = self._tagging_accuracy.get_metric(reset)
harmonic_mean = (
(2 * dependency * tagging) / (dependency + tagging)
if dependency + tagging > 0 else 0.0
)
scores = {
"dependency": dependency,
"tagging": tagging,
"harmonic_mean": harmonic_mean,
}
return scores
|
Anaphory/swip | setup.py | from setuptools import setup
setup(
name='swip',
version="0.1.1",
description='SignWriting Images in Python',
long_description=open("README.md").read().split('##')[0],
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/Anaphory/swip',
install_requires=[
# 'sqlite3',
],
include_package_data=True,
license="MIT",
zip_safe=False,
keywords='',
classifiers=[
],
packages=[
'swip'],
entry_points={
'console_scripts': [
'swip=swip.__main__:main',
'swflashcards=swip.swflashcards:main',
]
},
tests_require=['nose'],
)
|
Anaphory/swip | swip/swflashcards.py | #!python3
"""Generate html for printing flash cards from sign puddle export"""
import io
import bisect
import os
import sys
import json
import argparse
import xml.etree.ElementTree as ET
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.parse import quote_plus
from . import compose
ET.register_namespace("", "http://www.w3.org/2000/svg")
DICTAPI_URL = "https://api.datamuse.com/words?sp={:}&md=f"
class UncleanEntryError (ValueError):
"""A sign puddle markup language entry had no valid glosses."""
class Sign:
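"""A sign entry: its KSW string, glosses, and optional comment and source."""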
def __init__(self, sign, glosses, comment=None, source=None):
self.sign_string = sign
self.glosses = tuple(glosses)
self.comment = comment
self.source = source
@classmethod
def from_spml_entry(cl, spml_entry):
data = spml_entry.findall("term")
glosses = [element.text for element in data[1:]]
if not glosses:
raise UncleanEntryError
sign = cl(data[0].text, glosses)
comment = spml_entry.find("text")
if comment is not None:
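# Reject entries whose free-text field looks like a KSW layout string
# (e.g. 'M123x456...') rather than a human-readable comment.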
if comment.text[0] == 'M' and comment.text[4] == 'x':
raise UncleanEntryError
sign.comment = comment.text
source = spml_entry.find("src")
if source is not None:
sign.source = source.text
return sign
def __repr__(self):
return "<Sign {:}>".format(self.glosses[0].upper())
def look_up_frequency(gloss):
"""Load frequency data from Datamuse
>>> look_up_frequency("apple")
19.314666
>>> look_up_frequency("juice")
17.828465
>>> look_up_frequency("apple-juice") == 0.5 * (
... look_up_frequency("apple")+look_up_frequency("juice"))
True
"""
dictapi = DICTAPI_URL.format(quote_plus(gloss))
gloss_dict = urlopen(dictapi).read().decode('utf-8')
freq = None
if ' ' in gloss:
parts = gloss.split(" ")
freq = sum(look_up_frequency(part) or 0.0
for part in parts) / len(parts)
elif '-' in gloss:
parts = gloss.split("-")
freq = sum(look_up_frequency(part) or 0.0
for part in parts) / len(parts)
parsed = json.loads(gloss_dict)
if not parsed or parsed[0]["word"] != gloss.lower():
return freq
this_freq = float([
f for f in parsed[0]['tags']
if f.startswith('f:')][0][2:])
freq = this_freq if not freq or this_freq > freq else freq
return freq
def parse_spml(spml_file, signs_by_gloss=None, ordered_glosses=None, scores=None, scorer=look_up_frequency, debug=False):
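"""Collect signs from an SPML file, ordered by gloss frequency.
Entries without glosses are rejected; duplicated or unscorable signs are
gathered in `strange`. Remaining signs are inserted into `signs_by_gloss`
and `ordered_glosses`, which is kept sorted from the most to the least
frequent gloss using `scorer`. Returns (signs_by_gloss, strange), plus the
rejected entries when `debug` is set.
"""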
if signs_by_gloss is None:
signs_by_gloss = {}
ordered_glosses = []
if scores is None:
scores = [0 for sign in signs_by_gloss]
rejected = []
strange = []
tree = ET.parse(spml_file)
root = tree.getroot()
for entry in root.findall("entry"):
try:
sign = Sign.from_spml_entry(entry)
except UncleanEntryError:
rejected.append(entry)
continue
frequency = 0.0
is_strange = True
for gloss in sign.glosses:
try:
# Make sure that the rarest words are at the end of the list.
frequency -= scorer(gloss)
is_strange = False
except TypeError:
pass
if sign.glosses in signs_by_gloss:
if len(sign.sign_string) > len(signs_by_gloss[sign.glosses].sign_string):
# Assume longer sign means more detailed transcription means better
strange.append(signs_by_gloss[sign.glosses])
signs_by_gloss[sign.glosses] = sign
else:
# Duplicate gloss
strange.append(sign)
elif is_strange:
strange.append(sign)
else:
index = bisect.bisect(scores, frequency)
scores.insert(index, frequency)
signs_by_gloss[sign.glosses] = sign
ordered_glosses.insert(index, sign.glosses)
if debug:
return signs_by_gloss, strange, rejected
else:
return signs_by_gloss, strange
def main():
"""Run the CLI"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"spml_file",
nargs='+',
type=argparse.FileType('r'),
help='SPML file(s) to parse')
parser.add_argument(
"--gloss-scores",
type=argparse.FileType('r'),
help="JSON file with cached gloss scores")
parser.add_argument(
"--front",
type=argparse.FileType('wb'),
help="HTML file to write signs to")
parser.add_argument(
"--back",
type=argparse.FileType('wb'),
help="HTML file to write glosses to")
parser.add_argument(
"--columns",
type=int,
default=5,
help="Print this many columns of cards per row")
args = parser.parse_args()
if args.front is None:
name = args.spml_file[0].name
args.front = open(
(name[:-5] if name.endswith('.spml') else name) +
'_f.html', 'wb')
if args.back is None:
name = args.front.name
args.back = open(
(name[:-7] if name.endswith('_f.html') else
name[:-5] if name.endswith('.html') else name) +
'_b.html', 'wb')
# Read a cache file of gloss scores
if args.gloss_scores:
score_cache = json.load(args.gloss_scores)
else:
score_cache = {}
def scorer(gloss):
print(gloss, file=sys.stderr)
try:
return score_cache[gloss]
except KeyError:
score = look_up_frequency(gloss)
score_cache[gloss] = score
return score
# Read all signs from a spml file
try:
signs = {}
glosses = []
scores = []
strange = []
for file in args.spml_file:
_, strange_here = parse_spml(file, signs, glosses, scores, scorer)
strange += strange_here
except KeyboardInterrupt:
pass
# Try to write-back a file of gloss scores
try:
args.gloss_scores.close()
with open(args.gloss_scores.name, "w") as json_data:
json.dump(score_cache, json_data, sort_keys=True, indent=4)
except (AttributeError, OSError):
pass
# HTML Template
COLUMNS = args.columns
STYLE = """
tr {{ page-break-inside: avoid; max-height: {length:f}cm; overflow: hidden; }}
td {{ height: {length:f}cm; width: {length:f}cm; border: 0.3pt solid black; page-break-inside: avoid; }}
svg {{ max-width: {length:f}cm; max-height: {length:f}cm; overflow: hidden; }}
td div {{ max-width: {length:f}cm; max-height: {length:f}cm; text-align: center; overflow: hidden; }}
p {{ max-width: {length:f}cm; max-height: {length:f}cm; overflow: hidden; }}
p.comment {{ font-size: 0.5em; }}
""".format(length=18/COLUMNS)
html_f = ET.Element('html')
document_f = ET.ElementTree(html_f)
style_f = ET.SubElement(html_f, 'style')
style_f.text = STYLE
body_f = ET.SubElement(html_f, 'body')
table_f = ET.SubElement(body_f, 'table')
html_b = ET.Element('html')
document_b = ET.ElementTree(html_b)
style_b = ET.SubElement(html_b, 'style')
style_b.text = STYLE
body_b = ET.SubElement(html_b, 'body')
table_b = ET.SubElement(body_b, 'table')
# Generate HTML
strange = sorted(strange, key=lambda x: len(x.glosses[0]))
try:
for i, sign in enumerate([signs[g] for g in glosses] + strange):
if i % COLUMNS == 0:
# Start a new row
row_f = ET.SubElement(table_f, 'tr')
row_b = ET.SubElement(table_b, 'tr')
print(i, file=sys.stderr)
cell_f = ET.SubElement(row_f, 'td')
cell_b = ET.Element('td')
row_b.insert(0, cell_b)
# Front contains svg graphic
try:
svg = ET.parse(io.StringIO(compose.glyphogram(
sign.sign_string,
bound=None))).getroot()
svg.attrib['viewBox'] = "0 0 {:} {:}".format(
svg.attrib['width'], svg.attrib['height'])
cell_f.insert(0, svg)
except ValueError:
# Leave cell blank
pass
# Back contains gloss
maxsize = ET.SubElement(cell_b, 'div')
ET.SubElement(maxsize, 'p').text = '; '.join(sign.glosses)
if sign.comment:
ET.SubElement(maxsize, 'p', **{'class': 'comment'}).text = sign.comment
except KeyboardInterrupt:
pass
i += 1
# Fill up last row, so that mirror symmetry is given
while i % COLUMNS != 0:
cell_f = ET.SubElement(row_f, 'td')
cell_b = ET.Element('td')
row_b.insert(0, cell_b)
i += 1
# Write output to files
document_f.write(args.front)
document_b.write(args.back)
if __name__ == '__main__':
main()
|
Anaphory/swip | swip/__main__.py | #!/usr/bin/env python
"Render Kartesian SignWriting strings as SVG graphics."
import sys
import argparse
from .compose import glyphogram
from .iswa_font import ISWAFont
def main():
"""The main CLI."""
parser = argparse.ArgumentParser(
description=__doc__)
parser.add_argument(
"ksw_string",
help="The KSW string to be rendered.")
parser.add_argument(
"--output",
type=argparse.FileType('w'),
default=sys.stdout,
help="The file to write output to.")
parser.add_argument(
"-a", "--auto-output",
action="store_true",
default=False,
help="Write output to KSW_STRING.svg")
parser.add_argument(
"--font",
default="font_svg1",
help="The font to use")
args = parser.parse_args()
if args.auto_output and args.output != sys.stdout:
raise ValueError("Both auto-output and output file specified.")
elif args.auto_output:
args.output = open(args.ksw_string + '.svg', 'w')
args.output.write(
glyphogram(args.ksw_string,
font=ISWAFont(name=args.font)))
if __name__ == "__main__":
main()
|
Anaphory/swip | swip/parser.py | #!/usr/bin/env python3
"""parser: SignWriting string parse functions
Define various regular expressions and use them to validate
SignWriting strings.
"""
import re
SYMBOL_BLOCK = 'S[123][0-9a-f]{2}[0-5][0-9a-f]'
COORD_BLOCK = 'n?[0-9]+xn?[0-9]+'
POS_COORD_BLOCK = '[0-9]+x[0-9]+'
re_word = (
'(A(' + SYMBOL_BLOCK + ')+)?'
'[BLMR](' + SYMBOL_BLOCK + COORD_BLOCK + ')*')
re_pword = (
'(A(' + SYMBOL_BLOCK + ')+)?'
'[BLMR](' + SYMBOL_BLOCK + POS_COORD_BLOCK +
'x' + COORD_BLOCK + ')*')
re_panelword = (
'[BLMR](' + POS_COORD_BLOCK + ')'
'(' + SYMBOL_BLOCK + COORD_BLOCK + ')*')
re_lword = (
'(A(' + SYMBOL_BLOCK + ')+)?' + re_panelword)
re_punc = 'S38[7-9ab][0-5][0-9a-f]'
re_ppunc = re_punc + POS_COORD_BLOCK
re_lpunc = re_punc + COORD_BLOCK
COORDINATES = re.compile(COORD_BLOCK,
flags=re.IGNORECASE)
SYM_WITH_COORD = re.compile(SYMBOL_BLOCK + COORD_BLOCK,
flags=re.IGNORECASE)
PREFIX = re.compile('A((' + SYMBOL_BLOCK + ')+)',
flags=re.IGNORECASE)
RAW_TOKEN = re.compile("(" + re_word + '|' + re_punc + ")",
flags=re.IGNORECASE)
EXPANDED_TOKEN = re.compile('(' + re_pword + '|' + re_ppunc + ')',
flags=re.IGNORECASE)
LAYOUT_TOKEN = re.compile('(' + re_lword + '|' + re_lpunc + ')',
flags=re.IGNORECASE)
PANEL_TOKEN = re.compile(
'D' + POS_COORD_BLOCK + '(_' + re_panelword + ')*',
flags=re.IGNORECASE)
# Pattern matching on Kartesian SignWriting strings
def is_raw(text):
"""Test whether text is a raw SignWriting string.
Returns True if `text` is raw Kartesian SignWriting, that is,
without symbol sizes or preprocessed information, False otherwise.
This function tests according to pattern-matching with a regex, so
it can only be a necessary, not a sufficient condition. For
example, is_raw('BS3ff5f0x0') will be True, even though the symbol
slot S3ff5f is not assigned.
>>> all([is_raw('B'), is_raw('L'), is_raw('M'), is_raw('R')])
True
>>> is_raw('BS10000n10xn10')
True
>>> is_raw('LS10000n10xn10')
True
>>> is_raw('MS10000n10xn10')
True
>>> is_raw('RS10000n10xn10')
True
>>> is_raw('AS10000BS10000n10xn10')
True
>>> is_raw('AS10000LS10000n10xn10')
True
>>> is_raw('AS10000MS10000n10xn10')
True
>>> is_raw('AS10000RS10000n10xn10')
True
>>> is_raw('AS10000BS1000010x10xn10xn10')
False
>>> is_raw('AS10000LS1000010x10xn10xn10')
False
>>> is_raw('AS10000MS1000010x10xn10xn10')
False
>>> is_raw('AS10000RS1000010x10xn10xn10')
False
@param `text` str
@return boolean
"""
return all([RAW_TOKEN.fullmatch(token) for token in text.split()])
def is_expanded(text):
"""Test if text is Kartesian SignWriting with symbol sizes.
Returns True if `text` is Kartesian SignWriting with symbol sizes
specified.
This function tests according to pattern-matching with a regex, so
it can only be a necessary, not a sufficient condition. For
example, is_expanded('BS3ff5f1x1xn1xn1') will be True, even though
the symbol slot S3ff5f is not assigned.
>>> all([is_expanded('B'), is_expanded('L'),
... is_expanded('M'), is_expanded('R')])
True
>>> is_expanded('BS1000010x10xn10xn10')
True
>>> is_expanded('LS1000010x10xn10xn10')
True
>>> is_expanded('MS1000010x10xn10xn10')
True
>>> is_expanded('RS1000010x10xn10xn10')
True
>>> is_expanded('AS10000BS1000010x10xn10xn10')
True
>>> is_expanded('AS10000LS1000010x10xn10xn10')
True
>>> is_expanded('AS10000MS1000010x10xn10xn10')
True
>>> is_expanded('AS10000RS1000010x10xn10xn10')
True
>>> is_expanded('BS10000n10xn10')
False
>>> is_expanded('LS10000n10xn10')
False
>>> is_expanded('MS10000n10xn10')
False
>>> is_expanded('RS10000n10xn10')
False
"""
return all([EXPANDED_TOKEN.fullmatch(token) for token in text.split()])
def is_layouted(text):
"""Test if text is Kartesian SignWriting with layout data.
Returns True if `text` is Kartesian SignWriting without symbol
sizes specified, but with layout data from pre-processing.
This function tests according to pattern-matching with a regex, so
it can only be a necessary, not a sufficient condition. For
example, is_layouted('B1x1S3ff5f1x1') will be True, even though the
symbol slot S3ff5f is not assigned.
>>> all([is_layouted('B10x10'), is_layouted('L3x4'),
... is_layouted('M3x2'), is_layouted('R8x23')])
True
>>> is_layouted('B10x10S10000n10xn10')
True
>>> is_layouted('L10x10S10000n10xn10')
True
>>> is_layouted('M10x10S10000n10xn10')
True
>>> is_layouted('R10x10S10000n10xn10')
True
"""
return all([LAYOUT_TOKEN.fullmatch(token)
for token in text.split()])
def is_panel(text):
"""Test if text is a Kartesian SignWriting panel string.
Returns True if `text` consists of Kartesian SignWriting panel
tokens: a `D` block with panel dimensions followed by
underscore-separated layouted words.
This function tests according to pattern-matching with a regex, so
it can only be a necessary, not a sufficient condition: a panel
referencing an unassigned symbol slot such as S3ff5f would still
be accepted.
"""
return all([PANEL_TOKEN.fullmatch(token) for token in text.split()])
def all_symbols(sw_string):
"""List all symbols referenced in sw_string
List all symbols that are referenced in the body of the Kartesian
SignWriting string `sw_string`, that is, all symbols outside the
`AS00000` prefix, or equivalently, all symbols with defined
coordinates.
>>> all_symbols('BS10000n10xn10')
['S10000']
>>> all_symbols('LS10000n10xn10')
['S10000']
>>> all_symbols('MS10000n10xn10')
['S10000']
>>> all_symbols('RS10000n10xn10')
['S10000']
>>> all_symbols('AS10000BS10000n10xn10S1035f10x10')
['S10000', 'S1035f']
>>> all_symbols('AS10000LS10000n10xn10S1035f10x10')
['S10000', 'S1035f']
>>> all_symbols('AS10000MS10000n10xn10S1035f10x10')
['S10000', 'S1035f']
>>> all_symbols('AS10000RS10000n10xn10S1035f10x10')
['S10000', 'S1035f']
"""
symbols = []
for group in SYM_WITH_COORD.findall(sw_string):
symbols.append(group[:6])
return symbols
def prefix_symbols(sw_string):
""" List all symbols referenced in the prefix.
>>> prefix_symbols(
... 'M18x33S1870an11x15S18701n18xn10S205008xn4S2e7340xn32')
[]
>>> prefix_symbols('AS1870aS18701S2e734M18x33S1870an11x15'
... 'S18701n18xn10S205008xn4S2e7340xn32')
['S1870a', 'S18701', 'S2e734']
"""
match = PREFIX.match(sw_string)
if not match:
return []
return re.findall(SYMBOL_BLOCK, match.group(0), flags=re.IGNORECASE)
# Define symbol types
symbol_ranges = {
'iswa': (0x100, 0x38b),
'writing': (0x100, 0x37e),
'hand': (0x100, 0x204),
'movement': (0x205, 0x2f6),
'dynamics': (0x2f7, 0x2fe),
'head': (0x2ff, 0x36c),
'trunk': (0x36d, 0x375),
'limb': (0x376, 0x37e),
'location': (0x37f, 0x386),
'punctuation': (0x387, 0x38b)}
def symbol_id(symbol):
"""This basic shape's integer ID.
A helper function for looking up symbol types. Following the `S`,
the first three symbols are the hexadecimal number of the symbol
(up to rotation or similar variation).
>>> symbol_id('S10350') == 0x103
True
>>> symbol_id('2a6') == 0x2a6
True
"""
if symbol.startswith('S'):
return int(symbol[1:4], 16)
else:
return int(symbol[:3], 16)
def is_type(symbol, type):
"""Check whether a symbol is of a given type.
The symbol can be given as ISWA identifier with or without `S`, or
as the corresponding integer id of the basic shape.
>>> is_type('S32a00', 'head')
True
>>> is_type(0x389, 'punctuation')
True
>>> is_type('1da51', 'hand')
True
"""
try:
symbol = symbol_id(symbol)
except AttributeError:
pass
try:
return (
symbol_ranges[type][0] <= symbol <= symbol_ranges[type][1])
except KeyError:
raise ValueError('No symbol type {:}.'.format(type))
def symbol_type(symbol):
"""Return the type of the symbol.
>>> symbol_type(0x100)
'hand'
>>> symbol_type('S38b00')
'punctuation'
>>> symbol_type('S37e00')
'limb'
"""
try:
symbol = symbol_id(symbol)
except AttributeError:
pass
for type, (lower, upper) in symbol_ranges.items():
if type == 'iswa' or type == 'writing':
# Not a fundamental type
continue
if lower <= symbol <= upper:
return type
raise ValueError('Not a valid symbol: {:}'.format(symbol))
def swnumber(string):
"""Convert a KSW number string into an integer.
A KSW number string is a string of the format 'n?[0-9]+'. The 'n'
indicates 'negative' numbers.
>>> swnumber('0092')
92
>>> swnumber('200')
200
>>> swnumber('n34')
-34
>>> swnumber('-34')
Traceback (most recent call last):
[...]
ValueError: Not a valid KSW number string: -34
"""
if not re.fullmatch('n?[0-9]+', string):
raise ValueError('Not a valid KSW number string: {:}'.format(
string))
if string.startswith('n'):
return -int(string[1:])
else:
return int(string)
def coordinates(sw_substring):
"""Convert a KSW coordinate string into a pair of integers.
A KSW coordinate string, used for Cartesian coordinates as well as
dimensions, is a string of the format 'n?[0-9]+xn?[0-9]+' and
corresponds in an obvious manner to a pair of signed integers.
>>> coordinates('0092x0108')
(92, 108)
>>> coordinates('n15x20')
(-15, 20)
>>> coordinates('1x2x3')
Traceback (most recent call last):
[...]
ValueError: Not a valid KSW coordinates string: 1x2x3
"""
if not COORDINATES.fullmatch(sw_substring):
raise ValueError(
'Not a valid KSW coordinates string: {:}'.format(
sw_substring))
first, last = sw_substring.split('x')
return swnumber(first), swnumber(last)
def parse(layout_string):
"""Parse a layout string to array of symbols with placement.
>>> parse('M18x33S1870an11x15S18701n18xn10S205008xn4S2e7340xn32')
[('M', (18, 33)), ('S1870a', (-11, 15)), ('S18701', (-18, -10)), ('S20500', (8, -4)), ('S2e734', (0, -32))]
>>> parse('MS1870an11x15S18701n18xn10S205008xn4S2e7340xn32')
[('M', (8, 15)), ('S1870a', (-11, 15)), ('S18701', (-18, -10)), ('S20500', (8, -4)), ('S2e734', (0, -32))]
>>> parse('S38800n36xn4')
[('B', (36, 4)), ('S38800', (-36, -4))]
>>> parse('')
[('M', (0, 0))]
"""
if not layout_string:
return [('M', (0, 0))]
seq = 'A' + ''.join(prefix_symbols(layout_string))
sw_string = layout_string.replace(seq, '')
match = re.fullmatch(
'((' + re_punc + ')(' + COORD_BLOCK + '))|'
'([BLMR](' + POS_COORD_BLOCK + ')?)'
'((' + SYMBOL_BLOCK + COORD_BLOCK + ')*)',
sw_string)
if not match:
raise ValueError(
'String {:} contained unrecognized elements'.format(
sw_string))
if match.group(1):
# This is a punctuation character
punct = match.group(2)
coord = coordinates(match.group(3))
return [
('B', (-coord[0], -coord[1])),
(punct, coord)]
else:
# This is some other character
cluster = []
max_x = max_y = float('-inf')
for symbol in re.findall(
SYMBOL_BLOCK + COORD_BLOCK,
match.group(6)):
coord = coordinates(symbol[6:])
max_x = max(max_x, coord[0])
max_y = max(max_y, coord[1])
cluster.append(
(symbol[:6], coord))
cluster.insert(
0, (match.group(4)[0],
coordinates(match.group(5)) if match.group(5) else (max_x, max_y)))
return cluster
def min_coordinates(cluster, min_is_zero=True):
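"""Return the minimum x and y over the placed symbols in `cluster`.
The leading lane entry is skipped. With `min_is_zero` (the default)
the minima are at most zero, so the origin always lies inside the box.
"""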
x_min = 0 if min_is_zero else float('inf')
y_min = 0 if min_is_zero else float('inf')
for i, (sym, (x, y)) in enumerate(cluster):
if i == 0:
continue
x_min = min(x_min, x)
y_min = min(y_min, y)
return x_min, y_min
|
Anaphory/swip | swip/iswa_font.py | #!/usr/bin/env python
"""iswa_font: International SignWriting Alphabet font interface
The iswa_font module provides a class for accessing SVG ISWA glyphs
from their location in an SQLite3 database.
"""
import os
import sqlite3
LICENSE = "MIT"
AUTHORS = ["<NAME> <<EMAIL>>"]
COPYRIGHT = "Copyright (c) 2017 <NAME>"
class ISWAFont:
"""A class encapsulating an ISWA font database connection."""
def __init__(self, db=None, name="font_svg1"):
if db is None:
db = os.path.join(
os.path.dirname(__file__),
'iswa.sql3')
conn = sqlite3.connect(db)
self.name = name
self.c = conn.cursor()
@staticmethod
def code(symbol_key):
"""Create the internal database code from a symbol key.
>>> ISWAFont.code('10000')
1
>>> ISWAFont.code('S10001')
2
>>> ISWAFont.code('S1000f')
16
>>> ISWAFont.code('S10100')
97
"""
if symbol_key.startswith('S'):
symbol_key = symbol_key[1:]
return 1 + (
(int(symbol_key[0:3], 16) - 256) * 96 +
int(symbol_key[3:5], 16))
def svg_snippet(self, symbol):
"""Get an SVG glyph snippet from the database.
Load the svg group representing the glyph 'key' from the
database. Return the svg group, its width and height.
>>> ISWAFont().svg_snippet('S1000f')[1:]
(30, 21)
"""
# WARNING: This function is in theory able to wreak havoc on
# the database, because poor database design means we need to
# handle IN PRINCIPLE ARBITRARY TABLE NAMES.
query = (
'SELECT glyph, w, h FROM {name:s}, symbol '
'WHERE {name:s}.code = ? '
'AND symbol.code = ?').format(name=self.name)
code = self.code(symbol)
self.c.execute(query, (code, code))
glyph, w, h = self.c.fetchone()
return glyph, w, h
def complete_svg(self, symbol):
"""Load the image corresponding to `key` from the database.
>>> iswa = ISWAFont()
>>> print(iswa.complete_svg('S1000f'))
... # doctest: +NORMALIZE_WHITESPACE +REPORT_NDIFF
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg
width="30" height="21">
<metadata>S1000f</metadata>
<g transform="scale(0.938 0.913) translate(10.667 -9)
rotate(315) scale(-1,1)">
<rect id="index" x="13" y="0"
width="2" height="15" fill="#000000" />
<rect id="base" x="0" y="15"
width="15" height="15" fill="#000000" />
<rect id="fill" x="2" y="17"
width="11" height="11" fill="#ffffff" />
</g>
</svg>
"""
if symbol.startswith('S'):
symbol = symbol[1:]
glyph, w, h = self.svg_snippet(symbol)
return """<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg
width="{w:d}" height="{h:d}">
<metadata>S{symbol:s}</metadata>
{glyph:s}
</svg>
""".format(
w=w, h=h, symbol=symbol, glyph=glyph)
def glyph(self, key,
line='#000000', fill='#ffffff'):
"""Return the (raw) glyph, potentially recolored.
>>> print(ISWAFont().glyph('S10000'))
... # doctest: +NORMALIZE_WHITESPACE +REPORT_NDIFF
<g>
<rect id="index" x="13" y="0" width="2" height="15" fill="#000000" />
<rect id="base" x="0" y="15" width="15" height="15" fill="#000000" />
<rect id="fill" x="2" y="17" width="11" height="11" fill="#ffffff" />
</g>
>>> print(ISWAFont().glyph('S10000', fill='#00ffff', line='#0000ff'))
... # doctest: +NORMALIZE_WHITESPACE +REPORT_NDIFF
<g>
<rect id="index" x="13" y="0" width="2" height="15" fill="#0000ff" />
<rect id="base" x="0" y="15" width="15" height="15" fill="#0000ff" />
<rect id="fill" x="2" y="17" width="11" height="11" fill="#00ffff" />
</g>
"""
svg, w, h = self.svg_snippet(key)
svg = svg.replace('#000000', '__line_color__')
svg = svg.replace('#ffffff', '__fill_color__')
svg = svg.replace('__line_color__', line)
svg = svg.replace('__fill_color__', fill)
return svg
|
Anaphory/swip | swip/compose.py | #!/usr/bin/env python
"""image: Compose symbols to SignWriting SVGs
The core function is `glyphogram`, which takes a KSW string (and some
optional parameters) and constructs a SVG graphic for that sign.
"""
from . import parser
from .iswa_font import ISWAFont
DEFAULT = ISWAFont()
symbol_group_color = {
'hand': '#0000ff',
'movement': '#ff0000',
'dynamics': '#ff00ff',
'head': '#00ff00',
'trunk': '#000000',
'limb': '#000000',
'location': '#ddaa00',
'punctuation': '#ff5500'}
def glyphogram(ksw_string, pad=1, bound=None, line='#000000',
fill='#ffffff', colorize=False, font=DEFAULT):
"""
>>> print(glyphogram(
... 'M40x69S35000n18xn18S30c00n18xn18S14c2017x15S22e0420x51'))
... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg" width="60.000000" height="89.000000">
<metadata>
Generated with SWIP using Valerie Sutton's ISWA 2010 symbols (font_svg1)
M40x69S35000n18xn18S30c00n18xn18S14c2017x15S22e0420x51
</metadata>
<g transform="translate(1,1)"> ...
</g>
</svg>
"""
# Process cluster string
layout = parser.parse(ksw_string)
x_max, y_max = layout[0][1]
x_min, y_min = parser.min_coordinates(layout, False)
# Crudely center the image
if bound == 'c' or bound == 'h':
if -x_min > x_max:
x_max = -x_min
else:
x_min = -x_max
# Pad with whitespace
x_max += pad
x_min -= pad
y_max += pad
y_min -= pad
# Load images and put in the right places
images = []
for num, (symbol, (x, y)) in enumerate(layout):
if num == 0:
continue
key = symbol[1:6]
if colorize:
group = parser.symbol_type(symbol)
line = symbol_group_color[group]
images.append("""
<g transform="translate({x:d},{y:d})">
{core:}
</g>""".format(
x=x - x_min,
y=y - y_min,
core=font.glyph(key, line, fill)))
# Insert into single SVG canvas
svg = """<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg" width="{width:f}" height="{height:f}">
<metadata>
Generated with SWIP using <NAME>'s ISWA 2010 symbols ({font:})
{ksw_string:s}
</metadata>
{image:s}
</svg>
""".format(
font=font.name,
width=x_max - x_min,
height=y_max - y_min,
ksw_string=ksw_string,
image=''.join(images))
# Return
return svg
|