repo_name (stringlengths 6–97)
|
path (stringlengths 3–341)
|
text (stringlengths 8–1.02M)
|
|---|---|---|
ignatowski/u-test
|
server/u_test/models.py
|
<gh_stars>0
import datetime, os, re
class Customer:
"""A Customer object."""
def __init__(self, name: str, doc: str, address: str, emails: str, phones: str) -> None:
self.name = name
self.participation = 'Titular'
self.doc = doc
self.address = address
self.emails = emails
self.phones = phones
def __str__(self) -> str:
return (
'\t\t\tName: ' + self.name + os.linesep +
'\t\t\tParticipation: ' + self.participation + os.linesep +
'\t\t\tDoc: ' + self.doc + os.linesep +
'\t\t\tAddress: ' + self.address + os.linesep +
'\t\t\tEmails: ' + self.emails + os.linesep +
'\t\t\tPhones: ' + self.phones + os.linesep
)
class Account:
"""An Account object."""
def __init__(self, name: str, number: str, balance: str, account_id: str) -> None:
self.name = name
self.number = number
self.currency = self.determine_currency(balance)
self.balance = balance
self.account_id = account_id
self.statements = list()
def __str__(self) -> str:
return (
'\t\tName: ' + self.name + os.linesep +
'\t\tNumber: ' + self.number + os.linesep +
'\t\tCurrency: ' + self.currency + os.linesep +
'\t\tBalance: ' + re.sub("[^0-9.]", "", self.balance) + os.linesep
)
def determine_currency(self, balance: str) -> str:
"""Check the currency symbol of the balance and return the three letter text representation."""
if balance.startswith('€'):
return 'EUR'
return ''
class Statement:
"""A Statement object."""
def __init__(self, date: str, amount: str, balance: str, concept: str) -> None:
self.date = self.format_date(date)
self.amount = amount
self.balance = balance
self.concept = concept
def __str__(self) -> str:
return (
'\t\t' +
self.date.strftime('%Y-%m-%d').ljust(11) + '|' +
'{0:-.1f}'.format(float(re.sub("[^0-9.-]", "", self.amount))).center(8) + '|' +
' ' + '{0:-.0f}'.format(float(re.sub("[^0-9.-]", "", self.balance))).ljust(8) + '|' +
' ' + self.concept +
os.linesep
)
def format_date(self, date: str) -> datetime.datetime:
"""Take a date string in format '%d/%m/%Y' and return a datetime object."""
return datetime.datetime.strptime(date, '%d/%m/%Y')
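# --- Minimal usage sketch (not part of the original module; all values below are
# --- made-up examples that simply show how the three models compose and print).
if __name__ == '__main__':
    customer = Customer('Jane Example', '00000000X', '1 Example Street', 'jane@example.com', '+00 000 000 000')
    account = Account('Everyday Account', 'ES00 0000 0000 0000', '€100.50', '1')
    account.statements.append(Statement('31/12/2010', '€10', '€100.2', 'Example concept'))
    print(customer)
    print(account)
    for statement in account.statements:
        print(statement)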
|
ignatowski/u-test
|
server/read_r_bs4.py
|
import getopt, sys
from u_test.api_r import ApiR
from u_test.html_parser_bs4 import HtmlParserBs4
from u_test.printer import Printer
def main() -> None:
"Gets and prints data."
# get the username and password from the command line arguments
username = ''
password = ''
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["username=","password="])
except getopt.GetoptError as err:
print('usage: read_r_bs4.py --username <username> --password <password>')
print('error: ' + str(err))
sys.exit(2)
for opt, arg in opts:
if opt == '--username':
username = arg
elif opt == '--password':
password = arg
# login
api_r = ApiR(username, password)
api_r.login()
# get the customers html
customers_html = api_r.get_customers_html()
# parse the customers html into customer objects
html_parser_bs4 = HtmlParserBs4()
customers = html_parser_bs4.parse_customers_html_to_objects(customers_html, username)
# get the accounts html
accounts_html = api_r.get_accounts_html()
accounts = html_parser_bs4.parse_accounts_html_to_objects(accounts_html)
# get statements for each account
for account in accounts:
# get the statements html
statements_html = api_r.get_statements_html(account.account_id)
account.statements = html_parser_bs4.parse_statements_html_to_objects(statements_html)
# print accounts, customers, and statements
Printer(accounts, customers)
if __name__ == '__main__':
main()
|
ignatowski/u-test
|
server/u_test/tests/test_models.py
|
<gh_stars>0
from ..models import Customer, Account, Statement
from datetime import datetime
import unittest
class TestCustomer(unittest.TestCase):
"""Test the Customer class."""
def __init__(self, *args, **kwargs) -> None:
"""Test that a Customer object can be created."""
unittest.TestCase.__init__(self, *args, **kwargs)
self.name = '<NAME>'
self.participation = 'Titular'
self.doc = 'Test Doc'
self.address = 'Test Address'
self.emails = 'Test Emails'
self.phones = 'Test Phones'
self.customer = Customer(self.name, self.doc, self.address, self.emails, self.phones)
def test_customer_name(self) -> None:
"""Test that the customer name was set correctly."""
self.assertEqual(self.customer.name, self.name)
def test_customer_participation(self) -> None:
"""Test that the customer participation was set correctly."""
self.assertEqual(self.customer.participation, self.participation)
def test_customer_doc(self) -> None:
"""Test that the customer doc was set correctly."""
self.assertEqual(self.customer.doc, self.doc)
def test_customer_address(self) -> None:
"""Test that the customer address was set correctly."""
self.assertEqual(self.customer.address, self.address)
def test_customer_emails(self) -> None:
"""Test that the customer emails was set correctly."""
self.assertEqual(self.customer.emails, self.emails)
def test_customer_phones(self) -> None:
"""Test that the customer phones was set correctly."""
self.assertEqual(self.customer.phones, self.phones)
class TestAccount(unittest.TestCase):
"""Test the Account class."""
def __init__(self, *args, **kwargs) -> None:
"""Test that an Account object can be created."""
unittest.TestCase.__init__(self, *args, **kwargs)
self.name = '<NAME>'
self.number = '1234'
self.currency = 'EUR'
self.balance = '€100'
self.account_id = '1'
self.statements = list()
self.account = Account(self.name, self.number, self.balance, self.account_id)
def test_account_name(self) -> None:
"""Test that the account name was set correctly."""
self.assertEqual(self.account.name, self.name)
def test_account_number(self) -> None:
"""Test that the account number was set correctly."""
self.assertEqual(self.account.number, self.number)
def test_account_currency(self) -> None:
"""Test that the account currency was set correctly."""
self.assertEqual(self.account.currency, self.currency)
def test_account_balance(self) -> None:
"""Test that the account balance was set correctly."""
self.assertEqual(self.account.balance, self.balance)
def test_account_statements(self) -> None:
"""Test that the account statements was set correctly."""
self.assertEqual(self.account.statements, self.statements)
class TestStatement(unittest.TestCase):
"""Test the Statement class."""
def __init__(self, *args, **kwargs) -> None:
"""Test that an Statement object can be created."""
unittest.TestCase.__init__(self, *args, **kwargs)
self.date = '31/12/2010'
self.amount = '€10'
self.balance = '€100.2'
self.concept = 'Test Concept'
self.statement = Statement(self.date, self.amount, self.balance, self.concept)
def test_statement_date(self) -> None:
"""Test that the statement date was set correctly."""
self.assertEqual(self.statement.date, datetime.strptime(self.date, '%d/%m/%Y'))
def test_statement_amount(self) -> None:
"""Test that the statement amount was set correctly."""
self.assertEqual(self.statement.amount, self.amount)
def test_statement_balance(self) -> None:
"""Test that the statement balance was set correctly."""
self.assertEqual(self.statement.balance, self.balance)
def test_statement_concept(self) -> None:
"""Test that the statement concept was set correctly."""
self.assertEqual(self.statement.concept, self.concept)
if __name__ == '__main__':
unittest.main()
|
ignatowski/u-test
|
server/u_test/html_parser_u.py
|
import sys
from html.parser import HTMLParser
from .models import Customer, Account, Statement
from typing import List
class HtmlParserCustomers(HTMLParser):
"""A class used to parse customers html to Customer objects."""
def __init__(self, username: str = '') -> None:
HTMLParser.__init__(self)
# customers data
self.customer_doc = username
self.customers = list()
self.customer_object_entered = False
self.customer_attribute_entered = False
self.customer_li_count = 0
self.customer_name, self.customer_address, self.customer_emails, self.customer_phones = ('','','','')
def handle_starttag(self, tag: str, attrs: List[tuple]) -> None:
"""Set instance properties based on opening html tags."""
# beginning of customer html
if tag == 'ul':
for name, value in attrs:
if name == 'class' and value == 'collection with-header':
self.customer_object_entered = True
# beginning of customer attribute html
if self.customer_object_entered == True and tag == 'li':
self.customer_attribute_entered = True
self.customer_li_count += 1
def handle_endtag(self, tag: str) -> None:
"""Set instance properties based on closing html tags."""
# end of customer html
if tag == 'ul' and self.customer_object_entered == True:
self.customers.append(Customer(self.customer_name, self.customer_doc, self.customer_address, self.customer_emails, self.customer_phones))
self.customer_object_entered = False
self.customer_attribute_entered = False
self.customer_li_count = 0
self.customer_name, self.customer_address, self.customer_emails, self.customer_phones = ('','','','')
def handle_data(self, data: str) -> None:
"""Set instance properties based on html data."""
# customer attribute text that's not empty
if self.customer_attribute_entered == True and data.strip():
if self.customer_li_count == 1:
self.customer_name = data
elif self.customer_li_count == 2:
self.customer_phones = data
elif self.customer_li_count == 3:
self.customer_emails = data
elif self.customer_li_count == 4:
self.customer_address = data
class HtmlParserAccounts(HTMLParser):
"""A class used to parse accounts html to Account objects."""
def __init__(self) -> None:
HTMLParser.__init__(self)
# accounts data
self.accounts = list()
self.account_object_entered = False
self.account_attributes_entered = False
self.account_attribute_count = 0
self.account_name, self.account_number, self.account_balance, self.account_id = ('','','','')
def handle_starttag(self, tag: str, attrs: List[tuple]) -> None:
"""Set instance properties based on opening html tags."""
# beginning of account html
if tag == 'ul':
for name, value in attrs:
if name == 'class' and value == 'collection':
self.account_object_entered = True
# beginning of account attribute html
if self.account_object_entered == True and tag == 'li':
self.account_attributes_entered = True
# account id
if self.account_attributes_entered == True and tag == 'a':
for name, value in attrs:
if name == 'href':
self.account_id = value[value.index('/')+1:]
def handle_endtag(self, tag: str) -> None:
"""Set instance properties based on closing html tags."""
# end of account html
if tag == 'li' and self.account_attributes_entered == True:
self.accounts.append(Account(self.account_name, self.account_number, self.account_balance, self.account_id))
self.account_attributes_entered = False
self.account_attribute_count = 0
self.account_name, self.account_number, self.account_balance, self.account_id = ('','','','')
if tag == 'ul' and self.account_object_entered == True:
self.account_object_entered = False
def handle_data(self, data: str) -> None:
"""Set instance properties based on html data."""
# account attribute text that's not empty
if self.account_attributes_entered == True and data.strip():
self.account_attribute_count += 1
if self.account_attribute_count == 1:
self.account_name = data
elif self.account_attribute_count == 2:
self.account_number = data
elif self.account_attribute_count == 3:
self.account_balance = data
class HtmlParserStatements(HTMLParser):
"""A class used to parse statements html to Statement objects."""
def __init__(self) -> None:
HTMLParser.__init__(self)
# statements data
self.statements = list()
self.statement_headerPositions = {}
self.statement_thead_entered = False
self.statement_thead_td_entered = False
self.statement_thead_count = 0
self.statements_html_entered = False
self.statement_object_entered = False
self.statement_attributes_entered = False
self.statement_attribute_count = 0
self.statement_date, self.statement_amount, self.statement_balance, self.statement_concept = ('','','','')
def handle_starttag(self, tag: str, attrs: List[tuple]) -> None:
"""Set instance properties based on opening html tags."""
# beginning of statement html
if tag == 'thead':
self.statement_thead_entered = True
if tag == 'td' and self.statement_thead_entered == True:
self.statement_thead_td_entered = True
self.statement_thead_count += 1
if tag == 'tbody':
self.statements_html_entered = True
# beginning of statement object
if tag == 'tr' and self.statements_html_entered == True:
self.statement_object_entered = True
# beginning of statement attribute html
if self.statement_object_entered == True and tag == 'td':
self.statement_attributes_entered = True
self.statement_attribute_count += 1
def handle_endtag(self, tag: str) -> None:
"""Set instance properties based on closing html tags."""
# end of statement header html
if tag == 'thead' and self.statement_thead_entered == True:
self.statement_thead_entered = False
self.statement_thead_td_entered = False
self.statement_thead_count = 0
# end of statement object html
if tag == 'tr' and self.statement_attributes_entered == True:
self.statements.append(Statement(self.statement_date, self.statement_amount, self.statement_balance, self.statement_concept))
self.statement_object_entered = False
self.statement_attributes_entered = False
self.statement_attribute_count = 0
self.statement_date, self.statement_amount, self.statement_balance, self.statement_concept = ('','','','')
# end of statements html
if tag == 'tbody':
self.statements_html_entered = False
def handle_data(self, data: str) -> None:
"""Set instance properties based on html data."""
# statement header text that's not empty
if self.statement_thead_td_entered == True and data.strip():
self.statement_headerPositions[self.statement_thead_count] = data.lower()
# statement attribute text that's not empty
if self.statement_attributes_entered == True and data.strip():
# if the attribute is in the header,
# use the header for reference
if self.statement_attribute_count in self.statement_headerPositions:
if self.statement_headerPositions[self.statement_attribute_count] == 'statement':
self.statement_concept = data
elif self.statement_headerPositions[self.statement_attribute_count] == 'date':
self.statement_date = data
elif self.statement_headerPositions[self.statement_attribute_count] == 'amount':
self.statement_amount = data
elif self.statement_headerPositions[self.statement_attribute_count] == 'balance':
self.statement_balance = data
# otherwise fall back to a set position
else:
if self.statement_attribute_count == 1:
self.statement_concept = data
elif self.statement_attribute_count == 2:
self.statement_date = data
elif self.statement_attribute_count == 3:
self.statement_amount = data
elif self.statement_attribute_count == 4:
self.statement_balance = data
class HtmlObjects:
"""A class used to parse html to objects."""
def __init__(self) -> None:
pass
def parse_customers_html_to_objects(self, customers_html: str, username: str) -> List[Customer]:
"""Iterate over the customers' html, and create and return Customer objects."""
html_parser_customers = HtmlParserCustomers(username)
html_parser_customers.feed(customers_html)
html_parser_customers.close()
return html_parser_customers.customers
def parse_accounts_html_to_objects(self, accounts_html: str) -> List[Account]:
"""Iterate over the accounts' html, and create and return Account objects."""
html_parser_accounts = HtmlParserAccounts()
html_parser_accounts.feed(accounts_html)
html_parser_accounts.close()
return html_parser_accounts.accounts
def parse_statements_html_to_objects(self, statements_html: str) -> List[Statement]:
"""Iterate over the statements' html, and create and return Statement objects."""
html_parser_statements = HtmlParserStatements()
html_parser_statements.feed(statements_html)
html_parser_statements.close()
return html_parser_statements.statements
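# --- Illustrative usage sketch (not part of the original module; the markup is an
# --- invented fragment shaped like the account HTML these parsers expect):
#
#     from u_test.html_parser_u import HtmlObjects
#     sample_accounts_html = """
#     <ul class="collection">
#       <li>
#         <a href="account/123">
#           <span>Everyday Account</span>
#           <span>ES00 0000 0000 0000</span>
#           <span>€1,250.00</span>
#         </a>
#       </li>
#     </ul>
#     """
#     for parsed_account in HtmlObjects().parse_accounts_html_to_objects(sample_accounts_html):
#         print(parsed_account)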
|
ignatowski/u-test
|
server/read.py
|
<filename>server/read.py
import getopt, sys
from u_test.api import Api
from u_test.html_parser_u import HtmlObjects
from u_test.printer import Printer
def main() -> None:
"Gets and prints data."
# get the username and password from the command line arguments
username = ''
password = ''
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["username=","password="])
except getopt.GetoptError as err:
print('usage: read.py --username <username> --password <password>')
print('error: ' + str(err))
sys.exit(2)
for opt, arg in opts:
if opt == '--username':
username = arg
elif opt == '--password':
password = arg
# login
api = Api(username, password)
api.login()
# get the customers html
customers_html = api.get_customers_html()
# parse the customers html into customer objects
html_objects = HtmlObjects()
customers = html_objects.parse_customers_html_to_objects(customers_html, username)
# get the accounts html
accounts_html = api.get_accounts_html()
accounts = html_objects.parse_accounts_html_to_objects(accounts_html)
# get statements for each account
for account in accounts:
# get the statements html
statements_html = api.get_statements_html(account.account_id)
account.statements = html_objects.parse_statements_html_to_objects(statements_html)
# print accounts, customers, and statements
Printer(accounts, customers)
if __name__ == '__main__':
main()
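# Example invocation (hypothetical credentials, shown only to illustrate the
# getopt options parsed in main()):
#   python read.py --username some_user --password some_password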
|
NREL/grapinator
|
grapinator/resources/setup_fix/graphqlview.py
|
<gh_stars>0
from functools import partial
from flask import Response, request
from flask.views import View
from graphql.type.schema import GraphQLSchema
from graphql_server import (HttpQueryError, default_format_error,
encode_execution_results, json_encode,
load_json_body, run_http_query)
from .render_graphiql import render_graphiql
class GraphQLView(View):
schema = None
executor = None
root_value = None
pretty = False
graphiql = False
backend = None
graphiql_version = None
graphiql_template = None
graphiql_html_title = None
middleware = None
batch = False
methods = ['GET', 'POST', 'PUT', 'DELETE']
def __init__(self, **kwargs):
super(GraphQLView, self).__init__()
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
assert isinstance(self.schema, GraphQLSchema), 'A Schema is required to be provided to GraphQLView.'
# noinspection PyUnusedLocal
def get_root_value(self):
return self.root_value
def get_context(self):
return request
def get_middleware(self):
return self.middleware
def get_backend(self):
return self.backend
def get_executor(self):
return self.executor
def render_graphiql(self, params, result):
return render_graphiql(
params=params,
result=result,
graphiql_version=self.graphiql_version,
graphiql_template=self.graphiql_template,
graphiql_html_title=self.graphiql_html_title,
)
format_error = staticmethod(default_format_error)
encode = staticmethod(json_encode)
def dispatch_request(self):
try:
request_method = request.method.lower()
data = self.parse_body()
show_graphiql = request_method == 'get' and self.should_display_graphiql()
catch = show_graphiql
pretty = self.pretty or show_graphiql or request.args.get('pretty')
extra_options = {}
executor = self.get_executor()
if executor:
# We only include it optionally since
# executor is not a valid argument in all backends
extra_options['executor'] = executor
execution_results, all_params = run_http_query(
self.schema,
request_method,
data,
query_data=request.args,
batch_enabled=self.batch,
catch=catch,
backend=self.get_backend(),
# Execute options
root=self.get_root_value(),
context=self.get_context(),
middleware=self.get_middleware(),
**extra_options
)
result, status_code = encode_execution_results(
execution_results,
is_batch=isinstance(data, list),
format_error=self.format_error,
encode=partial(self.encode, pretty=pretty)
)
if show_graphiql:
return self.render_graphiql(
params=all_params[0],
result=result
)
return Response(
result,
status=status_code,
content_type='application/json'
)
except HttpQueryError as e:
return Response(
self.encode({
'errors': [self.format_error(e)]
}),
status=e.status_code,
headers=e.headers,
content_type='application/json'
)
# Flask
# noinspection PyBroadException
def parse_body(self):
# We use mimetype here since we don't need the other
# information provided by content_type
content_type = request.mimetype
if content_type == 'application/graphql':
return {'query': request.data.decode('utf8')}
elif content_type == 'application/json':
try:
return load_json_body(request.data.decode('utf8'))
except UnicodeDecodeError:
return load_json_body(request.data.decode('utf8', "backslashreplace"))
elif content_type in ('application/x-www-form-urlencoded', 'multipart/form-data'):
return request.form
return {}
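# Illustrative request bodies that parse_body() above accepts (made-up queries):
#   Content-Type: application/json     ->  {"query": "{ __typename }"}
#   Content-Type: application/graphql  ->  { __typename }
# Form posts fall through to request.form; any other content type returns {}.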
def should_display_graphiql(self):
if not self.graphiql or 'raw' in request.args:
return False
return self.request_wants_html()
def request_wants_html(self):
best = request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'text/html' and \
request.accept_mimetypes[best] > \
request.accept_mimetypes['application/json']
|
NREL/grapinator
|
grapinator/app.py
|
from flask import Flask
from flask_cors import CORS
from flask_graphql import GraphQLView
from grapinator import settings, schema_settings, log
from grapinator.model import db_session
from grapinator.schema import *
# setup Flask
app = Flask(__name__)
# add CORS support
CORS(app, resources={r"/*": {
"origins": settings.CORS_EXPOSE_ORIGINS
,"send_wildcard": settings.CORS_SEND_WILDCARD
,"methods": settings.CORS_ALLOW_METHODS
,"max_age": settings.CORS_HEADER_MAX_AGE
,"allow_headers": settings.CORS_ALLOW_HEADERS
,"expose_headers": settings.CORS_EXPOSE_HEADERS
,"supports_credentials": settings.CORS_SUPPORTS_CREDENTIALS
}})
# set SERVER_NAME only when running locally (not in Docker or on a server)
if settings.FLASK_SERVER_NAME != '':
app.config['SERVER_NAME'] = settings.FLASK_SERVER_NAME
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = settings.SQLALCHEMY_TRACK_MODIFICATIONS
app.add_url_rule(
settings.FLASK_API_ENDPOINT,
view_func=GraphQLView.as_view(
'graphql',
schema=gql_schema,
graphiql=True # for having the GraphiQL interface
)
)
# set default response headers per NREL spec.
@app.after_request
def apply_custom_response(response):
response.headers["X-Frame-Options"] = settings.HTTP_HEADERS_XFRAME
response.headers["X-XSS-Protection"] = settings.HTTP_HEADERS_XSS_PROTECTION
response.headers["Cache-Control"] = settings.HTTP_HEADER_CACHE_CONTROL
response.headers["Access-Control-Allow-Headers"] = settings.CORS_ALLOW_HEADERS
return response
@app.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
def main():
log.info('>>>>> Starting development server at http://{}{} <<<<<'.format(app.config['SERVER_NAME'], settings.FLASK_API_ENDPOINT))
# Note: can't use flask debug with vscode debugger. default: False
app.run(debug=settings.FLASK_DEBUG)
if __name__ == "__main__":
main()
|
NREL/grapinator
|
grapinator/settings.py
|
import graphene
import os
from os import path
from datetime import datetime
from crypto_config import cryptoconfigparser
from sqlalchemy import (
Column
,BigInteger
,Boolean
,Date
,DateTime
,Enum
,Float
,Integer
,Interval
,LargeBinary
,Numeric
,PickleType
,SmallInteger
,String
,Text
,Time
,Unicode
,UnicodeText
)
class Settings(object):
WSGI_SOCKET_HOST = None
WSGI_SOCKET_PORT = None
WSGI_SSL_CERT = None
WSGI_SSL_PRIVKEY = None
CORS_ENABLE = None
CORS_EXPOSE_ORIGINS = None
CORS_ALLOW_METHODS = None
CORS_HEADER_MAX_AGE = None
CORS_ALLOW_HEADERS = None
CORS_EXPOSE_HEADERS = None
CORS_SEND_WILDCARD = None
CORS_SUPPORTS_CREDENTIALS = None
HTTP_HEADERS_XFRAME = None
HTTP_HEADERS_XSS_PROTECTION = None
HTTP_HEADER_CACHE_CONTROL = None
APP_VERSION = None
GQL_SCHEMA = None
FLASK_SERVER_NAME = None
FLASK_DEBUG = None
FLASK_API_ENDPOINT = None
DB_USER = None
DB_PASSWORD = None
DB_CONNECT = None
DB_TYPE = None
SQLALCHEMY_TRACK_MODIFICATIONS = None
def __init__(self, **kwargs):
config_file = kwargs.pop('config_file', None)
if config_file != None:
self.config_file = config_file
else:
raise RuntimeError('Could not parse config_file.')
# CryptoConfigParser gets crypt_key from environment
try:
key = os.environ['GQLAPI_CRYPT_KEY']
except KeyError as err:
raise RuntimeError(f"Could not get env key: {err}")
try:
# load the properties .ini file
cwd = path.abspath(path.dirname(__file__))
properties = cryptoconfigparser.CryptoConfigParser(crypt_key=key)
properties_file = cwd + self.config_file
properties.read(properties_file)
# load APP section
self.APP_VERSION = properties.get('APP', 'VERSION')
# load WSGI section
self.WSGI_SOCKET_HOST = properties.get('WSGI', 'WSGI_SOCKET_HOST')
self.WSGI_SOCKET_PORT = properties.getint('WSGI', 'WSGI_SOCKET_PORT')
if properties.has_option('WSGI', 'WSGI_SSL_CERT') and properties.has_option('WSGI', 'WSGI_SSL_PRIVKEY'):
self.WSGI_SSL_CERT = properties.get('WSGI', 'WSGI_SSL_CERT')
self.WSGI_SSL_PRIVKEY = properties.get('WSGI', 'WSGI_SSL_PRIVKEY')
# load CORS
self.CORS_ENABLE = properties.getboolean('CORS', 'CORS_ENABLE')
self.CORS_EXPOSE_ORIGINS = properties.get('CORS', 'CORS_EXPOSE_ORIGINS')
self.CORS_ALLOW_METHODS = properties.get('CORS', 'CORS_ALLOW_METHODS')
self.CORS_HEADER_MAX_AGE = properties.get('CORS', 'CORS_HEADER_MAX_AGE')
self.CORS_ALLOW_HEADERS = properties.get('CORS', 'CORS_ALLOW_HEADERS')
self.CORS_EXPOSE_HEADERS = properties.get('CORS', 'CORS_EXPOSE_HEADERS')
self.CORS_SEND_WILDCARD = properties.getboolean('CORS', 'CORS_SEND_WILDCARD')
self.CORS_SUPPORTS_CREDENTIALS = properties.getboolean('CORS', 'CORS_SUPPORTS_CREDENTIALS')
# load HTTP_HEADERS
self.HTTP_HEADERS_XFRAME = properties.get('HTTP_HEADERS', 'HTTP_HEADERS_XFRAME')
self.HTTP_HEADERS_XSS_PROTECTION = properties.get('HTTP_HEADERS', 'HTTP_HEADERS_XSS_PROTECTION')
self.HTTP_HEADER_CACHE_CONTROL = properties.get('HTTP_HEADERS', 'HTTP_HEADER_CACHE_CONTROL')
# load FLASK section
self.FLASK_SERVER_NAME = properties.get('FLASK', 'FLASK_SERVER_NAME')
self.FLASK_API_ENDPOINT = properties.get('FLASK', 'FLASK_API_ENDPOINT')
self.FLASK_DEBUG = properties.getboolean('FLASK', 'FLASK_DEBUG')
# load SQLALCHEMY section
self.DB_USER = properties.get('SQLALCHEMY', 'DB_USER')
self.DB_PASSWORD = properties.get('SQLALCHEMY', 'DB_PASSWORD')
self.DB_CONNECT = properties.get('SQLALCHEMY', 'DB_CONNECT')
self.DB_TYPE = properties.get('SQLALCHEMY', 'DB_TYPE')
self.SQLALCHEMY_DATABASE_URI = f"{self.DB_TYPE}://{self.DB_USER}:{self.DB_PASSWORD}@{self.DB_CONNECT}"
self.SQLALCHEMY_TRACK_MODIFICATIONS = properties.getboolean('SQLALCHEMY', 'SQLALCHEMY_TRACK_MODIFICATIONS')
# load GRAPHENE section
self.GQL_SCHEMA = properties.get('GRAPHENE', 'GQL_SCHEMA')
# set Oracle NLS environment variables for SQLAlchemy
if properties.has_option('SQLALCHEMY', 'ORCL_NLS_LANG'):
os.environ['NLS_LANG'] = properties.get('SQLALCHEMY', 'ORCL_NLS_LANG')
if properties.has_option('SQLALCHEMY', 'ORCL_NLS_DATE_FORMAT'):
os.environ['NLS_DATE_FORMAT'] = properties.get('SQLALCHEMY', 'ORCL_NLS_DATE_FORMAT')
except cryptoconfigparser.ParsingError as err:
raise RuntimeError(f"Could not parse: {err}")
class SchemaSettings(object):
def __init__(self, *args, **kwargs):
file = kwargs.pop('schema_file', None)
if file != None:
# load file
cwd = path.abspath(path.dirname(__file__))
self._schema_dict = self._loadSchemaDict(cwd + file)
self._db_classes = self._make_db_classes()
self._gql_classes = self._make_gql_classes()
else:
raise TypeError("schema_file arg not set!")
def _loadSchemaDict(self, file_name):
with open(file_name, 'r') as fh:
    s = fh.read()
schema_dict = eval(s)
return schema_dict
def _make_db_classes(self):
db_classes = []
for row in self._schema_dict:
db_class_cols = [{
'name':r['gql_col_name']
,'db_col_name':r['db_col_name']
,'db_type':r['db_type']
} for r in row['FIELDS']]
db_class_relation = [{
'name':r['rel_name']
,'class_name':r['rel_class_name']
,'arguments':r['rel_arguments']
} for r in row['RELATIONSHIPS']]
db_class = {
'db_class': row['DB_CLASS_NAME']
,'db_table': row['DB_TABLE_NAME']
,'db_pk': row['DB_TABLE_PK']
,'db_columns': db_class_cols
,'db_relationships': db_class_relation
}
db_classes.append(db_class)
return db_classes
def _make_gql_classes(self):
gql_classes = []
for row in self._schema_dict:
gql_class_cols = [{
'name':r['gql_col_name']
,'type':r['gql_type']
,'desc':r['gql_description']
} for r in row['FIELDS']]
gql_class = {
'gql_class': row['GQL_CLASS_NAME']
,'gql_conn_class': row['GQL_CONN_CLASS_NAME']
,'gql_conn_query_name': row['GQL_CONN_QUERY_NAME']
,'gql_db_class': row['DB_CLASS_NAME']
,'gql_columns': gql_class_cols
,'gql_db_default_sort_col': row['DB_DEFAULT_SORT_COL']
}
gql_classes.append(gql_class)
return gql_classes
def get_db_classes(self):
return self._db_classes
def get_gql_classes(self):
return self._gql_classes
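# Illustrative shape of one entry in the schema file loaded above (key names are
# taken from _make_db_classes()/_make_gql_classes(); the values are invented):
# [
#     {
#         'DB_CLASS_NAME': 'WidgetModel',
#         'DB_TABLE_NAME': 'widgets',
#         'DB_TABLE_PK': 'ID',
#         'DB_DEFAULT_SORT_COL': 'name',
#         'GQL_CLASS_NAME': 'Widget',
#         'GQL_CONN_CLASS_NAME': 'WidgetConnection',
#         'GQL_CONN_QUERY_NAME': 'widgets',
#         'FIELDS': [
#             {'gql_col_name': 'name', 'db_col_name': 'NAME', 'db_type': String,
#              'gql_type': graphene.String, 'gql_description': 'Widget name'},
#         ],
#         'RELATIONSHIPS': [],
#     },
# ]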
|
NREL/grapinator
|
tests/test_gql_class_creation.py
|
<filename>tests/test_gql_class_creation.py
import unittest
import context
from datetime import datetime
from grapinator import settings, log, schema_settings
from grapinator.schema import *
class TestStringMethods(unittest.TestCase):
def test_gql(self):
for c in schema_settings.get_gql_classes():
self.assertTrue(
issubclass(globals()[c['gql_class']], SQLAlchemyObjectType)
,"test_gql failed!"
)
def test_gql_connection(self):
for c in schema_settings.get_gql_classes():
self.assertTrue(
issubclass(globals()[c['gql_conn_class']], relay.Connection)
,"test_gql_connection failed!"
)
|
NREL/grapinator
|
grapinator/svc_cherrypy.py
|
<reponame>NREL/grapinator
# Reason for choosing cherrypy
# https://blog.appdynamics.com/engineering/a-performance-analysis-of-python-wsgi-servers-part-2/
#
# Flask application based on Quickstart
# http://flask.pocoo.org/docs/0.12/quickstart/
#
# CherryPy documentation for this
# http://docs.cherrypy.org/en/latest/deploy.html#wsgi-servers
# http://docs.cherrypy.org/en/latest/advanced.html#host-a-foreign-wsgi-application-in-cherrypy
# Install: pip install cherrypy paste
#
# This code is mostly plagiarized from here:
# http://fgimian.github.io/blog/2012/12/08/setting-up-a-rock-solid-python-development-web-server/
import cherrypy
from requestlogger import WSGILogger, ApacheFormatter
from logging import StreamHandler
from flask import Flask
from flask_graphql import GraphQLView
from grapinator import settings, log
from grapinator.app import app
from grapinator.model import db_session
def run_server():
# Enable WSGI access logging
handlers = [StreamHandler(), ]
app_logged = WSGILogger(app, handlers, ApacheFormatter())
cherrypy.tree.graft(app_logged, '/')
cherrypy.config.update({
'server.socket_host': settings.WSGI_SOCKET_HOST,
'server.socket_port': settings.WSGI_SOCKET_PORT,
'engine.autoreload.on': False,
'log.screen': True,
'server.ssl_module': 'builtin',
'server.ssl_certificate': settings.WSGI_SSL_CERT,
'server.ssl_private_key': settings.WSGI_SSL_PRIVKEY,
})
# Start the CherryPy WSGI web server
cherrypy.engine.start()
cherrypy.engine.block()
if __name__ == '__main__':
run_server()
|
NREL/grapinator
|
grapinator/model.py
|
<reponame>NREL/grapinator
from sqlalchemy import (Column, DateTime, Integer, Numeric, String,
create_engine)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session
,sessionmaker
,relationship
)
from grapinator import settings, schema_settings
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True, convert_unicode=True)
db_session = scoped_session(
sessionmaker(autocommit=False, autoflush=False, bind=engine)
)
# No reflection against db tables as we are hardcoding below.
Base = declarative_base()
# We will need this for querying
Base.query = db_session.query_property()
def orm_class_constructor(clazz_name, db_table, clazz_pk, clazz_attrs, clazz_relationships):
"""
Create an ORM class dynamically.
See: http://sparrigan.github.io/sql/sqla/2016/01/03/dynamic-tables.html
:param clazz_name: class name
:param db_table: name of the database table to map the class to
:param clazz_pk: primary key column for the ORM object. Required!
:param clazz_attrs: list of dicts with keys 'name', 'db_col_name' and 'db_type' (a SQLAlchemy type)
:param clazz_relationships: list of dicts with keys 'name', 'class_name' and 'arguments' describing SQLAlchemy relationships
Returns dynamically created ORM class
"""
orm_attrs = {'__tablename__': db_table}
for col in clazz_attrs:
if col['db_col_name'] == clazz_pk:
orm_attrs[col['name']] = Column(col['db_col_name'], col['db_type'], primary_key=True)
else:
orm_attrs[col['name']] = Column(col['db_col_name'], col['db_type'])
# TODO: this works for now but needs improvement.
# this sets relationships for table joins in sqlalchemy
for col in clazz_relationships:
orm_attrs[col['name']] = relationship(
col['class_name']
,primaryjoin=col['arguments']['primaryjoin']
,foreign_keys=col['arguments']['foreign_keys']
)
return type(str(clazz_name), (Base,), orm_attrs)
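# Illustrative call (invented names; the real inputs come from
# schema_settings.get_db_classes() in the loop below):
#   WidgetModel = orm_class_constructor(
#       'WidgetModel', 'widgets', 'ID',
#       [{'name': 'id', 'db_col_name': 'ID', 'db_type': Integer},
#        {'name': 'name', 'db_col_name': 'NAME', 'db_type': String}],
#       []
#   )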
for db_class in schema_settings.get_db_classes():
globals()[db_class['db_class']] = orm_class_constructor(
db_class['db_class']
,db_class['db_table']
,db_class['db_pk']
,db_class['db_columns']
,db_class['db_relationships']
)
|
NREL/grapinator
|
grapinator/schema.py
|
<gh_stars>0
from sqlalchemy import and_, or_, desc, asc
import graphene
from graphene import relay
from graphene_sqlalchemy import SQLAlchemyObjectType, SQLAlchemyConnectionField
import datetime
from grapinator import log, schema_settings
from grapinator.model import *
def gql_class_constructor(clazz_name, db_clazz_name, clazz_attrs, default_sort_col):
gql_attrs = {
'Meta': type('Meta', (), {'model': globals()[db_clazz_name], 'interfaces': (relay.Node, )})
,'matches': graphene.String(description='exact, contains', default_value='contains')
,'sort_by': graphene.String(description='Field to sort by.', default_value=default_sort_col)
,'logic': graphene.String(description='and, or', default_value='and')
,'sort_dir': graphene.String(description='asc, desc', default_value='asc')
}
for attr in clazz_attrs:
gql_attrs[attr['name']] = attr['type'](description=attr['desc'])
return type(str(clazz_name), (SQLAlchemyObjectType,), gql_attrs)
def gql_connection_class_constructor(clazz_name, gql_clazz_name):
gql_attrs = {
'Meta': type('Meta', (), {'node': gql_clazz_name})
}
return type(str(clazz_name), (relay.Connection,), gql_attrs)
class MyConnectionField(SQLAlchemyConnectionField):
RELAY_ARGS = ['first', 'last', 'before', 'after']
@classmethod
def get_query(cls, model, info, **args):
matches = None
operator = None
sort_by = None
sort_dir = None
filter_conditions = []
if 'matches' in args:
matches = args['matches']
del args['matches']
if 'logic' in args:
operator = args['logic']
del args['logic']
if 'sort_by' in args:
sort_by = getattr(model, args['sort_by']).name
del args['sort_by']
if 'sort_dir' in args:
sort_dir = args['sort_dir']
del args['sort_dir']
sort = asc(sort_by) if sort_dir == "asc" else desc(sort_by)
query = super(MyConnectionField, cls).get_query(model, info, **args)
for field, value in args.items():
if field not in cls.RELAY_ARGS:
if matches == 'exact' or isinstance(value, datetime.datetime):
filter_conditions.append(getattr(model, field) == value)
else:
filter_conditions.append(getattr(model, field).ilike('%' + value + '%'))
if operator == 'or':
query = query.filter(or_(*filter_conditions)).order_by(sort)
else:
query = query.filter(and_(*filter_conditions)).order_by(sort)
return query
# loop and dynamically create all the graphene classes necessary for the Query class
for clazz in schema_settings.get_gql_classes():
# create the Graphene classes
globals()[clazz['gql_class']] = gql_class_constructor(
clazz['gql_class']
,clazz['gql_db_class']
,clazz['gql_columns']
,clazz['gql_db_default_sort_col']
)
# create the Graphene connection class
globals()[clazz['gql_conn_class']] = gql_connection_class_constructor(
clazz['gql_conn_class']
,globals()[clazz['gql_class']]
)
def _make_gql_query_fields(cols):
gql_attrs = {}
for row in cols:
gql_attrs[row['name']] = row['type']()
gql_attrs.update({
'matches': graphene.String()
,'sort_by': graphene.String()
,'logic': graphene.String()
,'sort_dir': graphene.String()
})
return gql_attrs
# create the Graphene Query class
class Query(graphene.ObjectType):
node = relay.Node.Field()
for clazz in schema_settings.get_gql_classes():
locals()[clazz['gql_conn_query_name']] = MyConnectionField(
globals()[clazz['gql_class']]
,_make_gql_query_fields(clazz['gql_columns'])
)
# create the gql schema
gql_schema = graphene.Schema(query=Query, auto_camelcase=False)
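# Illustrative query against the generated schema (field and column names are
# invented; the filter arguments are the ones handled in MyConnectionField.get_query):
#
#   {
#     widgets(name: "foo", matches: "contains", logic: "and", sort_by: "name", sort_dir: "desc") {
#       edges { node { name } }
#     }
#   }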
|
NREL/grapinator
|
grapinator/__init__.py
|
import sys
import os
from os import path
import logging.config
from grapinator.settings import Settings, SchemaSettings
# get application settings, exit if something missing.
try:
settings = Settings(config_file='/resources/grapinator.ini')
except RuntimeError as err:
print(f"Runtime error: {err}")
sys.exit(1)
# get app schema settings for dynamic class creation, exit if something missing
try:
schema_settings = SchemaSettings(schema_file=settings.GQL_SCHEMA)
except (TypeError, FileNotFoundError) as err:
print(f"Schema settings runtime error: {err}")
sys.exit(1)
# setup app
version = settings.APP_VERSION
# setup logging
logging_conf_path = path.abspath(path.dirname(__file__)) + '/resources/logging.conf'
logging.config.fileConfig(logging_conf_path)
log = logging.getLogger(__name__)
|
NREL/grapinator
|
tests/test_orm_class_creation.py
|
<gh_stars>0
import unittest
import context
from datetime import datetime
from grapinator import log, schema_settings
from grapinator.model import *
class TestStringMethods(unittest.TestCase):
orm_classes = schema_settings.get_db_classes()
def test_orm(self):
for clz in self.orm_classes:
self.assertTrue(issubclass(globals()[clz['db_class']], Base), f"test_orm failed for class: {clz['db_class']}!")
self.assertTrue(hasattr(globals()[clz['db_class']], "__tablename__"), "test_orm failed!")
self.assertTrue(hasattr(globals()[clz['db_class']], "metadata"), "test_orm failed!")
self.assertTrue(hasattr(globals()[clz['db_class']], "query"), "test_orm failed!")
|
NREL/grapinator
|
tests/test_schema_settings.py
|
import unittest
import context
from datetime import datetime
from grapinator import settings, log, schema_settings
class TestStringMethods(unittest.TestCase):
sb = schema_settings
def test_gqlschemabuilder(self):
self.assertTrue(self.sb, "test_gqlschemabuilder: empty sb!")
def test_get_db_classes(self):
dict_list = self.sb.get_db_classes()
for r in dict_list:
self.assertTrue('db_class' in r, "test_get_db_classes: failed for db_class!")
self.assertTrue('db_pk' in r, "test_get_db_classes: failed for db_pk!")
self.assertTrue('db_table' in r, "test_get_db_classes: failed for db_table!")
self.assertTrue('db_columns' in r, "test_get_db_classes: failed for db_columns!")
self.assertTrue(len(r['db_columns']) > 0, "test_get_db_classes: failed for db_columns size!")
def test_get_gql_classes(self):
dict_list = self.sb.get_gql_classes()
for r in dict_list:
self.assertTrue('gql_class' in r, "test_get_gql_classes: failed for gql_class!")
self.assertTrue('gql_conn_class' in r, "test_get_gql_classes: failed for gql_conn_class!")
self.assertTrue('gql_conn_query_name' in r, "test_get_gql_classes: failed for gql_conn_query_name!")
self.assertTrue('gql_db_class' in r, "test_get_gql_classes: failed for gql_db_class!")
self.assertTrue('gql_columns' in r, "test_get_gql_classes: failed for gql_columns!")
self.assertTrue(len(r['gql_columns']) > 0, "test_get_gql_classes: failed for gql_columns size!")
|
NREL/grapinator
|
setup.py
|
<reponame>NREL/grapinator<filename>setup.py
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup
from setuptools import find_packages
from setuptools.command.develop import develop
from setuptools.command.install import install
from subprocess import check_call
import os
def get_textfile(filename):
""" Get contents from a text file. """
with open(filename, 'r') as fh:
return fh.read().lstrip()
class PostDevelopCommand(develop):
"""Post-installation for development mode."""
def run(self):
print("Calling PostDevelopCommand(develop)")
from flask_graphql import graphqlview
path = os.path.abspath(graphqlview.__file__)
# call fix_flask_graphql.sh to remedy 500 error for badly formed unicode. See docs/500_error.txt
cmd = os.path.abspath(os.path.join(os.path.dirname(__file__), 'grapinator/resources/setup_fix/fix_flask_graphql.sh'))
check_call([cmd, path])
develop.run(self)
class PostInstallCommand(install):
"""Post-installation for install mode."""
def run(self):
print("Calling PostInstallCommand(install)")
from flask_graphql import graphqlview
path = os.path.abspath(graphqlview.__file__)
# call fix_flask_graphql.sh to remedy 500 error for badly formed unicode. See docs/500_error.txt
cmd = os.path.abspath(os.path.join(os.path.dirname(__file__), 'grapinator/resources/setup_fix/fix_flask_graphql.sh'))
check_call([cmd, path])
install.run(self)
setup(
name='grapinator',
version='1.0.0.dev1',
description='Dynamic GraphQL API Creator',
long_description=get_textfile('README.md'),
author='<NAME>',
packages=find_packages(),
cmdclass={
'develop': PostDevelopCommand,
'install': PostInstallCommand,
},
)
|
eatenliao/skyblock-auction-bot
|
code.py
|
import discord
import cmath
from discord.ext import commands
from discord import Permissions
from discord.utils import get
import asyncio
import json
import locale
import time
import requests
from requests import get
from json import loads, dump
import datetime
from json.decoder import JSONDecodeError
locale.setlocale(locale.LC_ALL, 'en_US')
def seconds_to_hms(seconds):
out = []
for ct in range(3)[::-1]:
a, b = divmod(seconds, 60 ** ct)
out.append(a)
seconds = b
return out
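# e.g. seconds_to_hms(3725) -> [1, 2, 5]  (hours, minutes, seconds)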
def remove1(a):
counter_Alpha = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
for n in range(0,10):
counter = n
remove = "§" + str(counter)
remove_1 = "§" + counter_Alpha[n]
try:
a = a.replace(remove,"")
a = a.replace(remove_1,"")
a = a.replace("§l","")
a = a.replace("§o","")
a = a.replace("§ka","")
a = a.replace("§r","")
except AttributeError:
continue
return(a)
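# e.g. remove1("§6Legendary §lSword") -> "Legendary Sword"  (strips Minecraft colour/format codes)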
token= 'UR-BOT-TOKEN-HERE' ### TOKEN IDENTIFIER ###
APIKEY = 'UR-API-KEY-HERE'
client = commands.Bot(command_prefix = '/')
client.remove_command("help")
game = discord.Game("with- do /help")
@client.event
async def on_ready():
print(f'We have logged in as {client.user}')
print()
print("Discord.py Version " + discord.__version__)
print()
await client.change_presence(activity = game)
@client.event
async def on_message(message):
print()
print(f"{message.channel}: {message.author}: {message.author.name}: {message.content}")
await client.process_commands(message)
@client.command()
async def s(ctx,*,un):
await ctx.send(f"https://sky.shiiyu.moe/stats/{un}")
await ctx.message.delete()
@client.command()
async def help(ctx,*,phrase = None):
await ctx.send(f"<@!{ctx.message.author.id}>! ```/ac [ign] -- check active auctions \n/acl [ign] -- check active auctions with lore (use this if its a book!) \n/lb [item_name] -- (*note*: if item is a pet, rarity: 0 - common, 1 - uncommon, 2 - rare, 3 - epic, 4 - legendary. And for books, eg, Power 7 == power;7) \n/bz [item_name] -- for bazaar buy & sell prices! \n \nFor basic math commands: \n/add [num1] [num2] -- to add 2 numbers together.\n/sub [num1] [num2] -- to subtract one number from the other. \n/mtp [num1] [num2] -- to multiply both numbers.```")
await ctx.message.delete()
@client.command()
async def p(ctx,*,p):
await ctx.send(f"https://plancke.io/hypixel/guild/name/{p}")
await ctx.message.delete()
@client.command()
async def purge(ctx,*,num):
await ctx.message.delete()
q = await ctx.channel.purge(limit=int(num))
await ctx.send(':otter: '+f'Deleted {len(q)} message(s).')
"""SLOW METHOD OF GETTING PLAYER AUCTIONS WITHOUT LORE"""
@client.command()
async def ac_slow(ctx,*,un):
pp = await ctx.send(f"<@!{ctx.message.author.id}>! Finding auctions for {un}..")
await ctx.message.delete()
uuid_raw = requests.get("https://api.mojang.com/users/profiles/minecraft/"+un)
uuid_json = json.loads(uuid_raw.text)
uuid_json == uuid_raw.json
uuid = uuid_json["id"]
auction_pagesRaw = requests.get(f"https://api.hypixel.net/skyblock/auctions?key={APIKEY}&page=0")
auction_pagesJson = json.loads(auction_pagesRaw.text)
auction_pagesJson == auction_pagesRaw.json
pages = auction_pagesJson["totalPages"]
user_Auctions = []
count = 1
for n in range(pages+1):
auction_raw = requests.get(f"https://api.hypixel.net/skyblock/auctions?key={APIKEY}&page={n}")
auction_json = json.loads(auction_raw.text)
auction_json == auction_raw.json
for i in range(len(auction_json["auctions"])):
try:
if auction_json["auctions"][i]["auctioneer"] == str(uuid):
if auction_json["auctions"][i]["claimed"] == False:
data = {"NAME":auction_json["auctions"][i]["item_name"],"BID":auction_json["auctions"][i]["highest_bid_amount"],"PRICE":auction_json["auctions"][i]["starting_bid"],"NUMBER":len(auction_json["auctions"][i]["bids"]),"TIER":auction_json["auctions"][i]["tier"],"AID":auction_json["auctions"][i]["uuid"],"TIME":auction_json["auctions"][i]["end"]}
user_Auctions.append(data)
except KeyError:
continue
if len(user_Auctions) != 0:
await pp.edit(content = f"<@!{ctx.message.author.id}>! Auctions found for {un}")
for j in range(len(user_Auctions)):
aid = user_Auctions[j]["AID"]
a1 = aid[:8]
a2 = aid[8:12]
a3 = aid[12:16]
a4 = aid[16:20]
a5 = aid[20:-1]
a6 = aid[-1]
b = "/viewauction "+a1+"-"+a2+"-"+a3+"-"+a4+"-"+a5+a6
time = int(user_Auctions[j]["TIME"])
_end = datetime.datetime.fromtimestamp(time/1000)
__end = (_end - datetime.datetime.now())
end = ":".join([str(i).zfill(2) for i in ([__end.days] + seconds_to_hms(__end.seconds))])
name = user_Auctions[j]["NAME"]
price = user_Auctions[j]["PRICE"]
price = locale.format("%d",int(price), grouping=True)
bid = user_Auctions[j]["BID"]
bid = locale.format("%d",int(bid), grouping=True)
num = user_Auctions[j]["NUMBER"]
tier = user_Auctions[j]["TIER"]
if datetime.datetime.now() > _end:
end = "AUCTION HAS ENDED"
if user_Auctions[j]["NUMBER"] == 0:
bid = "No Bidders!"
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for {un}!", color=0xFFF8E7)
embedVar.add_field(name="ITEM NAME: ", value=name, inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Bids: ", value=bid, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
await ctx.send(embed=embedVar)
if user_Auctions[j]["NUMBER"] != 0:
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for {un}!", color=0xFFF8E7)
embedVar.add_field(name="ITEM NAME: ", value=name, inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Highest Bid: ", value=bid, inline=True)
embedVar.add_field(name="Bidders: ", value=num, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
await ctx.send(embed=embedVar)
elif len(user_Auctions) == 0 and count == 1:
await pp.edit(content=f"<@!{ctx.message.author.id}>! No Auctions detected for {un}")
"""SLOW METHOD OF GETTING PLAYER AUCTION WITH LORE"""
@client.command()
async def acl_slow(ctx,*,username):
qq = await ctx.send(f"<@!{ctx.message.author.id}>! Finding auctions for {username}..")
await ctx.message.delete()
uuid_raw = requests.get("https://api.mojang.com/users/profiles/minecraft/"+username)
uuid_json = json.loads(uuid_raw.text)
uuid_json == uuid_raw.json
uuid = uuid_json["id"]
auction_pagesRaw = requests.get(f"https://api.hypixel.net/skyblock/auctions?key={APIKEY}&page=0")
auction_pagesJson = json.loads(auction_pagesRaw.text)
auction_pagesJson == auction_pagesRaw.json
pages = auction_pagesJson["totalPages"]
user_Auctions = []
count = 1
for n in range(pages+1):
auction_raw = requests.get(f"https://api.hypixel.net/skyblock/auctions?key={APIKEY}&page={n}")
auction_json = json.loads(auction_raw.text)
auction_json == auction_raw.json
for i in range(len(auction_json["auctions"])):
try:
if auction_json["auctions"][i]["auctioneer"] == str(uuid):
if auction_json["auctions"][i]["claimed"] == False:
data = {"NAME":auction_json["auctions"][i]["item_name"],"BID":auction_json["auctions"][i]["highest_bid_amount"],"PRICE":auction_json["auctions"][i]["starting_bid"],"NUMBER":len(auction_json["auctions"][i]["bids"]),"LORE":remove1(auction_json["auctions"][i]["item_lore"]),"TIER":auction_json["auctions"][i]["tier"],"AID":auction_json["auctions"][i]["uuid"],"TIME":auction_json["auctions"][i]["end"]}
user_Auctions.append(data)
except KeyError:
continue
if len(user_Auctions) != 0:
await qq.edit(content = f"<@!{ctx.message.author.id}>! Auctions found for {username}")
for j in range(len(user_Auctions)):
aid = user_Auctions[j]["AID"]
a1 = aid[:8]
a2 = aid[8:12]
a3 = aid[12:16]
a4 = aid[16:20]
a5 = aid[20:-1]
a6 = aid[-1]
b = "/viewauction "+a1+"-"+a2+"-"+a3+"-"+a4+"-"+a5+a6
time = int(user_Auctions[j]["TIME"])
_end = datetime.datetime.fromtimestamp(time/1000)
__end = (_end - datetime.datetime.now())
end = ":".join([str(i).zfill(2) for i in ([__end.days] + seconds_to_hms(__end.seconds))])
name = user_Auctions[j]["NAME"]
price = user_Auctions[j]["PRICE"]
price = locale.format("%d",int(price), grouping=True)
bid = user_Auctions[j]["BID"]
bid = locale.format("%d",int(bid), grouping=True)
num = user_Auctions[j]["NUMBER"]
lore = user_Auctions[j]["LORE"]
tier = user_Auctions[j]["TIER"]
if datetime.datetime.now() > _end:
end = "AUCTION HAS ENDED"
if user_Auctions[j]["NUMBER"] == 0:
bid = "No Bidders!"
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for {username}!", color=0xFFF8E7)
embedVar.add_field(name="ITEM NAME: ", value=name, inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Item Lore: ", value=lore, inline=False)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Bids: ", value=bid, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
await ctx.send(embed=embedVar)
if user_Auctions[j]["NUMBER"] != 0:
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for {username}!", color=0xFFF8E7)
embedVar.add_field(name="ITEM NAME: ", value=name, inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Item Lore: ", value=lore, inline=False)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Highest Bid: ", value=bid, inline=True)
embedVar.add_field(name="Bidders: ", value=num, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
await ctx.send(embed=embedVar)
elif len(user_Auctions) == 0 and count == 1:
await qq.edit(content = f"<@!{ctx.message.author.id}>! No Auctions detected for {username}")
"""LOWEST BIN FOR ITEMS. DO /help TO FIND OUT HOW OR JUST TRY /lb [item_name] or smt"""
@client.command()
async def lb(ctx,*,item):
qqq = await ctx.send(f"<@!{ctx.message.author.id}> Finding lowest bin for {item}")
await ctx.message.delete()
item1 = item.upper()
item2 = item1.replace(" ","_")
lb = get("https://moulberry.codes/lowestbin.json")
lb = lb.json()
avatar = ctx.message.author.avatar_url or ctx.message.author.default_avatar_url
t = datetime.datetime.now()
tt = t.strftime("%H:%M:%S")
try:
lb_item = lb[f"{item2}"]
lb_item = locale.format("%d",int(lb_item), grouping=True)
embedVar = discord.Embed(title="BIN BOT.", description="", color=0x66ff00)
await qqq.edit(content=f"<@!{ctx.message.author.id}> Found Lowest BIN for {item}")
embedVar.add_field(name=f"Lowest BIN for {item}: ", value=lb_item, inline=False)
embedVar.set_footer(icon_url = avatar,text=f" Requested By: {ctx.message.author} | Today At: {tt}")
except KeyError:
await qqq.edit(content=f"<@!{ctx.message.author.id}> Couldn't Find Lowest BIN for {item}")
await ctx.send(f"```Lowest BIN for {item} could not be found, \nDo Note: for pets, rarity: 0 - common, 1 - uncommon, 2 - rare, 3 - epic, 4 - legendary\n And for books, eg, Power 7 == power;7 \nSend the command in the format: /lb [item];[level]```")
await ctx.send(embed=embedVar)
"""BAZAAR PRICE INFO, kinda unstable cos slothpixel"""
@client.command()
async def bz(ctx,*,item):
initial_message = await ctx.send(f"<@!{ctx.message.author.id}> Finding Bazaar Info for {item}")
await ctx.message.delete()
itemI = item.upper()
itemId = itemI.replace(" ","_")
baz = get(f"https://api.slothpixel.me/api/skyblock/bazaar/{itemId}")
baz = baz.json()
avatar = ctx.message.author.avatar_url or ctx.message.author.default_avatar_url
t = datetime.datetime.now()
tt = t.strftime("%H:%M:%S")
try:
sell = int(baz["quick_status"]["sellPrice"])
sell = locale.format("%d",sell, grouping=True)
buy = int(baz["quick_status"]["buyPrice"])
buy = locale.format("%d",buy, grouping=True)
await initial_message.edit(content=f"<@!{ctx.message.author.id}> Found Bazaar Info for {item}")
embedVar = discord.Embed(title="Bazaar Bot.", description=f"Bazaar Info for {item}!", color=0x66ff00)
embedVar.add_field(name="Sell Price: ", value=sell, inline=True)
embedVar.add_field(name="Buy Price: ", value=buy, inline=True)
embedVar.set_footer(icon_url = avatar,text=f" Requested By: {ctx.message.author} | Today At: {tt}")
await ctx.send(embed=embedVar)
except KeyError:
await initial_message.edit(content=f"<@!{ctx.message.author.id}> Couldn't Find Bazaar Info for {item}")
"""" MATH STUFF, CAN IGNORE IF YOU WANT """"
""""
@client.command()
async def mtp(ctx,*,num):
q = num.split(" ")
num1 = int(q[0])
num2 = int(q[-1])
numend = num1 * num2
numend = locale.format("%d",numend, grouping=True)
num1 = locale.format("%d",num1, grouping=True)
num2 = locale.format("%d",num2, grouping=True)
await ctx.send(f"{num1} x {num2} = {numend}")
@client.command()
async def add(ctx,*,num):
q = num.split(" ")
num1 = int(q[0])
num2 = int(q[-1])
numend = num1 + num2
numend = locale.format("%d",numend, grouping=True)
num1 = locale.format("%d",num1, grouping=True)
num2 = locale.format("%d",num2, grouping=True)
await ctx.send(f"{num1} + {num2} = {numend}")
@client.command()
async def sub(ctx,*,num):
q = num.split(" ")
num1 = int(q[0])
num2 = int(q[-1])
numend = num1 - num2
numend = locale.format("%d",numend, grouping=True)
num1 = locale.format("%d",num1, grouping=True)
num2 = locale.format("%d",num2, grouping=True)
await ctx.send(f"{num1} - {num2} = {numend}") """
""" --FOR AUCTIONS WITHOUT LORE--"""
"""NOTE THAT PICTURE OF ITEM MAY NOT NECESSARILY ALW SHOW UP"""
@client.command()
async def ac(ctx,*,un):
pp = await ctx.send(f"<@!{ctx.message.author.id}>! Finding auctions for `{un}`..")
await ctx.message.delete()
uuid_raw = requests.get("https://api.mojang.com/users/profiles/minecraft/"+un)
uuid_json = json.loads(uuid_raw.text)
uuid_json == uuid_raw.json
uuid = uuid_json["id"]
user_Auctions = []
count = 1
auction_raw = requests.get(f"https://api.hypixel.net/skyblock/auction?key={APIKEY}&player={uuid}")
auction_json = json.loads(auction_raw.text)
auction_json == auction_raw.json
for i in range(len(auction_json["auctions"])):
try:
if auction_json["auctions"][i]["claimed"] == False:
time1 = int(auction_json["auctions"][i]["end"])
_end = datetime.datetime.fromtimestamp(time1/1000)
if datetime.datetime.now() < _end:
data = {"NAME":auction_json["auctions"][i]["item_name"],"BID":auction_json["auctions"][i]["highest_bid_amount"],"PRICE":auction_json["auctions"][i]["starting_bid"],"NUMBER":len(auction_json["auctions"][i]["bids"]),"TIER":auction_json["auctions"][i]["tier"],"AID":auction_json["auctions"][i]["uuid"],"TIME":auction_json["auctions"][i]["end"]}
user_Auctions.append(data)
except KeyError:
continue
if len(user_Auctions) != 0:
await pp.edit(content = f"<@!{ctx.message.author.id}>! Auctions found for `{un}`")
for j in range(len(user_Auctions)):
t = datetime.datetime.now()
tt = t.strftime("%H:%M:%S")
aid = user_Auctions[j]["AID"]
a1 = aid[:8]
a2 = aid[8:12]
a3 = aid[12:16]
a4 = aid[16:20]
a5 = aid[20:-1]
a6 = aid[-1]
b = "/viewauction "+a1+"-"+a2+"-"+a3+"-"+a4+"-"+a5+a6
time = int(user_Auctions[j]["TIME"])
_end = datetime.datetime.fromtimestamp(time/1000)
__end = (_end - datetime.datetime.now())
end = ":".join([str(i).zfill(2) for i in ([__end.days] + seconds_to_hms(__end.seconds))])
name = user_Auctions[j]["NAME"]
image = name.upper()
image = image.replace(" ","_")
image1 = f"https://sky.shiiyu.moe/item/{image}"
price = user_Auctions[j]["PRICE"]
price = locale.format("%d",int(price), grouping=True)
bid = user_Auctions[j]["BID"]
bid = locale.format("%d",int(bid), grouping=True)
num = user_Auctions[j]["NUMBER"]
tier = user_Auctions[j]["TIER"]
avatar = ctx.message.author.avatar_url or ctx.message.author.default_avatar_url
if user_Auctions[j]["NUMBER"] == 0:
bid = "No Bidders!"
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for `{un}`!", color=0x66ff00)
embedVar.set_thumbnail(url=image1)
embedVar.add_field(name="ITEM NAME: ", value= name , inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Bids: ", value=bid, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
embedVar.set_footer(icon_url = avatar,text=f" Requested By: {ctx.message.author} | Today At: {tt}")
await ctx.send(embed=embedVar)
if user_Auctions[j]["NUMBER"] != 0:
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for `{un}`!", color=0x66ff00)
embedVar.set_thumbnail(url=image1)
embedVar.add_field(name="ITEM NAME: ", value= name, inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Highest Bid: ", value=bid, inline=True)
embedVar.add_field(name="Bidders: ", value=num, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
embedVar.set_footer(icon_url = avatar,text=f" Requested By: {ctx.message.author} | Today At: {tt}")
await ctx.send(embed=embedVar)
elif len(user_Auctions) == 0 and count == 1:
await pp.edit(content=f"<@!{ctx.message.author.id}>! No Auctions detected for `{un}`")
""" --FOR AUCTIONS WITH LORE--"""
"""NOTE THAT PICTURE OF ITEM MAY NOT NECESSARILY ALW SHOW UP"""
@client.command()
async def acl(ctx,*,un):
pp = await ctx.send(f"<@!{ctx.message.author.id}>! Finding auctions for `{un}`..")
await ctx.message.delete()
uuid_raw = requests.get("https://api.mojang.com/users/profiles/minecraft/"+un)
uuid_json = json.loads(uuid_raw.text)
uuid = uuid_json["id"]
user_Auctions = []
count = 1
auction_raw = requests.get(f"https://api.hypixel.net/skyblock/auction?key={APIKEY}&player={uuid}")
auction_json = json.loads(auction_raw.text)
for i in range(len(auction_json["auctions"])):
try:
if auction_json["auctions"][i]["claimed"] == False:
time1 = int(auction_json["auctions"][i]["end"])
_end = datetime.datetime.fromtimestamp(time1/1000)
if datetime.datetime.now() < _end:
data = {"NAME":auction_json["auctions"][i]["item_name"],"LORE":remove1(auction_json["auctions"][i]["item_lore"]),"BID":auction_json["auctions"][i]["highest_bid_amount"],"PRICE":auction_json["auctions"][i]["starting_bid"],"NUMBER":len(auction_json["auctions"][i]["bids"]),"TIER":auction_json["auctions"][i]["tier"],"AID":auction_json["auctions"][i]["uuid"],"TIME":auction_json["auctions"][i]["end"]}
user_Auctions.append(data)
except KeyError:
continue
if len(user_Auctions) != 0:
await pp.edit(content = f"<@!{ctx.message.author.id}>! Auctions found for `{un}`")
for j in range(len(user_Auctions)):
t = datetime.datetime.now()
tt = t.strftime("%H:%M:%S")
aid = user_Auctions[j]["AID"]
a1 = aid[:8]
a2 = aid[8:12]
a3 = aid[12:16]
a4 = aid[16:20]
a5 = aid[20:-1]
a6 = aid[-1]
b = "/viewauction "+a1+"-"+a2+"-"+a3+"-"+a4+"-"+a5+a6
time = int(user_Auctions[j]["TIME"])
_end = datetime.datetime.fromtimestamp(time/1000)
__end = (_end - datetime.datetime.now())
end = ":".join([str(i).zfill(2) for i in ([__end.days] + seconds_to_hms(__end.seconds))])
name = user_Auctions[j]["NAME"]
image = name.upper()
image = image.replace(" ","_")
image1 = f"https://sky.shiiyu.moe/item/{image}"
price = user_Auctions[j]["PRICE"]
price = locale.format("%d",int(price), grouping=True)
bid = user_Auctions[j]["BID"]
bid = locale.format("%d",int(bid), grouping=True)
num = user_Auctions[j]["NUMBER"]
tier = user_Auctions[j]["TIER"]
lore = user_Auctions[j]["LORE"]
avatar = ctx.message.author.avatar_url or ctx.message.author.default_avatar_url
if user_Auctions[j]["NUMBER"] == 0:
bid = "No Bidders!"
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for `{un}`!", color=0x66ff00)
embedVar.set_thumbnail(url=image1)
embedVar.add_field(name="ITEM NAME: ", value=name, inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Item Lore: ", value=lore, inline=False)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Bids: ", value=bid, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
embedVar.set_footer(icon_url = avatar, text=f" Requested By: {ctx.message.author} | Today At: {tt}")
await ctx.send(embed=embedVar)
if user_Auctions[j]["NUMBER"] != 0:
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for `{un}`!", color=0x66ff00)
embedVar.set_thumbnail(url=image1)
embedVar.add_field(name="ITEM NAME: ", value=name, inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Item Lore: ", value=lore, inline=False)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Highest Bid: ", value=bid, inline=True)
embedVar.add_field(name="Bidders: ", value=num, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
embedVar.set_footer(icon_url = avatar, text=f" Requested By: {ctx.message.author} | Today At: {tt}")
await ctx.send(embed=embedVar)
elif len(user_Auctions) == 0 and count == 1:
await pp.edit(content=f"<@!{ctx.message.author.id}>! No Auctions detected for `{un}`")
"""PROBS DOESNT WORK BUT U CAN TRY IDK"""
"""NOTE THAT PICTURE OF ITEM MAY NOT NECESSARILY ALW SHOW UP"""
@client.command()
async def myah(ctx,*,phrase=None):
un = ctx.message.author.display_name
pp = await ctx.send(f"<@!{ctx.message.author.id}>! Finding auctions for `{un}`..")
await ctx.message.delete()
uuid_raw = requests.get("https://api.mojang.com/users/profiles/minecraft/"+un)
uuid_json = json.loads(uuid_raw.text)
uuid = uuid_json["id"]
user_Auctions = []
count = 1
auction_raw = requests.get(f"https://api.hypixel.net/skyblock/auction?key={APIKEY}&player={uuid}")
auction_json = json.loads(auction_raw.text)
for i in range(len(auction_json["auctions"])):
try:
if auction_json["auctions"][i]["claimed"] == False:
time1 = int(auction_json["auctions"][i]["end"])
_end = datetime.datetime.fromtimestamp(time1/1000)
if datetime.datetime.now() < _end:
data = {"NAME":auction_json["auctions"][i]["item_name"],"BID":auction_json["auctions"][i]["highest_bid_amount"],"PRICE":auction_json["auctions"][i]["starting_bid"],"NUMBER":len(auction_json["auctions"][i]["bids"]),"TIER":auction_json["auctions"][i]["tier"],"AID":auction_json["auctions"][i]["uuid"],"TIME":auction_json["auctions"][i]["end"]}
user_Auctions.append(data)
except KeyError:
continue
if len(user_Auctions) != 0:
await pp.edit(content = f"<@!{ctx.message.author.id}>! Auctions found for `{un}`")
for j in range(len(user_Auctions)):
aid = user_Auctions[j]["AID"]
a1 = aid[:8]
a2 = aid[8:12]
a3 = aid[12:16]
a4 = aid[16:20]
a5 = aid[20:-1]
a6 = aid[-1]
b = "/viewauction "+a1+"-"+a2+"-"+a3+"-"+a4+"-"+a5+a6
time = int(user_Auctions[j]["TIME"])
_end = datetime.datetime.fromtimestamp(time/1000)
__end = (_end - datetime.datetime.now())
end = ":".join([str(i).zfill(2) for i in ([__end.days] + seconds_to_hms(__end.seconds))])
name = user_Auctions[j]["NAME"]
price = user_Auctions[j]["PRICE"]
price = locale.format("%d",int(price), grouping=True)
bid = user_Auctions[j]["BID"]
bid = locale.format("%d",int(bid), grouping=True)
num = user_Auctions[j]["NUMBER"]
tier = user_Auctions[j]["TIER"]
if user_Auctions[j]["NUMBER"] == 0:
bid = "No Bidders!"
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for `{un}`!", color=0xFFF8E7)
embedVar.add_field(name="ITEM NAME: ", value=name, inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Bids: ", value=bid, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
await ctx.send(embed=embedVar)
if user_Auctions[j]["NUMBER"] != 0:
embedVar = discord.Embed(title="Auction Bot.", description=f"Auctions for `{un}`!", color=0xFFF8E7)
embedVar.add_field(name="ITEM NAME: ", value=name, inline=True)
embedVar.add_field(name="RARITY: ", value=tier, inline=True)
embedVar.add_field(name="Starting Price: ", value=price, inline=True)
embedVar.add_field(name="Highest Bid: ", value=bid, inline=True)
embedVar.add_field(name="Bidders: ", value=num, inline=True)
embedVar.add_field(name="Ends in: ", value=end, inline=True)
embedVar.add_field(name="For this auction, do:", value=b, inline=True)
await ctx.send(embed=embedVar)
elif len(user_Auctions) == 0 and count == 1:
await pp.edit(content=f"<@!{ctx.message.author.id}>! No Auctions detected for `{un}`")
client.run(token)
|
johnwason/robotraconteur_companion
|
tools/generate_info_parsers.py
|
import RobotRaconteur as RR
from RobotRaconteur.RobotRaconteurPythonUtil import SplitQualifiedName
import re
from pathlib import Path
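# Generates C++ YAML parser headers for the standard Robot Raconteur "info" structures.
# For every robdef service definition under robdef/group1 it emits a <service>_parser.h containing
# YAML::convert<> specializations, plus enum string-to-value loaders and a compile-only test file.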
NUMBER_TYPES = {
RR.DataTypes_double_t: "double",
RR.DataTypes_single_t: "float",
RR.DataTypes_int8_t: "int8_t",
RR.DataTypes_uint8_t: "uint8_t",
RR.DataTypes_int16_t: "int16_t",
RR.DataTypes_uint16_t: "uint16_t",
RR.DataTypes_int32_t: "int32_t",
RR.DataTypes_uint32_t: "uint32_t",
RR.DataTypes_int64_t: "int64_t",
RR.DataTypes_uint64_t: "uint64_t",
}
def convert_bool(val):
if val:
return "true"
else:
return "false"
def get_qualified_type_string(type_def,service_def):
assert type_def.Type == RR.DataTypes_namedtype_t
if '.' in type_def.TypeString:
return type_def.TypeString.replace('.','::')
else:
return service_def.Name.replace('.','::') + '::' + type_def.TypeString
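# Build a field override that parses a YAML sequence of enum flag names into a combined flags value.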
def flags_override(flags_enum_type):
return lambda fieldname,optional: f"RobotRaconteur::Companion::InfoParser::yaml::parse_enum_flags<{flags_enum_type}>(node,\"{fieldname}\",{convert_bool(optional)})"
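# Fields that need hand-written parsing expressions instead of the generated defaults,
# keyed by "<service>.<type>.<field>".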
FIELD_OVERRIDES = {
"com.robotraconteur.uuid.UUID.uuid_bytes": \
lambda fieldname,optional: f"RobotRaconteur::Companion::InfoParser::yaml::parse_uuid_bytes_override(node,\"{fieldname}\")",
"com.robotraconteur.robotics.robot.RobotInfo.robot_capabilities": \
flags_override("com::robotraconteur::robotics::robot::RobotCapabilities::RobotCapabilities"),
"com.robotraconteur.robotics.tool.ToolInfo.tool_capabilities": \
flags_override("com::robotraconteur::robotics::tool::ToolCapabilities::ToolCapabilities"),
"com.robotraconteur.servo.ServoInfo.capabilities": \
flags_override("com::robotraconteur::servo::ServoCapabilities::ServoCapabilities"),
#"com.robotraconteur.robotics.robot.RobotInfo.robot_capabilities": \
# flags_override("com::robotraconteur::robotics::robot::RobotCapabilities::RobotCapabilities"),
#"com.robotraconteur.imaging.Camera.capabilities": \
# flags_override("com::robotraconteur::imaging::Capabilities::Capabilities"),
#"com.robotraconteur.imaging.ImagePartCamera.capabilities": \
# flags_override("com::robotraconteur::imaging::Capabilities::Capabilities")
"com.robotraconteur.robotics.planning.PlannerAlgorithmInfo.algorithm_capability_flags":
lambda fieldname,optional: f"RobotRaconteur::Companion::InfoParser::yaml::parse_planner_algorithm_flags_override(node,\"{fieldname}\",{convert_bool(optional)})",
"com.robotraconteur.imaging.camerainfo.CameraCalibration.distortion_info":
lambda fieldname,optional: f"RobotRaconteur::Companion::InfoParser::yaml::parse_structure<com::robotraconteur::imaging::camerainfo::PlumbBobDistortionInfoPtr>(node,\"{fieldname}\",{convert_bool(optional)})",
}
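# Types whose YAML::convert<> specializations are provided elsewhere; the generation loop below skips them.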
OVERRIDE_TYPES = ["com::robotraconteur::uuid::UUID", "com::robotraconteur::identifier::Identifier"]
def parse_namedarray_field(field_def,service_def,optional=False):
fieldname=field_def.Name
fieldtype=field_def.Type
f = service_def.Name + "." + field_def.GetServiceEntry().Name + "." + field_def.Name
if f in FIELD_OVERRIDES:
return FIELD_OVERRIDES[f](fieldname,optional)
if fieldtype.Type in NUMBER_TYPES:
if (fieldtype.ArrayType == RR.DataTypes_ArrayTypes_none):
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_number<{NUMBER_TYPES[fieldtype.Type]}>(node,\"{fieldname}\",{convert_bool(optional)})"
elif(fieldtype.ArrayType == RR.DataTypes_ArrayTypes_array):
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_numeric_array_na<{NUMBER_TYPES[fieldtype.Type]},{fieldtype.ArrayLength[0]}>(node,\"{fieldname}\")"
if (fieldtype.Type == RR.DataTypes_namedtype_t):
field_type_str = get_qualified_type_string(field_def.Type,service_def)
if (fieldtype.ArrayType == RR.DataTypes_ArrayTypes_none):
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_namedarray<{field_type_str}>(node,\"{fieldname}\",{convert_bool(optional)})"
elif(fieldtype.ArrayType == RR.DataTypes_ArrayTypes_array):
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_namedarray_array<{field_type_str}>(node,\"{fieldname}\",{convert_bool(optional)},false,{fieldtype.ArrayLength[0]})"
assert False, "Invalid namedarray field type"
def parse_struct_field(field_def,service_def,service_defs,optional=True):
fieldname=field_def.Name
fieldtype=field_def.Type
f = service_def.Name + "." + field_def.GetServiceEntry().Name + "." + field_def.Name
if f in FIELD_OVERRIDES:
return FIELD_OVERRIDES[f](fieldname,optional)
container = ""
if fieldtype.ContainerType == RR.DataTypes_ContainerTypes_list:
container = "_list"
elif fieldtype.ContainerType == RR.DataTypes_ContainerTypes_map_int32:
container = "_map_int32"
elif fieldtype.ContainerType == RR.DataTypes_ContainerTypes_map_string:
container = "_map_string"
if fieldtype.Type in NUMBER_TYPES:
if (fieldtype.ArrayType == RR.DataTypes_ArrayTypes_none):
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_number<{NUMBER_TYPES[fieldtype.Type]}>(node,\"{fieldname}\",{convert_bool(optional)})"
elif(fieldtype.ArrayType == RR.DataTypes_ArrayTypes_array):
array_len = 0
if len(fieldtype.ArrayLength) > 0:
array_len = fieldtype.ArrayLength[0]
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_numeric_array{container}<{NUMBER_TYPES[fieldtype.Type]}>(node,\"{fieldname}\",{convert_bool(optional)},{convert_bool(fieldtype.ArrayVarLength)},{array_len})"
elif(fieldtype.ArrayType == RR.DataTypes_ArrayTypes_multidimarray):
if len(fieldtype.ArrayLength) != 2:
return None
array_len = fieldtype.ArrayLength
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_numeric_multidimarray{container}<{NUMBER_TYPES[fieldtype.Type]}>(node,\"{fieldname}\",{convert_bool(optional)},{array_len[0]},{array_len[1]})"
elif fieldtype.Type == RR.DataTypes_bool_t:
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_bool(node,\"{fieldname}\",{convert_bool(optional)})"
elif fieldtype.Type == RR.DataTypes_string_t:
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_string{container}(node,\"{fieldname}\",{convert_bool(optional)})"
elif (fieldtype.Type == RR.DataTypes_namedtype_t):
field_type_str = get_qualified_type_string(field_def.Type,service_def)
namedtype_def, namedtype_t = resolve_named_type(field_type_str, service_defs)
if (namedtype_def is None):
return None
if namedtype_t == "na":
if (fieldtype.ContainerType != RR.DataTypes_ContainerTypes_none):
return None
if (fieldtype.ArrayType == RR.DataTypes_ArrayTypes_none):
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_namedarray{container}<{field_type_str}>(node,\"{fieldname}\",{convert_bool(optional)})"
elif(fieldtype.ArrayType == RR.DataTypes_ArrayTypes_array):
array_len = 0
if len(fieldtype.ArrayLength) > 0:
array_len = fieldtype.ArrayLength[0]
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_namedarray_array<{field_type_str}>(node,\"{fieldname}\",{convert_bool(optional)},{convert_bool(fieldtype.ArrayVarLength)},{array_len})"
elif namedtype_t == "s":
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_structure{container}<{field_type_str}Ptr>(node,\"{fieldname}\",{convert_bool(optional)})"
elif namedtype_t == "e":
enum_name = field_type_str.split("::")[-1]
return f"RobotRaconteur::Companion::InfoParser::yaml::parse_enum{container}<{field_type_str}::{enum_name}>(node,\"{fieldname}\",{convert_bool(optional)})"
return None
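# Look up a qualified type name in the loaded service definitions and report whether it is
# a named array ("na"), a structure ("s"), or an enum ("e").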
def resolve_named_type(type_str, service_defs):
def find_type(v,n):
for t in v:
if n == t.Name:
return t
return None
service_str, type_str1 = SplitQualifiedName(type_str.replace("::","."))
service_def1 = service_defs[service_str]
na_t = find_type(service_def1.NamedArrays, type_str1)
if na_t is not None:
return na_t, "na"
s_t = find_type(service_def1.Structures, type_str1)
if s_t is not None:
return s_t, "s"
e_t = find_type(service_def1.Enums, type_str1)
if e_t is not None:
return e_t, "e"
return None,None
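# Load every standard robdef file and index the parsed service definitions by name.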
my_service_defs={}
source_dir=Path.cwd().joinpath('..').absolute()
assert source_dir.joinpath('include').is_dir(), "Script must be run in tools directory"
assert source_dir.joinpath('robdef/group1').is_dir(), "Standard robdef must be cloned recursively"
robdef_dir = source_dir.joinpath("robdef/group1")
for file in robdef_dir.glob("*.robdef"):
filed=open(file)
my_service_def=filed.read()
service_def = RR.ServiceDefinition()
service_def.FromString(my_service_def)
my_service_defs[service_def.Name]=service_def
Named_arrayslist=[]
Structureslist=[]
enum_list=[]
enum_dict={}
usingdict={}
for key in my_service_defs:
name=my_service_defs[key].Name.replace(".","::")
print(name)
for n in my_service_defs[key].NamedArrays:
#name=my_service_defs[key].Name.replace(".","::")
qualifiedname=name+"::"+n.Name
Named_arrayslist.append(qualifiedname)
usingdict[n.Name]=qualifiedname
for e in my_service_defs[key].Structures:
#name=my_service_defs[key].Name.replace(".","::")
qualifiedname=name+"::"+e.Name
Structureslist.append(qualifiedname)
usingdict[e.Name]=qualifiedname
for use in my_service_defs[key].Using:
#print(use.UnqualifiedName)
if(use.UnqualifiedName not in usingdict.keys()):
usingdict[use.UnqualifiedName]=use.QualifiedName.replace(".","::")
for enum in my_service_defs[key].Enums:
#if(enum.Name not in usingdict.keys()):
qualifiedname=name+"::"+enum.Name
enum_dict[enum.Name]=qualifiedname
enum_list.append((enum.Name,qualifiedname))
#usingdict[enum.Name]=qualifiedname
#file1 = open("GeometryEnum.txt","w")
error_names=[]
print(enum_dict)
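# Open the generated output files: the aggregate include header, the enum loader declaration and
# implementation files, and a compile-only test source.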
yaml_dir = source_dir.joinpath("include/RobotRaconteurCompanion/InfoParser/yaml")
yaml_dir.mkdir(parents=True, exist_ok=True)
filename4=yaml_dir.joinpath("yaml_parser_all.h")
file4=open(filename4,"w")
filenames=[]
filename5=yaml_dir.joinpath("yaml_loader_enums_impl.h")
file5=open(filename5,"w")
file5.write("#pragma once\n")
file5.write("#include <string>\n")
file5.write("#include \"yaml-cpp/yaml.h\"\n")
file5.write("#include <RobotRaconteurCompanion/StdRobDef/StdRobDefAll.h>\n")
file5.write("namespace RobotRaconteur{\n")
file5.write("namespace Companion{\n")
file5.write("namespace InfoParser{\n")
file5.write("namespace yaml{\n")
file5.write("template<typename T> struct string_to_enum_traits { };\n")
for entry in enum_list:
file5.write("int string_to_enum_%s(const std::string &input, const YAML::Node& node);\n"%(entry[0]))
file5.write("template<> struct string_to_enum_traits<%s::%s> { static %s::%s string_to_enum(const std::string& s, const YAML::Node& node) { return (%s::%s)string_to_enum_%s(s,node); } };\n"%(entry[1], entry[0], entry[1], entry[0], entry[1], entry[0], entry[0]))
file5.write("}\n")
file5.write("}\n")
file5.write("}\n")
file5.write("}\n")
file5.close()
src_dir = source_dir.joinpath("src")
filename8=src_dir.joinpath("yaml_loader_enums.cpp")
file2=open(filename8,"w")
file2.write("#include \"RobotRaconteurCompanion/InfoParser/yaml/yaml_loader_enums.h\"\n")
file2.write("#include <string>\n\n")
file2.write("namespace RobotRaconteur{\n")
file2.write("namespace Companion{\n")
file2.write("namespace InfoParser{\n")
file2.write("namespace yaml{\n")
file4.write("#pragma once\n")
test_dir = source_dir.joinpath("test")
testfilename = test_dir.joinpath("test_infoparser.cpp")
testfile=open(testfilename,"w")
testfile.write("#include <RobotRaconteurCompanion/StdRobDef/StdRobDefAll.h>\n")
testfile.write("#include <RobotRaconteurCompanion/InfoParser/yaml/yaml_parser_all.h>\n\n")
testfile.write("void testfunction()\n{\n")
testfile.write("// Not a full test, just make sure everything compiles\n")
testfile.write("YAML::Node node = YAML::Load(\"[1, 2, 3]\");\n")
for key in my_service_defs:
usingdict={}
for e in my_service_defs[key].Enums:
enum_list.append(e.Name)
#filename="YAMLconverter__"+my_service_defs[key].Name+".h"
if("com.robotraconteur." in my_service_defs[key].Name):
filename=my_service_defs[key].Name.replace("com.robotraconteur.","")
else:
filename=my_service_defs[key].Name
filename = yaml_dir.joinpath(filename.replace(".","__")+"_parser.h")
file4.write("#include \"%s\"\n"%(filename.name))
file1=open(filename,"w")
filenames.append(filename)
file1.write("#include \"yaml_parser_common_include.h\"\n\n")
file1.write("#pragma once\n\n")
file1.write("namespace YAML {\n")
for e in my_service_defs[key].Enums:
file2.write("int string_to_enum_%s(const std::string &input, const YAML::Node& node){\n"%(e.Name))
# Compare e.Name to the enum you are looking for
#print(e.Values[-1].Name)
enum_list.append(e.Name)
for e_value in e.Values:
#if(e_value.Name==e.Values[-1].Name):
file2.write("\tif(input ==\""+e_value.Name + "\") return " + str(e_value.Value)+";\n")
#else:
# file1.write("\t"+e_value.Name + " = " + str(e_value.Value)+",\n")
file2.write("\tthrow RobotRaconteur::InvalidArgumentException(\"Invalid enum value\");\n")
file2.write("}\n")
file2.write("\n")
for use in my_service_defs[key].Using:
#print(use.UnqualifiedName)
if(use.UnqualifiedName not in usingdict.keys()):
usingdict[use.UnqualifiedName]=use.QualifiedName.replace(".","::")
name=my_service_defs[key].Name.replace(".","::")
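    # Emit YAML::convert<> specializations for each named array, delegating member parsing to parse_namedarray_field.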
for n in my_service_defs[key].NamedArrays:
if (name + "::" + n.Name) in OVERRIDE_TYPES:
continue
file1.write("\ttemplate<> \n\tstruct convert<%s::%s>{\n"%(name,n.Name))
file1.write("\t\tstatic Node encode(const %s::%s& rhs){\n"%(name,n.Name))
file1.write("\t\t\tNode node;\n")
file1.write("\t\t\treturn node;\n")
file1.write("\t\t}\n\n")
file1.write("\t\tstatic bool decode(const Node& node, %s::%s& rhs){\n"%(name,n.Name))
qualifiedname=name+"::"+n.Name
#print(qualifiedname)
usingdict[n.Name]=qualifiedname
count=0
for i in range(len(n.Members)):
field_def = n.Members[i]
fieldname = field_def.Name
#print(f)
if(isinstance(field_def,RR.PropertyDefinition)):
file1.write(f"\t\t\trhs.s.{fieldname} = {parse_namedarray_field(field_def,my_service_defs[key])};\n")
count+=1
file1.write("\t\t\treturn true;\n")
file1.write("\t\t}\n")
file1.write("\t};\n\n")
testfile.write("node.as<%s::%s>();\n"%(name,n.Name))
for e in my_service_defs[key].Structures:
if (name + "::" + e.Name) in OVERRIDE_TYPES:
continue
file1.write("\n\ttemplate<> \n\tstruct convert<%s::%sPtr>{\n"%(name,e.Name))
file1.write("\t\tstatic Node encode(const %s::%sPtr& rhs){\n"%(name,e.Name))
file1.write("\t\t\tNode node;\n")
file1.write("\t\t\treturn node;\n")
file1.write("\t\t}\n\n")
file1.write("\t\tstatic bool decode(const Node& node, %s::%sPtr& rhs){\n"%(name,e.Name))
file1.write("\t\t\tif (!rhs) rhs.reset(new %s::%s);\n"%(name,e.Name))
qualifiedname=name+"::"+e.Name
#usingdict[e.Name]=qualifiedname
for i in range(len(e.Members)):
field_def = e.Members[i]
fieldname = field_def.Name
#print(f)
#TODO implement var value parsing as a map of type and value
if(isinstance(field_def,RR.PropertyDefinition)):
field_parse_str = parse_struct_field(field_def,my_service_defs[key],my_service_defs)
if field_parse_str is not None:
file1.write(f"\t\t\trhs->{fieldname} = {field_parse_str};\n")
else:
file1.write(f"\t\t\t// TODO: parse field {field_def.Type.ToString().split()[0]} {fieldname}\n")
"""if(f[1]=="single"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<float>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="double"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<double>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="int32"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<int>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="uint32"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<uint32_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="int8"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<int8_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="uint8"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<uint8_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="int16"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<int16_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="uint16"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<uint16_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="int64"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<int64_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="uint64"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<uint64_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="string"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<std::string>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="bool"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<bool>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif("[" in f[1]):
if("," in f[1]):
if("single" in f[1]):
size=f[1].replace("single[","").replace("]","")
numbers=size.split(",")
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tstd::vector<uint32_t> dims = {%s,%s};\n"%(numbers[0],numbers[1]))
file1.write("\t\t\t\tRobotRaconteur::RRMultiDimArrayPtr<float> my_multidimarray = RobotRaconteur::AllocateEmptyRRMultiDimArray<float>(dims);\n")
file1.write("\t\t\t\tfor(int i =0; i< %s; i++){\n"%(numbers[0]))
file1.write("\t\t\t\t\tfor(int j=0; j< %s; j++){\n"%(numbers[1]))
file1.write("\t\t\t\t\t\tmy_multidimarray->Array->at(i+(j * %s)) = node[\"%s\"][j+ (i * %s)].as<float>();\n"%(numbers[0],f[2],numbers[1]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_multidimarray;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("double" in f[1]):
size=f[1].replace("double[","").replace("]","")
numbers=size.split(",")
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tstd::vector<uint32_t> dims = {%s,%s};\n"%(numbers[0],numbers[1]))
file1.write("\t\t\t\tRobotRaconteur::RRMultiDimArrayPtr<double> my_multidimarray = RobotRaconteur::AllocateEmptyRRMultiDimArray<double>(dims);\n")
file1.write("\t\t\t\tfor(int i =0; i< %s; i++){\n"%(numbers[0]))
file1.write("\t\t\t\t\tfor(int j=0; j< %s; j++){\n"%(numbers[1]))
file1.write("\t\t\t\t\t\tmy_multidimarray->Array->at(i+(j * %s)) = node[\"%s\"][j+ (i * %s)].as<double>();\n"%(numbers[0],f[2],numbers[1]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_multidimarray;\n"%(f[2]))
file1.write("\t\t\t}\n")
else:
if("{string}" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRMapPtr<std::string, RobotRaconteur::RRArray<double>> joints;\n")
file1.write("\t\t\t\tfor (YAML::const_iterator it = node[\"%s\"].begin(); it != node[\"%s\"].end(); ++it) {\n"%(f[2],f[2]))
file1.write("\t\t\t\t\tstd::string name = it->first.as<std::string>();\n")
file1.write("\t\t\t\t\tRobotRaconteur::RRArrayPtr<double> my_array = RobotRaconteur::AllocateEmptyRRArray<double>(node[name].size());\n")
file1.write("\t\t\t\t\tfor (int i = 0; i < node[name].size(); i++) {\n")
file1.write("\t\t\t\t\t\tmy_array->at(i) = node[name][i].as<double>();\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tjoints->insert(std::make_pair(name,my_array));\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = joints;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("{list}" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRListPtr<RobotRaconteur::RRArray<double>> listy = RobotRaconteur::AllocateEmptyRRList<RobotRaconteur::RRArray<double>>();\n")
#file1.write("\t\t\t\tRobotRaconteur::RRListPtr<%s> listy = RobotRaconteur::AllocateEmptyRRList<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\tfor(int i =0; i<node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tRobotRaconteur::RRArrayPtr<double> my_array = RobotRaconteur::AllocateEmptyRRArray<double>(node[\"%s\"][i].size());\n"%(f[2]))
file1.write("\t\t\t\t\tfor(int k =0; k<node[\"%s\"][i].size(); k++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\tmy_array->at(k)=node[\"%s\"][i][k].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tlisty->push_back(my_array);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = listy;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("single" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRArrayPtr<float> my_array = RobotRaconteur::AllocateEmptyRRArray<float>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<float>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("double" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRArrayPtr<double> my_array = RobotRaconteur::AllocateEmptyRRArray<double>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("uint32" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRArrayPtr<uint32_t> my_array = RobotRaconteur::AllocateEmptyRRArray<uint32_t>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("uint8" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRArrayPtr<uint8_t> my_array = RobotRaconteur::AllocateEmptyRRArray<uint8_t>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<uint8_t>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("int16" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRArrayPtr<int16_t> my_array = RobotRaconteur::AllocateEmptyRRArray<int16_t>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<int16_t>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("int32" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRArrayPtr<int32_t> my_array = RobotRaconteur::AllocateEmptyRRArray<int32_t>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<int32_t>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif(f[1].replace("[]","") in usingdict.keys()):
if(usingdict.get(f[1]) in Structureslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRArrayPtr<%sPtr> my_array = RobotRaconteur::AllocateEmptyRRArray<%sPtr>(node[\"%s\"].size());\n"%(usingdict.get(f[1]),usingdict.get(f[1]),f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<%sPtr>();\n"%(f[2],usingdict.get(f[1])))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
if(usingdict.get(f[1]) in Named_arrayslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRArrayPtr<%s> my_array = RobotRaconteur::AllocateEmptyRRArray<%s>(node[\"%s\"].size());\n"%(usingdict.get(f[1]),usingdict.get(f[1]),f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<%s>();\n"%(f[2],usingdict.get(f[1])))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("{list}" in f[1]):
#RobotRaconteur::RRListPtr<imaging::camerainfo::CameraInfo> camerainfos = RobotRaconteur::AllocateEmptyRRList<imaging::camerainfo::CameraInfo>();
if(f[1].replace("{list}","")=="string"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRListPtr<RobotRaconteur::RRArray<char>> listy = RobotRaconteur::AllocateEmptyRRList<RobotRaconteur::RRArray<char>>();\n")
file1.write("\t\t\t\tfor(int j=0; j< node[\"%s\"].size(); j++){\n"%(f[2]))
file1.write("\t\t\t\t\tstd::string item= node[\"%s\"][j].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\t\tRobotRaconteur::RRArrayPtr<char> itemRR= RobotRaconteur::stringToRRArray(item);\n")
file1.write("\t\t\t\t\tlisty->push_back(itemRR);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = listy;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif(f[1].replace("{list}","") in usingdict.keys()):
#print(f[1].replace("{list}",""))
#print(usingdict.get(f[1].replace("{list}","")))
if(usingdict.get(f[1].replace("{list}","")) in Structureslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
#file1.write("\t\t\t\tRobotRaconteur::RRListPtr<%sPtr> listy = RobotRaconteur::AllocateEmptyRRList<%sPtr>();\n"%(usingdict.get(f[1].replace("{list}","")),usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\tRobotRaconteur::RRListPtr<%s> listy = RobotRaconteur::AllocateEmptyRRList<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\tfor(int j=0; j< node[\"%s\"].size(); j++){\n"%(f[2]))
file1.write("\t\t\t\t\t%sPtr item= node[\"%s\"][j].as<%sPtr>();\n"%(usingdict.get(f[1].replace("{list}","")),f[2],usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\t\tlisty->push_back(item);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = listy;\n"%(f[2]))
file1.write("\t\t\t}\n")
#print("\t%s item= node[\"%s\"][j].as<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),f[2],usingdict.get(f[1].replace("{list}",""))))
if(usingdict.get(f[1].replace("{list}","")) in Named_arrayslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRListPtr<RobotRaconteur::RRNamedArray<%s>> listy = RobotRaconteur::AllocateEmptyRRList<RobotRaconteur::RRNamedArray<%s>>();\n"%(usingdict.get(f[1].replace("{list}","")),usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\tfor(int j=0; j< node[\"%s\"].size(); j++){\n"%(f[2]))
file1.write("\t\t\t\t\t%s item= node[\"%s\"][j].as<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),f[2],usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\t\tlisty->push_back(RobotRaconteur::ScalarToRRNamedArray(item));\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = listy;\n"%(f[2]))
file1.write("\t\t\t}\n")
#print("\t%s item= node[\"%s\"][j].as<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),f[2],usingdict.get(f[1].replace("{list}",""))))
elif(f[1] in enum_list):
#std::string array_type_code = node["array_type_code"].as<std::string>();
#rhs->array_type_code = com::robotraconteur::datatype::ArrayTypeCode::ArrayTypeCode(string_to_enum_ArrayTypeCode(array_type_code));
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tstd::string enum_val_string= node[\"%s\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = %s::%s(RobotRaconteur::Companion::InfoParser::yaml::string_to_enum_%s(enum_val_string,node[\"%s\"]));\n"%(f[2],enum_dict.get(f[1]),f[1],f[1],f[1]))
file1.write("\t\t\t}\n")
#print("\t\t\t\trhs->%s = %s::%s(string_to_enum_%s(enum_val_string));\n"%(f[2],usingdict.get(f[1]),f[1],f[1]))
elif(f[1] in usingdict.keys()):
if(usingdict.get(f[1]) in Structureslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<%sPtr>();\n"%(f[2],f[2],usingdict.get(f[1])))
file1.write("\t\t\t}\n")
if(usingdict.get(f[1]) in Named_arrayslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<%s>();\n"%(f[2],f[2],usingdict.get(f[1])))
file1.write("\t\t\t}\n")
elif(f[1].replace("{string}","") in usingdict.keys()):
if(usingdict.get(f[1]) in Structureslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRMapPtr<std::string, %sPtr> joints;\n"%(usingdict.get(f[1])))
file1.write("\t\t\t\tfor (YAML::const_iterator it = node[\"%s\"].begin(); it != node[\"%s\"].end(); ++it) {\n"%(f[2],f[2]))
file1.write("\t\t\t\t\tstd::string name = it->first.as<std::string>();\n")
file1.write("\t\t\t\t\tjoints->insert(std::make_pair(name,node[name].as<%sPtr>()));\n"%(usingdict.get(f[1])))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = joints;\n"%(f[2]))
file1.write("\t\t\t}\n")
if(usingdict.get(f[1]) in Named_arrayslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRMapPtr<std::string, %s> joints;\n"%(usingdict.get(f[1])))
file1.write("\t\t\t\tfor (YAML::const_iterator it = node[\"%s\"].begin(); it != node[\"%s\"].end(); ++it) {\n"%(f[2],f[2]))
file1.write("\t\t\t\t\tstd::string name = it->first.as<std::string>();\n")
file1.write("\t\t\t\t\tjoints->insert(std::make_pair(name,node[name].as<%s>()));\n"%(usingdict.get(f[1])))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = joints;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="varvalue{string}"):
#print("varvalue seen")
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRMapPtr<std::string, RobotRaconteur::RRValue> vars;\n")
file1.write("\t\t\t\tfor (YAML::const_iterator it = node[\"%s\"].begin(); it != node[\"%s\"].end(); ++it) {\n"%(f[2],f[2]))
file1.write("\t\t\t\t\tstd::string name = it->first.as<std::string>();\n")
file1.write("\t\t\t\t\tstd::string type = node[\"%s\"][\"type\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\t\tRobotRaconteur::RRValuePtr varval;\n")
file1.write("\t\t\t\t\tif(type==\"string\"){\n")
file1.write("\t\t\t\t\t\tstd::string value = node[\"%s\"][\"value\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\t\t\tvarval=RobotRaconteur::stringToRRArray(value);\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"double\"){\n")
file1.write("\t\t\t\t\t\tdouble value = node[\"%s\"][\"value\"].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\t\tvarval=RobotRaconteur::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"int32\"){\n")
file1.write("\t\t\t\t\t\tint value = node[\"%s\"][\"value\"].as<int>();\n"%(f[2]))
file1.write("\t\t\t\t\t\tvarval=RobotRaconteur::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"uint32\"){\n")
file1.write("\t\t\t\t\t\tuint32_t value = node[\"%s\"][\"value\"].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t\t\tvarval=RobotRaconteur::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"double[]\"){\n")
file1.write("\t\t\t\t\t\tRobotRaconteur::RRArrayPtr<double> my_array = RobotRaconteur::AllocateEmptyRRArray<double>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\t\t}\n")
file1.write("\t\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"int32[]\"){\n")
file1.write("\t\t\t\t\t\tRobotRaconteur::RRArrayPtr<int> my_array = RobotRaconteur::AllocateEmptyRRArray<int>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<int>();\n"%(f[2]))
file1.write("\t\t\t\t\t\t}\n")
file1.write("\t\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"uint32[]\"){\n")
file1.write("\t\t\t\t\t\tRobotRaconteur::RRArrayPtr<uint32_t> my_array = RobotRaconteur::AllocateEmptyRRArray<uint32_t>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t\t\t}\n")
file1.write("\t\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t\t}\n")
#file1.write("\t\t\t\t\tRRValuePtr my_array = RobotRaconteur::AllocateEmptyRRArray<double>(node[name].size());\n")
#file1.write("\t\t\t\t\tfor (int i = 0; i < node[name].size(); i++) {\n")
#file1.write("\t\t\t\t\t\tmy_array->at(i) = node[name][i].as<double>();\n")
#file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tvars->insert(std::make_pair(name,varval));\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = vars;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="varvalue"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tstd::string type = node[\"%s\"][\"type\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRValuePtr varval;\n")
file1.write("\t\t\t\tif(type==\"string\"){\n")
file1.write("\t\t\t\t\tstd::string value = node[\"%s\"][\"value\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\t\tvarval =RobotRaconteur::stringToRRArray(value);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"double\"){\n")
file1.write("\t\t\t\t\tdouble value = node[\"%s\"][\"value\"].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\tvarval=RobotRaconteur::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"int32\"){\n")
file1.write("\t\t\t\t\tint value= node[\"%s\"][\"value\"].as<int>();\n"%(f[2]))
file1.write("\t\t\t\t\tvarval=RobotRaconteur::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"uint32\"){\n")
file1.write("\t\t\t\t\tuint32_t value= node[\"%s\"][\"value\"].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t\tvarval=RobotRaconteur::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"double[]\"){\n")
file1.write("\t\t\t\t\tRobotRaconteur::RRArrayPtr<double> my_array = RobotRaconteur::AllocateEmptyRRArray<double>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"int32[]\"){\n")
file1.write("\t\t\t\t\tRobotRaconteur::RRArrayPtr<int> my_array = RobotRaconteur::AllocateEmptyRRArray<int>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<int>();\n"%(f[2]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"uint32[]\"){\n")
file1.write("\t\t\t\t\tRobotRaconteur::RRArrayPtr<uint32_t> my_array = RobotRaconteur::AllocateEmptyRRArray<uint32_t>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s=varval;\n"%(f[2]))
file1.write("\t\t\t}\n")
else:
if(qualifiedname not in error_names):
error_names.append(qualifiedname)"""
file1.write("\t\t\treturn true;\n")
file1.write("\t\t}\n")
file1.write("\t};\n\n\n")
testfile.write("node.as<%s::%sPtr>();\n"%(name,e.Name))
if(len(error_names)>0):
file1.write("//TODO: Fix the following structures or namedarrays: \n")
for i in error_names:
file1.write("// "+i+" \n")
file1.write("\n}")
error_names=[]
file2.write("}\n")
file2.write("}\n")
file2.write("}\n")
file2.write("}\n")
testfile.write("}\n\n")
testfile.write("int main(int ac, char** av)\n{\n")
testfile.write("return 0;\n}\n")
print(error_names)
|
johnwason/robotraconteur_companion
|
tools/generate_info_parsers-old.py
|
<reponame>johnwason/robotraconteur_companion
import RobotRaconteur as RR
import re
from pathlib import Path
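# Older version of the parser generator: it derives field handling by splitting each member's
# string form (field.ToString()) instead of inspecting the typed definitions.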
my_service_defs={}
source_dir=Path.cwd().joinpath('..').absolute()
assert source_dir.joinpath('include').is_dir(), "Script must be run in tools directory"
assert source_dir.joinpath('robdef/group1').is_dir(), "Standard robdef must be cloned recursively"
robdef_dir = source_dir.joinpath("robdef/group1")
for file in robdef_dir.glob("*.robdef"):
filed=open(file)
my_service_def=filed.read()
service_def = RR.ServiceDefinition()
service_def.FromString(my_service_def)
my_service_defs[service_def.Name]=service_def
Named_arrayslist=[]
Structureslist=[]
enum_list=[]
enum_dict={}
usingdict={}
for key in my_service_defs:
name=my_service_defs[key].Name.replace(".","::")
print(name)
for n in my_service_defs[key].NamedArrays:
#name=my_service_defs[key].Name.replace(".","::")
qualifiedname=name+"::"+n.Name
Named_arrayslist.append(qualifiedname)
usingdict[n.Name]=qualifiedname
for e in my_service_defs[key].Structures:
#name=my_service_defs[key].Name.replace(".","::")
qualifiedname=name+"::"+e.Name
Structureslist.append(qualifiedname)
usingdict[e.Name]=qualifiedname
for use in my_service_defs[key].Using:
#print(use.UnqualifiedName)
if(use.UnqualifiedName not in usingdict.keys()):
usingdict[use.UnqualifiedName]=use.QualifiedName.replace(".","::")
for enum in my_service_defs[key].Enums:
#if(enum.Name not in usingdict.keys()):
qualifiedname=name+"::"+enum.Name
enum_dict[enum.Name]=qualifiedname
enum_list.append(enum.Name)
#usingdict[enum.Name]=qualifiedname
#file1 = open("GeometryEnum.txt","w")
error_names=[]
print(enum_dict)
yaml_dir = source_dir.joinpath("include/RobotRaconteurCompanion/InfoParser/yaml")
yaml_dir.mkdir(parents=True, exist_ok=True)
filename4=yaml_dir.joinpath("yaml_parser_all.h")
file4=open(filename4,"w")
filenames=[]
filename5=yaml_dir.joinpath("yaml_loader_enums.h")
file5=open(filename5,"w")
file5.write("#pragma once\n")
file5.write("#include <string>\n")
file5.write("namespace RobotRaconteur{\n")
file5.write("namespace Companion{\n")
file5.write("namespace InfoParser{\n")
file5.write("namespace yaml{\n")
for entry in enum_list:
file5.write("int string_to_enum_%s(const std::string &input);\n"%(entry));
file5.write("}\n")
file5.write("}\n")
file5.write("}\n")
file5.write("}\n")
file5.close()
src_dir = source_dir.joinpath("src")
filename8=src_dir.joinpath("yaml_loader_enums.cpp")
file2=open(filename8,"w")
file2.write("#include \"RobotRaconteurCompanion/InfoParser/yaml/yaml_loader_enums.h\"\n")
file2.write("#include <string>\n\n")
file2.write("namespace RobotRaconteur{\n")
file2.write("namespace Companion{\n")
file2.write("namespace InfoParser{\n")
file2.write("namespace yaml{\n")
file4.write("#pragma once\n")
test_dir = source_dir.joinpath("test")
testfilename = test_dir.joinpath("test_infoparser.cpp")
testfile=open(testfilename,"w")
testfile.write("#include <RobotRaconteurCompanion/StdRobDef/StdRobDefAll.h>\n")
testfile.write("#include <RobotRaconteurCompanion/InfoParser/yaml/yaml_parser_all.h>\n\n")
testfile.write("void testfunction()\n{\n")
testfile.write("// Not a full test, just make sure everything compiles\n")
testfile.write("YAML::Node node = YAML::Load(\"[1, 2, 3]\");\n")
for key in my_service_defs:
usingdict={}
for e in my_service_defs[key].Enums:
enum_list.append(e.Name)
#filename="YAMLconverter__"+my_service_defs[key].Name+".h"
if("com.robotraconteur." in my_service_defs[key].Name):
filename=my_service_defs[key].Name.replace("com.robotraconteur.","")
else:
filename=my_service_defs[key].Name
filename = yaml_dir.joinpath(filename.replace(".","__")+"_parser.h")
file4.write("#include \"%s\"\n"%(filename.name))
file1=open(filename,"w")
filenames.append(filename)
file1.write("#pragma once\n")
file1.write("#include <RobotRaconteur.h>\n")
file1.write("#include \"yaml-cpp/yaml.h\"\n")
file1.write("#include <boost/uuid/uuid_io.hpp>\n")
file1.write("#include \"RobotRaconteurCompanion/StdRobDef/StdRobDefAll.h\"\n")
file1.write("#include \"yaml_loader_enums.h\"\n\n")
file1.write("using namespace RobotRaconteur;\n")
file1.write("using namespace Companion;\n")
file1.write("using namespace boost;\n\n")
file1.write("#pragma once\n\n")
file1.write("namespace RR = RobotRaconteur;\n\n")
file1.write("namespace YAML {\n")
for e in my_service_defs[key].Enums:
file2.write("int string_to_enum_%s(const std::string &input){\n"%(e.Name))
# Compare e.Name to the enum you are looking for
#print(e.Values[-1].Name)
enum_list.append(e.Name)
for e_value in e.Values:
#if(e_value.Name==e.Values[-1].Name):
file2.write("\tif(input ==\""+e_value.Name + "\") return " + str(e_value.Value)+";\n")
#else:
# file1.write("\t"+e_value.Name + " = " + str(e_value.Value)+",\n")
file2.write("\tthrow RobotRaconteur::InvalidArgumentException(\"Invalid enum value\");\n")
file2.write("}\n")
file2.write("\n")
for use in my_service_defs[key].Using:
#print(use.UnqualifiedName)
if(use.UnqualifiedName not in usingdict.keys()):
usingdict[use.UnqualifiedName]=use.QualifiedName.replace(".","::")
name=my_service_defs[key].Name.replace(".","::")
for n in my_service_defs[key].NamedArrays:
file1.write("\ttemplate<> \n\tstruct convert<%s::%s>{\n"%(name,n.Name))
file1.write("\t\tstatic Node encode(const %s::%s& rhs){\n"%(name,n.Name))
file1.write("\t\t\tNode node;\n")
file1.write("\t\t\treturn node;\n")
file1.write("\t\t}\n\n")
file1.write("\t\tstatic bool decode(const Node& node, %s::%s& rhs){\n"%(name,n.Name))
qualifiedname=name+"::"+n.Name
#print(qualifiedname)
usingdict[n.Name]=qualifiedname
count=0
for field in n.Members:
output=field.ToString()
fieldname=field.Name
            f=re.split(r'\s+', output)
#print(f)
if(f[0]=="property"):
if(f[1]=="single"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<float>();\n"%(f[2],f[2]))
elif(f[1]=="double"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<double>();\n"%(f[2],f[2]))
elif(f[1]=="int32"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<int>();\n"%(f[2],f[2]))
elif(f[1]=="uint32"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<uint32_t>();\n"%(f[2],f[2]))
elif(f[1]=="bool"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<bool>();\n"%(f[2],f[2]))
elif(f[1]=="int8"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<int8>();\n"%(f[2],f[2]))
elif(f[1]=="uint8"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<uint8_t>();\n"%(f[2],f[2]))
elif(f[1]=="int16"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<int16_t>();\n"%(f[2],f[2]))
elif(f[1]=="uint16"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<uint16_t>();\n"%(f[2],f[2]))
elif(f[1]=="int64"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<int64_t>();\n"%(f[2],f[2]))
elif(f[1]=="uint64"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<uint64_t>();\n"%(f[2],f[2]))
elif(f[1]=="string"):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<std::string>();\n"%(f[2],f[2]))
elif(f[1]=="uint8[16]"):
file1.write("\t\t\tstd::string uuid_long = node.as<std::string>();\n")
file1.write("\t\t\tauto uuid_boost = boost::lexical_cast<boost::uuids::uuid>(uuid_long);\n")
file1.write("\t\t\tcom::robotraconteur::uuid::UUID uuid;\n")
file1.write("\t\t\tstd::copy(uuid_boost.begin(), uuid_boost.end(), uuid.a.begin());\n")
#file1.write("\t\t\trhs.s.uuid_bytes=uuid;\n")
elif("[" in f[1]):
if("single" in f[1]):
#file1.write("\t\t\tRRArrayPtr<float> my_array = AllocateEmptyRRArray<float>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\trhs.s.%s[i]= node[\"%s\"][i].as<float>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
#file1.write("\t\t\trhs.s.%s = my_array;\n"%(f[2]))
elif("double" in f[1]):
#file1.write("\t\t\tRRArrayPtr<double> my_array = AllocateEmptyRRArray<double>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\trhs.s.%s[i]= node[\"%s\"][i].as<double>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
#file1.write("\t\t\trhs.s.%s = my_array;\n"%(f[2]))
elif(f[1] in usingdict.keys()):
#print(
if(usingdict.get(f[1]) in Structureslist):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<%sPtr>();\n"%(f[2],f[2],usingdict.get(f[1])))
if(usingdict.get(f[1]) in Named_arrayslist):
file1.write("\t\t\trhs.s.%s = node[\"%s\"].as<%s>();\n"%(f[2],f[2],usingdict.get(f[1])))
else:
if(qualifiedname not in error_names):
error_names.append(qualifiedname)
#print("\n"+f[1]+"\n")
#print(f[2]+"\n")
count+=1
file1.write("\t\t\treturn true;\n")
file1.write("\t\t}\n")
file1.write("\t};\n\n")
testfile.write("node.as<%s::%s>();\n"%(name,n.Name))
for e in my_service_defs[key].Structures:
file1.write("\n\ttemplate<> \n\tstruct convert<%s::%sPtr>{\n"%(name,e.Name))
file1.write("\t\tstatic Node encode(const %s::%sPtr& rhs){\n"%(name,e.Name))
file1.write("\t\t\tNode node;\n")
file1.write("\t\t\treturn node;\n")
file1.write("\t\t}\n\n")
file1.write("\t\tstatic bool decode(const Node& node, %s::%sPtr& rhs){\n"%(name,e.Name))
file1.write("\t\t\tif (!rhs) rhs.reset(new %s::%s);\n"%(name,e.Name))
qualifiedname=name+"::"+e.Name
#usingdict[e.Name]=qualifiedname
for field in e.Members:
output=field.ToString()
fieldname=field.Name
            f=re.split(r'\s+', output)
#print(f)
#TODO implement var value parsing as a map of type and value
if(f[0]=="property"):
if(f[1]=="single"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<float>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="double"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<double>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="int32"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<int>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="uint32"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<uint32_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="int8"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<int8_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="uint8"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<uint8_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="int16"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<int16_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="uint16"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<uint16_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="int64"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<int64_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="uint64"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<uint64_t>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="string"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<std::string>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="bool"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<bool>();\n"%(f[2],f[2]))
file1.write("\t\t\t}\n")
elif("[" in f[1]):
if("," in f[1]):
if("single" in f[1]):
size=f[1].replace("single[","").replace("]","")
numbers=size.split(",")
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tstd::vector<uint32_t> dims = {%s,%s};\n"%(numbers[0],numbers[1]))
file1.write("\t\t\t\tRRMultiDimArrayPtr<float> my_multidimarray = AllocateEmptyRRMultiDimArray<float>(dims);\n")
file1.write("\t\t\t\tfor(int i =0; i< %s; i++){\n"%(numbers[0]))
file1.write("\t\t\t\t\tfor(int j=0; j< %s; j++){\n"%(numbers[1]))
file1.write("\t\t\t\t\t\tmy_multidimarray->Array->at(i+(j * %s)) = node[\"%s\"][j+ (i * %s)].as<float>();\n"%(numbers[0],f[2],numbers[1]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_multidimarray;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("double" in f[1]):
size=f[1].replace("double[","").replace("]","")
numbers=size.split(",")
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tstd::vector<uint32_t> dims = {%s,%s};\n"%(numbers[0],numbers[1]))
file1.write("\t\t\t\tRRMultiDimArrayPtr<double> my_multidimarray = AllocateEmptyRRMultiDimArray<double>(dims);\n")
file1.write("\t\t\t\tfor(int i =0; i< %s; i++){\n"%(numbers[0]))
file1.write("\t\t\t\t\tfor(int j=0; j< %s; j++){\n"%(numbers[1]))
file1.write("\t\t\t\t\t\tmy_multidimarray->Array->at(i+(j * %s)) = node[\"%s\"][j+ (i * %s)].as<double>();\n"%(numbers[0],f[2],numbers[1]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_multidimarray;\n"%(f[2]))
file1.write("\t\t\t}\n")
else:
if("{string}" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRR::RRMapPtr<std::string, RR::RRArray<double>> joints;\n")
file1.write("\t\t\t\tfor (YAML::const_iterator it = node[\"%s\"].begin(); it != node[\"%s\"].end(); ++it) {\n"%(f[2],f[2]))
file1.write("\t\t\t\t\tstd::string name = it->first.as<std::string>();\n")
file1.write("\t\t\t\t\tRRArrayPtr<double> my_array = AllocateEmptyRRArray<double>(node[name].size());\n")
file1.write("\t\t\t\t\tfor (int i = 0; i < node[name].size(); i++) {\n")
file1.write("\t\t\t\t\t\tmy_array->at(i) = node[name][i].as<double>();\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tjoints->insert(std::make_pair(name,my_array));\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = joints;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("{list}" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRListPtr<RRArray<double>> listy = AllocateEmptyRRList<RRArray<double>>();\n")
#file1.write("\t\t\t\tRobotRaconteur::RRListPtr<%s> listy = AllocateEmptyRRList<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\tfor(int i =0; i<node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tRRArrayPtr<double> my_array = AllocateEmptyRRArray<double>(node[\"%s\"][i].size());\n"%(f[2]))
file1.write("\t\t\t\t\tfor(int k =0; k<node[\"%s\"][i].size(); k++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\tmy_array->at(k)=node[\"%s\"][i][k].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tlisty->push_back(my_array);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = listy;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("single" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRRArrayPtr<float> my_array = AllocateEmptyRRArray<float>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<float>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("double" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRRArrayPtr<double> my_array = AllocateEmptyRRArray<double>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("uint32" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRRArrayPtr<uint32_t> my_array = AllocateEmptyRRArray<uint32_t>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("uint8" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRRArrayPtr<uint8_t> my_array = AllocateEmptyRRArray<uint8_t>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<uint8_t>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("int16" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRRArrayPtr<int16_t> my_array = AllocateEmptyRRArray<int16_t>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<int16_t>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("int32" in f[1]):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRRArrayPtr<int32_t> my_array = AllocateEmptyRRArray<int32_t>(node[\"%s\"].size());\n"%(f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<int32_t>();\n"%(f[2]))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif(f[1].replace("[]","") in usingdict.keys()):
if(usingdict.get(f[1]) in Structureslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRRArrayPtr<%sPtr> my_array = AllocateEmptyRRArray<%sPtr>(node[\"%s\"].size());\n"%(usingdict.get(f[1]),usingdict.get(f[1]),f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<%sPtr>();\n"%(f[2],usingdict.get(f[1])))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
if(usingdict.get(f[1]) in Named_arrayslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRRArrayPtr<%s> my_array = AllocateEmptyRRArray<%s>(node[\"%s\"].size());\n"%(usingdict.get(f[1]),usingdict.get(f[1]),f[2]))
file1.write("\t\t\t\tfor (int i = 0; i < node[\"%s\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\tmy_array->at(i) = node[\"%s\"][i].as<%s>();\n"%(f[2],usingdict.get(f[1])))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = my_array;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif("{list}" in f[1]):
#RR::RRListPtr<imaging::camerainfo::CameraInfo> camerainfos = AllocateEmptyRRList<imaging::camerainfo::CameraInfo>();
if(f[1].replace("{list}","")=="string"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRListPtr<RRArray<char>> listy = AllocateEmptyRRList<RRArray<char>>();\n")
file1.write("\t\t\t\tfor(int j=0; j< node[\"%s\"].size(); j++){\n"%(f[2]))
file1.write("\t\t\t\t\tstd::string item= node[\"%s\"][j].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\t\tRRArrayPtr<char> itemRR= RR::stringToRRArray(item);\n")
file1.write("\t\t\t\t\tlisty->push_back(itemRR);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = listy;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif(f[1].replace("{list}","") in usingdict.keys()):
#print(f[1].replace("{list}",""))
#print(usingdict.get(f[1].replace("{list}","")))
if(usingdict.get(f[1].replace("{list}","")) in Structureslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
#file1.write("\t\t\t\tRobotRaconteur::RRListPtr<%sPtr> listy = AllocateEmptyRRList<%sPtr>();\n"%(usingdict.get(f[1].replace("{list}","")),usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\tRobotRaconteur::RRListPtr<%s> listy = AllocateEmptyRRList<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\tfor(int j=0; j< node[\"%s\"].size(); j++){\n"%(f[2]))
file1.write("\t\t\t\t\t%sPtr item= node[\"%s\"][j].as<%sPtr>();\n"%(usingdict.get(f[1].replace("{list}","")),f[2],usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\t\tlisty->push_back(item);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = listy;\n"%(f[2]))
file1.write("\t\t\t}\n")
#print("\t%s item= node[\"%s\"][j].as<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),f[2],usingdict.get(f[1].replace("{list}",""))))
if(usingdict.get(f[1].replace("{list}","")) in Named_arrayslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRobotRaconteur::RRListPtr<RRNamedArray<%s>> listy = AllocateEmptyRRList<RRNamedArray<%s>>();\n"%(usingdict.get(f[1].replace("{list}","")),usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\tfor(int j=0; j< node[\"%s\"].size(); j++){\n"%(f[2]))
file1.write("\t\t\t\t\t%s item= node[\"%s\"][j].as<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),f[2],usingdict.get(f[1].replace("{list}",""))))
file1.write("\t\t\t\t\tlisty->push_back(RobotRaconteur::ScalarToRRNamedArray(item));\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = listy;\n"%(f[2]))
file1.write("\t\t\t}\n")
#print("\t%s item= node[\"%s\"][j].as<%s>();\n"%(usingdict.get(f[1].replace("{list}","")),f[2],usingdict.get(f[1].replace("{list}",""))))
elif(f[1] in enum_list):
#std::string array_type_code = node["array_type_code"].as<std::string>();
#rhs->array_type_code = com::robotraconteur::datatype::ArrayTypeCode::ArrayTypeCode(string_to_enum_ArrayTypeCode(array_type_code));
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tstd::string enum_val_string= node[\"%s\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = %s::%s(RobotRaconteur::Companion::InfoParser::yaml::string_to_enum_%s(enum_val_string));\n"%(f[2],enum_dict.get(f[1]),f[1],f[1]))
file1.write("\t\t\t}\n")
#print("\t\t\t\trhs->%s = %s::%s(string_to_enum_%s(enum_val_string));\n"%(f[2],usingdict.get(f[1]),f[1],f[1]))
elif(f[1] in usingdict.keys()):
if(usingdict.get(f[1]) in Structureslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<%sPtr>();\n"%(f[2],f[2],usingdict.get(f[1])))
file1.write("\t\t\t}\n")
if(usingdict.get(f[1]) in Named_arrayslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\trhs->%s = node[\"%s\"].as<%s>();\n"%(f[2],f[2],usingdict.get(f[1])))
file1.write("\t\t\t}\n")
elif(f[1].replace("{string}","") in usingdict.keys()):
if(usingdict.get(f[1]) in Structureslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRR::RRMapPtr<std::string, %sPtr> joints;\n"%(usingdict.get(f[1])))
file1.write("\t\t\t\tfor (YAML::const_iterator it = node[\"%s\"].begin(); it != node[\"%s\"].end(); ++it) {\n"%(f[2],f[2]))
file1.write("\t\t\t\t\tstd::string name = it->first.as<std::string>();\n")
file1.write("\t\t\t\t\tjoints->insert(std::make_pair(name,node[name].as<%sPtr>()));\n"%(usingdict.get(f[1])))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = joints;\n"%(f[2]))
file1.write("\t\t\t}\n")
if(usingdict.get(f[1]) in Named_arrayslist):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRR::RRMapPtr<std::string, %s> joints;\n"%(usingdict.get(f[1])))
file1.write("\t\t\t\tfor (YAML::const_iterator it = node[\"%s\"].begin(); it != node[\"%s\"].end(); ++it) {\n"%(f[2],f[2]))
file1.write("\t\t\t\t\tstd::string name = it->first.as<std::string>();\n")
file1.write("\t\t\t\t\tjoints->insert(std::make_pair(name,node[name].as<%s>()));\n"%(usingdict.get(f[1])))
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = joints;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="varvalue{string}"):
#print("varvalue seen")
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tRR::RRMapPtr<std::string, RR::RRValue> vars;\n")
file1.write("\t\t\t\tfor (YAML::const_iterator it = node[\"%s\"].begin(); it != node[\"%s\"].end(); ++it) {\n"%(f[2],f[2]))
file1.write("\t\t\t\t\tstd::string name = it->first.as<std::string>();\n")
file1.write("\t\t\t\t\tstd::string type = node[\"%s\"][\"type\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\t\tRR::RRValuePtr varval;\n")
file1.write("\t\t\t\t\tif(type==\"string\"){\n")
file1.write("\t\t\t\t\t\tstd::string value = node[\"%s\"][\"value\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\t\t\tvarval=RR::stringToRRArray(value);\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"double\"){\n")
file1.write("\t\t\t\t\t\tdouble value = node[\"%s\"][\"value\"].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\t\tvarval=RR::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"int32\"){\n")
file1.write("\t\t\t\t\t\tint value = node[\"%s\"][\"value\"].as<int>();\n"%(f[2]))
file1.write("\t\t\t\t\t\tvarval=RR::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"uint32\"){\n")
file1.write("\t\t\t\t\t\tuint32_t value = node[\"%s\"][\"value\"].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t\t\tvarval=RR::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"double[]\"){\n")
file1.write("\t\t\t\t\t\tRRArrayPtr<double> my_array = AllocateEmptyRRArray<double>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\t\t}\n")
file1.write("\t\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"int32[]\"){\n")
file1.write("\t\t\t\t\t\tRR::RRArrayPtr<int> my_array = AllocateEmptyRRArray<int>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<int>();\n"%(f[2]))
file1.write("\t\t\t\t\t\t}\n")
file1.write("\t\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tif(type==\"uint32[]\"){\n")
file1.write("\t\t\t\t\t\tRR::RRArrayPtr<uint32_t> my_array = AllocateEmptyRRArray<uint32_t>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t\t\t}\n")
file1.write("\t\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t\t}\n")
#file1.write("\t\t\t\t\tRRValuePtr my_array = AllocateEmptyRRArray<double>(node[name].size());\n")
#file1.write("\t\t\t\t\tfor (int i = 0; i < node[name].size(); i++) {\n")
#file1.write("\t\t\t\t\t\tmy_array->at(i) = node[name][i].as<double>();\n")
#file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tvars->insert(std::make_pair(name,varval));\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s = vars;\n"%(f[2]))
file1.write("\t\t\t}\n")
elif(f[1]=="varvalue"):
file1.write("\t\t\tif(node[\"%s\"]){\n"%(f[2]))
file1.write("\t\t\t\tstd::string type = node[\"%s\"][\"type\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\tRR::RRValuePtr varval;\n")
file1.write("\t\t\t\tif(type==\"string\"){\n")
file1.write("\t\t\t\t\tstd::string value = node[\"%s\"][\"value\"].as<std::string>();\n"%(f[2]))
file1.write("\t\t\t\t\tvarval =RR::stringToRRArray(value);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"double\"){\n")
file1.write("\t\t\t\t\tdouble value = node[\"%s\"][\"value\"].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\tvarval=RR::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"int32\"){\n")
file1.write("\t\t\t\t\tint value= node[\"%s\"][\"value\"].as<int>();\n"%(f[2]))
file1.write("\t\t\t\t\tvarval=RR::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"uint32\"){\n")
file1.write("\t\t\t\t\tuint32_t value= node[\"%s\"][\"value\"].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t\tvarval=RR::ScalarToRRArray(value);\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"double[]\"){\n")
file1.write("\t\t\t\t\tRRArrayPtr<double> my_array = AllocateEmptyRRArray<double>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<double>();\n"%(f[2]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"int32[]\"){\n")
file1.write("\t\t\t\t\tRR::RRArrayPtr<int> my_array = AllocateEmptyRRArray<int>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<int>();\n"%(f[2]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\tif(type==\"uint32[]\"){\n")
file1.write("\t\t\t\t\tRR::RRArrayPtr<uint32_t> my_array = AllocateEmptyRRArray<uint32_t>(node[\"%s\"][\"value\"].size());\n"%(f[2]))
file1.write("\t\t\t\t\tfor (int i = 0; i < node[\"%s\"][\"value\"].size(); i++) {\n"%(f[2]))
file1.write("\t\t\t\t\t\tmy_array->at(i) = node[\"%s\"][\"value\"][i].as<uint32_t>();\n"%(f[2]))
file1.write("\t\t\t\t\t}\n")
file1.write("\t\t\t\t\tvarval=my_array;\n")
file1.write("\t\t\t\t}\n")
file1.write("\t\t\t\trhs->%s=varval;\n"%(f[2]))
file1.write("\t\t\t}\n")
else:
if(qualifiedname not in error_names):
error_names.append(qualifiedname)
file1.write("\t\t\treturn true;\n")
file1.write("\t\t}\n")
file1.write("\t};\n\n\n")
testfile.write("node.as<%s::%sPtr>();\n"%(name,e.Name))
if(len(error_names)>0):
file1.write("//TODO: Fix the following structures or namedarrays: \n")
for i in error_names:
file1.write("// "+i+" \n")
file1.write("\n}")
error_names=[]
file2.write("}\n")
file2.write("}\n")
file2.write("}\n")
file2.write("}\n")
testfile.write("}\n\n")
testfile.write("int main(int ac, char** av)\n{\n")
testfile.write("return 0;\n}\n")
print(error_names)
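# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generator above): the guarded
# "if(node[...]) { rhs->field = node[...].as<T>(); }" pattern that the long
# elif chain emits for every scalar property, factored into a hypothetical
# helper. The name `emit_scalar_property` does not exist in the script and is
# shown only to summarise the shape of the generated YAML decode block.
def emit_scalar_property(out, field_name, cpp_type):
    """Write the guarded lookup/assignment pair used for scalar fields."""
    out.write("\t\t\tif(node[\"%s\"]){\n" % field_name)
    out.write("\t\t\t\trhs->%s = node[\"%s\"].as<%s>();\n" % (field_name, field_name, cpp_type))
    out.write("\t\t\t}\n")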
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/normalization_as_tagging_infer.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example of how to run ITN inference with the ThutmoseTaggerModel.
The inference works on a raw file (no labels required).
Each line of the input file represents a single example for inference.
Specify inference.from_file and inference.batch_size parameters.
USAGE Example:
1. Train a model, or use a pretrained checkpoint.
2. Run:
export TOKENIZERS_PARALLELISM=false
python ${NEMO_PATH}/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_infer.py \
pretrained_model=./training.nemo \
inference.from_file=./input.txt \
inference.out_file=./output.txt \
model.max_sequence_len=1024 \
inference.batch_size=128
This script uses the `/examples/nlp/text_normalization_as_tagging/conf/thutmose_tagger_itn_config.yaml`
config file by default. The other option is to set another config file via command
line arguments with `--config-name=CONFIG_FILE_PATH`.
"""
import os
from helpers import ITN_MODEL, instantiate_model_and_trainer
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import spoken_preprocessing
from nemo.core.config import hydra_runner
from nemo.utils import logging
@hydra_runner(config_path="conf", config_name="thutmose_tagger_itn_config")
def main(cfg: DictConfig) -> None:
logging.debug(f'Config Params: {OmegaConf.to_yaml(cfg)}')
if cfg.pretrained_model is None:
raise ValueError("A pre-trained model should be provided.")
_, model = instantiate_model_and_trainer(cfg, ITN_MODEL, False)
text_file = cfg.inference.from_file
logging.info(f"Running inference on {text_file}...")
if not os.path.exists(text_file):
raise ValueError(f"{text_file} not found.")
with open(text_file, "r", encoding="utf-8") as f:
lines = f.readlines()
batch_size = cfg.inference.get("batch_size", 8)
batch, all_preds = [], []
for i, line in enumerate(lines):
s = spoken_preprocessing(line) # this is the same input transformation as in corpus preparation
batch.append(s.strip())
if len(batch) == batch_size or i == len(lines) - 1:
outputs = model._infer(batch)
for x in outputs:
all_preds.append(x)
batch = []
if len(all_preds) != len(lines):
raise ValueError(
"number of input lines and predictions is different: predictions="
+ str(len(all_preds))
+ "; lines="
+ str(len(lines))
)
out_file = cfg.inference.out_file
with open(f"{out_file}", "w", encoding="utf-8") as f_out:
f_out.write("\n".join(all_preds))
logging.info(f"Predictions saved to {out_file}.")
if __name__ == "__main__":
main()
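# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the fixed-size
# batching performed in the loop above, shown as a stand-alone helper. The
# name `chunked` is hypothetical and used only for this example.
def chunked(items, batch_size):
    """Yield consecutive slices of `items` of at most `batch_size` elements."""
    for start in range(0, len(items), batch_size):
        yield items[start:start + batch_size]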
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/dataset/dialogue_bert_dataset.py
|
<reponame>gkucsko/NeMo<filename>nemo/collections/nlp/data/dialogue/dataset/dialogue_bert_dataset.py
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import numpy as np
from nemo.collections.nlp.data.data_utils import get_stats
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['DialogueBERTDataset']
class DialogueBERTDataset(DialogueDataset):
"""
Creates a dataset to use for the task of joint intent
and slot classification with a pretrained model.
For a dataset to use during inference without labels, see
IntentSlotDataset.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'intent_labels': NeuralType(('B'), LabelsType()),
'slot_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
"""
Args:
dataset_split: dataset split
dialogues_processor: Data generator for dialogues
tokenizer: tokenizer
cfg: config container for dataset
"""
self.cfg = cfg
self.all_possible_labels = dialogues_processor.intents
self.label_to_label_id = {self.all_possible_labels[i]: i for i in range(len(self.all_possible_labels))}
self.all_possible_slots = dialogues_processor.slots
self.slot_name_to_slot_id = {self.all_possible_slots[i]: i for i in range(len(self.all_possible_slots))}
self.empty_slot_name = 'O'
self.features = dialogues_processor.get_dialog_examples(dataset_split)
self.features = self.features if self.cfg.num_samples == -1 else self.features[: self.cfg.num_samples]
queries = [feature.data["utterance"] for feature in self.features]
if self.cfg.do_lowercase:
queries = [query.lower() for query in queries]
intents = [self.label_to_label_id[feature.data["labels"]["intent"]] for feature in self.features]
word_level_slots = [self.convert_slot_position_to_slot_ids(feature.data) for feature in self.features]
features = DialogueBERTDataset.get_features(
queries,
self.cfg.max_seq_length,
tokenizer,
pad_label=self.cfg.pad_label,
word_level_slots=word_level_slots,
ignore_extra_tokens=self.cfg.ignore_extra_tokens,
ignore_start_end=self.cfg.ignore_start_end,
)
self.all_input_ids = features[0]
self.all_segment_ids = features[1]
self.all_input_mask = features[2]
self.all_loss_mask = features[3]
self.all_subtokens_mask = features[4]
self.all_slots = features[5]
self.all_intents = intents
def convert_slot_position_to_slot_ids(self, feature):
slot_ids = [self.slot_name_to_slot_id[self.empty_slot_name] for i in range(len(feature["utterance"].split()))]
slot_name_to_positions = feature["label_positions"]["slots"]
for slot_name in slot_name_to_positions:
slot_id = self.slot_name_to_slot_id[slot_name]
start = slot_name_to_positions[slot_name]["start"]
exclusive_end = slot_name_to_positions[slot_name]["exclusive_end"]
for to_replace_position in range(start, min(exclusive_end, len(slot_ids))):
slot_ids[to_replace_position] = slot_id
return slot_ids
def __len__(self):
return len(self.all_input_ids)
def __getitem__(self, idx):
return (
np.array(self.all_input_ids[idx]),
np.array(self.all_segment_ids[idx]),
np.array(self.all_input_mask[idx], dtype=np.long),
np.array(self.all_loss_mask[idx]),
np.array(self.all_subtokens_mask[idx]),
self.all_intents[idx],
np.array(self.all_slots[idx]),
)
@staticmethod
def truncate_and_pad(
max_seq_length,
ignore_start_end,
with_label,
pad_label,
tokenizer,
all_slots,
all_subtokens,
all_input_mask,
all_loss_mask,
all_subtokens_mask,
all_input_ids,
all_segment_ids,
):
too_long_count = 0
for i, subtokens in enumerate(all_subtokens):
if len(subtokens) > max_seq_length:
subtokens = [tokenizer.cls_token] + subtokens[-max_seq_length + 1 :]
all_input_mask[i] = [1] + all_input_mask[i][-max_seq_length + 1 :]
all_loss_mask[i] = [1 - ignore_start_end] + all_loss_mask[i][-max_seq_length + 1 :]
all_subtokens_mask[i] = [0] + all_subtokens_mask[i][-max_seq_length + 1 :]
if with_label:
all_slots[i] = [pad_label] + all_slots[i][-max_seq_length + 1 :]
too_long_count += 1
all_input_ids.append([tokenizer.tokens_to_ids(t) for t in subtokens])
if len(subtokens) < max_seq_length:
extra = max_seq_length - len(subtokens)
all_input_ids[i] = all_input_ids[i] + [0] * extra
all_loss_mask[i] = all_loss_mask[i] + [0] * extra
all_subtokens_mask[i] = all_subtokens_mask[i] + [0] * extra
all_input_mask[i] = all_input_mask[i] + [0] * extra
if with_label:
all_slots[i] = all_slots[i] + [pad_label] * extra
all_segment_ids.append([0] * max_seq_length)
logging.info(f'{too_long_count} are longer than {max_seq_length}')
return (
all_slots,
all_subtokens,
all_input_mask,
all_loss_mask,
all_subtokens_mask,
all_input_ids,
all_segment_ids,
)
@staticmethod
def get_features(
queries,
max_seq_length,
tokenizer,
pad_label=128,
word_level_slots=None,
ignore_extra_tokens=False,
ignore_start_end=False,
):
"""
Convert queries (utterance, intent label and slot labels) to BERT input format
"""
all_subtokens = []
all_loss_mask = []
all_subtokens_mask = []
all_segment_ids = []
all_input_ids = []
all_input_mask = []
sent_lengths = []
all_slots = []
with_label = word_level_slots is not None
for i, query in enumerate(queries):
words = query.strip().split()
subtokens = [tokenizer.cls_token]
loss_mask = [1 - ignore_start_end]
subtokens_mask = [0]
if with_label:
slots = [pad_label]
for j, word in enumerate(words):
word_tokens = tokenizer.text_to_tokens(word)
# to handle emojis that could be neglected during tokenization
if len(word.strip()) > 0 and len(word_tokens) == 0:
word_tokens = [tokenizer.ids_to_tokens(tokenizer.unk_id)]
subtokens.extend(word_tokens)
# mask all sub-word tokens except the first token in a word
# use the label for the first sub-word token as the label for the entire word to eliminate need for disambiguation
loss_mask.append(1)
loss_mask.extend([int(not ignore_extra_tokens)] * (len(word_tokens) - 1))
subtokens_mask.append(1)
subtokens_mask.extend([0] * (len(word_tokens) - 1))
if with_label:
slots.extend([word_level_slots[i][j]] * len(word_tokens))
subtokens.append(tokenizer.sep_token)
loss_mask.append(1 - ignore_start_end)
subtokens_mask.append(0)
sent_lengths.append(len(subtokens))
all_subtokens.append(subtokens)
all_loss_mask.append(loss_mask)
all_subtokens_mask.append(subtokens_mask)
all_input_mask.append([1] * len(subtokens))
if with_label:
slots.append(pad_label)
all_slots.append(slots)
max_seq_length_data = max(sent_lengths)
max_seq_length = min(max_seq_length, max_seq_length_data) if max_seq_length > 0 else max_seq_length_data
logging.info(f'Setting max length to: {max_seq_length}')
get_stats(sent_lengths)
# truncate and pad samples
(
all_slots,
all_subtokens,
all_input_mask,
all_loss_mask,
all_subtokens_mask,
all_input_ids,
all_segment_ids,
) = DialogueBERTDataset.truncate_and_pad(
max_seq_length,
ignore_start_end,
with_label,
pad_label,
tokenizer,
all_slots,
all_subtokens,
all_input_mask,
all_loss_mask,
all_subtokens_mask,
all_input_ids,
all_segment_ids,
)
# log examples for debugging
logging.debug("*** Some Examples of Processed Data ***")
for i in range(min(len(all_input_ids), 5)):
logging.debug("i: %s" % (i))
logging.debug("subtokens: %s" % " ".join(list(map(str, all_subtokens[i]))))
logging.debug("loss_mask: %s" % " ".join(list(map(str, all_loss_mask[i]))))
logging.debug("input_mask: %s" % " ".join(list(map(str, all_input_mask[i]))))
logging.debug("subtokens_mask: %s" % " ".join(list(map(str, all_subtokens_mask[i]))))
if with_label:
logging.debug("slots_label: %s" % " ".join(list(map(str, all_slots[i]))))
return (all_input_ids, all_segment_ids, all_input_mask, all_loss_mask, all_subtokens_mask, all_slots)
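# ---------------------------------------------------------------------------
# Illustrative example (assumed values, not part of the original module):
# how DialogueBERTDataset.convert_slot_position_to_slot_ids turns slot label
# positions into word-level slot ids. The utterance and slot names below are
# made up for demonstration purposes only.
def _example_word_level_slot_ids():
    utterance = "wake me up at seven am"
    slot_name_to_slot_id = {"O": 0, "time": 1}
    label_positions = {"time": {"start": 4, "exclusive_end": 6}}
    slot_ids = [slot_name_to_slot_id["O"]] * len(utterance.split())
    for slot_name, pos in label_positions.items():
        for i in range(pos["start"], min(pos["exclusive_end"], len(slot_ids))):
            slot_ids[i] = slot_name_to_slot_id[slot_name]
    return slot_ids  # [0, 0, 0, 0, 1, 1]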
|
gkucsko/NeMo
|
nemo/collections/nlp/data/language_modeling/megatron/retro_dataset.py
|
<gh_stars>0
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RETRO Style dataset."""
import os
from typing import List
import numpy as np
import torch
from nemo.collections.nlp.data.language_modeling.megatron.base_dataset_utils import (
get_datasets_weights_and_num_samples,
get_train_valid_test_split_,
)
from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset
from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import (
_build_index_mappings,
get_indexed_dataset_,
)
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (
KNNIndex,
MMapRetrievalIndexedDataset,
)
from nemo.core import Dataset
from nemo.utils import logging
try:
from apex.transformer import parallel_state
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = [
"RETRODataset",
"build_train_valid_test_datasets",
"MockRETRODataset",
"build_mock_train_valid_test_datasets",
]
class RETRODataset(Dataset):
"""
Dataset for RETRO model.
It constructs a single data record from the training/retrieval indexed retrieval dataset and the KNN index file.
The KNN index file maps each data chunk id to the chunk ids of its K-nearest neighbors in the retrieval dataset.
First, it loads a long sequence (2048) from the training dataset. Then, for each chunk in the sequence, it finds the KNN
chunks from the retrieval dataset using the KNN index. Lastly, it computes the masks based on the pad id.
"""
def __init__(
self,
cfg,
trainer,
tokenizer,
name: str,
data_prefix: str,
documents, # document ids in the indexed_dataset used for this dataset
indexed_dataset: MMapRetrievalIndexedDataset,
num_samples: int, # number of data samples, max_steps * global_batch_size
seq_length: int, # input seq length
seed: int,
knn_index: KNNIndex,
retrieval_index: MMapRetrievalIndexedDataset,
):
if not HAVE_APEX:
raise ImportError(
"Apex was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
super().__init__()
self.name = name
self.indexed_dataset: MMapRetrievalIndexedDataset = indexed_dataset
self.knn_index: KNNIndex = knn_index
self.retrieval_index: MMapRetrievalIndexedDataset = retrieval_index
self.chunk_size = self.indexed_dataset.chunk_size
# make sure seq_length is a multiple of chunk_size
assert seq_length % self.chunk_size == 0
# Checks
assert np.min(documents) >= 0
assert np.max(documents) < indexed_dataset.sizes.shape[0]
self.eos_id = tokenizer.eos_id
self.pad_id = tokenizer.pad_id
assert self.retrieval_index._index.retrieval_db
self._validate_pad_id()
# save index mappings to a configurable dir
self.index_mapping_dir = cfg.data.get('index_mapping_dir', None)
self.neighbors = cfg.data.get('neighbors', self.knn_index.K)
# the number of neighbors cannot exceed the max number of neighbors in the index
assert self.neighbors <= self.knn_index.K
# create index_mapping_dir on rank 0
if torch.distributed.is_available() and torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
if self.index_mapping_dir is not None and not os.path.isdir(self.index_mapping_dir):
os.makedirs(self.index_mapping_dir)
torch.distributed.barrier()
# Build index mappings.
self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings(
self.name,
data_prefix,
documents,
self.indexed_dataset.sizes,
num_samples,
seq_length,
seed,
index_mapping_dir=self.index_mapping_dir,
)
if len(self.doc_idx) > np.iinfo('int32').max:
raise ValueError("number of epochs exceeds the maximum number for int32 used by sample_idx")
self.padding_context = np.ones(2 * self.chunk_size, dtype=self.retrieval_index._index.dtype) * self.pad_id
def _validate_pad_id(self):
# validate the pad_id matches the dataset pad_id
ptr, size = self.retrieval_index._index[0]
ptr += size * np.dtype(self.retrieval_index._index.dtype).itemsize
# padded chunk_size of pad_ids at the end of the doc
retrieval_paddings = np.frombuffer(
self.retrieval_index._bin_buffer,
dtype=self.retrieval_index._index.dtype,
count=self.chunk_size,
offset=ptr,
)
assert (retrieval_paddings == self.pad_id).all()
ptr, size = self.indexed_dataset._index[0]
ptr += (size - 1) * np.dtype(self.indexed_dataset._index.dtype).itemsize
data_paddings = np.frombuffer(
self.indexed_dataset._bin_buffer, dtype=self.indexed_dataset._index.dtype, count=1, offset=ptr
)
# the last element is either a padding or an eos
assert (data_paddings == self.pad_id).all() or (data_paddings == self.eos_id).all()
def __len__(self):
# -1 is due to the data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
return self.sample_idx.shape[0] - 1
def _get_chunks(self, chunk_id: int, num_chunks: int, chunks: List):
"""
starting from chunk_id, loop for num_chunks, get the
KNN chunk ids from retrieval dataset, and get the chunk token ids,
put them into the chunks list
"""
for i in range(chunk_id, chunk_id + num_chunks):
knn = self.knn_index.get_KNN_chunk_ids(i)
for rid in knn[: self.neighbors]:
if rid < 0:
# no neighbor, just pad it
one_chunk = self.padding_context
else:
one_chunk = self.retrieval_index.get_chunk(rid)
chunks.append(one_chunk)
def _get_text(self, idx: int) -> np.ndarray:
# Get the shuffled index.
idx = self.shuffle_idx[idx]
# Start and end documents and offsets.
doc_index_f = self.sample_idx[idx][0]
doc_index_l = self.sample_idx[idx + 1][0]
offset_f = self.sample_idx[idx][1]
offset_l = self.sample_idx[idx + 1][1]
# If we are within the same document, just extract the chunk.
if doc_index_f == doc_index_l:
sample = self.indexed_dataset.get(
self.doc_idx[doc_index_f], offset=offset_f, length=offset_l - offset_f + 1
)
chunk_id = self.indexed_dataset.get_chunk_id(self.doc_idx[doc_index_f], offset_f)
num_chunks = (offset_l - offset_f) // self.chunk_size
chunks = []
self._get_chunks(chunk_id, num_chunks, chunks)
chunks = np.stack(chunks, axis=0).reshape(num_chunks, self.neighbors, -1).astype(np.int64)
else:
# Otherwise, get the rest of the initial document.
sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], offset=offset_f)]
num_chunks = (self.indexed_dataset._index.sizes[self.doc_idx[doc_index_f]] - offset_f) // self.chunk_size
total_chunks = num_chunks
chunks = []
chunk_id = self.indexed_dataset.get_chunk_id(self.doc_idx[doc_index_f], offset_f)
self._get_chunks(chunk_id, num_chunks, chunks)
# Loop over all in between documents and add the entire document.
for i in range(doc_index_f + 1, doc_index_l):
sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
chunk_id = self.indexed_dataset.get_chunk_id(self.doc_idx[i], 0)
num_chunks = self.indexed_dataset._index.sizes[self.doc_idx[i]] // self.chunk_size
total_chunks += num_chunks
self._get_chunks(chunk_id, num_chunks, chunks)
# And finally add the relevant portion of last document.
chunk_id = self.indexed_dataset.get_chunk_id(self.doc_idx[doc_index_l], 0)
num_chunks = (offset_l) // self.chunk_size
total_chunks += num_chunks
self._get_chunks(chunk_id, num_chunks, chunks)
sample_list.append(self.indexed_dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1))
sample = np.concatenate(sample_list)
chunks = np.stack(chunks, axis=0).reshape(total_chunks, self.neighbors, -1).astype(np.int64)
return sample.astype(np.int64), chunks
def __getitem__(self, idx):
text, retrieved = self._get_text(idx)
text = torch.from_numpy(text)
retrieved = torch.from_numpy(retrieved)
tokens = text[:-1].contiguous()
labels = text[1:].contiguous()
hidden_mask = tokens != self.pad_id
context_mask = retrieved != self.pad_id
return {
'tokens': tokens,
'labels': labels,
'tokens_mask': hidden_mask,
'loss_mask': hidden_mask,
'retrieved_emb_mask': context_mask,
'retrieved_ids': retrieved,
}
def build_train_valid_test_datasets(
cfg,
trainer,
data_prefix: List[str],
data_impl: str,
splits_string: str,
train_valid_test_num_samples,
seq_length: int,
seed: int,
skip_warmup: bool,
tokenizer,
retrieval_prefix: str,
knn_map_path: List[str],
):
"""Build train, valid, and test RETRO datasets.
There is one to one mapping between data_prefix and knn_map_path.
Currently only supports one retrieval dataset.
"""
# make sure there is one to one mapping between data_prefix and knn_map_path
assert len(data_prefix) == len(knn_map_path)
# Single dataset.
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(
cfg,
trainer,
data_prefix[0],
data_impl,
splits_string,
train_valid_test_num_samples,
seq_length,
seed,
skip_warmup,
tokenizer,
retrieval_prefix,
knn_map_path[0],
)
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples)
prefixes, weights, datasets_train_valid_test_num_samples = output
# Build individual datasets.
train_datasets = []
valid_datasets = []
test_datasets = []
for i in range(len(prefixes)):
train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
cfg,
trainer,
prefixes[i],
data_impl,
splits_string,
datasets_train_valid_test_num_samples[i],
seq_length,
seed,
skip_warmup,
tokenizer,
retrieval_prefix,
knn_map_path[i],
)
if train_ds:
train_datasets.append(train_ds)
if valid_ds:
valid_datasets.append(valid_ds)
if test_ds:
test_datasets.append(test_ds)
# Blend.
blending_train_dataset = None
if train_datasets:
blending_train_dataset = BlendableDataset(train_datasets, weights)
blending_valid_dataset = None
if valid_datasets:
blending_valid_dataset = BlendableDataset(valid_datasets, weights)
blending_test_dataset = None
if test_datasets:
blending_test_dataset = BlendableDataset(test_datasets, weights)
return (blending_train_dataset, blending_valid_dataset, blending_test_dataset)
def _build_train_valid_test_datasets(
cfg,
trainer,
data_prefix: str,
data_impl: str,
splits_string: str,
train_valid_test_num_samples,
seq_length: int,
seed: int,
skip_warmup: bool,
tokenizer,
retrieval_prefix: str,
knn_map_path: str,
):
"""Build train, valid, and test datasets."""
# Indexed dataset.
indexed_dataset: MMapRetrievalIndexedDataset = get_indexed_dataset_(data_prefix, data_impl, skip_warmup)
knn_index: KNNIndex = KNNIndex(knn_map_path, skip_warmup)
retrieval_index: MMapRetrievalIndexedDataset = get_indexed_dataset_(retrieval_prefix, data_impl, skip_warmup)
total_num_of_documents = indexed_dataset.sizes.shape[0]
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
# Print stats about the splits.
logging.info(' > dataset split:')
def print_split_stats(name, index):
logging.info(' {}:'.format(name))
logging.info(
' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1], splits[index + 1] - splits[index])
)
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
dataset = None
if splits[index + 1] > splits[index]:
documents = np.arange(start=splits[index], stop=splits[index + 1], step=1, dtype=np.int32)
dataset = RETRODataset(
cfg,
trainer,
tokenizer,
name,
data_prefix,
documents,
indexed_dataset,
train_valid_test_num_samples[index],
seq_length,
seed,
knn_index,
retrieval_index,
)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
class MockRETRODataset(torch.utils.data.Dataset):
def __init__(self, cfg, trainer, tokenizer, name, size):
super().__init__()
self.name = name
self.tokenizer = tokenizer
self._cfg = cfg
self.size = size
seed_val = parallel_state.get_data_parallel_rank() * 131 + 97
torch.manual_seed(seed_val)
def __len__(self):
return self.size
def __getitem__(self, idx):
vocab_size = self.tokenizer.vocab_size
neighbors = self._cfg.data.neighbors
input_length = self._cfg.data.seq_length
chunks = input_length // self._cfg.chunk_size
chunk_size = self._cfg.chunk_size
pad_id = self.tokenizer.pad_id
all_tokens = torch.randint(0, vocab_size, (input_length + 1,))
# make sure the eod happens at the end of each chunk, can add paddings to it
# e.g. [..., id, id, pad, pad, pad, eod] each has chunk_size, each sentence
# has length of multiple of chunk_size
hidden = all_tokens[:-1]
labels = all_tokens[1:]
hidden_mask = hidden != pad_id
# to mask out the token ids [id, id, eod, id, pad, eod, id, id]
# so attention is not across eod, mask should be:
# [false, true, true, true, true, true, true, true]
# [false, false, true, true, true, true, true, true]
# [false, false, false,true, true, true, true, true]
# [true, true, true, false, true, true, true, true]
# [true, true, true, true, true, true, true, true]
# [true, true, true, false, true, false, true, true]
# [true, true, true, true, true, true, false, true]
# [true, true, true, true, true, true, false, false]
retrieved = torch.randint(0, vocab_size, (chunks, neighbors, 2 * chunk_size))
context_mask = retrieved != pad_id
return {
'tokens': hidden,
'labels': labels,
'tokens_mask': hidden_mask,
'loss_mask': hidden_mask,
'retrieved_emb_mask': context_mask,
'retrieved_ids': retrieved,
}
def build_mock_train_valid_test_datasets(
cfg, trainer, splits_string, tokenizer, mock_data_size,
):
"""Build train, valid, and test datasets."""
splits = get_train_valid_test_split_(splits_string, mock_data_size)
# Print stats about the splits.
logging.info(' > dataset split:')
def print_split_stats(name, index):
logging.info(' {}:'.format(name))
logging.info(
' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1], splits[index + 1] - splits[index])
)
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
dataset = None
if splits[index + 1] > splits[index]:
dataset = MockRETRODataset(cfg, trainer, tokenizer, name, splits[index + 1] - splits[index],)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
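# ---------------------------------------------------------------------------
# Illustrative shape check (assumed sizes, not part of the original module):
# with seq_length 2048, chunk_size 64 and 2 neighbors per chunk, the
# retrieved-context tensor returned by __getitem__ has shape
# (seq_length // chunk_size, neighbors, 2 * chunk_size).
def _example_retrieved_shape(seq_length=2048, chunk_size=64, neighbors=2):
    assert seq_length % chunk_size == 0  # mirrors the check in RETRODataset.__init__
    return (seq_length // chunk_size, neighbors, 2 * chunk_size)  # (32, 2, 128)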
|
gkucsko/NeMo
|
scripts/dataset_processing/nlp/intent_and_slot/prompt_learning_assistant_preprocessing.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import random
from assistant_utils import process_assistant
from tqdm import tqdm
"""
Dataset preprocessing script for the Assistant dataset: https://github.com/xliuhw/NLU-Evaluation-Data/archive/master.zip
Converts the dataset into a jsonl format that can be used for p-tuning/prompt tuning in NeMo.
Inputs:
source-dir: (str) The unzipped directory where the assistant dataset was downloaded
nemo-format-dir: (str) The directory where intermediate preprocessed files will be saved
output-dir: (str) The directory where the final train, val, and test files will be saved
save-name-base: (str) The base name for each of the train, val, and test files. If save-name-base were 'assistant' for
example, the files would be saved as assistant_train.jsonl, assistant_val.jsonl, and assistant_test.jsonl
make-ground-truth: (bool) If true, test files will include answers, if false, test files will not include answers
include-options: (bool) If true, all intent and slot options will be added to the jsonl file under the key names
'intent options' and 'slot_options'. This will be added in addition to 'taskname', 'utterance', and 'label'.
random-seed: (int) Random seed for repeatable shuffling of train/val/test splits.
Saves train, val, and test files for the assistant dataset.
Example Output format (with include-options = False):
{"taskname": "intent_and_slot", "utterance": "who was <NAME>", "label": "\nIntent: qa_factoid\nSlots: person(<NAME>)"}
{"taskname": "intent_and_slot", "utterance": "can you play my favorite music", "label": "\nIntent: play_music\nSlots: None"}
{"taskname": "intent_and_slot", "utterance": "is adele going to go on tour", "label": "\nIntent: qa_factoid\nSlots: artist_name(adele)"}
{"taskname": "intent_and_slot", "utterance": "will the temperature be in the today", "label": "\nIntent: weather_query\nSlots: weather_descriptor(temperature), date(today)"}
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--source-dir", type=str, default="data/assistant/NLU-Evaluation-Data-master")
parser.add_argument("--nemo-format-dir", type=str, default="data/assistant/nemo-format")
parser.add_argument("--output-dir", type=str, default="data/assistant")
parser.add_argument("--save-name-base", type=str, default="assistant")
parser.add_argument("--make-ground-truth", action='store_true')
parser.add_argument("--include-options", action='store_true')
parser.add_argument("--random-seed", type=int, default=42)
args = parser.parse_args()
random.seed(args.random_seed)
process_assistant(args.source_dir, args.nemo_format_dir, modes=["train", "test"])
intent_dict = open(f"{args.nemo_format_dir}/dict.intents.csv").readlines()
slot_dict = open(f"{args.nemo_format_dir}/dict.slots.csv").readlines()
# Convert train set to prompt learning format
train_utterance_lines = open(f"{args.nemo_format_dir}/train.tsv").readlines()[1:]
train_slot_lines = open(f"{args.nemo_format_dir}/train_slots.tsv").readlines()
train_examples = list(zip(train_utterance_lines, train_slot_lines))
random.shuffle(train_examples)
train_utterance_lines, train_slot_lines = zip(*train_examples)
train_save_path = f"{args.output_dir}/{args.save_name_base}_train.jsonl"
process_data_for_prompt_learning(
train_utterance_lines, train_slot_lines, intent_dict, slot_dict, train_save_path, args.include_options,
)
# Split test set into validation and test sets
test_utterance_lines = open(f"{args.nemo_format_dir}/test.tsv").readlines()[1:]
test_slot_lines = open(f"{args.nemo_format_dir}/test_slots.tsv").readlines()
val_half = len(test_utterance_lines) // 2
test_examples = list(zip(test_utterance_lines, test_slot_lines))
random.shuffle(test_examples)
test_utterance_lines, test_slot_lines = zip(*test_examples)
# Convert val set to prompt learning format
val_utterance_lines = test_utterance_lines[:val_half]
val_slot_lines = test_slot_lines[:val_half]
val_save_path = f"{args.output_dir}/{args.save_name_base}_val.jsonl"
process_data_for_prompt_learning(
val_utterance_lines, val_slot_lines, intent_dict, slot_dict, val_save_path, args.include_options,
)
# Convert test set to prompt learning format
test_utterance_lines = test_utterance_lines[val_half:]
test_slot_lines = test_slot_lines[val_half:]
test_save_path = f"{args.output_dir}/{args.save_name_base}_test.jsonl"
process_data_for_prompt_learning(
test_utterance_lines,
test_slot_lines,
intent_dict,
slot_dict,
test_save_path,
args.include_options,
make_ground_truth=args.make_ground_truth,
)
def process_data_for_prompt_learning(
utterance_lines, slot_lines, intent_dict, slot_dict, save_path, include_options, make_ground_truth=False
):
"""
Formats each line in the utterance file as a json object
with intent and slot labels.
"""
save_file = open(save_path, "w")
print(f"Saving data to {save_path}")
# List all possible intent and slot labels
if include_options:
all_intents = ", ".join([intent.strip() for intent in intent_dict])
all_slots = ", ".join([slot.strip() for slot in slot_dict])
# all_labels = f"possible intents: {all_intents}\n\npossible slots: {all_slots}\n\n"
for line_idx, line in enumerate(tqdm(utterance_lines)):
# Get utterance and intent label
utterance, intent_label_idx = line.split("\t")
intent_label_idx = int(intent_label_idx.strip())
intent_label = intent_dict[intent_label_idx].strip()
slot_line = slot_lines[line_idx].strip().split()
# Get and format all slot labels for the utterance
slot_labels = get_slots(slot_line, utterance, slot_dict)
if include_options:
example_json = {
"taskname": "intent_and_slot",
"intent options": all_intents,
"slot_options": all_slots,
"utterance": utterance,
}
else:
example_json = {
"taskname": "intent_and_slot",
"utterance": utterance,
}
# Don't want test examples to have labels
if "_test" not in save_path or make_ground_truth:
example_json["label"] = f"\nIntent: {intent_label}\nSlots: {slot_labels}"
save_file.write(json.dumps(example_json) + "\n")
def get_slots(slot_line, utterance, slot_dict):
"""
Formats slot labels for an utterance. Ensures the multiword
slot labels are grouped together. For example the words
'birthday party' should be grouped together under the
same event_name label like event_name(birthday party)
instead of event_name(birthday), event_name(party).
"""
# Get slots and their labels
utterance_words = utterance.split()
slots_and_labels = []
prev_slot_label = 'O'
prev_word_idx = 0
current_word = ""
if len(utterance_words) != len(slot_line):
slot_line = slot_line[1:]
for word_idx, slot_label_idx in enumerate(slot_line):
word = utterance_words[word_idx]
slot_label = slot_dict[int(slot_label_idx)].strip()
# Only care about words with labels
if slot_label != 'O':
# Keep multiword answers together
if prev_slot_label == slot_label and prev_word_idx == word_idx - 1:
current_word += " " + word
# Previous answer has ended and a new one is starting
else:
if current_word != "":
slots_and_labels.append(f"{prev_slot_label}({current_word})")
current_word = word
prev_word_idx = word_idx
prev_slot_label = slot_label.strip()
# Add last labeled word to list of slots and labels if the utterance is over
if current_word != "" and prev_slot_label != 'O':
slots_and_labels.append(f"{prev_slot_label}({current_word})")
# Format slot labels
if not slots_and_labels:
slot_labels = "None"
else:
slot_labels = ", ".join(slots_and_labels)
return slot_labels
if __name__ == "__main__":
main()
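# ---------------------------------------------------------------------------
# Illustrative call (made-up dictionaries, not part of the original script):
# the multiword grouping described in get_slots' docstring. With slot_dict
# index 0 mapping to 'O' and index 1 to 'event_name', the two labelled words
# are merged into a single slot value.
def _example_get_slots():
    slot_dict = ["O\n", "event_name\n"]       # index 0 -> 'O', index 1 -> 'event_name'
    slot_line = ["0", "0", "1", "1"]          # per-word slot label indices
    utterance = "plan a birthday party"
    return get_slots(slot_line, utterance, slot_dict)  # "event_name(birthday party)"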
|
gkucsko/NeMo
|
tests/collections/common/mixins/test_adapter_modules.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.common.parts import adapter_modules
from nemo.core.classes.mixins import adapter_mixin_strategies
from nemo.utils import config_utils
class TestAdapterModules:
@pytest.mark.unit
def test_linear_adapter_config(self):
IGNORED_ARGS = ['_target_']
result = config_utils.assert_dataclass_signature_match(
adapter_modules.LinearAdapter, adapter_modules.LinearAdapterConfig, ignore_args=IGNORED_ARGS
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_linear_adapter_init(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
adapter = adapter_modules.LinearAdapter(in_features=50, dim=5)
with torch.no_grad():
assert adapter.module[-1].weight.sum() == 0
if hasattr(adapter.module[-1], 'bias') and adapter.module[-1].bias is not None:
assert adapter.module[-1].bias.sum() == 0
out = adapter(x)
assert out.sum() <= 1e-8
@pytest.mark.unit
def test_linear_adapter_dropout(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
adapter = adapter_modules.LinearAdapter(in_features=50, dim=5, dropout=0.5)
with torch.no_grad():
assert adapter.module[-1].weight.sum() == 0
if hasattr(adapter.module[-1], 'bias') and adapter.module[-1].bias is not None:
assert adapter.module[-1].bias.sum() == 0
out = adapter(x)
assert out.sum() <= 1e-8
@pytest.mark.unit
@pytest.mark.parametrize('norm_position', ['pre', 'post'])
def test_linear_adapter_norm_position(self, norm_position):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
adapter = adapter_modules.LinearAdapter(in_features=50, dim=5, norm_position=norm_position)
with torch.no_grad():
assert adapter.module[-1].weight.sum() == 0
if hasattr(adapter.module[-1], 'bias') and adapter.module[-1].bias is not None:
assert adapter.module[-1].bias.sum() == 0
out = adapter(x)
assert out.sum() <= 1e-8
@pytest.mark.unit
def test_linear_adapter_strategy(self):
adapter = adapter_modules.LinearAdapter(in_features=50, dim=5)
assert hasattr(adapter, 'adapter_strategy')
assert adapter.adapter_strategy is not None
# assert default strategy is set
assert isinstance(adapter.adapter_strategy, adapter_mixin_strategies.ResidualAddAdapterStrategy)
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/dataset_preparation/get_label_vocab.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to get label vocab from train and dev labeled files.
"""
import sys
from argparse import ArgumentParser
from collections import Counter
parser = ArgumentParser(description="Get label vocab")
parser.add_argument("--train_filename", required=True, type=str, help='File with training data')
parser.add_argument("--dev_filename", required=True, type=str, help='File with development data')
parser.add_argument("--out_filename", required=True, type=str, help='Output file')
args = parser.parse_args()
vocab = Counter()
n = 0
for fn in [args.train_filename, args.dev_filename]:
with open(fn, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split("\t")
if len(parts) < 2:
print("Warning: bad format in line: " + str(n) + ": " + line, file=sys.stderr)
continue
tags = parts[1].split(" ")
for t in tags:
if t == "<SELF>":
vocab["KEEP"] += 1
elif t == "<DELETE>":
vocab["DELETE"] += 1
else:
vocab["DELETE|" + t] += 1
n += 1
print("len(vocab)=", len(vocab))
with open(args.out_filename, "w", encoding="utf-8") as out:
out.write("KEEP\n")
out.write("DELETE\n")
for t, freq in vocab.most_common(10000000):
if t == "KEEP":
continue
if t == "DELETE":
continue
out.write(t + "\n")
|
gkucsko/NeMo
|
nemo/collections/common/metrics/metric_string_to_torchmetric.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torchmetrics import (
AUC,
AUROC,
Accuracy,
AveragePrecision,
F1Score,
MatthewsCorrCoef,
PearsonCorrCoef,
SpearmanCorrCoef,
SQuAD,
)
from nemo.collections.common.metrics.classification_accuracy import ExactStringMatchMetric
__all__ = ['MetricStringToTorchMetric']
# Dictionary that maps a metric string name to its corresponding torchmetric class.
MetricStringToTorchMetric = {
'accuracy': Accuracy,
'auc': AUC,
'auroc': AUROC,
'average_precision': AveragePrecision,
'f1': F1Score,
'pearson_corr_coef': PearsonCorrCoef,
'spearman_corr_coef': SpearmanCorrCoef,
'matthews_corr_coef': MatthewsCorrCoef,
'exact_string_match': ExactStringMatchMetric,
'squad': SQuAD,
}
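# Illustrative lookup (a sketch, not part of this module): resolve a metric class by its string name.
# Depending on the installed torchmetrics version, the constructor may require extra arguments.
#   metric_cls = MetricStringToTorchMetric['accuracy']
#   metric = metric_cls()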
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/evaluation/eval.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to compare the inference output of Thutmose tagger with multi_reference file
USAGE Example:
python eval.py \
--inference_file= \
--reference_file= \
--print_other_errors
The inference file is a tsv file in which the first column contains the predicted sentence text.
The reference file is a tsv file in which
the first column contains the input sentence text,
the second column contains the reference sentence text (taken from Google TN dataset)
the third column (optional) contains additional acceptable references for semiotic spans in this sentence.
E.g.
mizoguchi akiko september twenty ten mizoguchi akiko september 2010 DATE 2 5 | sept 2010 | sep. 2010 ...
(to get a reference file see the last steps in examples/nlp/text_normalization_as_tagging/prepare_dataset_en.sh,
starting from ".../examples/nlp/text_normalization_as_tagging/evaluation/get_multi_reference_vocab.py"
)
The script outputs the following metrics:
Word Error Rate (WER) - an automatic metric commonly used in ASR.
It does not take into account additional references.
Sentence accuracy:
The sentence is regarded as correct if its characters (without spaces) match the reference.
It takes into account additional references.
If at least one digit character doesn't match, the sentence is regarded as containing a Digit Error.
If all digit characters match, but at least one non-digit character doesn't match,
the sentence is regarded as containing an Other Error.
"""
import re
from argparse import ArgumentParser
from nemo.collections.asr.metrics.wer import word_error_rate
parser = ArgumentParser(description="Compare inference output with multi-reference")
parser.add_argument("--inference_file", type=str, required=True, help="Path to inference file")
parser.add_argument(
"--print_other_errors",
action='store_true',
help="Whether to print other errors, if false only digit errors will be printed",
)
parser.add_argument("--reference_file", type=str, required=True, help="Path to reference file")
args = parser.parse_args()
# Main code
if __name__ == "__main__":
inputs = []
references = [] # list(size=len(inputs)) of lists
skip_ids = set() # sentences ids to be skipped during evaluation
with open(args.reference_file, "r", encoding="utf-8") as f:
for line in f:
multi_references = []
parts = line.strip().split("\t")
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Bad format: " + line)
words = parts[0].split()
inputs.append(words)
if len(parts) == 3: # there are non-trivial semiotic spans
multi_references.append("")
input_position = 0
if "TELEPHONE" in parts[2] or "ELECTRONIC" in parts[2]:
skip_ids.add(len(references))
spans = parts[2].split(";")
multi_references_updated = []
for span in spans:
span_parts = span.split(" | ")
try:
sem, begin, end = span_parts[0].split(" ")
except Exception:
print("error: ", line)
continue
begin = int(begin)
end = int(end)
for ref in multi_references:
if len(span_parts) > 20 or len(multi_references_updated) > 20000:
print("warning: too many references: ", inputs[-1])
break
for tr_variant in span_parts[1:]:
multi_references_updated.append(
ref
+ " "
+ " ".join(inputs[-1][input_position:begin]) # copy needed words from input
+ " "
+ tr_variant
)
multi_references = multi_references_updated[:] # copy
multi_references_updated = []
input_position = end
for i in range(len(multi_references)): # copy needed words from the input end
multi_references[i] += " " + " ".join(inputs[-1][input_position : len(inputs[-1])])
# the last reference added is the actual one
multi_references.append(parts[1])
references.append(multi_references)
predictions = []
predicted_tags = []
predicted_semiotic = []
# load predictions
with open(args.inference_file, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split("\t")
if len(parts) == 1:
predictions.append(parts[0].casefold())
predicted_tags.append([])
continue
if len(parts) != 5:
raise ValueError("Bad format: " + line)
prediction, inp_str, tag_str, tags_with_swap_str, semiotic = parts
predictions.append(prediction.casefold())
tags = tag_str.split(" ")
predicted_tags.append(tags)
predicted_semiotic.append(semiotic)
sentences_with_errors_on_digits = 0
correct_sentences_disregarding_space = 0
if len(inputs) != len(predictions) or len(inputs) != len(references):
raise ValueError(
"Length mismatch: len(inputs)="
+ str(len(inputs))
+ "; len(predictions)="
+ str(len(predictions))
+ "; len(references)="
+ str(len(references))
)
refs_for_wer = []
preds_for_wer = []
for i in range(len(inputs)):
ok_digit = False
ok_all = False
if i in skip_ids:
continue
refs_for_wer.append(references[i][-1])
preds_for_wer.append(predictions[i])
for ref in references[i]:
ref_digit_fragments = re.findall(r"\d+", ref)
pred_digit_fragments = re.findall(r"\d+", predictions[i])
if "".join(pred_digit_fragments) == "".join(ref_digit_fragments):
ok_digit = True
if predictions[i].replace("_", "").replace(" ", "") == ref.replace("_", "").replace(" ", ""):
ok_all = True
if not ok_digit:
print("digit error:")
print("\tinput=", " ".join(inputs[i]))
print("\ttags=", " ".join(predicted_tags[i]))
print("\tpred=", predictions[i])
print("\tsemiotic=", predicted_semiotic[i])
print("\tref=", references[i][-1]) # last reference is actual reference
sentences_with_errors_on_digits += 1
elif ok_all:
correct_sentences_disregarding_space += 1
elif args.print_other_errors:
print("other error:")
print("\tinput=", " ".join(inputs[i]))
print("\ttags=", " ".join(predicted_tags[i]))
print("\tpred=", predictions[i])
print("\tsemiotic=", predicted_semiotic[i])
print("\tref=", references[i][-1]) # last reference is actual reference
wer = word_error_rate(refs_for_wer, preds_for_wer)
print("WER: ", wer)
print(
"Sentence accuracy: ",
correct_sentences_disregarding_space / (len(inputs) - len(skip_ids)),
correct_sentences_disregarding_space,
)
print(
"digit errors: ",
sentences_with_errors_on_digits / (len(inputs) - len(skip_ids)),
sentences_with_errors_on_digits,
)
print(
"other errors: ",
(len(inputs) - len(skip_ids) - correct_sentences_disregarding_space - sentences_with_errors_on_digits)
/ (len(inputs) - len(skip_ids)),
len(inputs) - len(skip_ids) - correct_sentences_disregarding_space - sentences_with_errors_on_digits,
)
|
gkucsko/NeMo
|
nemo_text_processing/text_normalization/ru/verbalizers/verbalize.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from nemo_text_processing.text_normalization.en.verbalizers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.ru.verbalizers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.ru.verbalizers.date import DateFst
from nemo_text_processing.text_normalization.ru.verbalizers.decimal import DecimalFst
from nemo_text_processing.text_normalization.ru.verbalizers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.ru.verbalizers.measure import MeasureFst
from nemo_text_processing.text_normalization.ru.verbalizers.money import MoneyFst
from nemo_text_processing.text_normalization.ru.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.ru.verbalizers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.ru.verbalizers.time import TimeFst
class VerbalizeFst(GraphFst):
"""
Composes other verbalizer grammars.
For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
More details to deployment at NeMo/tools/text_processing_deployment.
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)
cardinal = CardinalFst()
cardinal_graph = cardinal.fst
ordinal_graph = OrdinalFst().fst
decimal = DecimalFst()
decimal_graph = decimal.fst
date = DateFst()
date_graph = date.fst
measure = MeasureFst()
measure_graph = measure.fst
electronic = ElectronicFst()
electronic_graph = electronic.fst
whitelist_graph = WhiteListFst().fst
money_graph = MoneyFst().fst
telephone_graph = TelephoneFst().fst
time_graph = TimeFst().fst
graph = (
measure_graph
| cardinal_graph
| decimal_graph
| ordinal_graph
| date_graph
| electronic_graph
| money_graph
| whitelist_graph
| telephone_graph
| time_graph
)
self.fst = graph
|
gkucsko/NeMo
|
nemo/collections/asr/parts/utils/diarization_utils.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import csv
import json
import os
from collections import OrderedDict as od
from datetime import datetime
from typing import Dict, List, Tuple
import numpy as np
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.models import ClusteringDiarizer
from nemo.collections.asr.parts.utils.speaker_utils import (
audio_rttm_map,
get_uniqname_from_filepath,
labels_to_rttmfile,
rttm_to_labels,
write_rttm2manifest,
)
from nemo.utils import logging
try:
import arpa
ARPA = True
except ImportError:
ARPA = False
try:
import diff_match_patch
DIFF_MATCH_PATCH = True
except ImportError:
DIFF_MATCH_PATCH = False
__all__ = ['ASR_DIAR_OFFLINE']
def dump_json_to_file(file_path, riva_dict):
"""
Write a json file from the riva_dict dictionary.
"""
with open(file_path, "w") as outfile:
json.dump(riva_dict, outfile, indent=4)
def write_txt(w_path, val):
"""
Write a text file from the string input.
"""
with open(w_path, "w") as output:
output.write(val + '\n')
return None
def get_diff_text(text1: str, text2: str) -> List[Tuple[int, str]]:
"""
Compute the word-level difference between two transcript strings using diff_match_patch.
"""
orig_words = '\n'.join(text1.split()) + '\n'
pred_words = '\n'.join(text2.split()) + '\n'
diff = diff_match_patch.diff_match_patch()
diff.Diff_Timeout = 0
orig_enc, pred_enc, enc = diff.diff_linesToChars(orig_words, pred_words)
diffs = diff.diff_main(orig_enc, pred_enc, False)
diff.diff_charsToLines(diffs, enc)
return diffs
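# Note: diff_match_patch.diff_main returns a list of (op, text) tuples where op is 0 for equal spans,
# -1 for text present only in text1 (the reference) and 1 for text present only in text2 (the hypothesis);
# get_speaker_error_mismatch and get_speaker_error_match below consume these codes.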
def get_speaker_error_mismatch(ctm_error_dict, error_buffer, w_range_buffer, pred_rttm_eval):
"""
Calculate the diarization confusion error using the reference CTM file.
"""
correct_count, error_count, align_error = 0, 0, []
for k, _d in enumerate(error_buffer):
if _d[0] == 1:
stt, end = w_range_buffer[k]
bool_list = [_bool for _bool in pred_rttm_eval[stt:end]]
error_count = len(bool_list) - sum(bool_list)
ctm_error_dict['diar_confuse_count'] += error_count
def get_speaker_error_match(ctm_error_dict, w_range, ctm_info_list, pred_info_list, mapping_dict):
"""
Count the words with wrong speaker assignments.
"""
error_count, align_error_list = 0, []
for ref, prd in zip(range(w_range[0][0], w_range[0][1]), range(w_range[1][0], w_range[1][1])):
ref_spk, ref_start, ref_end = ctm_info_list[ref]
pred_spk, pred_start, pred_end = pred_info_list[prd]
if pred_spk in mapping_dict:
error_count += 1 if ref_spk != mapping_dict[pred_spk] else 0
else:
error_count += 1
align_error_list.append(ref_start - pred_start)
ctm_error_dict['diar_confuse_count'] += error_count
return error_count, align_error_list
class ASR_DIAR_OFFLINE(object):
"""
A class designed for performing ASR and diarization together.
"""
def __init__(self, **cfg_diarizer):
self.manifest_filepath = cfg_diarizer['manifest_filepath']
self.params = cfg_diarizer['asr']['parameters']
self.ctc_decoder_params = cfg_diarizer['asr']['ctc_decoder_parameters']
self.realigning_lm_params = cfg_diarizer['asr']['realigning_lm_parameters']
self.nonspeech_threshold = self.params['asr_based_vad_threshold']
self.fix_word_ts_with_VAD = self.params['fix_word_ts_with_VAD']
self.root_path = cfg_diarizer['out_dir']
self.vad_threshold_for_word_ts = 0.7
self.max_word_ts_length_in_sec = 0.6
self.cfg_diarizer = cfg_diarizer
self.word_ts_anchor_offset = 0.0
self.run_ASR = None
self.realigning_lm = None
self.ctm_exists = {}
self.frame_VAD = {}
self.align_error_list = []
self.AUDIO_RTTM_MAP = audio_rttm_map(self.manifest_filepath)
self.audio_file_list = [value['audio_filepath'] for _, value in self.AUDIO_RTTM_MAP.items()]
self.color_palette = {
'speaker_0': '\033[1;32m',
'speaker_1': '\033[1;34m',
'speaker_2': '\033[1;30m',
'speaker_3': '\033[1;31m',
'speaker_4': '\033[1;35m',
'speaker_5': '\033[1;36m',
'speaker_6': '\033[1;37m',
'speaker_7': '\033[1;30m',
'speaker_8': '\033[1;33m',
'speaker_9': '\033[0;34m',
'white': '\033[0;37m',
}
def load_realigning_LM(self):
self.N_range = (
self.realigning_lm_params['min_number_of_words'],
self.realigning_lm_params['max_number_of_words'],
)
self.stt_end_tokens = ['</s>', '<s>']
logging.info(f"Loading LM for realigning: {self.realigning_lm_params['arpa_language_model']}")
return arpa.loadf(self.realigning_lm_params['arpa_language_model'])[0]
def save_VAD_labels_list(self, word_ts_dict):
"""
Take the non-speech labels from the logit output. The logit output is obtained from
the run_ASR() function.
Args:
word_ts_dict (dict):
Dictionary mapping each unique audio file ID to its list of word timestamps.
"""
self.VAD_RTTM_MAP = {}
for idx, (uniq_id, word_timestamps) in enumerate(word_ts_dict.items()):
speech_labels_float = self.get_speech_labels_from_decoded_prediction(word_timestamps)
speech_labels = self.get_str_speech_labels(speech_labels_float)
output_path = os.path.join(self.root_path, 'pred_rttms')
if not os.path.exists(output_path):
os.makedirs(output_path)
filename = labels_to_rttmfile(speech_labels, uniq_id, output_path)
self.VAD_RTTM_MAP[uniq_id] = {'audio_filepath': self.audio_file_list[idx], 'rttm_filepath': filename}
def get_speech_labels_from_decoded_prediction(self, input_word_ts):
"""
Extract speech labels from the ASR output (decoded predictions)
Args:
input_word_ts (list):
List containing word timestamps.
Returns:
word_ts (list):
The ranges of the speech segments, which are merged ranges of input_word_ts.
"""
speech_labels = []
word_ts = copy.deepcopy(input_word_ts)
if word_ts == []:
return speech_labels
else:
count = len(word_ts) - 1
while count > 0:
if len(word_ts) > 1:
if word_ts[count][0] - word_ts[count - 1][1] <= self.nonspeech_threshold:
trangeB = word_ts.pop(count)
trangeA = word_ts.pop(count - 1)
word_ts.insert(count - 1, [trangeA[0], trangeB[1]])
count -= 1
return word_ts
def run_diarization(
self, diar_model_config, word_timestamps,
):
"""
Launch the diarization process using the given VAD timestamp (oracle_manifest).
Args:
word_timestamps (dict):
Dictionary mapping each unique audio file ID to its list of word timestamps.
Returns:
diar_hyp (dict):
A dictionary containing rttm results which are indexed by a unique ID.
score Tuple[pyannote object, dict]:
A tuple containing pyannote metric instance and mapping dictionary between
speakers in hypotheses and speakers in reference RTTM files.
"""
if diar_model_config.diarizer.asr.parameters.asr_based_vad:
self.save_VAD_labels_list(word_timestamps)
oracle_manifest = os.path.join(self.root_path, 'asr_vad_manifest.json')
oracle_manifest = write_rttm2manifest(self.VAD_RTTM_MAP, oracle_manifest)
diar_model_config.diarizer.vad.model_path = None
diar_model_config.diarizer.vad.external_vad_manifest = oracle_manifest
diar_model = ClusteringDiarizer(cfg=diar_model_config)
score = diar_model.diarize()
if diar_model_config.diarizer.vad.model_path is not None and not diar_model_config.diarizer.oracle_vad:
self.get_frame_level_VAD(vad_processing_dir=diar_model.vad_pred_dir)
diar_hyp = {}
for k, audio_file_path in enumerate(self.audio_file_list):
uniq_id = get_uniqname_from_filepath(audio_file_path)
pred_rttm = os.path.join(self.root_path, 'pred_rttms', uniq_id + '.rttm')
diar_hyp[uniq_id] = rttm_to_labels(pred_rttm)
return diar_hyp, score
def get_frame_level_VAD(self, vad_processing_dir):
"""
Read frame-level VAD outputs.
Args:
vad_processing_dir (str):
The path where VAD results are saved.
"""
for uniq_id in self.AUDIO_RTTM_MAP:
frame_vad = os.path.join(vad_processing_dir, uniq_id + '.median')
frame_vad_float_list = []
with open(frame_vad, 'r') as fp:
for line in fp.readlines():
frame_vad_float_list.append(float(line.strip()))
self.frame_VAD[uniq_id] = frame_vad_float_list
def gather_eval_results(self, metric, mapping_dict, total_riva_dict):
"""
Gather diarization evaluation results from pyannote DiarizationErrorRate metric object.
Args:
metric (DiarizationErrorRate metric): DiarizationErrorRate metric pyannote object
mapping_dict (dict): A dictionary containing speaker mapping labels for each audio file with key as unique name
Returns:
DER_result_dict (dict): A dictionary containing scores for each audio file along with aggregated results
"""
results = metric.results_
DER_result_dict = {}
count_correct_spk_counting = 0
for result in results:
key, score = result
pred_rttm = os.path.join(self.root_path, 'pred_rttms', key + '.rttm')
pred_labels = rttm_to_labels(pred_rttm)
est_n_spk = self.get_num_of_spk_from_labels(pred_labels)
ref_rttm = self.AUDIO_RTTM_MAP[key]['rttm_filepath']
ref_labels = rttm_to_labels(ref_rttm)
ref_n_spk = self.get_num_of_spk_from_labels(ref_labels)
if self.cfg_diarizer['oracle_vad']:
score['missed detection'] = 0
score['false alarm'] = 0
_DER, _CER, _FA, _MISS = (
(score['confusion'] + score['false alarm'] + score['missed detection']) / score['total'],
score['confusion'] / score['total'],
score['false alarm'] / score['total'],
score['missed detection'] / score['total'],
)
DER_result_dict[key] = {
"DER": round(_DER, 4),
"CER": round(_CER, 4),
"FA": round(_FA, 4),
"MISS": round(_MISS, 4),
"est_n_spk": est_n_spk,
"mapping": mapping_dict[key],
"is_spk_count_correct": (est_n_spk == ref_n_spk),
}
count_correct_spk_counting += int(est_n_spk == ref_n_spk)
DER, CER, FA, MISS = (
abs(metric),
metric['confusion'] / metric['total'],
metric['false alarm'] / metric['total'],
metric['missed detection'] / metric['total'],
)
DER_result_dict["total"] = {
"DER": DER,
"CER": CER,
"FA": FA,
"MISS": MISS,
"spk_counting_acc": count_correct_spk_counting / len(metric.results_),
}
return DER_result_dict
def get_the_closest_silence_start(self, vad_index_word_end, vad_frames, params, offset=10):
"""
Find the closest silence frame from the given starting position.
Args:
vad_index_word_end (float):
The timestamp of the end of the current word.
vad_frames (numpy.array):
The numpy array containing frame-level VAD probability.
params (dict):
Contains the parameters for diarization and ASR decoding.
Returns:
c (float):
A timestamp of the earliest start of a silence region from
the given time point, vad_index_word_end.
"""
c = vad_index_word_end + offset
limit = int(100 * self.max_word_ts_length_in_sec + vad_index_word_end)
while c < len(vad_frames):
if vad_frames[c] < self.vad_threshold_for_word_ts:
break
else:
c += 1
if c > limit:
break
c = min(len(vad_frames) - 1, c)
c = round(c / 100.0, 2)
return c
def compensate_word_ts_list(self, audio_file_list, word_ts_dict, params):
"""
Compensate the word timestamps based on the VAD output.
The length of each word is capped by self.max_word_ts_length_in_sec.
Args:
audio_file_list (list):
List containing audio file paths.
word_ts_dict (dict):
Dictionary containing timestamps of words.
params (dict):
The parameter dictionary for diarization and ASR decoding.
Returns:
enhanced_word_ts_dict (dict):
Dictionary of the enhanced word timestamp values, indexed by unique audio file ID.
"""
enhanced_word_ts_dict = {}
for idx, (uniq_id, word_ts_seq_list) in enumerate(word_ts_dict.items()):
N = len(word_ts_seq_list)
enhanced_word_ts_buffer = []
for k, word_ts in enumerate(word_ts_seq_list):
if k < N - 1:
word_len = round(word_ts[1] - word_ts[0], 2)
len_to_next_word = round(word_ts_seq_list[k + 1][0] - word_ts[0] - 0.01, 2)
if uniq_id in self.frame_VAD:
vad_index_word_end = int(100 * word_ts[1])
closest_sil_stt = self.get_the_closest_silence_start(
vad_index_word_end, self.frame_VAD[uniq_id], params
)
vad_est_len = round(closest_sil_stt - word_ts[0], 2)
else:
vad_est_len = len_to_next_word
min_candidate = min(vad_est_len, len_to_next_word)
fixed_word_len = max(min(self.max_word_ts_length_in_sec, min_candidate), word_len)
enhanced_word_ts_buffer.append([word_ts[0], word_ts[0] + fixed_word_len])
else:
enhanced_word_ts_buffer.append([word_ts[0], word_ts[1]])
enhanced_word_ts_dict[uniq_id] = enhanced_word_ts_buffer
return enhanced_word_ts_dict
def get_transcript_with_speaker_labels(self, diar_hyp, word_hyp, word_ts_hyp):
"""
Match the diarization result with the ASR output.
The words and the timestamps for the corresponding words are matched
in a for loop.
Args:
diar_hyp (dict):
Dictionary of the diarization output labels (strings), indexed by unique audio file ID.
word_hyp (dict):
Dictionary of words from ASR inference.
word_ts_hyp (dict):
Dictionary containing the start time and the end time of each word.
Returns:
total_riva_dict (dict):
A dictionary containing word timestamps, speaker labels and words.
"""
total_riva_dict = {}
if self.fix_word_ts_with_VAD:
if self.frame_VAD == {}:
logging.info(
f"VAD timestamps are not provided. Fixing word timestamps without VAD. Please check the hydra configurations."
)
word_ts_refined = self.compensate_word_ts_list(self.audio_file_list, word_ts_hyp, self.params)
else:
word_ts_refined = word_ts_hyp
if self.realigning_lm_params['arpa_language_model']:
if not ARPA:
raise ImportError(
'LM for realigning is provided but arpa is not installed. Install arpa using PyPI: pip install arpa'
)
else:
self.realigning_lm = self.load_realigning_LM()
for k, audio_file_path in enumerate(self.audio_file_list):
uniq_id = get_uniqname_from_filepath(audio_file_path)
word_dict_seq_list = self.get_word_dict_seq_list(uniq_id, diar_hyp, word_hyp, word_ts_hyp, word_ts_refined)
if self.realigning_lm:
word_dict_seq_list = self.realign_words_with_lm(word_dict_seq_list)
self.make_json_output(uniq_id, diar_hyp, word_dict_seq_list, total_riva_dict)
logging.info(f"Diarization with ASR output files are saved in: {self.root_path}/pred_rttms")
return total_riva_dict
def get_word_dict_seq_list(self, uniq_id, diar_hyp, word_hyp, word_ts_hyp, word_ts_refined):
"""
Save the hypothesis words and speaker labels to a dictionary variable for future use.
"""
words, labels = word_hyp[uniq_id], diar_hyp[uniq_id]
start_point, end_point, speaker = labels[0].split()
word_pos, idx = 0, 0
word_dict_seq_list = []
for j, word_ts_stt_end in enumerate(word_ts_hyp[uniq_id]):
word_pos = self.get_word_timestamp_anchor(word_ts_stt_end)
if word_pos > float(end_point):
idx += 1
idx = min(idx, len(labels) - 1)
start_point, end_point, speaker = labels[idx].split()
refined_word_ts_stt_end = word_ts_refined[uniq_id][j]
stt_sec, end_sec = round(refined_word_ts_stt_end[0], 2), round(refined_word_ts_stt_end[1], 2)
word_dict_seq_list.append(
{'word': words[j], 'start_time': stt_sec, 'end_time': end_sec, 'speaker_label': speaker}
)
return word_dict_seq_list
def make_json_output(self, uniq_id, diar_hyp, word_dict_seq_list, total_riva_dict):
"""
Generate json output files and transcripts from the ASR and diarization results.
Args:
uniq_id (str):
A unique ID (key) that identifies each input audio file.
diar_hyp (dict):
Dictionary containing the diarization hypothesis labels, indexed by unique audio file ID.
word_dict_seq_list (list):
List containing words and corresponding word timestamps in dictionary format.
total_riva_dict (dict):
Dictionary containing the final transcription, alignment and speaker labels.
Returns:
total_riva_dict (dict):
A dictionary containing overall results of diarization and ASR inference.
"""
word_seq_list, audacity_label_words = [], []
labels = diar_hyp[uniq_id]
n_spk = self.get_num_of_spk_from_labels(labels)
riva_dict = od(
{
'status': 'Success',
'session_id': uniq_id,
'transcription': '',
'speaker_count': n_spk,
'words': [],
'sentences': [],
}
)
gecko_dict = od({'schemaVersion': 2.0, 'monologues': []})
start_point, end_point, speaker = labels[0].split()
prev_speaker = speaker
terms_list = []
sentences = []
sentence = {'speaker': speaker, 'start_point': float(start_point), 'end_point': float(end_point), 'text': ''}
logging.info(f"Creating results for Session: {uniq_id} n_spk: {n_spk} ")
for k, line_dict in enumerate(word_dict_seq_list):
word, speaker = line_dict['word'], line_dict['speaker_label']
word_seq_list.append(word)
start_point, end_point = line_dict['start_time'], line_dict['end_time']
if speaker != prev_speaker:
if len(terms_list) != 0:
gecko_dict['monologues'].append(
{'speaker': {'name': None, 'id': prev_speaker}, 'terms': terms_list}
)
terms_list = []
# remove trailing space in text
sentence['text'] = sentence['text'].strip()
# store last sentence
sentences.append(sentence)
# start construction of a new sentence
sentence = {'speaker': speaker, 'start_point': start_point, 'end_point': end_point, 'text': ''}
else:
# correct the ending time
sentence['end_point'] = end_point
stt_sec, end_sec = start_point, end_point
terms_list.append({'start': stt_sec, 'end': end_sec, 'text': word, 'type': 'WORD'})
# add current word to sentence
sentence['text'] += word.strip() + ' '
self.add_json_to_dict(riva_dict, word, stt_sec, end_sec, speaker)
audacity_label_words.append(self.get_audacity_label(word, stt_sec, end_sec, speaker))
total_riva_dict[uniq_id] = riva_dict
prev_speaker = speaker
# note that we need to add the very last sentence.
sentence['text'] = sentence['text'].strip()
sentences.append(sentence)
gecko_dict['monologues'].append({'speaker': {'name': None, 'id': speaker}, 'terms': terms_list})
riva_dict['transcription'] = ' '.join(word_seq_list)
self.write_and_log(uniq_id, riva_dict, audacity_label_words, gecko_dict, sentences)
return total_riva_dict
def get_realignment_ranges(self, k, word_seq_len):
"""
Calculate word ranges for realignment operation.
N1, N2 are calculated to not exceed the start and end of the input word sequence.
"""
if k < self.N_range[1]:
N1 = max(k, self.N_range[0])
N2 = min(word_seq_len - k, self.N_range[1])
elif k > (word_seq_len - self.N_range[1]):
N1 = min(k, self.N_range[1])
N2 = max(word_seq_len - k, self.N_range[0])
else:
N1, N2 = self.N_range[1], self.N_range[1]
return N1, N2
def get_word_timestamp_anchor(self, word_ts_stt_end: List[float]) -> float:
"""
Determine a reference point to match a word with the diarization results.
word_ts_anchor_pos determines the position of a word in relation to the given diarization labels:
- 'start' uses the beginning of the word
- 'end' uses the end of the word
- 'mid' uses the mean of start and end of the word
word_ts_anchor_offset determines how much offset we want to add to the anchor position.
It is recommended to use the default value.
"""
if self.params['word_ts_anchor_pos'] == 'start':
word_pos = word_ts_stt_end[0]
elif self.params['word_ts_anchor_pos'] == 'end':
word_pos = word_ts_stt_end[1]
elif self.params['word_ts_anchor_pos'] == 'mid':
word_pos = (word_ts_stt_end[0] + word_ts_stt_end[1]) / 2
else:
logging.info(
f"word_ts_anchor_pos: {self.params['word_ts_anchor']} is not a supported option. Using the default 'start' option."
)
word_pos = word_ts_stt_end[0]
word_pos = word_pos + self.word_ts_anchor_offset
return word_pos
def realign_words_with_lm(self, word_dict_seq_list: List[Dict[str, float]]):
"""
Realign the mapping between speaker labels and words using a language model.
The realigning process calculates the probability of the certain range around the words,
especially at the boundary between two hypothetical sentences spoken by different speakers.
<Example> k-th word: "but"
hyp_former:
since i think like tuesday </s> <s> but he's coming back to albuquerque
hyp_latter:
since i think like tuesday but </s> <s> he's coming back to albuquerque
The joint probabilities of words in the sentence are computed for these two hypotheses. In addition,
logprob_diff_threshold parameter is used for reducing the false positive realigning.
"""
word_seq_len = len(word_dict_seq_list)
hyp_w_dict_list, spk_list = [], []
for k, line_dict in enumerate(word_dict_seq_list):
word, spk_label = line_dict['word'], line_dict['speaker_label']
hyp_w_dict_list.append(word)
spk_list.append(spk_label)
realigned_list = []
org_spk_list = copy.deepcopy(spk_list)
for k, line_dict in enumerate(word_dict_seq_list):
if self.N_range[0] < k < (word_seq_len - self.N_range[0]) and (
spk_list[k] != org_spk_list[k + 1] or spk_list[k] != org_spk_list[k - 1]
):
N1, N2 = self.get_realignment_ranges(k, word_seq_len)
hyp_former = self.realigning_lm.log_s(
' '.join(hyp_w_dict_list[k - N1 : k] + self.stt_end_tokens + hyp_w_dict_list[k : k + N2])
)
hyp_latter = self.realigning_lm.log_s(
' '.join(hyp_w_dict_list[k - N1 : k + 1] + self.stt_end_tokens + hyp_w_dict_list[k + 1 : k + N2])
)
log_p = [hyp_former, hyp_latter]
p_order = np.argsort(log_p)[::-1]
if log_p[p_order[0]] > log_p[p_order[1]] + self.realigning_lm_params['logprob_diff_threshold']:
if p_order[0] == 0:
spk_list[k] = org_spk_list[k + 1]
line_dict['speaker_label'] = spk_list[k]
realigned_list.append(line_dict)
return realigned_list
def get_alignment_errors(self, ctm_content, hyp_w_dict_list, mapping_dict):
"""
Compute various types of errors using the provided CTM file and RTTM file.
The variables computed for CTM file based evaluation:
error_count : Number of words that have wrong speaker labels
align_error : (reference word timestamp - hypothesis word timestamp)
The error metrics in ctm_error_dict variable:
ref_word_count: The number of words in the reference transcript
hyp_word_count: The number of words in the hypothesis
diar_confuse_count: Number of incorrectly diarized words
all_correct_count: The number of words for which both the hypothesis word and the speaker label are correct.
hyp_based_wder: The number of incorrectly diarized words divided by the number of words in the hypothesis
ref_based_wder: The number of incorrectly diarized words divided by the number of words in the reference transcript
"""
ctm_ref_word_seq, ctm_info_list = [], []
pred_word_seq, pred_info_list, pred_rttm_eval = [], [], []
for ctm_line in ctm_content:
spl = ctm_line.split()
ctm_ref_word_seq.append(spl[4])
ctm_info_list.append([spl[1], float(spl[2]), float(spl[3])])
for w_dict in hyp_w_dict_list:
pred_rttm_eval.append(w_dict['diar_correct'])
pred_word_seq.append(w_dict['word'])
pred_info_list.append([w_dict['speaker_label'], w_dict['start_time'], w_dict['end_time']])
ctm_text = ' '.join(ctm_ref_word_seq)
pred_text = ' '.join(pred_word_seq)
diff = get_diff_text(ctm_text, pred_text)
ref_word_count, hyp_word_count, all_correct_count, wder_count = 0, 0, 0, 0
ctm_error_dict = {
'ref_word_count': 0,
'hyp_word_count': 0,
'diar_confuse_count': 0,
'all_correct_count': 0,
'hyp_based_wder': 0,
'ref_based_wder': 0,
}
error_buffer, w_range_buffer, cumul_align_error = [], [], []
for k, d in enumerate(diff):
word_seq = d[1].strip().split('\n')
if d[0] == 0:
if error_buffer != []:
get_speaker_error_mismatch(ctm_error_dict, error_buffer, w_range_buffer, pred_rttm_eval)
error_buffer, w_range_buffer = [], []
w_range = [
(ctm_error_dict['ref_word_count'], ctm_error_dict['ref_word_count'] + len(word_seq)),
(ctm_error_dict['hyp_word_count'], ctm_error_dict['hyp_word_count'] + len(word_seq)),
]
error_count, align_error = get_speaker_error_match(
ctm_error_dict, w_range, ctm_info_list, pred_info_list, mapping_dict
)
ctm_error_dict['all_correct_count'] += len(word_seq) - error_count
ctm_error_dict['ref_word_count'] += len(word_seq)
ctm_error_dict['hyp_word_count'] += len(word_seq)
cumul_align_error += align_error
elif d[0] == -1:
error_buffer.append(d)
w_range_buffer.append((ref_word_count, ref_word_count + len(word_seq)))
ctm_error_dict['ref_word_count'] += len(word_seq)
elif d[0] == 1:
error_buffer.append(d)
w_range_buffer.append((hyp_word_count, hyp_word_count + len(word_seq)))
ctm_error_dict['hyp_word_count'] += len(word_seq)
if error_buffer != []:
get_speaker_error_mismatch(ctm_error_dict, error_buffer, w_range_buffer, pred_rttm_eval)
ctm_error_dict['hyp_based_wder'] = round(
ctm_error_dict['diar_confuse_count'] / ctm_error_dict['hyp_word_count'], 4
)
ctm_error_dict['ref_based_wder'] = round(
ctm_error_dict['diar_confuse_count'] / ctm_error_dict['ref_word_count'], 4
)
ctm_error_dict['diar_trans_acc'] = round(
ctm_error_dict['all_correct_count'] / ctm_error_dict['ref_word_count'], 4
)
return cumul_align_error, ctm_error_dict
def get_WDER(self, total_riva_dict, DER_result_dict):
"""
Calculate the word-level diarization error rate (WDER). WDER is calculated by
counting the wrongly diarized words and dividing by the total number of words
recognized by the ASR model.
Args:
total_riva_dict (dict):
Dictionary that stores riva_dict(dict) which is indexed by uniq_id variable.
DER_result_dict (dict):
Dictionary that stores DER, FA, Miss, CER, mapping, the estimated
number of speakers and speaker counting accuracy.
Returns:
wder_dict (dict):
A dictionary containing WDER value for each session and total WDER.
"""
wder_dict, count_dict = {'session_level': {}}, {}
asr_eval_dict = {'hypotheses_list': [], 'references_list': []}
align_error_list = []
count_dict['total_ctm_wder_count'], count_dict['total_asr_and_spk_correct_words'] = 0, 0
(
count_dict['grand_total_ctm_word_count'],
count_dict['grand_total_pred_word_count'],
count_dict['grand_total_correct_word_count'],
) = (0, 0, 0)
if any(self.AUDIO_RTTM_MAP[uniq_id]['ctm_filepath'] is not None for uniq_id in self.AUDIO_RTTM_MAP.keys()):
if not DIFF_MATCH_PATCH:
raise ImportError(
'CTM file is provided but diff_match_patch is not installed. Install diff_match_patch using PyPI: pip install diff_match_patch'
)
for k, audio_file_path in enumerate(self.audio_file_list):
uniq_id = get_uniqname_from_filepath(audio_file_path)
error_dict = {'uniq_id': uniq_id}
ref_rttm = self.AUDIO_RTTM_MAP[uniq_id]['rttm_filepath']
ref_labels = rttm_to_labels(ref_rttm)
mapping_dict = DER_result_dict[uniq_id]['mapping']
hyp_w_dict_list = total_riva_dict[uniq_id]['words']
hyp_w_dict_list, word_seq_list, correct_word_count, rttm_wder = self.calculate_WDER_from_RTTM(
hyp_w_dict_list, ref_labels, mapping_dict
)
error_dict['rttm_based_wder'] = rttm_wder
error_dict.update(DER_result_dict[uniq_id])
# If CTM files are provided, evaluate word-level diarization and WER with the CTM files.
if self.AUDIO_RTTM_MAP[uniq_id]['ctm_filepath']:
self.ctm_exists[uniq_id] = True
ctm_content = open(self.AUDIO_RTTM_MAP[uniq_id]['ctm_filepath']).readlines()
self.get_ctm_based_eval(ctm_content, error_dict, count_dict, hyp_w_dict_list, mapping_dict)
else:
self.ctm_exists[uniq_id] = False
wder_dict['session_level'][uniq_id] = error_dict
asr_eval_dict['hypotheses_list'].append(' '.join(word_seq_list))
asr_eval_dict['references_list'].append(self.AUDIO_RTTM_MAP[uniq_id]['text'])
count_dict['grand_total_pred_word_count'] += len(hyp_w_dict_list)
count_dict['grand_total_correct_word_count'] += correct_word_count
wder_dict = self.get_wder_dict_values(asr_eval_dict, wder_dict, count_dict, align_error_list)
return wder_dict
def calculate_WDER_from_RTTM(self, hyp_w_dict_list, ref_labels, mapping_dict):
"""
Calculate word-level diarization error rate (WDER) using the provided RTTM files.
If lenient_overlap_WDER is True, the words are considered to be correctly diarized
if the words fall into overlapped regions that include the correct speaker labels.
Note that WDER values computed from RTTM may not be accurate if the word timestamps
have limited accuracy. It is recommended to use CTM files to compute an accurate
evaluation result.
"""
correct_word_count = 0
ref_label_list = [[float(x.split()[0]), float(x.split()[1])] for x in ref_labels]
ref_label_array = np.array(ref_label_list)
word_seq_list = []
for w_idx in range(len(hyp_w_dict_list)):
wdict = hyp_w_dict_list[w_idx]
wdict['diar_correct'] = False
speaker_label = wdict['speaker_label']
if speaker_label in mapping_dict:
est_spk_label = mapping_dict[speaker_label]
else:
continue
word_range = np.array(
[wdict['start_time'] + self.word_ts_anchor_offset, wdict['end_time'] + self.word_ts_anchor_offset]
)
word_seq_list.append(wdict['word'])
word_range_tile = np.tile(word_range, (ref_label_array.shape[0], 1))
ovl_bool = self.isOverlapArray(ref_label_array, word_range_tile)
if not np.any(ovl_bool):
continue
ovl_length = self.getOverlapRangeArray(ref_label_array, word_range_tile)
if self.params['lenient_overlap_WDER']:
ovl_length_list = list(ovl_length[ovl_bool])
max_ovl_sub_idx = np.where(ovl_length_list == np.max(ovl_length_list))[0]
max_ovl_idx = np.where(ovl_bool == True)[0][max_ovl_sub_idx]
ref_spk_labels = [x.split()[-1] for x in list(np.array(ref_labels)[max_ovl_idx])]
if est_spk_label in ref_spk_labels:
correct_word_count += 1
wdict['diar_correct'] = True
else:
max_ovl_sub_idx = np.argmax(ovl_length[ovl_bool])
max_ovl_idx = np.where(ovl_bool == True)[0][max_ovl_sub_idx]
_, _, ref_spk_label = ref_labels[max_ovl_idx].split()
if est_spk_label == ref_spk_label:
correct_word_count += 1
wdict['diar_correct'] = True
hyp_w_dict_list[w_idx] = wdict
rttm_wder = round(1 - (correct_word_count / len(hyp_w_dict_list)), 4)
return hyp_w_dict_list, word_seq_list, correct_word_count, rttm_wder
def get_ctm_based_eval(self, ctm_content, error_dict, count_dict, hyp_w_dict_list, mapping_dict):
"""
Calculate errors using the given CTM files.
"""
count_dict['grand_total_ctm_word_count'] += len(ctm_content)
align_errors, ctm_error_dict = self.get_alignment_errors(ctm_content, hyp_w_dict_list, mapping_dict)
count_dict['total_asr_and_spk_correct_words'] += ctm_error_dict['all_correct_count']
count_dict['total_ctm_wder_count'] += ctm_error_dict['diar_confuse_count']
self.align_error_list += align_errors
error_dict.update(ctm_error_dict)
def get_wder_dict_values(self, asr_eval_dict, wder_dict, count_dict, align_error_list):
"""
Calculate the total error rates for WDER, WER and alignment error.
"""
if '-' in asr_eval_dict['references_list'] or None in asr_eval_dict['references_list']:
wer = -1
else:
wer = word_error_rate(
hypotheses=asr_eval_dict['hypotheses_list'], references=asr_eval_dict['references_list']
)
wder_dict['total_WER'] = wer
wder_dict['total_wder_rttm'] = 1 - (
count_dict['grand_total_correct_word_count'] / count_dict['grand_total_pred_word_count']
)
if all(self.ctm_exists.values()):
wder_dict['total_wder_ctm_ref_trans'] = (
count_dict['total_ctm_wder_count'] / count_dict['grand_total_ctm_word_count']
if count_dict['grand_total_ctm_word_count'] > 0
else -1
)
wder_dict['total_wder_ctm_pred_asr'] = (
count_dict['total_ctm_wder_count'] / count_dict['grand_total_pred_word_count']
if count_dict['grand_total_pred_word_count'] > 0
else -1
)
wder_dict['total_diar_trans_acc'] = (
count_dict['total_asr_and_spk_correct_words'] / count_dict['grand_total_ctm_word_count']
if count_dict['grand_total_ctm_word_count'] > 0
else -1
)
wder_dict['total_alignment_error_mean'] = (
np.mean(self.align_error_list).round(4) if self.align_error_list != [] else -1
)
wder_dict['total_alignment_error_std'] = (
np.std(self.align_error_list).round(4) if self.align_error_list != [] else -1
)
return wder_dict
def get_str_speech_labels(self, speech_labels_float):
"""
Convert speech_labels_float to a list that contains string values.
"""
speech_labels = []
for start, end in speech_labels_float:
speech_labels.append("{:.3f} {:.3f} speech".format(start, end))
return speech_labels
def write_result_in_csv(self, args, WDER_dict, DER_result_dict, effective_WDER):
"""
This function is for development use.
Saves the diarization result into a csv file.
"""
row = [
args.asr_based_vad_threshold,
WDER_dict['total'],
DER_result_dict['total']['DER'],
DER_result_dict['total']['FA'],
DER_result_dict['total']['MISS'],
DER_result_dict['total']['CER'],
DER_result_dict['total']['spk_counting_acc'],
effective_WDER,
]
with open(os.path.join(self.root_path, args.csv), 'a') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(row)
def write_session_level_result_in_csv(self, WDER_dict):
"""
This function is for development use when a CTM file is provided.
Saves the session-level diarization and ASR result into a csv file.
"""
target_path = f"{self.root_path}/pred_rttms/ctm_eval.csv"
logging.info(f"Writing {target_path}")
csv_columns = [
'uniq_id',
'DER',
'CER',
'FA',
'MISS',
'est_n_spk',
'is_spk_count_correct',
'ref_word_count',
'hyp_word_count',
'diar_confuse_count',
'all_correct_count',
'diar_trans_acc',
'hyp_based_wder',
'ref_based_wder',
'rttm_based_wder',
'mapping',
]
dict_data = [x for k, x in WDER_dict['session_level'].items()]
try:
with open(target_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
except IOError:
logging.info("I/O error has occurred while writing a csv file.")
def break_lines(self, string_out, max_chars_in_line=90):
"""
Break the lines in the transcript.
"""
color_str_len = len('\033[1;00m') if self.params['colored_text'] else 0
split_string_out = string_out.split('\n')
return_string_out = []
for org_chunk in split_string_out:
buffer = []
if len(org_chunk) - color_str_len > max_chars_in_line:
color_str = org_chunk[:color_str_len] if color_str_len > 0 else ''
for i in range(color_str_len, len(org_chunk), max_chars_in_line):
trans_str = org_chunk[i : i + max_chars_in_line]
if len(trans_str.strip()) > 0:
c_trans_str = color_str + trans_str
buffer.append(c_trans_str)
return_string_out.extend(buffer)
else:
return_string_out.append(org_chunk)
return '\n'.join(return_string_out)
def write_and_log(self, uniq_id, riva_dict, audacity_label_words, gecko_dict, sentences):
"""
Write output files and display logging messages.
"""
# print the sentences in the .txt output
string_out = self.print_sentences(sentences, self.params)
if self.params['break_lines']:
string_out = self.break_lines(string_out)
# add sentences to the json array
self.add_sentences_to_dict(riva_dict, sentences)
ROOT = self.root_path
dump_json_to_file(f'{ROOT}/pred_rttms/{uniq_id}.json', riva_dict)
dump_json_to_file(f'{ROOT}/pred_rttms/{uniq_id}_gecko.json', gecko_dict)
write_txt(f'{ROOT}/pred_rttms/{uniq_id}.txt', string_out.strip())
write_txt(f'{ROOT}/pred_rttms/{uniq_id}.w.label', '\n'.join(audacity_label_words))
def print_errors(self, DER_result_dict, WDER_dict):
"""
Print a slew of error metrics for ASR and Diarization.
"""
if all(self.ctm_exists.values()):
self.write_session_level_result_in_csv(WDER_dict)
logging.info(
f"\nDER : {DER_result_dict['total']['DER']:.4f} \
\nFA : {DER_result_dict['total']['FA']:.4f} \
\nMISS : {DER_result_dict['total']['MISS']:.4f} \
\nCER : {DER_result_dict['total']['CER']:.4f} \
\nrttm WDER : {WDER_dict['total_wder_rttm']:.4f} \
\nCTM WDER Ref. : {WDER_dict['total_wder_ctm_ref_trans']:.4f} \
\nCTM WDER ASR Hyp. : {WDER_dict['total_wder_ctm_pred_asr']:.4f} \
\nCTM diar-trans Acc.: {WDER_dict['total_diar_trans_acc']:.4f} \
\nmanifest text WER : {WDER_dict['total_WER']:.4f} \
\nalignment Err. : Mean: {WDER_dict['total_alignment_error_mean']:.4f} STD:{WDER_dict['total_alignment_error_std']:.4f} \
\nSpk. counting Acc. : {DER_result_dict['total']['spk_counting_acc']:.4f}"
)
else:
logging.info(
f"\nDER : {DER_result_dict['total']['DER']:.4f} \
\nFA : {DER_result_dict['total']['FA']:.4f} \
\nMISS : {DER_result_dict['total']['MISS']:.4f} \
\nCER : {DER_result_dict['total']['CER']:.4f} \
\nWDER : {WDER_dict['total_wder_rttm']:.4f} \
\nWER : {WDER_dict['total_WER']:.4f} \
\nSpk. counting acc.: {DER_result_dict['total']['spk_counting_acc']:.4f}"
)
def print_sentences(self, sentences, params):
"""
Print a transcript with speaker labels and timestamps.
"""
# init output
string_out = ''
for sentence in sentences:
# extract info
speaker = sentence['speaker']
start_point = sentence['start_point']
end_point = sentence['end_point']
text = sentence['text']
if params['colored_text']:
color = self.color_palette.get(speaker, '\033[0;37m')
else:
color = ''
# cast timestamp to the correct format
datetime_offset = 16 * 3600
if float(start_point) > 3600:
time_str = '%H:%M:%S.%f'
else:
time_str = '%M:%S.%f'
start_point, end_point = max(float(start_point), 0), max(float(end_point), 0)
start_point_str = datetime.fromtimestamp(start_point - datetime_offset).strftime(time_str)[:-4]
end_point_str = datetime.fromtimestamp(end_point - datetime_offset).strftime(time_str)[:-4]
if params['print_time']:
time_str = f'[{start_point_str} - {end_point_str}] '
else:
time_str = ''
# string out concatenation
string_out += f'{color}{time_str}{speaker}: {text}\n'
return string_out
@staticmethod
def threshold_non_speech(source_list, params):
return list(filter(lambda x: x[1] - x[0] > params['asr_based_vad_threshold'], source_list))
@staticmethod
def get_effective_WDER(DER_result_dict, WDER_dict):
"""
Combine DER and WDER assuming independent error sources: 1 - (1 - (FA + MISS)) * (1 - WDER).
"""
return 1 - (
(1 - (DER_result_dict['total']['FA'] + DER_result_dict['total']['MISS'])) * (1 - WDER_dict['total'])
)
@staticmethod
def isOverlapArray(rangeA, rangeB):
startA, endA = rangeA[:, 0], rangeA[:, 1]
startB, endB = rangeB[:, 0], rangeB[:, 1]
return (endA > startB) & (endB > startA)
@staticmethod
def getOverlapRangeArray(rangeA, rangeB):
left = np.max(np.vstack((rangeA[:, 0], rangeB[:, 0])), axis=0)
right = np.min(np.vstack((rangeA[:, 1], rangeB[:, 1])), axis=0)
return right - left
@staticmethod
def get_audacity_label(word, stt_sec, end_sec, speaker):
spk = speaker.split('_')[-1]
return f'{stt_sec}\t{end_sec}\t[{spk}] {word}'
@staticmethod
def softmax(logits):
e = np.exp(logits - np.max(logits))
return e / e.sum(axis=-1).reshape([logits.shape[0], 1])
@staticmethod
def get_num_of_spk_from_labels(labels):
spk_set = [x.split(' ')[-1].strip() for x in labels]
return len(set(spk_set))
@staticmethod
def add_json_to_dict(riva_dict, word, stt, end, speaker):
riva_dict['words'].append({'word': word, 'start_time': stt, 'end_time': end, 'speaker_label': speaker})
@staticmethod
def add_sentences_to_dict(riva_dict, sentences):
# iterate over sentences
for sentence in sentences:
# extract info
speaker = sentence['speaker']
start_point = sentence['start_point']
end_point = sentence['end_point']
text = sentence['text']
# save to riva_dict
riva_dict['sentences'].append(
{'sentence': text, 'start_time': start_point, 'end_time': end_point, 'speaker_label': speaker}
)
|
gkucsko/NeMo
|
nemo_text_processing/text_normalization/en/taggers/electronic.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
get_abs_path,
insert_space,
)
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class ElectronicFst(GraphFst):
"""
Finite state transducer for classifying electronic tokens such as URLs and email addresses.
e.g. cdf1@abc.edu -> tokens { electronic { username: "cdf1" domain: "abc.edu" } }
Args:
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="electronic", kind="classify", deterministic=deterministic)
accepted_symbols = pynini.project(pynini.string_file(get_abs_path("data/electronic/symbol.tsv")), "input")
accepted_common_domains = pynini.project(
pynini.string_file(get_abs_path("data/electronic/domain.tsv")), "input"
)
all_accepted_symbols = NEMO_ALPHA + pynini.closure(NEMO_ALPHA | NEMO_DIGIT | accepted_symbols)
graph_symbols = pynini.string_file(get_abs_path("data/electronic/symbol.tsv")).optimize()
username = (
pynutil.insert("username: \"") + all_accepted_symbols + pynutil.insert("\"") + pynini.cross('@', ' ')
)
domain_graph = all_accepted_symbols + pynini.accep('.') + all_accepted_symbols
protocol_symbols = pynini.closure((graph_symbols | pynini.cross(":", "semicolon")) + pynutil.insert(" "))
protocol_start = (pynini.cross("https", "HTTPS ") | pynini.cross("http", "HTTP ")) + (
pynini.accep("://") @ protocol_symbols
)
protocol_file_start = pynini.accep("file") + insert_space + (pynini.accep(":///") @ protocol_symbols)
protocol_end = pynini.cross("www", "WWW ") + pynini.accep(".") @ protocol_symbols
protocol = protocol_file_start | protocol_start | protocol_end | (protocol_start + protocol_end)
domain_graph = (
pynutil.insert("domain: \"")
+ pynini.difference(domain_graph, pynini.project(protocol, "input") + NEMO_SIGMA)
+ pynutil.insert("\"")
)
domain_common_graph = (
pynutil.insert("domain: \"")
+ pynini.difference(
all_accepted_symbols
+ accepted_common_domains
+ pynini.closure(accepted_symbols + pynini.closure(NEMO_ALPHA | NEMO_DIGIT | accepted_symbols), 0, 1),
pynini.project(protocol, "input") + NEMO_SIGMA,
)
+ pynutil.insert("\"")
)
protocol = pynutil.insert("protocol: \"") + protocol + pynutil.insert("\"")
# email
graph = username + domain_graph
# abc.com, abc.com/123-sm
graph |= domain_common_graph
# www.abc.com/sdafsdf, or https://www.abc.com/asdfad or www.abc.abc/asdfad
graph |= protocol + pynutil.insert(" ") + domain_graph
final_graph = self.add_tokens(graph)
self.fst = final_graph.optimize()
|
gkucsko/NeMo
|
nemo/collections/nlp/data/text_normalization_as_tagging/thutmose_tagger_dataset.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
import numpy as np
from nemo.collections.nlp.data.text_normalization_as_tagging.bert_example import BertExampleBuilder, read_input_file
from nemo.core.classes.dataset import Dataset
from nemo.core.neural_types import ChannelType, IntType, LabelsType, MaskType, NeuralType
__all__ = ["ThutmoseTaggerDataset", "ThutmoseTaggerTestDataset"]
class ThutmoseTaggerDataset(Dataset):
"""
Dataset as used by the ThutmoseTaggerModel for training, validation, and inference
pipelines.
Args:
input_file (str): path to tsv-file with data
example_builder: instance of BertExampleBuilder
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"input_mask": NeuralType(('B', 'T'), MaskType()),
"segment_ids": NeuralType(('B', 'T'), ChannelType()),
"labels_mask": NeuralType(('B', 'T'), MaskType()),
"tag_labels": NeuralType(('B', 'T'), LabelsType()),
"semiotic_labels": NeuralType(('B', 'T'), LabelsType()),
"semiotic_spans": NeuralType(('B', 'T', 'C'), IntType()),
}
def __init__(self, input_file: str, example_builder: BertExampleBuilder) -> None:
self.examples = read_input_file(example_builder, input_file, infer=False)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
input_ids = np.array(self.examples[idx].features["input_ids"])
input_mask = np.array(self.examples[idx].features["input_mask"])
segment_ids = np.array(self.examples[idx].features["segment_ids"])
labels_mask = np.array(self.examples[idx].features["labels_mask"])
tag_labels = np.array(self.examples[idx].features["tag_labels"])
semiotic_labels = np.array(self.examples[idx].features["semiotic_labels"])
semiotic_spans = np.array(self.examples[idx].features["semiotic_spans"])
return input_ids, input_mask, segment_ids, labels_mask, tag_labels, semiotic_labels, semiotic_spans
class ThutmoseTaggerTestDataset(Dataset):
"""
Dataset for inference pipeline.
Args:
sents: list of strings
example_builder: instance of BertExampleBuilder
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"input_mask": NeuralType(('B', 'T'), MaskType()),
"segment_ids": NeuralType(('B', 'T'), ChannelType()),
}
def __init__(self, sents: List[str], example_builder: BertExampleBuilder) -> None:
self.examples = []
for source in sents:
example = example_builder.build_bert_example(source, infer=True)
if example is None:
raise ValueError("Cannot build example from: " + source)
self.examples.append(example)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
input_ids = np.array(self.examples[idx].features["input_ids"])
input_mask = np.array(self.examples[idx].features["input_mask"])
segment_ids = np.array(self.examples[idx].features["segment_ids"])
return input_ids, input_mask, segment_ids
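# Illustrative sketch (not part of the original file): both __getitem__ methods above simply
# convert pre-built feature lists from a BertExample into numpy arrays. The toy dictionary
# below stands in for example.features to show that contract; real features come from
# BertExampleBuilder.
def _sketch_getitem_pattern():
    toy_features = {
        "input_ids": [101, 2023, 2003, 102],
        "input_mask": [1, 1, 1, 1],
        "segment_ids": [0, 0, 0, 0],
    }
    # Same conversion as ThutmoseTaggerTestDataset.__getitem__: one numpy array per feature name.
    return tuple(np.array(toy_features[name]) for name in ("input_ids", "input_mask", "segment_ids"))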
|
gkucsko/NeMo
|
nemo/collections/nlp/models/machine_translation/mt_enc_dec_bottleneck_model.py
|
<gh_stars>0
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import random
from multiprocessing import Value
from pathlib import Path
from typing import Dict, List, Optional, Union
import numpy as np
import torch
import torch.distributed as dist
import torch.utils.data as pt_data
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import rank_zero_only
from sacrebleu import corpus_bleu
from nemo.collections.common.losses import NLLLoss
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTBottleneckModelConfig
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel
from nemo.collections.nlp.modules.common.transformer import AttentionBridge, TopKSequenceGenerator
from nemo.core.classes.common import typecheck
from nemo.utils import logging, model_utils, timers
__all__ = ['MTBottleneckModel']
def build_linear_or_identity(input_dim, output_dim):
"""
Auxiliary method to return FC layer when input_dim != output_dim
else return identity
"""
if input_dim != output_dim:
model = torch.nn.Linear(input_dim, output_dim)
else:
model = torch.nn.Identity()
return model
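# Illustrative sketch (not part of the original file): build_linear_or_identity returns a
# learnable projection only when the dimensions differ, otherwise a no-op Identity. The helper
# name and toy sizes are illustrative.
def _sketch_build_linear_or_identity():
    same = build_linear_or_identity(512, 512)   # torch.nn.Identity
    proj = build_linear_or_identity(512, 1024)  # torch.nn.Linear(512, 1024)
    x = torch.randn(2, 512)
    assert same(x).shape == (2, 512)
    assert proj(x).shape == (2, 1024)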
class MTBottleneckModel(MTEncDecModel):
"""
Machine translation model which supports bottleneck architecture,
NLL, VAE, and MIM loss.
Supported losses:
1) nll - Conditional cross entropy (the usual NMT loss)
2) mim - MIM learning framework. A latent variable model with good
reconstruction and compressed latent representation.
https://arxiv.org/pdf/2003.02645.pdf
3) vae - VAE learning framework. A latent variable model which learns
good probability estimation over observations and
a regularized latent representation.
https://arxiv.org/pdf/1312.6114.pdf
"""
def __init__(self, cfg: MTBottleneckModelConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
self.model_type: str = cfg.get("model_type", "nll")
self.min_logv: float = cfg.get("min_logv", -6)
self.latent_size: int = cfg.get("latent_size", -1)
self.non_recon_warmup_batches: int = cfg.get("non_recon_warmup_batches", 200000)
self.recon_per_token: bool = cfg.get("recon_per_token", True)
self.log_timing: bool = cfg.get("log_timing", True)
# if True, translation uses the mean of latent for VAE and MIM
self.deterministic_translate = True
# latent_size -1 will take value of encoder.hidden_size
if self.latent_size < 0:
self.latent_size = self.encoder.hidden_size
if not self.recon_per_token:
# disable reduction for train and eval loss
self.eval_loss_fn = NLLLoss(ignore_index=self.decoder_tokenizer.pad_id, reduction='none')
self.loss_fn._per_token_reduction = False
if self.model_type not in ["nll", "mim", "vae"]:
raise ValueError(f"Unknown model_type = {self.model_type}")
# project bridge dimension back to decoder hidden dimensions
self.latent2hidden = build_linear_or_identity(self.latent_size, self.decoder.hidden_size)
if self.model_type == "nll":
# project dimension of encoder hidden to latent dimension
self.hidden2latent_mean = build_linear_or_identity(self.encoder.hidden_size, self.latent_size)
else:
# MIM or VAE requires two independent projections for mean/variance
# project dimension of encoder hidden to latent dimension
self.hidden2latent_mean = torch.nn.Linear(self.encoder.hidden_size, self.latent_size)
# for probabilistic latent variable models we also need variance
self.hidden2latent_logv = torch.nn.Linear(self.encoder.hidden_size, self.latent_size)
def _validate_encoder_decoder_hidden_size(self):
"""
        Overrides the parent check that enforces equal encoder and decoder hidden sizes.
        The bottleneck model supports different encoder/decoder hidden sizes, so this is a no-op.
"""
pass
def eval_epoch_end(self, outputs, mode, global_rank):
# call parent for logging
super().eval_epoch_end(outputs, mode, global_rank)
        # if the user specifies a single validation dataloader, PTL provides a list of dictionaries instead of a list of lists of dictionaries
if isinstance(outputs[0], dict):
outputs = [outputs]
for dataloader_idx, output in enumerate(outputs):
# add logs if available in outputs
log_dict = {}
for x in output:
if "log" in x:
for k, v in x["log"].items():
log_dict[k] = log_dict.get(k, []) + [v]
for k, v in log_dict.items():
if dataloader_idx == 0:
self.log(f"{mode}_{k}", np.mean(v), sync_dist=True)
else:
self.log(f"{mode}_{k}_dl_index_{dataloader_idx}", np.mean(v), sync_dist=True)
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
def encode_latent(self, hidden):
"""
Sample latent code z with reparameterization from bridge for
probabilistic latent variable models (e.g., mim, vae),
or return value for non-probabilistic models (nll)
"""
# all models have mean
z_mean = self.hidden2latent_mean(hidden)
if self.model_type == "nll":
# reconstruction only
z = z_mean
z_logv = torch.zeros_like(z)
else:
# mim or vae
# sample posterior q(z|x) for MIM and VAE
z_logv = self.hidden2latent_logv(hidden)
# avoid numerical instability for MIM
z_logv = z_logv.clamp_min(self.min_logv)
# sample z with reparameterization
e = torch.randn_like(z_mean)
z = e * torch.exp(0.5 * z_logv) + z_mean
return z, z_mean, z_logv
def loss(
self, z, z_mean, z_logv, z_mask, tgt_log_probs, tgt, tgt_mask, tgt_labels, train=False, return_info=False
):
"""
Compute the loss from latent (z) and target (x).
train - If True enables loss annealing, and label smoothing
"""
recon_loss_fn = self.loss_fn if train else self.eval_loss_fn
info_dict = {}
if self.recon_per_token:
log_p_x_given_z_per_token = -recon_loss_fn(log_probs=tgt_log_probs, labels=tgt_labels)
log_p_x_given_z = log_p_x_given_z_per_token
log_p_x_given_z_per_token = log_p_x_given_z_per_token.detach()
else:
# averaging of log_p_x_given_z per sample
output_mask = (tgt_labels != self.decoder_tokenizer.pad_id).type_as(tgt_log_probs)
log_p_x_given_z_per_token = (
-recon_loss_fn(log_probs=tgt_log_probs, labels=tgt_labels,).view(tgt_log_probs.shape[:2]) * output_mask
)
# probability per sample
log_p_x_given_z = log_p_x_given_z_per_token.sum(-1).mean()
tokens = output_mask.sum()
log_p_x_given_z_per_token = log_p_x_given_z_per_token.sum().detach() / tokens
info_dict["log_p_x_given_z"] = log_p_x_given_z.detach().cpu()
info_dict["log_p_x_given_z_per_token"] = log_p_x_given_z_per_token.detach().cpu()
# loss warmup during training only
if train:
trainer = self.trainer
# if we do not have a trainer ignore annealing
if trainer is None:
# ignore warmup and auxiliary loss
warmup_coef = 1.0
else:
global_step = self.trainer.global_step
warmup_coef = min(global_step / self.non_recon_warmup_batches, 1)
else:
# ignore warmup and auxiliary loss
warmup_coef = 1.0
info_dict["warmup_coef_recon"] = warmup_coef
if self.model_type in ["mim", "vae"]:
# tokens = tgt_mask.sum()
q_z_given_x = torch.distributions.Normal(loc=z_mean, scale=torch.exp(0.5 * z_logv),)
# average latent distribution to match averaging of observations
if self.recon_per_token:
# average latent per dimension - to heuristically match per-token reconstruction
log_q_z_given_x = q_z_given_x.log_prob(z).mean(-1).mean(-1).mean()
else:
log_q_z_given_x = q_z_given_x.log_prob(z).sum(-1).sum(-1).mean()
# build prior distribution
p_z = torch.distributions.Normal(loc=torch.zeros_like(z), scale=torch.ones_like(z),)
if self.recon_per_token:
# average latent distribution similar to averaging of observations
log_p_z = p_z.log_prob(z).mean(-1).mean(-1).mean()
else:
log_p_z = p_z.log_prob(z).sum(-1).sum(-1).mean()
if self.model_type == "mim":
loss_terms = 0.5 * (log_q_z_given_x + log_p_z)
elif self.model_type == "vae":
# KL divergence -Dkl( q(z|x) || p(z) )
loss_terms = log_p_z - log_q_z_given_x
# show loss value for reconstruction but train with MIM/VAE loss
loss = -(log_p_x_given_z + warmup_coef * loss_terms)
info_dict["log_q_z_given_x"] = log_q_z_given_x.detach().cpu()
info_dict["log_var_q_z_given_x"] = z_logv.detach().mean().cpu()
info_dict["log_p_z"] = log_p_z.detach().cpu()
info_dict["kl_div_q_p"] = (log_q_z_given_x - log_p_z).detach().cpu()
elif self.model_type == "nll":
loss = -log_p_x_given_z
if return_info:
return loss, info_dict
else:
return loss
@typecheck()
def forward(self, src, src_mask, tgt, tgt_mask, timer=None):
"""
        Encodes the source into a latent code z and decodes the target against it, returning
        (z, z_mean, z_logv, z_mask, tgt_log_probs) for the loss computation.
"""
if self.validate_input_ids:
            # test src/tgt for id range (i.e., help in catching a wrong tokenizer)
self.test_encoder_ids(src, raise_error=True)
self.test_decoder_ids(tgt, raise_error=True)
if timer is not None:
timer.start("encoder")
enc_hiddens, enc_mask = self.encoder(input_ids=src, encoder_mask=src_mask, return_mask=True,)
        # build posterior distribution q(z|x)
z, z_mean, z_logv = self.encode_latent(hidden=enc_hiddens)
z_mask = enc_mask
if timer is not None:
timer.stop("encoder")
if timer is not None:
timer.start("decoder")
# decoding cross attention context
context_hiddens = self.latent2hidden(z)
tgt_hiddens = self.decoder(
input_ids=tgt, decoder_mask=tgt_mask, encoder_embeddings=context_hiddens, encoder_mask=enc_mask,
)
# build decoding distribution
tgt_log_probs = self.log_softmax(hidden_states=tgt_hiddens)
if timer is not None:
timer.stop("decoder")
return z, z_mean, z_logv, z_mask, tgt_log_probs
@torch.no_grad()
def batch_translate(
self, src: torch.LongTensor, src_mask: torch.LongTensor, return_beam_scores: bool = False, cache={}
):
"""
Translates a minibatch of inputs from source language to target language.
Args:
src: minibatch of inputs in the src language (batch x seq_len)
src_mask: mask tensor indicating elements to be ignored (batch x seq_len)
Returns:
            translations: a list of strings containing detokenized translations
            inputs: a list of strings containing detokenized inputs
"""
mode = self.training
timer = cache.get("timer", None)
try:
self.eval()
            # build posterior distribution q(z|x)
if ("z" not in cache) or ("z_mean" not in cache) or ("z_mask" not in cache):
if timer is not None:
timer.start("encoder")
enc_hiddens, enc_mask = self.encoder(input_ids=src, encoder_mask=src_mask, return_mask=True)
z, z_mean, _ = self.encode_latent(hidden=enc_hiddens)
if timer is not None:
timer.stop("encoder")
else:
enc_mask = cache["z_mask"]
z = cache["z"]
z_mean = cache["z_mean"]
if getattr(self, "deterministic_translate", True):
z = z_mean
if timer is not None:
timer.start("sampler")
# decoding cross attention context
context_hiddens = self.latent2hidden(z)
best_translations = self.beam_search(
encoder_hidden_states=context_hiddens,
encoder_input_mask=enc_mask,
return_beam_scores=return_beam_scores,
)
if timer is not None:
timer.stop("sampler")
if return_beam_scores:
all_translations, scores, best_translations = best_translations
scores = scores.view(-1)
all_translations = self.ids_to_postprocessed_text(
all_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True
)
best_translations = self.ids_to_postprocessed_text(
best_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True
)
inputs = self.ids_to_postprocessed_text(
src, self.encoder_tokenizer, self.source_processor, filter_beam_ids=False
)
finally:
self.train(mode=mode)
if return_beam_scores:
return inputs, all_translations, scores.data.cpu().numpy().tolist(), best_translations
return inputs, best_translations
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1 added by DataLoader
# is excess.
batch[i] = batch[i].squeeze(dim=0)
src_ids, src_mask, tgt_ids, tgt_mask, labels = batch
z, z_mean, z_logv, z_mask, tgt_log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)
train_loss, info_dict = self.loss(
z=z,
z_mean=z_mean,
z_logv=z_logv,
z_mask=z_mask,
tgt_log_probs=tgt_log_probs,
tgt=tgt_ids,
tgt_mask=tgt_mask,
tgt_labels=labels,
train=True,
return_info=True,
)
tensorboard_logs = {
'train_loss': train_loss,
'lr': self._optimizer.param_groups[0]['lr'],
}
tensorboard_logs.update(info_dict)
return {'loss': train_loss, 'log': tensorboard_logs}
def eval_step(self, batch, batch_idx, mode, dataloader_idx=0):
if self.log_timing:
timer = timers.NamedTimer()
else:
timer = None
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1 added by DataLoader
# is excess.
batch[i] = batch[i].squeeze(dim=0)
if self.multilingual:
self.source_processor = self.source_processor_list[dataloader_idx]
self.target_processor = self.target_processor_list[dataloader_idx]
src_ids, src_mask, tgt_ids, tgt_mask, labels = batch
z, z_mean, z_logv, z_mask, tgt_log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask, timer=timer)
eval_loss, info_dict = self.loss(
z=z,
z_mean=z_mean,
z_logv=z_logv,
z_mask=z_mask,
tgt_log_probs=tgt_log_probs,
tgt=tgt_ids,
tgt_mask=tgt_mask,
tgt_labels=labels,
train=False,
return_info=True,
)
# pass cache to sampler in order to reuse encoder's output
cache = dict(z=z, z_mean=z_mean, z_mask=z_mask, timer=timer,)
inputs, translations = self.batch_translate(src=src_ids, src_mask=src_mask, cache=cache)
num_measurements = labels.shape[0] * labels.shape[1]
if dataloader_idx == 0:
getattr(self, f'{mode}_loss')(
loss=eval_loss, num_measurements=num_measurements,
)
else:
getattr(self, f'{mode}_loss_{dataloader_idx}')(
loss=eval_loss, num_measurements=num_measurements,
)
np_tgt = tgt_ids.detach().cpu().numpy()
ground_truths = [self.decoder_tokenizer.ids_to_text(tgt) for tgt in np_tgt]
ground_truths = [self.target_processor.detokenize(tgt.split(' ')) for tgt in ground_truths]
num_non_pad_tokens = np.not_equal(np_tgt, self.decoder_tokenizer.pad_id).sum().item()
# collect logs
log_dict = {k: v.detach().cpu().numpy() if torch.is_tensor(v) else v for k, v in info_dict.items()}
# add timing if required
if timer is not None:
for k, v in timer.export().items():
log_dict[f"{k}_timing"] = v
return {
'inputs': inputs,
'translations': translations,
'ground_truths': ground_truths,
'num_non_pad_tokens': num_non_pad_tokens,
'log': log_dict,
}
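# Illustrative sketch (not part of the original file): the reparameterization step performed by
# encode_latent() for the probabilistic models (mim/vae), shown in isolation with toy tensor
# sizes. The clamp mirrors the min_logv floor used above.
def _sketch_reparameterized_latent(hidden_size: int = 8, latent_size: int = 4):
    hidden = torch.randn(2, 5, hidden_size)          # (batch, seq_len, hidden)
    to_mean = torch.nn.Linear(hidden_size, latent_size)
    to_logv = torch.nn.Linear(hidden_size, latent_size)
    z_mean = to_mean(hidden)
    z_logv = to_logv(hidden).clamp_min(-6.0)         # avoid numerical instability
    e = torch.randn_like(z_mean)
    z = e * torch.exp(0.5 * z_logv) + z_mean         # differentiable sample from q(z|x)
    return z, z_mean, z_logv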
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/data_processor/sgd_data_processor.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/data_utils.py
"""
import collections
import json
import os
import pickle
import re
from typing import List
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
from nemo.collections.nlp.data.dialogue.sgd.schema import Schema
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
__all__ = ['DialogueSGDDataProcessor']
FILE_RANGES = {
"sgd_single_domain": {"train": range(1, 44), "dev": range(1, 8), "test": range(1, 12)},
"sgd_multi_domain": {"train": range(44, 128), "dev": range(8, 21), "test": range(12, 35)},
"sgd_all": {"train": range(1, 128), "dev": range(1, 21), "test": range(1, 35)},
"sgd_all_single": {"train": range(1, 128), "dev": range(1, 8), "test": range(1, 12)},
"multiwoz": {"train": range(1, 18), "dev": range(1, 3), "test": range(1, 3)},
"debug_sample": {"train": range(1, 2), "dev": range(1, 2), "test": range(1, 2)},
}
class DialogueSGDDataProcessor(DialogueDataProcessor):
"""Data Processor for SGD dialogues.
More information at https://arxiv.org/abs/1909.05855
***Downloading the dataset***
# git clone https://github.com/google-research-datasets/dstc8-schema-guided-dialogue.git
***Data format***
SGD data comes with a JSON schema file and dialogue files for each dataset split.
In the following we will show an example for a service entry in the schema file.
* service_name
* description
* slots
* name
* description
* is_categorical
* possible values
* intents
* name
* description
* required_slots (not used)
* is_transactional (not used)
* optional_slots (not used)
* result_slots (not used)
In the following we will show an example for a dialogue.
* dialogue_id
* services
* turns
* frames
* actions
* act
* slot
* values
* service
* slots
* exclusive_end
* slot
* start
* state
* active_intent
                * requested_slots
* slot_values
* speaker - [USER, SYSTEM]
* utterance
"""
def __init__(
self, data_dir: str, dialogues_example_dir: str, tokenizer: object, cfg=None,
):
"""
Constructs DialogueSGDDataProcessor
Args:
data_dir: path to data directory
dialogues_example_dir: path to store processed dialogue examples
tokenizer: tokenizer object
cfg: cfg container for dataset
"""
self.data_dir = data_dir
self.cfg = cfg
self._task_name = self.cfg.task_name # e.g. "sgd_single_domain"
self._subsample = self.cfg.subsample
all_schema_json_paths = []
for dataset_split in ['train', 'test', 'dev']:
all_schema_json_paths.append(os.path.join(self.cfg.data_dir, dataset_split, "schema.json"))
self.schemas = Schema(all_schema_json_paths)
self.schema_config = {
"MAX_NUM_CAT_SLOT": self.cfg.max_num_cat_slot,
"MAX_NUM_NONCAT_SLOT": self.cfg.max_num_noncat_slot,
"MAX_NUM_VALUE_PER_CAT_SLOT": self.cfg.max_value_per_cat_slot,
"MAX_NUM_INTENT": self.cfg.max_num_intent,
"NUM_TASKS": self.cfg.num_tasks,
"MAX_SEQ_LENGTH": self.cfg.max_seq_length,
}
train_file_range = FILE_RANGES[self._task_name]["train"]
dev_file_range = FILE_RANGES[self._task_name]["dev"]
test_file_range = FILE_RANGES[self._task_name]["test"]
self._file_ranges = {
"train": train_file_range,
"dev": dev_file_range,
"test": test_file_range,
}
self._seen_services = {
"train": set(),
"dev": set(),
"test": set(),
}
self._tokenizer = tokenizer
self._dialogues_example_dir = dialogues_example_dir
self.dial_files = {}
# slots_relation_list.np would contain the candidate list of slots for each (service, slot) which would be
# looked into when a switch between two services happens in the dialogue and we can not find any value for a slot in the current user utterance.
# This file would get generated from the dialogues in the training set.
self.slots_relation_file = os.path.join(
dialogues_example_dir, f"{self._task_name}_train_slots_relation_list.np"
)
for dataset in ["train", "dev", "test"]:
# Process dialogue files
dial_file = f"{self._task_name}_{dataset}_examples.json"
dial_file = os.path.join(dialogues_example_dir, dial_file)
self.dial_files[(self._task_name, dataset)] = dial_file
dialog_paths = DialogueSGDDataProcessor.get_dialogue_files(data_dir, dataset, self._task_name)
dialogs = DialogueSGDDataProcessor.load_dialogues(dialog_paths)
for dialog in dialogs:
self._seen_services[dataset].update(set(dialog['services']))
if is_global_rank_zero():
overwrite_dial_files = not self.cfg.use_cache
self.save_dialog_examples(overwrite_dial_files=overwrite_dial_files)
def save_dialog_examples(self, overwrite_dial_files: bool):
"""
Preprocesses dialogues and saves to disk.
Args:
overwrite_dial_files: whether or not to overwrite saved file if already exists
"""
for dataset in ["train", "dev", "test"]:
dial_file = self.dial_files[(self._task_name, dataset)]
if not os.path.exists(dial_file) or overwrite_dial_files:
logging.info(f"Start generating the dialogue examples for {dataset} dataset.")
if not os.path.exists(self._dialogues_example_dir):
os.makedirs(self._dialogues_example_dir)
dial_examples, slots_relation_list = self._generate_dialog_examples(
dataset, self.schemas, self._subsample
)
with open(dial_file, "w", encoding="UTF-8") as f:
json.dump([i.data for i in dial_examples], f)
if dataset == "train":
with open(self.slots_relation_file, "wb") as f:
pickle.dump(slots_relation_list, f)
logging.info(f"The slot carry-over list for train set is stored at {self.slots_relation_file}")
logging.info(f"The dialogue examples for {dataset} dataset saved at {dial_file}")
logging.info(f"Finish generating the dialogue examples for {dataset} dataset.")
# common interface for Data Processor
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
return self.get_dialog_examples("train")
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev")
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
return self.get_dialog_examples("test")
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def get_dialog_examples(self, dataset_split: str) -> List[object]:
"""
Loads preprocessed dialogue examples from disk.
Args:
dataset_split: dataset split
Returns:
dial_examples: list of InputExample's.
"""
if (self._task_name, dataset_split) not in self.dial_files or not os.path.exists(
self.dial_files[(self._task_name, dataset_split)]
):
raise ValueError(
f"{dataset_split} dialogue examples were not processed for {self._task_name} task. Re-initialize SGDDataProcessor and add {dataset_split} dataset split to datasets arg."
)
dial_file = self.dial_files[(self._task_name, dataset_split)]
logging.info(f"Loading dialogue examples from {dial_file}.")
with open(dial_file, "rb") as f:
dial_examples = json.load(f)
dial_examples = [DialogueInputExample(i) for i in dial_examples]
if not os.path.exists(self.slots_relation_file):
raise ValueError(
f"Slots relation file {self.slots_relation_file} does not exist. It is needed for the carry-over mechanism of state tracker for switches between services."
)
if os.path.getsize(self.slots_relation_file) > 0:
with open(self.slots_relation_file, "rb") as f:
self.schemas._slots_relation_list = pickle.load(f)
logging.info(
f"Loaded the slot relation list for value carry-over between services from {self.slots_relation_file}."
)
return dial_examples
def get_seen_services(self, dataset_split: str):
"""
        Returns the services seen in the given data split
Args:
dataset_split: data split
Returns:
seen_services: list of seen services
"""
seen_services = self._seen_services[dataset_split]
return seen_services
def _generate_dialog_examples(self, dataset_split: str, schemas: object, subsample: bool):
"""
Returns a list of `InputExample`s of the data splits' dialogues.
Args:
dataset_split: data split, can be "train", "dev", or "test".
schemas: schema for all services of all datasets
            subsample: whether to balance positive and negative samples in the dataset
Returns:
examples: a list of `InputExample`s.
"""
logging.info(f'Creating examples and slot relation list from the dialogues started...')
dialog_paths = [
os.path.join(self.data_dir, dataset_split, "dialogues_{:03d}.json".format(i))
for i in self._file_ranges[dataset_split]
]
dialogs = DialogueSGDDataProcessor.load_dialogues(dialog_paths)
examples = []
slot_carryover_candlist = collections.defaultdict(int)
for dialog_idx, dialog in enumerate(dialogs):
if dialog_idx % 1000 == 0:
logging.info(f'Processed {dialog_idx} dialogues.')
examples.extend(
self._create_examples_from_dialog(dialog, schemas, dataset_split, slot_carryover_candlist, subsample)
)
slots_relation_list = collections.defaultdict(list)
for slots_relation, relation_size in slot_carryover_candlist.items():
if relation_size > 0:
slots_relation_list[(slots_relation[0], slots_relation[1])].append(
(slots_relation[2], slots_relation[3], relation_size)
)
slots_relation_list[(slots_relation[2], slots_relation[3])].append(
(slots_relation[0], slots_relation[1], relation_size)
)
return examples, slots_relation_list
def _create_examples_from_dialog(
self, dialog: dict, schemas: object, dataset_split: str, slot_carryover_candlist: dict, subsample: bool
):
"""
Create examples for every turn in the dialogue.
Args:
dialog: dialogue example
schemas: schema for all services of all datasets
dataset_split: data split
slot_carryover_candlist: a dictionary to keep and count the number of carry-over cases between two slots from two different services
            subsample: whether to balance positive and negative samples in the dataset
Returns:
examples: a list of `InputExample`s.
"""
dialog_id = dialog["dialogue_id"]
prev_states = {}
examples = []
for turn_idx, turn in enumerate(dialog["turns"]):
# Generate an example for every frame in every user turn.
if turn["speaker"] == "USER":
user_utterance = turn["utterance"]
user_frames = {f["service"]: f for f in turn["frames"]}
if self.cfg.system_utterance == 'prev_turn':
if turn_idx > 0:
system_turn = dialog["turns"][turn_idx - 1]
system_utterance = system_turn["utterance"]
system_frames = {f["service"]: f for f in system_turn["frames"]}
else:
system_utterance = ""
system_frames = {}
else: # takes the system utterance of the next turn
system_turn = dialog["turns"][turn_idx + 1]
system_utterance = system_turn["utterance"]
system_frames = {f["service"]: f for f in system_turn["frames"]}
turn_id = "{}-{}-{:02d}".format(dataset_split, dialog_id, turn_idx)
turn_examples, prev_states, slot_carryover_values = self._create_examples_from_turn(
turn_id,
system_utterance,
user_utterance,
system_frames,
user_frames,
prev_states,
schemas,
subsample,
)
examples.extend(turn_examples)
for value, slots_list in slot_carryover_values.items():
if value in ["True", "False"]:
continue
if len(slots_list) > 1:
for service1, slot1 in slots_list:
for service2, slot2 in slots_list:
if service1 == service2:
continue
if service1 > service2:
service1, service2 = service2, service1
slot1, slot2 = slot2, slot1
slot_carryover_candlist[(service1, slot1, service2, slot2)] += 1
return examples
def _get_state_update(self, current_state: dict, prev_state: dict) -> dict:
"""
Updates dialogue state
Args:
current_state: slot values pairs for the current dialogue turn
prev_state: slot values pairs for the previous dialogue turns
Returns:
state_update: slot values pairs that are added/updated during the current dialogue turn
"""
state_update = dict(current_state)
for slot, values in current_state.items():
if slot in prev_state and prev_state[slot][0] in values:
# Remove the slot from state if its value didn't change.
state_update.pop(slot)
return state_update
@staticmethod
def convert_camelcase_to_lower(label):
"""Converts camelcase to lowercase with spaces e.g. 'HelloWorld' --> 'hello world'"""
if label.lower() == "none":
return "none"
label = label.split("_")[0]
tokens = re.findall('[A-Z][^A-Z]*', label)
return ' '.join([token.lower() for token in tokens])
def preprocess_intent(self, intent, schemas, service):
if self.cfg.preprocess_intent_function == 'default':
return intent
elif self.cfg.preprocess_intent_function == 'lowercase':
return DialogueSGDDataProcessor.convert_camelcase_to_lower(intent)
elif self.cfg.preprocess_intent_function == 'description':
return schemas.get_service_schema(service).intent_descriptions[intent]
else:
raise ValueError(
'Only default, lowercase and description are allowed for model.dataset.preprocess_intent_function for SGD task'
)
def _create_examples_from_turn(
self,
turn_id: int,
system_utterance: str,
user_utterance: str,
system_frames: dict,
user_frames: dict,
prev_states: dict,
schemas: object,
subsample: bool,
):
"""
Creates an example for each frame in the user turn.
Args:
turn_id: turn number
system_utterance: last system utterance
            user_utterance: current user utterance
system_frames: all system utterances and slot - slot value pairs
user_frames: all user utterances and slot - slot value pairs
prev_states: slot - slot value pairs from the previous turns
schemas: schema for all services of all datasets
            subsample: whether to balance positive and negative samples in the dataset
Returns:
examples: a list of `InputExample`s.
prev_states: updated dialogue state e.g. {'Restaurants_1': {'city': ['San Jose'], 'cuisine': ['American']}}
"""
system_user_utterance = system_utterance + ' ' + user_utterance
states = {}
examples = []
slot_carryover_values = collections.defaultdict(list)
for service, user_frame in user_frames.items():
state = user_frame["state"]["slot_values"]
state_update = self._get_state_update(state, prev_states.get(service, {}))
states[service] = state
system_frame = system_frames.get(service, None)
dataset_split, dialog_id, turn_id_ = turn_id.split('-')
dialog_id_1, dialog_id_2 = dialog_id.split('_')
example_id = f"{turn_id}-{service}"
example_id_num = [
int(dialog_id_1),
int(dialog_id_2),
int(turn_id_),
schemas.get_service_id(service),
]
intent = user_frames[service]["state"]['active_intent']
all_possible_slots = schemas.get_service_schema(service).slots
categorical_slots = schemas.get_service_schema(service).categorical_slots
one_example = {
"example_id": example_id,
"example_id_num": example_id_num,
"utterance": user_utterance,
"system_utterance": system_utterance,
"system_slots": {slot["slot"]: slot for slot in system_frame["slots"]}
if system_frame is not None
else None,
"system_actions": system_frame["actions"] if system_frame is not None else None,
"labels": {
"service": service,
"intent": self.preprocess_intent(intent, schemas, service),
"slots": {slot: state[slot] for slot in state_update},
},
"label_positions": {"slots": {slot["slot"]: slot for slot in user_frames[service]["slots"]}},
"possible_labels": {
"service": schemas.services,
"intent": [
self.preprocess_intent(intent, schemas, service)
for intent in schemas.get_service_schema(service).intents
],
"slots": {
slot: schemas.get_service_schema(service).get_categorical_slot_values(slot)
if slot in categorical_slots
else []
for slot in all_possible_slots
},
},
"description": {
"service": schemas.get_service_schema(service).description,
"intent": schemas.get_service_schema(service).intent_descriptions[intent],
"slots": {
slot: schemas.get_service_schema(service).slot_descriptions[slot] for slot in state_update
},
},
}
examples.append(DialogueInputExample(one_example))
if service not in prev_states and int(turn_id_) > 0:
for slot_name, values in state_update.items():
for value in values:
slot_carryover_values[value].append((service, slot_name))
for prev_service, prev_slot_value_list in prev_states.items():
if prev_service == service:
continue
if prev_service in state:
prev_slot_value_list = state[prev_service]
for prev_slot_name, prev_values in prev_slot_value_list.items():
for prev_value in prev_values:
slot_carryover_values[prev_value].append((prev_service, prev_slot_name))
return examples, states, slot_carryover_values
def _find_subword_indices(
self,
slot_values: dict,
utterance: str,
char_slot_spans: dict,
alignments: List[int],
subwords: List[str],
bias: int,
) -> dict:
"""
Find indices for subwords corresponding to slot values.
Args:
slot_values: slot - slot value pairs
utterance: utterance
char_slot_spans: char - slot spans
alignments: alignments
subwords: subtokens mapping
bias: offset
Returns:
span_boundaries: span boundaries
"""
span_boundaries = {}
for slot, values in slot_values.items():
# Get all values present in the utterance for the specified slot.
value_char_spans = {}
for slot_span in char_slot_spans:
if slot_span["slot"] == slot:
value = utterance[slot_span["start"] : slot_span["exclusive_end"]]
start_tok_idx = alignments[slot_span["start"]]
end_tok_idx = alignments[slot_span["exclusive_end"] - 1]
if 0 <= start_tok_idx < len(subwords):
end_tok_idx = min(end_tok_idx, len(subwords) - 1)
value_char_spans[value] = (start_tok_idx + bias, end_tok_idx + bias)
for v in values:
if v in value_char_spans:
span_boundaries[slot] = value_char_spans[v]
break
return span_boundaries
@classmethod
def load_dialogues(cls, dialog_json_filepaths: List[str]) -> List[dict]:
"""
Obtain the list of all dialogues from specified json files.
Args:
dialog_json_filepaths: list of json files
Returns:
dialogs: the list of all dialogues
"""
dialogs = []
for dialog_json_filepath in sorted(dialog_json_filepaths):
with open(dialog_json_filepath, 'r', encoding="UTF-8") as f:
dialogs.extend(json.load(f))
return dialogs
@classmethod
def get_dialogue_files(cls, data_dir: str, dataset_split: str, task_name: str):
"""
Obtain the list of all dialogue json files
Args:
data_dir: path to the data folder
dataset_split: data split
task_name: SGD task name, see keys of the FILE_RANGES
Returns:
dialog: the list of all dialogue json files paths
"""
return [
os.path.join(data_dir, dataset_split, 'dialogues_{:03d}.json'.format(fid))
for fid in FILE_RANGES[task_name][dataset_split]
]
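# Illustrative sketch (not part of the original file): how _get_state_update() reduces a turn's
# full dialogue state to only the slots whose values changed. The toy states are made up.
def _sketch_state_update():
    prev_state = {"city": ["San Jose"], "cuisine": ["American"]}
    current_state = {"city": ["San Jose"], "cuisine": ["Mexican"], "party_size": ["2"]}
    state_update = dict(current_state)
    for slot, values in current_state.items():
        if slot in prev_state and prev_state[slot][0] in values:
            state_update.pop(slot)  # "city" did not change, so it is dropped
    return state_update             # {"cuisine": ["Mexican"], "party_size": ["2"]}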
|
gkucsko/NeMo
|
nemo/collections/asr/parts/utils/nmesc_clustering.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2007-2020 The scikit-learn developers.
# BSD 3-Clause License
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# NME-SC clustering is based on the implementation from the paper
# https://arxiv.org/pdf/2003.02405.pdf and the implementation from
# https://github.com/tango4j/Auto-Tuning-Spectral-Clustering.
from collections import Counter
from typing import Dict, List
import torch
from torch.linalg import eigh
@torch.jit.script
def cos_similarity(a: torch.Tensor, b: torch.Tensor, eps=torch.tensor(3.5e-4)):
"""
Args:
a: (torch.tensor)
Matrix containing speaker representation vectors. (N x embedding_dim)
b: (torch.tensor)
Matrix containing speaker representation vectors. (N x embedding_dim)
Returns:
res (torch.tensor)
N by N matrix containing the cosine similarities of the values.
"""
a_norm = a / (torch.norm(a, dim=1).unsqueeze(1) + eps)
    b_norm = b / (torch.norm(b, dim=1).unsqueeze(1) + eps)
res = torch.mm(a_norm, b_norm.transpose(0, 1))
res.fill_diagonal_(1)
return res
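# Illustrative sketch (not part of the original file): cos_similarity() on a toy batch of
# embeddings. The diagonal is filled with 1 inside the function, so every vector is maximally
# similar to itself.
def _sketch_cos_similarity():
    emb = torch.randn(4, 16)         # 4 toy speaker embeddings of dimension 16
    return cos_similarity(emb, emb)  # (4 x 4) pairwise cosine similarity matrix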
@torch.jit.script
def ScalerMinMax(X: torch.Tensor):
"""
Min-max scale the input affinity matrix X, which will lead to a dynamic range of
[0, 1].
Args:
X: (torch.tensor)
Matrix containing cosine similarity values among embedding vectors (N x N)
Returns:
v_norm: (torch.tensor)
Min-max normalized value of X.
"""
v_min, v_max = X.min(), X.max()
v_norm = (X - v_min) / (v_max - v_min)
return v_norm
@torch.jit.script
def getEuclideanDistance(specEmbA: torch.Tensor, specEmbB: torch.Tensor, device: torch.device = torch.device('cpu')):
"""
Args:
specEmbA: (torch.tensor)
Matrix containing spectral embedding vectors from eigenvalue decomposition (N x embedding_dim).
specEmbB: (torch.tensor)
Matrix containing spectral embedding vectors from eigenvalue decomposition (N x embedding_dim).
Returns:
dis: (torch.tensor)
Euclidean distance values of the two sets of spectral embedding vectors.
"""
specEmbA, specEmbB = specEmbA.to(device), specEmbB.to(device)
A, B = specEmbA.unsqueeze(dim=1), specEmbB.unsqueeze(dim=0)
dis = (A - B) ** 2.0
dis = dis.sum(dim=-1).squeeze()
return dis
@torch.jit.script
def kmeans_plusplus_torch(
X: torch.Tensor,
n_clusters: int,
random_state: int,
n_local_trials: int = 30,
device: torch.device = torch.device('cpu'),
):
"""
Choose initial centroids for initializing k-means algorithm. The performance of
k-means algorithm can vary significantly by the initial centroids. To alleviate
this problem, k-means++ algorithm chooses initial centroids based on the probability
    proportional to the distance from the previously chosen centroids. The centroids
selected by k-means++ algorithm improve the chance of getting more accurate and
stable clustering results. The overall implementation of k-means++ algorithm is
inspired by the numpy based k-means++ implementation in:
https://github.com/scikit-learn/scikit-learn
Originally, the implementation of the k-means++ algorithm in scikit-learn is based
on the following research article:
<NAME>, and <NAME>. k-means++: The advantages of careful
seeding. Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete
algorithms, Society for Industrial and Applied Mathematics (2007)
Args:
X: (torch.tensor)
Matrix containing cosine similarity values among embedding vectors (N x N)
n_clusters: (int)
Maximum number of speakers for estimating number of speakers.
Shows stable performance under 20.
random_state: (int)
Seed variable for setting up a random state.
n_local_trials: (int)
Number of trials for creating initial values of the center points.
device: (torch.device)
Torch device variable.
Returns:
centers: (torch.tensor)
The coordinates for center points that are used for initializing k-means algorithm.
indices: (torch.tensor)
The indices of the best candidate center points.
"""
torch.manual_seed(random_state)
X = X.to(device)
n_samples, n_features = X.shape
centers = torch.zeros(n_clusters, n_features, dtype=X.dtype)
center_id = torch.randint(0, n_samples, (1,)).long()
indices = torch.full([n_clusters,], -1, dtype=torch.int)
centers[0] = X[center_id].squeeze(0)
indices[0] = center_id.squeeze(0)
centers = centers.to(device)
closest_dist_diff = centers[0, None].repeat(1, X.shape[0]).view(X.shape[0], -1) - X
closest_dist_sq = closest_dist_diff.pow(2).sum(dim=1).unsqueeze(dim=0)
current_pot = closest_dist_sq.sum()
for c in range(1, n_clusters):
rand_vals = torch.rand(n_local_trials) * current_pot.item()
if len(closest_dist_sq.shape) > 1:
torch_cumsum = torch.cumsum(closest_dist_sq, dim=1)[0]
else:
torch_cumsum = torch.cumsum(closest_dist_sq, dim=0)
candidate_ids = torch.searchsorted(torch_cumsum, rand_vals.to(device))
N_ci = candidate_ids.shape[0]
distance_diff = X[candidate_ids].repeat(1, X.shape[0]).view(X.shape[0] * N_ci, -1) - X.repeat(N_ci, 1)
distance = distance_diff.pow(2).sum(dim=1).view(N_ci, -1)
distance_to_candidates = torch.minimum(closest_dist_sq, distance)
candidates_pot = distance_to_candidates.sum(dim=1)
best_candidate = torch.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
centers[c] = X[best_candidate]
indices[c] = best_candidate
return centers, indices
@torch.jit.script
def kmeans_torch(
X: torch.Tensor,
num_clusters: int,
threshold: float = 1e-4,
iter_limit: int = 15,
random_state: int = 0,
device: torch.device = torch.device('cpu'),
):
"""
Run k-means algorithm on the given set of spectral embeddings in X. The threshold
and iter_limit variables are set to show the best performance on speaker diarization
tasks. The overall implementation of k-means algorithm is inspired by the k-means
algorithm implemented in https://github.com/scikit-learn/scikit-learn.
References:
<NAME>, and <NAME>. k-means++: The advantages of careful
seeding. Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete
algorithms, Society for Industrial and Applied Mathematics (2007).
Args:
X: (torch.tensor)
Cosine similarity matrix calculated from speaker embeddings
num_clusters: (int)
The estimated number of speakers.
threshold: (float)
This threshold limits the change of center values. If the square of
the center shift values are bigger than this threshold, the iteration stops.
iter_limit: (int)
The maximum number of iterations that is allowed by the k-means algorithm.
device: (torch.device)
Torch device variable
Returns:
selected_cluster_indices: (torch.tensor)
The assigned cluster labels from the k-means clustering.
"""
# Convert tensor type to float
X = X.float().to(device)
input_size = X.shape[0]
# Initialize the cluster centers with kmeans_plusplus algorithm.
plusplus_init_states = kmeans_plusplus_torch(X, n_clusters=num_clusters, random_state=random_state, device=device)
centers = plusplus_init_states[0]
iter_count = 0
selected_cluster_indices = torch.zeros(input_size).int()
for iter_count in range(iter_limit):
euc_dist = getEuclideanDistance(X, centers, device=device)
if len(euc_dist.shape) <= 1:
break
else:
selected_cluster_indices = torch.argmin(euc_dist, dim=1)
center_inits = centers.clone()
for index in range(num_clusters):
selected_cluster = torch.nonzero(selected_cluster_indices == index).squeeze().to(device)
chosen_indices = torch.index_select(X, 0, selected_cluster)
if chosen_indices.shape[0] == 0:
chosen_indices = X[torch.randint(len(X), (1,))]
centers[index] = chosen_indices.mean(dim=0)
# Calculate the delta from center_inits to centers
center_delta_pow = torch.pow((centers - center_inits), 2)
center_shift_pow = torch.pow(torch.sum(torch.sqrt(torch.sum(center_delta_pow, dim=1))), 2)
# If the cluster centers are not changing significantly, stop the loop.
if center_shift_pow < threshold:
break
return selected_cluster_indices
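# Illustrative sketch (not part of the original file): kmeans_torch() on two well-separated toy
# clusters. With num_clusters=2 the returned labels split the 20 points into the two groups
# (which group gets label 0 is arbitrary).
def _sketch_kmeans_torch():
    cluster_a = torch.randn(10, 2) + torch.tensor([5.0, 5.0])
    cluster_b = torch.randn(10, 2) - torch.tensor([5.0, 5.0])
    points = torch.cat([cluster_a, cluster_b], dim=0)
    return kmeans_torch(points, num_clusters=2, random_state=0)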
@torch.jit.script
def getTheLargestComponent(affinity_mat: torch.Tensor, seg_index: int, device: torch.device):
"""
Find the largest affinity_mat connected components for each given node.
This is for checking whether the affinity_mat is fully connected.
Args:
affinity_mat: (torch.tensor)
A square matrix (tensor) containing normalized cosine distance values
seg_index: (int)
The segment index that is targeted to be explored.
Returns:
connected_nodes: (torch.tensor)
A tensor containing booleans that indicate whether the node is connected.
"""
num_of_segments = affinity_mat.shape[0]
connected_nodes = torch.zeros(num_of_segments, dtype=torch.bool).to(device)
nodes_to_explore = torch.zeros(num_of_segments, dtype=torch.bool).to(device)
nodes_to_explore[seg_index] = True
for k in range(num_of_segments):
last_num_component = connected_nodes.sum()
torch.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = (nodes_to_explore == torch.tensor(True)).nonzero().t().squeeze()
if len(indices.size()) == 0:
indices = indices.unsqueeze(0)
for i in indices:
neighbors = affinity_mat[i]
torch.logical_or(nodes_to_explore, neighbors.squeeze(0), out=nodes_to_explore)
return connected_nodes
@torch.jit.script
def isGraphFullyConnected(affinity_mat: torch.Tensor, device: torch.device):
"""
Check whether the given affinity matrix is a fully connected graph.
"""
return getTheLargestComponent(affinity_mat, 0, device).sum() == affinity_mat.shape[0]
@torch.jit.script
def getKneighborsConnections(affinity_mat: torch.Tensor, p_value: int):
"""
Binarize top-p values for each row from the given affinity matrix.
"""
binarized_affinity_mat = torch.zeros_like(affinity_mat).int()
for i in range(affinity_mat.shape[0]):
line = affinity_mat[i, :]
sorted_idx = torch.argsort(line, descending=True)
indices = sorted_idx[:p_value]
binarized_affinity_mat[indices, i] = torch.ones(indices.shape[0]).to(affinity_mat.device).int()
return binarized_affinity_mat
@torch.jit.script
def getAffinityGraphMat(affinity_mat_raw: torch.Tensor, p_value: int):
"""
Calculate a binarized graph matrix and
symmetrize the binarized graph matrix.
"""
X = getKneighborsConnections(affinity_mat_raw, p_value)
symm_affinity_mat = 0.5 * (X + X.T)
return symm_affinity_mat
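# Illustrative sketch (not part of the original file): binarizing a toy affinity matrix with
# p_value=2 keeps the two strongest connections per row; getAffinityGraphMat() then symmetrizes
# the result (entries of 0.5 mark connections that were picked in only one direction).
def _sketch_affinity_binarization():
    toy_affinity = torch.tensor(
        [[1.00, 0.90, 0.10],
         [0.90, 1.00, 0.20],
         [0.10, 0.20, 1.00]]
    )
    return getAffinityGraphMat(toy_affinity, 2)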
@torch.jit.script
def getMinimumConnection(mat: torch.Tensor, max_N: torch.Tensor, n_list: torch.Tensor, device: torch.device):
"""
Generate connections until fully connect all the nodes in the graph.
If the graph is not fully connected, it might generate inaccurate results.
"""
p_value = torch.tensor(1)
affinity_mat = getAffinityGraphMat(mat, p_value)
for i, p_value in enumerate(n_list):
fully_connected = isGraphFullyConnected(affinity_mat, device)
affinity_mat = getAffinityGraphMat(mat, p_value)
if fully_connected or p_value > max_N:
break
return affinity_mat, p_value
@torch.jit.script
def getRepeatedList(mapping_argmat: torch.Tensor, score_mat_size: torch.Tensor):
"""
Count the numbers in the mapping dictionary and create lists that contain
repeated indices that will be used for creating a repeated affinity matrix.
This repeated matrix is then used for fusing multiple affinity values.
"""
repeat_list = torch.zeros(score_mat_size, dtype=torch.int32)
idxs, counts = torch.unique(mapping_argmat, return_counts=True)
repeat_list[idxs] = counts.int()
return repeat_list
def get_argmin_mat(uniq_scale_dict: dict):
"""
Calculate the mapping between the base scale and other scales. A segment from a longer scale is
repeatedly mapped to a segment from a shorter scale or the base scale.
Args:
uniq_scale_dict (dict) :
Dictionary of embeddings and timestamps for each scale.
Returns:
session_scale_mapping_dict (dict) :
Dictionary containing argmin arrays indexed by scale index.
"""
scale_list = sorted(list(uniq_scale_dict.keys()))
segment_anchor_dict = {}
for scale_idx in scale_list:
time_stamp_list = uniq_scale_dict[scale_idx]['time_stamps']
time_stamps_float = torch.tensor([[float(x.split()[0]), float(x.split()[1])] for x in time_stamp_list])
segment_anchor_dict[scale_idx] = torch.mean(time_stamps_float, dim=1)
base_scale_idx = max(scale_list)
base_scale_anchor = segment_anchor_dict[base_scale_idx]
session_scale_mapping_dict = {}
for scale_idx in scale_list:
curr_scale_anchor = segment_anchor_dict[scale_idx]
curr_mat = torch.tile(curr_scale_anchor, (base_scale_anchor.shape[0], 1))
base_mat = torch.tile(base_scale_anchor, (curr_scale_anchor.shape[0], 1)).t()
argmin_mat = torch.argmin(torch.abs(curr_mat - base_mat), dim=1)
session_scale_mapping_dict[scale_idx] = argmin_mat
return session_scale_mapping_dict
def getMultiScaleCosAffinityMatrix(uniq_embs_and_timestamps: dict, device: torch.device = torch.device('cpu')):
"""
Calculate cosine similarity values among speaker embeddings for each scale then
apply multiscale weights to calculate the fused similarity matrix.
Args:
uniq_embs_and_timestamps: (dict)
The dictionary containing embeddings, timestamps and multiscale weights.
If uniq_embs_and_timestamps contains only one scale, single scale diarization
is performed.
Returns:
fused_sim_d (torch.tensor):
This function generates an affinity matrix that is obtained by calculating
the weighted sum of the affinity matrices from the different scales.
base_scale_emb (torch.tensor):
The base scale embedding (the embeddings from the finest scale)
"""
uniq_scale_dict = uniq_embs_and_timestamps['scale_dict']
base_scale_idx = max(uniq_scale_dict.keys())
base_scale_emb = uniq_scale_dict[base_scale_idx]['embeddings']
multiscale_weights = uniq_embs_and_timestamps['multiscale_weights'].float().to(device)
score_mat_list, repeated_tensor_list = [], []
session_scale_mapping_dict = get_argmin_mat(uniq_scale_dict)
for scale_idx in sorted(uniq_scale_dict.keys()):
mapping_argmat = session_scale_mapping_dict[scale_idx]
emb_t = uniq_scale_dict[scale_idx]['embeddings'].half().to(device)
score_mat_torch = getCosAffinityMatrix(emb_t)
repeat_list = getRepeatedList(mapping_argmat, torch.tensor(score_mat_torch.shape[0])).to(device)
repeated_tensor_0 = torch.repeat_interleave(score_mat_torch, repeats=repeat_list, dim=0)
repeated_tensor_1 = torch.repeat_interleave(repeated_tensor_0, repeats=repeat_list, dim=1)
repeated_tensor_list.append(repeated_tensor_1)
repp = torch.stack(repeated_tensor_list).float()
fused_sim_d = torch.matmul(repp.permute(2, 1, 0), multiscale_weights.t()).squeeze(2).t()
return fused_sim_d, base_scale_emb
@torch.jit.script
def getCosAffinityMatrix(_emb: torch.Tensor):
"""
Calculate cosine similarity values among speaker embeddings then min-max normalize
the affinity matrix.
"""
emb = _emb.half()
sim_d = cos_similarity(emb, emb)
sim_d = ScalerMinMax(sim_d)
return sim_d
@torch.jit.script
def getLaplacian(X: torch.Tensor):
"""
Calculate a laplacian matrix from an affinity matrix X.
"""
X.fill_diagonal_(0)
D = torch.sum(torch.abs(X), dim=1)
D = torch.diag_embed(D)
L = D - X
return L
@torch.jit.script
def eigDecompose(laplacian: torch.Tensor, cuda: bool, device: torch.device = torch.device('cpu')):
"""
Calculate eigenvalues and eigenvectors from the Laplacian matrix.
"""
if cuda:
if device is None:
device = torch.cuda.current_device()
laplacian = laplacian.float().to(device)
else:
laplacian = laplacian.float()
lambdas, diffusion_map = eigh(laplacian)
return lambdas, diffusion_map
@torch.jit.script
def getLamdaGaplist(lambdas: torch.Tensor):
"""
Calculate the gaps between lambda values.
"""
if torch.is_complex(lambdas):
lambdas = torch.real(lambdas)
return lambdas[1:] - lambdas[:-1]
@torch.jit.script
def addAnchorEmb(emb: torch.Tensor, anchor_sample_n: int, anchor_spk_n: int, sigma: float):
"""
Add randomly generated synthetic embeddings to make eigen analysis more stable.
We refer to these embeddings as anchor embeddings.
emb (torch.tensor):
The input embedding from the embedding extractor.
anchor_sample_n (int):
Number of embedding samples per speaker.
anchor_sample_n = 10 is recommended.
anchor_spk_n (int):
Number of speakers for synthetic embedding.
anchor_spk_n = 3 is recommended.
sigma (int):
The amplitude of synthetic noise for each embedding vector.
If the sigma value is too small, under-counting could happen.
If the sigma value is too large, over-counting could happen.
sigma = 50 is recommended.
"""
emb_dim = emb.shape[1]
std_org = torch.std(emb, dim=0)
new_emb_list = []
for _ in range(anchor_spk_n):
emb_m = torch.tile(torch.randn(1, emb_dim), (anchor_sample_n, 1))
emb_noise = torch.randn(anchor_sample_n, emb_dim).T
emb_noise = torch.matmul(
torch.diag(std_org), emb_noise / torch.max(torch.abs(emb_noise), dim=0)[0].unsqueeze(0)
).T
emb_gen = emb_m + sigma * emb_noise
new_emb_list.append(emb_gen)
new_emb_list.append(emb)
new_emb_np = torch.vstack(new_emb_list)
return new_emb_np
def getEnhancedSpeakerCount(
emb: torch.Tensor,
cuda: bool,
random_test_count: int = 5,
anchor_spk_n: int = 3,
anchor_sample_n: int = 10,
sigma: float = 50,
):
"""
Calculate the number of speakers using NME analysis with anchor embeddings.
emb (torch.Tensor):
The input embedding from the embedding extractor.
cuda (bool):
Use cuda for the operations if cuda==True.
random_test_count (int):
Number of trials of the enhanced counting with randomness.
The higher the count, the more accurate the enhanced counting is.
anchor_spk_n (int):
Number of speakers for synthetic embedding.
anchor_spk_n = 3 is recommended.
anchor_sample_n (int):
Number of embedding samples per speaker.
anchor_sample_n = 10 is recommended.
sigma (float):
The amplitude of synthetic noise for each embedding vector.
If the sigma value is too small, under-counting could happen.
If the sigma value is too large, over-counting could happen.
sigma = 50 is recommended.
"""
est_num_of_spk_list = []
for seed in range(random_test_count):
torch.manual_seed(seed)
emb_aug = addAnchorEmb(emb, anchor_sample_n, anchor_spk_n, sigma)
mat = getCosAffinityMatrix(emb_aug)
nmesc = NMESC(
mat,
max_num_speaker=emb.shape[0],
max_rp_threshold=0.15,
sparse_search=True,
sparse_search_volume=50,
fixed_thres=-1.0,
NME_mat_size=300,
cuda=cuda,
)
est_num_of_spk, _ = nmesc.NMEanalysis()
est_num_of_spk_list.append(est_num_of_spk)
ctt = Counter(est_num_of_spk_list)
comp_est_num_of_spk = max(ctt.most_common(1)[0][0] - anchor_spk_n, 1)
return comp_est_num_of_spk
@torch.jit.script
def estimateNumofSpeakers(affinity_mat: torch.Tensor, max_num_speaker: int, cuda: bool = False):
"""
Estimate the number of speakers using eigendecomposition on the Laplacian Matrix.
Args:
affinity_mat: (torch.tensor)
N by N affinity matrix
max_num_speaker: (int)
Maximum number of clusters to consider for each session
cuda: (bool)
If cuda available eigendecomposition is computed on GPUs.
Returns:
num_of_spk: (torch.tensor)
The estimated number of speakers
lambdas: (torch.tensor)
The lambda values from eigendecomposition
lambda_gap: (torch.tensor)
The gap between the lambda values from eigendecomposition
"""
laplacian = getLaplacian(affinity_mat)
lambdas, _ = eigDecompose(laplacian, cuda)
lambdas = torch.sort(lambdas)[0]
lambda_gap = getLamdaGaplist(lambdas)
num_of_spk = torch.argmax(lambda_gap[: min(max_num_speaker, lambda_gap.shape[0])]) + 1
return num_of_spk, lambdas, lambda_gap
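# Illustrative sketch (not part of the original file): estimateNumofSpeakers() on a toy
# block-diagonal affinity matrix with two disconnected groups. The largest eigengap of the
# graph Laplacian points at two speakers.
def _sketch_estimate_num_speakers():
    block = torch.ones(5, 5)
    affinity = torch.block_diag(block, block)  # 10 x 10, two obvious groups
    est_num_of_spk, _, _ = estimateNumofSpeakers(affinity, max_num_speaker=8)
    return est_num_of_spk                      # expected: tensor(2)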
@torch.jit.script
class SpectralClustering:
"""
Perform spectral clustering by calculating spectral embeddings then run k-means clustering
algorithm on the spectral embeddings.
"""
def __init__(
self,
n_clusters: int = 8,
random_state: int = 0,
n_random_trials: int = 1,
cuda: bool = False,
device: torch.device = torch.device('cpu'),
):
"""
Initialize the variables needed for spectral clustering and k-means++.
Args:
n_clusters (int):
Number of the estimated (or oracle) number of speakers
random_state (int):
Random seed that determines a random state of k-means initialization.
n_random_trials (int):
Number of trials with different random seeds for k-means initialization.
k-means++ algorithm is executed for multiple times then the final result
is obtained by taking a majority vote.
cuda (bool):
if cuda=True, spectral clustering is done on GPU.
device (torch.device):
Torch device variable
"""
self.n_clusters = n_clusters
self.random_state = random_state
self.n_random_trials = max(n_random_trials, 1)
self.cuda = cuda
self.device = device
def predict(self, X):
"""
Call self.clusterSpectralEmbeddings() function to predict cluster labels.
Args:
X (torch.tensor):
Affinity matrix input
Returns:
labels (torch.tensor):
clustering label output
"""
if X.shape[0] != X.shape[1]:
raise ValueError("The affinity matrix is not a square matrix.")
labels = self.clusterSpectralEmbeddings(X, cuda=self.cuda, device=self.device)
return labels
def clusterSpectralEmbeddings(self, affinity, cuda: bool = False, device: torch.device = torch.device('cpu')):
"""
Perform k-means clustering on spectral embeddings. To alleviate the effect of randomness,
k-means clustering is performed for (self.n_random_trials) times then the final labels are obtained
by taking a majority vote. If speed is the major concern, self.n_random_trials should be set to 1.
n_random_trials=30 is recommended to see an improved result.
Args:
affinity (torch.tensor):
Affinity matrix input
cuda (torch.bool):
Use cuda for spectral clustering if cuda=True
device (torch.device):
Torch device variable
Returns:
labels (torch.tensor):
clustering label output
"""
spectral_emb = self.getSpectralEmbeddings(affinity, n_spks=self.n_clusters, cuda=cuda)
labels_set = []
for random_state_seed in range(self.random_state, self.random_state + self.n_random_trials):
_labels = kmeans_torch(
X=spectral_emb, num_clusters=self.n_clusters, random_state=random_state_seed, device=device
)
labels_set.append(_labels)
stacked_labels = torch.stack(labels_set)
label_index = torch.mode(torch.mode(stacked_labels, 0)[1])[0]
labels = stacked_labels[label_index]
return labels
def getSpectralEmbeddings(self, affinity_mat: torch.Tensor, n_spks: int = 8, cuda: bool = False):
"""
Calculate eigenvalues and eigenvectors to extract spectral embeddings.
Args:
affinity_mat (torch.Tensor):
Affinity matrix input
n_spks (int):
Number of the estimated (or oracle) speakers
cuda (bool):
Use cuda for the eigendecomposition if cuda=True
Returns:
embedding (torch.Tensor):
Spectral embedding matrix used as the k-means input
"""
laplacian = getLaplacian(affinity_mat)
lambdas_, diffusion_map_ = eigDecompose(laplacian, cuda)
diffusion_map = diffusion_map_[:, :n_spks]
inv_idx = torch.arange(diffusion_map.size(1) - 1, -1, -1).long()
embedding = diffusion_map.T[inv_idx, :]
return embedding[:n_spks].T
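# Illustrative sketch (assumption, not in the original file): running the scripted
# SpectralClustering class above on a small, already binarized affinity matrix.
# The helper name and the toy matrix are placeholders for demonstration.
def _example_spectral_clustering():
    import torch

    block = torch.ones(4, 4)
    affinity = torch.block_diag(block, block)  # two well-separated clusters
    clusterer = SpectralClustering(n_clusters=2, n_random_trials=3, cuda=False, device=torch.device('cpu'))
    labels = clusterer.predict(affinity)
    # Expected: two distinct label values, one per 4x4 block.
    return labels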
@torch.jit.script
class NMESC:
"""
Normalized Maximum Eigengap based Spectral Clustering (NME-SC)
uses Eigengap analysis to get an estimated p-value for
affinity binarization and an estimated number of speakers.
p_value (also referred to as p_neighbors) is used for taking the
top p affinity values in each row and converting them to 1 while
converting the rest of the values to 0.
p_value can also be tuned on a development set without performing
NME-analysis. Fixing p_value gives significantly faster clustering,
but the performance is then tied to the development set.
References:
<NAME> al., Auto-Tuning Spectral Clustering for Speaker Diarization
Using Normalized Maximum Eigengap, IEEE Signal Processing Letters 27 (2019),
https://arxiv.org/abs/2003.02405
Args:
Please refer to def __init__().
Methods:
NMEanalysis():
Performs NME-analysis to estimate p_value and the number of speakers
subsampleAffinityMat(NME_mat_size):
Subsamples the number of speakers to reduce the computational load
getPvalueList():
Generates a list containing p-values that need to be examined.
getEigRatio(p_neighbors):
Calculates g_p, which is a ratio between p_neighbors and the maximum eigengap
getLamdaGaplist(lambdas):
Calculates lambda gap values from an array containing lambda values
estimateNumofSpeakers(affinity_mat):
Estimates the number of speakers using lambda gap list
"""
def __init__(
self,
mat,
max_num_speaker: int = 10,
max_rp_threshold: float = 0.15,
sparse_search: bool = True,
sparse_search_volume: int = 30,
use_subsampling_for_NME: bool = True,
fixed_thres: float = 0.0,
cuda: bool = False,
NME_mat_size: int = 512,
device: torch.device = torch.device('cpu'),
):
"""
Args:
mat: (torch.tensor)
Cosine similarity matrix calculated from the provided speaker embeddings.
max_num_speaker: (int)
Maximum number of speakers for estimating number of speakers.
Shows stable performance under 20.
max_rp_threshold: (float)
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.15.
sparse_search: (bool)
To increase the speed of parameter estimation, sparse_search=True
limits the number of p_values we search.
sparse_search_volume: (int)
Number of p_values we search during NME analysis.
Default is 30. The lower the value, the faster NME-analysis becomes.
However, a value lower than 20 might cause a poor parameter estimation.
use_subsampling_for_NME: (bool)
Use subsampling to reduce the computational complexity.
Default is True.
fixed_thres: (float)
A fixed threshold which can be used instead of estimating the
threshold with NME analysis. If fixed_thres is greater than 0.0,
the NME-analysis part is skipped.
cuda: (bool)
Use cuda for Eigen decomposition if cuda=True.
NME_mat_size: (int)
Targeted size of matrix for NME analysis.
"""
self.max_num_speaker: int = max_num_speaker
self.max_rp_threshold = max_rp_threshold
self.use_subsampling_for_NME = use_subsampling_for_NME
self.NME_mat_size: int = NME_mat_size
self.sparse_search = sparse_search
self.sparse_search_volume = sparse_search_volume
self.fixed_thres: float = fixed_thres
self.cuda: bool = cuda
self.eps = 1e-10
self.max_N = torch.tensor(0)
self.mat = mat
self.p_value_list: torch.Tensor = torch.tensor(0)
self.device = device
def NMEanalysis(self):
"""
Perform NME-analysis: optionally subsample the affinity matrix, scan the candidate p_values,
and estimate the number of speakers and the binarization parameter (p_hat_value).
"""
if self.use_subsampling_for_NME:
subsample_ratio = self.subsampleAffinityMat(self.NME_mat_size)
else:
subsample_ratio = torch.tensor(1)
# Scans p_values and find a p_value that generates
# the smallest g_p value.
eig_ratio_list = []
est_spk_n_dict: Dict[int, torch.Tensor] = {}
self.p_value_list = self.getPvalueList()
for p_value in self.p_value_list:
est_num_of_spk, g_p = self.getEigRatio(p_value)
est_spk_n_dict[p_value.item()] = est_num_of_spk
eig_ratio_list.append(g_p)
index_nn = torch.argmin(torch.tensor(eig_ratio_list))
rp_p_value = self.p_value_list[index_nn]
affinity_mat = getAffinityGraphMat(self.mat, rp_p_value)
# Checks whether the affinity graph is fully connected.
# If not, it adds a minimum number of connections to make it fully connected.
if not isGraphFullyConnected(affinity_mat, device=self.device):
affinity_mat, rp_p_value = getMinimumConnection(
self.mat, self.max_N, self.p_value_list, device=self.device
)
p_hat_value = (subsample_ratio * rp_p_value).type(torch.int)
est_num_of_spk = est_spk_n_dict[rp_p_value.item()]
return est_num_of_spk, p_hat_value
def subsampleAffinityMat(self, NME_mat_size: int):
"""
Perform subsampling of affinity matrix.
This subsampling reduces the computational complexity; it is not meant to improve performance.
The smaller NME_mat_size is,
- the higher the chance of missing a speaker.
- the faster the p-value estimation (based on eigendecomposition).
The recommended NME_mat_size is 250~750.
However, if some speakers talk for only a very short period of time in the recording,
this subsampling might make the system miss those underrepresented speakers.
Use this variable with caution.
Args:
NME_mat_size: (int)
The targeted matrix size
Returns:
subsample_ratio : (float)
The ratio between NME_mat_size and the original matrix size
"""
subsample_ratio = torch.max(torch.tensor(1), torch.tensor(self.mat.shape[0] / NME_mat_size)).type(torch.int)
self.mat = self.mat[:: subsample_ratio.item(), :: subsample_ratio.item()]
return subsample_ratio
def getEigRatio(self, p_neighbors: int):
"""
For a given p_neighbors value, calculate g_p, which is a ratio between p_neighbors and the
maximum eigengap values.
References:
<NAME> et al., Auto-Tuning Spectral Clustering for Speaker Diarization Using
Normalized Maximum Eigengap, IEEE Signal Processing Letters 27 (2019),
https://arxiv.org/abs/2003.02405
Args:
p_neighbors: (int)
Determines how many binary graph connections we want to keep for each row.
Returns:
est_num_of_spk: (int)
Estimated number of speakers
g_p: (float)
The ratio between p_neighbors value and the maximum eigen gap value.
"""
affinity_mat = getAffinityGraphMat(self.mat, p_neighbors)
est_num_of_spk, lambdas, lambda_gap_list = estimateNumofSpeakers(affinity_mat, self.max_num_speaker, self.cuda)
arg_sorted_idx = torch.argsort(lambda_gap_list[: self.max_num_speaker], descending=True)
max_key = arg_sorted_idx[0]
max_eig_gap = lambda_gap_list[max_key] / (max(lambdas) + self.eps)
g_p = (p_neighbors / self.mat.shape[0]) / (max_eig_gap + self.eps)
return est_num_of_spk, g_p
def getPvalueList(self):
"""
Generates a p-value (p_neighbour) list for searching.
"""
if self.fixed_thres > 0.0:
# Keep p_value_list one-dimensional so that indexing and iteration over it work.
p_value_list = torch.floor(torch.tensor(self.mat.shape[0] * self.fixed_thres)).type(torch.int).unsqueeze(0)
self.max_N = p_value_list[0]
else:
self.max_N = torch.floor(torch.tensor(self.mat.shape[0] * self.max_rp_threshold)).type(torch.int)
if self.sparse_search:
N = torch.min(self.max_N, torch.tensor(self.sparse_search_volume).type(torch.int))
p_value_list = torch.unique(torch.linspace(start=1, end=self.max_N, steps=N).type(torch.int))
else:
p_value_list = torch.arange(1, self.max_N)
return p_value_list
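# Illustrative sketch (assumption, not in the original file): estimating the number of
# speakers and the binarization parameter with the NMESC class above, then binarizing
# the graph with getAffinityGraphMat, mirroring what COSclustering below does.
# The helper name and the toy similarity matrix are placeholders.
def _example_nmesc_analysis():
    import torch

    block = torch.ones(10, 10)
    # Toy cosine-similarity matrix: within-cluster similarity 1.0, cross-cluster 0.1.
    mat = torch.block_diag(block, block) * 0.9 + 0.1 * torch.ones(20, 20)
    nmesc = NMESC(mat, max_num_speaker=4, sparse_search_volume=10, cuda=False, device=torch.device('cpu'))
    est_num_of_spk, p_hat_value = nmesc.NMEanalysis()
    affinity_mat = getAffinityGraphMat(mat, p_hat_value)
    return est_num_of_spk, affinity_mat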
def COSclustering(
uniq_embs_and_timestamps,
oracle_num_speakers=None,
max_num_speaker: int = 8,
min_samples_for_NMESC: int = 6,
enhanced_count_thres: int = 80,
max_rp_threshold: float = 0.15,
sparse_search_volume: int = 30,
fixed_thres: float = 0.0,
cuda=False,
):
"""
Clustering method for speaker diarization based on cosine similarity.
NME-SC part is converted to torch.tensor based operations in NeMo 1.9.
Args:
uniq_embs_and_timestamps: (dict)
The dictionary containing embeddings, timestamps and multiscale weights.
If uniq_embs_and_timestamps contains only one scale, single scale diarization
is performed.
oracle_num_speakers: (int or None)
The oracle number of speakers if known else None
max_num_speaker: (int)
The maximum number of clusters to consider for each session
min_samples_for_NMESC: (int)
The minimum number of samples required for NME clustering. This avoids
zero p_neighbour_lists. If the input has fewer segments than min_samples,
it is directed to the enhanced speaker counting mode.
enhanced_count_thres: (int)
For short audio recordings (under 60 seconds), the clustering algorithm cannot
accumulate enough speaker profile information for each cluster.
Thus, getEnhancedSpeakerCount() employs anchor embeddings (dummy representations)
to mitigate the effect of cluster sparsity.
enhanced_count_thres = 80 is recommended.
max_rp_threshold: (float)
Limits the range of parameter search.
Clustering performance can vary depending on this range.
Default is 0.15.
sparse_search_volume: (int)
Number of p_values we search during NME analysis.
Default is 30. The lower the value, the faster NME-analysis becomes.
Lower than 20 might cause a poor parameter estimation.
fixed_thres: (float)
If fixed_thres value is provided, NME-analysis process will be skipped.
This value should be optimized on a development set to obtain a quality result.
Default is 0.0, which performs NME-analysis to estimate the threshold.
Returns:
Y: (torch.tensor[int])
Speaker label for each segment.
"""
device = torch.device("cuda") if cuda else torch.device("cpu")
# Get base-scale (the highest index) information from uniq_embs_and_timestamps.
uniq_scale_dict = uniq_embs_and_timestamps['scale_dict']
emb = uniq_scale_dict[max(uniq_scale_dict.keys())]['embeddings']
if emb.shape[0] == 1:
return torch.zeros((1,), dtype=torch.int32)
elif emb.shape[0] <= max(enhanced_count_thres, min_samples_for_NMESC) and oracle_num_speakers is None:
est_num_of_spk_enhanced = getEnhancedSpeakerCount(emb, cuda)
else:
est_num_of_spk_enhanced = None
if oracle_num_speakers:
max_num_speaker = oracle_num_speakers
mat, emb = getMultiScaleCosAffinityMatrix(uniq_embs_and_timestamps, device)
nmesc = NMESC(
mat,
max_num_speaker=max_num_speaker,
max_rp_threshold=max_rp_threshold,
sparse_search=True,
sparse_search_volume=sparse_search_volume,
fixed_thres=fixed_thres,
NME_mat_size=300,
cuda=cuda,
device=device,
)
if emb.shape[0] > min_samples_for_NMESC:
est_num_of_spk, p_hat_value = nmesc.NMEanalysis()
affinity_mat = getAffinityGraphMat(mat, p_hat_value)
else:
affinity_mat = mat
if oracle_num_speakers:
est_num_of_spk = oracle_num_speakers
elif est_num_of_spk_enhanced:
est_num_of_spk = est_num_of_spk_enhanced
spectral_model = SpectralClustering(n_clusters=est_num_of_spk, cuda=cuda, device=device)
Y = spectral_model.predict(affinity_mat)
return Y.cpu().numpy()
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/data_processor/assistant_data_processor.py
|
<reponame>gkucsko/NeMo
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
__all__ = ['DialogueAssistantDataProcessor']
class DialogueAssistantDataProcessor(DialogueDataProcessor):
"""Data Processor for Assistant dialogues."""
def __init__(self, data_dir: str, tokenizer: object, cfg):
"""
Constructs DialogueAssistantDataProcessor
Args:
data_dir: path to data directory
tokenizer: tokenizer object
"""
self.data_dir = data_dir
self._tokenizer = tokenizer
self.cfg = cfg
self.intents = self.open_file("dict.intents.csv")
if self.cfg.preprocess_intent_function == 'remove_domain':
self.intents = [
DialogueAssistantDataProcessor.normalize_zero_shot_intent(intent) for intent in self.intents
]
self.slots = self.open_file("dict.slots.csv")
(
bio_slot_ids_to_unified_slot_ids,
unified_slots,
) = DialogueAssistantDataProcessor.map_bio_format_slots_to_unified_slots(self.slots)
self.slots = unified_slots
self.bio_slot_ids_to_unified_slot_ids = bio_slot_ids_to_unified_slot_ids
self.services = sorted(list(set([intent.split('_')[0] for intent in self.intents])))
self.empty_slot_id = [str(idx) for idx, slot_name in enumerate(self.slots) if slot_name == "O"][0]
@staticmethod
def normalize_zero_shot_intent(label):
label = label.split('.')[1]
if label == 'nomatch':
return 'no match'
else:
return label.replace('_', ' ')
def open_file(self, filename):
"""
Reads file into a list
"""
filename = os.path.join(self.data_dir, filename)
with open(filename, "r", encoding="UTF-8") as f:
lines = [i.strip() for i in f.readlines()]
return lines
@staticmethod
def get_continuous_slots(slot_ids, empty_slot_id, bio_slot_ids_to_unified_slot_ids):
"""
Extract continuous spans of slot_ids.
To accommodate slots with distinct labels for B-label1 and I-label1,
slot_id = bio_slot_ids_to_unified_slot_ids[slot_id] is applied to map both to label1.
Args:
slot_ids: list of ints representing the slot of each word token.
For instance, 54 54 54 54 54 54 54 54 18 54 44 44 54 46 46 54 12
corresponds to "please set an alarm clock for my next meeting with the team at three pm next friday".
Except for the empty_slot_id (54 in this case), we extract the continuous spans of tokens,
each with a start position and an exclusive end position,
e.g. {18: [9, 10], 44: [11, 13], 46: [14, 16], 12: [17, 18]}
"""
slot_id_stack = []
position_stack = []
for i in range(len(slot_ids)):
slot_id = slot_ids[i]
slot_id = bio_slot_ids_to_unified_slot_ids[slot_id]
if not slot_id_stack or slot_id != slot_id_stack[-1]:
slot_id_stack.append(slot_id)
position_stack.append([])
position_stack[-1].append(i)
slot_id_to_start_and_exclusive_end = {
slot_id_stack[i]: [position_stack[i][0], position_stack[i][-1] + 1]
for i in range(len(position_stack))
if slot_id_stack[i] != empty_slot_id
}
return slot_id_to_start_and_exclusive_end
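# A small illustrative walkthrough (comment-only sketch, not part of the original code):
# with slot_ids = ["54", "54", "18", "18", "54"], empty_slot_id = "54" and an identity
# bio_slot_ids_to_unified_slot_ids mapping, the method above would return {"18": [2, 4]},
# i.e. the non-empty slot "18" spans token positions 2 and 3 (exclusive end 4).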
@staticmethod
def map_bio_format_slots_to_unified_slots(slots):
"""
Maps BIO format slots to unified slots (meaning that B-alarm_time and I-alarm_time both map to alarm_time).
Called even when slots do not contain BIO tags, for a unified interface;
in that case slots == unified_slots and bio_slot_ids_to_unified_slot_ids is an identity mapping, i.e. {"0": "0", "1": "1"}.
"""
bio_slot_ids_to_unified_slot_ids = {}
unified_slots = []
unified_idx = -1
for idx, slot in enumerate(slots):
if slot.replace('I-', '').replace('B-', '') not in unified_slots:
unified_idx += 1
unified_slots.append(slot.replace('I-', '').replace('B-', ''))
bio_slot_ids_to_unified_slot_ids[str(idx)] = str(unified_idx)
return bio_slot_ids_to_unified_slot_ids, unified_slots
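# Comment-only sketch (not part of the original code): for slots = ["B-time", "I-time", "O"],
# the method above would return ({"0": "0", "1": "0", "2": "1"}, ["time", "O"]); the unit tests
# in tests/collections/nlp/test_dialogue.py exercise the same behavior.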
def get_dialog_examples(self, dataset_split: str):
"""
Process raw files into DialogueInputExample
Args:
dataset_split: {train, dev, test}
For the assistant dataset, there is no explicit dev set (the test set would otherwise be used as the dev set).
Therefore, this function creates a dev set and a new train set from the train set.
This is done by taking every 10th example and putting it into the dev set,
with all other examples going into the new train set.
"""
examples = []
dataset_split_print = {"train": "train", "dev": "train", "test": "test"}
raw_examples_intent = self.open_file("{}.tsv".format(dataset_split_print[dataset_split]))
# removes header of tsv file
raw_examples_intent = raw_examples_intent[1:]
raw_examples_slots = self.open_file("{}_slots.tsv".format(dataset_split_print[dataset_split]))
if dataset_split in ["train", "dev"]:
train_idx = []
dev_idx = []
for idx in range(len(raw_examples_intent)):
if idx % 10 == 0:
dev_idx.append(idx)
else:
train_idx.append(idx)
if dataset_split == "train":
raw_examples_intent = [raw_examples_intent[idx] for idx in train_idx]
raw_examples_slots = [raw_examples_slots[idx] for idx in train_idx]
elif dataset_split == "dev":
raw_examples_intent = [raw_examples_intent[idx] for idx in dev_idx]
raw_examples_slots = [raw_examples_slots[idx] for idx in dev_idx]
for i in range(len(raw_examples_intent)):
utterance, intent_id = raw_examples_intent[i].split('\t')
slot_ids = raw_examples_slots[i].split()
utterance_tokens = utterance.split()
intent = self.intents[int(intent_id)]
slot_id_to_start_and_exclusive_end = DialogueAssistantDataProcessor.get_continuous_slots(
slot_ids, self.empty_slot_id, self.bio_slot_ids_to_unified_slot_ids
)
slot_to_start_and_exclusive_end = {
self.slots[int(slot_id)]: position for slot_id, position in slot_id_to_start_and_exclusive_end.items()
}
slot_to_words = {
slot: ' '.join(utterance_tokens[position[0] : position[1]])
for slot, position in slot_to_start_and_exclusive_end.items()
}
input_example = {
"utterance": utterance,
"labels": {"service": intent.split('_')[0], "intent": intent, "slots": slot_to_words},
"label_positions": {
"slots": {
slot: {"start": position[0], "exclusive_end": position[1], "slot": slot,}
for slot, position in slot_to_start_and_exclusive_end.items()
}
},
"possible_labels": {
"service": self.services,
"intent": self.intents,
"slots": {
# this dataset does not support categorical slots (i.e. only extractive slots)
# therefore use empty list for all values
slot: []
for slot in self.slots
},
},
}
example = DialogueInputExample(input_example)
examples.append(example)
return examples
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
return self.get_dialog_examples("train")
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev")
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
return self.get_dialog_examples("test")
|
gkucsko/NeMo
|
examples/nlp/dialogue/dialogue.py
|
<filename>examples/nlp/dialogue/dialogue.py
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example of how to train and test dialogue models in NeMo.
***Setting the configs***
The model and the PT trainer are defined in a config file that declares multiple important sections.
The most important ones are:
model: All arguments that are related to the Model - model, loss, optimizer,
schedulers, and datasets/data loaders.
trainer: Any argument to be passed to PyTorch Lightning including number of epochs, number of GPUs,
precision level, etc.
This script uses the `./conf/dialogue_config.yaml` config file by default. You may update the config file
directly, or point to another config file via the command-line argument `--config-name=CONFIG_FILE_PATH`.
***Model Training***
python dialogue.py
do_training=True
model.dataset.data_dir=<DATA_DIR_WITH_JSON_DATA>
model.dataset.dialogues_example_dir=<DAT_DIR_FOR_CACHING_INTERMEDIATE_AND_SAVING_PREDICTIONS>
model.dataset.task=<TASK - see conf/dialogue_config.yaml for full list> e.g. sgd
model.language_model.pretrained_model_name=<LANGUAGE_MODEL - see conf/dialogue_config.yaml for full list> e.g. gpt2
trainer.devices=[<DEVICE_IDS_TO_USE>]
***Model Evaluation***
command as above, change do_training=False
"""
import os
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.dialogue.dialogue_gpt_classification_model import DialogueGPTClassificationModel
from nemo.collections.nlp.models.dialogue.dialogue_gpt_generation_model import DialogueGPTGenerationModel
from nemo.collections.nlp.models.dialogue.dialogue_nearest_neighbour_model import DialogueNearestNeighbourModel
from nemo.collections.nlp.models.dialogue.dialogue_s2s_generation_model import DialogueS2SGenerationModel
from nemo.collections.nlp.models.dialogue.dialogue_zero_shot_intent_model import DialogueZeroShotIntentModel
from nemo.collections.nlp.models.dialogue.intent_slot_classification_model import IntentSlotClassificationModel
from nemo.collections.nlp.models.dialogue.sgdqa_model import SGDQAModel
from nemo.collections.nlp.modules.common.megatron.megatron_utils import compute_model_parallel_rank
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPPlugin
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="dialogue_config")
def main(cfg: DictConfig) -> None:
pl.seed_everything(42)
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
try:
plugin = NLPDDPPlugin()
except (ImportError, ModuleNotFoundError):
plugin = None
trainer = pl.Trainer(**cfg.trainer, plugins=plugin)
exp_manager(trainer, cfg.get("exp_manager", None))
app_state = AppState()
if cfg.model.tensor_model_parallel_size > 1:
app_state.model_parallel_size = cfg.model.tensor_model_parallel_size
app_state.model_parallel_rank = compute_model_parallel_rank(trainer.local_rank, app_state.model_parallel_size)
if 'bert' in cfg.model.language_model.pretrained_model_name:
if cfg.model.dataset.task == 'sgd':
if cfg.model.original_nemo_checkpoint is not None:
model_class = DialogueZeroShotIntentModel
else:
model_class = SGDQAModel
elif cfg.model.dataset.task in ['zero_shot', 'design']:
model_class = DialogueZeroShotIntentModel
else:
model_class = IntentSlotClassificationModel
elif 'gpt' in cfg.model.language_model.pretrained_model_name.lower():
if cfg.model.dataset.task in ['ms_marco', 'mellon_qa']:
model_class = DialogueGPTGenerationModel
else:
model_class = DialogueGPTClassificationModel
elif (
'bart' in cfg.model.language_model.pretrained_model_name.lower()
or 't5' in cfg.model.language_model.pretrained_model_name.lower()
):
# please use bf16/32 with t5-large and above
# see https://github.com/huggingface/transformers/pull/10956
model_class = DialogueS2SGenerationModel
elif 'sentence-transformers' in cfg.model.language_model.pretrained_model_name.lower():
model_class = DialogueNearestNeighbourModel
if cfg.pretrained_model or (cfg.model.nemo_path and os.path.exists(cfg.model.nemo_path)):
if cfg.pretrained_model:
logging.info(f'Loading pretrained model {cfg.pretrained_model}')
model = model_class.from_pretrained(cfg.pretrained_model)
else:
logging.info(f'Restoring model from {cfg.model.nemo_path}')
model = model_class.restore_from(cfg.model.nemo_path)
if cfg.do_training:
model.setup_training_data(train_data_config=cfg.model.train_ds)
model.setup_multiple_validation_data(val_data_config=cfg.model.validation_ds)
else:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
model = model_class(cfg.model, trainer=trainer)
if cfg.do_training:
trainer.fit(model)
if cfg.model.nemo_path:
model.save_to(cfg.model.nemo_path)
else:
data_dir = cfg.model.dataset.get('data_dir', None)
dialogues_example_dir = cfg.model.dataset.get('dialogues_example_dir', None)
if data_dir is None or dialogues_example_dir is None:
raise ValueError('No dataset directory provided. Skipping evaluation. ')
elif not os.path.exists(data_dir):
raise ValueError(f'{data_dir} is not found, skipping evaluation on the test set.')
else:
if hasattr(model, "update_data_dirs"):
model.update_data_dirs(data_dir=data_dir, dialogues_example_dir=dialogues_example_dir)
model._cfg.dataset = cfg.model.dataset
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.ds_item is not None:
eval_device = [cfg.trainer.devices[0]] if isinstance(cfg.trainer.devices, list) else 1
trainer = pl.Trainer(devices=eval_device, accelerator=cfg.trainer.accelerator, precision=16)
model.setup_multiple_test_data(test_data_config=cfg.model.test_ds)
if model.prepare_test(trainer):
trainer.test(model)
if __name__ == '__main__':
main()
|
gkucsko/NeMo
|
examples/nlp/dialogue/analyse_prediction_results.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
import numpy as np
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueGenerationMetrics
def read_jsonl(filename):
with open(filename, 'r', encoding="UTF-8") as f:
docs = [json.loads(line) for line in f.readlines()]
return docs
def get_incorrect_labels(docs):
incorrect_labels_docs = []
for doc in docs:
if doc["ground_truth_labels"] != doc["generated_labels"]:
incorrect_labels_docs.append(
{
"input": doc["input"],
"ground_truth_labels": doc["ground_truth_labels"],
"generated_labels": doc["generated_labels"],
}
)
return incorrect_labels_docs
def get_incorrect_slots(docs):
incorrect_slots_docs = []
for doc in docs:
if doc["ground_truth_slots"] != doc["generated_slots"]:
incorrect_slots_docs.append(
{
"input": doc["input"],
"ground_truth_slots": doc["ground_truth_slots"],
"generated_slots": doc["generated_slots"],
}
)
return incorrect_slots_docs
def sort_by_f1(docs):
for i in range(len(docs)):
doc = docs[i]
generated_field = doc["generated"]
ground_truth_field = doc["ground_truth"]
generated_field = remove_punctuation(generated_field.lower())
ground_truth_field = remove_punctuation(ground_truth_field.lower())
p, r, f1 = DialogueGenerationMetrics._get_one_f1(generated_field, ground_truth_field)
docs[i]["f1"] = f1
docs[i]["generated"] = generated_field
docs[i]["ground_truth"] = ground_truth_field
docs.sort(key=lambda x: x["f1"])
return docs
def remove_punctuation(sentence):
return re.sub(r'[^\w\s]', '', sentence)
def generation_main(filename):
docs = read_jsonl(filename)
docs = sort_by_f1(docs)
bleu = DialogueGenerationMetrics.get_bleu(
[doc["generated"] for doc in docs], [doc["ground_truth"] for doc in docs]
)
acc = np.mean([int(doc["generated"] == doc["ground_truth"]) for doc in docs]) * 100
f1 = np.mean([doc["f1"] for doc in docs])
print("Token level F1 is {:.3}".format(f1))
print("BLEU is {:.3}".format(bleu))
print("Exact match accuracy is {:.3}".format(acc))
for i in range(0):
print(docs[i])
def classification_main(filename):
docs = read_jsonl(filename)
incorrect_labels_docs = get_incorrect_labels(docs)
incorrect_slots_docs = get_incorrect_slots(docs)
print("{} / {} have incorrect labels".format(len(incorrect_labels_docs), len(docs)))
print("{} / {} have incorrect slots".format(len(incorrect_slots_docs), len(docs)))
for doc in incorrect_labels_docs:
print(doc)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--prediction_filename")
parser.add_argument("--mode", choices=['generation', 'classification'], default='classification')
args = parser.parse_args()
if args.mode == 'classification':
classification_main(args.prediction_filename)
else:
generation_main(args.prediction_filename)
|
gkucsko/NeMo
|
nemo_text_processing/text_normalization/ru/taggers/ordinal.py
|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/google/TextNormalizationCoveringGrammars
# Russian minimally supervised number grammar.
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst
from nemo_text_processing.text_normalization.ru.utils import get_abs_path
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class OrdinalFst(GraphFst):
"""
Finite state transducer for classifying ordinals, e.g.
"2" -> ordinal { integer: "второе" }
Args:
number_names: number_names for cardinal and ordinal numbers
alternative_formats: alternative format for cardinal and ordinal numbers
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, number_names: dict, alternative_formats: dict, deterministic=False):
super().__init__(name="ordinal", kind="classify", deterministic=deterministic)
one_thousand_alternative = alternative_formats['one_thousand_alternative']
separators = alternative_formats['separators']
ordinal = number_names['ordinal_number_names']
ordinal |= ordinal @ one_thousand_alternative
ordinal_numbers = separators @ ordinal
# to handle cases like 2-ая
endings = pynini.string_file(get_abs_path("data/numbers/ordinal_endings.tsv"))
not_dash = pynini.closure(pynini.difference(NEMO_SIGMA, "-"))
del_ending = pynini.cdrewrite(pynini.cross("-" + not_dash, ""), "", "[EOS]", NEMO_SIGMA)
ordinal_numbers_marked = (
((separators @ ordinal).optimize() + pynini.accep("-") + not_dash).optimize()
@ (NEMO_SIGMA + endings).optimize()
@ del_ending
).optimize()
self.ordinal_numbers = ordinal_numbers
# "03" -> remove leading zeros and verbalize
leading_zeros = pynini.closure(pynini.cross("0", ""))
self.ordinal_numbers_with_leading_zeros = (leading_zeros + ordinal_numbers).optimize()
final_graph = (ordinal_numbers | ordinal_numbers_marked).optimize()
final_graph = pynutil.insert("integer: \"") + final_graph + pynutil.insert("\"")
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
|
gkucsko/NeMo
|
examples/nlp/machine_translation/nmt_transformer_infer_megatron.py
|
<reponame>gkucsko/NeMo
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Given NMT model's .nemo file(s), this script can be used to translate text.
USAGE Example:
1. Obtain text file in src language. You can use sacrebleu to obtain standard test sets like so:
sacrebleu -t wmt14 -l de-en --echo src > wmt14-de-en.src
2. Translate:
python nmt_transformer_infer.py --model=[Path to .nemo file(s)] --srctext=wmt14-de-en.src --tgtout=wmt14-de-en.pre
"""
import os
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.machine_translation.megatron_nmt_model import MegatronNMTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPPlugin, NLPSaveRestoreConnector
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.model_utils import inject_model_parallel_rank
try:
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
ModelType = ApexGuardDefaults()
HAVE_APEX = False
@hydra_runner(config_path="conf", config_name="nmt_megatron_infer")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(plugins=NLPDDPPlugin(), **cfg.trainer)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
== cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
app_state = AppState()
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
if cfg.model_file is not None:
if not os.path.exists(cfg.model_file):
raise ValueError(f"Model file {cfg.model_file} does not exist")
model = MegatronNMTModel.restore_from(
restore_path=cfg.model_file, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector(),
)
elif cfg.checkpoint_dir is not None:
checkpoint_path = inject_model_parallel_rank(os.path.join(cfg.checkpoint_dir, cfg.checkpoint_name))
model = MegatronNMTModel.load_from_checkpoint(checkpoint_path, hparams_file=cfg.hparams_file, trainer=trainer)
else:
raise ValueError("need at least a nemo file or checkpoint dir")
model.freeze()
logging.info(f"Translating: {cfg.srctext}")
src_text = []
translations = []
with open(cfg.srctext, 'r') as src_f, open(cfg.tgtout, 'w') as tgt_f:
for line in src_f:
src_text.append(line.strip())
if len(src_text) == cfg.batch_size:
translations = model.translate(
text=src_text, source_lang=cfg.source_lang, target_lang=cfg.target_lang,
)
for translation in translations:
tgt_f.write(translation + "\n")
src_text = []
if len(src_text) > 0:
translations = model.translate(text=src_text, source_lang=cfg.source_lang, target_lang=cfg.target_lang,)
for translation in translations:
tgt_f.write(translation + "\n")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
|
gkucsko/NeMo
|
nemo/collections/nlp/modules/common/text_generation_server.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating text."""
import json
import threading
import torch
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from nemo.collections.nlp.modules.common.text_generation_utils import generate
from nemo.utils import logging
GENERATE_NUM = 0
lock = threading.Lock()
API_ALLOWED_KEYS = set(
[
'all_probs',
'sentences',
"task_ids",
"tokens_to_generate",
"temperature",
"add_BOS",
"greedy",
"top_k",
"top_p",
"repetition_penalty",
"min_tokens_to_generate",
]
)
class MegatronGenerate(Resource):
def __init__(self, model):
self.model = model
@staticmethod
def send_do_generate():
choice = torch.cuda.LongTensor([GENERATE_NUM])
torch.distributed.broadcast(choice, 0)
def put(self):
logging.info("request IP: " + str(request.remote_addr))
logging.info(json.dumps(request.get_json()))
# check keys
for key in request.get_json().keys():
if key not in API_ALLOWED_KEYS:
logging.error(f"The request key {key} is not allowed")
sentences = request.get_json()["sentences"]
if isinstance(sentences, tuple): # Input can be text or tensor
if len(sentences[0]) != len(sentences[1]) or len(sentences[0]) > 128:
return "Maximum number of sentences is 128", 400
elif len(sentences) > 128:
return "Maximum number of sentences is 128", 400
task_ids = None # Used for ptuned/prompt tuned models only
if "task_ids" in request.get_json():
task_ids = request.get_json()["task_ids"]
if not isinstance(sentences, tuple):
return "Input at 'sentences' must by a tuple of two tensors like:\
(context_tokens_tensor, context_length_tensor) if task ids are given"
if len(task_ids) != len(sentences[0]):
return "Each sentence must have a corresponding task id for p-tuned/prompt-tuned models"
tokens_to_generate = 64 # Choosing hopefully sane default. Full sequence is slow
if "tokens_to_generate" in request.get_json():
tokens_to_generate = request.get_json()["tokens_to_generate"]
if not isinstance(tokens_to_generate, int):
return "tokens_to_generate must be an integer greater than 0"
if tokens_to_generate < 1:
return "tokens_to_generate must be an integer greater than 0"
all_probs = False
if "all_probs" in request.get_json():
all_probs = request.get_json()["all_probs"]
if not isinstance(all_probs, bool):
return "all_probs must be a boolean value"
temperature = 1.0
if "temperature" in request.get_json():
temperature = request.get_json()["temperature"]
if not (type(temperature) == int or type(temperature) == float):
return "temperature must be a positive number less than or equal to 100.0"
if not (0.0 < temperature <= 100.0):
return "temperature must be a positive number less than or equal to 100.0"
add_BOS = False
if "add_BOS" in request.get_json():
add_BOS = request.get_json()["add_BOS"]
if not isinstance(add_BOS, bool):
return "add_BOS must be a boolean value"
greedy = False
if "greedy" in request.get_json():
greedy = request.get_json()["greedy"]
if not isinstance(greedy, bool):
return "greedy must be a boolean value"
top_k = 0
if "top_k" in request.get_json():
top_k = request.get_json()["top_k"]
if not (type(top_k) == int or type(top_k) == float):
return "top_k must be a positive integer number"
if not (0 <= top_k):
return "top_k must be a positive integer number"
top_p = 0.9
if "top_p" in request.get_json():
top_p = request.get_json()["top_p"]
if not (type(top_p) == int or type(top_p) == float):
return "top_p must be a positive number less than or equal to 1.0"
if not (0.0 <= top_p <= 1.0):
return "top_p must be a positive number less than or equal to 1.0"
repetition_penalty = 1.2
if "repetition_penalty" in request.get_json():
repetition_penalty = request.get_json()["repetition_penalty"]
if not (type(repetition_penalty) == int or type(repetition_penalty) == float):
return "repetition_penalty must be a positive number no less than 1.0"
if not (1.0 <= repetition_penalty):
return "repetition_penalty must be a positive number no less than 1.0"
min_tokens_to_generate = 0
if "min_tokens_to_generate" in request.get_json():
min_tokens_to_generate = request.get_json()["min_tokens_to_generate"]
if not isinstance(min_tokens_to_generate, int):
return "min_tokens_to_generate must be an integer no less than 0"
if min_tokens_to_generate < 0:
return "min_tokens_to_generate must be an integer no less than 0"
with lock: # Need to get lock to keep multiple threads from hitting code
MegatronGenerate.send_do_generate() # Tell other ranks we're doing generate
output = generate(
self.model,
sentences,
task_ids,
tokens_to_generate,
all_probs,
temperature,
add_BOS,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
)
if not all_probs:
del output['full_logprob']
return jsonify(output)
class MegatronServer(object):
def __init__(self, model):
self.app = Flask(__name__, static_url_path='')
api = Api(self.app)
api.add_resource(MegatronGenerate, '/generate', resource_class_args=[model])
def run(self, url, port=5000):
self.app.run(url, threaded=True, port=port, debug=False)
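# Illustrative client sketch (assumption, not part of the original module): querying the
# /generate endpoint exposed by MegatronServer above. The host, port and payload values
# are placeholders; the keys match API_ALLOWED_KEYS.
def _example_generate_request():
    import requests

    payload = {
        "sentences": ["Deep learning is"],
        "tokens_to_generate": 32,
        "temperature": 1.0,
        "top_k": 0,
        "top_p": 0.9,
        "greedy": False,
        "repetition_penalty": 1.2,
        "min_tokens_to_generate": 0,
    }
    # The resource is registered for PUT requests (see MegatronGenerate.put above).
    resp = requests.put("http://localhost:5000/generate", json=payload)
    return resp.json()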
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to combine joined giza alignments and Google Text Normalization dataset
to produce training corpus for the ThutmoseTaggerModel.
"""
import glob
import os
from argparse import ArgumentParser
from collections import Counter
from typing import Dict, Optional, TextIO, Tuple
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import get_src_and_dst_for_alignment
from nemo.utils import logging
parser = ArgumentParser(description="Produce data for the ThutmoseTaggerModel")
parser.add_argument(
"--mode",
required=True,
type=str,
help='Mode, one of ["get_replacement_vocab", "filter_by_vocab", "get_labeled_corpus"]',
)
parser.add_argument(
"--data_dir", required=True, type=str, help='Path to data directory with files like output-00000-of-00100.tsv'
)
parser.add_argument(
"--giza_dir", required=True, type=str, help='Path to directory with class folders like ordinal, date etc'
)
parser.add_argument(
"--alignment_filename", required=True, type=str, help='Name of alignment file, like "itn.out", "itn.out.vocab2000"'
)
parser.add_argument("--out_filename", required=True, type=str, help='Output file')
parser.add_argument("--vocab_filename", required=True, type=str, help='Vocab name')
parser.add_argument("--lang", required=True, type=str, help="Language")
args = parser.parse_args()
def process_file_itn(inputname: str, out: TextIO, keys2replacements: Dict[str, str]) -> None:
"""Processes one file in Google TN Dataset format to get the labeled data for ThutmoseTaggerModel
Args:
inputname: name of input file
out: output stream
keys2replacements: Mapping from (semiotic class, spoken, written) to the segmented written form,
which is aligned one-to-one to spoken words (this is the result obtained from Giza++ alignment pipeline)
"""
words = []
tags = []
semiotic_info = []
sent_is_ok = True
with open(inputname, "r", encoding="utf-8") as f:
for line in f:
if line.startswith("<eos>"):
if sent_is_ok and len(words) > 0:
out.write(" ".join(words) + "\t" + " ".join(tags) + "\t" + ";".join(semiotic_info) + "\n")
words = []
tags = []
semiotic_info = []
sent_is_ok = True
else:
cls, written, spoken = line.strip().split("\t")
if spoken == "sil":
continue
if spoken == "<self>":
words.append(written.casefold())
tags.append("<SELF>")
continue
src, dst, same_begin, same_end = get_src_and_dst_for_alignment(
cls.casefold(), written, spoken, args.lang
)
same_from_begin = [] if same_begin == "" else same_begin.split(" ")
same_from_end = [] if same_end == "" else same_end.split(" ")
key = cls.casefold() + "\t" + src + "\t" + dst
if key in keys2replacements:
replacements = keys2replacements[key].split(" ")
spoken_words = dst.split(" ")
for w, r in zip(
same_from_begin + spoken_words + same_from_end, same_from_begin + replacements + same_from_end
):
words.append(w)
if cls == "LETTERS" or cls == "PLAIN":
if w == r:
tags.append("<SELF>")
else:
tags.append(r)
elif w == r.replace("_", ""):
tags.append("<SELF>")
else:
tags.append(r)
semiotic_info.append(
cls
+ " "
+ str(len(words) - len(spoken_words) - len(same_from_begin) - len(same_from_end))
+ " "
+ str(len(words))
)
else:
sent_is_ok = False
def process_line(semiotic_class: str, line: str) -> Optional[Tuple[str, str, str, int]]:
"""A helper function to read the file with alignment results"""
parts = line.strip().split("\t")
if len(parts) != 6:
return None
freq = int(parts[0])
if parts[1] != "good:":
return None
src, dst, leftside_align, rightside_align = parts[2], parts[3], parts[4], parts[5]
align = rightside_align
if semiotic_class == "letters" or semiotic_class == "plain":
align = leftside_align
return src, dst, align, freq
def get_replacement_vocab() -> None:
"""Loops through the files with alignment results in each semiotic class subfolder, counts frequencies of different
replacement segments.
"""
full_vocab = Counter()
alignment_files = glob.glob(args.giza_dir + "/*/" + args.alignment_filename)
for fn in alignment_files:
fn_parts = fn.split("/")
if len(fn_parts) < 2:
raise ValueError("Bad filename: " + fn)
semiotic_class = fn_parts[-2]
class_vocab = Counter()
with open(fn, "r", encoding="utf-8") as f:
for line in f:
t = process_line(semiotic_class, line)
if t is None:
continue
src, dst, replacement, freq = t
inputs = src.split(" ")
replacements = replacement.split(" ")
if len(inputs) != len(replacements):
raise ValueError("Length mismatch in: " + line)
for inp, rep in zip(inputs, replacements):
if inp == rep: # skip same words
continue
full_vocab[rep] += freq
class_vocab[rep] += freq
with open(args.vocab_filename + "." + semiotic_class, "w", encoding="utf-8") as out:
for k, v in class_vocab.most_common(1000000000):
out.write(k + "\t" + str(v) + "\n")
with open(args.vocab_filename, "w", encoding="utf-8") as out:
for k, v in full_vocab.most_common(1000000000):
out.write(k + "\t" + str(v) + "\n")
def filter_by_vocab() -> None:
"""Given a restricted vocabulary of replacements,
loops through the files with alignment results in each semiotic class subfolder,
discards the examples containing a replacement which is not in our restricted vocabulary.
"""
if not os.path.exists(args.vocab_filename):
raise ValueError(f"Alignments dir {args.giza_dir} does not exist")
# load vocab from file
vocab = {}
with open(args.vocab_filename, "r", encoding="utf-8") as f:
for line in f:
k, v = line.strip().split("\t")
vocab[k] = int(v)
print("len(vocab)=", len(vocab))
alignment_files = glob.glob(args.giza_dir + "/*/" + args.alignment_filename)
for fn in alignment_files:
fn_parts = fn.split("/")
if len(fn_parts) < 2:
raise ValueError("Bad filename: " + fn)
semiotic_class = fn_parts[-2]
out = open(args.giza_dir + "/" + semiotic_class + "/" + args.out_filename, "w", encoding="utf-8")
with open(fn, "r", encoding="utf-8") as f:
for line in f:
t = process_line(semiotic_class, line)
if t is None:
continue
src, dst, replacement, freq = t
ok = True
for s, r in zip(src.split(" "), replacement.split(" ")):
if s != r and r not in vocab:
ok = False
if ok:
out.write(semiotic_class + "\t" + src + "\t" + dst + "\t" + replacement + "\n")
out.close()
def get_labeled_corpus() -> None:
"""Loops through the files with alignment results in each semiotic class subfolder,
collects a mapping from (semiotic class, spoken, written) to the segmented written form,
which is aligned one-to-one to spoken words.
Then loops through the files in Google TN Dataset format to get the labeled data for ThutmoseTaggerModel.
It extracts the whole sentences and substitutes the semiotic spans to their aligned form from the dictionary.
"""
if not os.path.exists(args.data_dir):
raise ValueError(f"Data dir {args.data_dir} does not exist")
keys2replacements = {}
alignment_files = glob.glob(args.giza_dir + "/*/" + args.alignment_filename)
if len(alignment_files) == 0:
raise ValueError("Did not found any such files: " + args.giza_dir + "/*/" + args.alignment_filename)
for af in alignment_files:
with open(af, "r", encoding="utf-8") as f:
for line in f:
cls, src, dst, replacements = line.strip().split("\t")
key = cls + "\t" + dst + "\t" + src
if key in keys2replacements and keys2replacements[key] != replacements:
logging.warning("keys2replacements[key] != replacements", keys2replacements[key], replacements)
keys2replacements[key] = replacements
print("size of phrase-to-replacements dictionary =", len(keys2replacements))
out = open(args.out_filename, "w", encoding="utf-8")
input_paths = sorted([os.path.join(args.data_dir, f) for f in os.listdir(args.data_dir)])
for inputname in input_paths:
process_file_itn(inputname, out, keys2replacements)
out.close()
def main() -> None:
if not os.path.exists(args.giza_dir):
raise ValueError(f"Alignments dir {args.giza_dir} does not exist")
if args.mode == "get_replacement_vocab":
get_replacement_vocab()
elif args.mode == "filter_by_vocab":
filter_by_vocab()
elif args.mode == "get_labeled_corpus":
get_labeled_corpus()
else:
raise ValueError("unknown mode: " + args.mode)
if __name__ == "__main__":
main()
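# Illustrative usage sketch (assumption): the three modes are typically run in sequence.
# All arguments are required by the parser, even when a particular mode does not use them,
# so placeholders are passed below.
#
#   python prepare_corpora_after_alignment.py --mode=get_replacement_vocab \
#       --data_dir=<GOOGLE_TN_DATA_DIR> --giza_dir=<GIZA_ALIGNMENTS_DIR> \
#       --alignment_filename=itn.out --vocab_filename=replacement_vocab_full.txt \
#       --out_filename=unused --lang=en
#
# followed by --mode=filter_by_vocab (with a manually restricted vocab file) and finally
# --mode=get_labeled_corpus to produce the labeled training corpus.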
|
gkucsko/NeMo
|
tests/collections/nlp/test_dialogue.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.nlp.data.dialogue.data_processor.assistant_data_processor import DialogueAssistantDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.sgd_data_processor import DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_gpt_classification_dataset import (
DialogueGPTClassificationDataset,
)
from nemo.collections.nlp.data.dialogue.dataset.dialogue_s2s_generation_dataset import DialogueS2SGenerationDataset
from nemo.collections.nlp.data.dialogue.dataset.dialogue_sgd_bert_dataset import DialogueSGDBERTDataset
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueClassificationMetrics, DialogueGenerationMetrics
from nemo.collections.nlp.models.dialogue.dialogue_nearest_neighbour_model import DialogueNearestNeighbourModel
@pytest.mark.unit
def test_dialogue_metric_generation_f1():
generated_field = 'That is so good'
ground_truth_field = 'That is so awesome'
precision, recall, f1 = DialogueGenerationMetrics._get_one_f1(generated_field, ground_truth_field)
assert precision == 75
assert recall == 75
assert f1 == 75
@pytest.mark.unit
def test_dialogue_metric_split_label_and_slots():
fields = ["reserve_restaurant\nslots: time_of_day(7pm), number_of_people(3)", "time_of_day(7pm)"]
labels, slots_list = DialogueClassificationMetrics.split_label_and_slots(fields, with_slots=True)
assert labels == ["reserve_restaurant", 'none']
assert slots_list == [["time_of_day(7pm)", "number_of_people(3)"], ["time_of_day(7pm)"]]
@pytest.mark.unit
def test_dialogue_metric_slot_filling_metrics():
generated_slots = [["time_of_day(7pm)", "number_of_people(3)"], ["time_of_day(7pm)"]]
ground_truth_slots = [["time_of_day(7pm)"], ["time_of_day(7pm)", "number_of_people(3)"]]
(
avg_precision,
avg_recall,
avg_f1,
avg_joint_goal_accuracy,
) = DialogueClassificationMetrics.get_slot_filling_metrics(generated_slots, ground_truth_slots)
assert avg_precision == 75
assert avg_recall == 75
assert avg_f1 == 75
assert avg_joint_goal_accuracy == 0
@pytest.mark.unit
def test_dialogue_assistant_data_processor_normalize_zero_shot_intent():
label0 = 'food_ordering.contextual_query'
normalized_label0 = 'contextual query'
label1 = 'food_ordering.nomatch'
normalized_label1 = 'no match'
label2 = 'food_ordering.no'
normalized_label2 = 'no'
assert normalized_label0 == DialogueAssistantDataProcessor.normalize_zero_shot_intent(label0)
assert normalized_label1 == DialogueAssistantDataProcessor.normalize_zero_shot_intent(label1)
assert normalized_label2 == DialogueAssistantDataProcessor.normalize_zero_shot_intent(label2)
@pytest.mark.unit
def test_dialogue_assistant_data_processor_get_continuous_slots():
slot_ids = [54, 54, 54, 19, 19, 18, 54, 54, 54]
empty_slot_id = 54
bio_slot_ids_to_unified_slot_ids = {18: 18, 19: 19, 54: 54}
continuous_slots = DialogueAssistantDataProcessor.get_continuous_slots(
slot_ids, empty_slot_id, bio_slot_ids_to_unified_slot_ids
)
assert continuous_slots == {19: [3, 5], 18: [5, 6]}
# here 18 and 19 maps to the same slot (originally variants of B-slot and I-slot)
slot_ids = [54, 54, 54, 19, 19, 18, 54, 54, 54]
empty_slot_id = 54
bio_slot_ids_to_unified_slot_ids = {18: 18, 19: 18, 54: 54}
continuous_slots = DialogueAssistantDataProcessor.get_continuous_slots(
slot_ids, empty_slot_id, bio_slot_ids_to_unified_slot_ids
)
assert continuous_slots == {18: [3, 6]}
# test if function works when non-empty slots are at boundary
slot_ids = [18, 54, 54, 19, 19]
empty_slot_id = 54
bio_slot_ids_to_unified_slot_ids = {18: 18, 19: 19, 54: 54}
continuous_slots = DialogueAssistantDataProcessor.get_continuous_slots(
slot_ids, empty_slot_id, bio_slot_ids_to_unified_slot_ids
)
assert continuous_slots == {18: [0, 1], 19: [3, 5]}
@pytest.mark.unit
def test_dialogue_assistant_map_bio_format_slots_to_unified_slots():
slots = ['B-time', 'I-time', 'B-alarm', 'I-alarm', 'O']
gt_bio_slot_ids_to_unified_slot_ids = {'0': '0', '1': '0', '2': '1', '3': '1', '4': '2'}
gt_unified_slots = ['time', 'alarm', 'O']
(
bio_slot_ids_to_unified_slot_ids,
unified_slots,
) = DialogueAssistantDataProcessor.map_bio_format_slots_to_unified_slots(slots)
assert gt_bio_slot_ids_to_unified_slot_ids == bio_slot_ids_to_unified_slot_ids
assert gt_unified_slots == unified_slots
# case in which BIOS scheme was not used in annotation
slots = ['time', 'alarm', 'O']
gt_bio_slot_ids_to_unified_slot_ids = {'0': '0', '1': '1', '2': '2'}
gt_unified_slots = ['time', 'alarm', 'O']
(
bio_slot_ids_to_unified_slot_ids,
unified_slots,
) = DialogueAssistantDataProcessor.map_bio_format_slots_to_unified_slots(slots)
assert gt_bio_slot_ids_to_unified_slot_ids == bio_slot_ids_to_unified_slot_ids
assert gt_unified_slots == unified_slots
@pytest.mark.unit
def test_dialogue_data_processor_get_relevant_idxs():
dataset_split = 'train'
dev_proportion = 10
n_samples = 1000
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, dev_proportion)
assert len(idxs) == 900
assert idxs != list(range(900))
dataset_split = 'dev'
dev_proportion = 40
n_samples = 1000
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, dev_proportion)
assert len(idxs) == 400
assert idxs != list(range(400))
dataset_split = 'test'
dev_proportion = 40
n_samples = 1000
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, dev_proportion)
assert len(idxs) == 1000
assert idxs == list(range(1000))
@pytest.mark.unit
def test_dialogue_sgd_data_processor_convert_camelcase_to_lower():
label = 'none'
gt_converted_label = 'none'
assert gt_converted_label == DialogueSGDDataProcessor.convert_camelcase_to_lower(label)
label = 'ReserveRestaurant'
gt_converted_label = 'reserve restaurant'
assert gt_converted_label == DialogueSGDDataProcessor.convert_camelcase_to_lower(label)
label = 'Alarm'
gt_converted_label = 'alarm'
assert gt_converted_label == DialogueSGDDataProcessor.convert_camelcase_to_lower(label)
@pytest.mark.unit
def test_dialogue_gpt_classification_dataset_linearize_slots():
slots = []
linearized_slots = 'None'
assert linearized_slots == DialogueGPTClassificationDataset.linearize_slots(slots)
slots = {'time': '7pm', 'place': 'field'}
linearized_slots = 'time(7pm), place(field)'
assert linearized_slots == DialogueGPTClassificationDataset.linearize_slots(slots)
slots = {'time': ['7pm', '1900'], 'place': 'field'}
linearized_slots = 'time(7pm), place(field)'
assert linearized_slots == DialogueGPTClassificationDataset.linearize_slots(slots)
@pytest.mark.unit
def test_dialogue_s2s_generation_dataset_format_actions():
actions = [
{'act': 'inform', 'slot': 'time', 'values': ['7pm', '1900']},
{'act': 'confirm', 'slot': 'place', 'values': ['hall']},
]
prompt_template = 'values'
formatted_actions = '7pm hall'
assert formatted_actions == DialogueS2SGenerationDataset.format_actions(prompt_template, actions)
prompt_template = 'slots_values'
formatted_actions = 'time (7pm) place (hall)'
assert formatted_actions == DialogueS2SGenerationDataset.format_actions(prompt_template, actions)
prompt_template = 'acts_slots_values'
formatted_actions = 'inform time (7pm) confirm place (hall)'
assert formatted_actions == DialogueS2SGenerationDataset.format_actions(prompt_template, actions)
@pytest.mark.unit
def test_dialogue_sgd_dataset_naive_tokenize():
utterance = 'I am feeling hungry so I would like to find a place to eat.'
tokens = [
'I',
' ',
'am',
' ',
'feeling',
' ',
'hungry',
' ',
'so',
' ',
'I',
' ',
'would',
' ',
'like',
' ',
'to',
' ',
'find',
' ',
'a',
' ',
'place',
' ',
'to',
' ',
'eat',
'.',
]
assert tokens == DialogueSGDBERTDataset._naive_tokenize(utterance)
@pytest.mark.unit
def test_dialogue_nearest_neighbour_mean_pooling():
model_output = [torch.ones(8, 512, 768)]
attention_mask = torch.ones(8, 512)
assert torch.equal(
torch.ones(8, 768).float(), DialogueNearestNeighbourModel.mean_pooling(model_output, attention_mask)
)
model_output = [torch.zeros(8, 512, 768)]
attention_mask = torch.ones(8, 512)
assert torch.equal(
torch.zeros(8, 768).float(), DialogueNearestNeighbourModel.mean_pooling(model_output, attention_mask)
)
model_output = [torch.cat([torch.zeros(8, 256, 768), torch.ones(8, 256, 768)], axis=1)]
attention_mask = torch.ones(8, 512)
assert torch.equal(
torch.ones(8, 768).float() * 0.5, DialogueNearestNeighbourModel.mean_pooling(model_output, attention_mask)
)
|
gkucsko/NeMo
|
scripts/asr_language_modeling/ngram_lm/eval_beamsearch_ngram.py
|
<filename>scripts/asr_language_modeling/ngram_lm/eval_beamsearch_ngram.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script evaluates an N-gram language model trained with the KenLM library (https://github.com/kpu/kenlm) in
# fusion with beam search decoders on top of a trained ASR model. NeMo's beam search decoders are capable of using
# KenLM's N-gram models to find the best candidates. This script supports both character-level and BPE-level
# encodings and models, which are detected automatically from the type of the model.
# You may train the LM model with 'scripts/ngram_lm/train_kenlm.py'.
#
# USAGE: python eval_beamsearch_ngram.py --nemo_model_file <path to the .nemo file of the model> \
# --input_manifest <path to the evaluation JSON manifest file \
# --kenlm_model_file <path to the binary KenLM model> \
# --beam_width <list of the beam widths> \
# --beam_alpha <list of the beam alphas> \
# --beam_beta <list of the beam betas> \
# --preds_output_folder <optional folder to store the predictions> \
# --decoding_mode beamsearch_ngram
# ...
#
# You may find more info on how to use this script at:
# https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/asr_language_modeling.html
# Please check train_kenlm.py to find out why we need TOKEN_OFFSET for BPE-based models
TOKEN_OFFSET = 100
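# Illustrative sketch (not part of the original script): for BPE/sub-word models, each sub-word id is
# shifted by TOKEN_OFFSET into the printable unicode range so the KenLM-based decoder can treat it as a
# single character, and shifted back when reading beam search candidates, e.g.
#   encoded_char = chr(5 + TOKEN_OFFSET)              # sub-word id 5 -> character fed to the LM decoder
#   original_id = ord(encoded_char) - TOKEN_OFFSET    # recover the sub-word id (see beam_search_eval below)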
import argparse
import contextlib
import json
import os
import pickle
from pathlib import Path
import editdistance
import kenlm_utils
import numpy as np
import torch
from sklearn.model_selection import ParameterGrid
from tqdm.auto import tqdm
import nemo
import nemo.collections.asr as nemo_asr
from nemo.utils import logging
def beam_search_eval(
all_probs,
target_transcripts,
vocab,
ids_to_text_func=None,
preds_output_file=None,
lm_path=None,
beam_alpha=1.0,
beam_beta=0.0,
beam_width=128,
beam_batch_size=128,
progress_bar=True,
):
# creating the beam search decoder
beam_search_lm = nemo_asr.modules.BeamSearchDecoderWithLM(
vocab=vocab,
beam_width=beam_width,
alpha=beam_alpha,
beta=beam_beta,
lm_path=lm_path,
num_cpus=max(os.cpu_count(), 1),
input_tensor=False,
)
wer_dist_first = cer_dist_first = 0
wer_dist_best = cer_dist_best = 0
words_count = 0
chars_count = 0
sample_idx = 0
if preds_output_file:
out_file = open(preds_output_file, 'w')
if progress_bar:
it = tqdm(
range(int(np.ceil(len(all_probs) / beam_batch_size))),
desc=f"Beam search decoding with width={beam_width}, alpha={beam_alpha}, beta={beam_beta}",
ncols=120,
)
else:
it = range(int(np.ceil(len(all_probs) / beam_batch_size)))
for batch_idx in it:
# disabling type checking
with nemo.core.typecheck.disable_checks():
probs_batch = all_probs[batch_idx * beam_batch_size : (batch_idx + 1) * beam_batch_size]
beams_batch = beam_search_lm.forward(log_probs=probs_batch, log_probs_length=None,)
for beams_idx, beams in enumerate(beams_batch):
target = target_transcripts[sample_idx + beams_idx]
target_split_w = target.split()
target_split_c = list(target)
words_count += len(target_split_w)
chars_count += len(target_split_c)
wer_dist_min = cer_dist_min = 10000
for candidate_idx, candidate in enumerate(beams):
if ids_to_text_func is not None:
# For BPE encodings, need to shift by TOKEN_OFFSET to retrieve the original sub-word ids
pred_text = ids_to_text_func([ord(c) - TOKEN_OFFSET for c in candidate[1]])
else:
pred_text = candidate[1]
pred_split_w = pred_text.split()
wer_dist = editdistance.eval(target_split_w, pred_split_w)
pred_split_c = list(pred_text)
cer_dist = editdistance.eval(target_split_c, pred_split_c)
wer_dist_min = min(wer_dist_min, wer_dist)
cer_dist_min = min(cer_dist_min, cer_dist)
if candidate_idx == 0:
# first candidate
wer_dist_first += wer_dist
cer_dist_first += cer_dist
score = candidate[0]
if preds_output_file:
out_file.write('{}\t{}\n'.format(pred_text, score))
wer_dist_best += wer_dist_min
cer_dist_best += cer_dist_min
sample_idx += len(probs_batch)
if preds_output_file:
out_file.close()
logging.info(f"Stored the predictions of beam search decoding at '{preds_output_file}'.")
if lm_path:
logging.info(
'WER/CER with beam search decoding and N-gram model = {:.2%}/{:.2%}'.format(
wer_dist_first / words_count, cer_dist_first / chars_count
)
)
else:
logging.info(
'WER/CER with beam search decoding = {:.2%}/{:.2%}'.format(
wer_dist_first / words_count, cer_dist_first / chars_count
)
)
logging.info(
'Oracle WER/CER in candidates with perfect LM= {:.2%}/{:.2%}'.format(
wer_dist_best / words_count, cer_dist_best / chars_count
)
)
logging.info(f"=================================================================================")
def main():
parser = argparse.ArgumentParser(
description='Evaluate an ASR model with beam search decoding and n-gram KenLM language model.'
)
parser.add_argument(
"--nemo_model_file",
required=True,
type=str,
help="The path of the '.nemo' file of the ASR model or name of a pretrained model",
)
parser.add_argument(
"--kenlm_model_file", required=False, default=None, type=str, help="The path of the KenLM binary model file"
)
parser.add_argument("--input_manifest", required=True, type=str, help="The manifest file of the evaluation set")
parser.add_argument(
"--preds_output_folder", default=None, type=str, help="The optional folder where the predictions are stored"
)
parser.add_argument(
"--probs_cache_file", default=None, type=str, help="The cache file for storing the outputs of the model"
)
parser.add_argument(
"--acoustic_batch_size", default=16, type=int, help="The batch size to calculate log probabilities"
)
parser.add_argument(
"--device", default="cuda", type=str, help="The device to load the model onto to calculate log probabilities"
)
parser.add_argument(
"--use_amp", action="store_true", help="Whether to use AMP if available to calculate log probabilities"
)
parser.add_argument(
"--decoding_mode",
choices=["greedy", "beamsearch", "beamsearch_ngram"],
default="beamsearch_ngram",
type=str,
help="The decoding scheme to be used for evaluation.",
)
parser.add_argument(
"--beam_width",
required=False,
type=int,
nargs="+",
help="The width or list of the widths for the beam search decoding",
)
parser.add_argument(
"--beam_alpha",
required=False,
type=float,
nargs="+",
help="The alpha parameter or list of the alphas for the beam search decoding",
)
parser.add_argument(
"--beam_beta",
required=False,
type=float,
nargs="+",
help="The beta parameter or list of the betas for the beam search decoding",
)
parser.add_argument(
"--beam_batch_size", default=128, type=int, help="The batch size to be used for beam search decoding"
)
args = parser.parse_args()
if args.nemo_model_file.endswith('.nemo'):
asr_model = nemo_asr.models.ASRModel.restore_from(args.nemo_model_file, map_location=torch.device(args.device))
else:
logging.warning(
"nemo_model_file does not end with .nemo, therefore trying to load a pretrained model with this name."
)
asr_model = nemo_asr.models.ASRModel.from_pretrained(
args.nemo_model_file, map_location=torch.device(args.device)
)
target_transcripts = []
manifest_dir = Path(args.input_manifest).parent
with open(args.input_manifest, 'r') as manifest_file:
audio_file_paths = []
for line in tqdm(manifest_file, desc=f"Reading Manifest {args.input_manifest} ...", ncols=120):
data = json.loads(line)
audio_file = Path(data['audio_filepath'])
if not audio_file.is_file() and not audio_file.is_absolute():
audio_file = manifest_dir / audio_file
target_transcripts.append(data['text'])
audio_file_paths.append(str(audio_file.absolute()))
if args.probs_cache_file and os.path.exists(args.probs_cache_file):
logging.info(f"Found a pickle file of probabilities at '{args.probs_cache_file}'.")
logging.info(f"Loading the cached pickle file of probabilities from '{args.probs_cache_file}' ...")
with open(args.probs_cache_file, 'rb') as probs_file:
all_probs = pickle.load(probs_file)
if len(all_probs) != len(audio_file_paths):
raise ValueError(
f"The number of samples in the probabilities file '{args.probs_cache_file}' does not "
f"match the manifest file. You may need to delete the probabilities cached file."
)
else:
if args.use_amp:
if torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP is enabled!\n")
autocast = torch.cuda.amp.autocast
else:
@contextlib.contextmanager
def autocast():
yield
with autocast():
with torch.no_grad():
all_logits = asr_model.transcribe(audio_file_paths, batch_size=args.acoustic_batch_size, logprobs=True)
all_probs = [kenlm_utils.softmax(logits) for logits in all_logits]
if args.probs_cache_file:
logging.info(f"Writing pickle files of probabilities at '{args.probs_cache_file}'...")
with open(args.probs_cache_file, 'wb') as f_dump:
pickle.dump(all_probs, f_dump)
wer_dist_greedy = 0
cer_dist_greedy = 0
words_count = 0
chars_count = 0
for batch_idx, probs in enumerate(all_probs):
preds = np.argmax(probs, axis=1)
preds_tensor = torch.tensor(preds, device='cpu').unsqueeze(0)
pred_text = asr_model._wer.ctc_decoder_predictions_tensor(preds_tensor)[0]
pred_split_w = pred_text.split()
target_split_w = target_transcripts[batch_idx].split()
pred_split_c = list(pred_text)
target_split_c = list(target_transcripts[batch_idx])
wer_dist = editdistance.eval(target_split_w, pred_split_w)
cer_dist = editdistance.eval(target_split_c, pred_split_c)
wer_dist_greedy += wer_dist
cer_dist_greedy += cer_dist
words_count += len(target_split_w)
chars_count += len(target_split_c)
logging.info('Greedy WER/CER = {:.2%}/{:.2%}'.format(wer_dist_greedy / words_count, cer_dist_greedy / chars_count))
encoding_level = kenlm_utils.SUPPORTED_MODELS.get(type(asr_model).__name__, None)
if not encoding_level:
logging.warning(
f"Model type '{type(asr_model).__name__}' may not be supported. Would try to train a char-level LM."
)
encoding_level = 'char'
vocab = asr_model.decoder.vocabulary
ids_to_text_func = None
if encoding_level == "subword":
vocab = [chr(idx + TOKEN_OFFSET) for idx in range(len(vocab))]
ids_to_text_func = asr_model.tokenizer.ids_to_text
# delete the model to free the memory
del asr_model
if args.decoding_mode == "beamsearch_ngram":
if not os.path.exists(args.kenlm_model_file):
raise FileNotFoundError(f"Could not find the KenLM model file '{args.kenlm_model_file}'.")
lm_path = args.kenlm_model_file
else:
lm_path = None
# 'greedy' decoding_mode would skip the beam search decoding
if args.decoding_mode in ["beamsearch_ngram", "beamsearch"]:
if args.beam_width is None or args.beam_alpha is None or args.beam_beta is None:
raise ValueError("beam_width, beam_alpha and beam_beta are needed to perform beam search decoding.")
params = {'beam_width': args.beam_width, 'beam_alpha': args.beam_alpha, 'beam_beta': args.beam_beta}
hp_grid = ParameterGrid(params)
hp_grid = list(hp_grid)
logging.info(f"==============================Starting the beam search decoding===============================")
logging.info(f"Grid search size: {len(hp_grid)}")
logging.info(f"It may take some time...")
logging.info(f"==============================================================================================")
if args.preds_output_folder and not os.path.exists(args.preds_output_folder):
os.mkdir(args.preds_output_folder)
for hp in hp_grid:
if args.preds_output_folder:
preds_output_file = os.path.join(
args.preds_output_folder,
f"preds_out_width{hp['beam_width']}_alpha{hp['beam_alpha']}_beta{hp['beam_beta']}.tsv",
)
else:
preds_output_file = None
beam_search_eval(
all_probs=all_probs,
target_transcripts=target_transcripts,
vocab=vocab,
ids_to_text_func=ids_to_text_func,
preds_output_file=preds_output_file,
lm_path=lm_path,
beam_width=hp["beam_width"],
beam_alpha=hp["beam_alpha"],
beam_beta=hp["beam_beta"],
beam_batch_size=args.beam_batch_size,
progress_bar=True,
)
if __name__ == '__main__':
main()
|
gkucsko/NeMo
|
examples/asr/speech_classification/vad_infer.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
During inference, we perform frame-level prediction by two approaches:
1) shift the window of length window_length_in_sec (e.g. 0.63s) by shift_length_in_sec (e.g. 10ms) to generate the frame and use the prediction of the window to represent the label for the frame;
[this script demonstrate how to do this approach]
2) generate predictions with overlapping input segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.
[get frame-level predictions with this script, then use vad_overlap_posterior.py in NeMo/scripts/voice_activity_detection.
That script also shows how to convert frame-level predictions to speech/no-speech segments in start and end times format.]
Image https://raw.githubusercontent.com/NVIDIA/NeMo/main/tutorials/asr/images/vad_post_overlap_diagram.png
will help you understand this method.
This script will also help you perform postprocessing and generate speech segments if needed
Usage:
python vad_infer.py --config-path="../conf/vad" --config-name="vad_inference_postprocessing.yaml" dataset=<Path of json file of evaluation data. Audio files should have unique names>
"""
import json
import os
import torch
from nemo.collections.asr.parts.utils.speaker_utils import write_rttm2manifest
from nemo.collections.asr.parts.utils.vad_utils import (
generate_overlap_vad_seq,
generate_vad_frame_pred,
generate_vad_segment_table,
init_vad_model,
prepare_manifest,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@hydra_runner(config_path="../conf/vad", config_name="vad_inference_postprocessing.yaml")
def main(cfg):
if not cfg.dataset:
raise ValueError("You must input the path of json file of evaluation data")
    # each line of the dataset should have a different audio_filepath and a unique name to simplify edge cases or conditions
key_meta_map = {}
with open(cfg.dataset, 'r') as manifest:
for line in manifest.readlines():
audio_filepath = json.loads(line.strip())['audio_filepath']
uniq_audio_name = audio_filepath.split('/')[-1].rsplit('.', 1)[0]
if uniq_audio_name in key_meta_map:
raise ValueError("Please make sure each line is with different audio_filepath! ")
key_meta_map[uniq_audio_name] = {'audio_filepath': audio_filepath}
# Prepare manifest for streaming VAD
manifest_vad_input = cfg.dataset
if cfg.prepare_manifest.auto_split:
logging.info("Split long audio file to avoid CUDA memory issue")
logging.debug("Try smaller split_duration if you still have CUDA memory issue")
config = {
'input': manifest_vad_input,
'window_length_in_sec': cfg.vad.parameters.window_length_in_sec,
'split_duration': cfg.prepare_manifest.split_duration,
'num_workers': cfg.num_workers,
'prepared_manifest_vad_input': cfg.prepared_manifest_vad_input,
}
manifest_vad_input = prepare_manifest(config)
else:
logging.warning(
"If you encounter CUDA memory issue, try splitting manifest entry by split_duration to avoid it."
)
torch.set_grad_enabled(False)
vad_model = init_vad_model(cfg.vad.model_path)
# setup_test_data
vad_model.setup_test_data(
test_data_config={
'vad_stream': True,
'sample_rate': 16000,
'manifest_filepath': manifest_vad_input,
'labels': ['infer',],
'num_workers': cfg.num_workers,
'shuffle': False,
'window_length_in_sec': cfg.vad.parameters.window_length_in_sec,
'shift_length_in_sec': cfg.vad.parameters.shift_length_in_sec,
'trim_silence': False,
'normalize_audio': cfg.vad.parameters.normalize_audio,
}
)
vad_model = vad_model.to(device)
vad_model.eval()
if not os.path.exists(cfg.frame_out_dir):
os.mkdir(cfg.frame_out_dir)
else:
logging.warning(
"Note frame_out_dir exists. If new file has same name as file inside existing folder, it will append result to existing file and might cause mistakes for next steps."
)
logging.info("Generating frame level prediction ")
pred_dir = generate_vad_frame_pred(
vad_model=vad_model,
window_length_in_sec=cfg.vad.parameters.window_length_in_sec,
shift_length_in_sec=cfg.vad.parameters.shift_length_in_sec,
manifest_vad_input=manifest_vad_input,
out_dir=cfg.frame_out_dir,
)
logging.info(
f"Finish generating VAD frame level prediction with window_length_in_sec={cfg.vad.parameters.window_length_in_sec} and shift_length_in_sec={cfg.vad.parameters.shift_length_in_sec}"
)
# overlap smoothing filter
if cfg.gen_overlap_seq:
# Generate predictions with overlapping input segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.
# smoothing_method would be either in majority vote (median) or average (mean)
logging.info("Generating predictions with overlapping input segments")
smoothing_pred_dir = generate_overlap_vad_seq(
frame_pred_dir=pred_dir,
smoothing_method=cfg.vad.parameters.smoothing,
overlap=cfg.vad.parameters.overlap,
window_length_in_sec=cfg.vad.parameters.window_length_in_sec,
shift_length_in_sec=cfg.vad.parameters.shift_length_in_sec,
num_workers=cfg.num_workers,
out_dir=cfg.smoothing_out_dir,
)
logging.info(
f"Finish generating predictions with overlapping input segments with smoothing_method={cfg.vad.parameters.smoothing} and overlap={cfg.vad.parameters.overlap}"
)
pred_dir = smoothing_pred_dir
# postprocessing and generate speech segments
if cfg.gen_seg_table:
logging.info("Converting frame level prediction to speech/no-speech segment in start and end times format.")
table_out_dir = generate_vad_segment_table(
vad_pred_dir=pred_dir,
postprocessing_params=cfg.vad.parameters.postprocessing,
shift_length_in_sec=cfg.vad.parameters.shift_length_in_sec,
num_workers=cfg.num_workers,
out_dir=cfg.table_out_dir,
)
logging.info(
f"Finish generating speech semgents table with postprocessing_params: {cfg.vad.parameters.postprocessing}"
)
if cfg.write_to_manifest:
for i in key_meta_map:
key_meta_map[i]['rttm_filepath'] = os.path.join(table_out_dir, i + ".txt")
if not cfg.out_manifest_filepath:
out_manifest_filepath = "vad_out.json"
else:
out_manifest_filepath = cfg.out_manifest_filepath
out_manifest_filepath = write_rttm2manifest(key_meta_map, out_manifest_filepath)
logging.info(f"Writing VAD output to manifest: {out_manifest_filepath}")
if __name__ == '__main__':
main()
|
gkucsko/NeMo
|
tests/core/test_config_utils.py
|
<reponame>gkucsko/NeMo<filename>tests/core/test_config_utils.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any
import pytest
import pytorch_lightning as ptl
from nemo.core.config.pytorch_lightning import TrainerConfig
from nemo.utils import config_utils
@pytest.fixture()
def cls():
class DummyClass:
def __init__(self, a, b=5, c: int = 0, d: 'ABC' = None):
pass
return DummyClass
class TestConfigUtils:
@pytest.mark.unit
def test_all_args_exist(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
d: Any = None
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_all_args_dont_exist(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass)
signatures_match, cls_subset, dataclass_subset = result
assert not signatures_match
assert len(cls_subset) > 0
assert len(dataclass_subset) == 0
@pytest.mark.unit
def test_extra_args_exist(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
d: Any = None
e: float = 0.0
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass)
signatures_match, cls_subset, dataclass_subset = result
assert not signatures_match
assert len(cls_subset) == 0
assert len(dataclass_subset) > 0
@pytest.mark.unit
def test_extra_args_exist_but_is_ignored(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
d: Any = None
e: float = 0.0 # Assume ignored
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass, ignore_args=['e'])
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_args_exist_but_is_remapped(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
e: Any = None # Assume remapped
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass, remap_args={'e': 'd'})
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_ptl_config(self):
PTL_DEPRECATED = ['distributed_backend', 'automatic_optimization', 'gpus', 'num_processes']
result = config_utils.assert_dataclass_signature_match(ptl.Trainer, TrainerConfig, ignore_args=PTL_DEPRECATED)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/dataset/dialogue_nearest_neighbour_dataset.py
|
<reponame>gkucsko/NeMo<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
__all__ = ['DialogueNearestNeighbourDataset']
class DialogueNearestNeighbourDataset(DialogueDataset):
"""
Dataset for training a Nearest Neighbour model for zero shot intent recognition.
"""
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
"""
Args:
dataset_split: dataset split
dialogues_processor: Data generator for dialogues
tokenizer: tokenizer to split text into sub-word tokens
"""
self.cfg = cfg
self.tokenizer = tokenizer
self.raw_features = dialogues_processor.get_dialog_examples(dataset_split)
self.max_n = self.find_max_n_candidates()
self.examples = self._create_examples(self.raw_features)
def find_max_n_candidates(self):
max_n = 0
for idx in range(len(self.raw_features)):
ex = self.raw_features[idx].data
n = len(ex["possible_labels"]["intent"])
max_n = max(max_n, n)
return max_n
def _create_examples(self, raw_features):
"""Creates examples for the training and dev sets."""
examples = []
seen_utterances = set()
for idx in range(len(raw_features)):
ex = self.raw_features[idx].data
user_utterance = ex["utterance"]
if user_utterance in seen_utterances:
continue
seen_utterances.add(user_utterance)
intent = ex["labels"]["intent"]
sentences = [user_utterance]
labels = [-1]
for candidate_intent in ex["possible_labels"]["intent"]:
text_b = "{} {}".format(self.cfg.prompt_template, candidate_intent)
label = 1 if candidate_intent == intent else 0
labels.append(label)
sentences.append(text_b)
while self.max_n > len(labels) - 1:
labels.append(label)
sentences.append(text_b)
encoded_input = self.tokenizer.tokenizer(
sentences,
padding='max_length',
truncation=True,
return_tensors='pt',
max_length=self.cfg.max_seq_length,
)
examples.append((encoded_input['input_ids'], encoded_input['attention_mask'], torch.tensor(labels)))
return examples
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
return self.examples[idx]
|
gkucsko/NeMo
|
nemo/collections/nlp/models/machine_translation/megatron_nmt_model.py
|
<gh_stars>0
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import random
import re
from typing import List, Optional
import numpy as np
import torch
from omegaconf.dictconfig import DictConfig
from omegaconf.listconfig import ListConfig
from pytorch_lightning.trainer.trainer import Trainer
from sacrebleu import corpus_bleu
from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import (
BinarizedMemmapSequenceToSequenceDataset,
TextMemmapSequenceToSequenceDataset,
)
from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset
from nemo.collections.nlp.data.language_modeling.megatron.megatron_batch_samplers import (
MegatronPretrainingBatchSampler,
MegatronPretrainingRandomBatchSampler,
)
from nemo.collections.nlp.models.language_modeling.megatron_lm_encoder_decoder_model import (
MegatronLMEncoderDecoderModel,
)
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel
from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher
from nemo.collections.nlp.parts.utils_funcs import get_last_rank
from nemo.utils import AppState, logging, timers
try:
from apex.transformer import parallel_state
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = ["MegatronNMTModel"]
class MegatronNMTModel(MegatronLMEncoderDecoderModel):
"""
Megatron NMT training
"""
def __init__(self, cfg: DictConfig, trainer: Trainer):
# All of the lines below need to be set when the parent class calls self._build_tokenizer()
self.encoder_tokenizer_library = cfg.encoder_tokenizer.get('library', 'yttm')
self.decoder_tokenizer_library = cfg.decoder_tokenizer.get('library', 'yttm')
self.special_tokens = {}
self.src_language = cfg.get("src_language", None)
self.tgt_language = cfg.get("tgt_language", None)
self.multilingual = cfg.get("multilingual", False)
self.multilingual_ids = []
self.validate_input_ids = cfg.get("validate_input_ids", True)
if self.multilingual:
if isinstance(self.src_language, ListConfig) and isinstance(self.tgt_language, ListConfig):
raise ValueError(
"cfg.src_language and cfg.tgt_language cannot both be lists. We only support many-to-one or one-to-many multilingual models."
)
elif isinstance(self.src_language, ListConfig):
pass
elif isinstance(self.tgt_language, ListConfig):
for lng in self.tgt_language:
self.special_tokens["<" + lng + ">"] = "<" + lng + ">"
else:
raise ValueError(
"Expect either cfg.src_language or cfg.tgt_language to be a list when multilingual=True."
)
super().__init__(cfg, trainer=trainer)
def setup(self, stage=None):
        # NOTE: super().__init__ will try to set up the train/val/test datasets, but we sidestep this with an 'if self._train_ds is not None' condition.
# We then set things up for real only once setup() of this class is called.
resume_checkpoint_path = self.trainer._checkpoint_connector.resume_from_checkpoint_fit_path
if resume_checkpoint_path:
try:
init_consumed_samples = int(
float(re.findall(r"consumed_samples\=([0-9]+.[0-9]+)", resume_checkpoint_path)[0])
)
except (ValueError, TypeError):
logging.warning(
"Cannot parse the checkpoint file to get the consumed samples. This is expected if you are not using memmap datasets."
)
init_consumed_samples = 0
else:
init_consumed_samples = 0
self.init_consumed_samples = init_consumed_samples
if stage == 'predict':
return
# If the user wants to manually override train and validation dataloaders before calling `.fit()`
if self._train_dl is not None and self._validation_dl is not None:
return
self.build_train_valid_test_datasets()
self.setup_training_data(self._cfg.train_ds)
self.setup_validation_data(self._cfg.validation_ds)
if hasattr(self._cfg, 'test_ds'):
self.setup_test_data(self._cfg.test_ds)
# when using pipeline model parallel the final stage need to initialize word embeddings
if parallel_state.get_pipeline_model_parallel_world_size() > 1:
self.enc_dec_model.sync_initial_word_embeddings()
self.enc_dec_model.sync_initial_position_embeddings()
def _build_tokenizer(self):
# Instantiates tokenizers and register to be saved with NeMo Model archive
# After this call, there will be self.encoder_tokenizer and self.decoder_tokenizer
# Which can convert between tokens and token_ids for SRC and TGT languages correspondingly.
encoder_tokenizer_model = self.register_artifact(
"encoder_tokenizer.model", self._cfg.encoder_tokenizer.get('model')
)
decoder_tokenizer_model = self.register_artifact(
"decoder_tokenizer.model", self._cfg.decoder_tokenizer.get('model')
)
self.encoder_tokenizer, self.decoder_tokenizer = MTEncDecModel.setup_enc_dec_tokenizers(
encoder_tokenizer_library=self.encoder_tokenizer_library,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=self._cfg.encoder_tokenizer.get('bpe_dropout', 0.0)
if self._cfg.encoder_tokenizer.get('bpe_dropout', 0.0) is not None
else 0.0,
encoder_model_name=None,
encoder_r2l=self._cfg.encoder_tokenizer.get('r2l', False),
decoder_tokenizer_library=self.decoder_tokenizer_library,
encoder_tokenizer_vocab_file=self._cfg.encoder_tokenizer.get('vocab_file', None),
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=self._cfg.decoder_tokenizer.get('bpe_dropout', 0.0)
if self._cfg.decoder_tokenizer.get('bpe_dropout', 0.0) is not None
else 0.0,
decoder_model_name=None,
decoder_r2l=self._cfg.decoder_tokenizer.get('r2l', False),
special_tokens=self.special_tokens,
encoder_sentencepiece_legacy=self._cfg.encoder_tokenizer.get('sentencepiece_legacy', False),
decoder_sentencepiece_legacy=self._cfg.decoder_tokenizer.get('sentencepiece_legacy', False),
)
# Set up pre and post processors as well.
if self.multilingual:
(
self.source_processor_list,
self.target_processor_list,
self.multilingual_ids,
) = MTEncDecModel.setup_multilingual_ids_and_processors(
src_language=self.src_language,
tgt_language=self.tgt_language,
tokenizer=self.encoder_tokenizer, # Multilingual training requires shared tokenizers.
tokenizer_library=self.encoder_tokenizer_library,
)
else:
# After this call, the model will have self.source_processor and self.target_processor objects
self.source_processor, self.target_processor = MTEncDecModel.setup_pre_and_post_processing_utils(
self.src_language, self.tgt_language, self.encoder_tokenizer_library, self.decoder_tokenizer_library,
)
self.multilingual_ids = [None]
def _build_vocab(self):
self.padded_vocab_size = self._vocab_size_with_padding(
orig_vocab_size=self.encoder_tokenizer.vocab_size,
make_vocab_size_divisible_by=self._cfg.get('make_vocab_size_divisible_by', 128),
tensor_model_parallel_size=self._cfg.get('tensor_model_parallel_size', 1),
)
def training_step(self, batch, batch_idx):
        # Need to squeeze dim 0 for tarred datasets since things are pre-batched and we ask the dataloader for batch size 1.
if self._cfg.train_ds.dataset_type in ['tarred', 'text']:
batch = [[x.squeeze(dim=0) if x.ndim == 3 else x for x in microbatch] for microbatch in batch]
batch = self.process_global_batch_for_tarred_datasets(batch)
elif (
self._cfg.train_ds.dataset_type in ['bin_memmap', 'text_memmap']
and self._cfg.train_ds.get("sampler", "distributed") == 'distributed'
):
batch = self._process_global_batch_without_megatron_batch_sampler(batch, tokenizer=self.encoder_tokenizer)
if self._cfg.train_ds.dataset_type in ['tarred', 'text']:
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=batch['text_enc'].size(0) * parallel_state.get_data_parallel_world_size(),
micro_batch_size=batch['text_enc'].size(0),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
return super().training_step(batch, batch_idx)
def eval_step(self, batch, batch_idx, dataloader_idx, data_cfg):
        # Need to squeeze dim 0 for tarred datasets since things are pre-batched and we ask the dataloader for batch size 1.
batch = [[x.squeeze(dim=0) if x.ndim == 3 else x for x in microbatch] for microbatch in batch]
batch = self.process_global_batch_for_tarred_datasets(batch)
if data_cfg.dataset_type in ['tarred', 'text']:
app_state = AppState()
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=batch['text_enc'].size(0) * parallel_state.get_data_parallel_world_size(),
micro_batch_size=batch['text_enc'].size(0),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
# This returns the averaged loss across data-parallel groups.
reduced_loss = super().validation_step(batch, batch_idx)
tokens_enc, labels, enc_mask = batch['text_enc'], batch['labels'], batch['enc_mask']
predicted_tokens_ids, _ = self.decode(
tokens_enc,
enc_mask,
tokens_enc.size(1)
+ self._cfg.max_generation_delta, # Generate up to src-length + max generation delta. TODO: Implement better stopping when everything hits <EOS>.
tokenizer=self.decoder_tokenizer,
)
if self.multilingual:
source_processor = self.source_processor_list[dataloader_idx]
target_processor = self.target_processor_list[dataloader_idx]
else:
source_processor = self.source_processor
target_processor = self.target_processor
# Post-process the translations and inputs to log.
preds = self.postprocess_outputs(
outputs=predicted_tokens_ids, tokenizer=self.decoder_tokenizer, processor=target_processor,
)
labels = self.postprocess_outputs(
outputs=labels, tokenizer=self.decoder_tokenizer, processor=target_processor,
)
encoder_inputs = self.postprocess_outputs(
outputs=tokens_enc, tokenizer=self.encoder_tokenizer, processor=source_processor,
)
return {
'inputs': encoder_inputs,
'translations': preds,
'ground_truths': labels,
'loss': reduced_loss,
}
def postprocess_outputs(self, outputs, tokenizer, processor):
# Convert ids to lists.
outputs = outputs.cpu().numpy().tolist()
# Filter out the special tokens and de-tokenize.
results = []
for item in outputs:
if tokenizer.eos_id in item:
idx = item.index(tokenizer.eos_id)
item = item[:idx]
# Legacy sentencepiece detokenization still preserves special tokens which messes up exact string match.
if hasattr(tokenizer, 'special_token_to_id'):
item = [id for id in item if id not in tokenizer.special_token_to_id.values()]
item = tokenizer.ids_to_text(item)
results.append(item)
if processor is not None:
results = [processor.detokenize(item.split(' ')) for item in results]
return results
def validation_step(self, batch, batch_idx, dataloader_idx=0):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
return self.eval_step(batch, batch_idx, dataloader_idx, self._cfg.validation_ds)
def _setup_eval_dataloader_from_config(self, cfg: DictConfig, dataset):
rank = parallel_state.get_data_parallel_rank()
world_size = parallel_state.get_data_parallel_world_size()
dataloaders = []
for _dataset in dataset:
sampler = torch.utils.data.distributed.DistributedSampler(
_dataset, num_replicas=world_size, rank=rank, shuffle=False
)
dataloaders.append(
torch.utils.data.DataLoader(
dataset=_dataset,
batch_size=1,
sampler=sampler,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
shuffle=False,
)
)
return dataloaders
def validation_epoch_end(self, outputs):
return self.eval_epoch_end(outputs, 'val')
def test_epoch_end(self, outputs):
return self.eval_epoch_end(outputs, 'test')
def eval_epoch_end(self, outputs, mode):
if not outputs:
return
if isinstance(outputs[0], dict):
outputs = [outputs]
loss_list = []
bleu_score_list = []
for dataloader_idx, output in enumerate(outputs):
if parallel_state.is_pipeline_last_stage():
# only the last pipeline parallel stages return loss
averaged_loss = torch.stack([x['loss'] for x in output]).mean()
else:
averaged_loss = torch.tensor(0.0).to(self.device)
# we can only log on one rank if it is rank zero so we broadcast from last rank
torch.distributed.broadcast(averaged_loss, get_last_rank())
# averaged_loss = average_losses_across_data_parallel_group([x['loss'] for x in output])
inputs = list(itertools.chain(*[x['inputs'] for x in output]))
translations = list(itertools.chain(*[x['translations'] for x in output]))
ground_truths = list(itertools.chain(*[x['ground_truths'] for x in output]))
assert len(translations) == len(inputs)
assert len(translations) == len(ground_truths)
# Gather translations and ground truths from all workers
tr_gt_inp = [None for _ in range(parallel_state.get_data_parallel_world_size())]
# we also need to drop pairs where ground truth is an empty string
torch.distributed.all_gather_object(
tr_gt_inp,
[(t, g, i) for (t, g, i) in zip(translations, ground_truths, inputs)],
group=parallel_state.get_data_parallel_group(),
)
if parallel_state.get_data_parallel_rank() == 0:
_translations = []
_ground_truths = []
_inputs = []
# Deduplicate sentences that may have been distributed across multiple data parallel ranks.
gt_inp_set = set()
for rank in range(0, parallel_state.get_data_parallel_world_size()):
for t, g, i in tr_gt_inp[rank]:
if g + i not in gt_inp_set:
gt_inp_set.add(g + i)
_translations.append(t)
_ground_truths.append(g)
_inputs.append(i)
if self.tgt_language in ['ja']:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="ja-mecab")
elif self.tgt_language in ['zh']:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="zh")
else:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="13a")
bleu_score = sacre_bleu.score * parallel_state.get_data_parallel_world_size()
dataset_name = "Validation" if mode == 'val' else "Test"
logging.info(f"{dataset_name}, Dataloader index: {dataloader_idx}, Set size: {len(_translations)}")
logging.info(
f"{dataset_name}, Dataloader index: {dataloader_idx}, SacreBLEU = {bleu_score / parallel_state.get_data_parallel_world_size()}"
)
logging.info(f"{dataset_name}, Dataloader index: {dataloader_idx}, Translation Examples:")
logging.info('============================================================')
for example_idx in range(0, 3):
random_index = random.randint(0, len(_translations) - 1)
logging.info(" " + '\u0332'.join(f"Example {example_idx}:"))
logging.info(f" Input: {_inputs[random_index]}")
logging.info(f" Prediction: {_translations[random_index]}")
logging.info(f" Ground Truth: {_ground_truths[random_index]}")
logging.info('============================================================')
else:
bleu_score = 0.0
loss_list.append(averaged_loss.cpu().numpy())
bleu_score_list.append(bleu_score)
if dataloader_idx == 0:
self.log(f'{mode}_sacreBLEU', bleu_score, sync_dist=True)
self.log(f'{mode}_loss', averaged_loss, prog_bar=True)
if self.multilingual:
self._log_multilingual_bleu_and_loss(dataloader_idx, bleu_score, averaged_loss, mode)
else:
if self.multilingual:
self._log_multilingual_bleu_and_loss(dataloader_idx, bleu_score, averaged_loss, mode)
else:
self.log(f'{mode}_sacreBLEU_dl_index_{dataloader_idx}', bleu_score, sync_dist=True)
self.log(f'{mode}_loss_dl_index_{dataloader_idx}', averaged_loss, prog_bar=False)
if len(loss_list) > 1:
self.log(f"{mode}_loss_avg", np.mean(loss_list), sync_dist=True)
self.log(f"{mode}_sacreBLEU_avg", np.mean(bleu_score_list), sync_dist=True)
def _log_multilingual_bleu_and_loss(self, dataloader_idx, bleu_score, loss, mode):
"""
Function to log multilingual BLEU scores with the right source-target language string instead of just the dataloader idx.
"""
# Check if one-many or many-one and log with lang ids instead of dataloader_idx
reverse_lang_direction = self._cfg.train_ds.reverse_lang_direction
if isinstance(self.src_language, ListConfig):
translation_lang_string = (
f'{self.src_language[dataloader_idx]}-{self.tgt_language}'
if not reverse_lang_direction
else f'{self.tgt_language}-{self.src_language[dataloader_idx]}'
)
self.log(f'{mode}_sacreBLEU_{translation_lang_string}', bleu_score, sync_dist=True)
self.log(f'{mode}_loss_{translation_lang_string}', loss, sync_dist=True)
else:
translation_lang_string = (
f'{self.src_language}-{self.tgt_language[dataloader_idx]}'
if not reverse_lang_direction
else f'{self.tgt_language[dataloader_idx]}-{self.src_language}'
)
self.log(f'{mode}_sacreBLEU_{translation_lang_string}', bleu_score, sync_dist=True)
self.log(f'{mode}_loss_{translation_lang_string}', loss, sync_dist=True)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if hasattr(self, '_validation_ds'):
self._validation_dl = self._setup_eval_dataloader_from_config(
cfg=val_data_config, dataset=self._validation_ds
)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if hasattr(self, '_test_ds'):
self._test_dl = self._setup_eval_dataloader_from_config(cfg=test_data_config, dataset=self._test_ds)
def setup_training_data(self, train_data_config: Optional[DictConfig]):
# TODO: Figure out how to set global rank and world size for model parallel.
if hasattr(self, '_train_ds'):
if train_data_config.dataset_type in ['tarred', 'text']:
self._train_dl = MTEncDecModel._setup_dataloader_from_config(
cfg=train_data_config, dataset=self._train_ds
)
elif train_data_config.dataset_type in ['bin_memmap', 'text_memmap']:
consumed_samples = self.compute_consumed_samples(0)
self._train_dl = self._setup_megatron_dataloader_from_config(
cfg=train_data_config, dataset=self._train_ds, consumed_samples=consumed_samples
)
def _setup_megatron_dataloader_from_config(self, cfg, dataset, consumed_samples):
logging.info(f'Building dataloader with consumed samples: {consumed_samples}')
rank = parallel_state.get_data_parallel_rank()
world_size = parallel_state.get_data_parallel_world_size()
if isinstance(dataset, BlendableDataset):
collate_fn = dataset.datasets[0].collate_fn
else:
collate_fn = dataset.collate_fn
if cfg.get("sampler", "distributed") == 'distributed':
sampler = torch.utils.data.distributed.DistributedSampler(
dataset,
num_replicas=world_size,
rank=rank,
shuffle=True,
seed=consumed_samples, # Ensures that each time the model is restored, a new seed is used to see examples in a different order.
)
return torch.utils.data.DataLoader(
dataset,
collate_fn=collate_fn,
sampler=sampler,
batch_size=cfg.micro_batch_size,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
drop_last=cfg.drop_last,
)
elif cfg.get("sampler", "distributed") == 'megatron':
batch_sampler = MegatronPretrainingBatchSampler(
total_samples=len(dataset),
consumed_samples=consumed_samples,
micro_batch_size=cfg.micro_batch_size,
global_batch_size=cfg.global_batch_size,
data_parallel_rank=parallel_state.get_data_parallel_rank(),
data_parallel_size=parallel_state.get_data_parallel_world_size(),
drop_last=True,
)
return torch.utils.data.DataLoader(
dataset,
batch_sampler=batch_sampler,
collate_fn=collate_fn,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
else:
raise ValueError(f"Invalid sampler {cfg.sampler}. Options: ['distributed', 'megatron']")
def process_global_batch_for_tarred_datasets(self, batch):
"""Override parent process_batch since TranslationDataset does not return dictionaries."""
global_batch = []
for microbatch in batch:
# Convert each microbatch into a dictionary.
src_ids, src_mask, tgt_ids, tgt_mask, labels = microbatch
batch = {
'text_enc': src_ids,
'text_dec': tgt_ids,
'labels': labels,
'enc_mask': src_mask.long(), # super().process_batch() expects torch.int64
'dec_mask': tgt_mask.long(), # super().process_batch() expects torch.int64
'loss_mask': tgt_mask.long(), # super().process_batch() expects torch.int64
}
global_batch.append(batch)
# Parent function will pad microbatches to the same length.
return self._process_global_batch_without_megatron_batch_sampler(
global_batch, tokenizer=self.encoder_tokenizer
)
def build_train_valid_test_datasets(self):
"""Builds the train, validation, and test datasets."""
# Builds datasets if the type is tarred or from raw text without memmap.
if self._cfg.train_ds.dataset_type in ['tarred', 'text']:
self._train_ds = self.build_tarred_train_dataset()
elif self._cfg.train_ds.dataset_type in ['bin_memmap', 'text_memmap']:
self._train_ds = self.build_memmap_dataset_from_config(self._cfg.train_ds)
if self._cfg.validation_ds.get("dataset_type", "text") != "text":
raise ValueError(f"Validation dataset type must be 'text', found {self._cfg.validation_ds.dataset_type}")
self._validation_ds = MTEncDecModel._setup_eval_dataset_from_config(
cfg=self._cfg.validation_ds,
multilingual=self.multilingual,
multilingual_ids=self.multilingual_ids,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
)
# Test data config is optional.
if hasattr(self._cfg, 'test_ds'):
            if self._cfg.test_ds.get("dataset_type", "text") != "text":
raise ValueError(f"Test dataset type must be 'text', found {self._cfg.test_ds.dataset_type}")
self._test_ds = MTEncDecModel._setup_eval_dataset_from_config(
                cfg=self._cfg.test_ds,
multilingual=self.multilingual,
multilingual_ids=self.multilingual_ids,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
)
def build_memmap_dataset_from_config(self, cfg: DictConfig):
"""Builds a memmap dataset from a existing binary based o nthe provided config."""
is_src_listconfig = isinstance(cfg.src_file_name, ListConfig)
is_tgt_listconfig = isinstance(cfg.tgt_file_name, ListConfig)
# If multilingual, make sure both source and target are list configs
if self.multilingual:
if not (is_src_listconfig and is_tgt_listconfig):
raise ValueError(
f"Multilingual datasets must be configured with a ListConfig for both src_file_name and tgt_file_name"
)
if is_src_listconfig and not is_tgt_listconfig or is_tgt_listconfig and not is_src_listconfig:
raise ValueError(
f"Datasets must be configured with a ListConfig for both src_file_name and tgt_file_name or neither. Found only one of them as listconfig."
)
if is_src_listconfig and is_tgt_listconfig:
if len(cfg.src_file_name) != len(cfg.tgt_file_name):
raise ValueError(f"Datasets must have the same number of files in src_file_name and tgt_file_name")
if cfg.concat_sampling_probabilities is None or not isinstance(
cfg.concat_sampling_probabilities, ListConfig
):
raise ValueError(
f"concat_sampling_probabilities must be a ListConfig with the same number of files in src_file_name and tgt_file_name, found {cfg.concat_sampling_probabilities}"
)
if len(cfg.concat_sampling_probabilities) != len(cfg.src_file_name):
raise ValueError(
f"concat_sampling_probabilities must be of the same size as src_file_name and tgt_file_name. Provided size {len(cfg.concat_sampling_probabilities)}, number of datasets {len(cfg.src_file_name)}"
)
datasets = []
            for src_file, tgt_file in zip(cfg.src_file_name, cfg.tgt_file_name):
if cfg.dataset_type == 'bin_memmap':
dataset = BinarizedMemmapSequenceToSequenceDataset(
src_dataset_prefix=src_file,
                        tgt_dataset_prefix=tgt_file,
src_tokenizer=self.encoder_tokenizer,
tgt_tokenizer=self.decoder_tokenizer,
max_src_seq_length=cfg.max_seq_length,
max_tgt_seq_length=cfg.max_seq_length,
start_index=0,
end_index=None,
data_impl="mmap",
skip_warmup=True,
)
elif cfg.dataset_type == 'text_memmap':
dataset = TextMemmapSequenceToSequenceDataset(
src_file_name=src_file,
                        tgt_file_name=tgt_file,
src_tokenizer=self.encoder_tokenizer,
tgt_tokenizer=self.decoder_tokenizer,
max_src_seq_length=cfg.max_seq_length,
max_tgt_seq_length=cfg.max_seq_length,
)
datasets.append(dataset)
dataset = BlendableDataset(datasets=datasets, weights=cfg.concat_sampling_probabilities)
else:
if cfg.dataset_type == 'bin_memmap':
dataset = BinarizedMemmapSequenceToSequenceDataset(
src_dataset_prefix=cfg.src_file_name,
tgt_dataset_prefix=cfg.tgt_file_name,
src_tokenizer=self.encoder_tokenizer,
tgt_tokenizer=self.decoder_tokenizer,
max_src_seq_length=cfg.max_seq_length,
max_tgt_seq_length=cfg.max_seq_length,
start_index=0,
end_index=None,
data_impl="mmap",
skip_warmup=True,
)
elif cfg.dataset_type == 'text_memmap':
dataset = TextMemmapSequenceToSequenceDataset(
src_file_name=cfg.src_file_name,
tgt_file_name=cfg.tgt_file_name,
src_tokenizer=self.encoder_tokenizer,
tgt_tokenizer=self.decoder_tokenizer,
max_src_seq_length=cfg.max_seq_length,
max_tgt_seq_length=cfg.max_seq_length,
)
return dataset
def build_tarred_train_dataset(self):
return MTEncDecModel._setup_dataset_from_config(
cfg=self._cfg.train_ds,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
global_rank=parallel_state.get_data_parallel_rank(),
world_size=parallel_state.get_data_parallel_world_size(),
multilingual=self.multilingual,
multilingual_ids=self.multilingual_ids,
)
def list_available_models(self):
pass
def on_validation_epoch_end(self):
app_state = AppState()
if hasattr(self, "_train_ds"):
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=self._cfg.train_ds.global_batch_size,
micro_batch_size=self._cfg.train_ds.micro_batch_size,
data_parallel_size=parallel_state.get_data_parallel_world_size(),
)
@torch.no_grad()
def translate(
self,
text: List[str],
source_lang: str = None,
target_lang: str = None,
return_beam_scores: bool = False,
log_timing: bool = False,
) -> List[str]:
"""
Translates list of sentences from source language to target language.
Should be regular text, this method performs its own tokenization/de-tokenization
Args:
text: list of strings to translate
source_lang: if not "ignore", corresponding MosesTokenizer and MosesPunctNormalizer will be run
target_lang: if not "ignore", corresponding MosesDecokenizer will be run
return_beam_scores: if True, returns a list of translations and their corresponding beam scores.
log_timing: if True, prints timing information.
Returns:
list of translated strings
"""
# __TODO__: This will reset both source and target processors even if you want to reset just one.
# NOTE: This will also set up appropriate source and target processors for a given src/tgt language for multilingual models instead of creating a list of them.
if source_lang is not None or target_lang is not None:
self.source_processor, self.target_processor = MTEncDecModel.setup_pre_and_post_processing_utils(
source_lang, target_lang, self.encoder_tokenizer_library, self.decoder_tokenizer_library
)
mode = self.training
prepend_ids = []
if self.multilingual:
if source_lang is None or target_lang is None:
raise ValueError("Expect source_lang and target_lang to run inference for multilingual model.")
src_symbol = self.encoder_tokenizer.token_to_id('<' + source_lang + '>')
tgt_symbol = self.encoder_tokenizer.token_to_id('<' + target_lang + '>')
if src_symbol in self.multilingual_ids:
prepend_ids = [src_symbol]
elif tgt_symbol in self.multilingual_ids:
prepend_ids = [tgt_symbol]
if log_timing:
timer = timers.NamedTimer()
else:
timer = None
cache = {
"timer": timer,
}
try:
self.eval()
src, src_mask = MTEncDecModel.prepare_inference_batch(
text=text,
prepend_ids=prepend_ids,
target=False,
source_processor=self.source_processor,
target_processor=self.target_processor,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
device=self.device,
)
predicted_tokens_ids, _ = self.decode(
src,
src_mask,
src.size(1)
+ self._cfg.max_generation_delta, # Generate up to src-length + max generation delta. TODO: Implement better stopping when everything hits <EOS>.
tokenizer=self.decoder_tokenizer,
)
best_translations = self.postprocess_outputs(
outputs=predicted_tokens_ids, tokenizer=self.decoder_tokenizer, processor=self.target_processor
)
return_val = best_translations
finally:
self.train(mode=mode)
if log_timing:
timing = timer.export()
timing["mean_src_length"] = src_mask.sum().cpu().item() / src_mask.shape[0]
tgt, tgt_mask = self.prepare_inference_batch(
text=best_translations,
prepend_ids=prepend_ids,
target=True,
source_processor=self.source_processor,
target_processor=self.target_processor,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
device=self.device,
)
timing["mean_tgt_length"] = tgt_mask.sum().cpu().item() / tgt_mask.shape[0]
if type(return_val) is tuple:
return_val = return_val + (timing,)
else:
return_val = (return_val, timing)
return return_val
def itn_translate_tn(
self,
text: List[str],
source_lang: str = None,
target_lang: str = None,
return_beam_scores: bool = False,
log_timing: bool = False,
inverse_normalizer=None,
normalizer=None,
) -> List[str]:
"""
Calls the translate() method with the option of running ITN (inverse text-normalization) on the input and TN (text-normalization) on the output.
Pipeline : ITN -> translate -> TN
NOTE: ITN and TN objects must be initialized with the right languages.
Args:
text: list of strings to translate
source_lang: if not "ignore", corresponding MosesTokenizer and MosesPunctNormalizer will be run
target_lang: if not "ignore", corresponding MosesDecokenizer will be run
return_beam_scores: if True, returns a list of translations and their corresponding beam scores.
log_timing: if True, prints timing information.
inverse_normalizer: instance of nemo_text_processing.inverse_text_normalization.inverse_normalize.InverseNormalizer
normalizer: instance of nemo_text_processing.text_normalization.normalize.Normalizer
Returns:
list of translated strings
"""
if inverse_normalizer is not None:
text = [inverse_normalizer.normalize(example) for example in text]
translations = self.translate(text, source_lang, target_lang, return_beam_scores, log_timing)
if normalizer is not None:
translations = [normalizer.normalize(example) for example in translations]
return translations
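# A minimal usage sketch (illustrative only): here `model` stands for a loaded
# MTEncDecModel instance, and the normalizer constructor arguments are assumptions
# that may differ in your nemo_text_processing version.
#   from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
#   from nemo_text_processing.text_normalization.normalize import Normalizer
#   itn = InverseNormalizer(lang='en')
#   tn = Normalizer(input_case='cased', lang='de')
#   translations = model.itn_translate_tn(
#       ["it costs twenty five dollars"], source_lang='en', target_lang='de',
#       inverse_normalizer=itn, normalizer=tn,
#   )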
def on_train_start(self) -> None:
"""PTL hook used to override DataFetcher with GlobalBatchDataFetcher """
if self._cfg.train_ds.get("sampler", "distributed") == 'distributed':
self.trainer.fit_loop._data_fetcher = GlobalBatchDataFetcher()
def on_validation_start(self) -> None:
"""PTL hook used to override DataFetcher with GlobalBatchDataFetcher """
logging.info('Validation start ...')
self.trainer.fit_loop.epoch_loop.val_loop._data_fetcher = GlobalBatchDataFetcher()
self.trainer.validate_loop._data_fetcher = GlobalBatchDataFetcher()
def on_test_start(self) -> None:
self.trainer.test_loop._data_fetcher = GlobalBatchDataFetcher()
|
gkucsko/NeMo
|
nemo/collections/nlp/data/language_modeling/megatron/lm_adapted_t5_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import GPTDataset
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
class T5LMAdaptedDataset(GPTDataset):
"""
Dataset for unlearning span corruption (https://arxiv.org/abs/2104.08691) in T5 models.
Corresponds to the prefix-LM objective in the T5 paper (Table 3 in https://arxiv.org/abs/1910.10683).
"""
def __init__(
self,
cfg,
trainer,
tokenizer,
name,
data_prefix,
documents,
indexed_dataset,
num_samples,
seed,
max_seq_length_encoder,
max_seq_length_decoder,
**kwargs,
):
self.max_seq_length_encoder = max_seq_length_encoder
self.max_seq_length_decoder = max_seq_length_decoder
self.seed = seed
self.tokenizer = tokenizer
super().__init__(
cfg,
trainer,
tokenizer,
name,
data_prefix,
documents,
indexed_dataset,
num_samples,
self.max_seq_length_encoder
+ self.max_seq_length_decoder
+ 1, # +1 because the decoder sequence gets truncated by one due to shifting for teacher-forcing.
seed,
)
@classmethod
def get_prefix_lm_sample(
cls,
sample,
max_seq_length_encoder,
max_seq_length_decoder,
np_rng,
tokenizer,
pivot_mean=0.25,
pivot_distribution=LengthDistribution.uniform,
):
# get random split index
if pivot_distribution == LengthDistribution.truncated_normal and (pivot_mean < 0.0 or pivot_mean > 1.0):
raise ValueError(
f"Invalid pivot_mean: {pivot_mean}. Must be in [0.0, 1.0]. It is a fraction of the encoder sequence length."
)
# If the sample is larger than max encoder sequence length, use max encoder sequence length, otherwise use sample length.
max_split_idx = min(len(sample), max_seq_length_encoder)
if pivot_distribution == LengthDistribution.uniform:
split_idx = np_rng.randint(0, max_split_idx)
elif pivot_distribution == LengthDistribution.truncated_normal:
loc = pivot_mean * max_split_idx
split_idx = np.clip(int(np_rng.normal(loc=loc, scale=loc)), 0, max_split_idx,)
else:
raise ValueError(f"Invalid pivot_distribution: {pivot_distribution}")
# Encoder inputs get truncated based on the split index
tokens_enc = np.concatenate(
[sample[:split_idx], [tokenizer.pad_id] * (max_seq_length_encoder - split_idx)]
).astype(np.int64)
# The decoder sequence is never truncated and is always of max decoder length.
tokens_dec = sample[split_idx : split_idx + max_seq_length_decoder + 1]
# NOTE: Add bos only and not eos because the model will always generate till max seq length.
tokens_dec = np.concatenate(
[[tokenizer.bos_id], tokens_dec, [tokenizer.pad_id] * (max_seq_length_decoder - len(tokens_dec) + 1)]
).astype(np.int64)
# Shift sequences for teacher forcing
tokens_dec_in = tokens_dec[:-1]
labels = tokens_dec[1:]
# Create attention masks
enc_mask = (tokens_enc != tokenizer.pad_id).astype(np.int64)
dec_mask = (tokens_dec_in != tokenizer.pad_id).astype(np.int64)
loss_mask = dec_mask
train_sample = {
'text_enc': tokens_enc,
'text_dec': tokens_dec_in,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
return train_sample
def __getitem__(self, idx):
text = super()._get_text(idx)
np_rng = np.random.RandomState(seed=(self.seed + idx))
sample = T5LMAdaptedDataset.get_prefix_lm_sample(
sample=text,
max_seq_length_encoder=self.max_seq_length_encoder,
max_seq_length_decoder=self.max_seq_length_decoder,
np_rng=np_rng,
tokenizer=self.tokenizer,
pivot_distribution=LengthDistribution.uniform,
)
return sample
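# Minimal standalone sketch of get_prefix_lm_sample (illustrative only): it uses a
# stand-in tokenizer object that provides just pad_id/bos_id, which is all the
# classmethod touches. Guarded so it never runs on import; assumes the module's
# imports resolve in your environment.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_tokenizer = SimpleNamespace(pad_id=0, bos_id=1)
    rng = np.random.RandomState(seed=1234)
    fake_sample = np.arange(2, 20)  # pretend token ids for one document
    demo = T5LMAdaptedDataset.get_prefix_lm_sample(
        sample=fake_sample,
        max_seq_length_encoder=16,
        max_seq_length_decoder=8,
        np_rng=rng,
        tokenizer=fake_tokenizer,
    )
    # text_enc holds the (padded) prefix; text_dec/labels hold the shifted continuation.
    print({key: value.shape for key, value in demo.items()})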
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/evaluation/get_multi_reference_vocab.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to construct a vocabulary of multiple references
"""
from argparse import ArgumentParser
from collections import Counter
from os import listdir
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import spoken_preprocessing
parser = ArgumentParser(description="Get reference vocabulary from corpus (it will be used in testing)")
parser.add_argument("--data_dir", type=str, required=True, help="Path to folder with data")
parser.add_argument("--out_filename", type=str, required=True, help="Path to output file")
args = parser.parse_args()
if __name__ == "__main__":
vcb = {}
filenames = []
for fn in listdir(args.data_dir + "/train"):
filenames.append(args.data_dir + "/train/" + fn)
for fn in listdir(args.data_dir + "/dev"):
filenames.append(args.data_dir + "/dev/" + fn)
for fn in filenames:
print("Processing ", fn)
with open(fn, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split("\t")
if len(parts) < 3:
continue
if len(parts) != 3:
raise ValueError("Expect 3 parts, got " + str(len(parts)))
semiotic_class, written, spoken = parts[0], parts[1].strip().casefold(), parts[2].strip().casefold()
spoken = spoken_preprocessing(spoken)
if spoken == "<self>":
continue
if spoken == "" or written == "":
continue
if len(spoken.split(" ")) >= 100:
continue
k = (semiotic_class, spoken)
if k not in vcb:
vcb[k] = Counter()
vcb[k][written] += 1
with open(args.out_filename, "w", encoding="utf-8") as out:
for sem, spoken in vcb:
for written in vcb[(sem, spoken)]:
out.write(sem + "\t" + spoken + "\t" + written + "\t" + str(vcb[(sem, spoken)][written]) + "\n")
out.write(sem + "\t" + spoken + "\t" + spoken + "\t1\n")
|
gkucsko/NeMo
|
tests/collections/nlp/test_gpt_model.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.utils import get_ltor_masks_and_position_ids
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPPlugin
DEVICE_CAPABILITY = None
if torch.cuda.is_available():
DEVICE_CAPABILITY = torch.cuda.get_device_capability()
@pytest.fixture()
def model_cfg(test_data_dir):
model_cfg = {
'precision': 16,
'micro_batch_size': 4,
'global_batch_size': 8,
'tensor_model_parallel_size': 1,
'pipeline_model_parallel_size': 1,
'resume_from_checkpoint': None,
'encoder_seq_length': 512,
'max_position_embeddings': 512,
'num_layers': 1,
'hidden_size': 128,
'ffn_hidden_size': 512,
'num_attention_heads': 2,
'init_method_std': 0.02,
'hidden_dropout': 0.1,
'kv_channels': None,
'apply_query_key_layer_scaling': True,
'layernorm_epsilon': 1e-5,
'make_vocab_size_divisible_by': 128,
'pre_process': True,
'post_process': True,
'persist_layer_norm': True,
'gradient_as_bucket_view': True,
'tokenizer': {
'library': 'megatron',
'type': 'GPT2BPETokenizer',
'model': None,
'vocab_file': os.path.join(test_data_dir, 'nlp/gpt_vocab_merges/vocab.json'),
'merge_file': os.path.join(test_data_dir, 'nlp/gpt_vocab_merges/merges.txt'),
'delimiter': None,
},
'native_amp_init_scale': 4294967296,
'native_amp_growth_interval': 1000,
'hysteresis': 2,
'fp32_residual_connection': False,
'fp16_lm_cross_entropy': False,
'megatron_amp_O2': False,
'seed': 1234,
'use_cpu_initialization': False,
'onnx_safe': False,
'apex_transformer_log_level': 30,
'activations_checkpoint_method': None,
'activations_checkpoint_num_layers': 1,
'data': {
'data_prefix': '???',
'index_mapping_dir': None,
'data_impl': 'mmap',
'splits_string': '900,50,50',
'seq_length': 512,
'skip_warmup': True,
'num_workers': 2,
'dataloader_type': 'single',
'reset_position_ids': False,
'reset_attention_mask': False,
'eod_mask_loss': False,
},
'optim': {
'name': 'fused_adam',
'lr': 2e-4,
'weight_decay': 0.01,
'betas': [0.9, 0.98],
'sched': {'name': 'CosineAnnealing', 'warmup_steps': 500, 'constant_steps': 50000, 'min_lr': '2e-5'},
},
}
return model_cfg
@pytest.fixture()
def trainer_cfg():
trainer_cfg = {
'devices': 1,
'num_nodes': 1,
'accelerator': 'gpu',
'precision': 16,
'logger': False,
'enable_checkpointing': False,
'replace_sampler_ddp': False,
'max_epochs': 1000,
'max_steps': 100000,
'log_every_n_steps': 10,
'val_check_interval': 100,
'limit_val_batches': 50,
'limit_test_batches': 500,
'accumulate_grad_batches': 1,
'gradient_clip_val': 1.0,
}
return trainer_cfg
@pytest.fixture()
def precision():
return 32
@pytest.fixture()
def gpt_model(model_cfg, trainer_cfg, precision):
model_cfg['precision'] = precision
trainer_cfg['precision'] = precision
plugins = [NLPDDPPlugin()]
trainer = Trainer(plugins=plugins, **trainer_cfg)
cfg = DictConfig(model_cfg)
model = MegatronGPTModel(cfg=cfg, trainer=trainer)
return model
@pytest.fixture()
def test_text():
test_text = [
"hello, world",
"four score and seven years ago",
"Your time is limited",
"If you set goals rediculously high",
]
return test_text
@pytest.mark.run_only_on('GPU')
class TestGPTModel:
@pytest.mark.unit
def test_constructor(self, gpt_model):
assert isinstance(gpt_model, MegatronGPTModel)
num_weights = gpt_model.num_weights
assert num_weights == 6702976
@pytest.mark.unit
def test_tokenizer(self, gpt_model, test_text):
assert isinstance(gpt_model.tokenizer, AutoTokenizer)
assert gpt_model.tokenizer.name == 'GPT2Tokenizer'
assert gpt_model.tokenizer.vocab_size == 50257
ids = [gpt_model.tokenizer.text_to_ids(text) for text in test_text]
true_ids = [
[31373, 11, 995],
[14337, 4776, 290, 3598, 812, 2084],
[7120, 640, 318, 3614],
[1532, 345, 900, 4661, 2266, 291, 18117, 1029],
]
assert sum([id_list == true_id_list for id_list, true_id_list in zip(ids, true_ids)]) == 4
@pytest.mark.parametrize(
"precision",
[
32,
16,
pytest.param(
"bf16",
marks=pytest.mark.skipif(
not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8,
reason='bfloat16 is not supported on this device',
),
),
],
)
@pytest.mark.unit
def test_forward(self, gpt_model, test_text):
dtype = None
if gpt_model.cfg['precision'] == 32:
dtype = torch.float
elif gpt_model.cfg['precision'] == 16:
dtype = torch.float16
elif gpt_model.cfg['precision'] == 'bf16':
dtype = torch.bfloat16
else:
raise ValueError(f"precision: {gpt_model.cfg['precision']} is not supported.")
gpt_model.eval()
ids = [gpt_model.tokenizer.text_to_ids(text) for text in test_text]
id_tensors = [torch.unsqueeze(torch.LongTensor(id_list), dim=0) for id_list in ids]
masks_and_position_ids = [
get_ltor_masks_and_position_ids(id_tensor, gpt_model.tokenizer.eos_id, False, False, False)
for id_tensor in id_tensors
]
output_tensors = []
with torch.no_grad():
for tokens, attn_mask_and_pos_ids in zip(id_tensors, masks_and_position_ids):
attn_mask, _, pos_ids = attn_mask_and_pos_ids
assert tokens.shape == pos_ids.shape
assert attn_mask.shape[2] == attn_mask.shape[3] == tokens.shape[1] == pos_ids.shape[1]
with torch.autocast('cuda', dtype=dtype):
output_tensor = gpt_model.forward(
tokens=tokens.cuda(),
text_position_ids=pos_ids.cuda(),
attention_mask=attn_mask.cuda(),
labels=None,
)
assert output_tensor.shape[1] == tokens.shape[1]
assert output_tensor.shape[2] == gpt_model.padded_vocab_size
assert output_tensor.dtype == dtype
output_tensors.append(output_tensor)
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/dataset/dialogue_gpt_generation_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
class DialogueGPTGenerationDataset(DialogueDataset):
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
""" Constructor
Designed for free form generation tasks such as Dialogue Response Generation
Args:
dataset_split: dataset split
dialogues_processor: dialogues processor
tokenizer: tokenizer
cfg: cfg container for dataset
"""
self.cfg = cfg
self.input_label_type = self.cfg.input_field
self.output_label_type = self.cfg.output_field
self.tokenizer = tokenizer
self.tokenizer.tokenizer.padding_side = "right"
if not isinstance(dataset_split, str):
dataset_split = dataset_split[0]
self.features = dialogues_processor.get_dialog_examples(dataset_split)
self.features = self.remove_invalid_samples(self.features)
if self.cfg.debug_mode:
self.features = self.features[:16]
def remove_invalid_samples(self, features):
valid_idxs = []
all_fields = self.input_label_type.split('+') + self.output_label_type.split('+')
for i in range(len(features)):
features[i].data["labels"]["utterance"] = features[i].data["utterance"]
all_fields_non_empty = True
for field in all_fields:
if not features[i].data["labels"][field] or not features[i].data["labels"][field].strip():
all_fields_non_empty = False
if all_fields_non_empty:
valid_idxs.append(i)
return [features[i] for i in valid_idxs]
def __len__(self):
return len(self.features)
def get_n_tokens_in_sentence(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding=False, return_tensors="pt"
)
output = torch.squeeze(encodings_dict['input_ids'])
return len(output) if len(output.size()) > 0 else 0
def default_encode(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding="max_length", return_tensors="pt"
)
input_ids = torch.squeeze(encodings_dict['input_ids'])
attn_masks = torch.squeeze(encodings_dict['attention_mask'])
return encodings_dict, input_ids, attn_masks
def format_prompt(self, ex):
'''
Formats the training prompt based on self.input_label_type
Training example:
e.g. response: <response> # input_label_type = response
e.g. utterance: <utterance> # input_label_type = utterance
e.g. passage: <passage> utterance: <utterance> # input_label_type = passage+utterance
'''
ex["labels"]["utterance"] = ex["utterance"]
parts = self.input_label_type.split('+')
input_sentence = ' '.join([part + ': ' + ex["labels"][part] for part in parts])
return input_sentence
def __getitem__(self, idx: int):
'''
For each example, this function determines the format of input and output sequences based on user-specified configuration.
This is controlled by model.dataset.input_field and model.dataset.output_field
For instance:
If model.dataset.input_field == response and model.dataset.output_field == fluent_response:
Input = "response: <response>" and output = "response: <response> fluent_response: <fluent_response>" (with loss calculated from <fluent_response> only)
If model.dataset.input_field == utterance and model.dataset.output_field == response:
Input = "utterance: <utterance>" and output = "utterance: <utterance> response: <response>" (with loss calculated from <response> only)
If model.dataset.input_field == passage+utterance and model.dataset.output_field == response:
Input = "passage: <passage> utterance: <utterance>" and output="passage: <passage> utterance: <utterance> response: <response>" (with loss calculated from <response> only)
'''
ex = self.features[idx].data
input_sentence = self.format_prompt(ex)
utterance_length = self.get_n_tokens_in_sentence(input_sentence)
output_sentence = ex["labels"][self.output_label_type]
base_template = input_sentence
sentence_without_answer = base_template + ' ' + self.output_label_type + ':'
sentence = sentence_without_answer + ' ' + output_sentence
encodings_dict, input_ids, attn_masks = self.default_encode(sentence)
labels = copy.copy(torch.squeeze(encodings_dict['input_ids']))
training_mask_end = self.get_n_tokens_in_sentence(sentence_without_answer)
labels.data = torch.tensor(
[-100 if i < training_mask_end else labels.data[i] for i in range(len(labels.data))]
)
return (input_ids, attn_masks, labels, training_mask_end, utterance_length)
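        # Illustration of the label masking above (hypothetical token positions): for an
        # encoded sequence covering "utterance: how are you response: i am fine", every
        # position before training_mask_end (the prompt plus "response:") is set to -100,
        # the conventional ignore index, so those prompt positions are excluded from the loss.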
|
gkucsko/NeMo
|
nemo_text_processing/text_normalization/en/verbalizers/post_processing.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nemo_text_processing.text_normalization.en.graph_utils import (
MIN_NEG_WEIGHT,
NEMO_ALPHA,
NEMO_CHAR,
NEMO_SIGMA,
NEMO_SPACE,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo.utils import logging
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class PostProcessingFst:
"""
Finite state transducer that post-processes an entire sentence after verbalization is complete, e.g.
removes extra spaces around punctuation marks " ( one hundred and twenty three ) " -> "(one hundred and twenty three)"
Args:
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
"""
def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):
far_file = None
if cache_dir is not None and cache_dir != "None":
os.makedirs(cache_dir, exist_ok=True)
far_file = os.path.join(cache_dir, "en_tn_post_processing.far")
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode="r")["post_process_graph"]
logging.info(f'Post processing graph was restored from {far_file}.')
else:
self.set_punct_dict()
self.fst = self.get_punct_postprocess_graph()
if far_file:
generator_main(far_file, {"post_process_graph": self.fst})
def set_punct_dict(self):
self.punct_marks = {
"'": [
"'",
'´',
'ʹ',
'ʻ',
'ʼ',
'ʽ',
'ʾ',
'ˈ',
'ˊ',
'ˋ',
'˴',
'ʹ',
'΄',
'՚',
'՝',
'י',
'׳',
'ߴ',
'ߵ',
'ᑊ',
'ᛌ',
'᾽',
'᾿',
'`',
'´',
'῾',
'‘',
'’',
'‛',
'′',
'‵',
'ꞌ',
''',
'`',
'𖽑',
'𖽒',
],
}
def get_punct_postprocess_graph(self):
"""
Returns graph to post process punctuation marks.
{``} quotes are converted to {"}. Note, if there are spaces around single quote {'}, they will be kept.
By default, a space is added after a punctuation mark, and spaces are removed before punctuation marks.
"""
punct_marks_all = PunctuationFst().punct_marks
# no_space_before_punct marks assume there is no space before them
quotes = ["'", "\"", "``", "«"]
dashes = ["-", "—"]
brackets = ["<", "{", "("]
open_close_single_quotes = [
("`", "`"),
]
open_close_double_quotes = [('"', '"'), ("``", "``"), ("“", "”")]
open_close_symbols = open_close_single_quotes + open_close_double_quotes
allow_space_before_punct = ["&"] + quotes + dashes + brackets + [k[0] for k in open_close_symbols]
no_space_before_punct = [m for m in punct_marks_all if m not in allow_space_before_punct]
no_space_before_punct = pynini.union(*no_space_before_punct)
no_space_after_punct = pynini.union(*brackets)
delete_space = pynutil.delete(" ")
delete_space_optional = pynini.closure(delete_space, 0, 1)
# non_punct allows space
# delete space before no_space_before_punct marks, if present
non_punct = pynini.difference(NEMO_CHAR, no_space_before_punct).optimize()
graph = (
pynini.closure(non_punct)
+ pynini.closure(
no_space_before_punct | pynutil.add_weight(delete_space + no_space_before_punct, MIN_NEG_WEIGHT)
)
+ pynini.closure(non_punct)
)
graph = pynini.closure(graph).optimize()
graph = pynini.compose(
graph, pynini.cdrewrite(pynini.cross("``", '"'), "", "", NEMO_SIGMA).optimize()
).optimize()
# remove space after no_space_after_punct (even if there are no matching closing brackets)
no_space_after_punct = pynini.cdrewrite(delete_space, no_space_after_punct, NEMO_SIGMA, NEMO_SIGMA).optimize()
graph = pynini.compose(graph, no_space_after_punct).optimize()
# remove space around text in quotes
single_quote = pynutil.add_weight(pynini.accep("`"), MIN_NEG_WEIGHT)
double_quotes = pynutil.add_weight(pynini.accep('"'), MIN_NEG_WEIGHT)
quotes_graph = (
single_quote + delete_space_optional + NEMO_ALPHA + NEMO_SIGMA + delete_space_optional + single_quote
).optimize()
# this is to make sure multiple quotes are tagged from right to left without skipping any quotes on the left
not_alpha = pynini.difference(NEMO_CHAR, NEMO_ALPHA).optimize() | pynutil.add_weight(
NEMO_SPACE, MIN_NEG_WEIGHT
)
end = pynini.closure(pynutil.add_weight(not_alpha, MIN_NEG_WEIGHT))
quotes_graph |= (
double_quotes
+ delete_space_optional
+ NEMO_ALPHA
+ NEMO_SIGMA
+ delete_space_optional
+ double_quotes
+ end
)
quotes_graph = pynutil.add_weight(quotes_graph, MIN_NEG_WEIGHT)
quotes_graph = NEMO_SIGMA + pynini.closure(NEMO_SIGMA + quotes_graph + NEMO_SIGMA)
graph = pynini.compose(graph, quotes_graph).optimize()
# remove space between a word and a single quote followed by s
remove_space_around_single_quote = pynini.cdrewrite(
delete_space_optional + pynini.union(*self.punct_marks["'"]) + delete_space,
NEMO_ALPHA,
pynini.union("s ", "s[EOS]"),
NEMO_SIGMA,
)
graph = pynini.compose(graph, remove_space_around_single_quote).optimize()
return graph
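# Illustrative effect of the composed post-processing graph (examples only):
#   "hello , world ."                    -> "hello, world."
#   " ( one hundred and twenty three ) " -> "(one hundred and twenty three)"
# Hypothetical usage sketch (assumes pynini is available; text_acceptor is any
# acceptor built from the verbalized sentence):
#   fst = PostProcessingFst(cache_dir=None).fst
#   output = pynini.shortestpath(pynini.compose(text_acceptor, fst)).string()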
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/dataset/dialogue_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.core.classes import Dataset
__all__ = ['DialogueDataset']
class DialogueDataset(Dataset):
'''
Base class for Dialogue Datasets
1. Performs Model-dependent (but Data-independent) operations (tokenization etc)
2. This can allow the same model preprocessing for multiple datasources
3. Users can configure which labels to use for modelling
(e.g. intent classification, slot filling or sequence generation etc)
'''
def __init__(self, dataset_split: str, dialogues_processor: object, **kwargs):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __getitem__(self, idx: int):
raise NotImplementedError
|
gkucsko/NeMo
|
nemo/collections/nlp/data/text_normalization_as_tagging/bert_example.py
|
# Copyright 2019 The Google Research Authors.
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/lasertagger/blob/master/bert_example.py
"""
import logging
from collections import OrderedDict
from os import path
from typing import Dict, List, Optional, Tuple, Union
from transformers import PreTrainedTokenizerBase
from nemo.collections.nlp.data.text_normalization_as_tagging.tagging import EditingTask, Tag
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import yield_sources_and_targets
"""Build BERT Examples from source, target pairs.
The difference from the original Lasertagger approach is that our target already consists of tags,
so the preprocessing is trivial.
"""
class BertExample(object):
"""Class for training and inference examples for BERT.
Attributes:
editing_task: The EditingTask from which this example was created. Needed
when realizing labels predicted for this example.
features: Feature dictionary.
"""
def __init__(
self,
input_ids: List[int],
input_mask: List[int],
segment_ids: List[int],
labels_mask: List[int],
tag_labels: List[int],
semiotic_labels: List[int],
semiotic_spans: List[Tuple[int, int, int]],
token_start_indices: List[int],
task: EditingTask,
default_label: int,
) -> None:
"""Inputs to the example wrapper
Args:
input_ids: indices of tokens which constitute batches of masked text segments
input_mask: bool tensor with 0s in place of source tokens to be masked
segment_ids: bool tensor with 0's and 1's to denote the text segment type
tag_labels: indices of tokens which should be predicted from each of the
corresponding input tokens
labels_mask: bool tensor with 0s in place of label tokens to be masked
token_start_indices: the indices of the WordPieces that start a token.
semiotic_labels: indices of semiotic classes which should be predicted from each of the
corresponding input tokens
semiotic_spans: list of tuples (class_id, start_wordpiece_idx, end_wordpiece_idx), end is exclusive
task: Example Text-Editing Task used by the LaserTagger model during inference.
default_label: The default label for the KEEP tag-ID
"""
input_len = len(input_ids)
if not (
input_len == len(input_mask)
and input_len == len(segment_ids)
and input_len == len(labels_mask)
and input_len == len(tag_labels)
and input_len == len(semiotic_labels)
):
raise ValueError('All feature lists should have the same length ({})'.format(input_len))
self.features = OrderedDict(
[
("input_ids", input_ids),
("input_mask", input_mask),
("segment_ids", segment_ids),
("labels_mask", labels_mask),
("tag_labels", tag_labels),
("semiotic_labels", semiotic_labels),
("semiotic_spans", semiotic_spans),
]
)
self._token_start_indices = token_start_indices
self.editing_task = task
self._default_label = default_label
def pad_to_max_length(self, max_seq_length: int, max_semiotic_length: int, pad_token_id: int) -> None:
"""Pad the feature vectors so that they all have max_seq_length.
Args:
max_seq_length: The length that all features, except semiotic_classes, will have after padding.
max_semiotic_length: The length that semiotic_classes will have after padding.
pad_token_id: input_ids feature is padded with this ID, other features
with ID 0.
"""
pad_len = max_seq_length - len(self.features['input_ids'])
self.features["semiotic_spans"].extend(
[(-1, -1, -1)] * (max_semiotic_length - len(self.features["semiotic_spans"]))
)
for key in self.features:
if key == "semiotic_spans":
continue
pad_id = pad_token_id if (key == "input_ids") else 0
self.features[key].extend([pad_id] * pad_len)
if len(self.features[key]) != max_seq_length:
raise ValueError(
"{} has length {} (should be {}).".format(key, len(self.features[key]), max_seq_length)
)
def get_token_labels(self, features_key: str) -> List[int]:
"""Returns labels/tags for the original tokens, not for wordpieces."""
labels = []
for idx in self._token_start_indices:
# For unmasked and untruncated tokens, use the label in the features, and
# for the truncated tokens, use the default label.
if idx < len(self.features[features_key]) and self.features["labels_mask"][idx]:
labels.append(self.features[features_key][idx])
else:
labels.append(self._default_label)
return labels
class BertExampleBuilder(object):
"""Builder class for BertExample objects."""
def __init__(
self,
label_map: Dict[str, int],
semiotic_classes: Dict[str, int],
tokenizer: PreTrainedTokenizerBase,
max_seq_length: int,
) -> None:
"""Initializes an instance of BertExampleBuilder.
Args:
label_map: Mapping from tags to tag IDs.
semiotic_classes: Mapping from semiotic classes to their ids.
tokenizer: Tokenizer object.
max_seq_length: Maximum sequence length.
"""
self._label_map = label_map
self._semiotic_classes = semiotic_classes
self._tokenizer = tokenizer
self._max_seq_length = max_seq_length
self._max_semiotic_length = max(4, int(max_seq_length / 2))
self._pad_id = self._tokenizer.pad_token_id
self._keep_tag_id = self._label_map["KEEP"]
def build_bert_example(
self, source: str, target: Optional[str] = None, semiotic_info: Optional[str] = None, infer: bool = False
) -> Optional[BertExample]:
"""Constructs a BERT Example.
Args:
source: Source text.
target: Target text or None when building an example during inference.
semiotic_info: String or None
infer: inference mode
Returns:
BertExample, or None if the conversion from text to tags was infeasible
"""
# Compute target labels.
task = EditingTask(source)
if (target is not None) and (not infer):
tags = BertExampleBuilder._compute_tags(task, target)
if not tags:
return None
else:
# If target is not provided, we set all target labels to KEEP.
tags = [Tag("KEEP") for _ in task.source_tokens]
source_tags = [self._label_map[str(tag)] for tag in tags]
tokens, tag_labels, token_start_indices = self._split_to_wordpieces(task.source_tokens, source_tags)
tokens = self._truncate_list(tokens)
tag_labels = self._truncate_list(tag_labels)
input_tokens = ["[CLS]"] + tokens + ["[SEP]"]
labels_mask = [0] + [1] * len(tag_labels) + [0]
tag_labels = [0] + tag_labels + [0]
if "PLAIN" not in self._semiotic_classes:
raise KeyError("PLAIN should be in self._semiotic_classes")
plain_cid = self._semiotic_classes["PLAIN"]
semiotic_labels = [plain_cid] * len(tag_labels) # we use the same mask for semiotic labels as for tag labels
input_ids = self._tokenizer.convert_tokens_to_ids(input_tokens)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
semiotic_spans = []
if semiotic_info is not None:
# e.g. semiotic_info="CARDINAL 7 8;DATE 9 12"
# translate class name to its id, translate coords from tokens to wordpieces
semiotic_info_parts = semiotic_info.split(";")
previous_end = 0
for p in semiotic_info_parts:
if p == "":
break
c, start, end = p.split(" ")
if c not in self._semiotic_classes:
raise KeyError("c=" + c + " not found in self._semiotic_classes")
cid = self._semiotic_classes[c]
start = int(start)
end = int(end)
if start >= len(token_start_indices):
raise IndexError(
"start=" + str(start) + " is outside len(token_start_indices)=" + str(len(token_start_indices))
)
while previous_end < start:
subtoken_start = token_start_indices[previous_end]
subtoken_end = (
token_start_indices[previous_end + 1]
if previous_end + 1 < len(token_start_indices)
else len(input_ids) - 1
)
semiotic_spans.append((plain_cid, subtoken_start, subtoken_end))
previous_end += 1
subtoken_start = token_start_indices[start]
subtoken_end = token_start_indices[end] if end < len(token_start_indices) else len(input_ids) - 1
if subtoken_end >= self._max_seq_length: # possible if input_ids gets truncated to the max_seq_length
break
semiotic_spans.append((cid, subtoken_start, subtoken_end))
semiotic_labels[subtoken_start:subtoken_end] = [cid] * (subtoken_end - subtoken_start)
previous_end = end
while previous_end < len(token_start_indices):
subtoken_start = token_start_indices[previous_end]
subtoken_end = (
token_start_indices[previous_end + 1]
if previous_end + 1 < len(token_start_indices)
else len(input_ids) - 1
)
semiotic_spans.append((plain_cid, subtoken_start, subtoken_end))
previous_end += 1
if len(input_ids) > self._max_seq_length or len(semiotic_spans) > self._max_semiotic_length:
return None
example = BertExample(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
labels_mask=labels_mask,
tag_labels=tag_labels,
semiotic_labels=semiotic_labels,
semiotic_spans=semiotic_spans,
token_start_indices=token_start_indices,
task=task,
default_label=self._keep_tag_id,
)
example.pad_to_max_length(self._max_seq_length, self._max_semiotic_length, self._pad_id)
return example
def _split_to_wordpieces(self, tokens: List[str], labels: List[int]) -> Tuple[List[str], List[int], List[int]]:
"""Splits tokens (and the labels accordingly) to WordPieces.
Args:
tokens: Tokens to be split.
labels: Labels (one per token) to be split.
Returns:
3-tuple with the split tokens, split labels, and the indices of the
WordPieces that start a token.
"""
bert_tokens = [] # Original tokens split into wordpieces.
bert_labels = [] # Label for each wordpiece.
# Index of each wordpiece that starts a new token.
token_start_indices = []
for i, token in enumerate(tokens):
# '+ 1' is because bert_tokens will be prepended by [CLS] token later.
token_start_indices.append(len(bert_tokens) + 1)
pieces = self._tokenizer.tokenize(token)
bert_tokens.extend(pieces)
bert_labels.extend([labels[i]] * len(pieces))
return bert_tokens, bert_labels, token_start_indices
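        # Illustration (hypothetical wordpiece splits): tokens ["hello", "unaffable"] with
        # labels [3, 7] might become bert_tokens ["hello", "una", "##ffa", "##ble"],
        # bert_labels [3, 7, 7, 7], and token_start_indices [1, 2] (offset by +1 because
        # "[CLS]" will be prepended later).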
def _truncate_list(self, x: Union[List[str], List[int]]) -> Union[List[str], List[int]]:
"""Returns truncated version of x according to the self._max_seq_length."""
# Save two slots for the first [CLS] token and the last [SEP] token.
return x[: self._max_seq_length - 2]
def _get_pad_id(self) -> int:
"""Returns the ID of the [PAD] token (or 0 if it's not in the vocab)."""
try:
return self._tokenizer.pad_token_id
except KeyError:
return 0
@staticmethod
def _compute_tags(task: EditingTask, target: str) -> List[Tag]:
"""Computes tags needed for converting the source into the target.
Args:
task: tagging.EditingTask that specifies the input.
target: Target text.
Returns:
List of tagging.Tag objects.
"""
target_tokens = target.split(" ")
if len(target_tokens) != len(task.source_tokens):
raise ValueError("Length mismatch: " + str(task.source_tokens) + "\n" + target)
tags = []
for t in target_tokens:
if t == "<SELF>":
tags.append(Tag("KEEP"))
elif t == "<DELETE>":
tags.append(Tag("DELETE"))
else:
tags.append(Tag("DELETE|" + t))
return tags
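        # Illustration: a target string "<SELF> <DELETE> twenty" (aligned one-to-one with
        # three source tokens) yields tags [KEEP, DELETE, DELETE|twenty].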
def read_input_file(
example_builder: 'BertExampleBuilder', input_filename: str, infer: bool = False
) -> List['BertExample']:
"""Reads in Tab Separated Value file and converts to training/inference-ready examples.
Args:
example_builder: Instance of BertExampleBuilder
input_filename: Path to the TSV input file.
infer: Whether the input is a test (inference) file, i.e. targets are not required.
Returns:
examples: List of converted examples (features and EditingTask objects)
"""
if not path.exists(input_filename):
raise ValueError("Cannot find file: " + input_filename)
examples = []
for i, (source, target, semiotic_info) in enumerate(yield_sources_and_targets(input_filename)):
if len(examples) % 1000 == 0:
logging.info("{} examples processed.".format(len(examples)))
example = example_builder.build_bert_example(source, target, semiotic_info, infer)
if example is None:
continue
examples.append(example)
logging.info(f"Done. {len(examples)} examples converted.")
return examples
|
gkucsko/NeMo
|
tests/collections/asr/mixins/adapters/test_asr_adapter_mixin.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf
from nemo.collections.asr.models import ASRModel, EncDecCTCModel, EncDecRNNTModel
from nemo.collections.common.parts import adapter_modules
from nemo.core.classes.mixins.adapter_mixins import AdapterModuleMixin, get_registered_adapter
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
from nemo.utils import config_utils
NUMBA_RNNT_LOSS_AVAILABLE = numba_utils.numba_cpu_is_supported(
__NUMBA_MINIMUM_VERSION__
) or numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__)
@pytest.fixture()
def model():
preprocessor = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConvASREncoderAdapter',
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 50,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
}
decoder = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': 50,
'num_classes': 28,
'vocabulary': [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
],
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
model_instance = EncDecCTCModel(cfg=modelConfig)
return model_instance
@pytest.fixture()
def rnnt_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
# fmt: off
labels = [' ', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z', "'",
]
# fmt: on
model_defaults = {'enc_hidden': 96, 'pred_hidden': 64}
# Test case where Encoder (default) is not adapter compatible
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': model_defaults['pred_hidden'], 'pred_rnn_layers': 1},
}
joint = {
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'jointnet': {'joint_hidden': 32, 'activation': 'relu'},
}
decoding = {'strategy': 'greedy_batch', 'greedy': {'max_symbols': 10}}
loss = {'loss_name': 'default', 'warprnnt_numba_kwargs': {'fastemit_lambda': 0.001}}
modelConfig = DictConfig(
{
'labels': ListConfig(labels),
'preprocessor': DictConfig(preprocessor),
'model_defaults': DictConfig(model_defaults),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'joint': DictConfig(joint),
'decoding': DictConfig(decoding),
'loss': DictConfig(loss),
}
)
model_instance = EncDecRNNTModel(cfg=modelConfig)
return model_instance
def get_adapter_cfg(in_features=50, dim=100, norm_pos='pre'):
cfg = adapter_modules.LinearAdapterConfig(in_features=in_features, dim=dim, norm_position=norm_pos)
cfg = OmegaConf.structured(cfg)
return cfg
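# Note on the adapter naming convention exercised by the tests below: a plain name
# such as 'adapter_0' is added to the model's default adapter module, while a
# qualified 'module:name' form (e.g. 'encoder:adapter_0', 'decoder:adapter_0')
# targets that specific sub-module.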
class TestASRAdapterMixin:
@pytest.mark.unit
def test_asr_model_constructor(self, model):
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
def test_asr_model_constructor_encoder_module(self, model):
original_num_params = model.num_weights
model.add_adapter(name='encoder:adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
def test_asr_model_constructor_decoder_module(self, model):
original_num_params = model.num_weights
model.add_adapter(name='decoder:adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
assert model.decoder.is_adapter_available()
assert model.decoder.get_enabled_adapters()[0] == 'adapter_0'
@pytest.mark.unit
def test_asr_model_constructor_joint_module_ctc_skip(self, model):
original_num_params = model.num_weights
# this step should exit without adding adapters and without errors
model.add_adapter(name='joint:adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params == original_num_params
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_asr_model_constructor_joint_module_rnnt(self, rnnt_model):
original_num_params = rnnt_model.num_weights
rnnt_model.add_adapter(name='joint:adapter_0', cfg=get_adapter_cfg())
new_num_params = rnnt_model.num_weights
assert new_num_params > original_num_params
assert rnnt_model.joint.is_adapter_available()
assert rnnt_model.joint.get_enabled_adapters()[0] == 'adapter_0'
@pytest.mark.unit
def test_asr_multiple_adapter(self, model):
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
original_num_params = new_num_params
model.add_adapter(name='adapter_1', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
@pytest.mark.parametrize('name', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
def test_asr_forward_linear_pre(self, model, name):
model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
original_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
model.add_adapter(name=name, cfg=get_adapter_cfg())
new_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
def test_asr_forward_linear_post(self, model, name):
model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
original_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
model.add_adapter(name=name, cfg=get_adapter_cfg(norm_pos='post'))
new_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name1', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
@pytest.mark.parametrize('name2', ['adapter_1', 'encoder:adapter_1', 'decoder:adapter_1'])
def test_asr_multi_adapter_forward(self, model, name1, name2):
model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
original_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
model.add_adapter(name=name1, cfg=get_adapter_cfg())
model.add_adapter(name=name2, cfg=get_adapter_cfg())
new_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
resolved_name1 = model.resolve_adapter_module_name_(name1)[-1]
resolved_name2 = model.resolve_adapter_module_name_(name2)[-1]
assert model.get_enabled_adapters() == [resolved_name1, resolved_name2]
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.parametrize('name1', ['decoder:adapter_0', 'joint:adapter_0'])
@pytest.mark.parametrize('name2', ['decoder:adapter_1', 'joint:adapter_1'])
@pytest.mark.unit
def test_rnnt_multi_adapter_forward(self, rnnt_model, name1, name2):
rnnt_model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
original_output = rnnt_model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
rnnt_model.add_adapter(name=name1, cfg=get_adapter_cfg())
rnnt_model.add_adapter(name=name2, cfg=get_adapter_cfg())
new_output = rnnt_model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
resolved_name1 = rnnt_model.resolve_adapter_module_name_(name1)[-1]
resolved_name2 = rnnt_model.resolve_adapter_module_name_(name2)[-1]
assert rnnt_model.get_enabled_adapters() == [resolved_name1, resolved_name2]
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name1', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
@pytest.mark.parametrize('name2', ['adapter_1', 'encoder:adapter_1', 'decoder:adapter_1'])
def test_asr_multi_adapter_partial_forward(self, model, name1, name2):
model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
original_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
model.add_adapter(name=name1, cfg=get_adapter_cfg())
model.add_adapter(name=name2, cfg=get_adapter_cfg())
model.set_enabled_adapters(name=name1, enabled=False)
new_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
resolved_name2 = model.resolve_adapter_module_name_(name2)[-1]
assert model.get_enabled_adapters() == [resolved_name2]
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
def test_asr_forward_unfrozen_adapters(self, model, name):
model.eval()
original_num_params = model.num_weights
dim = 10
model.add_adapter(name=name, cfg=get_adapter_cfg(dim=dim))
model.freeze()
model.unfreeze_enabled_adapters()
assert original_num_params == 5443
original_params = 0
adapter_params = 0
for name, param in model.named_parameters():
if 'adapter' not in name:
assert param.requires_grad is False
original_params += param.numel()
else:
assert param.requires_grad is True
adapter_params += param.numel()
for mname, module in model.named_modules():
if isinstance(module, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
assert module.track_running_stats is False
assert original_params > adapter_params
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_constructor_pretrained(self):
# Check to/from config_dict:
cfg = ASRModel.from_pretrained('stt_en_citrinet_256', map_location='cpu', return_config=True)
adapter_metadata = get_registered_adapter(cfg.encoder._target_)
if adapter_metadata is not None:
cfg.encoder._target_ = adapter_metadata.adapter_class_path
model = ASRModel.from_pretrained('stt_en_citrinet_256', override_config_path=cfg)
assert isinstance(model, AdapterModuleMixin)
assert hasattr(model, 'encoder')
assert isinstance(model.encoder, AdapterModuleMixin)
model.add_adapter('adapter_0', cfg=get_adapter_cfg(in_features=cfg.encoder.jasper[0].filters, dim=5))
assert model.is_adapter_available()
model.freeze()
model.unfreeze_enabled_adapters()
assert model.num_weights < 1e5
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_constructor_pretrained_rnnt(self):
# Check to/from config_dict:
cfg = ASRModel.from_pretrained('stt_en_contextnet_256', map_location='cpu', return_config=True)
adapter_metadata = get_registered_adapter(cfg.encoder._target_)
if adapter_metadata is not None:
cfg.encoder._target_ = adapter_metadata.adapter_class_path
model = ASRModel.from_pretrained('stt_en_contextnet_256', override_config_path=cfg)
assert isinstance(model, AdapterModuleMixin)
assert hasattr(model, 'encoder')
assert isinstance(model.encoder, AdapterModuleMixin)
assert hasattr(model, 'decoder')
assert isinstance(model.decoder, AdapterModuleMixin)
assert hasattr(model, 'joint')
assert isinstance(model.joint, AdapterModuleMixin)
model.add_adapter('adapter_0', cfg=get_adapter_cfg(in_features=cfg.encoder.jasper[0].filters, dim=5))
model.add_adapter('decoder:adapter_1', cfg=get_adapter_cfg(in_features=cfg.decoder.prednet.pred_hidden, dim=5))
model.add_adapter('joint:adapter_2', cfg=get_adapter_cfg(in_features=cfg.joint.jointnet.joint_hidden, dim=5))
assert model.is_adapter_available()
model.freeze()
model.unfreeze_enabled_adapters()
assert model.num_weights < 1e5
|
gkucsko/NeMo
|
nemo/collections/nlp/models/dialogue/dialogue_s2s_generation_model.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
from pytorch_lightning import Trainer
from torch.utils.data import DataLoader
from transformers import AutoModelForSeq2SeqLM
from nemo.collections.nlp.data.dialogue import DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.mellon_qa_data_processor import DialogueMellonQADataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.ms_marco_data_processor import DialogueMSMarcoDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_s2s_generation_dataset import DialogueS2SGenerationDataset
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueGenerationMetrics
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
__all__ = ['DialogueS2SGenerationModel']
class DialogueS2SGenerationModel(NLPModel):
def __init__(
self, cfg: DictConfig, trainer: Trainer = None,
):
self.cfg = cfg
self.data_prepared = False
self.epoch_number = 0
if self.cfg.library == "huggingface":
self.setup_tokenizer(cfg.tokenizer)
elif self.cfg.library == "megatron":
# support restoring MegatronT5Model in fp16 precision
t5_cfg = MegatronT5Model.restore_from(
restore_path=cfg.language_model.lm_checkpoint, trainer=trainer, return_config=True
)
# Override the T5 configuration with the one from the config file.
OmegaConf.set_struct(t5_cfg, True)
with open_dict(t5_cfg):
t5_cfg.masked_softmax_fusion = False
t5_cfg.precision = 16
language_model = MegatronT5Model.restore_from(
restore_path=cfg.language_model.lm_checkpoint, trainer=trainer, override_config_path=t5_cfg
)
self.tokenizer = language_model.tokenizer
super().__init__(cfg=cfg, trainer=trainer, no_lm_init=True)
if self.cfg.library == "huggingface":
self.language_model = AutoModelForSeq2SeqLM.from_pretrained(cfg.language_model.pretrained_model_name)
self.language_model.resize_token_embeddings(len(self.tokenizer.tokenizer))
if self.cfg.language_model.lm_checkpoint:
self.language_model.load_state_dict(torch.load(self.cfg.language_model.lm_checkpoint))
elif self.cfg.library == "megatron":
self.language_model = language_model
def training_step(self, batch, batch_idx):
input_ids, attn_masks, labels = batch
loss = self(input_ids, attn_masks, labels)
self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
return self.eval_step_helper(batch=batch)
def validation_epoch_end(self, outputs):
self.eval_epoch_end(outputs, mode='val')
def test_epoch_end(self, outputs):
self.eval_epoch_end(outputs, mode='test')
def eval_epoch_end(self, outputs, mode='val'):
generated_field = []
ground_truth_field = []
inputs = []
loss = []
for output in outputs:
generated_field += output["generated_field"]
ground_truth_field += output["ground_truth_field"]
inputs += output["input"]
loss.append(output["loss"].item())
os.makedirs(self.cfg.dataset.dialogues_example_dir, exist_ok=True)
filename = os.path.join(
self.cfg.dataset.dialogues_example_dir, f"{mode}_predictions_epoch{self.epoch_number}.jsonl"
)
DialogueGenerationMetrics.save_predictions(
filename, generated_field, ground_truth_field, inputs,
)
label_acc = np.mean([int(generated_field[i] == ground_truth_field[i]) for i in range(len(generated_field))])
precision, recall, f1 = DialogueGenerationMetrics.get_f1(generated_field, ground_truth_field)
bleu = DialogueGenerationMetrics.get_bleu(generated_field, ground_truth_field)
avg_loss = np.mean(loss)
ppl = np.exp(avg_loss)
self.log('{}_accuracy'.format(mode), label_acc * 100)
self.log('precision', precision)
self.log('recall', recall)
self.log('f1', f1)
self.log('bleu', bleu)
self.log('{}_loss'.format(mode), avg_loss)
self.log('{}_ppl'.format(mode), ppl)
if mode == 'val':
self.epoch_number += 1
if self.cfg.save_model:
filename = '{}/val_loss-{}-epoch-{}-answer-extender.bin'.format(
self.cfg.dataset.dialogues_example_dir, avg_loss, self.epoch_number
)
torch.save(self.language_model.state_dict(), filename)
def test_step(self, batch, batch_idx):
return self.eval_step_helper(batch=batch, mode='test')
# for inference only
def predict_step(self, batch, batch_idx, dataloader_idx=None):
# return self(batch)
raise NotImplementedError()
def forward(self, input_ids, attention_masks, labels):
if self.cfg.library == "huggingface":
output = self.language_model(input_ids=input_ids, attention_mask=attention_masks, labels=labels)
loss = output['loss']
elif self.cfg.library == "megatron":
labels = torch.where(labels != -100, labels, torch.zeros_like(labels))
decoder_attn_masks = torch.where(labels > 0, torch.ones_like(labels), torch.zeros_like(labels))
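            # Teacher-forcing shift: labels[:, :-1] are fed as decoder input tokens while
            # labels[:, 1:] serve as the prediction targets (lm_labels) for the loss below.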
unmasked_unreduced_loss = self.language_model(
input_ids, labels[:, :-1], attention_masks, decoder_attn_masks[:, :-1], lm_labels=labels[:, 1:]
)
loss = self.language_model.loss_func(decoder_attn_masks[:, 1:], unmasked_unreduced_loss)
return loss
def prepare_megatron_generation(self, labels, input_ids, template_length):
"""
# adapted from MegatronGPTModel._bucketize_gpt_inference
"""
batch_size = labels.size(0)
prompt_tags = [self.prompt_tags[0]] * batch_size if self.prompt_tags else None
batch_tokens = input_ids.tolist()
# unpad tokens
lens = template_length
indxs = [index for index in range(batch_size)]
for lenn, index in zip(lens, indxs):
batch_tokens[index] = batch_tokens[index][:lenn]
# chunk tokens by same length
pre_buckets, lens = [], list(set(lens.tolist()))
for lenn in lens:
pre_buckets.append([(tokens, index) for index, tokens in enumerate(batch_tokens) if len(tokens) == lenn])
buckets, positions, bucket_prompt_tags = [], [], []
# get buckets and prompts initial positions
for bucket in pre_buckets:
buckets.append(torch.tensor([item[0] for item in bucket]).to(device=labels.device))
positions.append([item[1] for item in bucket])
# bucket prompt tags identically to their corresponding examples
if prompt_tags:
bucket_prompt_tags.append([prompt_tags[item[1]] for item in bucket])
# Flatten position list
positions = [item for sublist in positions for item in sublist]
        # Flatten buckets and bucket_prompt_tags (temporary fix for the Megatron complete issue; note that this is slower than bucketized inference)
buckets = [item.unsqueeze(0) for sublist in buckets for item in sublist]
bucket_prompt_tags = [[item] for sublist in bucket_prompt_tags for item in sublist]
request = {"tokens": buckets, "prompt_tags": bucket_prompt_tags}
return positions, request
def post_process_megatron_generation(self, outputs):
text_outputs = [output[0] for output in outputs]
generated_tokens = self.tokenizer.tokenizer(text_outputs, padding=True, return_tensors="pt").data["input_ids"]
return generated_tokens
def generate_candidates(self, input_ids, attn_masks, labels):
tokens_to_generate = self.cfg.tokens_to_generate
if self.cfg.library == "huggingface":
param_dict = {
"input_ids": input_ids,
"attention_mask": attn_masks,
"max_length": tokens_to_generate,
}
generated_tokens = self.language_model.generate(**param_dict)
elif self.cfg.library == 'megatron':
raise ValueError("Megatron Generation is not supported by DialogueS2SGenerationModel")
generated_field = self.process_into_structured_fields(generated_tokens)
ground_truth_field = self.process_into_structured_fields(labels)
return generated_field, ground_truth_field
def process_into_structured_fields(self, full_seq_ids, template_length=None):
structured_field = []
for i in range(full_seq_ids.size(0)):
start_point = 0 if template_length is None else template_length[i].item()
stop_point = full_seq_ids.size(1)
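            # Stop at the first pad token (or -100 ignore index) encountered after the start position.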
for j in range(start_point, stop_point):
if full_seq_ids.data[i, j] in [self.tokenizer.tokenizer.pad_token_id, -100] and j != 0:
stop_point = j
break
token_ids = full_seq_ids[i, start_point:stop_point]
one_generated_field = self.tokenizer.tokenizer.decode(token_ids, skip_special_tokens=True).strip()
structured_field.append(one_generated_field)
return structured_field
def eval_step_helper(self, batch, mode='val'):
input_ids, attn_masks, labels = batch
loss = self(input_ids, attn_masks, labels)
self.log("{}_loss".format(mode), loss, on_step=True, on_epoch=True, prog_bar=True, logger=True)
generated_field, ground_truth_field = self.generate_candidates(input_ids, attn_masks, labels)
return {
'loss': loss,
'input': self.tokenizer.tokenizer.batch_decode(input_ids, skip_special_tokens=True),
'generated_field': generated_field,
'ground_truth_field': ground_truth_field,
}
def prepare_data(self):
"""
Preprocessed schema and dialogues and caches this
"""
if self.data_prepared:
return
if self._cfg.dataset.task == "ms_marco":
self.dialogues_processor = DialogueMSMarcoDataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
elif self._cfg.dataset.task == "sgd_generation":
self.dialogues_processor = DialogueSGDDataProcessor(
data_dir=self._cfg.dataset.data_dir,
dialogues_example_dir=self._cfg.dataset.dialogues_example_dir,
tokenizer=self.tokenizer,
cfg=self._cfg.dataset,
)
elif self._cfg.dataset.task == "mellon_qa":
self.dialogues_processor = DialogueMellonQADataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
else:
raise ValueError("Only ms_marco, sgd_generation and mellon_qa supported for Dialogue GPT Generation Model")
self.data_prepared = True
def update_data_dirs(self, data_dir: str, dialogues_example_dir: str):
"""
Update data directories
Args:
data_dir: path to data directory
            dialogues_example_dir: path to the preprocessed dialogue examples directory; it will be created if it does not exist.
"""
if not os.path.exists(data_dir):
raise ValueError(f"{data_dir} is not found")
self._cfg.dataset.data_dir = data_dir
self._cfg.dataset.dialogues_example_dir = dialogues_example_dir
logging.info(f'Setting model.dataset.data_dir to {data_dir}.')
logging.info(f'Setting model.dataset.dialogues_example_dir to {dialogues_example_dir}.')
def setup_training_data(self, train_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._train_dl = self._setup_dataloader_from_config(cfg=train_data_config, split=train_data_config.ds_item)
def setup_multiple_validation_data(self, val_data_config: Optional[DictConfig] = None):
return self.setup_validation_data(val_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._validation_dl = self._setup_dataloader_from_config(cfg=val_data_config, split=val_data_config.ds_item)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
self.setup_test_data(test_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig] = None):
self.prepare_data()
self._test_dl = self._setup_dataloader_from_config(cfg=test_data_config, split=test_data_config.ds_item)
def _setup_dataloader_from_config(self, cfg: DictConfig, split: str) -> DataLoader:
dataset_cfg = self._cfg.dataset
data_dir = dataset_cfg.data_dir
if not os.path.exists(data_dir):
raise FileNotFoundError(f"Data directory is not found at: {data_dir}.")
dataset = DialogueS2SGenerationDataset(
dataset_split=split,
dialogues_processor=self.dialogues_processor,
tokenizer=self.dialogues_processor._tokenizer,
cfg=dataset_cfg,
)
dl = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset.collate_fn,
drop_last=cfg.drop_last,
shuffle=cfg.shuffle,
num_workers=cfg.num_workers,
pin_memory=cfg.pin_memory,
)
return dl
@classmethod
def list_available_models(cls) -> Optional[PretrainedModelInfo]:
"""
        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/normalization_as_tagging_train.py
|
<filename>examples/nlp/text_normalization_as_tagging/normalization_as_tagging_train.py
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example of how to train a ThutmoseTaggerModel for inverse text normalization (ITN).
This script uses the `/examples/nlp/text_normalization_as_tagging/conf/thutmose_tagger_itn_config.yaml`
config file by default. Another config file can be set via the command
line argument `--config-name=CONFIG_FILE_PATH`. It is worth looking
at the example config file to see the list of parameters used for training.
USAGE Example:
1. Obtain a processed dataset
2. Run:
python ${NEMO_PATH}/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_train.py \
lang=${LANG} \
data.validation_ds.data_path=${DATA_PATH}/valid.tsv \
data.train_ds.data_path=${DATA_PATH}/train.tsv \
data.train_ds.batch_size=128 \
data.train_ds.num_workers=8 \
model.language_model.pretrained_model_name=${LANGUAGE_MODEL} \
model.label_map=${DATA_PATH}/label_map.txt \
model.semiotic_classes=${DATA_PATH}/semiotic_classes.txt \
model.optim.lr=3e-5 \
trainer.devices=[1] \
trainer.num_nodes=1 \
trainer.accelerator=gpu \
trainer.strategy=ddp \
trainer.max_epochs=5
Information on the arguments:
Most arguments in the example config file are quite self-explanatory (e.g.,
`model.optim.lr` refers to the learning rate for training the model).
Some arguments we want to mention are:
+ lang: The language of the dataset.
+ model.language_model.pretrained_model_name: This is the backbone BERT model (depends on the language)
e.g. bert-base-uncased (English), DeepPavlov/rubert-base-cased (Russian)
"""
from helpers import ITN_MODEL, instantiate_model_and_trainer
from omegaconf import DictConfig, OmegaConf
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="thutmose_tagger_itn_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config Params: {OmegaConf.to_yaml(cfg)}')
# Train the model
if cfg.model.do_training:
logging.info(
"================================================================================================"
)
logging.info('Start training...')
trainer, model = instantiate_model_and_trainer(cfg, ITN_MODEL, True)
thutmose_tagger_exp_manager = cfg.get('exp_manager', None)
exp_manager(trainer, thutmose_tagger_exp_manager)
trainer.fit(model)
logging.info('Training finished!')
if __name__ == '__main__':
main()
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/data_processor/ms_marco_data_processor.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ast import literal_eval
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
__all__ = ['DialogueMSMarcoDataProcessor']
class DialogueMSMarcoDataProcessor(DialogueDataProcessor):
"""Data Processor for MS Marco dialogues. (https://github.com/microsoft/MSMARCO-Question-Answering)
Please agree to the Terms of Use before downloading data at
https://msmarco.blob.core.windows.net/msmarco/train_v2.1.json.gz
https://msmarco.blob.core.windows.net/msmarco/dev_v2.1.json.gz
"""
def __init__(self, data_dir: str, tokenizer: object, cfg=None):
"""
Constructs DialogueMSMarcoDataProcessor
Args:
data_dir: path to data directory
tokenizer: tokenizer object
            cfg: cfg container for the dataset (cfg.debug_mode reduces the number of samples loaded to speed up processing)
"""
self.data_dir = data_dir
self._tokenizer = tokenizer
self.cfg = cfg
def open_json(self, filename):
"""
Reads file into a list
"""
filename = os.path.join(self.data_dir, filename)
with open(filename, "r", encoding="UTF-8") as f:
data = json.load(f)
return data
def get_dialog_examples(self, dataset_split: str):
"""
Process raw files into DialogueInputExample
Args:
dataset_split: {train, dev, test}
                For the MS Marco dataset, there is no explicit dev set (the test split is used as the dev set).
                Therefore, this function creates a dev set and a new train set from the original train set.
                The dev set contains self.cfg.dev_proportion % of the samples, with the rest going into the train set.
"""
examples = []
dataset_split_print = {"train": "train", "dev": "train", "test": "dev"}
raw_examples = self.open_json("{}_v2.1.json".format(dataset_split_print[dataset_split]))
n_samples = len(raw_examples['answers'])
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, self.cfg.dev_proportion)
if self.cfg.debug_mode:
idxs = idxs[:100]
for i in idxs:
utterance = raw_examples['query'][str(i)]
            # The answer need not be extracted from the passage.
            # Take the first answer as the ground truth, since fewer than 1% of examples have multiple answers.
answer = raw_examples['answers'][str(i)]
answer = answer[0] if isinstance(answer, list) else answer
well_formed_answer = raw_examples['wellFormedAnswers'][str(i)]
well_formed_answer = (
well_formed_answer if isinstance(well_formed_answer, list) else literal_eval(well_formed_answer)
)
well_formed_answer = well_formed_answer[0] if well_formed_answer else None
query_type = raw_examples['query_type'][str(i)]
candidate_passages = raw_examples['passages'][str(i)]
passage = [
candidate_passage["passage_text"]
for candidate_passage in candidate_passages
if int(candidate_passage["is_selected"])
]
passage = passage[0] if passage else None
possible_passages = [candidate_passage["passage_text"] for candidate_passage in candidate_passages]
input_example = {
"utterance": utterance,
"example_id": i,
"labels": {
"service": query_type,
"response": answer,
"fluent_response": well_formed_answer,
"passage": passage,
},
"possible_labels": {
"service": "LOCATION,NUMERIC,PERSON,DESCRIPTION,ENTITY".split(','),
"passage": possible_passages,
},
}
example = DialogueInputExample(input_example)
examples.append(example)
return examples
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
return self.get_dialog_examples("train")
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev")
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
return self.get_dialog_examples("test")
|
gkucsko/NeMo
|
nemo/collections/asr/metrics/wer_bpe.py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import editdistance
import torch
from torchmetrics import Metric
from nemo.collections.asr.metrics.wer import move_dimension_to_the_front
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.utils import logging
class WERBPE(Metric):
"""
This metric computes numerator and denominator for Overall Word Error Rate for BPE tokens (WER-BPE) between
prediction and reference texts. When doing distributed training/evaluation the result of
``res=WERBPE(predictions, targets, target_lengths)`` calls will be all-reduced between all workers using SUM
    operations. Here ``res`` contains three numbers ``res=[wer, total_levenshtein_distance, total_number_of_words]``.
    If used with a PytorchLightning LightningModule, include wer_numerator and wer_denominator inside validation_step
    results. Then aggregate (sum) them at the end of the validation epoch to correctly compute the validation WER.
Example:
def validation_step(self, batch, batch_idx):
...
wer_num, wer_denom = self.__wer(predictions, transcript, transcript_len)
return {'val_loss': loss_value, 'val_wer_num': wer_num, 'val_wer_denom': wer_denom}
def validation_epoch_end(self, outputs):
...
wer_num = torch.stack([x['val_wer_num'] for x in outputs]).sum()
wer_denom = torch.stack([x['val_wer_denom'] for x in outputs]).sum()
tensorboard_logs = {'validation_loss': val_loss_mean, 'validation_avg_wer': wer_num / wer_denom}
return {'val_loss': val_loss_mean, 'log': tensorboard_logs}
Args:
        tokenizer: NeMo tokenizer object, which inherits from TokenizerSpec.
batch_dim_index: Index of the batch dimension of ``targets`` and ``predictions`` parameters of ``__call__``,
``forward``, ``update``, ``ctc_decoder_predictions_tensor`` methods. Can be either 0 or 1.
use_cer: Whether to compute word-error-rate or character-error-rate.
ctc_decode: Whether to perform CTC decode.
log_prediction: Whether to log a single decoded sample per call.
fold_consecutive: Whether repeated consecutive tokens should be folded into one when decoding.
Returns:
        res: a tuple of 3 zero-dimensional float32 ``torch.Tensor`` objects: a WER score, a sum of Levenshtein
            distances for all prediction-reference pairs, and the total number of words in all references.
"""
def __init__(
self,
tokenizer: TokenizerSpec,
batch_dim_index=0,
use_cer=False,
ctc_decode=True,
log_prediction=True,
dist_sync_on_step=False,
fold_consecutive=True,
):
super().__init__(dist_sync_on_step=dist_sync_on_step, compute_on_step=False)
self.tokenizer = tokenizer
self.batch_dim_index = batch_dim_index
self.blank_id = tokenizer.tokenizer.vocab_size
self.use_cer = use_cer
self.ctc_decode = ctc_decode
self.log_prediction = log_prediction
self.fold_consecutive = fold_consecutive
self.add_state("scores", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
self.add_state("words", default=torch.tensor(0), dist_reduce_fx='sum', persistent=False)
def ctc_decoder_predictions_tensor(
self, predictions: torch.Tensor, predictions_len: torch.Tensor = None, return_hypotheses: bool = False
) -> List[str]:
"""
Decodes a sequence of labels to words
Args:
            predictions: An integer torch.Tensor of shape [Batch, Time] (if ``batch_dim_index == 0``) or [Time, Batch]
                (if ``batch_dim_index == 1``) of integer indices that correspond to the index of some character in the
label set.
predictions_len: Optional tensor of length `Batch` which contains the integer lengths
of the sequence in the padded `predictions` tensor.
return_hypotheses: Bool flag whether to return just the decoding predictions of the model
or a Hypothesis object that holds information such as the decoded `text`,
                the `alignment` emitted by the CTC Model, and the `length` of the sequence (if available).
                May also contain the log-probabilities of the decoder (if this method is called via
                transcribe()) inside `y_sequence`; otherwise it is set to None as it is a duplicate of
                `alignments`.
Returns:
Either a list of str which represent the CTC decoded strings per sample,
or a list of Hypothesis objects containing additional information.
"""
hypotheses = []
# Drop predictions to CPU
predictions = move_dimension_to_the_front(predictions, self.batch_dim_index)
prediction_cpu_tensor = predictions.long().cpu()
# iterate over batch
for ind in range(prediction_cpu_tensor.shape[0]):
if self.fold_consecutive:
prediction = prediction_cpu_tensor[ind].detach().numpy().tolist()
if predictions_len is not None:
prediction = prediction[: predictions_len[ind]]
# CTC decoding procedure
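                    # e.g. with blank token B, [a, a, B, a] collapses to [a, a]: repeated tokens
                    # are folded unless separated by a blank, and blank tokens are dropped.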
decoded_prediction = []
previous = self.blank_id
for p in prediction:
if (p != previous or previous == self.blank_id) and p != self.blank_id:
decoded_prediction.append(p)
previous = p
else:
prediction = prediction_cpu_tensor[ind].detach()
if predictions_len is not None:
prediction = prediction[: predictions_len[ind]]
decoded_prediction = prediction[prediction != self.blank_id].tolist()
text = self.decode_tokens_to_str(decoded_prediction)
if not return_hypotheses:
hypothesis = text
else:
hypothesis = Hypothesis(
y_sequence=None, # logprob info added by transcribe method
score=-1.0,
text=text,
alignments=prediction,
length=predictions_len[ind] if predictions_len is not None else 0,
)
hypotheses.append(hypothesis)
return hypotheses
def decode_tokens_to_str(self, tokens: List[int]) -> str:
"""
Implemented in order to decoder a token list into a string.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded string.
"""
hypothesis = self.tokenizer.ids_to_text(tokens)
return hypothesis
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
"""
Implemented in order to decode a token id list into a token list.
A token list is the string representation of each token id.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded tokens.
"""
token_list = self.tokenizer.ids_to_tokens(tokens)
return token_list
def update(
self,
predictions: torch.Tensor,
targets: torch.Tensor,
target_lengths: torch.Tensor,
predictions_lengths: torch.Tensor = None,
):
"""
Updates metric state.
Args:
predictions: an integer torch.Tensor of shape ``[Batch, Time]`` (if ``batch_dim_index == 0``) or
``[Time, Batch]`` (if ``batch_dim_index == 1``)
targets: an integer torch.Tensor of shape ``[Batch, Time]`` (if ``batch_dim_index == 0``) or
``[Time, Batch]`` (if ``batch_dim_index == 1``)
target_lengths: an integer torch.Tensor of shape ``[Batch]``
predictions_lengths: an integer torch.Tensor of shape ``[Batch]``
"""
words = 0.0
scores = 0.0
references = []
with torch.no_grad():
# prediction_cpu_tensor = tensors[0].long().cpu()
targets_cpu_tensor = targets.long().cpu()
targets_cpu_tensor = move_dimension_to_the_front(targets_cpu_tensor, self.batch_dim_index)
tgt_lenths_cpu_tensor = target_lengths.long().cpu()
# iterate over batch
for ind in range(targets_cpu_tensor.shape[0]):
tgt_len = tgt_lenths_cpu_tensor[ind].item()
target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist()
reference = self.decode_tokens_to_str(target)
references.append(reference)
if self.ctc_decode:
hypotheses = self.ctc_decoder_predictions_tensor(predictions, predictions_lengths)
else:
raise NotImplementedError("Implement me if you need non-CTC decode on predictions")
if self.log_prediction:
logging.info(f"\n")
logging.info(f"reference:{references[0]}")
logging.info(f"predicted:{hypotheses[0]}")
for h, r in zip(hypotheses, references):
if self.use_cer:
h_list = list(h)
r_list = list(r)
else:
h_list = h.split()
r_list = r.split()
words += len(r_list)
                # Compute Levenshtein distance
scores += editdistance.eval(h_list, r_list)
self.scores = torch.tensor(scores, device=self.scores.device, dtype=self.scores.dtype)
self.words = torch.tensor(words, device=self.words.device, dtype=self.words.dtype)
# return torch.tensor([scores, words]).to(predictions.device)
def compute(self):
scores = self.scores.detach().float()
words = self.words.detach().float()
return scores / words, scores, words
|
gkucsko/NeMo
|
nemo/collections/tts/modules/fastpitch.py
|
<filename>nemo/collections/tts/modules/fastpitch.py<gh_stars>0
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# BSD 3-Clause License
#
# Copyright (c) 2021, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
from nemo.collections.tts.helpers.helpers import binarize_attention_parallel, regulate_len
from nemo.core.classes import NeuralModule, typecheck
from nemo.core.neural_types.elements import (
EncodedRepresentation,
Index,
LengthsType,
LogprobsType,
MelSpectrogramType,
ProbsType,
RegressionValuesType,
TokenDurationType,
TokenIndex,
TokenLogDurationType,
)
from nemo.core.neural_types.neural_type import NeuralType
def average_pitch(pitch, durs):
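    # Segment-sum trick: gather cumulative sums of pitch (and of its non-zero-frame counts) at the
    # token boundaries given by `durs`, yielding per-token sums and counts without an explicit loop.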
durs_cums_ends = torch.cumsum(durs, dim=1).long()
durs_cums_starts = torch.nn.functional.pad(durs_cums_ends[:, :-1], (1, 0))
pitch_nonzero_cums = torch.nn.functional.pad(torch.cumsum(pitch != 0.0, dim=2), (1, 0))
pitch_cums = torch.nn.functional.pad(torch.cumsum(pitch, dim=2), (1, 0))
bs, l = durs_cums_ends.size()
n_formants = pitch.size(1)
dcs = durs_cums_starts[:, None, :].expand(bs, n_formants, l)
dce = durs_cums_ends[:, None, :].expand(bs, n_formants, l)
pitch_sums = (torch.gather(pitch_cums, 2, dce) - torch.gather(pitch_cums, 2, dcs)).float()
pitch_nelems = (torch.gather(pitch_nonzero_cums, 2, dce) - torch.gather(pitch_nonzero_cums, 2, dcs)).float()
pitch_avg = torch.where(pitch_nelems == 0.0, pitch_nelems, pitch_sums / pitch_nelems)
return pitch_avg
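# Illustrative usage sketch (the shapes here are only an example): given pitch of shape
# [B, n_formants, T_audio] and durs of shape [B, T_text] whose rows sum to T_audio,
# average_pitch returns the mean of the non-zero pitch values within each token's frames,
# shaped [B, n_formants, T_text]:
#   pitch = torch.randn(2, 1, 40)      # 40 audio frames
#   durs = torch.full((2, 10), 4)      # 10 tokens, 4 frames each
#   avg = average_pitch(pitch, durs)   # -> shape (2, 1, 10)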
class ConvReLUNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super(ConvReLUNorm, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size // 2))
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, signal):
out = torch.nn.functional.relu(self.conv(signal))
out = self.norm(out.transpose(1, 2)).transpose(1, 2)
return self.dropout(out)
class TemporalPredictor(NeuralModule):
"""Predicts a single float per each temporal location"""
def __init__(self, input_size, filter_size, kernel_size, dropout, n_layers=2):
super(TemporalPredictor, self).__init__()
self.layers = torch.nn.Sequential(
*[
ConvReLUNorm(
input_size if i == 0 else filter_size, filter_size, kernel_size=kernel_size, dropout=dropout
)
for i in range(n_layers)
]
)
self.fc = torch.nn.Linear(filter_size, 1, bias=True)
@property
def input_types(self):
return {
"enc": NeuralType(('B', 'T', 'D'), EncodedRepresentation()),
"enc_mask": NeuralType(('B', 'T', 1), TokenDurationType()),
}
@property
def output_types(self):
return {
"out": NeuralType(('B', 'T'), EncodedRepresentation()),
}
def forward(self, enc, enc_mask):
out = enc * enc_mask
out = self.layers(out.transpose(1, 2)).transpose(1, 2)
out = self.fc(out) * enc_mask
return out.squeeze(-1)
class FastPitchModule(NeuralModule):
def __init__(
self,
encoder_module: NeuralModule,
decoder_module: NeuralModule,
duration_predictor: NeuralModule,
pitch_predictor: NeuralModule,
aligner: NeuralModule,
n_speakers: int,
symbols_embedding_dim: int,
pitch_embedding_kernel_size: int,
n_mel_channels: int = 80,
max_token_duration: int = 75,
):
super().__init__()
self.encoder = encoder_module
self.decoder = decoder_module
self.duration_predictor = duration_predictor
self.pitch_predictor = pitch_predictor
self.aligner = aligner
self.learn_alignment = aligner is not None
self.use_duration_predictor = True
self.binarize = False
if n_speakers > 1:
self.speaker_emb = torch.nn.Embedding(n_speakers, symbols_embedding_dim)
else:
self.speaker_emb = None
self.max_token_duration = max_token_duration
self.min_token_duration = 0
self.pitch_emb = torch.nn.Conv1d(
1,
symbols_embedding_dim,
kernel_size=pitch_embedding_kernel_size,
padding=int((pitch_embedding_kernel_size - 1) / 2),
)
# Store values precomputed from training data for convenience
self.register_buffer('pitch_mean', torch.zeros(1))
self.register_buffer('pitch_std', torch.zeros(1))
self.proj = torch.nn.Linear(self.decoder.d_model, n_mel_channels, bias=True)
@property
def input_types(self):
return {
"text": NeuralType(('B', 'T_text'), TokenIndex()),
"durs": NeuralType(('B', 'T_text'), TokenDurationType()),
"pitch": NeuralType(('B', 'T_audio'), RegressionValuesType()),
"speaker": NeuralType(('B'), Index(), optional=True),
"pace": NeuralType(optional=True),
"spec": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType(), optional=True),
"attn_prior": NeuralType(('B', 'T_spec', 'T_text'), ProbsType(), optional=True),
"mel_lens": NeuralType(('B'), LengthsType(), optional=True),
"input_lens": NeuralType(('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
return {
"spect": NeuralType(('B', 'D', 'T_spec'), MelSpectrogramType()),
"num_frames": NeuralType(('B'), TokenDurationType()),
"durs_predicted": NeuralType(('B', 'T_text'), TokenDurationType()),
"log_durs_predicted": NeuralType(('B', 'T_text'), TokenLogDurationType()),
"pitch_predicted": NeuralType(('B', 'T_text'), RegressionValuesType()),
"attn_soft": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
"attn_logprob": NeuralType(('B', 'S', 'T_spec', 'T_text'), LogprobsType()),
"attn_hard": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
"attn_hard_dur": NeuralType(('B', 'T_text'), TokenDurationType()),
"pitch": NeuralType(('B', 'T_audio'), RegressionValuesType()),
}
@typecheck()
def forward(
self,
*,
text,
durs=None,
pitch=None,
speaker=None,
pace=1.0,
spec=None,
attn_prior=None,
mel_lens=None,
input_lens=None,
):
if not self.learn_alignment and self.training:
assert durs is not None
assert pitch is not None
# Calculate speaker embedding
if self.speaker_emb is None or speaker is None:
spk_emb = 0
else:
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
# Input FFT
enc_out, enc_mask = self.encoder(input=text, conditioning=spk_emb)
log_durs_predicted = self.duration_predictor(enc_out, enc_mask)
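        # Durations are predicted in log space; exp(.) - 1 maps them back to frame counts,
        # clamped to [0, max_token_duration].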
durs_predicted = torch.clamp(torch.exp(log_durs_predicted) - 1, 0, self.max_token_duration)
attn_soft, attn_hard, attn_hard_dur, attn_logprob = None, None, None, None
if self.learn_alignment and spec is not None:
text_emb = self.encoder.word_emb(text)
attn_soft, attn_logprob = self.aligner(spec, text_emb.permute(0, 2, 1), enc_mask == 0, attn_prior)
attn_hard = binarize_attention_parallel(attn_soft, input_lens, mel_lens)
attn_hard_dur = attn_hard.sum(2)[:, 0, :]
# Predict pitch
pitch_predicted = self.pitch_predictor(enc_out, enc_mask)
if pitch is not None:
if self.learn_alignment and pitch.shape[-1] != pitch_predicted.shape[-1]:
# Pitch during training is per spectrogram frame, but during inference, it should be per character
pitch = average_pitch(pitch.unsqueeze(1), attn_hard_dur).squeeze(1)
pitch_emb = self.pitch_emb(pitch.unsqueeze(1))
else:
pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
enc_out = enc_out + pitch_emb.transpose(1, 2)
if self.learn_alignment and spec is not None:
len_regulated, dec_lens = regulate_len(attn_hard_dur, enc_out, pace)
elif spec is None and durs is not None:
len_regulated, dec_lens = regulate_len(durs, enc_out, pace)
# Use predictions during inference
elif spec is None:
len_regulated, dec_lens = regulate_len(durs_predicted, enc_out, pace)
# Output FFT
dec_out, _ = self.decoder(input=len_regulated, seq_lens=dec_lens)
spect = self.proj(dec_out).transpose(1, 2)
return (
spect,
dec_lens,
durs_predicted,
log_durs_predicted,
pitch_predicted,
attn_soft,
attn_logprob,
attn_hard,
attn_hard_dur,
pitch,
)
def infer(self, *, text, pitch=None, speaker=None, pace=1.0, volume=None):
# Calculate speaker embedding
if self.speaker_emb is None or speaker is None:
spk_emb = 0
else:
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
# Input FFT
enc_out, enc_mask = self.encoder(input=text, conditioning=spk_emb)
# Predict duration and pitch
log_durs_predicted = self.duration_predictor(enc_out, enc_mask)
durs_predicted = torch.clamp(
torch.exp(log_durs_predicted) - 1.0, self.min_token_duration, self.max_token_duration
)
pitch_predicted = self.pitch_predictor(enc_out, enc_mask) + pitch
pitch_emb = self.pitch_emb(pitch_predicted.unsqueeze(1))
enc_out = enc_out + pitch_emb.transpose(1, 2)
# Expand to decoder time dimension
len_regulated, dec_lens = regulate_len(durs_predicted, enc_out, pace)
volume_extended = None
if volume is not None:
volume_extended, _ = regulate_len(durs_predicted, volume.unsqueeze(-1), pace)
volume_extended = volume_extended.squeeze(-1).float()
# Output FFT
dec_out, _ = self.decoder(input=len_regulated, seq_lens=dec_lens)
spect = self.proj(dec_out).transpose(1, 2)
return (
spect.to(torch.float),
dec_lens,
durs_predicted,
log_durs_predicted,
pitch_predicted,
volume_extended,
)
|
gkucsko/NeMo
|
nemo/collections/asr/parts/utils/adapter_utils.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.utils import logging
def update_adapter_cfg_input_dim(module: torch.nn.Module, cfg: DictConfig, *, module_dim: int):
if 'in_features' in cfg:
in_planes = cfg['in_features']
if in_planes != module_dim:
logging.info(f"Updating {module.__class__.__name__} Adapter input dim from {in_planes} to {module_dim}")
in_planes = module_dim
cfg['in_features'] = in_planes
return cfg
else:
raise ValueError(
f"Failed to infer the input dimension of the Adapter cfg. Provided config : \n" f"{OmegaConf.to_yaml(cfg)}"
)
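# Illustrative usage sketch (the module name below is hypothetical): given an adapter config
# that carries an 'in_features' key, this helper overwrites it with the enclosing module's
# hidden dimension so the adapter input size always matches the layer it is attached to.
#   adapter_cfg = OmegaConf.create({'in_features': 256, 'dim': 32})
#   adapter_cfg = update_adapter_cfg_input_dim(some_encoder_layer, adapter_cfg, module_dim=512)
#   # adapter_cfg['in_features'] is now 512; the change is reported via logging.info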
|
gkucsko/NeMo
|
tests/collections/nlp/test_retrieval_module_inference.py
|
<reponame>gkucsko/NeMo<filename>tests/collections/nlp/test_retrieval_module_inference.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
import torch.nn.functional as F
from einops import rearrange
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo
from nemo.collections.nlp.modules.common.megatron.retrieval_token_level_encoder_decoder import (
MegatronRetrievalTokenLevelEncoderDecoderModule,
)
from nemo.collections.nlp.modules.common.megatron.retrieval_transformer import (
MegatronRetrievalTransformerDecoderModule,
MegatronRetrievalTransformerEncoderModule,
)
from nemo.collections.nlp.modules.common.megatron.rotary_pos_embedding import RotaryEmbedding
from nemo.collections.nlp.modules.common.megatron.transformer import ParallelChunkedCrossAttention
from nemo.collections.nlp.modules.common.megatron.utils import (
build_attention_mask_3d,
init_method_normal,
scaled_init_method_normal,
)
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPPlugin
try:
from apex.transformer.enums import AttnMaskType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
@pytest.mark.run_only_on('GPU')
@pytest.mark.skipif(not HAVE_APEX, reason="apex is not installed")
class TestRetrievalModuleInference:
@classmethod
def setup_class(cls):
if not torch.cuda.is_available():
return
GPUS = 1
plugins = [NLPDDPPlugin()]
TP_SIZE = GPUS
PP_SIZE = 1
MB_SIZE = 4
GB_SIZE = 8
SEED = 1234
trainer = Trainer(
plugins=plugins, devices=GPUS, accelerator='gpu', num_nodes=1, logger=None, log_gpu_memory=None
)
initialize_model_parallel_for_nemo(
world_size=trainer.world_size,
global_rank=trainer.global_rank,
local_rank=trainer.local_rank,
tensor_model_parallel_size=TP_SIZE,
pipeline_model_parallel_size=PP_SIZE,
micro_batch_size=MB_SIZE,
global_batch_size=GB_SIZE,
seed=SEED,
apex_transformer_log_level=30,
)
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
torch.distributed.barrier()
@pytest.mark.unit
def test_retrieval_encoder_inference(self):
init_method_std = 0.02
batch = 2
neighbors = 2
# rotary pos emb dim
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
input_length = chunks * text_chunk_size
vocab_size = 20000
        hidden = torch.randint(0, vocab_size, (batch, input_length)).cuda()  # (batch, seq)
hidden_mask = (hidden != pad_id).cuda()
hidden_emb = torch.rand(batch, input_length, dim).cuda().half() # (batch, seq, dim)
retrieved = torch.randint(0, vocab_size, (batch, chunks, neighbors, 2 * text_chunk_size)).cuda()
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
retrieved_emb = torch.rand(batch, chunks, neighbors, 2 * text_chunk_size, dim).cuda().half()
layer_type = [LayerType.encoder, LayerType.retrieval_encoder, LayerType.encoder, LayerType.retrieval_encoder]
num_layers = len(layer_type)
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
encoder = (
MegatronRetrievalTransformerEncoderModule(
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=dim,
ffn_hidden_size=dim * 4,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
precision=16,
chunk_size=text_chunk_size,
layer_type=layer_type,
hidden_dropout=0.0,
attention_dropout=0.0,
)
.cuda()
.half()
)
out_gt = encoder(retrieved_emb, context_mask, context_attn_mask=hidden_mask, encoder_output=hidden_emb)
assert out_gt.shape == torch.Size([batch, chunks, neighbors, 2 * text_chunk_size, dim])
out_1 = encoder(
None,
None,
context_attn_mask=hidden_mask[:, :62],
encoder_output=hidden_emb[:, :62, :],
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert out_1 is None
out_1 = encoder(
None,
None,
context_attn_mask=hidden_mask[:, :63],
encoder_output=hidden_emb[:, 62:63],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert out_1 is None
out_2 = encoder(
retrieved_emb[:, :1],
context_mask[:, :1],
context_attn_mask=hidden_mask[:, :64],
encoder_output=hidden_emb[:, 63:64],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (encoder.encoder_output - hidden_emb[:, :64]).abs().max().item() < 1e-5
assert (out_gt[:, 0,] - out_2[:, 0]).abs().max().item() < 1e-2
out_test = encoder(
retrieved_emb[:, :1],
context_mask[:, :1],
context_attn_mask=hidden_mask[:, :64],
encoder_output=hidden_emb[:, :64],
)
assert (out_gt[:, 0,] - out_test[:, 0]).abs().max().item() < 1e-2
assert (out_gt[:, 0,] - out_2[:, 0]).abs().max().item() < 1e-2
for i in range(64, 127):
out_3 = encoder(
retrieved_emb[:, :1],
context_mask[:, :1],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
i = 127
out_3 = encoder(
retrieved_emb[:, :2],
context_mask[:, :2],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (encoder.encoder_output - hidden_emb[:, 64:128]).abs().max().item() < 1e-5
assert (out_gt[:, :2,] - out_3).abs().max().item() < 1e-2
# test inference
for i in range(128, 191):
out_4 = encoder(
retrieved_emb[:, :2],
context_mask[:, :2],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
i = 191
out_4 = encoder(
retrieved_emb[:, :3],
context_mask[:, :3],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (encoder.encoder_output - hidden_emb[:, 128:192]).abs().max().item() < 1e-5
assert (out_gt[:, :3,] - out_4).abs().max().item() < 1e-2
out_2 = encoder(
retrieved_emb[:, :2],
context_mask[:, :2],
context_attn_mask=hidden_mask[:, :130],
encoder_output=hidden_emb[:, :130, :],
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
for i in range(130, 191):
out_2 = encoder(
retrieved_emb[:, :2],
context_mask[:, :2],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
i = 191
out_4 = encoder(
retrieved_emb[:, :3],
context_mask[:, :3],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (encoder.encoder_output - hidden_emb[:, 128:192]).abs().max().item() < 1e-5
assert (out_gt[:, :3,] - out_4).abs().max().item() < 1e-2
@pytest.mark.unit
def test_cross_attn_inference(self):
num_layers = 1
init_method_std = 0.02
batch = 2
neighbors = 2
# rotary pos emb dim
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
context_chunk_size = 2 * text_chunk_size
input_length = chunks * text_chunk_size
vocab_size = 20000
rot_dim = dim // num_attention_heads
rotary_pos_emb = RotaryEmbedding(rot_dim).cuda().half()
        hidden = torch.randint(0, vocab_size, (input_length, batch)).cuda()  # (seq, batch)
hidden_mask = (hidden != pad_id).cuda()
hidden_emb = torch.rand(input_length, batch, dim).cuda().half() # (seq, batch, dim)
retrieved = torch.randint(0, vocab_size, (chunks, neighbors, context_chunk_size, batch)).cuda()
# retrieved tokens - (num chunks, num retrieved neighbors, retrieved chunk with continuation, batch)
# context attention mask [b, np, sq, sk]
context_mask = (retrieved != pad_id).cuda()
retrieved_emb = torch.rand(chunks, neighbors, context_chunk_size, batch, dim).cuda().half()
# retrieved tokens - (num chunks, num retrieved neighbors, retrieved chunk with continuation, batch, hidden)
# need to add extra chunk size, since it will be shifted
cross_attn_q_pos_emb = rotary_pos_emb(text_chunk_size + text_chunk_size - 1, offset=-text_chunk_size + 1)
cross_attn_k_pos_emb = rotary_pos_emb(context_chunk_size)
cross_attn_pos_emb = (cross_attn_q_pos_emb, cross_attn_k_pos_emb)
def get_attn_mask_3d(hidden_mask, context_mask, chunks):
causal_padding = text_chunk_size - 1
            remainder = (text_chunk_size - (hidden_mask.shape[0] + 1)) % text_chunk_size
            hidden_mask = F.pad(hidden_mask, (0, 0, -causal_padding, remainder), value=False)
dec_attn_mask = rearrange(hidden_mask, '(k n) b -> (b k) n', k=chunks)
context_attn_mask = rearrange(context_mask, 'k r n b -> (b k) (r n)')
enc_dec_attn_mask_3d = build_attention_mask_3d(
source_mask=dec_attn_mask, target_mask=context_attn_mask, attn_mask_type=AttnMaskType.padding,
)
enc_dec_attn_mask_3d = enc_dec_attn_mask_3d[:, None, :, :]
return enc_dec_attn_mask_3d
enc_dec_attn_mask_3d = get_attn_mask_3d(hidden_mask, context_mask, chunks)
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
cross_attn = (
ParallelChunkedCrossAttention(
init_method=init_method,
output_layer_init_method=scaled_init_method,
layer_number=1,
num_attention_heads=num_attention_heads,
hidden_size=dim,
precision=16,
chunk_size=text_chunk_size,
)
.cuda()
.half()
)
out, bias = cross_attn(
hidden_emb, enc_dec_attn_mask_3d, encoder_output=retrieved_emb, rotary_pos_emb=cross_attn_pos_emb
)
assert out.shape == torch.Size([input_length, batch, dim])
assert bias.shape == torch.Size([dim])
attn_mask_3d = None
out_1, b = cross_attn(
hidden_emb[:62],
attn_mask_3d,
encoder_output=None,
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
)
assert (out_1 - torch.zeros_like(hidden_emb[:62])).abs().max() == 0
out_1, b = cross_attn(
hidden_emb[62:63],
attn_mask_3d,
encoder_output=None,
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out_1 - torch.zeros_like(hidden_emb[62:63])).abs().max() == 0
attn_mask_3d = get_attn_mask_3d(hidden_mask[:64], context_mask[:1], 1)
out_2, b = cross_attn(
hidden_emb[63:64],
attn_mask_3d,
encoder_output=retrieved_emb[:1],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[63] - out_2[0]).abs().max().item() < 1e-2
for i in range(64, 127):
attn_mask_3d = get_attn_mask_3d(hidden_mask[: i + 1], context_mask[:1], 1)
out_2, b = cross_attn(
hidden_emb[i : i + 1],
attn_mask_3d,
encoder_output=retrieved_emb[:1],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
i = 127
attn_mask_3d = get_attn_mask_3d(hidden_mask[: i + 1], context_mask[:2], 2)
out_3, b = cross_attn(
hidden_emb[i : i + 1],
attn_mask_3d,
encoder_output=retrieved_emb[:2],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[i] - out_3[0]).abs().max().item() < 1e-2
attn_mask_3d = get_attn_mask_3d(hidden_mask[:130], context_mask[:2], 2)
out_1, b = cross_attn(
hidden_emb[:130],
attn_mask_3d,
encoder_output=retrieved_emb[:2],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
)
assert (out[:130] - out_1[:130]).abs().max().item() < 1e-2
for i in range(130, 191):
attn_mask_3d = get_attn_mask_3d(hidden_mask[: i + 1], context_mask[:2], 2)
out_2, b = cross_attn(
hidden_emb[i : i + 1],
attn_mask_3d,
encoder_output=retrieved_emb[:2],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
i = 191
attn_mask_3d = get_attn_mask_3d(hidden_mask[: i + 1], context_mask[:3], 3)
out_4, b = cross_attn(
hidden_emb[i : i + 1],
attn_mask_3d,
encoder_output=retrieved_emb[:3],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[i] - out_4[0]).abs().max().item() < 1e-2
@pytest.mark.unit
def test_retrieval_decoder_inference(self):
init_method_std = 0.02
# rotary pos emb dim
batch = 2
neighbors = 2
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
input_length = chunks * text_chunk_size
vocab_size = 20000
# rot_dim = dim // num_attention_heads
# rotary_pos_emb = RotaryEmbedding(rot_dim).cuda().half()
        hidden = torch.randint(0, vocab_size, (batch, input_length)).cuda()  # (batch, seq)
hidden_mask = (hidden != pad_id).cuda()
hidden_emb = torch.rand(batch, input_length, dim).cuda().half() # (batch, seq, dim)
# context_chunk_size = 128
retrieved = torch.randint(0, vocab_size, (batch, chunks, neighbors, 2 * text_chunk_size)).cuda()
# retrieved tokens - (batch, num chunks, num retrieved neighbors, retrieved chunk with continuation)
# context attention mask [b, np, sq, sk]
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
retrieved_emb = torch.rand(batch, chunks, neighbors, 2 * text_chunk_size, dim).cuda().half()
# retrieved tokens - (batch, num chunks, num retrieved neighbors, retrieved chunk with continuation, hidden)
layer_type = [LayerType.encoder, LayerType.retrieval_decoder, LayerType.encoder, LayerType.retrieval_decoder]
num_layers = len(layer_type)
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
decoder = (
MegatronRetrievalTransformerDecoderModule(
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=dim,
ffn_hidden_size=dim * 4,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
precision=16,
chunk_size=text_chunk_size,
layer_type=layer_type,
hidden_dropout=0.0,
attention_dropout=0.0,
)
.cuda()
.half()
)
out = decoder(hidden_emb, hidden_mask, retrieved_attn_mask=context_mask, retrieved_emb=retrieved_emb)
assert out.shape == torch.Size([batch, input_length, dim])
out_1 = decoder(
hidden_emb[:, :62],
hidden_mask[:, :62],
retrieved_attn_mask=None,
retrieved_emb=None,
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
)
assert (out[:, :62] - out_1[:, :62]).abs().max().item() < 1e-2
out_1 = decoder(
hidden_emb[:, 62:63],
hidden_mask[:, :63],
retrieved_attn_mask=None,
retrieved_emb=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[:, 62] - out_1[:, 0]).abs().max().item() < 1e-2
out_2 = decoder(
hidden_emb[:, 63:64],
hidden_mask[:, :64],
retrieved_attn_mask=context_mask[:, :1],
retrieved_emb=retrieved_emb[:, :1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[:, 63] - out_2[:, 0]).abs().max().item() < 1e-2
for i in range(64, 127):
out_2 = decoder(
hidden_emb[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_attn_mask=context_mask[:, :1],
retrieved_emb=retrieved_emb[:, :1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[:, i] - out_2[:, 0]).abs().max().item() < 1e-2
for i in range(127, 191):
out_3 = decoder(
hidden_emb[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_attn_mask=context_mask[:, :2],
retrieved_emb=retrieved_emb[:, :2],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[:, i] - out_3[:, 0]).abs().max().item() < 1e-2
out_1 = decoder(
hidden_emb[:, :130],
hidden_mask[:, :130],
retrieved_attn_mask=context_mask[:, :2],
retrieved_emb=retrieved_emb[:, :2],
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
)
assert (out[:, :130] - out_1[:, :130]).abs().max().item() < 1e-2
for i in range(130, 191):
out_3 = decoder(
hidden_emb[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_attn_mask=context_mask[:, :2],
retrieved_emb=retrieved_emb[:, :2],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[:, i] - out_3[:, 0]).abs().max().item() < 1e-2
@pytest.mark.unit
def test_encoder_decoder_module_inference(self):
# rotary pos emb dim
batch = 2
neighbors = 2
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
input_length = chunks * text_chunk_size
vocab_size = 20000
enc_num_layers = 4
dec_num_layers = 6
enc_cross_attention = [3] # layer numbers for cross attention
dec_cross_attention = [3, 5] # layer numbers for cross attention
        all_tokens = torch.randint(0, vocab_size, (batch, input_length + 1)).cuda()  # (batch, seq + 1)
hidden = all_tokens[:, :-1]
labels = all_tokens[:, 1:]
hidden_mask = (hidden != pad_id).cuda()
retrieved = torch.randint(0, vocab_size, (batch, chunks, neighbors, 2 * text_chunk_size)).cuda()
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
class FakeTokenizer:
eos_id = vocab_size - 2
tokenizer = FakeTokenizer()
encoder_decoder = (
MegatronRetrievalTokenLevelEncoderDecoderModule(
vocab_size=vocab_size,
hidden_size=dim,
max_position_embeddings=input_length,
num_attention_heads=num_attention_heads,
ffn_hidden_size=dim * 4,
precision=16,
chunk_size=text_chunk_size,
enc_num_layers=enc_num_layers,
dec_num_layers=dec_num_layers,
enc_cross_attention=enc_cross_attention,
dec_cross_attention=dec_cross_attention,
add_position_embedding=False,
tokenizer=tokenizer,
hidden_dropout=0.0,
attention_dropout=0.0,
)
.cuda()
.half()
)
out = encoder_decoder(hidden, hidden_mask, retrieved_ids=retrieved, retrieved_attn_mask=context_mask)
out_1 = encoder_decoder(
hidden[:, :62],
hidden_mask[:, :62],
retrieved_attn_mask=None,
retrieved_ids=None,
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, :62] - out_1[:, :62]).abs().max().item() < 1e-2
out_1 = encoder_decoder(
hidden[:, 62:63],
hidden_mask[:, :63],
retrieved_attn_mask=None,
retrieved_ids=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, 62] - out_1[:, 0]).abs().max().item() < 1e-2
out_2 = encoder_decoder(
hidden[:, 63:64],
hidden_mask[:, :64],
retrieved_ids=retrieved[:, :1],
retrieved_attn_mask=context_mask[:, :1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, 63] - out_2[:, 0]).abs().max().item() < 1e-2
for i in range(64, 127):
out_2 = encoder_decoder(
hidden[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_ids=retrieved[:, :1],
retrieved_attn_mask=context_mask[:, :1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, i] - out_2[:, 0]).abs().max().item() < 1e-2
for i in range(127, 191):
out_3 = encoder_decoder(
hidden[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_ids=retrieved[:, :2],
retrieved_attn_mask=context_mask[:, :2],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, i] - out_3[:, 0]).abs().max().item() < 1e-2
out_1 = encoder_decoder(
hidden[:, :130],
hidden_mask[:, :130],
retrieved_ids=retrieved[:, :2],
retrieved_attn_mask=context_mask[:, :2],
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, :130] - out_1[:, :130]).abs().max().item() < 1e-2
for i in range(130, 191):
out_3 = encoder_decoder(
hidden[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_ids=retrieved[:, :2],
retrieved_attn_mask=context_mask[:, :2],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, i] - out_3[:, 0]).abs().max().item() < 1e-2
|
gkucsko/NeMo
|
nemo/collections/nlp/data/language_modeling/megatron/ul2_dataset.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UL2 Style dataset from https://arxiv.org/abs/2205.05131"""
import numpy as np
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import create_extreme_masked_lm_predictions
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
from nemo.collections.nlp.data.language_modeling.megatron.lm_adapted_t5_dataset import T5LMAdaptedDataset
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
class UL2Dataset(T5Dataset):
""" UL2 Dataset from https://arxiv.org/abs/2205.05131.
Consists of three different objectives:
1. Short span masking with small probabilities (ex: T5). Typically max ngram size of 5 with 0.15 mask prob.
2. Extreme span masking with either large probabilities or large ngram sizes or both.
    3. Prefix-LM as in T5 or the LM-adapted T5 (prompt-tuning paper).
"""
def __init__(
self,
cfg,
trainer,
tokenizer,
name,
indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
max_seq_length,
max_seq_length_dec,
seed,
masked_lm_prob=0.15,
extreme_masked_lm_prob=0.5,
short_seq_prob=0.0,
min_ngram_size=2,
max_ngram_size=10,
mean_ngram_size=3,
extreme_max_ngram_size=128,
extreme_min_ngram_size=32,
extreme_mean_ngram_size=64,
prefix_lm_pivot_mean=0.25, # This is represented as a percentage of the total length.
ngram_span_length_distribution=LengthDistribution.geometric,
extreme_ngram_span_length_distribution=LengthDistribution.truncated_normal,
permutation=False,
whole_word_masking=True,
favor_long_ngrams=False,
):
super().__init__(
cfg=cfg,
trainer=trainer,
tokenizer=tokenizer,
name=name,
indexed_dataset=indexed_dataset,
data_prefix=data_prefix,
num_epochs=num_epochs,
max_num_samples=max_num_samples,
max_seq_length=max_seq_length - 1, # -1 to account for the added mask type token
max_seq_length_dec=max_seq_length_dec,
seed=seed,
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
max_ngram_size=max_ngram_size,
            mean_ngram_size=None,  # TODO: Determine whether we want to actually pass the mean ngram size as an override to max here.
geometric_dist=ngram_span_length_distribution == LengthDistribution.geometric,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
)
self.mean_ngram_size = mean_ngram_size
self.min_ngram_size = min_ngram_size
self.extreme_masked_lm_prob = extreme_masked_lm_prob
self.extreme_min_ngram_size = extreme_min_ngram_size
self.extreme_max_ngram_size = extreme_max_ngram_size
self.extreme_mean_ngram_size = extreme_mean_ngram_size
self.ngram_span_length_distribution = ngram_span_length_distribution
self.extreme_ngram_span_length_distribution = extreme_ngram_span_length_distribution
self.prefix_lm_pivot_mean = prefix_lm_pivot_mean
def __getitem__(self, idx):
start_index, end_index, seq_length = self.samples_mapping[idx]
sample = []
for index in range(start_index, end_index):
sample.append(self.indexed_dataset[index])
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
np_rng = np.random.RandomState(seed=(self.seed + idx))
masking_type = np_rng.randint(0, 3) # 0: short span masking, 1: extreme masking, 2: prefix-LM
if masking_type == 0:
# Call T5's build training sample for regular short span masking.
sample = super().build_training_sample(sample=sample, target_seq_length=seq_length, np_rng=np_rng)
sample = self._prepend_mask_type_token(sample, '<extra_id_r>')
elif masking_type == 1:
sample = self.build_extreme_masking_training_sample(
sample=sample, target_seq_length=seq_length, np_rng=np_rng
)
sample = self._prepend_mask_type_token(sample, '<extra_id_x>')
elif masking_type == 2:
sample = [token for sentence in sample for token in sentence]
sample = T5LMAdaptedDataset.get_prefix_lm_sample(
sample=sample,
max_seq_length_encoder=self.max_seq_length,
max_seq_length_decoder=self.max_seq_length, # We don't use max_seq_length_decoder here since we typically want to use long decoder sequences for better LM performance.
np_rng=np_rng,
tokenizer=self.tokenizer,
pivot_mean=self.prefix_lm_pivot_mean,
pivot_distribution=self.extreme_ngram_span_length_distribution,
)
sample = self._prepend_mask_type_token(sample, '<extra_id_s>')
return sample
def _prepend_mask_type_token(self, sample, token):
token_id = self.tokenizer.text_to_ids(token)
assert len(token_id) == 1
token_id = token_id[0]
text_enc = np.concatenate([[token_id], sample['text_enc']])
sample['text_enc'] = text_enc
sample['enc_mask'] = np.concatenate([[1], sample['enc_mask']])
return sample
def build_extreme_masking_training_sample(
self, sample, target_seq_length, np_rng,
):
"""Build training sample.
Arguments:
sample: A list of sentences in which each sentence is a list token ids.
target_seq_length: Desired sequence length.
max_seq_length: Maximum length of the sequence. All values are padded to
this length.
vocab_id_list: List of vocabulary ids. Used to pick a random id.
vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
cls_id: Start of example id.
sep_id: Separator id.
mask_id: Mask token id.
pad_id: Padding token id.
masked_lm_prob: Probability to mask tokens.
            np_rng: Random number generator. Note that this rng state should be
                numpy and not python since python randint is inclusive for
                the upper bound whereas the numpy one is exclusive.
bos_id: start of decoder example id
eos_id: end of generation id
sentinel_tokens: unique value to be substituted for every replaced span
tokenizer_type: wordpiece (BERT-style) or sentencepiece tokenizer. Used for whole word masking logic.
max_ngram_size: maximum size of ngrams to be masked.
mean_ngram_size: mean size of ngrams to be masked (only used if geometric_dist=True).
geometric_dist: Uses a geometric distribution to sample ngram size.
permutation: Permutes the ngrams.
whole_word_masking: Always masks entire words instead of individual sub-word tokens.
favor_long_ngrams: Favor longer ngrams over shorter ones.
"""
assert target_seq_length <= self.max_seq_length
# flatten sentences into one list
tokens = [token for sentence in sample for token in sentence]
# Truncate to `target_sequence_length`.
max_num_tokens = target_seq_length
tokens = tokens[:max_num_tokens]
# Determine if we have a lot of masking or little masking. There are three cases:
# 1. Small masking prob, large spans.
# 2. Large masking prob, small spans.
# 3. Large masking prob, large spans.
task_type = np_rng.randint(0, 3)
if task_type == 0:
# Large spans, small masking prob
max_ngram_size, mean_ngram_size, min_ngram_size, masked_lm_prob = (
self.extreme_max_ngram_size,
self.extreme_mean_ngram_size,
self.extreme_min_ngram_size,
self.masked_lm_prob,
)
elif task_type == 1:
# Small spans, large masking prob
max_ngram_size, mean_ngram_size, min_ngram_size, masked_lm_prob = (
self.max_ngram_size,
self.mean_ngram_size,
self.min_ngram_size,
self.extreme_masked_lm_prob,
)
else:
# Large spans, large masking prob
max_ngram_size, mean_ngram_size, min_ngram_size, masked_lm_prob = (
self.extreme_max_ngram_size,
self.extreme_mean_ngram_size,
                self.extreme_min_ngram_size,
self.extreme_masked_lm_prob,
)
# Masking.
max_predictions_per_seq = masked_lm_prob * max_num_tokens
lm_pred = create_extreme_masked_lm_predictions(
tokens=tokens,
masked_lm_prob=masked_lm_prob,
mask_id=self.mask_id,
max_predictions_per_seq=max_predictions_per_seq,
np_rng=np_rng,
max_ngram_size=max_ngram_size,
min_ngram_size=min_ngram_size,
mean_ngram_size=mean_ngram_size,
span_length_distribution=self.extreme_ngram_span_length_distribution,
)
if self.masked_lm_prob == 0:
(output_tokens, masked_positions, masked_labels) = lm_pred
masked_spans = None
else:
(output_tokens, masked_positions, masked_labels, masked_spans) = lm_pred
# Padding.
tokens_enc, tokens_dec_in, labels, enc_mask, dec_mask, loss_mask = self.pad_and_convert_to_numpy(
tokens=tokens,
output_tokens=output_tokens,
masked_positions=masked_positions,
masked_labels=masked_labels,
masked_spans=masked_spans,
np_rng=np_rng,
)
train_sample = {
'text_enc': tokens_enc,
'text_dec': tokens_dec_in,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
return train_sample
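# Hedged illustration (not part of the original module): a minimal sketch of what
# `_prepend_mask_type_token` does to a sample dict. The token id 42 and the arrays
# below are made-up example values; a real run would use the id returned by
# tokenizer.text_to_ids('<extra_id_r>').
if __name__ == "__main__":
    _demo = {'text_enc': np.array([10, 11, 12]), 'enc_mask': np.array([1, 1, 1])}
    _mask_type_id = 42  # hypothetical sentinel token id
    _demo['text_enc'] = np.concatenate([[_mask_type_id], _demo['text_enc']])
    _demo['enc_mask'] = np.concatenate([[1], _demo['enc_mask']])
    print(_demo['text_enc'])  # [42 10 11 12]
    print(_demo['enc_mask'])  # [1 1 1 1]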
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/dataset_preparation/filter_sentences_with_errors.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to filter sentences containing bad examples from Google TN Dataset.
"""
from argparse import ArgumentParser
from os import listdir, mkdir
from os.path import exists, isfile, join
from typing import Set
parser = ArgumentParser(description="Filter Google TN Dataset by error vocabulary")
parser.add_argument(
"--data_dir", required=True, type=str, help='Path to data directory with files like output-00000-of-00100.tsv'
)
parser.add_argument(
"--out_dir", required=True, type=str, help='Output data directory, same files (with some sentences filtered)'
)
parser.add_argument("--errors_vocab_filename", required=True, type=str, help='File with error vocabulary')
parser.add_argument("--lang", required=True, type=str, help="Language")
args = parser.parse_args()
def filter_file(inp_filename: str, out_filename: str, error_vcb: Set) -> None:
"""Filter out whole sentences containing bad itn conversions. The output format is the same as input.
Args:
inp_filename: Name of input file in Google TN Dataset format.
out_filename: Name of output file in Google TN Dataset format.
error_vcb: Set of tuples with erroneous conversion, e.g. ("CARDINAL", "two", "132")
"""
out = open(out_filename, "w", encoding="utf-8")
sent_lines = []
sent_is_ok = True
with open(inp_filename, "r", encoding="utf-8") as f:
for line in f:
sent_lines.append(line.strip())
if line.startswith("<eos>"):
if sent_is_ok and len(sent_lines) > 1: # there should be at least one line except <eos>
out.write("\n".join(sent_lines) + "\n")
sent_lines = []
sent_is_ok = True
else:
cls, written, spoken = line.strip().split("\t")
k = (cls, spoken.casefold(), written.casefold())
if k in error_vcb:
sent_is_ok = False
out.close()
def main() -> None:
if not exists(args.data_dir):
raise ValueError(f"Data dir {args.data_dir} does not exist")
# load errors vocabulary
error_vcb = set()
with open(args.errors_vocab_filename, "r", encoding="utf-8") as f:
for line in f:
cls, spoken, written = line.strip().split("\t")
k = (cls, spoken, written)
error_vcb.add(k)
for subdir in listdir(args.data_dir):
mkdir(join(args.out_dir, subdir))
for filename in listdir(join(args.data_dir, subdir)):
if not filename.startswith('output'):
continue
inp_filename = join(args.data_dir, subdir, filename)
out_filename = join(args.out_dir, subdir, filename)
if not isfile(inp_filename):
continue
filter_file(inp_filename, out_filename, error_vcb)
if __name__ == "__main__":
main()
|
gkucsko/NeMo
|
scripts/speaker_tasks/filelist_to_manifest.py
|
<reponame>gkucsko/NeMo
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import random
import librosa as l
import numpy as np
import soundfile as sf
import sox
from sklearn.model_selection import StratifiedShuffleSplit
from tqdm.contrib.concurrent import process_map
random.seed(42)
"""
This script converts a filelist file, where each line contains the
<absolute path of wav file>, to a manifest json file.
It can optionally post-process the manifest file to create dev and train splits for speaker embedding
training, and optionally segment each audio file into segments of random DURATIONS, writing those
wav files to CWD.
While creating segments, if the audio is not sampled at 16kHz, it is resampled to 16kHz before the wav file is written.
Args:
--filelist: path to file containing list of audio files
    --manifest (optional): if you already have a manifest file but would like to process it for creating
        segments and splitting, then pass the manifest and ignore the filelist
--id: index of speaker label in filename present in filelist file that is separated by '/'
--out: output manifest file name
--split: if you would want to split the manifest file for training purposes
        you may not need this for the test set. Output file names are <out>_<train/dev>.json, defaults to False
--create_segments: if you would want to segment each manifest line to segments of [1,2,3,4] sec or less
you may not need this for test set, defaults to False
--min_spkrs_count: min number of samples per speaker to consider and ignore otherwise, defaults to 0 (all speakers)
"""
DURATIONS = sorted([1, 2, 3, 4], reverse=True)
MIN_ENERGY = 0.01
CWD = os.getcwd()
SAMPLE_RATE = 16000
def filter_manifest_line(manifest_line):
split_manifest = []
audio_path = manifest_line['audio_filepath']
start = manifest_line.get('offset', 0)
dur = manifest_line['duration']
label = manifest_line['label']
endname = os.path.splitext(audio_path.split(label, 1)[-1])[0]
to_path = os.path.join(CWD, 'segments', label)
to_path = os.path.join(to_path, endname[1:])
os.makedirs(os.path.dirname(to_path), exist_ok=True)
if dur >= min(DURATIONS):
signal, sr = l.load(audio_path, sr=SAMPLE_RATE)
remaining_dur = dur - start
segments = DURATIONS.copy()
mode = int(remaining_dur // sum(DURATIONS))
rem = remaining_dur % sum(DURATIONS)
segments = mode * segments
for val in DURATIONS:
if rem >= val:
segments.append(val)
rem = rem - val
for temp_dur in segments:
segment_audio = signal[int(start * sr) : int(start * sr + temp_dur * sr)]
if l.feature.rms(y=segment_audio).mean() > MIN_ENERGY:
final_string = '_' + str(start) + '_' + str(temp_dur)
final_string = final_string.replace('.', '-')
to_file = to_path + final_string + '.wav'
c_start = int(float(start * sr))
c_end = c_start + int(float(temp_dur * sr))
segment = signal[c_start:c_end]
sf.write(to_file, segment, sr)
meta = manifest_line.copy()
meta['audio_filepath'] = to_file
meta['offset'] = 0
meta['duration'] = temp_dur
split_manifest.append(meta)
start = start + temp_dur
return split_manifest
def count_and_consider_only(speakers, lines, min_count=10):
"""
consider speakers only if samples per speaker is at least min_count
"""
uniq_speakers, indices, counts = np.unique(speakers, return_index=True, return_counts=True)
print("speaker count before filtering minimum number of speaker counts: ", len(uniq_speakers))
required_speakers = {}
for idx, count in enumerate(counts):
if count >= min_count:
required_speakers[uniq_speakers[idx]] = count
print("speaker count after filtering minimum number of speaker counts: ", len(required_speakers))
required_lines = []
speakers_only = []
for idx, speaker in enumerate(speakers):
if speaker in required_speakers:
required_lines.append(lines[idx])
speakers_only.append(speaker)
return speakers_only, required_lines
def write_file(name, lines, idx):
with open(name, 'w', encoding='utf-8') as fout:
for i in idx:
dic = lines[i]
json.dump(dic, fout)
fout.write('\n')
print("wrote", name)
def read_file(filelist, id=-1):
json_lines = []
with open(filelist, 'r') as fo:
lines = fo.readlines()
lines = sorted(lines)
for line in lines:
line = line.strip()
speaker = line.split('/')[id]
speaker = list(speaker)
speaker = ''.join(speaker)
meta = {"audio_filepath": line, "offset": 0, "duration": None, "label": speaker}
json_lines.append(meta)
return json_lines
def read_manifest(manifest):
data = []
with open(manifest, 'r', encoding='utf-8') as f:
for line in f:
item = json.loads(line)
data.append(item)
return data
def get_duration(json_line):
dur = json_line['duration']
if dur is None:
wav_path = json_line['audio_filepath']
json_line['duration'] = sox.file_info.duration(wav_path)
return json_line
def get_labels(lines):
labels = []
for line in lines:
label = line['label']
labels.append(label)
return labels
def main(filelist, manifest, id, out, split=False, create_segments=False, min_count=10):
if os.path.exists(out):
os.remove(out)
if filelist:
lines = read_file(filelist=filelist, id=id)
lines = process_map(get_duration, lines, chunksize=100)
out_file = os.path.splitext(filelist)[0] + '_manifest.json'
write_file(out_file, lines, range(len(lines)))
else:
lines = read_manifest(manifest)
lines = process_map(get_duration, lines, chunksize=100)
if create_segments:
print(f"creating and writing segments to {CWD}")
lines = process_map(filter_manifest_line, lines, chunksize=100)
temp = []
for line in lines:
temp.extend(line)
del lines
lines = temp
speakers = [x['label'] for x in lines]
if min_count:
speakers, lines = count_and_consider_only(speakers, lines, abs(min_count))
write_file(out, lines, range(len(lines)))
path = os.path.dirname(out)
if split:
speakers = [x['label'] for x in lines]
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=42)
for train_idx, test_idx in sss.split(speakers, speakers):
print("number of train samples after split: ", len(train_idx))
out = os.path.join(path, 'train.json')
write_file(out, lines, train_idx)
out = os.path.join(path, 'dev.json')
write_file(out, lines, test_idx)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--filelist", help="path to filelist file", type=str, required=False, default=None)
parser.add_argument("--manifest", help="manifest file name", type=str, required=False, default=None)
parser.add_argument(
"--id",
help="field num seperated by '/' to be considered as speaker label from filelist file, can be ignored if manifest file is already provided with labels",
type=int,
required=False,
default=None,
)
parser.add_argument("--out", help="manifest_file name", type=str, required=True)
parser.add_argument(
"--split",
help="bool if you would want to split the manifest file for training purposes",
required=False,
action='store_true',
)
parser.add_argument(
"--create_segments",
help="bool if you would want to segment each manifest line to segments of 4 sec or less",
required=False,
action='store_true',
)
parser.add_argument(
"--min_spkrs_count",
default=0,
type=int,
help="min number of samples per speaker to consider and ignore otherwise",
)
args = parser.parse_args()
main(
args.filelist, args.manifest, args.id, args.out, args.split, args.create_segments, args.min_spkrs_count,
)
|
gkucsko/NeMo
|
nemo/collections/nlp/modules/common/prompt_table.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from nemo.core.classes import Exportable, NeuralModule
try:
from apex.transformer import tensor_parallel
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = ['PromptTable', 'VirtualPromptSource', 'VirtualPromptStyle', 'VirtualPromptPlaceholderToken']
class VirtualPromptStyle(enum.Enum):
P_TUNING = 'p-tuning'
PROMPT_TUNING = 'prompt-tuning'
INFERENCE = 'inference'
class VirtualPromptSource(enum.Enum):
PROMPT_TABLE = 'prompt_table'
PROMPT_ENCODER = 'prompt_encoder'
class VirtualPromptPlaceholderToken(enum.Enum):
BASE = '<prompt_'
END = '>'
class PromptTable(NeuralModule, Exportable):
def __init__(self, existing_tasks, task_templates, task_id_num_to_name, hidden_size):
super().__init__()
self.task_templates = task_templates
self.hidden_size = hidden_size
self.prompt_table = torch.nn.ModuleDict()
self.task_id_num_to_name = {}
# Need to init prompt embeddings for each existing task before loading tuned weights
if existing_tasks and existing_tasks[0] is not None:
for taskname in existing_tasks:
total_virtual_tokens = self.task_templates[taskname]["total_virtual_tokens"]
self.prompt_table[taskname] = PromptEmbedding(
init_from_prompt_text=False,
hidden_size=self.hidden_size,
total_virtual_tokens=total_virtual_tokens,
)
# Make sure tasknames and task id nums line up correctly in prompt table
self.task_id_num_to_name = task_id_num_to_name
def forward(self, task_id_num, input_ids=None):
task_id_num = task_id_num.item()
tasknames = self.task_id_num_to_name[task_id_num]
return self.prompt_table[tasknames](input_ids)
def remove_prompt(self, taskname):
if taskname not in self.prompt_table:
return
        # find the task_id_num associated with the taskname to delete
task_id_num = None
for key, value in self.task_id_num_to_name.items():
if value == taskname:
task_id_num = key
break
del self.task_id_num_to_name[task_id_num]
del self.prompt_table[taskname]
def init_prompt_from_random(self, taskname, total_virtual_tokens):
"""Add new virtual prompt to be tuned.
        Initialize prompt weights using a pytorch init method
"""
        # Initialize prompt embeddings from a pytorch random init method
self.prompt_table[taskname] = PromptEmbedding(
init_from_prompt_text=False, hidden_size=self.hidden_size, total_virtual_tokens=total_virtual_tokens,
)
def init_prompt_from_text(self, taskname, init_token_ids, word_embeddings, total_virtual_tokens):
"""Add new virtual prompt to be tuned.
        Initialize prompt weights from existing embeddings of specific vocab tokens.
"""
# Trim or iterate until num_text_tokens matches total_virtual_tokens
num_text_tokens = len(init_token_ids)
if num_text_tokens > total_virtual_tokens:
init_token_ids = init_token_ids[:total_virtual_tokens]
elif num_text_tokens < total_virtual_tokens:
num_reps = math.ceil(total_virtual_tokens / num_text_tokens)
init_token_ids = init_token_ids * num_reps
# Set dictionary item keys and datatypes for broadcasting
keys = ['text']
datatype = torch.int64
# Broadcast int ids across gpus for tensor parallel
init_token_ids = init_token_ids[:total_virtual_tokens]
init_token_ids = {'text': torch.tensor(init_token_ids, dtype=torch.int64)}
init_token_ids_b = tensor_parallel.broadcast_data(keys, init_token_ids, datatype)
init_token_ids = init_token_ids_b['text'].long()
        # Use a copy of the token embedding weights to initialize the prompt embeddings
word_embedding_weights = word_embeddings(init_token_ids).detach().clone()
self.prompt_table[taskname] = PromptEmbedding(
init_from_prompt_text=True,
hidden_size=self.hidden_size,
total_virtual_tokens=total_virtual_tokens,
word_embedding_weights=word_embedding_weights,
)
def add_prompt_from_p_tuning_encoder(self, taskname, virtual_prompt_embeddings, total_virtual_tokens):
"""
Add virtual prompts that have already been tuned using p-tuning.
"""
self.prompt_table[taskname] = PromptEmbedding(
init_from_prompt_text=True,
hidden_size=self.hidden_size,
total_virtual_tokens=total_virtual_tokens,
word_embedding_weights=virtual_prompt_embeddings,
)
class PromptEmbedding(NeuralModule, Exportable):
"""Prompt embeddings
    Arguments:
        init_from_prompt_text: Whether to initialize prompt embeddings
            from certain LM embeddings corresponding to a prompt string
        hidden_size: hidden size should match the LM embedding size
        total_virtual_tokens: length of the prompt initialized from a torch init method
        word_embedding_weights: token embedding vectors for the text init option
        init_method: pytorch init method
        prompt_embedding_dropout_prob: dropout probability
"""
def __init__(
self,
init_from_prompt_text,
hidden_size,
total_virtual_tokens,
word_embedding_weights=None,
init_method=init.xavier_normal_,
prompt_embedding_dropout_prob=0.0,
):
super().__init__()
self.hidden_size = hidden_size
self.total_virtual_tokens = total_virtual_tokens
# Randomly init token and position embeddings
self.prompt_embeddings = torch.nn.Embedding(self.total_virtual_tokens, self.hidden_size)
init_method(self.prompt_embeddings.weight)
# Set embedding weights to be embeddings from prompt tokens
if init_from_prompt_text:
self.prompt_embeddings.weight = nn.Parameter(word_embedding_weights)
        # Set fixed indices for forward pass
self.register_buffer('indices', torch.LongTensor(list(range(self.total_virtual_tokens))))
self.embedding_dropout = torch.nn.Dropout(prompt_embedding_dropout_prob)
def forward(self, input_ids=None):
# Just get embeddings and dropout
if input_ids is None:
prompt_embeddings = self.prompt_embeddings(self.indices)
else:
prompt_embeddings = self.prompt_embeddings(input_ids)
prompt_embeddings = self.embedding_dropout(prompt_embeddings)
return prompt_embeddings
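# Hedged usage sketch (illustrative only, not part of the original module): builds a
# randomly initialized PromptEmbedding and fetches all virtual-token embeddings.
# The sizes below are made-up example values.
if __name__ == "__main__":
    _emb = PromptEmbedding(init_from_prompt_text=False, hidden_size=8, total_virtual_tokens=4)
    _all_prompt_vectors = _emb()  # no input_ids -> embeddings for all 4 virtual tokens
    print(_all_prompt_vectors.shape)  # torch.Size([4, 8])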
|
gkucsko/NeMo
|
nemo/collections/asr/parts/preprocessing/__init__.py
|
<filename>nemo/collections/asr/parts/preprocessing/__init__.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.parts.preprocessing.feature_loader import ExternalFeatureLoader
from nemo.collections.asr.parts.preprocessing.features import FeaturizerFactory, FilterbankFeatures, WaveformFeaturizer
from nemo.collections.asr.parts.preprocessing.perturb import (
AudioAugmentor,
AugmentationDataset,
GainPerturbation,
ImpulsePerturbation,
NoisePerturbation,
Perturbation,
RirAndNoisePerturbation,
ShiftPerturbation,
SpeedPerturbation,
TimeStretchPerturbation,
TranscodePerturbation,
WhiteNoisePerturbation,
perturbation_types,
process_augmentations,
register_perturbation,
)
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/input_example/assistant_input_example.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
class DialogueAssistantInputExample(DialogueInputExample):
"""
Template for DialogueAssistantInputExample
Meant as a descriptor rather than to be instantiated
Please instantiate using the base class 'DialogueInputExample'
{
"utterance": <utterance>,
"labels": {
"service": <service>,
"intent": <intent>,
"slots": {
"<slot-name1>": [<slot-value1>, <slot-value2>],
"<slot-name2>": [<slot-value2>],
}
},
"label_positions":{
"slots": {
"<slot-name1>": {
# note for the Assistant dataset, start and end are word positions rather than char position
# these are whitespace-delimited word positions rather than tokenization-specific sub-word tokens.
"exclusive_end": 3,
"slot": "restaurant_name",
"start": 1
},
}
},
"possible_labels": {
"service": [<service1>, <service2>, ...],
"intent": [<intent1>, <intent2>, ...],
"slots": {
# all slots for categorical variables
# empty list for extractive slots
                # Assistant only supports extractive slots
"<slot-name1>": [],
"<slot-name2>": [],
}
}
}
"""
|
gkucsko/NeMo
|
nemo/core/classes/module.py
|
<reponame>gkucsko/NeMo<gh_stars>0
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from torch.nn import Module
from nemo.core.classes.common import FileIO, Serialization, Typing
__all__ = ['NeuralModule']
class NeuralModule(Module, Typing, Serialization, FileIO):
"""
Abstract class offering interface shared between all PyTorch Neural Modules.
"""
@property
def num_weights(self):
"""
Utility property that returns the total number of parameters of NeuralModule.
"""
num: int = 0
for p in self.parameters():
if p.requires_grad:
num += p.numel()
return num
def input_example(self, max_batch=None, max_dim=None):
"""
Override this method if random inputs won't work
Returns:
A tuple sample of valid input data.
"""
return None
def freeze(self) -> None:
r"""
Freeze all params for inference.
"""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""
Unfreeze all parameters for training.
"""
for param in self.parameters():
param.requires_grad = True
self.train()
@contextmanager
def as_frozen(self):
"""
Context manager which temporarily freezes a module, yields control and finally unfreezes the module.
"""
training_mode = self.training
grad_map = {}
for pname, param in self.named_parameters():
grad_map[pname] = param.requires_grad
self.freeze()
try:
yield
finally:
self.unfreeze()
for pname, param in self.named_parameters():
param.requires_grad = grad_map[pname]
if training_mode:
self.train()
else:
self.eval()
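# Hedged usage sketch (illustrative only, not part of the original module): a tiny
# NeuralModule subclass demonstrating `as_frozen`. The Linear layer is an arbitrary example.
if __name__ == "__main__":
    import torch

    class _TinyModule(NeuralModule):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)

    _m = _TinyModule()
    with _m.as_frozen():
        assert all(not p.requires_grad for p in _m.parameters())  # frozen inside the context
    assert all(p.requires_grad for p in _m.parameters())  # restored afterwards
    print("num_weights:", _m.num_weights)  # 4*4 weights + 4 biases = 20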
|
gkucsko/NeMo
|
nemo_text_processing/inverse_text_normalization/vi/taggers/time.py
|
<reponame>gkucsko/NeMo<filename>nemo_text_processing/inverse_text_normalization/vi/taggers/time.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import (
GraphFst,
convert_space,
delete_extra_space,
delete_space,
insert_space,
)
from nemo_text_processing.inverse_text_normalization.vi.utils import get_abs_path
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class TimeFst(GraphFst):
"""
Finite state transducer for classifying time
e.g. hai rưỡi -> time { hours: "2" minutes: "30" }
e.g. chín giờ kém hai mươi -> time { hours: "8" minutes: "40" }
e.g. ba phút hai giây -> time { minutes: "3" seconds: "2" }
e.g. mười giờ chín phút bốn mươi lăm giây -> time { hours: "10" minutes: "9" seconds: "45" }
"""
def __init__(self):
super().__init__(name="time", kind="classify")
# hours, minutes, seconds, suffix, zone, style, speak_period
graph_hours_to = pynini.string_file(get_abs_path("data/time/hours_to.tsv"))
graph_minutes_to = pynini.string_file(get_abs_path("data/time/minutes_to.tsv"))
graph_hours = pynini.string_file(get_abs_path("data/time/hours.tsv"))
graph_minutes = pynini.string_file(get_abs_path("data/time/minutes.tsv"))
time_zone_graph = pynini.invert(pynini.string_file(get_abs_path("data/time/time_zone.tsv")))
graph_half = pynini.cross("rưỡi", "30")
oclock = pynini.cross("giờ", "")
minute = pynini.cross("phút", "")
optional_minute = pynini.closure(delete_space + minute, 0, 1)
second = pynini.cross("giây", "")
final_graph_hour = pynutil.insert('hours: "') + graph_hours + pynutil.insert('"') + delete_space + oclock
graph_minute = graph_minutes + optional_minute
graph_second = graph_minutes + delete_space + second
final_time_zone_optional = pynini.closure(
delete_space
+ insert_space
+ pynutil.insert('zone: "')
+ convert_space(time_zone_graph)
+ pynutil.insert('"'),
0,
1,
)
graph_hm = (
final_graph_hour
+ delete_extra_space
+ pynutil.insert('minutes: "')
+ (graph_minute | graph_half)
+ pynutil.insert('"')
)
graph_hms = (
final_graph_hour
+ delete_extra_space
+ pynutil.insert('minutes: "')
+ graph_minutes
+ delete_space
+ minute
+ pynutil.insert('"')
+ delete_extra_space
+ pynutil.insert('seconds: "')
+ graph_second
+ pynutil.insert('"')
)
graph_ms = (
pynutil.insert('minutes: "')
+ graph_minutes
+ delete_space
+ minute
+ pynutil.insert('"')
+ delete_extra_space
+ pynutil.insert('seconds: "')
+ (graph_second | graph_half)
+ pynutil.insert('"')
)
graph_hours_to_component = graph_hours @ graph_hours_to
graph_minutes_to_component = graph_minutes @ graph_minutes_to
graph_time_to = (
pynutil.insert('hours: "')
+ graph_hours_to_component
+ pynutil.insert('"')
+ delete_space
+ oclock
+ delete_space
+ pynutil.delete("kém")
+ delete_extra_space
+ pynutil.insert('minutes: "')
+ graph_minutes_to_component
+ pynutil.insert('"')
+ optional_minute
)
final_graph = (final_graph_hour | graph_hm | graph_hms) + final_time_zone_optional
final_graph |= graph_ms
final_graph |= graph_time_to
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
|
gkucsko/NeMo
|
scripts/dataset_processing/nlp/squad/prompt_learning_squad_preprocessing.py
|
<filename>scripts/dataset_processing/nlp/squad/prompt_learning_squad_preprocessing.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import random
from tqdm import tqdm
"""
Dataset preprocessing script for the SQuAD dataset: https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json
Converts the dataset into a jsonl format that can be used for p-tuning/prompt tuning in NeMo.
Inputs:
data-dir: (str) The directory where the squad dataset was downloaded, files will be saved here
file-name: (str) Name of the input file you want to process
save-name-base: (str) The base name for each of the train, val, and test files. If save-name-base were 'squad' for
example, the files would be saved as squad_train.jsonl, squad_val.jsonl, and squad_test.jsonl
make-ground-truth: (bool) If true, test files will include answers, if false, test files will not include answers.
    include-topic-name: Whether to include the topic name for the paragraph in the data json. See the squad explanation
        below for more context on what is meant by 'topic name'.
random-seed: (int) Random seed for repeatable shuffling of train/val/test splits.
    train-percent: (float) Percentage of data that should be used for the train split. The val and test splits will be made
by splitting the remaining data evenly.
Saves train, val, and test files for the SQuAD dataset.
The SQuAD dataset consists of various topics like Beyoncé, IPod, and Symbiosis. Each topic has several paragraphs
associated with it, and each paragraph has several questions and answers related to it. When we separated the
train/validation/test splits, we separated them on the topic level. For example, if the training set contains paragraphs
and questions about the topic Beyoncé, neither the validation nor test sets will contain any questions on this topic.
All questions about a certain topic are isolated to one split of the data.
An example of the processed output written to file:
{
"taskname": "squad",
"context": "Red is the traditional color of warning and danger. In the Middle Ages, a red flag announced that the defenders of a town or castle would fight to defend it, and a red flag hoisted by a warship meant they would show no mercy to their enemy. In Britain, in the early days of motoring, motor cars had to follow a man with a red flag who would warn horse-drawn vehicles, before the Locomotives on Highways Act 1896 abolished this law. In automobile races, the red flag is raised if there is danger to the drivers. In international football, a player who has made a serious violation of the rules is shown a red penalty card and ejected from the game.",
"question": "What did a red flag signal in the Middle Ages?",
"answer": " defenders of a town or castle would fight to defend it"
},
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", type=str, default="data/SQuAD")
parser.add_argument("--file-name", type=str, default="train-v2.0.json")
parser.add_argument("--save-name-base", type=str, default="squad")
parser.add_argument("--make-ground-truth", action='store_true')
parser.add_argument("--include-topic-name", action='store_true')
parser.add_argument("--random-seed", type=int, default=1234)
parser.add_argument("--train-percent", type=float, default=0.8)
args = parser.parse_args()
data_dict = json.load(open(f"{args.data_dir}/{args.file_name}"))
data = data_dict['data']
save_name_base = f"{args.data_dir}/{args.save_name_base}"
process_data(
data, save_name_base, args.train_percent, args.random_seed, args.include_topic_name, args.make_ground_truth
)
def process_data(data, save_name_base, train_percent, random_seed, include_topic, make_ground_truth=False):
data = extract_questions(data, include_topic)
# Data examples are currently grouped by topic, shuffle topic groups
random.seed(random_seed)
random.shuffle(data)
# Decide train/val/test splits on the topic level
data_total = len(data)
train_total = int(data_total * train_percent)
val_total = (data_total - train_total) // 2
train_set = data[0:train_total]
val_set = data[train_total : train_total + val_total]
test_set = data[train_total + val_total :]
# Flatten data for each split now that topics have been confined to one split
train_set = [question for topic in train_set for question in topic]
val_set = [question for topic in val_set for question in topic]
test_set = [question for topic in test_set for question in topic]
# Shuffle train set questions
random.shuffle(train_set)
gen_file(train_set, save_name_base, 'train')
gen_file(val_set, save_name_base, 'val')
gen_file(test_set, save_name_base, 'test', make_ground_truth)
def extract_questions(data, include_topic):
processed_data = []
    # Iterate over topics, want to keep them separate in train/val/test splits
for question_group in data:
processed_topic_data = []
topic = question_group['title']
questions = question_group['paragraphs']
# Iterate over paragraphs related to topics
for qa_group in questions:
context = qa_group['context']
qas = qa_group['qas']
# Iterate over questions about paragraph
for qa in qas:
question = qa['question']
try:
answer = qa['answers'][0]['text']
except IndexError:
continue
example_json = {"taskname": "squad", "context": context, "question": question, "answer": " " + answer}
if include_topic:
example_json["topic"] = topic
processed_topic_data.append(example_json)
processed_data.append(processed_topic_data)
return processed_data
def gen_file(data, save_name_base, split_type, make_ground_truth=False):
save_path = f"{save_name_base}_{split_type}.jsonl"
print(f"Saving {split_type} split to {save_path}")
with open(save_path, 'w') as save_file:
for example_json in tqdm(data):
            # Don't want labels in the test set
if split_type == "test" and not make_ground_truth:
del example_json["answer"]
save_file.write(json.dumps(example_json) + '\n')
if __name__ == "__main__":
main()
|
gkucsko/NeMo
|
nemo/collections/nlp/modules/common/megatron/rotary_pos_embedding.py
|
<gh_stars>0
# coding=utf-8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from einops import rearrange
from torch import einsum, nn
__all__ = ['RotaryEmbedding', 'apply_rotary_pos_emb']
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, max_seq_len, offset=0):
seq = torch.arange(max_seq_len, device=self.inv_freq.device) + offset
freqs = einsum('i , j -> i j', seq.type_as(self.inv_freq), self.inv_freq)
# first part even vector components, second part odd vector components,
# 2 * dim in dimension size
emb = torch.cat((freqs, freqs), dim=-1)
# emb [seq_length, .., dim]
return rearrange(emb, 'n d -> n 1 1 d')
def _rotate_half(x):
"""
change sign so the last dimension becomes [-odd, +even]
"""
x = rearrange(x, '... (j d) -> ... j d', j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(t, freqs):
"""
input tensor t is of shape [seq_length, ..., dim]
    rotary positional embedding tensor freqs is of shape [seq_length, ..., dim]
check https://kexue.fm/archives/8265 for detailed formulas
"""
rot_dim = freqs.shape[-1]
# ideally t_pass is empty so rotary pos embedding is applied to all tensor t
t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
# first part is cosine component
# second part is sine component, need to change signs with _rotate_half method
t = (t * freqs.cos()) + (_rotate_half(t) * freqs.sin())
return torch.cat((t, t_pass), dim=-1)
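# Hedged usage sketch (illustrative only, not part of the original module): shows the
# expected shapes when rotating a [seq_len, batch, heads, dim] tensor. All sizes are
# made-up example values.
if __name__ == "__main__":
    _dim = 8
    _rope = RotaryEmbedding(_dim)
    _freqs = _rope(max_seq_len=16)      # [16, 1, 1, 8]
    _t = torch.randn(16, 2, 4, _dim)    # [seq_len, batch, heads, dim]
    _t_rotated = apply_rotary_pos_emb(_t, _freqs)
    print(_t_rotated.shape)             # torch.Size([16, 2, 4, 8])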
|
gkucsko/NeMo
|
nemo/collections/nlp/modules/common/text_generation_utils.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating text."""
import torch
import torch.nn.functional as F
from nemo.collections.common.tokenizers.tabular_tokenizer import TabularTokenizer
from nemo.collections.nlp.modules.common.megatron.utils import get_ltor_masks_and_position_ids
from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam, OutputType, SamplingParam
from nemo.utils import AppState
try:
from apex.transformer import parallel_state, tensor_parallel
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_pipelining_without_interleaving import (
forward_backward_pipelining_without_interleaving,
)
from apex.transformer.pipeline_parallel.schedules.fwd_bwd_no_pipelining import forward_backward_no_pipelining
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
__all__ = [
"get_default_sampling_params",
"get_default_length_params",
"megatron_gpt_generate",
"get_computeprob_response",
"generate",
]
def get_default_sampling_params():
    # default sampling params use greedy decoding
sampling_params: SamplingParam = {
"use_greedy": True,
"temperature": 1.0,
"top_k": 0,
"top_p": 1.0,
"repetition_penalty": 1.0,
"add_BOS": True,
"all_probs": False,
"compute_logprob": False,
}
return sampling_params
def get_default_length_params():
    # default length params for generation
length_params: LengthParam = {"min_length": 0, "max_length": 30}
return length_params
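# Hedged usage sketch (illustrative only): the default parameter dictionaries can be
# copied and adjusted before being handed to `megatron_gpt_generate`. `model` and
# `tokenizer` stand in for an already-loaded NeMo GPT model and its tokenizer.
#
#   sampling_params = get_default_sampling_params()
#   sampling_params["use_greedy"] = False
#   sampling_params["top_p"] = 0.9
#   length_params = get_default_length_params()
#   length_params["max_length"] = 64
#   response = megatron_gpt_generate(model, ["Hello, my name is"], tokenizer, length_params, sampling_params)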
def megatron_gpt_generate(model, inputs, tokenizer, length_params, sampling_params, task_ids=None):
# reproduce the old compute_prob method
# a very special case
if sampling_params['compute_logprob']:
# need to overwrite some configuration, make it immutable
sampling_params = sampling_params.copy()
length_params = length_params.copy()
length_params['max_length'] = 1
sampling_params['all_probs'] = True
sampling_params["add_BOS"] = False
        sampling_params['use_greedy'] = True
response = generate(
model,
inputs=inputs,
task_ids=task_ids,
tokens_to_generate=length_params['max_length'],
all_probs=sampling_params['all_probs'],
temperature=sampling_params['temperature'],
add_BOS=sampling_params['add_BOS'],
top_k=sampling_params['top_k'],
top_p=sampling_params['top_p'],
greedy=sampling_params['use_greedy'],
repetition_penalty=sampling_params['repetition_penalty'],
min_tokens_to_generate=length_params['min_length'],
)
compute_prob_response = get_computeprob_response(tokenizer, response, inputs)
return compute_prob_response
if isinstance(inputs, (list, tuple)):
if isinstance(inputs[0], (str, torch.Tensor)):
output = generate(
model,
inputs=inputs,
task_ids=task_ids,
tokens_to_generate=length_params['max_length'],
all_probs=sampling_params['all_probs'],
temperature=sampling_params['temperature'],
add_BOS=sampling_params['add_BOS'],
top_k=sampling_params['top_k'],
top_p=sampling_params['top_p'],
greedy=sampling_params['use_greedy'],
repetition_penalty=sampling_params['repetition_penalty'],
min_tokens_to_generate=length_params['min_length'],
)
return output
elif isinstance(inputs[0], dict):
raise NotImplementedError("json object not implemented")
else:
raise NotImplementedError("unknown type is not implemented")
else:
raise NotImplementedError("unknown type is not implemented")
def get_computeprob_response(tokenizer, response, inputs):
compute_prob_response = {}
new_token_ids = []
new_tokens = []
new_texts = []
log_probs = []
full_logprobs = []
offsets = []
for batch_id in range(len(response['tokens'])):
if isinstance(inputs, (list, tuple)):
if isinstance(inputs[0], str):
new_token_id = tokenizer.text_to_ids(inputs[batch_id])
new_text = inputs[batch_id]
token_len = len(new_token_id)
elif isinstance(inputs[0], torch.Tensor):
token_len = int(inputs[1][batch_id].item())
new_token_id = inputs[0][batch_id][:token_len].tolist()
new_text = tokenizer.ids_to_text(new_token_id)
new_token_ids.append(new_token_id)
new_tokens.append(response['tokens'][batch_id][:token_len])
new_texts.append(new_text)
log_probs.append(response['logprob'][batch_id][:token_len])
full_logprobs.append(response['full_logprob'][batch_id][:token_len])
offsets.append(response['offsets'][batch_id][:-1])
compute_prob_response['sentences'] = new_texts
compute_prob_response['tokens'] = new_tokens
compute_prob_response['token_ids'] = new_token_ids
compute_prob_response['logprob'] = log_probs
compute_prob_response['full_logprob'] = full_logprobs
compute_prob_response['offsets'] = offsets
return compute_prob_response
def get_batch(model, tokenizer, context_tokens):
"""Generate batch from context tokens."""
# Move to GPU.
tokens = context_tokens.contiguous().cuda()
    # Get the attention mask and position ids.
attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
tokens,
tokenizer.eos_id,
model.cfg.get('reset_position_ids', False),
model.cfg.get('reset_attention_mask', False),
model.cfg.get('eod_mask_loss', False),
)
return tokens, attention_mask, position_ids
def tab_logits(logits, min_id, max_id, filter_value=-float('Inf')):
logits[:, :min_id] = filter_value
logits[:, max_id:] = filter_value
return logits
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" This function has been mostly taken from huggingface conversational
ai code at
https://medium.com/huggingface/how-to-build-a-state-of-the-art-
conversational-ai-with-transfer-learning-2d818ac26313 """
if top_k > 0:
# Remove all tokens with a probability less than the
# last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
        # Sort the logits in descending order along the vocab dimension
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token
# above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
for i in range(sorted_indices.size(0)):
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
logits[i][indices_to_remove] = filter_value
return logits
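# Hedged worked example (illustrative only): with logits [[1.0, 2.0, 3.0, 4.0]] and
# top_k=2, every entry except the two largest (3.0 and 4.0) is set to -inf, so a
# subsequent softmax/sampling step can only pick those two tokens; top_p filtering
# behaves analogously but keeps the smallest set of most-probable tokens whose
# cumulative probability exceeds top_p.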
def repetition_penalty(logits, repetition_penalty, used_tokens):
""" Implement the repetition penalty, check paper
https://arxiv.org/pdf/1909.05858.pdf
"""
if used_tokens is not None and repetition_penalty != 1.0:
logits_update = torch.gather(logits, 1, used_tokens)
logits = torch.scatter(logits, 1, used_tokens, logits_update / repetition_penalty)
return logits
def pad_batch(batch, pad_id, max_len):
context_lengths = []
max_context_length = max([len(tokens) for tokens in batch])
for tokens in batch:
context_length = len(tokens)
if context_length < max_context_length + max_len:
tokens.extend([pad_id] * (max_context_length + max_len - context_length))
context_lengths.append(context_length)
return batch, context_lengths
def tokenize_batch(tokenizer, sentences, max_len, add_BOS):
if add_BOS:
context_tokens = [[tokenizer.eos_id] + tokenizer.text_to_ids(s) for s in sentences]
else:
context_tokens = [tokenizer.text_to_ids(s) for s in sentences]
context_tokens, context_lengths = pad_batch(context_tokens, tokenizer.eos_id, max_len)
context_tokens_tensor = torch.cuda.LongTensor(context_tokens)
context_length_tensor = torch.cuda.LongTensor(context_lengths)
return context_tokens_tensor, context_length_tensor
def send_generate_info(
context_tokens_tensor,
context_length_tensor,
task_ids,
tokens_to_generate,
all_probs,
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
):
"""
Needs to be synced up with receive_generate_info
"""
# Send the sizes of the tensors
input_info = [
context_tokens_tensor.size(0), # batch_size
context_tokens_tensor.size(1), # seq_len
tokens_to_generate,
all_probs,
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
]
input_info_tensor = torch.cuda.FloatTensor(input_info)
torch.distributed.broadcast(input_info_tensor, 0)
# Send variables to all ranks
torch.distributed.broadcast(context_length_tensor, 0)
torch.distributed.broadcast(context_tokens_tensor, 0)
torch.distributed.broadcast(task_ids, 0)
def receive_generate_info():
"""
Needs to be synced up with send_generate_info
"""
input_info_tensor = torch.empty(10, dtype=torch.float32, device=torch.cuda.current_device())
torch.distributed.broadcast(input_info_tensor, 0)
batch_size = int(input_info_tensor[0].item())
seq_len = int(input_info_tensor[1].item())
tokens_to_generate = int(input_info_tensor[2].item())
all_probs = bool(input_info_tensor[3].item())
temperature = float(input_info_tensor[4].item())
top_k = int(input_info_tensor[5].item())
top_p = float(input_info_tensor[6].item())
greedy = bool(input_info_tensor[7].item())
repetition_penalty = float(input_info_tensor[8].item())
min_tokens_to_generate = int(input_info_tensor[9].item())
context_length_tensor = torch.empty(batch_size, dtype=torch.int64, device=torch.cuda.current_device())
context_tokens_tensor = torch.empty(batch_size, seq_len, dtype=torch.int64, device=torch.cuda.current_device())
task_ids = torch.empty(batch_size, dtype=torch.int64, device=torch.cuda.current_device())
# Send variables to all ranks
torch.distributed.broadcast(context_length_tensor, 0)
torch.distributed.broadcast(context_tokens_tensor, 0)
torch.distributed.broadcast(task_ids, 0)
return (
context_length_tensor,
context_tokens_tensor,
task_ids,
tokens_to_generate,
all_probs,
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
)
def synced_generate(
model,
context_tokens_tensor,
context_length_tensor,
task_ids,
tokens_to_generate,
all_probs,
temperature,
top_k=0,
top_p=0.0,
greedy=False,
repetition_penalty=1.2,
min_tokens_to_generate=0,
):
context_length = context_length_tensor.min().item()
tokenizer = model.tokenizer
tokens, attention_mask, position_ids = get_batch(model, tokenizer, context_tokens_tensor)
if isinstance(tokenizer, TabularTokenizer):
batch_token_iterator = tab_sample_sequence_batch(
model,
context_tokens_tensor,
context_length_tensor,
attention_mask,
position_ids,
tokens_to_generate,
all_probs,
temperature=temperature,
)
else:
batch_token_iterator = sample_sequence_batch(
model,
context_tokens_tensor,
context_length_tensor,
task_ids,
attention_mask,
position_ids,
tokens_to_generate,
all_probs,
temperature=temperature,
extra={
"top_p": top_p,
"top_k": top_k,
"greedy": greedy,
"repetition_penalty": repetition_penalty,
"min_tokens_to_generate": min_tokens_to_generate,
},
)
for tokens, lengths, output_logits, full_logits in batch_token_iterator:
context_length += 1
if parallel_state.is_pipeline_last_stage():
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
torch.distributed.broadcast(output_logits, src, group)
if all_probs:
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
torch.distributed.broadcast(full_logits, src, group)
else:
if parallel_state.is_pipeline_first_stage():
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
output_logits = torch.empty(
tokens.size(0), context_length - 1, dtype=torch.float32, device=torch.device("cuda")
)
torch.distributed.broadcast(output_logits, src, group)
if all_probs:
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
full_logits = torch.empty(
tokens.size(0),
context_length - 1,
model.padded_vocab_size,
dtype=torch.float32,
device=torch.device("cuda"),
)
torch.distributed.broadcast(full_logits, src, group)
if tokens is not None:
return tokens[:, :context_length], output_logits, full_logits
def generate(
model,
inputs=None,
task_ids=None,
tokens_to_generate=0,
all_probs=False,
temperature=1.0,
add_BOS=False,
top_k=0,
top_p=0.0,
greedy=False,
repetition_penalty=1.0,
min_tokens_to_generate=0,
) -> OutputType:
"""
Args:
model (NLPModel): text generative model
        inputs (Union[tuple, List[str]]): if it is a tuple, it is assumed to be (context_tokens_tensor, context_length_tensor). Otherwise it is a list of prompt text strings
task_ids (Tensor): used to specify that task when generating with p-tuned/prompt-tuned models (optional, default=None)
tokens_to_generate (int): The maximum length of the tokens to be generated.
all_probs (bool): Return the log prob for all the tokens
temperature (float): sampling temperature
        add_BOS (bool): add the bos token at the beginning of the prompt
top_k (int): The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (float): If set to float < 1, only the most probable tokens with probabilities that add up to top_p or higher are kept for generation.
        greedy (bool): Whether to use greedy decoding instead of sampling
repetition_penalty (float): The parameter for repetition penalty. 1.0 means no penalty
min_tokens_to_generate (int): The minimum length of the tokens to be generated
Returns:
OutputType: It generates the output in a dictionary type. It has the following keys:
sentences: List[str], output sentences
            tokens: List[List[str]], output sentences broken into tokens
logprob: List[Tensor], log prob of generated tokens
full_logprob: List[Tensor], log prob of all the tokens in the vocab
token_ids: List[Tensor], output sentence token ids
            offsets: List[List[int]] # list of token start positions in the text
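    Example:
        A minimal illustrative call (a sketch only; it assumes `model` is an already-restored NeMo GPT model,
        and checkpoint loading is environment-specific and not shown here):
            output = generate(model, inputs=["Deep learning is"], tokens_to_generate=16, top_k=50, temperature=0.9)
            print(output['sentences'][0])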
"""
model.eval()
tokenizer = model.tokenizer
if torch.distributed.get_rank() == 0:
if isinstance(inputs, tuple):
context_tokens_tensor, context_length_tensor = inputs
else:
context_tokens_tensor, context_length_tensor = tokenize_batch(
tokenizer, inputs, tokens_to_generate, add_BOS
)
if task_ids is None:
# Make a dummy tensor of -1s that won't be used during generation
task_ids = torch.neg(torch.ones(context_tokens_tensor.size(0), dtype=torch.int64))
task_ids = task_ids.to(device=context_tokens_tensor.get_device())
send_generate_info(
context_tokens_tensor,
context_length_tensor,
task_ids,
tokens_to_generate,
all_probs,
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
)
else:
(
context_length_tensor,
context_tokens_tensor,
task_ids,
tokens_to_generate,
all_probs,
temperature,
top_k,
top_p,
greedy,
repetition_penalty,
min_tokens_to_generate,
) = receive_generate_info()
output = synced_generate(
model,
context_tokens_tensor,
context_length_tensor,
task_ids,
tokens_to_generate,
all_probs,
temperature,
top_k=top_k,
top_p=top_p,
greedy=greedy,
repetition_penalty=repetition_penalty,
min_tokens_to_generate=min_tokens_to_generate,
)
if output is not None:
decode_tokens, output_logits, full_logits = output
resp_sentences = []
resp_sentences_seg = []
decode_tokens = decode_tokens.cpu().numpy().tolist()
for decode_token in decode_tokens:
sentence = tokenizer.ids_to_text(decode_token)
resp_sentences.append(sentence)
if not isinstance(tokenizer, TabularTokenizer):
words = []
for token in decode_token:
# Skip any soft prompt pseudo tokens
if token not in tokenizer.tokenizer.decoder:
continue
word = tokenizer.tokenizer.decoder[token]
word = bytearray([tokenizer.tokenizer.byte_decoder[c] for c in word]).decode(
'utf-8', errors='replace'
)
words.append(word)
resp_sentences_seg.append(words)
else:
words = tokenizer.text_to_tokens(sentence)
resp_sentences_seg.append(words)
# offsets calculation
all_offsets = []
for item in resp_sentences_seg:
offsets = [0]
for index, token in enumerate(item):
if index != len(item) - 1:
offsets.append(len(token) + offsets[-1])
all_offsets.append(offsets)
output = {}
output['sentences'] = resp_sentences
output['tokens'] = resp_sentences_seg
output['logprob'] = output_logits
output['full_logprob'] = full_logits
output['token_ids'] = decode_tokens
output['offsets'] = all_offsets
return output
def switch(val1, val2, boolean):
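    # Elementwise select: where `boolean` is 1 (True) take `val2`, otherwise keep `val1`.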
boolean = boolean.type_as(val1)
return (1 - boolean) * val1 + boolean * val2
def forward_step(model, batch, tensor_shape):
# Importing here to avoid circular import errors
from nemo.collections.nlp.models.language_modeling import MegatronGPTPromptLearningModel
    # Should call MegatronGPTPromptLearningModel's forward method
if isinstance(model, MegatronGPTPromptLearningModel):
forward_model = model
# Should call GPTModel's forward method
else:
forward_model = model.model
if model.cfg.get('pipeline_model_parallel_size', 1) > 1:
output_tensor = forward_backward_pipelining_without_interleaving(
forward_step_func=model.get_forward_output_only_func(),
batch=batch,
model=forward_model,
forward_only=True,
tensor_shape=tensor_shape,
dtype=model.autocast_dtype,
)
else:
output_tensor = forward_backward_no_pipelining(
forward_step_func=model.get_forward_output_only_func(),
batch=batch,
model=forward_model,
forward_only=True,
tensor_shape=tensor_shape,
dtype=model.autocast_dtype,
)
return output_tensor
def sample_sequence_batch(
model,
context_tokens,
context_lengths,
task_ids,
attention_mask,
position_ids,
tokens_to_generate,
all_probs=False,
type_ids=None,
temperature=None,
extra={},
):
# Importing here to avoid circular import errors
from nemo.collections.nlp.models.language_modeling import MegatronGPTPromptLearningModel
app_state = AppState()
micro_batch_size = context_tokens.shape[0]
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=micro_batch_size,
micro_batch_size=micro_batch_size,
data_parallel_size=1,
)
tokenizer = model.tokenizer
model.eval()
with torch.no_grad():
context_length = context_lengths.min().item()
# added eos_id to support the function generate_samples_eval that passes
        # eos_id as an argument and needs termination when that id is found.
eod_id = tokenizer.eos_id
counter = 0
batch_size = context_tokens.size(0)
is_done = torch.zeros([batch_size]).byte().cuda()
tokens = context_tokens
output_logits = None
all_generated_indices = None # used to track all generated indices
# Generate enough tokens for the longest sequence
maxlen = tokens_to_generate + context_lengths.max().item()
if maxlen > model.cfg.encoder_seq_length + 1:
maxlen = model.cfg.encoder_seq_length + 1
lengths = torch.ones([batch_size]).long().cuda() * maxlen
while context_length < maxlen:
# types2use = None
if counter == 0:
# Allocate memory for the entire context.
set_inference_key_value_memory = True
tokens2use = tokens[:, :context_length]
positions2use = position_ids[:, :context_length]
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, :context_length]
else:
# Set this to false so the memory is not reallocated.
set_inference_key_value_memory = False
tokens2use = tokens[:, context_length - 1].view(batch_size, -1)
positions2use = position_ids[:, context_length - 1].view(batch_size, -1)
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, context_length - 1].view(batch_size, -1)
attention_mask_repeat = torch.concat([attention_mask for _ in range(micro_batch_size)])
setkey_value_array = torch.tensor(
[set_inference_key_value_memory] * micro_batch_size, device=torch.cuda.current_device()
)
len_array = torch.tensor([maxlen] * micro_batch_size, device=torch.cuda.current_device())
# Only prompt learning models will have a prompt table, and require task ids
if isinstance(model, MegatronGPTPromptLearningModel):
batch = [tokens2use, attention_mask_repeat, positions2use, task_ids, setkey_value_array, len_array]
tensor_shape = [tokens2use.shape[1], micro_batch_size, model.frozen_model.cfg.hidden_size]
else:
batch = [tokens2use, attention_mask_repeat, positions2use, setkey_value_array, len_array]
tensor_shape = [tokens2use.shape[1], micro_batch_size, model.cfg.hidden_size]
output = forward_step(model, batch, tensor_shape)
if parallel_state.is_pipeline_last_stage():
output = output[0]['logits'].float()
output = tensor_parallel.gather_from_tensor_model_parallel_region(output)
assert output is not None
output = output.float()
logits = output[:, -1].view(batch_size, -1).contiguous()
# make sure it will generate at least min_length
min_length = extra.get('min_tokens_to_generate', 0)
if min_length > 0:
within_min_length = (context_length - context_lengths) < min_length
logits[within_min_length, eod_id] = -float('Inf')
# make sure it won't sample outside the vocab_size range
logits[:, tokenizer.vocab_size :] = -float('Inf')
if extra.get('greedy', False):
prev = torch.argmax(logits, dim=-1).view(-1)
else:
logits = logits.float()
logits /= temperature
                    # handle repetition penalty
logits = repetition_penalty(logits, extra.get('repetition_penalty', 1.2), all_generated_indices)
logits = top_k_logits(logits, top_k=extra.get('top_k', 0), top_p=extra.get('top_p', 0.9))
log_probs = F.softmax(logits, dim=-1)
prev = torch.multinomial(log_probs, num_samples=1).view(-1)
started = context_lengths <= context_length
# Clamp the predicted out of vocabulary tokens
prev = torch.clamp(prev, max=tokenizer.vocab_size - 1)
new_tokens = switch(tokens[:, context_length].view(-1), prev, started)
# Replace sampled tokens w/ done token if EOD has already been sampled
new_tokens = switch(new_tokens, eod_id, is_done)
# Replace special soft prompt token ids with unk token ids
if isinstance(model, MegatronGPTPromptLearningModel):
pseudo_token_ids_start = model.pseudo_token_ids_start
new_tokens[(new_tokens >= pseudo_token_ids_start)] = tokenizer.unk_id
tokens[:, :context_length][
(tokens[:, :context_length] >= pseudo_token_ids_start)
] = tokenizer.unk_id
# Insert either new predicted or next prompt token
tokens[:, context_length] = new_tokens
if output_logits is None:
output = F.log_softmax(output[:, :context_length, :], 2)
indices = torch.unsqueeze(tokens[:, 1 : context_length + 1], 2)
output_logits = torch.gather(output, 2, indices).squeeze(2)
all_generated_indices = indices[:, :, 0]
if all_probs:
full_logits = output
else:
output = F.log_softmax(output, 2)
indices = torch.unsqueeze(new_tokens, 1).unsqueeze(2)
new_output_logits = torch.gather(output, 2, indices).squeeze(2)
# TODO(rprenger) we're copying output_logits every time. Should pre-allocate
output_logits = torch.cat([output_logits, new_output_logits], 1)
all_generated_indices = torch.cat([all_generated_indices, indices[:, :, 0]], 1)
if all_probs:
full_logits = torch.cat([full_logits, output], 1)
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
torch.distributed.broadcast(new_tokens, src, group)
done_token = (prev == eod_id).byte() & started.byte()
just_finished = (done_token & ~is_done).bool()
lengths[just_finished.view(-1)] = context_length
is_done = is_done | done_token
done = torch.all(is_done)
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_pipeline_model_parallel_group()
torch.distributed.broadcast(done, src, group)
if all_probs:
yield tokens, lengths, output_logits, full_logits
else:
yield tokens, lengths, output_logits, None
else:
if parallel_state.is_pipeline_first_stage():
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
new_tokens = torch.empty_like(tokens[:, context_length])
torch.distributed.broadcast(new_tokens, src, group)
tokens[:, context_length] = new_tokens
yield tokens, None, None, None
else:
yield None, None, None, None
done = torch.cuda.ByteTensor([0])
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_pipeline_model_parallel_group()
torch.distributed.broadcast(done, src, group)
context_length += 1
counter += 1
if done:
break
def tab_sample_sequence_batch(
model,
context_tokens,
context_lengths,
attention_mask,
position_ids,
tokens_to_generate,
all_probs=True,
type_ids=None,
temperature=None,
):
app_state = AppState()
micro_batch_size = context_tokens.shape[0]
_reconfigure_microbatch_calculator(
rank=app_state.global_rank,
rampup_batch_size=None,
global_batch_size=micro_batch_size,
micro_batch_size=micro_batch_size,
data_parallel_size=1,
)
tokenizer = model.tokenizer
sizes = tokenizer.code_column.sizes
tokens_per_row = sum(sizes) + 1
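    # each encoded table row consists of sum(sizes) column tokens plus one end-of-row delimiter token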
columns = tokenizer.code_column.columns
num_columns = len(columns)
tokenid_range = []
for i in range(num_columns):
tokenid_range.extend(tokenizer.code_column.get_range(i))
model.eval()
with torch.no_grad():
context_length = context_lengths.min().item()
context = context_tokens[:, :context_length]
# the context may start in the middle of the row,
# calculate the offset according to the position of '\n' or '<|endoftext|>'
positions = torch.where(context == tokenizer.eor)[1]
if len(positions) == 0:
positions = torch.where(context == tokenizer.eod)[1]
if len(positions) != 0:
max_position = positions.max().item()
            # TODO: need to make sure contexts of different batches have the same offset lengths;
# otherwise, need to calculate offset per batch_id
offset = (context_length - max_position - 1) % tokens_per_row
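            # Illustration (hypothetical numbers): with tokens_per_row=5, the last row delimiter at
            # position max_position=7 and context_length=10, offset = (10 - 7 - 1) % 5 = 2, i.e. two
            # tokens of the current (partial) row are already present in the context.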
else:
offset = 0
eod_id = tokenizer.eos_id
counter = 0
batch_size = context_tokens.size(0)
is_done = torch.zeros([batch_size]).byte().cuda()
tokens = context_tokens
output_logits = None
# Generate enough tokens for the longest sequence
maxlen = tokens_to_generate + context_lengths.max().item()
if maxlen > model.cfg.encoder_seq_length:
maxlen = model.cfg.encoder_seq_length
lengths = torch.ones([batch_size]).long().cuda() * maxlen
while context_length < maxlen:
# types2use = None
if counter == 0:
# Allocate memory for the entire context.
set_inference_key_value_memory = True
tokens2use = tokens[:, :context_length]
positions2use = position_ids[:, :context_length]
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, :context_length]
else:
# Set this to false so the memory is not reallocated.
set_inference_key_value_memory = False
tokens2use = tokens[:, context_length - 1].view(batch_size, -1)
positions2use = position_ids[:, context_length - 1].view(batch_size, -1)
# not using type2use. uncomment it if it is used
# if type_ids is not None:
# types2use = type_ids[:, context_length - 1].view(batch_size, -1)
# micro_batch_size = 2
attention_mask_repeat = torch.concat([attention_mask for _ in range(micro_batch_size)])
setkey_value_array = torch.tensor(
[set_inference_key_value_memory] * micro_batch_size, device=torch.cuda.current_device()
)
len_array = torch.tensor([maxlen] * micro_batch_size, device=torch.cuda.current_device())
batch = [tokens2use, attention_mask_repeat, positions2use, setkey_value_array, len_array]
tensor_shape = [tokens2use.shape[1], micro_batch_size, model.cfg.hidden_size]
output = forward_step(model, batch, tensor_shape)
if parallel_state.is_pipeline_last_stage():
output = output[0]['logits'].float()
output = tensor_parallel.gather_from_tensor_model_parallel_region(output)
assert output is not None
output = output.float()
logits = output[:, -1].view(batch_size, -1).contiguous()
token_in_row = (counter + offset) % tokens_per_row
logits = logits.float()
logits /= temperature
if token_in_row == tokens_per_row - 1:
# line break
eor_id = tokenizer.eor
eod_id = tokenizer.eos_id
min_id = min(eor_id, eod_id)
max_id = max(eor_id, eod_id) + 1
logits = tab_logits(logits, min_id, max_id)
else:
# limit the range
min_id, max_id = tokenid_range[token_in_row]
logits = tab_logits(logits, min_id, max_id)
log_probs = F.softmax(logits, dim=-1)
prev = torch.multinomial(log_probs, num_samples=1).view(-1)
started = context_lengths <= context_length
# Clamp the out of vocabulary tokens.
prev = torch.clamp(prev, max=tokenizer.vocab_size - 1)
new_tokens = switch(tokens[:, context_length].view(-1), prev, started)
tokens[:, context_length] = new_tokens
if output_logits is None:
output_context = F.log_softmax(output[:, :context_length, :], 2)
indices = torch.unsqueeze(tokens[:, 1 : context_length + 1], 2)
output_logits = torch.gather(output_context, 2, indices).squeeze(2)
if all_probs:
full_logits = output_context
else:
output_context = F.log_softmax(output, 2)
indices = torch.unsqueeze(new_tokens, 1).unsqueeze(2)
new_output_logits = torch.gather(output_context, 2, indices).squeeze(2)
# TODO(rprenger) we're copying output_logits every time. Should pre-allocate
output_logits = torch.cat([output_logits, new_output_logits], 1)
if all_probs:
full_logits = torch.cat([full_logits, output_context], 1)
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
torch.distributed.broadcast(new_tokens, src, group)
done_token = (prev == eod_id).byte() & started.byte()
just_finished = (done_token & ~is_done).bool()
lengths[just_finished.view(-1)] = context_length
is_done = is_done | done_token
done = torch.all(is_done)
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_pipeline_model_parallel_group()
torch.distributed.broadcast(done, src, group)
if all_probs:
yield tokens, lengths, output_logits, full_logits
else:
yield tokens, lengths, output_logits, None
else:
if parallel_state.is_pipeline_first_stage():
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_embedding_group()
new_tokens = torch.empty_like(tokens[:, context_length])
torch.distributed.broadcast(new_tokens, src, group)
tokens[:, context_length] = new_tokens
yield tokens, None, None, None
else:
yield None, None, None, None
done = torch.cuda.ByteTensor([0])
src = parallel_state.get_pipeline_model_parallel_last_rank()
group = parallel_state.get_pipeline_model_parallel_group()
torch.distributed.broadcast(done, src, group)
context_length += 1
counter += 1
if done:
break
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used after GIZA++ alignment to extract final alignments for each semiotic class.
"""
import re
from argparse import ArgumentParser
from typing import List, Tuple
import numpy as np
parser = ArgumentParser(description='Extract final alignments from GIZA++ alignments')
parser.add_argument('--mode', type=str, required=True, help='tn or itn')
parser.add_argument('--giza_dir', type=str, required=True, help='Path to folder with GIZA++ alignment')
parser.add_argument(
'--giza_suffix', type=str, required=True, help='suffix of alignment files, e.g. \"Ahmm.5\", \"A3.final\"'
)
parser.add_argument('--out_filename', type=str, required=True, help='Output file')
parser.add_argument('--lang', type=str, required=True, help="Language")
args = parser.parse_args()
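# Example invocation (illustrative only; the directory, suffix and output file name below are placeholders):
#   python extract_giza_alignments.py --mode itn --giza_dir giza_work_dir/date \
#       --giza_suffix A3.final --out_filename itn_alignments.out --lang en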
def fill_alignment_matrix(
fline2: str, fline3: str, gline2: str, gline3: str
) -> Tuple[np.ndarray, List[str], List[str]]:
"""Parse Giza++ direct and reverse alignment results and represent them as an alignment matrix
Args:
fline2: e.g. "_2 0 1 4_"
fline3: e.g. "NULL ({ }) twenty ({ 1 }) fourteen ({ 2 3 4 })"
gline2: e.g. "twenty fourteen"
gline3: e.g. "NULL ({ }) _2 ({ 1 }) 0 ({ }) 1 ({ }) 4_ ({ 2 })"
Returns:
        matrix: a numpy array of shape (src_len, dst_len) filled with [0, 1, 2, 3], where 3 means a reliable alignment
            (the corresponding words were aligned to one another in both the direct and reverse alignment runs),
            1 and 2 mean that the words were aligned in only one direction, and 0 means no alignment.
srctokens: e.g. ["twenty", "fourteen"]
dsttokens: e.g. ["_2", "0", "1", "4_"]
For example, the alignment matrix for the above example may look like:
[[3, 0, 0, 0]
[0, 2, 2, 3]]
"""
if fline2 is None or gline2 is None or fline3 is None or gline3 is None:
raise ValueError(f"empty params")
srctokens = gline2.split()
dsttokens = fline2.split()
pattern = r"([^ ]+) \(\{ ([^\(\{\}\)]*) \}\)"
    # widen empty alignment sets "({ })" to "({  })" so they also match the pattern above
    src2dst = re.findall(pattern, fline3.replace("({ })", "({  })"))
    dst2src = re.findall(pattern, gline3.replace("({ })", "({  })"))
if len(src2dst) != len(srctokens) + 1:
raise ValueError(
"length mismatch: len(src2dst)="
+ str(len(src2dst))
+ "; len(srctokens)"
+ str(len(srctokens))
+ "\n"
+ gline2
+ "\n"
+ fline3
)
if len(dst2src) != len(dsttokens) + 1:
raise ValueError(
"length mismatch: len(dst2src)="
+ str(len(dst2src))
+ "; len(dsttokens)"
+ str(len(dsttokens))
+ "\n"
+ fline2
+ "\n"
+ gline3
)
matrix = np.zeros((len(srctokens), len(dsttokens)))
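    # a direct-alignment link contributes 2 and a reverse-alignment link contributes 1,
    # so a cell value of 3 means the word pair was aligned in both directions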
for i in range(1, len(src2dst)):
token, to_str = src2dst[i]
if to_str == "":
continue
to = list(map(int, to_str.split()))
for t in to:
matrix[i - 1][t - 1] = 2
for i in range(1, len(dst2src)):
token, to_str = dst2src[i]
if to_str == "":
continue
to = list(map(int, to_str.split()))
for t in to:
matrix[t - 1][i - 1] += 1
return matrix, srctokens, dsttokens
def check_monotonicity(matrix: np.ndarray) -> bool:
"""Check if alignment is monotonous - i.e. the relative order is preserved (no swaps).
Args:
matrix: a numpy array of shape (src_len, dst_len) filled with [0, 1, 2, 3], where 3 means a reliable alignment
the corresponding words were aligned to one another in direct and reverse alignment runs, 1 and 2 mean that the
words were aligned only in one direction, 0 - no alignment.
"""
is_sorted = lambda k: np.all(k[:-1] <= k[1:])
a = np.argwhere(matrix == 3)
b = np.argwhere(matrix == 2)
c = np.vstack((a, b))
d = c[c[:, 1].argsort()] # sort by second column (less important)
d = d[d[:, 0].argsort(kind="mergesort")]
return is_sorted(d[:, 1])
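# Illustrative sanity check (not part of the original pipeline):
#   check_monotonicity(np.array([[3, 0], [0, 3]]))  # -> True, order preserved
#   check_monotonicity(np.array([[0, 3], [3, 0]]))  # -> False, a swap is detected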
def get_targets(matrix: np.ndarray, dsttokens: List[str]) -> List[str]:
"""Join some of the destination tokens, so that their number becomes the same as the number of input words.
Unaligned tokens tend to join to the left aligned token.
Args:
        matrix: a numpy array of shape (src_len, dst_len) filled with [0, 1, 2, 3], where 3 means a reliable alignment
            (the corresponding words were aligned to one another in both the direct and reverse alignment runs),
            1 and 2 mean that the words were aligned in only one direction, and 0 means no alignment.
dsttokens: e.g. ["_2", "0", "1", "4_"]
Returns:
targets: list of string tokens, with one-to-one correspondence to matrix.shape[0]
Example:
If we get
matrix=[[3, 0, 0, 0]
[0, 2, 2, 3]]
dsttokens=["_2", "0", "1", "4_"]
it gives
targets = ["_201", "4_"]
        This is actually a mistake (the correct split would be ["_20", "14_"]); it is corrected later by regular expressions.
"""
targets = []
last_covered_dst_id = -1
for i in range(len(matrix)):
dstlist = []
for j in range(last_covered_dst_id + 1, len(dsttokens)):
# matrix[i][j] == 3: safe alignment point
if matrix[i][j] == 3 or (
j == last_covered_dst_id + 1
and np.all(matrix[i, :] == 0) # if the whole line does not have safe points
and np.all(matrix[:, j] == 0) # and the whole column does not have safe points, match them
):
if len(targets) == 0: # if this is first safe point, attach left unaligned columns to it, if any
for k in range(0, j):
if np.all(matrix[:, k] == 0): # if column k does not have safe points
dstlist.append(dsttokens[k])
else:
break
dstlist.append(dsttokens[j])
last_covered_dst_id = j
for k in range(j + 1, len(dsttokens)):
if np.all(matrix[:, k] == 0): # if column k does not have safe points
dstlist.append(dsttokens[k])
last_covered_dst_id = k
else:
break
if len(dstlist) > 0:
if args.mode == "tn":
targets.append("_".join(dstlist))
else:
targets.append("".join(dstlist))
else:
targets.append("<DELETE>")
return targets
def get_targets_from_back(matrix: np.ndarray, dsttokens: List[str]) -> List[str]:
"""Join some of the destination tokens, so that their number becomes the same as the number of input words.
Unaligned tokens tend to join to the right aligned token.
Args:
        matrix: a numpy array of shape (src_len, dst_len) filled with [0, 1, 2, 3], where 3 means a reliable alignment
            (the corresponding words were aligned to one another in both the direct and reverse alignment runs),
            1 and 2 mean that the words were aligned in only one direction, and 0 means no alignment.
dsttokens: e.g. ["_2", "0", "1", "4_"]
Returns:
targets: list of string tokens, with one-to-one correspondence to matrix.shape[0]
Example:
If we get
matrix=[[3, 0, 0, 0]
[0, 2, 2, 3]]
dsttokens=["_2", "0", "1", "4_"]
it gives
targets = ["_2", "014_"]
        This is actually a mistake (the correct split would be ["_20", "14_"]); it is corrected later by regular expressions.
"""
targets = []
last_covered_dst_id = len(dsttokens)
for i in range(len(matrix) - 1, -1, -1):
dstlist = []
for j in range(last_covered_dst_id - 1, -1, -1):
if matrix[i][j] == 3 or (
j == last_covered_dst_id - 1 and np.all(matrix[i, :] == 0) and np.all(matrix[:, j] == 0)
):
if len(targets) == 0:
for k in range(len(dsttokens) - 1, j, -1):
if np.all(matrix[:, k] == 0):
dstlist.append(dsttokens[k])
else:
break
dstlist.append(dsttokens[j])
last_covered_dst_id = j
for k in range(j - 1, -1, -1):
if np.all(matrix[:, k] == 0):
dstlist.append(dsttokens[k])
last_covered_dst_id = k
else:
break
if len(dstlist) > 0:
if args.mode == "tn":
targets.append("_".join(list(reversed(dstlist))))
else:
targets.append("".join(list(reversed(dstlist))))
else:
targets.append("<DELETE>")
return list(reversed(targets))
def main() -> None:
g = open(args.giza_dir + "/GIZA++." + args.giza_suffix, "r", encoding="utf-8")
f = open(args.giza_dir + "/GIZA++reverse." + args.giza_suffix, "r", encoding="utf-8")
if args.mode == "tn":
g, f = f, g
out = open(args.giza_dir + "/" + args.out_filename, "w", encoding="utf-8")
cache = {}
good_count, not_mono_count, not_covered_count, exception_count = 0, 0, 0, 0
n = 0
while True:
n += 3
if n % 10000 == 0:
print(n, "lines processed")
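        # each GIZA++ record is a block of three lines: a header line ("# Sentence pair ..."),
        # the plain tokens, and the alignment line of the form "NULL ({ ... }) ..."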
fline1 = f.readline().strip()
fline2 = f.readline().strip()
fline3 = f.readline().strip()
gline1 = g.readline().strip()
gline2 = g.readline().strip()
gline3 = g.readline().strip()
if fline1 == "" and gline1 == "":
break
cache_key = fline1 + "\t" + fline2 + "\t" + gline1 + "\t" + gline2
if cache_key in cache:
out.write(cache[cache_key] + "\n")
continue
if fline1 == "" or gline1 == "" or fline2 == "" or gline2 == "" or fline3 == "" or gline3 == "":
raise ValueError("Empty line: " + str(n))
try:
matrix, srctokens, dsttokens = fill_alignment_matrix(fline2, fline3, gline2, gline3)
except Exception:
print(fline1)
print(fline2)
print(fline3)
print(gline1)
print(gline2)
print(gline3)
exception_count += 1
out_str = "-exception:\t" + fline2 + "\t" + gline2
out.write(out_str + "\n")
continue
else:
            matrix[matrix <= 2] = 0  # keep only reliable alignment points (3 = aligned in both directions)
if check_monotonicity(matrix):
targets = get_targets(matrix, dsttokens)
targets_from_back = get_targets_from_back(matrix, dsttokens)
if len(targets) != len(srctokens):
raise ValueError(
"targets length doesn't match srctokens length: len(targets)="
+ str(len(targets))
+ "; len(srctokens)="
+ str(len(srctokens))
)
leftside_align = " ".join(targets)
rightside_align = " ".join(targets_from_back)
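                # get_targets attaches unaligned destination tokens to the aligned token on their left,
                # get_targets_from_back to the one on their right; the regex rules below then repair
                # common numeric splitting mistakes in both variants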
rightside_align = rightside_align.replace("<DELETE> <DELETE> _11100_", "_11 <DELETE> 100_")
leftside_align = leftside_align.replace("<DELETE> <DELETE> _11100_", "_11 <DELETE> 100_")
# _1 4000_ => _14 000_
# 1 5,000 => 15 ,000
rightside_align = re.sub(r"^_1 ([\d])(,?000)", r"_1\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"^_1 ([\d])(,?000)", r"_1\g<1> \g<2>", leftside_align)
# "_2 10 0_" => "_2 <DELETE> 100_"
rightside_align = re.sub(r"([\d]) 10 0_", r"\g<1> <DELETE> 100_", rightside_align)
leftside_align = re.sub(r"([\d]) 10 0_", r"\g<1> <DELETE> 100_", leftside_align)
if srctokens[0] in [
"ten",
"twenty",
"thirty",
"forty",
"fifty",
"sixty",
"seventy",
"eighty",
"ninety",
]:
# ten thousand sixty _1 00 60_ => _10 0 60_
rightside_align = re.sub(r"^(_\d) 00 (\d)", r"\g<1>0 0 \g<2>", rightside_align)
leftside_align = re.sub(r"^(_\d) 00 (\d)", r"\g<1>0 0 \g<2>", leftside_align)
# ten thousand sixty three _1 0, 06 3_ => _10 ,0 6 3_
rightside_align = re.sub(r"([ _]\d) 0, 0(\d)", r"\g<1>0 ,0 \g<2>", rightside_align)
leftside_align = re.sub(r"([ _]\d) 0, 0(\d)", r"\g<1>0 ,0 \g<2>", leftside_align)
# _3 0, 7 7 4=> _30 , 7 7 4_
rightside_align = re.sub(r"(\d) 0, ", r"\g<1>0 , ", rightside_align)
leftside_align = re.sub(r"(\d) 0, ", r"\g<1>0 , ", leftside_align)
# _1 1, 1 <DELETE> 40_ => _11 , 1 <DELETE> 40_
rightside_align = re.sub(r"1 1, (\d)", r"11 , \g<1>", rightside_align)
leftside_align = re.sub(r"1 1, (\d)", r"11 , \g<1>", leftside_align)
if re.match(r".+надцат", srctokens[0]) or srctokens[0] in [
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
]:
# "_1 <DELETE> 12 14_" -> "_11 <DELETE> 2 14_"
rightside_align = re.sub(
r"^(_1) (<DELETE>) ([\d])([\d])", r"\g<1>\g<3> \g<2> \g<4>", rightside_align
)
leftside_align = re.sub(
r"^(_1) (<DELETE>) ([\d])([\d])", r"\g<1>\g<3> \g<2> \g<4>", leftside_align
)
# "_1 10 10_" -> "_11 0 10_"
rightside_align = re.sub(r"^_1 ([\d])0 ([\d] ?[\d])", r"_1\g<1> 0 \g<2>", rightside_align)
leftside_align = re.sub(r"^_1 ([\d])0 ([\d] ?[\d])", r"_1\g<1> 0 \g<2>", leftside_align)
if args.giza_dir.endswith("decimal") and args.lang == "ru":
# "_1 <DELETE> 0, 5_" => "_10 <DELETE> , 5_" #десять целых и пять десятых
rightside_align = re.sub(
r"(\d) (<DELETE>) ([0123456789])(,) ([\d])", r"\g<1>\g<3> \g<2> \g<4> \g<5>", rightside_align
)
leftside_align = re.sub(
r"(\d) (<DELETE>) ([0123456789])(,) ([\d])", r"\g<1>\g<3> \g<2> \g<4> \g<5>", leftside_align
)
if args.giza_dir.endswith("decimal") and args.lang == "en":
# "_7 0. 7_" => _70 . 7_
rightside_align = re.sub(r"^(_\d) 0\. (\d)", r"\g<1>0 . \g<2>", rightside_align)
leftside_align = re.sub(r"^(_\d) 0\. (\d)", r"\g<1>0 . \g<2>", leftside_align)
if args.giza_dir.endswith("money") and args.lang == "en":
# "_1 , 000__£<<" => "_1 ,000_ _£<<"
rightside_align = re.sub(r"(\d) , 000_(_[£$€])", r"\g<1> ,000_ \g<2>", rightside_align)
leftside_align = re.sub(r"(\d) , 000_(_[£$€])", r"\g<1> ,000_ \g<2>", leftside_align)
if args.giza_dir.endswith("money"):
# "_5 <DELETE> 000000__иен_" => "_5 000000_ _иен_"
rightside_align = re.sub(
r"([\d]) <DELETE> 000000_(_[^\d])", r"\g<1> 000000_ \g<2>", rightside_align
)
leftside_align = re.sub(r"([\d]) <DELETE> 000000_(_[^\d])", r"\g<1> 000000_ \g<2>", leftside_align)
# _5_ <DELETE> _m__£<< => "_5_ _m_ _£<<"
rightside_align = re.sub(
r"([\d]_) <DELETE> (_[mk]_)(_[^\d])", r"\g<1> \g<2> \g<3>", rightside_align
)
leftside_align = re.sub(r"([\d]_) <DELETE> (_[mk]_)(_[^\d])", r"\g<1> \g<2> \g<3>", leftside_align)
# "_3 <DELETE> 0__m__£<<" => "_30 _m_ _£<<"
rightside_align = re.sub(
r"([\d]) <DELETE> 0_(_[mk]_)(_[^\d])", r"\g<1>0 \g<2> \g<3>", rightside_align
)
leftside_align = re.sub(
r"([\d]) <DELETE> 0_(_[mk]_)(_[^\d])", r"\g<1>0 \g<2> \g<3>", leftside_align
)
# "_15 <DELETE> 000__руб._" => "_15 000_ _руб._"
rightside_align = re.sub(r"([\d]) <DELETE> (000_)(_[^\d])", r"\g<1> \g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) <DELETE> (000_)(_[^\d])", r"\g<1> \g<2> \g<3>", leftside_align)
# "_2 5 0 000__$<<" => "_2 50 000_ _$<<"
rightside_align = re.sub(r"([\d]) 0 000_(_[^\d])", r"\g<1>0 000_ \g<2>", rightside_align)
leftside_align = re.sub(r"([\d]) 0 000_(_[^\d])", r"\g<1>0 000_ \g<2>", leftside_align)
# "_5 0 0000__$_" => "_500 000_ _$_"
rightside_align = re.sub(r"([\d]) 0 0000_(_[^\d])", r"\g<1>00 000_ \g<2>", rightside_align)
leftside_align = re.sub(r"([\d]) 0 0000_(_[^\d])", r"\g<1>00 000_ \g<2>", leftside_align)
# "_1 000__руб._" => "_1000_ _руб._"
rightside_align = re.sub(r"_1 000_(_[^\d])", r"_1000_ \g<1>", rightside_align)
leftside_align = re.sub(r"_1 000_(_[^\d])", r"_1000_ \g<1>", leftside_align)
# replace cases like "2 0__января" with "20_ _января"
leftside_align = re.sub(r"([\d]) (00?_)(_[^\d])", r"\g<1>\g<2> \g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) (00?_)(_[^\d])", r"\g<1>\g<2> \g<3>", rightside_align)
# "_3 <DELETE> 0__september_ _2 014_" => "_30_ <DELETE> _september_ _2 014_"
# "_3 <DELETE> 00__тыс.__руб._" => "_300_ <DELETE> _тыс.__руб._"
leftside_align = re.sub(
r"([\d]) <DELETE> (00?_)(_[^\d])", r"\g<1>\g<2> <DELETE> \g<3>", leftside_align
)
rightside_align = re.sub(
r"([\d]) <DELETE> (00?_)(_[^\d])", r"\g<1>\g<2> <DELETE> \g<3>", rightside_align
)
# "_october_ _2 0,2 015_" => "_october_ _20 ,2 015_"
leftside_align = re.sub(r"([\d]) (0),(\d)", r"\g<1>\g<2> ,\g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) (0),(\d)", r"\g<1>\g<2> ,\g<3>", rightside_align)
# "_3 0_.10. _1 9 4 3_" => "_30_ .10. _1 9 4 3_"
leftside_align = re.sub(r"([\d]) (0_)(\.[\d])", r"\g<1>\g<2> \g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) (0_)(\.[\d])", r"\g<1>\g<2> \g<3>", rightside_align)
# replace cases like "_1 0000_" with "_10 000_"
# replace cases like "_5 00000_" with "_500 000_"
rightside_align = re.sub(r"([\d]) ([0][0]?)(000000000_)", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([0][0]?)(000000000_)", r"\g<1>\g<2> \g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) ([0][0]?)(000000_)", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([0][0]?)(000000_)", r"\g<1>\g<2> \g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) ([0][0]?)(000_)", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([0][0]?)(000_)", r"\g<1>\g<2> \g<3>", leftside_align)
# "_4 00,000_" -> "_400 ,000_"
rightside_align = re.sub(r"([\d]) ([0][0]?),(000_)", r"\g<1>\g<2> ,\g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([0][0]?),(000_)", r"\g<1>\g<2> ,\g<3>", leftside_align)
# "_9 3 ,0__²_> _км_" => "_9 3 ,0__²_> _км_"
rightside_align = re.sub(r"([\d]) (,00?_)(_[^\d])", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) (,00?_)(_[^\d])", r"\g<1>\g<2> \g<3>", leftside_align)
# "_0 <DELETE> , <DELETE> <DELETE> 01__г_" => "_0 <DELETE> , 01 <DELETE> _г_"
rightside_align = re.sub(
r"(,) <DELETE> <DELETE> 01_(_[^\d])", r"\g<1> 01_ <DELETE> \g<2>", rightside_align
)
leftside_align = re.sub(
r"(,) <DELETE> <DELETE> 01_(_[^\d])", r"\g<1> 01_ <DELETE> \g<2>", leftside_align
)
# "_0 <DELETE> , 7 6 <DELETE> <DELETE> 1__км_" => "_0 <DELETE> , 7 6 1_ <DELETE> _км_"
rightside_align = re.sub(
r"(,) (\d) (\d) <DELETE> <DELETE> 1_(_[^\d])",
r"\g<1> \g<2> \g<3> 1_ <DELETE> \g<4>",
rightside_align,
)
leftside_align = re.sub(
r"(,) (\d) (\d) <DELETE> <DELETE> 1_(_[^\d])",
r"\g<1> \g<2> \g<3> 1_ <DELETE> \g<4>",
leftside_align,
)
# "_5 <DELETE> 0000__рублей_" => "_50 000_ рублей"
rightside_align = re.sub(
r"([\d]) <DELETE> ([0][0]?)(000_)(_)", r"\g<1>\g<2> \g<3> \g<4>", rightside_align
)
leftside_align = re.sub(
r"([\d]) <DELETE> ([0][0]?)(000_)(_)", r"\g<1>\g<2> \g<3> \g<4>", leftside_align
)
# "_1 <DELETE> 115_" -> "_1 1 15_"
rightside_align = re.sub(r"<DELETE> ([1])([1][\d])", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> ([1])([1][\d])", r"\g<1> \g<2>", leftside_align)
# "_1 <DELETE> 990-х_" -> "_1 9 90-х_"
rightside_align = re.sub(r"<DELETE> (9)(90)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (9)(90)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (8)(80)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (8)(80)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (7)(70)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (7)(70)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (6)(60)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (6)(60)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (5)(50)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (5)(50)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (4)(40)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (4)(40)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (3)(30)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (3)(30)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (2)(20)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (2)(20)", r"\g<1> \g<2>", leftside_align)
                # восемь ноль ноль ноль ноль ноль ноль ноль _8 0 0 0 0 0 0 0_ (Russian "eight" followed by seven zeros)
# _8 <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 0000000_
rightside_align = re.sub(
r"<DELETE> <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 0000000_",
r"0 0 0 0 0 0 0_",
rightside_align,
)
leftside_align = re.sub(
r"<DELETE> <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 0000000_",
r"0 0 0 0 0 0 0_",
leftside_align,
)
# _8 <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 000000_
rightside_align = re.sub(
r"<DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 000000_", r"0 0 0 0 0 0_", rightside_align
)
leftside_align = re.sub(
r"<DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 000000_", r"0 0 0 0 0 0_", leftside_align
)
# _8 <DELETE> <DELETE> <DELETE> <DELETE> 00000_
rightside_align = re.sub(r"<DELETE> <DELETE> <DELETE> <DELETE> 00000_", r"0 0 0 0 0_", rightside_align)
leftside_align = re.sub(r"<DELETE> <DELETE> <DELETE> <DELETE> 00000_", r"0 0 0 0 0_", leftside_align)
# _8 <DELETE> <DELETE> <DELETE> 0000_
rightside_align = re.sub(r"<DELETE> <DELETE> <DELETE> 0000_", r"0 0 0 0_", rightside_align)
leftside_align = re.sub(r"<DELETE> <DELETE> <DELETE> 0000_", r"0 0 0 0_", leftside_align)
# _8 <DELETE> <DELETE> 000_
rightside_align = re.sub(r"<DELETE> <DELETE> 000_", r"0 0 0_", rightside_align)
leftside_align = re.sub(r"<DELETE> <DELETE> 000_", r"0 0 0_", leftside_align)
# "_2 <DELETE> <DELETE> 010/11" => "_2 0 10 /11"
rightside_align = re.sub(
r"<DELETE> <DELETE> (0)([1][\d])/([\d])", r"\g<1> \g<2> /\g<3>", rightside_align
)
leftside_align = re.sub(
r"<DELETE> <DELETE> (0)([1][\d])/([\d])", r"\g<1> \g<2> /\g<3>", leftside_align
)
# "_2 0 <DELETE> 11/12_" => "_2 0 11 /12_"
rightside_align = re.sub(r"<DELETE> ([\d]+)/([\d])", r"\g<1> /\g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> ([\d]+)/([\d])", r"\g<1> /\g<2>", leftside_align)
# "_2 0 1 0/2 0 11_" => "_2 0 10 /2 0 11_"
rightside_align = re.sub(r"([\d]) ([\d]+)/([\d])", r"\g<1>\g<2> /\g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([\d]+)/([\d])", r"\g<1>\g<2> /\g<3>", leftside_align)
# "_5 0%_" => "_50 %_"
# "_1 00%_" => "_100 %_"
# "_1 00,00%_" => "_100,00 %_"
rightside_align = re.sub(r"([\d]) ([0,]+)%", r"\g<1>\g<2> %", rightside_align)
leftside_align = re.sub(r"([\d]) ([0,]+)%", r"\g<1>\g<2> %", leftside_align)
# ATTENTION: keep the order of next two rules
# "_2 0½_" => "_20 ½_"
rightside_align = re.sub(r"([\d]) ([\d]+)½", r"\g<1>\g<2> ½", rightside_align)
leftside_align = re.sub(r"([\d]) ([\d]+)½", r"\g<1>\g<2> ½", leftside_align)
# "_1 ½_ <DELETE> <DELETE> <DELETE>" => "_1 <DELETE> <DELETE> <DELETE> ½_" #одна целая и одна вторая
rightside_align = re.sub(
r"([\d]) (_?½_)? <DELETE> <DELETE> <DELETE>",
r"\g<1> <DELETE> <DELETE> <DELETE> \g<2>",
rightside_align,
)
leftside_align = re.sub(
r"([\d]) (_?½_)? <DELETE> <DELETE> <DELETE>",
r"\g<1> <DELETE> <DELETE> <DELETE> \g<2>",
leftside_align,
)
if args.lang == "en" and srctokens[-1] == "half":
# _2 <DELETE> 1/ 2_ => _2 <DELETE> <DELETE> ½_
rightside_align = re.sub(r"(\d) <DELETE> 1/ 2_$", r"\g<1> <DELETE> <DELETE> ½_", rightside_align)
leftside_align = re.sub(r"(\d) <DELETE> 1/ 2_$", r"\g<1> <DELETE> <DELETE> ½_", leftside_align)
# "_1 50_ <DELETE> _тыс.__руб._" => "_1 50_ _тыс._ _руб._"
rightside_align = re.sub(r"_ <DELETE> (_[^\d]+_)(_[^\d]+_)", r"_ \g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"_ <DELETE> (_[^\d]+_)(_[^\d]+_)", r"_ \g<1> \g<2>", leftside_align)
# _1000 000__$_ => "_1000000_ _$_"
rightside_align = re.sub(r"_1000 000_(_[^\d])", r"_1000000_ \g<1>", rightside_align)
leftside_align = re.sub(r"_1000 000_(_[^\d])", r"_1000000_ \g<1>", leftside_align)
if args.giza_dir.endswith("date") and args.lang == "en":
# "_1 2_ <DELETE> _november_ _2 014_" => " <DELETE> _12_ <DELETE> _november_ _2 014_"
if srctokens[0] == "the":
leftside_align = re.sub(r"^_1 (\d_)", r"<DELETE> _1\g<1>", leftside_align)
rightside_align = re.sub(r"^_1 (\d_)", r"<DELETE> _1\g<1>", rightside_align)
# "<DELETE> <DELETE> _12,2012_" => "_12_ ,20 12_"
leftside_align = re.sub(r"^<DELETE> <DELETE> _12,2012_", r"_12_ ,20 12_", leftside_align)
rightside_align = re.sub(r"^<DELETE> <DELETE> _12,2012_", r"_12_ ,20 12_", rightside_align)
# "<DELETE> _1,20 14_" => "_1 ,20 14_"
leftside_align = re.sub(r"^<DELETE> _1,(\d)", r"_1 ,\g<1>", leftside_align)
rightside_align = re.sub(r"^<DELETE> _1,(\d)", r"_1 ,\g<1>", rightside_align)
# "_2 <DELETE> 1,20 14_" => "_2 1 ,20 14_"
leftside_align = re.sub(r"<DELETE> 1,(\d)", r"1 ,\g<1>", leftside_align)
rightside_align = re.sub(r"<DELETE> 1,(\d)", r"1 ,\g<1>", rightside_align)
# <DELETE> _11,19 9 7_ => _11 ,19 9 7_
leftside_align = re.sub(r"<DELETE> _11,(\d)", r"_11 ,\g<1>", leftside_align)
rightside_align = re.sub(r"<DELETE> _11,(\d)", r"_11 ,\g<1>", rightside_align)
if len(srctokens) >= 2 and srctokens[-2] == "twenty":
# "<DELETE> <DELETE> _12,200 9_" => "_12 ,20 09_"
leftside_align = re.sub(
r"^<DELETE> <DELETE> _12,200 (\d_)", r"_12_ ,20 0\g<1>", leftside_align
)
rightside_align = re.sub(
r"^<DELETE> <DELETE> _12,200 (\d_)", r"_12_ ,20 0\g<1>", rightside_align
)
# "_april_ _2 015_" => "_april_ _20 15_"
leftside_align = re.sub(r"2 0(\d\d_)$", r"20 \g<1>", leftside_align)
rightside_align = re.sub(r"2 0(\d\d_)$", r"20 \g<1>", rightside_align)
elif len(srctokens) >= 2 and srctokens[-2] == "thousand":
# "<DELETE> <DELETE> _12,200 9_" => "_12 ,2 00 9_"
leftside_align = re.sub(
r"^<DELETE> <DELETE> _12,200 (\d_)", r"_12_ ,2 00 \g<1>", leftside_align
)
rightside_align = re.sub(
r"^<DELETE> <DELETE> _12,200 (\d_)", r"_12_ ,2 00 \g<1>", rightside_align
)
# thirtieth twenty fifteen _3 0th__,20 15_ => _30th_ _,20 15_
leftside_align = re.sub(r"(\d) 0th_(_,\d)", r"\g<1>0th_ \g<2>", leftside_align)
rightside_align = re.sub(r"(\d) 0th_(_,\d)", r"\g<1>0th_ \g<2>", rightside_align)
if args.giza_dir.endswith("date") and args.lang == "ru":
                    # тысяча девятьсот шестидесятого года ("of the year nineteen sixty" in Russian) _1 9 6 0_ => _1 9 60_ <DELETE>
if srctokens[-1] == "года":
leftside_align = re.sub(r"(\d) 0_", r"\g<1>0_ <DELETE>", leftside_align)
rightside_align = re.sub(r"(\d) 0_", r"\g<1>0_ <DELETE>", rightside_align)
if args.giza_dir.endswith("time"):
if srctokens[-1] == "hundred":
# fifteen hundred <DELETE> _15:00_
rightside_align = re.sub(r"<DELETE> (_\d\d:)00_", r"\g<1> 00_", rightside_align)
leftside_align = re.sub(r"<DELETE> (_\d\d:)00_", r"\g<1> 00_", leftside_align)
# !! Do not change the order of next two rules
# twenty one hundred _2 1:00_ <DELETE>
rightside_align = re.sub(r"(_\d) (\d:)00_ <DELETE>", r"\g<1> \g<2> 00_", rightside_align)
leftside_align = re.sub(r"(_\d) (\d:)00_ <DELETE>", r"\g<1> \g<2> 00_", leftside_align)
# twenty hundred _2 0:00_
rightside_align = re.sub(r"(_\d) (\d:)00_", r"\g<1>\g<2> 00_", rightside_align)
leftside_align = re.sub(r"(_\d) (\d:)00_", r"\g<1>\g<2> 00_", leftside_align)
if srctokens[-1] == "o'clock":
# nine o'clock <DELETE> _09:00_ => "_09:00_ <DELETE>"
rightside_align = re.sub(r"^<DELETE> ([^ ])$", r"\g<1> <DELETE>", rightside_align)
leftside_align = re.sub(r"^<DELETE> ([^ ])$", r"\g<1> <DELETE>", leftside_align)
# "_1 1:3 3_" => "_11: 3 3_"
rightside_align = re.sub(r"_(\d) (\d:)(\d)", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"_(\d) (\d:)(\d)", r"\g<1>\g<2> \g<3>", leftside_align)
ban = False
if args.giza_dir.endswith("ordinal"):
if dsttokens[0] == "_—": # тысяча девятьсот сорок пятом _— 1 9 4 5_
ban = True
# ban roman numbers with at least two symbols, because we do not split them to parts
for t in rightside_align.split():
if re.match(r"^_?[ivxl][ivxl]+_?$", t):
ban = True
# ban cases like "_11/05/2013_", "_2005-11-25_", because they are source of incorrect alignments
if args.giza_dir.endswith("date") and args.lang == "en":
if "/" in rightside_align or "-" in rightside_align:
ban = True
# ban brackets
if "(" in rightside_align or ")" in rightside_align:
ban = True
if ban:
out_str = (
"ban:\t"
+ " ".join(srctokens)
+ "\t"
+ " ".join(dsttokens)
+ "\t"
+ leftside_align
+ "\t"
+ rightside_align
)
else:
out_str = (
"good:\t"
+ " ".join(srctokens)
+ "\t"
+ " ".join(dsttokens)
+ "\t"
+ leftside_align
+ "\t"
+ rightside_align
)
out.write(out_str + "\n")
cache[cache_key] = out_str
else:
out_str = "-mon:\t" + " ".join(srctokens) + "\t" + " ".join(dsttokens)
out.write(out_str + "\n")
cache[cache_key] = out_str
not_mono_count += 1
f.close()
g.close()
out.close()
# Main code
if __name__ == '__main__':
main()
|
gkucsko/NeMo
|
scripts/tokenizers/add_special_tokens_to_sentencepiece.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from argparse import ArgumentParser
import sentencepiece as spm
try:
import sentencepiece_model_pb2 as spt
except (ImportError, ModuleNotFoundError):
raise Exception("Ensure that sentencepiece_model_pb2.py has been generated from the protoc compiler")
"""Utility to add special tokens to existing sentencepiece models.
Generate sentencepiece_model_pb2.py in the directory of this script before running.
To generate it, run `protoc --python_out=<path_to_NeMo>/scripts/tokenizers/ sentencepiece_model.proto`
inside the src folder of the sentencepiece repo.
Refer: https://github.com/google/sentencepiece/issues/121
Usage:
python edit_spt_model.py \
--input_file <input_model_dir> \
--output_file <output_model_dir> \
--tokens <space separated special tokens>
Example:
python edit_spt_model.py \
--input_file test.model \
--output_file test.model \
--tokens [CLS] [SEP]
"""
def edit_spt_model():
parser = ArgumentParser()
parser.add_argument(
"--input_file", type=str, required=True, help="Path to sentencepiece model file",
)
parser.add_argument(
"--output_file", type=str, required=True, help="Path to sentencepiece model file",
)
parser.add_argument(
"--tokens", type=str, nargs='+', required=True, help="Special tokens to add to tokenizer",
)
parser.add_argument(
"--is_userdefined", action="store_true", help="When set, the new tokens are set as user_defined tokens",
)
args = parser.parse_args()
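    # sentencepiece piece types (see sentencepiece_model.proto): 3 == CONTROL, 4 == USER_DEFINED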
token_type = 3
if args.is_userdefined:
token_type = 4
model = spt.ModelProto()
model.ParseFromString(open(args.input_file, 'rb').read())
for token in args.tokens:
piece = model.SentencePiece(piece=token, score=0.0, type=token_type)
if piece in model.pieces:
logging.error(f"Special Token '{token}' already exists in the input model!")
sys.exit(1)
model.pieces.append(piece)
sp = spm.SentencePieceProcessor()
try:
sp.LoadFromSerializedProto(model.SerializeToString())
for token in args.tokens:
id = sp.piece_to_id(token)
logging.info(f"Created token '{token}' at ID {id}")
logging.info(f"New tokenizer vocab size: {sp.get_piece_size()}")
except:
logging.error("Could not appropriately configure new tokenizer. Verify if the special tokens already exist.")
sys.exit(1)
with open(args.output_file, 'wb') as outf:
outf.write(model.SerializeToString())
logging.info(f"Created new tokenizer at: {args.output_file}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
edit_spt_model()
|
gkucsko/NeMo
|
tests/collections/nlp/test_indexed_retrieval_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import torch
from omegaconf import OmegaConf
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (
KNNIndex,
MMapRetrievalIndexedDataset,
MMapRetrievalIndexedDatasetBuilder,
)
from nemo.collections.nlp.data.language_modeling.megatron.retro_dataset import RETRODataset
try:
from apex.transformer import parallel_state
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
@pytest.mark.run_only_on('GPU')
@pytest.mark.skipif(not HAVE_APEX, reason="apex is not installed")
class TestRetrievalIndexFiles:
@pytest.mark.unit
def test_index(self):
chunk_size = 64
sizes = np.array([128, 256], dtype=np.int32)
dtype = np.int64
itemsize = dtype().itemsize
index_file = '/tmp/test.idx'
try:
with MMapRetrievalIndexedDataset.Index.writer(index_file, dtype, False) as index:
index.write(sizes, chunk_size)
index_load = MMapRetrievalIndexedDataset.Index(index_file)
assert index_load.chunk_size == chunk_size
assert not index_load.retrieval_db
assert np.array_equal(index_load.sizes, sizes)
assert np.array_equal(index_load._chunk_id_start, np.array([0, sizes[0] / chunk_size], dtype=np.int64))
assert np.array_equal(
index_load._chunk_address, np.arange(0, sizes.sum() * itemsize, chunk_size * itemsize, dtype=np.int64)
)
assert np.array_equal(index_load._pointers, np.array([0, sizes[0] * itemsize], dtype=np.int64))
assert len(index_load._chunk_address) == index_load.num_chunks
finally:
os.remove(index_file)
@pytest.mark.unit
def test_create_data_index(self):
chunk_size = 64
pad_id = 0
sentence1 = torch.arange(0, 200, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence1) % chunk_size)
gt1 = np.pad(sentence1, (0, padded_size), 'constant', constant_values=pad_id)
sentence2 = torch.arange(1, 500, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence2) % chunk_size)
gt2 = np.pad(sentence2, (0, padded_size), 'constant', constant_values=pad_id)
data_file = '/tmp/test'
index_file = data_file + '.idx'
bin_file = data_file + '.bin'
try:
builder = MMapRetrievalIndexedDatasetBuilder(bin_file, chunk_size, pad_id, False)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.finalize(index_file)
# load the data
ds = MMapRetrievalIndexedDataset(data_file)
assert np.array_equal(ds.get(0), gt1)
assert np.array_equal(ds.get(1), gt2)
fetch1, fetch2 = ds[0:2]
assert np.array_equal(fetch1, gt1)
assert np.array_equal(fetch2, gt2)
chunk_id = ds.get_chunk_id(0, 64)
assert chunk_id == 1
assert ds.from_chunk_id_to_doc_id(0) == 0
assert ds.from_chunk_id_to_doc_id(1) == 0
with pytest.raises(ValueError):
ds.get_chunk_id(0, 128)
assert np.array_equal(ds.get_chunk(chunk_id), gt1[64 : 64 + 64])
chunk_id = ds.get_chunk_id(1, 0)
assert chunk_id == 2
assert ds.from_chunk_id_to_doc_id(2) == 1
assert ds.from_chunk_id_to_doc_id(3) == 1
assert ds.from_chunk_id_to_doc_id(4) == 1
assert ds.from_chunk_id_to_doc_id(5) == 1
with pytest.raises(ValueError):
ds.from_chunk_id_to_doc_id(6)
assert np.array_equal(ds.get_chunk(chunk_id), gt2[0:64])
assert np.array_equal(ds.get_chunk(chunk_id + 1), gt2[64:128])
assert np.array_equal(ds.get_chunk(chunk_id + 2), gt2[128:192])
assert np.array_equal(ds.get_chunk(chunk_id + 3), gt2[192:256])
assert ds.get_chunk_id(1, 64) == 3
assert ds.get_chunk_id(1, 128) == 4
assert ds.get_chunk_id(1, 192) == 5
with pytest.raises(ValueError):
ds.get_chunk_id(0, 256)
finally:
os.remove(index_file)
os.remove(bin_file)
@pytest.mark.unit
def test_create_retrieval_data_index(self):
chunk_size = 64
pad_id = 0
sentence1 = torch.arange(0, 200, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence1) % chunk_size)
gt1 = np.pad(sentence1, (0, padded_size), 'constant', constant_values=pad_id)
padded_gt1 = np.pad(sentence1, (0, padded_size + chunk_size), 'constant', constant_values=pad_id)
sentence2 = torch.arange(1, 500, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence2) % chunk_size)
gt2 = np.pad(sentence2, (0, padded_size), 'constant', constant_values=pad_id)
padded_gt2 = np.pad(sentence2, (0, padded_size + chunk_size), 'constant', constant_values=pad_id)
data_file = '/tmp/test'
index_file = data_file + '.idx'
bin_file = data_file + '.bin'
try:
builder = MMapRetrievalIndexedDatasetBuilder(bin_file, chunk_size, pad_id, True)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.finalize(index_file)
# load the data
ds = MMapRetrievalIndexedDataset(data_file)
assert np.array_equal(ds.get(0), gt1)
assert np.array_equal(ds.get(1), gt2)
fetch1, fetch2 = ds[0:2]
assert np.array_equal(fetch1, gt1)
assert np.array_equal(fetch2, gt2)
chunk_id = ds.get_chunk_id(0, 64)
assert chunk_id == 1
assert ds.from_chunk_id_to_doc_id(0) == 0
assert ds.from_chunk_id_to_doc_id(1) == 0
with pytest.raises(ValueError):
ds.get_chunk_id(0, 128)
assert np.array_equal(ds.get_chunk(chunk_id), padded_gt1[64 : 64 + 64 * 2])
chunk_id = ds.get_chunk_id(1, 0)
assert chunk_id == 2
assert ds.from_chunk_id_to_doc_id(2) == 1
assert ds.from_chunk_id_to_doc_id(3) == 1
assert ds.from_chunk_id_to_doc_id(4) == 1
assert ds.from_chunk_id_to_doc_id(5) == 1
with pytest.raises(ValueError):
ds.from_chunk_id_to_doc_id(6)
assert np.array_equal(ds.get_chunk(chunk_id), padded_gt2[0:128])
assert np.array_equal(ds.get_chunk(chunk_id + 1), padded_gt2[64:192])
assert np.array_equal(ds.get_chunk(chunk_id + 2), padded_gt2[128:256])
assert np.array_equal(ds.get_chunk(chunk_id + 3), padded_gt2[192:320])
assert ds.get_chunk_id(1, 64) == 3
assert ds.get_chunk_id(1, 128) == 4
assert ds.get_chunk_id(1, 192) == 5
with pytest.raises(ValueError):
ds.get_chunk_id(0, 256)
chunk_id = ds.get_chunk_id(1, 64)
assert np.array_equal(ds.get_chunk(chunk_id), padded_gt2[64:192])
multi_chunks = ds.get_chunk(slice(0, ds.chunks))
assert np.array_equal(multi_chunks[0], padded_gt1[0:128])
assert np.array_equal(multi_chunks[1], padded_gt1[64 : 64 + 128])
assert np.array_equal(multi_chunks[2], padded_gt2[0:128])
assert np.array_equal(multi_chunks[3], padded_gt2[64 : 64 + 128])
assert np.array_equal(multi_chunks[4], padded_gt2[128 : 128 + 128])
assert np.array_equal(multi_chunks[5], padded_gt2[192 : 192 + 128])
finally:
os.remove(index_file)
os.remove(bin_file)
@pytest.mark.unit
def test_knn_index(self):
data_file = '/tmp/test'
index_file = data_file + '.idx'
K = 8
try:
with KNNIndex.writer(index_file, K) as w:
map_np0 = np.random.randint(0, 100, (50, K))
w.write(map_np0)
map_np1 = np.random.randint(0, 100, (50, K))
w.write(map_np1)
map_np2 = np.random.randint(0, 100, (50, K))
w.write(map_np2)
f = KNNIndex(index_file)
assert f.K == K
assert f.len == map_np0.shape[0] + map_np1.shape[0] + map_np2.shape[0]
assert np.array_equal(map_np0, f.knn_map[:50])
assert np.array_equal(map_np1, f.knn_map[50:100])
assert np.array_equal(map_np2, f.knn_map[100:])
assert np.array_equal(f.get_KNN_chunk_ids(5), map_np0[5])
finally:
os.remove(index_file)
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_APEX, reason="apex is not installed")
def test_retro_dataset(self):
init_method = 'tcp://'
master_ip = 'localhost'
master_port = '6000'
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(backend='gloo', world_size=1, rank=0, init_method=init_method)
parallel_state.initialize_model_parallel(1, 1)
chunk_size = 64
pad_id = 0
sentence1 = torch.arange(0, 200, 2, dtype=torch.int64)
sentence2 = torch.arange(1, 500, 2, dtype=torch.int64)
sentence3 = torch.arange(0, 300, 2, dtype=torch.int64)
sentence4 = torch.arange(1, 400, 2, dtype=torch.int64)
# test the case that
# training data and retrieval data are different
data_file = '/tmp/test_data'
data_index_file = data_file + '.idx'
data_bin_file = data_file + '.bin'
db_file = '/tmp/test_db_data'
db_index_file = db_file + '.idx'
db_bin_file = db_file + '.bin'
K = 8
map_index_file = '/tmp/test_map.idx'
index_path = '/tmp'
cfg = OmegaConf.create({'data': {"index_mapping_dir": index_path}})
# dummy tokenizer
class Tokenizer:
eos_id = 1
pad_id = 0
tokenizer = Tokenizer()
num_samples = 100
seq_len = 192
name = 'test'
data_prefix = 'pref'
seed = 1
_filename = index_path + '/' + data_prefix
_filename += '_{}_indexmap'.format(name)
_filename += '_{}ns'.format(num_samples)
_filename += '_{}sl'.format(seq_len)
_filename += '_{}s'.format(seed)
doc_idx_filename = _filename + '_doc_idx.npy'
sample_idx_filename = _filename + '_sample_idx.npy'
shuffle_idx_filename = _filename + '_shuffle_idx.npy'
try:
builder = MMapRetrievalIndexedDatasetBuilder(data_bin_file, chunk_size, pad_id, False)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.finalize(data_index_file)
builder = MMapRetrievalIndexedDatasetBuilder(db_bin_file, chunk_size, pad_id, True)
builder.add_item(sentence3)
builder.add_item(sentence4)
builder.finalize(db_index_file)
# load the data
data_index = MMapRetrievalIndexedDataset(data_file)
db_index = MMapRetrievalIndexedDataset(db_file)
with KNNIndex.writer(map_index_file, K) as w:
map_np = np.random.randint(-3, db_index.chunks, (data_index.chunks, K))
w.write(map_np)
map_index = KNNIndex(map_index_file)
documents = np.arange(0, data_index.sizes.shape[0])
d = RETRODataset(
cfg,
None,
tokenizer,
name,
data_prefix,
documents,
data_index,
num_samples,
seq_len,
seed,
map_index,
db_index,
)
for i in range(len(d)):
record = d[i]
assert record['tokens'].shape[0] == seq_len
assert record['labels'].shape[0] == seq_len
assert record['retrieved_ids'].shape[0] == seq_len // chunk_size
assert record['retrieved_ids'].shape[1] == K
assert record['retrieved_ids'].shape[2] == chunk_size * 2
assert record['tokens_mask'].shape[0] == seq_len
finally:
os.remove(data_bin_file)
os.remove(data_index_file)
os.remove(db_bin_file)
os.remove(db_index_file)
os.remove(map_index_file)
os.remove(doc_idx_filename)
os.remove(sample_idx_filename)
os.remove(shuffle_idx_filename)
# test the case that
# training data and retrieval data are the same
try:
builder = MMapRetrievalIndexedDatasetBuilder(db_bin_file, chunk_size, pad_id, True)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.add_item(sentence3)
builder.add_item(sentence4)
builder.finalize(db_index_file)
# load the data
data_index = MMapRetrievalIndexedDataset(db_file)
db_index = MMapRetrievalIndexedDataset(db_file)
with KNNIndex.writer(map_index_file, K) as w:
map_np = np.random.randint(-3, db_index.chunks, (data_index.chunks, K))
w.write(map_np)
map_index = KNNIndex(map_index_file)
documents = np.arange(0, data_index.sizes.shape[0])
d = RETRODataset(
cfg,
None,
tokenizer,
name,
data_prefix,
documents,
data_index,
num_samples,
seq_len,
seed,
map_index,
db_index,
)
for i in range(len(d)):
record = d[i]
assert record['tokens'].shape[0] == seq_len
assert record['labels'].shape[0] == seq_len
assert record['retrieved_ids'].shape[0] == seq_len // chunk_size
assert record['retrieved_ids'].shape[1] == K
assert record['retrieved_ids'].shape[2] == chunk_size * 2
assert record['tokens_mask'].shape[0] == seq_len
finally:
os.remove(db_bin_file)
os.remove(db_index_file)
os.remove(map_index_file)
os.remove(doc_idx_filename)
os.remove(sample_idx_filename)
os.remove(shuffle_idx_filename)
|
gkucsko/NeMo
|
nemo/collections/nlp/models/nlp_model.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import hashlib
import json
import os
from typing import Any, Optional
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.migration import pl_legacy_patch
from transformers import TRANSFORMERS_CACHE
from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
from nemo.collections.nlp.modules import BertModule
from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import VOCAB_FILE_NAME
from nemo.collections.nlp.modules.common.lm_utils import get_lm_model
from nemo.collections.nlp.modules.common.megatron.megatron_utils import (
MEGATRON_CONFIG_MAP,
get_megatron_pretrained_bert_models,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.core.classes import ModelPT
from nemo.core.classes.exportable import Exportable
from nemo.utils import AppState, logging
__all__ = ['NLPModel']
NEMO_NLP_TMP = os.path.join(os.path.dirname(str(TRANSFORMERS_CACHE)), "nemo_nlp_tmp")
os.makedirs(NEMO_NLP_TMP, exist_ok=True)
class NLPModel(ModelPT, Exportable):
"""Base class for NLP Models.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None, no_lm_init=False):
self.hidden_size = None
self.bert_model = None
vocab_file = None
nemo_file = None
config_dict = None
config_file = None
# tokenizer needs to get initialized before the super.__init__()
# as dataloaders and datasets need it to process the data
pretrain_model_name = ''
if cfg.get('language_model') and cfg.language_model.get('pretrained_model_name', ''):
pretrain_model_name = cfg.language_model.get('pretrained_model_name', '')
all_pretrained_megatron_bert_models = get_megatron_pretrained_bert_models()
if cfg.get('tokenizer'):
# Some models have their own tokenizer setup
if (
not hasattr(self, 'tokenizer')
and cfg.tokenizer.get('tokenizer_name')
and pretrain_model_name not in all_pretrained_megatron_bert_models
):
self.setup_tokenizer(cfg.tokenizer)
elif pretrain_model_name in all_pretrained_megatron_bert_models:
copy_cfg = copy.deepcopy(cfg)
bert_model = get_lm_model(
config_file=config_file,
config_dict=config_dict,
vocab_file=vocab_file,
trainer=trainer,
cfg=copy_cfg,
)
# set the tokenizer if it is not initialized explicitly
if (
(hasattr(self, 'tokenizer') and self.tokenizer is None) or not hasattr(self, 'tokenizer')
) and hasattr(bert_model, 'tokenizer'):
self.tokenizer = bert_model.tokenizer
if (
cfg.get('tokenizer')
and hasattr(cfg.get('tokenizer'), 'vocab_file')
and cfg.get('tokenizer').get('vocab_file')
):
vocab_file = self.register_artifact('tokenizer.vocab_file', cfg.tokenizer.vocab_file)
super().__init__(cfg, trainer)
# handles model parallel save and restore logic
self._save_restore_connector = NLPSaveRestoreConnector()
if cfg.get('language_model') and not no_lm_init:
if cfg.get('language_model').get('nemo_file'):
nemo_file = self.register_artifact('language_model.nemo_file', cfg.language_model.nemo_file)
if cfg.get('language_model').get('config'):
config_dict = OmegaConf.to_container(cfg.language_model.config)
if cfg.get('language_model').get('config_file'):
config_file = self.register_artifact('language_model.config_file', cfg.language_model.config_file)
bert_model = get_lm_model(
config_file=config_file, config_dict=config_dict, vocab_file=vocab_file, trainer=trainer, cfg=cfg,
)
# set the tokenizer if it is not initialized explicitly
if ((hasattr(self, 'tokenizer') and self.tokenizer is None) or not hasattr(self, 'tokenizer')) and hasattr(
bert_model, 'tokenizer'
):
self.tokenizer = bert_model.tokenizer
# Required to pull up the config for MegatronBert models
self.pretrained_model_name = cfg.language_model.pretrained_model_name
if (
cfg.tokenizer is not None
and cfg.tokenizer.get("tokenizer_name", "") is not None
and "megatron" in cfg.tokenizer.get("tokenizer_name", "")
) or pretrain_model_name in all_pretrained_megatron_bert_models:
self.hidden_size = bert_model.cfg.hidden_size
else:
self.hidden_size = bert_model.config.hidden_size
if cfg.get('language_model') and not no_lm_init:
self.bert_model = bert_model
# register encoder config
self.register_bert_model()
def register_artifact(
self, config_path: str, src: str, verify_src_exists: bool = False,
):
""" Overrides ModelPT register_artifact default behavior.
NLP models usually need artifacts that are optional."""
return super().register_artifact(config_path, src, verify_src_exists=verify_src_exists)
@rank_zero_only
def register_bert_model(self):
"""Adds encoder config to .nemo archive for Jarvis.
"""
# check if there is an encoder, warn if not
if self.bert_model is not None:
# get encoder config and create source for artifact
if isinstance(self.bert_model, BertModule):
# HuggingFace Transformer Config
pretrained_model_name = self.bert_model.name_or_path
# Some HF names have "/" in them so we replace with _
pretrained_model_name = pretrained_model_name.replace("/", "_")
encoder_config_path = pretrained_model_name + '_encoder_config'
encoder_config_src = os.path.join(NEMO_NLP_TMP, encoder_config_path + '.json')
self.bert_model.config.to_json_file(encoder_config_src) # name requested by jarvis team
self.register_artifact('language_model.config_file', encoder_config_src) # for .nemo
            # MegatronBertModel's superclass is NLPModel, hence we can't check for isinstance of self.bert_model
elif hasattr(self, 'pretrained_model_name') and 'megatron' in self.pretrained_model_name:
if self.pretrained_model_name in MEGATRON_CONFIG_MAP:
output_config = MEGATRON_CONFIG_MAP[self.pretrained_model_name]["config"]
if output_config is not None:
encoder_config_path = self.pretrained_model_name + '_encoder_config'
encoder_config_src = os.path.join(NEMO_NLP_TMP, encoder_config_path + '.json')
with open(encoder_config_src, 'w', encoding='utf-8') as f:
f.write(json.dumps(output_config, indent=2, sort_keys=True) + '\n')
self.register_artifact('language_model.config_file', encoder_config_src) # for .nemo
else:
# No defaults as this case can be any possible hyper-parameter combination of MegatronBert config
logging.info(f'For {self.pretrained_model_name}, set the config_file in the YAML file')
else:
logging.info(
f'Registering MegatronBERT model config for {self.pretrained_model_name} is not yet supported. \
Please override this method if needed.'
)
else:
logging.info(
f'Registering BERT model config for {self.bert_model} is not yet supported. Please override this method if needed.'
)
def setup_tokenizer(self, cfg: DictConfig):
"""Instantiates tokenizer based on config and registers tokenizer artifacts.
If model is being restored from .nemo file then the tokenizer.vocab_file will
be used (if it exists).
Otherwise, we will use the vocab file provided in the config (if it exists).
Finally, if no vocab file is given (this happens frequently when using HF),
we will attempt to extract the vocab from the tokenizer object and then register it.
Args:
cfg (DictConfig): Tokenizer config
"""
vocab_file = None
if cfg.get('vocab_file'):
vocab_file = self.register_artifact(config_path='tokenizer.vocab_file', src=cfg.vocab_file)
# only load tokenizer if vocab_file and tokenizer_model is not None
if cfg.tokenizer_name or vocab_file or cfg.tokenizer_model:
self.tokenizer = get_tokenizer(
tokenizer_name=cfg.tokenizer_name,
vocab_file=vocab_file,
special_tokens=OmegaConf.to_container(cfg.special_tokens) if cfg.special_tokens else None,
tokenizer_model=self.register_artifact(
config_path='tokenizer.tokenizer_model', src=cfg.tokenizer_model
),
)
if vocab_file is None:
# when there is no vocab file we try to get the vocab from the tokenizer and register it
self._register_vocab_from_tokenizer(vocab_file_config_path='tokenizer.vocab_file', cfg=cfg)
@rank_zero_only
def _register_vocab_from_tokenizer(
self,
vocab_file_config_path: str = 'tokenizer.vocab_file',
vocab_dict_config_path: str = 'tokenizer_vocab_dict',
cfg: DictConfig = None,
):
"""Creates vocab file from tokenizer if vocab file is None.
Args:
vocab_file_config_path: path to the vocab_file in the config
vocab_dict_config_path: path to the vocab_dict in the config
cfg: tokenizer config
"""
if self.tokenizer is None:
raise ValueError('Instantiate self.tokenizer before registering vocab from it.')
else:
if isinstance(self.tokenizer, AutoTokenizer):
# extract vocab from tokenizer
vocab_dict = self.tokenizer.tokenizer.get_vocab()
# for fast and slow tokenizer vocabularies compatibility
vocab_dict = dict(sorted(vocab_dict.items(), key=lambda item: item[1]))
# get hash of vocab_dict to create a unique directory to write vocab_dict and vocab_file
m = hashlib.md5()
if 'tokenizer_name' in cfg:
if cfg.tokenizer_name is not None:
# different pretrained models with the same vocab will have different hash
m.update(cfg.tokenizer_name.encode())
# get string representation of vocab_dict
vocab_dict_str = json.dumps(vocab_dict, sort_keys=True).encode()
m.update(vocab_dict_str)
vocab_dict_hash = m.hexdigest()
hash_path = os.path.join(NEMO_NLP_TMP, vocab_dict_hash)
os.makedirs(hash_path, exist_ok=True)
vocab_json_src = os.path.join(hash_path, vocab_dict_config_path)
with open(vocab_json_src, 'w', encoding='utf-8') as f:
f.write(json.dumps(vocab_dict, indent=2, sort_keys=True) + '\n')
self.register_artifact(config_path=vocab_dict_config_path, src=vocab_json_src)
tokenizer_name = self.tokenizer.tokenizer.__class__.__name__
# save vocab file
# depending on the HuggingFace model, vocab file could mean different things, see VOCAB_FILE_NAME
self.tokenizer.save_vocabulary(hash_path)
# create vocab file
vocab_file_src = os.path.join(hash_path, VOCAB_FILE_NAME[tokenizer_name])
cfg.vocab_file = vocab_file_src
self.register_artifact(config_path=vocab_file_config_path, src=vocab_file_src)
else:
logging.info(
f'Registering tokenizer vocab for {self.tokenizer} is not yet supported. Please override this method if needed.'
)
@staticmethod
def _unpack_nemo_file(path2file: str, out_folder: str) -> str:
return super(NLPModel, NLPModel)._unpack_nemo_file(path2file, out_folder)
@staticmethod
def _make_nemo_file_from_folder(filename, source_dir):
return super(NLPModel, NLPModel)._make_nemo_file_from_folder(filename, source_dir)
@property
def input_module(self):
return self.bert_model
@property
def output_module(self):
return self.classifier
@property
def is_model_parallel_initialized(self):
app_state = AppState()
if app_state.model_parallel_group is not None:
return True
else:
return False
@classmethod
def load_from_checkpoint(
cls,
checkpoint_path: str,
map_location: Any = None,
hparams_file: Optional[str] = None,
strict: bool = True,
**kwargs,
):
"""
Loads ModelPT from checkpoint, with some maintenance of restoration.
        For documentation, please refer to LightningModule.load_from_checkpoint() documentation.
"""
checkpoint = None
try:
cls._set_model_restore_state(is_being_restored=True)
# TODO: replace with proper PTL API
with pl_legacy_patch():
if map_location is not None:
checkpoint = pl_load(checkpoint_path, map_location=map_location)
else:
checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)
if hparams_file is not None:
extension = hparams_file.split(".")[-1]
if extension.lower() == "csv":
hparams = load_hparams_from_tags_csv(hparams_file)
elif extension.lower() in ("yml", "yaml"):
hparams = load_hparams_from_yaml(hparams_file)
else:
raise ValueError(".csv, .yml or .yaml is required for `hparams_file`")
hparams["on_gpu"] = False
# overwrite hparams by the given file
checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY] = hparams
            # for past checkpoints we need to add the new key
if cls.CHECKPOINT_HYPER_PARAMS_KEY not in checkpoint:
checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY] = {}
# override the hparams with values that were passed in
cfg = checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].get('cfg', checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY])
# TODO: can we do this without overriding?
config_kwargs = kwargs.copy()
if 'trainer' in config_kwargs:
config_kwargs.pop('trainer')
cfg.update(config_kwargs)
if cfg.get('megatron_amp_O2', False):
new_state_dict = {}
for key in checkpoint['state_dict'].keys():
new_key = key.replace('model.', 'model.module.', 1)
new_state_dict[new_key] = checkpoint['state_dict'][key]
checkpoint['state_dict'] = new_state_dict
if 'cfg' in kwargs:
model = cls._load_model_state(checkpoint, strict=strict, **kwargs)
else:
model = cls._load_model_state(checkpoint, strict=strict, cfg=cfg, **kwargs)
# cfg = checkpoint[cls.CHECKPOINT_HYPER_PARAMS_KEY].cfg
            # NMT models do not have a `tokenizer` attribute; they instead have encoder_tokenizer and decoder_tokenizer attributes.
if hasattr(cfg, "tokenizer"):
if cfg.tokenizer.get("tokenizer_model") is not None:
model.register_artifact("tokenizer.tokenizer_model", cfg.tokenizer.tokenizer_model)
if cfg.tokenizer.get("vocab_file") is not None:
model.register_artifact("tokenizer.vocab_file", cfg.tokenizer.vocab_file)
if cfg.tokenizer.get("merge_file") is not None:
model.register_artifact("tokenizer.merge_file", cfg.tokenizer.merge_file)
if hasattr(cfg, "encoder_tokenizer"):
if cfg.encoder_tokenizer.get("tokenizer_model") is not None:
model.register_artifact("encoder_tokenizer.tokenizer_model", cfg.encoder_tokenizer.tokenizer_model)
if cfg.encoder_tokenizer.get("vocab_file") is not None:
model.register_artifact("encoder_tokenizer.vocab_file", cfg.encoder_tokenizer.vocab_file)
if cfg.encoder_tokenizer.get("merge_file") is not None:
model.register_artifact("encoder_tokenizer.merge_file", cfg.encoder_tokenizer.merge_file)
if hasattr(cfg, "decoder_tokenizer"):
if cfg.decoder_tokenizer.get("tokenizer_model") is not None:
model.register_artifact("decoder_tokenizer.tokenizer_model", cfg.decoder_tokenizer.tokenizer_model)
if cfg.decoder_tokenizer.get("vocab_file") is not None:
model.register_artifact("decoder_tokenizer.vocab_file", cfg.decoder_tokenizer.vocab_file)
if cfg.decoder_tokenizer.get("merge_file") is not None:
model.register_artifact("decoder_tokenizer.merge_file", cfg.decoder_tokenizer.merge_file)
checkpoint = model
finally:
cls._set_model_restore_state(is_being_restored=False)
return checkpoint
|
gkucsko/NeMo
|
nemo/collections/nlp/modules/common/megatron/layer_type.py
|
<reponame>gkucsko/NeMo
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer."""
import enum
class LayerType(enum.Enum):
encoder = 1
decoder = 2
retrieval_encoder = (
3 # retrieval model encoder, it uses cross attention to be conditioned on the pre decoder output
)
retrieval_decoder = (
4 # retrieval model decoder, it uses chunked cross attention to be conditioned on the retrieved information
)
decoder_pre_mlp = 5 # decoder that skips the computation after the self-attention
retrieval_decoder_after_self_attn = 6 # retrieval decoder that skips the self-attention
|
gkucsko/NeMo
|
scripts/nlp_language_modeling/build_knn_map_index.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the script to build the KNN index map from the training dataset to the retrieval dataset.
For example, it maps chunk_id i from the training dataset to its K nearest-neighbor chunk ids in the retrieval dataset.
It requires the training text data to be converted into `bin` and `idx` files by the `preprocess_data_for_megatron.py` script.
It also requires the Faiss index file for the retrieval dataset, built by the `build_retrieval_index.py` script.
Here is an example of how to use it:
```python
python scripts/nlp_language_modeling/build_knn_map_index.py \
--input_file=PATH_TO_INPUT_TRAINING_DATA \
--tokenizer-library=sentencepiece \
--tokenizer-model=tokenizer.model \
--process_chunk_size=51200 \
--K_neighbors=16 \
--faiss_index=PATH_TO_FAISS_INDEX_FILE \
--devices=0,1,2,3 \
--batch_size=1280 \
--remove_duplicate \
--output_file=knn_map.idx
```
Use `--remove_duplicate` flag if the data and retrieval dataset are the same. It will remove the neighbors from the same document.
It creates a knn_map.idx KNNIndex file.
During training of the RETRO model, the index can be used to look up the KNN chunk ids in the
DB dataset for a given input training data chunk id.
"""
import argparse
import multiprocessing
import faiss
import numpy as np
from numba import njit, prange
from sentence_transformers import SentenceTransformer
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (
KNNIndex,
MMapRetrievalIndexedDataset,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.utils import logging
QUEUE_SIZE = 30
queue = multiprocessing.Queue(QUEUE_SIZE)
emb_queue = multiprocessing.Queue(QUEUE_SIZE)
@njit(parallel=True)
def build_map(chunk_start, result, total_chunks):
"""
build the map from chunk_id to document id
"""
size = len(chunk_start)
for i in prange(size):
beg = chunk_start[i]
end = chunk_start[i + 1] if i < size - 1 else total_chunks
result[beg:end] = i
@njit(parallel=True)
def dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, chunk_id_start):
"""
    Deduplicate the KNN neighbors that come from the same document as the data chunks.
    chunk_id_to_doc_id_map is calculated by the build_map function.
    I is the original KNN search result from Faiss.
    chunk_id_start is the chunk_id offset.
    The filtered KNN results are stored in tmp_neighbors.
"""
for cid in prange(len(I)):
source_doc_id = chunk_id_to_doc_id_map[chunk_id_start + cid]
position = 0
for target_chunk_id in I[cid]:
if chunk_id_start + cid == target_chunk_id:
continue
target_doc_id = chunk_id_to_doc_id_map[target_chunk_id]
if source_doc_id != target_doc_id:
tmp_neighbors[cid, position] = target_chunk_id
position += 1
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.tokenizer_library,
model_name=args.tokenizer_type,
tokenizer_model=args.tokenizer_model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
def process_sentence_chunks(ds: MMapRetrievalIndexedDataset, tokenizer, chunk_size: int):
total_chunks = ds.chunks
start = 0
threshold = 0
while start < total_chunks:
if start / total_chunks > threshold:
logging.info(f"sentence processing {start / total_chunks} is done")
threshold += 0.1
id_slices = ds.get_chunk(slice(start, min(start + chunk_size, total_chunks)), force_no_padding=True)
start = min(start + chunk_size, total_chunks)
sentences = [tokenizer.ids_to_text(ids) for ids in id_slices]
queue.put(sentences)
queue.put(None)
def get_sentence_chunks():
return queue.get()
def calculate_embedding(pool, batch_size):
while True:
sentences = get_sentence_chunks()
if sentences is None:
break
emb = model.encode_multi_process(sentences=sentences, pool=pool, batch_size=batch_size)
emb_queue.put(emb)
emb_queue.put(None)
def get_emb():
return emb_queue.get()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="build Faiss index",)
parser.add_argument(
'--input_file', type=str, required=True, help='Input file',
)
parser.add_argument("--faiss_index", type=str, required=True, help='faiss index file for retrieval dataset')
parser.add_argument(
'--process_chunk_size',
type=int,
default=10000,
        help='Number of data chunks to process at a time as queries when building the map index',
)
parser.add_argument(
'--remove_duplicate',
action='store_true',
        help='Remove the KNN neighbors that come from the same document as the query data.',
)
parser.add_argument(
'--K_neighbors', type=int, default=16, help='The number of neighbors to query',
)
parser.add_argument(
'--dedup_margin',
type=int,
default=2,
        help='Extra neighbors to retrieve so that slots freed by removing same-document duplicates can be filled',
)
parser.add_argument(
'--sentence_transformer_model',
type=str,
default='bert-base-nli-mean-tokens',
help='sentence transformer to load',
)
parser.add_argument(
'--output_file', type=str, required=True, help='Output KNN Map index file',
)
parser.add_argument(
        '--devices', type=str, default=None, help='comma-delimited list of CUDA device ids, e.g. 0,1,2'
)
parser.add_argument(
"--batch_size", type=int, default=4000, help="Batch size for encoding. Use max according to GPU MEM"
)
group = parser.add_argument_group(title='tokenizer')
group.add_argument(
'--tokenizer-library',
type=str,
required=True,
choices=['yttm', 'sentencepiece', 'megatron', 'huggingface', 'tabular'],
help='What tokenizer library to use.',
)
group.add_argument(
'--tokenizer-type', type=str, default=None, help='What type of tokenizer to use.',
)
group.add_argument(
'--tokenizer-model', type=str, default=None, help='Path to tokenizer model.',
)
group.add_argument('--vocab-file', type=str, default=None, help='Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).')
group.add_argument('--delimiter', type=str, default=None, help='delimiter used for tabular tokenizer')
args = parser.parse_args()
model = SentenceTransformer(args.sentence_transformer_model)
tokenizer = get_tokenizer(args)
ds = MMapRetrievalIndexedDataset(args.input_file)
index = faiss.read_index(args.faiss_index)
process = multiprocessing.Process(target=process_sentence_chunks, args=(ds, tokenizer, args.process_chunk_size))
process.start()
if args.devices is None:
device_list = None
else:
device_list = ['cuda:' + str(device) for device in args.devices.split(',')]
pool = model.start_multi_process_pool(device_list)
emb_process = multiprocessing.Process(target=calculate_embedding, args=(pool, args.batch_size))
emb_process.start()
if ds._index.retrieval_db and args.remove_duplicate:
neighbors = args.K_neighbors + args.dedup_margin
# build the id maps for quick dedup
id_start = np.array(ds._index._chunk_id_start)
chunk_id_to_doc_id_map = np.zeros((ds.chunks,), dtype=np.int64)
build_map(id_start, chunk_id_to_doc_id_map, ds.chunks)
else:
neighbors = args.K_neighbors
chunk_id_start = 0
with KNNIndex.writer(args.output_file, args.K_neighbors) as w:
while True:
emb = get_emb()
if emb is None:
break
D, I = index.search(emb, neighbors)
if ds._index.retrieval_db and args.remove_duplicate:
tmp_neighbors = np.ones_like(I) * -1
dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, chunk_id_start)
I = tmp_neighbors[:, : args.K_neighbors]
chunk_id_start += len(I)
w.write(I)
process.join()
emb_process.join()
model.stop_multi_process_pool(pool)
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/dataset_preparation/sample_each_label.py
|
<filename>examples/nlp/text_normalization_as_tagging/dataset_preparation/sample_each_label.py
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to sample each label from the labeled files.
"""
import sys
from argparse import ArgumentParser
from collections import Counter
parser = ArgumentParser(description="Sample labels")
parser.add_argument("--filename", required=True, type=str, help='File with input data')
parser.add_argument("--max_count", required=True, type=int, help='Count')
args = parser.parse_args()
vocab = Counter()
out_sample = open(args.filename + ".sample_" + str(args.max_count), "w", encoding="utf-8")
out_rest = open(args.filename + ".rest_" + str(args.max_count), "w", encoding="utf-8")
n = 0
with open(args.filename, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split("\t")
if len(parts) < 2:
print("Warning: bad format in line: " + str(n) + ": " + line, file=sys.stderr)
continue
tags = parts[1].split(" ")
ok = False
for t in tags:
if t not in vocab:
vocab[t] = 0
if vocab[t] < args.max_count:
ok = True
vocab[t] += 1
if ok:
out_sample.write(line)
else:
out_rest.write(line)
n += 1
out_sample.close()
out_rest.close()
|
gkucsko/NeMo
|
nemo/collections/asr/losses/ssl_losses/mlm.py
|
<reponame>gkucsko/NeMo<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from torch import nn
from nemo.core import Loss, typecheck
from nemo.core.neural_types import LabelsType, LengthsType, LogprobsType, LossType, NeuralType, SpectrogramType
__all__ = ["MLMLoss"]
class MLMLoss(Loss):
@property
def input_types(self):
"""Input types definitions for Contrastive.
"""
return {
"spec_masks": NeuralType(("B", "D", "T"), SpectrogramType()),
"decoder_outputs": NeuralType(("B", "T", "D"), LogprobsType()),
"targets": NeuralType(('B', 'T'), LabelsType()),
"decoder_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
"target_lengths": NeuralType(tuple('B'), LengthsType(), optional=True),
}
@property
def output_types(self):
"""Output types definitions for Contrastive.
loss:
NeuralType(None)
"""
return {"loss": NeuralType(elements_type=LossType())}
@property
def needs_labels(self):
return True
def __init__(
self, combine_time_steps: int = 1, mask_threshold: float = 0.8,
):
super().__init__()
self.nll_loss = nn.NLLLoss()
self.combine_time_steps = combine_time_steps
self.mask_threshold = mask_threshold
@typecheck()
def forward(self, spec_masks, decoder_outputs, targets, decoder_lengths=None, target_lengths=None):
# outputs are log_probs
masks = spec_masks.transpose(-2, -1)
# BxTxC
masks = masks.reshape(masks.shape[0], masks.shape[1] // self.combine_time_steps, -1)
masks = masks.mean(-1) > self.mask_threshold
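        # a combined time step counts as masked only when more than `mask_threshold`
        # of its grouped spectrogram values were masked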
out_masked_only = decoder_outputs[masks]
targets = F.pad(targets, (0, masks.shape[-1] - targets.shape[-1]))
targets_masked_only = targets[masks]
loss = self.nll_loss(out_masked_only, targets_masked_only)
loss = torch.mean(loss)
return loss
|
gkucsko/NeMo
|
nemo/collections/nlp/data/language_modeling/megatron/request_dataset.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
import torch
from torch.utils.data.dataset import Dataset
class GPTRequestDataset(Dataset):
"""
Args:
requests: List of prompts
tokenizer: model tokenizer
        tokens_to_generate: int value denoting the number of tokens the model should generate
        compute_logprobs: bool value denoting whether the model should generate tokens or compute logprobs
    Returns:
        data: class object
        {'data': tokens, 'tokens_to_generate': tokens_to_generate, 'compute_logprobs': compute_logprobs}
        * data: List of token ids corresponding to the prompts
        * tokens_to_generate: int value denoting the number of tokens the model should generate
        * compute_logprobs: bool value denoting whether the model should generate tokens or compute logprobs
"""
def __init__(self, requests: List, tokenizer, tokens_to_generate: int, compute_logprobs: bool) -> None:
super().__init__()
self.requests = requests
self.tokenizer = tokenizer
self.tokens_to_generate = tokens_to_generate
self.compute_logprobs = compute_logprobs
self.tokens = []
self.prompt_tags = []
# tokenize prompt
for request in self.requests:
if type(request) == dict:
prompt_tag = request['prompt_tag']
self.prompt_tags.append(prompt_tag)
text = request['text']
else:
text = request
self.tokens.append(torch.tensor(self.tokenizer.text_to_ids(text)))
if self.prompt_tags:
self.data = {
'prompt_tags': self.prompt_tags,
'data': self.tokens,
'tokens_to_generate': self.tokens_to_generate,
'compute_logprobs': self.compute_logprobs,
}
else:
self.data = {
'data': self.tokens,
'tokens_to_generate': self.tokens_to_generate,
'compute_logprobs': self.compute_logprobs,
}
def __len__(self):
return 1
def __getitem__(self, index):
return self.data
class T5RequestDataset(Dataset):
def __init__(self, request: Dict, tokenizer) -> None:
super().__init__()
self.request = request
self.tokenizer = tokenizer
# tokenize prompt
self.request['tokenized_prompt'] = ' '.join(self.tokenizer.text_to_tokens(request['prompt']))
tokens = self.tokenizer.text_to_ids(request['prompt'])
self.request['tokens'] = torch.tensor(tokens)
self.mask_prompt(self.request['prompt'])
def mask_prompt(self, sample):
if '<mask>' not in sample:
if '<extra_id_s>' not in sample:
raise ValueError(f"Did not find any <mask> or <extra_id_s> tokens in prompt {sample}.")
sample = sample.split()
sentinel_idx = 0
for i, word in enumerate(sample):
if word == '<mask>':
sample[i] = f'<extra_id_{sentinel_idx}>'
sentinel_idx += 1
sample = ' '.join(sample)
sample = torch.LongTensor(self.tokenizer.text_to_ids(sample))
self.request['masked_sample'] = sample
def __len__(self):
return 1
def __getitem__(self, index):
return self.request
|
gkucsko/NeMo
|
tests/core/mixins/adapters/test_adapter_strategy.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.core import NeuralModule
from nemo.core.classes.mixins import AdapterModuleMixin, adapter_mixin_strategies, adapter_mixins
from nemo.utils import config_utils
class DefaultModule(NeuralModule):
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(50, 50)
self.bn = torch.nn.BatchNorm1d(50)
def forward(self, x):
x = self.fc(x)
x = self.bn(x)
out = x
return out
def num_params(self):
num: int = 0
for p in self.parameters():
if p.requires_grad:
num += p.numel()
return num
class DefaultModuleAdapter(DefaultModule, AdapterModuleMixin):
def forward(self, x):
x = super(DefaultModuleAdapter, self).forward(x)
if self.is_adapter_available():
# For testing purposes, cache the adapter names
self._adapter_names = self.get_enabled_adapters()
# call forward over model adapters, summing them up
x = self.forward_enabled_adapters(x)
return x
def get_adapter_cfg(in_features=50, dim=100, norm_pos='pre'):
cfg = {
'_target_': 'nemo.collections.common.parts.adapter_modules.LinearAdapter',
'in_features': in_features,
'dim': dim,
'norm_position': norm_pos,
}
return cfg
def get_classpath(cls):
return f'{cls.__module__}.{cls.__name__}'
if adapter_mixins.get_registered_adapter(DefaultModule) is None:
adapter_mixins.register_adapter(DefaultModule, DefaultModuleAdapter)
class TestAdapterStrategy:
@pytest.mark.unit
def test_ResidualAddAdapterStrategyConfig(self):
IGNORED_ARGS = ['_target_']
result = config_utils.assert_dataclass_signature_match(
adapter_mixin_strategies.ResidualAddAdapterStrategy,
adapter_mixin_strategies.ResidualAddAdapterStrategyConfig,
ignore_args=IGNORED_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_strategy_default(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
module = DefaultModuleAdapter()
module.add_adapter(name='temp', cfg=get_adapter_cfg())
adapter = module.adapter_layer[module.get_enabled_adapters()[0]]
# update the strategy
adapter_strategy = adapter_mixin_strategies.ResidualAddAdapterStrategy()
adapter.adapter_strategy = adapter_strategy
with torch.no_grad():
assert adapter_strategy.stochastic_depth == 0.0
out = adapter_strategy.forward(x, adapter, module=module)
assert (out - x).abs().mean() < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('stochastic_depth', [0.0, 1.0])
def test_strategy_stochasic_depth(self, stochastic_depth):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
module = DefaultModuleAdapter()
module.add_adapter(name='temp', cfg=get_adapter_cfg())
# extract adapter
adapter = module.adapter_layer[module.get_enabled_adapters()[0]]
# reinitialize the final layer of the adapter module (so that it is not zero init)
adapter.module[-1].weight.data += 1
# get just module output
module.set_enabled_adapters('temp', enabled=False)
module_out = module(x)
# get module + adapter output
module.set_enabled_adapters('temp', enabled=True)
module_adapter_out = module(x)
assert (
module_out - module_adapter_out
).abs().sum() > 0 # results should not be the same after adapter forward now
adapter_strategy = adapter_mixin_strategies.ResidualAddAdapterStrategy(stochastic_depth=stochastic_depth)
adapter.adapter_strategy = adapter_strategy
module.eval()
with torch.inference_mode(): # stochastic depth disabled, no grad tracking
assert adapter.adapter_strategy.stochastic_depth == stochastic_depth
out = adapter_strategy.forward(module_out, adapter, module=module)
assert (out - module_adapter_out).abs().mean() < 1e-5
module.train()
with torch.inference_mode(): # stochastic depth enabled, but no grad tracking during training mode
out = adapter_strategy.forward(module_out, adapter, module=module)
if stochastic_depth == 0.0:
check = module_adapter_out
else:
check = module_out
assert (out - check).abs().mean() < 1e-5
|
gkucsko/NeMo
|
examples/nlp/language_modeling/megatron_gpt_prompt_learning.py
|
<filename>examples/nlp/language_modeling/megatron_gpt_prompt_learning.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks.timer import Timer
from pytorch_lightning.plugins.environments.torchelastic_environment import TorchElasticEnvironment
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import (
MegatronGPTPromptLearningModel,
)
from nemo.collections.nlp.parts.nlp_overrides import (
GradScaler,
NLPDDPPlugin,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import StatelessTimer, exp_manager
"""
This is an example of how to ptune/prompt-tune a pretrained GPT model.
Be sure to use a .nemo gpt model with this code. If you've downloaded
a model from NGC or are otherwise using a MegatronLM model, please use
either megatron_ckpt_to_nemo.py or megatron_lm_ckpt_to_nemo.py found
within this examples directory to convert your model to .nemo format.
"""
@hydra_runner(config_path="conf", config_name="megatron_gpt_prompt_learning_config")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
plugins = [NLPDDPPlugin(no_ddp_communication_hook=True, find_unused_parameters=False,)]
if cfg.trainer.precision == 16:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
plugins.append(PipelineMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, **cfg.trainer)
exp_manager(trainer, cfg.exp_manager)
# Override timer callback to a stateless one
for idx, callback in enumerate(trainer.callbacks):
if isinstance(callback, Timer):
trainer.callbacks[idx] = StatelessTimer(cfg.trainer.max_time,)
# hydra interpolation does not work here as the interpolation key is lost when PTL saves hparams
with open_dict(cfg):
cfg.model.precision = cfg.trainer.precision
# load existing or init new soft prompt GPT model
if cfg.model.get("restore_path", None):
model = MegatronGPTPromptLearningModel.restore_from(
cfg.model.restore_path, cfg.model, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector()
)
else:
model = MegatronGPTPromptLearningModel(cfg.model, trainer=trainer)
trainer.fit(model)
if __name__ == '__main__':
main()
|
gkucsko/NeMo
|
examples/nlp/dialogue/remove_ms_marco_samples_without_wellFormedAnswers.py
|
<filename>examples/nlp/dialogue/remove_ms_marco_samples_without_wellFormedAnswers.py
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from ast import literal_eval
import ijson
def main(filename):
with open(filename, 'r') as file:
objects = ijson.kvitems(file, 'wellFormedAnswers')
valid_old_key_to_new_key = {}
new_key = 0
for key, well_formed_answer in objects:
value = well_formed_answer if isinstance(well_formed_answer, list) else literal_eval(well_formed_answer)
if len(value) > 0:
valid_old_key_to_new_key[key] = str(new_key)
new_key += 1
filtered_data = {}
fieldnames = ['query', 'query_type', 'answers', 'wellFormedAnswers', 'passages']
for fieldname in fieldnames:
add_data(filename, filtered_data, fieldname, valid_old_key_to_new_key)
with open(filename, 'w') as fw:
json.dump(filtered_data, fw)
def add_data(filename, filtered_data, fieldname, valid_old_key_to_new_key):
with open(filename, 'r') as f:
objects = ijson.kvitems(f, fieldname)
filtered_data[fieldname] = {
valid_old_key_to_new_key[key]: query for key, query in objects if key in valid_old_key_to_new_key
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--filename")
args = parser.parse_args()
main(args.filename)
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/data_processor/data_processor.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from nemo.collections.nlp.data.data_utils.data_preprocessing import DataProcessor
__all__ = ['DialogueDataProcessor']
class DialogueDataProcessor(DataProcessor):
"""
Base class for Data Processing for all data sources
Data Processor is designed to be Model-independent (but Data-dependent) so that
- Encourages experimentation with a variety of models \
(BERT-style; GPT-style; T5-style), \
which have different tokenization/preprocessing requirements
- Facilitates experiments with a variety of data sources,
as data is processed into a common format
Roles
1. Processes raw files into Dialogue Input Examples.
    2. Keeps all possibly relevant information from the raw files, so that
       the Dataset class can then determine which labels to use
"""
def __init__(self):
raise NotImplementedError()
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
raise NotImplementedError()
@staticmethod
def get_relevant_idxs(dataset_split, n_samples, dev_proportion):
"""
Obtain indexes for each dataset_split, when train and dev sets are not in separate files
Args:
dataset_split: train, dev or test
n_samples: total number of samples
            dev_proportion: value from 1 to 99 representing the percentage of data assigned to the dev set
Returns:
idxs: indices for relevant samples
"""
if dataset_split in ["train", "dev"]:
n_dev = int(n_samples * (dev_proportion / 100))
dev_idxs = random.sample(list(range(n_samples)), n_dev)
if dataset_split == "dev":
idxs = dev_idxs
else:
dev_idxs_set = set(dev_idxs)
train_idxs = [idx for idx in list(range(n_samples)) if idx not in dev_idxs_set]
idxs = train_idxs
elif dataset_split == "test":
idxs = list(range(n_samples))
else:
raise ValueError("please select dataset split from train, dev and test")
return idxs
|
gkucsko/NeMo
|
examples/nlp/language_modeling/megatron_t5_eval.py
|
<reponame>gkucsko/NeMo
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
import torch
from pytorch_lightning.trainer.trainer import Trainer
from torch.utils.data import DataLoader
from nemo.collections.nlp.data.language_modeling.megatron.request_dataset import T5RequestDataset
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPPlugin, NLPSaveRestoreConnector
from nemo.utils.app_state import AppState
assert torch.cuda.is_available()
def main():
parser = ArgumentParser()
parser.add_argument("--model_file", type=str, default="", required=True, help="Pass path to model's .nemo file")
parser.add_argument(
"--prompt", type=str, default="", required=True, help="Prompt for the model (a text to complete)"
)
parser.add_argument(
"--tokens_to_generate", type=int, default="16", required=False, help="How many tokens to add to prompt"
)
parser.add_argument(
"--tensor_model_parallel_size", type=int, default=1, required=False,
)
parser.add_argument(
"--pipeline_model_parallel_size", type=int, default=1, required=False,
)
parser.add_argument(
"--pipeline_model_parallel_split_rank", type=int, default=0, required=False,
)
parser.add_argument("--precision", default="16", type=str, help="PyTorch Lightning Trainer precision flag")
args = parser.parse_args()
# cast precision to int if 32 or 16
if args.precision in ["32", "16"]:
args.precision = int(float(args.precision))
# trainer required for restoring model parallel models
trainer = Trainer(
plugins=NLPDDPPlugin(),
devices=args.tensor_model_parallel_size * args.pipeline_model_parallel_size,
accelerator='gpu',
precision=args.precision,
)
app_state = AppState()
if args.tensor_model_parallel_size > 1 or args.pipeline_model_parallel_size > 1:
app_state.model_parallel_size = args.tensor_model_parallel_size * args.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=args.tensor_model_parallel_size,
pipeline_model_parallel_size_=args.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=args.pipeline_model_parallel_split_rank,
)
model = MegatronT5Model.restore_from(
restore_path=args.model_file, trainer=trainer, save_restore_connector=NLPSaveRestoreConnector(),
)
model.freeze()
request = {
"prompt": args.prompt,
"tokens_to_generate": args.tokens_to_generate,
}
dataset = T5RequestDataset(request, model.tokenizer)
request_dl = DataLoader(dataset)
response = trainer.predict(model, request_dl)
print("***************************")
print(response)
print("***************************")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
|
gkucsko/NeMo
|
scripts/dataset_processing/tts/ljspeech/get_data.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import tarfile
import urllib.request
from pathlib import Path
import sox
import wget
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
def get_args():
parser = argparse.ArgumentParser(description='Download LJSpeech and create manifests with predefined split')
parser.add_argument("--data-root", required=True, type=Path)
parser.add_argument('--whitelist-path', type=str, default=None)
args = parser.parse_args()
return args
URL = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"
FILELIST_BASE = 'https://raw.githubusercontent.com/NVIDIA/tacotron2/master/filelists'
def __maybe_download_file(source_url, destination_path):
if not destination_path.exists():
tmp_file_path = destination_path.with_suffix('.tmp')
urllib.request.urlretrieve(source_url, filename=str(tmp_file_path))
tmp_file_path.rename(destination_path)
def __extract_file(filepath, data_dir):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
print(f"Error while extracting {filepath}. Already extracted?")
def __process_data(data_root, whitelist_path):
if whitelist_path is None:
wget.download(
"https://raw.githubusercontent.com/NVIDIA/NeMo/main/nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv",
out=str(data_root),
)
whitelist_path = data_root / "lj_speech.tsv"
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist=whitelist_path,
overwrite_cache=True,
cache_dir=data_root / "cache_dir",
)
text_normalizer_call_kwargs = {"punct_pre_process": True, "punct_post_process": True}
normalizer_call = lambda x: text_normalizer.normalize(x, **text_normalizer_call_kwargs)
# Create manifests (based on predefined NVIDIA's split)
filelists = ['train', 'val', 'test']
for split in tqdm(filelists):
# Download file list if necessary
filelist_path = data_root / f"ljs_audio_text_{split}_filelist.txt"
if not filelist_path.exists():
wget.download(f"{FILELIST_BASE}/ljs_audio_text_{split}_filelist.txt", out=str(data_root))
manifest_target = data_root / f"{split}_manifest.json"
with open(manifest_target, 'w') as f_out:
with open(filelist_path, 'r') as filelist:
print(f"\nCreating {manifest_target}...")
for line in tqdm(filelist):
basename = line[6:16]
text = line[21:].strip()
norm_text = normalizer_call(text)
# Make sure corresponding wavfile exists
wav_path = data_root / 'wavs' / f"{basename}.wav"
assert wav_path.exists(), f"{wav_path} does not exist!"
entry = {
'audio_filepath': str(wav_path),
'duration': sox.file_info.duration(wav_path),
'text': text,
'normalized_text': norm_text,
}
f_out.write(json.dumps(entry) + '\n')
def main():
args = get_args()
tarred_data_path = args.data_root / "LJSpeech-1.1.tar.bz2"
__maybe_download_file(URL, tarred_data_path)
__extract_file(str(tarred_data_path), str(args.data_root))
data_root = args.data_root / "LJSpeech-1.1"
whitelist_path = args.whitelist_path
__process_data(data_root, whitelist_path)
if __name__ == '__main__':
main()
|
gkucsko/NeMo
|
tests/collections/asr/test_ssl_models.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pytest
import torch
from omegaconf import DictConfig, ListConfig
from nemo.collections.asr.models import SpeechEncDecSelfSupervisedModel
@pytest.fixture()
def ssl_model():
preprocessor = {
'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
'params': dict({'pad_to': 16, 'dither': 0}),
}
model_defaults = {'enc_hidden': 32, 'dec_out': 128}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
},
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
},
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
},
],
},
}
spec_augment = {
'_target_': 'nemo.collections.asr.modules.MaskedPatchAugmentation',
'freq_masks': 3,
'freq_width': 20,
'patch_size': 16,
'mask_patches': 0.5,
}
loss_list_contr_mlm = {
'contr': {
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoderReconstruction',
'feat_in': model_defaults['enc_hidden'],
'feat_hidden': 128,
'feat_out': model_defaults['dec_out'],
'stride_layers': 0,
'non_stride_layers': 0,
'stride_transpose': False,
},
'loss': {
'_target_': 'nemo.collections.asr.losses.ContrastiveLoss',
'in_dim': 64,
'proj_dim': model_defaults['dec_out'],
'combine_time_steps': 1,
'quantized_targets': True,
'codebook_size': 64,
'sample_from_same_utterance_only': True,
'sample_from_non_masked': False,
'num_negatives': 3,
},
},
'mlm': {
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': model_defaults['enc_hidden'],
'num_classes': 4096,
},
'loss': {'_target_': 'nemo.collections.asr.losses.MLMLoss', 'combine_time_steps': 1},
'targets_from_loss': "contr",
},
}
modelConfig_contr_mlm = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'spec_augment': DictConfig(spec_augment),
'model_defaults': DictConfig(model_defaults),
'encoder': DictConfig(encoder),
'loss_list': DictConfig(loss_list_contr_mlm),
}
)
ssl_model = SpeechEncDecSelfSupervisedModel(cfg=modelConfig_contr_mlm)
return ssl_model
class TestSSLModel:
@pytest.mark.unit
def test_constructor(self, ssl_model):
confdict = ssl_model.to_config_dict()
instance2 = SpeechEncDecSelfSupervisedModel.from_config_dict(confdict)
assert isinstance(instance2, SpeechEncDecSelfSupervisedModel)
@pytest.mark.unit
def test_contr_nonquant(self, ssl_model):
modelConfig_contr_nonquant = ssl_model.to_config_dict()
loss_list_contr_nonquant = dict(modelConfig_contr_nonquant['loss_list'])
del loss_list_contr_nonquant['mlm']
loss_list_contr_nonquant['contr']['loss']['quantized_targets'] = False
modelConfig_contr_nonquant['loss_list'] = DictConfig(loss_list_contr_nonquant)
ssl_model = SpeechEncDecSelfSupervisedModel(cfg=modelConfig_contr_nonquant)
input_signal = torch.randn(size=(4, 64000))
length = torch.randint(low=48000, high=64000, size=[4])
with torch.no_grad():
spectrograms, spec_masks, encoded, encoded_len = ssl_model.forward(
input_signal=input_signal, input_signal_length=length
)
loss_value, loss_val_dict = ssl_model.decoder_loss_step(spectrograms, spec_masks, encoded, encoded_len)
assert len(loss_val_dict) == 1
@pytest.mark.unit
def test_contr_mlm(self, ssl_model):
input_signal = torch.randn(size=(4, 64000))
length = torch.randint(low=48000, high=64000, size=[4])
with torch.no_grad():
spectrograms, spec_masks, encoded, encoded_len = ssl_model.forward(
input_signal=input_signal, input_signal_length=length
)
loss_value, loss_val_dict = ssl_model.decoder_loss_step(spectrograms, spec_masks, encoded, encoded_len)
assert len(loss_val_dict) == 2
@pytest.mark.unit
def test_contr_mlm_multi(self, ssl_model):
modelConfig_contr_mlm_multi = ssl_model.to_config_dict()
model_defaults = modelConfig_contr_mlm_multi['model_defaults']
loss_list_contr_mlm_multi = dict(modelConfig_contr_mlm_multi['loss_list'])
loss_list_contr_mlm_multi['mlm_2'] = {
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': model_defaults['enc_hidden'],
'num_classes': 4096,
},
'loss': {'_target_': 'nemo.collections.asr.losses.MLMLoss', 'combine_time_steps': 1},
'output_from_layer': "encoder.0",
'targets_from_loss': "contr",
}
loss_list_contr_mlm_multi['mlm_3'] = {
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': model_defaults['enc_hidden'],
'num_classes': 4096,
},
'loss': {'_target_': 'nemo.collections.asr.losses.MLMLoss', 'combine_time_steps': 1},
'output_from_layer': "encoder.1",
'targets_from_loss': "contr",
}
modelConfig_contr_mlm_multi['loss_list'] = DictConfig(loss_list_contr_mlm_multi)
ssl_model = SpeechEncDecSelfSupervisedModel(cfg=modelConfig_contr_mlm_multi)
input_signal = torch.randn(size=(4, 64000))
length = torch.randint(low=48000, high=64000, size=[4])
with torch.no_grad():
spectrograms, spec_masks, encoded, encoded_len = ssl_model.forward(
input_signal=input_signal, input_signal_length=length
)
loss_value, loss_val_dict = ssl_model.decoder_loss_step(spectrograms, spec_masks, encoded, encoded_len)
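        # One entry is expected per configured loss: contr, mlm, mlm_2 and mlm_3
        # (the latter two read from intermediate encoder layers), hence 4 entries.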
assert len(loss_val_dict) == 4
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/evaluation/prepare_corpora_for_testing.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to prepare a test corpus for the ThutmoseTaggerModel from the Google Text Normalization dataset.
"""
import os
import re
from argparse import ArgumentParser
from collections import Counter
from typing import Dict, TextIO, Tuple
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import spoken_preprocessing
parser = ArgumentParser(description="Text Normalization Data Preprocessing for English")
parser.add_argument(
"--data_dir", required=True, type=str, help="Path to data directory with files like output-00000-of-00100.tsv"
)
parser.add_argument("--reference_vocab", required=True, type=str, help="Multi Reference vocabulary")
parser.add_argument("--output_file", required=True, type=str, help="Output file")
parser.add_argument(
"--sampling_count", required=True, type=int, help="Number of examples per class, you want, use -1 for all examples"
)
args = parser.parse_args()
def process_file(
inputname: str,
out: TextIO,
out_raw: TextIO,
reference_vcb: Dict[Tuple[str, str], Dict[str, int]],
sampling_vcb: Dict[str, int],
) -> None:
words = []
reference_words = [] # size may be different
semiotic_info = []
raw_lines = []
sent_ok = True if args.sampling_count == -1 else False
with open(inputname, "r", encoding="utf-8") as f:
for line in f:
if line.startswith("<eos>"):
if len(words) > 0 and sent_ok:
out.write(
" ".join(words) + "\t" + " ".join(reference_words) + "\t" + ";".join(semiotic_info) + "\n"
)
out_raw.write("\n".join(raw_lines) + "\n" + line)
words = []
reference_words = []
semiotic_info = []
raw_lines = []
sent_ok = True if args.sampling_count == -1 else False
else:
raw_lines.append(line.strip())
cls, written, spoken = line.strip().split("\t")
spoken = spoken_preprocessing(spoken)
written = written.casefold()
references = set()
if spoken == "sil":
continue
if spoken == "<self>":
words.append(written)
reference_words.append(written)
# if reference is <self>, but the word has itn conversions in our dictionary, add them
for cls in ["CARDINAL", "ORDINAL", "DATE"]: # date, ex sixties -> 60s
k = (cls, written)
if k in reference_vcb:
for tr_variant in reference_vcb[k]:
references.add(tr_variant)
semiotic_info.append(
cls
+ " "
+ str(len(words) - 1)
+ " "
+ str(len(words))
+ " | "
+ " | ".join(references)
)
break
continue
spoken_words = spoken.split()
words.extend(spoken_words)
k = (cls, spoken)
if k in reference_vcb:
for tr_variant in reference_vcb[k]:
references.add(tr_variant)
references.add(spoken)
references.add(written)
for tr_variant in list(references):
# 6,51 km² => 6,51 km 2
(tr_variant2, n2) = re.subn(r"²", " 2", tr_variant)
(tr_variant3, n3) = re.subn(r"³", " 3", tr_variant)
if n2 > 0:
references.add(tr_variant2)
if n3 > 0:
references.add(tr_variant3)
semiotic_info.append(
cls
+ " "
+ str(len(words) - len(spoken_words))
+ " "
+ str(len(words))
+ " | "
+ " | ".join(list(references))
)
reference_words.append(written.casefold())
if cls not in sampling_vcb:
sampling_vcb[cls] = 0
if sampling_vcb[cls] < args.sampling_count:
sent_ok = True
sampling_vcb[cls] += 1
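# Illustrative output line from process_file (hypothetical sentence): spoken words,
# reference words and semiotic info are tab-separated, e.g.
#   "two thousand ten\t2010\tDATE 0 3 | two thousand ten | 2010"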
def main() -> None:
if not os.path.exists(args.data_dir):
raise ValueError(f"Data dir {args.data_dir} does not exist")
reference_vcb = {}
with open(args.reference_vocab, "r", encoding="utf-8") as f:
for line in f:
sem, spoken, written, freq = line.strip().split("\t")
k = (sem, spoken)
if k not in reference_vcb:
reference_vcb[k] = {}
reference_vcb[k][written] = int(freq)
sampling_vcb = Counter()
out = open(args.output_file, "w", encoding="utf-8")
out_raw = open(args.output_file + ".raw", "w", encoding="utf-8")
input_paths = sorted([os.path.join(args.data_dir, f) for f in os.listdir(args.data_dir)])
for inputname in input_paths:
process_file(inputname, out, out_raw, reference_vcb, sampling_vcb)
out.close()
out_raw.close()
if __name__ == "__main__":
main()
|
gkucsko/NeMo
|
nemo_text_processing/inverse_text_normalization/vi/verbalizers/date.py
|
<reponame>gkucsko/NeMo
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.vi.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
try:
import pynini
from pynini.lib import pynutil
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
PYNINI_AVAILABLE = False
class DateFst(GraphFst):
"""
Finite state transducer for verbalizing date, e.g.
date { month: "1" year: "2012"} -> tháng 1 năm 2012
date { day: "5" month: "10" year: "2021" preserve_order: true } -> 5 tháng 10 năm 2021
"""
def __init__(self):
super().__init__(name="date", kind="verbalize")
day = (
pynutil.delete("day:")
+ delete_space
+ pynutil.delete('"')
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete('"')
)
month = (
pynutil.delete("month:")
+ delete_space
+ pynutil.delete('"')
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete('"')
)
year = (
pynutil.delete("year:")
+ delete_space
+ pynutil.delete('"')
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ delete_space
+ pynutil.delete('"')
)
# (day) month year
# day month
graph_dm = day + delete_space + pynutil.insert(" tháng ") + month
graph_dmy = graph_dm + delete_space + pynutil.insert(" năm ") + year
graph_m = pynutil.insert("tháng ") + month
graph_my = pynutil.insert("tháng ") + month + delete_space + pynutil.insert(" năm ") + year
graph_y = pynutil.insert("năm ") + year
optional_preserve_order = pynini.closure(
pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
| pynutil.delete("field_order:")
+ delete_space
+ pynutil.delete('"')
+ NEMO_NOT_QUOTE
+ pynutil.delete('"')
+ delete_space
)
final_graph = (graph_y | graph_m | graph_dm | graph_dmy | graph_my) + delete_space + optional_preserve_order
delete_tokens = self.delete_tokens(final_graph)
self.fst = delete_tokens.optimize()
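# Rough usage sketch (hedged): applying this verbalizer to a tagged token string such as
# 'date { month: "1" year: "2012" }' should yield "tháng 1 năm 2012", as in the class
# docstring; the surrounding verbalizer pipeline (outside this file) drives the FST.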
|
gkucsko/NeMo
|
tests/collections/asr/test_asr_modules.py
|
<reponame>gkucsko/NeMo<gh_stars>0
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from omegaconf import OmegaConf
from nemo.collections.asr import modules
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
from nemo.utils import config_utils, logging
class TestASRModulesBasicTests:
@pytest.mark.unit
def test_AudioToMelSpectrogramPreprocessor_config(self):
# Test that dataclass matches signature of module
result = config_utils.assert_dataclass_signature_match(
modules.AudioToMelSpectrogramPreprocessor,
modules.audio_preprocessing.AudioToMelSpectrogramPreprocessorConfig,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_AudioToMelSpectrogramPreprocessor_batch(self):
# Test 1 that should test the pure stft implementation as much as possible
instance1 = modules.AudioToMelSpectrogramPreprocessor(normalize="per_feature", dither=0, pad_to=0)
        # Ensure that the two functions behave similarly
for _ in range(10):
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
with torch.no_grad():
# batch size 1
res_instance, length_instance = [], []
for i in range(input_signal.size(0)):
res_ins, length_ins = instance1(input_signal=input_signal[i : i + 1], length=length[i : i + 1])
res_instance.append(res_ins)
length_instance.append(length_ins)
res_instance = torch.cat(res_instance, 0)
length_instance = torch.cat(length_instance, 0)
# batch size 4
res_batch, length_batch = instance1(input_signal=input_signal, length=length)
assert res_instance.shape == res_batch.shape
assert length_instance.shape == length_batch.shape
diff = torch.mean(torch.abs(res_instance - res_batch))
assert diff <= 1e-3
diff = torch.max(torch.abs(res_instance - res_batch))
assert diff <= 1e-3
@pytest.mark.unit
    def test_SpectrogramAugmentation(self):
# Make sure constructor works
instance1 = modules.SpectrogramAugmentation(
freq_masks=10, time_masks=3, rect_masks=3, use_numba_spec_augment=False
)
assert isinstance(instance1, modules.SpectrogramAugmentation)
# Make sure forward doesn't throw with expected input
instance0 = modules.AudioToMelSpectrogramPreprocessor(dither=0)
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
res0 = instance0(input_signal=input_signal, length=length)
res = instance1(input_spec=res0[0], length=length)
assert res.shape == res0[0].shape
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
    def test_SpectrogramAugmentation_numba_kernel(self, caplog):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
logging._logger.propagate = True
original_verbosity = logging.get_verbosity()
logging.set_verbosity(logging.DEBUG)
caplog.set_level(logging.DEBUG)
# Make sure constructor works
instance1 = modules.SpectrogramAugmentation(
freq_masks=10, time_masks=3, rect_masks=3, use_numba_spec_augment=True
)
assert isinstance(instance1, modules.SpectrogramAugmentation)
# Make sure forward doesn't throw with expected input
instance0 = modules.AudioToMelSpectrogramPreprocessor(dither=0)
input_signal = torch.randn(size=(8, 512))
length = torch.randint(low=161, high=500, size=[8])
res0 = instance0(input_signal=input_signal, length=length)
res = instance1(input_spec=res0[0], length=length)
assert res.shape == res0[0].shape
        # check that the numba kernel debug message indicates that it is available for use
assert """Numba SpecAugment kernel is available""" in caplog.text
logging._logger.propagate = False
logging.set_verbosity(original_verbosity)
@pytest.mark.unit
    def test_SpectrogramAugmentation_config(self):
# Test that dataclass matches signature of module
result = config_utils.assert_dataclass_signature_match(
modules.SpectrogramAugmentation, modules.audio_preprocessing.SpectrogramAugmentationConfig,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_CropOrPadSpectrogramAugmentation(self):
# Make sure constructor works
audio_length = 128
instance1 = modules.CropOrPadSpectrogramAugmentation(audio_length=audio_length)
assert isinstance(instance1, modules.CropOrPadSpectrogramAugmentation)
# Make sure forward doesn't throw with expected input
instance0 = modules.AudioToMelSpectrogramPreprocessor(dither=0)
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
res0 = instance0(input_signal=input_signal, length=length)
res, new_length = instance1(input_signal=res0[0], length=length)
assert res.shape == torch.Size([4, 64, audio_length])
assert all(new_length == torch.tensor([128] * 4))
@pytest.mark.unit
def test_CropOrPadSpectrogramAugmentation_config(self):
# Test that dataclass matches signature of module
result = config_utils.assert_dataclass_signature_match(
modules.CropOrPadSpectrogramAugmentation,
modules.audio_preprocessing.CropOrPadSpectrogramAugmentationConfig,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_MaskedPatchAugmentation(self):
# Make sure constructor works
audio_length = 128
instance1 = modules.MaskedPatchAugmentation(patch_size=16, mask_patches=0.5, freq_masks=2, freq_width=10)
assert isinstance(instance1, modules.MaskedPatchAugmentation)
# Make sure forward doesn't throw with expected input
instance0 = modules.AudioToMelSpectrogramPreprocessor(dither=0)
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
res0 = instance0(input_signal=input_signal, length=length)
res = instance1(input_spec=res0[0], length=length)
assert res.shape == res0[0].shape
@pytest.mark.unit
def test_MaskedPatchAugmentation_config(self):
# Test that dataclass matches signature of module
result = config_utils.assert_dataclass_signature_match(
modules.MaskedPatchAugmentation, modules.audio_preprocessing.MaskedPatchAugmentationConfig,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_RNNTDecoder(self):
vocab = list(range(10))
vocab = [str(x) for x in vocab]
vocab_size = len(vocab)
pred_config = OmegaConf.create(
{
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': 32, 'pred_rnn_layers': 1,},
'vocab_size': vocab_size,
'blank_as_pad': True,
}
)
prednet = modules.RNNTDecoder.from_config_dict(pred_config)
# num params
pred_hidden = pred_config.prednet.pred_hidden
embed = (vocab_size + 1) * pred_hidden # embedding with blank
rnn = (
2 * 4 * (pred_hidden * pred_hidden + pred_hidden)
) # (ih + hh) * (ifco gates) * (indim * hiddendim + bias)
assert prednet.num_weights == (embed + rnn)
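        # Worked numbers for this config (vocab_size=10, pred_hidden=32):
        #   embed = (10 + 1) * 32          = 352
        #   rnn   = 2 * 4 * (32 * 32 + 32) = 8448
        #   total                          = 8800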
# State initialization
x_ = torch.zeros(4, dtype=torch.float32)
states = prednet.initialize_state(x_)
for state_i in states:
assert state_i.dtype == x_.dtype
assert state_i.device == x_.device
assert state_i.shape[1] == len(x_)
# Blank hypotheses test
blank = vocab_size
hyp = Hypothesis(score=0.0, y_sequence=[blank])
cache = {}
pred, states, _ = prednet.score_hypothesis(hyp, cache)
assert pred.shape == torch.Size([1, 1, pred_hidden])
assert len(states) == 2
for state_i in states:
assert state_i.dtype == pred.dtype
assert state_i.device == pred.device
assert state_i.shape[1] == len(pred)
# Blank stateless predict
g, states = prednet.predict(y=None, state=None, add_sos=False, batch_size=1)
assert g.shape == torch.Size([1, 1, pred_hidden])
assert len(states) == 2
for state_i in states:
assert state_i.dtype == g.dtype
assert state_i.device == g.device
assert state_i.shape[1] == len(g)
# Blank stateful predict
g, states2 = prednet.predict(y=None, state=states, add_sos=False, batch_size=1)
assert g.shape == torch.Size([1, 1, pred_hidden])
assert len(states2) == 2
for state_i, state_j in zip(states, states2):
assert (state_i - state_j).square().sum().sqrt() > 0.0
# Predict with token and state
token = torch.full([1, 1], fill_value=0, dtype=torch.long)
g, states = prednet.predict(y=token, state=states2, add_sos=False, batch_size=None)
assert g.shape == torch.Size([1, 1, pred_hidden])
assert len(states) == 2
# Predict with blank token and no state
token = torch.full([1, 1], fill_value=blank, dtype=torch.long)
g, states = prednet.predict(y=token, state=None, add_sos=False, batch_size=None)
assert g.shape == torch.Size([1, 1, pred_hidden])
assert len(states) == 2
@pytest.mark.unit
def test_RNNTJoint(self):
vocab = list(range(10))
vocab = [str(x) for x in vocab]
vocab_size = len(vocab)
batchsize = 4
encoder_hidden = 64
pred_hidden = 32
joint_hidden = 16
joint_cfg = OmegaConf.create(
{
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'num_classes': vocab_size,
'vocabulary': vocab,
'jointnet': {
'encoder_hidden': encoder_hidden,
'pred_hidden': pred_hidden,
'joint_hidden': joint_hidden,
'activation': 'relu',
},
}
)
jointnet = modules.RNNTJoint.from_config_dict(joint_cfg)
enc = torch.zeros(batchsize, encoder_hidden, 48) # [B, D1, T]
dec = torch.zeros(batchsize, pred_hidden, 24) # [B, D2, U]
# forward call test
out = jointnet(encoder_outputs=enc, decoder_outputs=dec)
assert out.shape == torch.Size([batchsize, 48, 24, vocab_size + 1]) # [B, T, U, V + 1]
# joint() step test
enc2 = enc.transpose(1, 2) # [B, T, D1]
dec2 = dec.transpose(1, 2) # [B, U, D2]
out2 = jointnet.joint(enc2, dec2) # [B, T, U, V + 1]
assert (out - out2).abs().sum() <= 1e-5
# assert vocab size
assert jointnet.num_classes_with_blank == vocab_size + 1
|
gkucsko/NeMo
|
nemo/collections/nlp/data/common/sequence_to_sequence_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import get_indexed_dataset_
from nemo.collections.nlp.data.language_modeling.text_memmap_dataset import TextMemMapDataset
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ['SequenceToSequenceDataset', 'TextMemmapSequenceToSequenceDataset']
class SequenceToSequenceDataset(Dataset):
"""Sequence to Sequence Dataset in memory."""
def __init__(
self,
src_file_name: str,
tgt_file_name: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
max_src_seq_length: int,
max_tgt_seq_length: int,
):
super().__init__()
self.src_file_name = src_file_name
self.tgt_file_name = tgt_file_name
self.src_tokenizer = src_tokenizer
self.tgt_tokenizer = tgt_tokenizer
self.max_src_seq_length = max_src_seq_length
self.max_tgt_seq_length = max_tgt_seq_length
assert self.max_src_seq_length > 0
assert self.max_tgt_seq_length > 0
self._check_files_exist()
self._get_examples()
def _check_files_exist(self):
if not os.path.exists(self.src_file_name):
raise FileNotFoundError(f"Source file {self.src_file_name} not found")
if not os.path.exists(self.tgt_file_name):
raise FileNotFoundError(f"Source file {self.src_file_name} not found")
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
example = self.examples[idx]
text_enc = example['src']
text_dec = example['tgt'][:-1]
labels = example['tgt'][1:]
return {'text_enc': text_enc, 'text_dec': text_dec, 'labels': labels}
def _get_examples(self):
self.examples = []
with open(self.src_file_name, encoding='utf8') as f_src, open(self.tgt_file_name, encoding='utf8') as f_tgt:
for i, (src, tgt) in enumerate(zip(f_src, f_tgt)):
if i % 10000 == 0 and i != 0:
logging.info(f"Read {i} lines from {self.src_file_name} & {self.tgt_file_name}")
src = (
[self.src_tokenizer.bos_id]
+ self.src_tokenizer.text_to_ids(src.strip())
+ [self.src_tokenizer.eos_id]
)
tgt = (
[self.tgt_tokenizer.bos_id]
+ self.tgt_tokenizer.text_to_ids(tgt.strip())
+ [self.tgt_tokenizer.eos_id]
)
if len(src) <= self.max_src_seq_length and len(tgt) < self.max_tgt_seq_length:
self.examples.append({'src': src, 'tgt': tgt})
def collate_fn(self, batch):
enc_query = [item['text_enc'] for item in batch]
dec_input = [item['text_dec'] for item in batch]
labels = [item['labels'] for item in batch]
if isinstance(enc_query[0], np.ndarray):
enc_query = [x.tolist() for x in enc_query]
if isinstance(dec_input[0], np.ndarray):
dec_input = [x.tolist() for x in dec_input]
if isinstance(labels[0], np.ndarray):
labels = [x.tolist() for x in labels]
max_dec_input_length = max([len(item) for item in dec_input]) if dec_input else 0
max_enc_query_length = max([len(item) for item in enc_query]) if enc_query else 0
max_label_length = max([len(item) for item in labels]) if labels else 0
loss_mask = [([1] * (len(item))) + ([0] * (max_label_length - len(item))) for item in labels]
enc_query = [item + [self.src_tokenizer.pad_id] * (max_enc_query_length - len(item)) for item in enc_query]
dec_input = [item + [self.tgt_tokenizer.pad_id] * (max_dec_input_length - len(item)) for item in dec_input]
labels = [item + [self.tgt_tokenizer.pad_id] * (max_label_length - len(item)) for item in labels]
enc_query = torch.LongTensor(enc_query)
dec_input = torch.LongTensor(dec_input)
labels = torch.LongTensor(labels)
loss_mask = torch.LongTensor(loss_mask)
enc_mask = (enc_query != self.src_tokenizer.pad_id).long()
dec_mask = (dec_input != self.tgt_tokenizer.pad_id).long()
return {
'text_enc': enc_query,
'text_dec': dec_input,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
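    # Illustrative padding behaviour (hypothetical batch): for two examples with 5 and 8
    # encoder tokens, enc_query is padded with pad_id to shape [2, 8] and enc_mask is 0
    # exactly at the three padded positions of the shorter example.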
class TextMemmapSequenceToSequenceDataset(SequenceToSequenceDataset):
"""Sequence to Sequence Dataset in memory."""
def __init__(
self,
src_file_name: str,
tgt_file_name: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
max_src_seq_length: int,
max_tgt_seq_length: int,
):
super().__init__(
src_file_name=src_file_name,
tgt_file_name=tgt_file_name,
src_tokenizer=src_tokenizer,
tgt_tokenizer=tgt_tokenizer,
max_src_seq_length=max_src_seq_length,
max_tgt_seq_length=max_tgt_seq_length,
)
def __len__(self):
return len(self.src_dataset)
def __getitem__(self, idx):
src = self.src_dataset[idx]
if len(src) > self.max_src_seq_length - 2:
src = src[: self.max_src_seq_length - 2]
src = [self.src_tokenizer.bos_id] + src + [self.src_tokenizer.eos_id]
tgt = self.tgt_dataset[idx]
if len(tgt) > self.max_tgt_seq_length - 1:
tgt = tgt[: self.max_tgt_seq_length - 1]
text_enc = src
text_dec = [self.tgt_tokenizer.bos_id] + tgt
labels = tgt + [self.tgt_tokenizer.eos_id]
return {'text_enc': text_enc, 'text_dec': text_dec, 'labels': labels}
def _get_examples(self):
self.src_dataset = TextMemMapDataset(dataset_paths=[self.src_file_name], tokenizer=self.src_tokenizer)
self.tgt_dataset = TextMemMapDataset(dataset_paths=[self.tgt_file_name], tokenizer=self.tgt_tokenizer)
        assert len(self.src_dataset) == len(self.tgt_dataset), "src and tgt have different numbers of lines"
class BinarizedMemmapSequenceToSequenceDataset(SequenceToSequenceDataset):
"""Sequence to Sequence Dataset based on Megatron Dataset Utils."""
def __init__(
self,
src_dataset_prefix,
tgt_dataset_prefix,
src_tokenizer,
tgt_tokenizer,
max_src_seq_length,
max_tgt_seq_length,
start_index=0,
end_index=None,
data_impl='mmap',
skip_warmup=True,
seed=1337,
):
self.src_dataset_prefix = src_dataset_prefix
self.tgt_dataset_prefix = tgt_dataset_prefix
self.src_tokenizer = src_tokenizer
self.tgt_tokenizer = tgt_tokenizer
self.data_impl = data_impl
self.skip_warmup = skip_warmup
self.start_index = start_index
self.end_index = end_index
self.max_src_seq_length = max_src_seq_length
self.max_tgt_seq_length = max_tgt_seq_length
self.seed = seed
super().__init__(
src_file_name=src_dataset_prefix,
tgt_file_name=tgt_dataset_prefix,
src_tokenizer=src_tokenizer,
tgt_tokenizer=tgt_tokenizer,
max_src_seq_length=max_src_seq_length,
max_tgt_seq_length=max_tgt_seq_length,
)
self._dataset_length = lambda dataset: dataset.sizes.shape[0]
if not end_index:
self.end_index = self._dataset_length(self.src_indexed_dataset) - 1
self._print_stats('Source Dataset', self.start_index, self.end_index)
self._print_stats('Target Dataset', self.start_index, self.end_index)
    def __len__(self):
        return self.end_index - self.start_index
def _check_files_exist(self):
if not os.path.exists(self.src_dataset_prefix + ".bin") or not os.path.exists(
self.src_dataset_prefix + ".idx"
):
raise FileNotFoundError(f"{self.src_dataset_prefix}.bin or {self.src_dataset_prefix}.idx not found")
if not os.path.exists(self.tgt_dataset_prefix + ".bin") or not os.path.exists(
self.tgt_dataset_prefix + ".idx"
):
raise FileNotFoundError(f"{self.tgt_dataset_prefix}.bin or {self.tgt_dataset_prefix}.idx not found")
def _get_examples(self):
self.src_indexed_dataset = self._get_indexed_dataset(self.src_dataset_prefix, self.data_impl, self.skip_warmup)
self.tgt_indexed_dataset = self._get_indexed_dataset(self.tgt_dataset_prefix, self.data_impl, self.skip_warmup)
assert len(self.src_indexed_dataset) == len(self.tgt_indexed_dataset)
def _get_indexed_dataset(self, data_prefix, data_impl, skip_warmup):
indexed_dataset = get_indexed_dataset_(data_prefix, data_impl, skip_warmup)
return indexed_dataset
def _print_stats(self, name, start, end):
logging.info(' {}:'.format(name))
logging.info(' sentence indices in [{}, {}) total of {} ' 'sentences'.format(start, end, end - start))
def __getitem__(self, idx):
if isinstance(idx, np.int64):
idx = idx.item()
local_idx = idx + self.start_index
assert local_idx < self.end_index
src = self.src_indexed_dataset[local_idx]
if len(src) > self.max_src_seq_length - 2:
src = src[: self.max_src_seq_length - 2]
text_enc = np.concatenate([[self.src_tokenizer.bos_id], src, [self.src_tokenizer.eos_id]])
tgt = self.tgt_indexed_dataset[local_idx]
if len(tgt) > self.max_tgt_seq_length - 2:
tgt = tgt[: self.max_tgt_seq_length - 2]
text_dec = np.concatenate([[self.tgt_tokenizer.bos_id], tgt])
labels = np.concatenate([tgt, [self.tgt_tokenizer.eos_id]])
return {'text_enc': text_enc, 'text_dec': text_dec, 'labels': labels}
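        # Teacher-forcing layout (hypothetical token ids): for tgt = [5, 6, 7],
        # text_dec = [bos, 5, 6, 7] and labels = [5, 6, 7, eos], i.e. decoder inputs
        # and labels are the same sequence shifted by one position.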
|
gkucsko/NeMo
|
examples/asr/asr_adapters/eval_asr_adapter.py
|
<reponame>gkucsko/NeMo
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Evaluate an adapted model
python eval_asr_adapter.py \
--config-path="../conf/asr_adapters" \
--config-name="asr_adaptation.yaml" \
model.pretrained_model=null \
model.nemo_model=null \
model.adapter.adapter_name=<name of the adapter to evaluate> \
model.test_ds.manifest_filepath="<Path to validation/test manifest>" \
model.test_ds.batch_size=16 \
model.train_ds.manifest_filepath=null \
model.validation_ds.manifest_filepath=null \
model.adapter.in_features=null \
trainer.devices=1 \
trainer.precision=32
# Pretrained Models
For documentation on existing pretrained models, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf, open_dict
from nemo.collections.asr.models import ASRModel
from nemo.core import adapter_mixins
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
def update_encoder_config_to_support_adapter(model_cfg):
with open_dict(model_cfg):
adapter_metadata = adapter_mixins.get_registered_adapter(model_cfg.encoder._target_)
if adapter_metadata is not None:
model_cfg.encoder._target_ = adapter_metadata.adapter_class_path
def update_model_cfg(original_cfg, new_cfg):
with open_dict(new_cfg):
# drop keys which dont exist in old config
new_keys = list(new_cfg.keys())
for key in new_keys:
if key not in original_cfg:
new_cfg.pop(key)
print("Removing unavailable key from config :", key)
new_cfg = OmegaConf.merge(original_cfg, new_cfg)
return new_cfg
@hydra_runner(config_path="../conf/asr_adapters", config_name="asr_adaptation.yaml")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if cfg.model.pretrained_model is None and cfg.model.nemo_model is None:
raise ValueError("Either set `cfg.model.nemo_model` or `cfg.model.pretrained_model`")
if cfg.model.pretrained_model is not None and cfg.model.nemo_model is not None:
raise ValueError("Cannot set `cfg.model.nemo_model` and `cfg.model.pretrained_model`. Select one only.")
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
if cfg.model.pretrained_model is not None:
model_cfg = ASRModel.from_pretrained(cfg.model.pretrained_model, return_config=True)
update_encoder_config_to_support_adapter(model_cfg)
model = ASRModel.from_pretrained(cfg.model.pretrained_model, override_config_path=model_cfg, trainer=trainer)
else:
model_cfg = ASRModel.restore_from(cfg.model.nemo_model, return_config=True)
update_encoder_config_to_support_adapter(model_cfg)
model = ASRModel.restore_from(cfg.model.nemo_model, override_config_path=model_cfg, trainer=trainer)
# Setup model for finetuning (train and validation only)
cfg.model.test_ds = update_model_cfg(model.cfg.test_ds, cfg.model.test_ds)
# Call the dataloaders and optimizer + scheduler
model.setup_multiple_test_data(cfg.model.test_ds)
# Setup adapters
with open_dict(cfg.model.adapter):
adapter_name = cfg.model.adapter.pop("adapter_name", None)
# Disable all other adapters, enable just the current adapter.
model.set_enabled_adapters(enabled=False) # disable all adapters prior to training
if adapter_name is not None:
model.set_enabled_adapters(adapter_name, enabled=True) # enable just one adapter by name if provided
# First, Freeze all the weights of the model (not just encoder, everything)
model.freeze()
# Finally, train model
trainer.test(model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
|
gkucsko/NeMo
|
examples/nlp/text_normalization_as_tagging/evaluation/eval_per_class.py
|
<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to compare the inference output of the Thutmose tagger with a multi-reference file.
Additional per-class reports are stored in separate files, one for each semiotic class.
USAGE Example:
python eval_per_class.py \
--inference_file= \
--reference_file= \
--output_file=
The inference file is a tsv file in which the first column contains the predicted sentence text.
The reference file is a tsv file in which
the first column contains the input sentence text,
the second column contains the reference sentence text (taken from Google TN dataset)
the third column (optional) contains additional acceptable references for semiotic spans in this sentence.
E.g.
mizoguchi akiko september twenty ten mizoguchi akiko september 2010 DATE 2 5 | sept 2010 | sep. 2010 ...
The script generates:
    a file with a report on accuracy per semiotic class (output_file).
    files (<output_file>.<semiotic_class>) with the sentences containing errors in the corresponding semiotic span.
"""
import glob
import os
from argparse import ArgumentParser
from collections import Counter
parser = ArgumentParser(description="Compare inference output with multi-reference, print report per class")
parser.add_argument("--inference_file", type=str, required=True, help="Path to inference file 1")
parser.add_argument("--reference_file", type=str, required=True, help="Path to reference file")
parser.add_argument("--output_file", type=str, required=True, help="Path to output file")
args = parser.parse_args()
# Main code
if __name__ == '__main__':
# delete all class-specific reports, as they are created in the append mode
for f in glob.glob(args.output_file + ".*"):
os.remove(f)
total_count = Counter()
correct_count = Counter()
f_ref = open(args.reference_file, "r", encoding="utf-8")
f_infer = open(args.inference_file, "r", encoding="utf-8")
f_out = open(args.output_file, "w", encoding="utf-8")
lines_ref = f_ref.readlines()
lines_infer = f_infer.readlines()
f_ref.close()
f_infer.close()
if len(lines_ref) != len(lines_infer):
raise ValueError(
"Number of lines doesn't match: len(lines_ref)="
+ str(len(lines_ref))
+ "; len(lines_infer)="
+ str(len(lines_infer))
)
for i in range(len(lines_infer)):
_, inp_str, _, tag_with_swap_str, semiotic = lines_infer[i].strip().split("\t")
input_words = inp_str.split(" ")
predicted_tags = tag_with_swap_str.split(" ")
predicted_words = predicted_tags[:]
for k in range(len(predicted_tags)):
t = predicted_tags[k]
if t == "<SELF>":
predicted_words[k] = input_words[k]
elif t == "<DELETE>":
predicted_words[k] = ""
else:
predicted_words[k] = predicted_words[k].replace(">", "").replace("<", "")
parts = lines_ref[i].strip().split("\t")
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Bad format: " + lines_ref[i])
if len(parts) == 3: # there are non-trivial semiotic spans
spans = parts[2].split(";")
for span in spans:
span_parts = span.split(" | ")
try:
sem, begin, end = span_parts[0].split(" ")
except Exception:
print("error: ", lines_ref[i])
continue
begin = int(begin)
end = int(end)
ok = False
predicted_span = " ".join(predicted_words[begin:end]).replace("_", " ").replace(" ", "").casefold()
input_span = " ".join(input_words[begin:end])
total_count[sem] += 1
for tr_variant in span_parts[1:]:
ref_span = tr_variant.replace("_", " ").replace(" ", "").casefold()
if ref_span == predicted_span:
ok = True
correct_count[sem] += 1
break
if not ok:
out_sem = open(args.output_file + "." + sem, "a", encoding="utf-8")
out_sem.write(
"error: pred="
+ " ".join(predicted_words[begin:end])
+ "; inp="
+ input_span
+ "; ref="
+ span
+ "\n"
)
out_sem.write("\tinput=" + " ".join(input_words) + "\n")
out_sem.write("\ttags=" + " ".join(predicted_tags) + "\n")
out_sem.write("\tpred=" + " ".join(predicted_words) + "\n")
out_sem.write("\tsemiotic=" + semiotic + "\n")
out_sem.write("\tref=" + parts[1] + "\n")
out_sem.close()
f_out.write("class\ttotal\tcorrect\terrors\taccuracy\n")
for sem in total_count:
f_out.write(
sem
+ "\t"
+ str(total_count[sem])
+ "\t"
+ str(correct_count[sem])
+ "\t"
+ str(total_count[sem] - correct_count[sem])
+ "\t"
+ str(correct_count[sem] / total_count[sem])
+ "\n"
)
f_out.close()
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/dataset/dialogue_s2s_generation_dataset.py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
class DialogueS2SGenerationDataset(DialogueDataset):
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
""" Constructor
Designed for free form generation tasks such as Dialogue Response Generation
Args:
dataset_split: dataset split
dialogues_processor: dialogues processor
tokenizer: tokenizer
cfg: cfg container for dataset
"""
self.cfg = cfg
self.input_label_type = self.cfg.input_field
self.output_label_type = self.cfg.output_field
self.tokenizer = tokenizer
if not isinstance(dataset_split, str):
dataset_split = dataset_split[0]
self.features = dialogues_processor.get_dialog_examples(dataset_split)
self.features = self.remove_invalid_samples(self.features)
if self.cfg.debug_mode:
self.features = self.features[:16]
@staticmethod
def format_actions(prompt_template, actions):
"""
Formats actions based on prompt_template
Args:
            prompt_template: determines which of acts, slot names and slot values appear in the formatted actions
            actions: list of actions, each a dict with keys 'act', 'slot' and 'values'
Returns:
formatted_actions: string representations of actions, formatted based on the fields needed.
"""
actions_str = []
for action in actions:
act = action['act'].lower()
slot = action['slot']
value = action['values'][0] if action['values'] else ''
if prompt_template == 'values':
action_str = value
elif prompt_template == 'slots_values':
if value:
action_str = '{} ({})'.format(slot, value)
else:
action_str = slot
elif prompt_template == 'acts_slots_values':
if value:
action_str = '{} {} ({})'.format(act, slot, value)
elif slot:
action_str = '{} {}'.format(act, slot)
else:
action_str = act
else:
raise ValueError(
"Please set model.dataset.prompt_template to acts_slots_values, slots_values or values"
)
actions_str.append(action_str)
return ' '.join(actions_str)
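    # Illustrative input/output for format_actions (hypothetical action dict):
    #   actions = [{'act': 'INFORM', 'slot': 'area', 'values': ['east']}]
    #   prompt_template='values'            -> 'east'
    #   prompt_template='slots_values'      -> 'area (east)'
    #   prompt_template='acts_slots_values' -> 'inform area (east)'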
def remove_invalid_samples(self, features):
valid_idxs = []
for i in range(len(features)):
for field in ['utterance', 'system_utterance', 'system_actions']:
if field in features[i].data:
features[i].data["labels"][field] = features[i].data[field]
all_fields = self.input_label_type.split('+') + self.output_label_type.split('+')
all_fields_non_empty = True
for field in all_fields:
if not features[i].data["labels"][field]:
all_fields_non_empty = False
if all_fields_non_empty:
valid_idxs.append(i)
return [features[i] for i in valid_idxs]
def __len__(self):
return len(self.features)
def get_n_tokens_in_sentence(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding=False, return_tensors="pt"
)
output = torch.squeeze(encodings_dict['input_ids'])
return len(output) if len(output.size()) > 0 else 0
def default_encode(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding="max_length", return_tensors="pt"
)
input_ids = torch.squeeze(encodings_dict['input_ids'])
attn_masks = torch.squeeze(encodings_dict['attention_mask'])
return encodings_dict, input_ids, attn_masks
def format_prompt(self, ex):
'''
Formats training prompt based on self.input_field_type
Training example:
e.g. response: <response> # input_label_type = response
e.g. utterance: <utterance> # input_label_type = utterance
e.g. passage: <passage> utterance: <utterance> # input_label_type = passage+utterance
'''
parts = self.input_label_type.split('+')
input_sentence = ' '.join([part + ': ' + ex["labels"][part] for part in parts])
return input_sentence
def __getitem__(self, idx: int):
'''
        Shows what the input and output samples look like.
This template can be changed
Training example:
e.g. INPUT - "response: <response>" OUTPUT - "<fluent_response>" # input_label_type = response, output_label_type = fluent_response
e.g. INPUT - "utterance: <utterance>" OUTPUT - "<response>" # input_label_type = utterance, output_label_type = response
e.g. INPUT - "passage: <passage> utterance: <utterance>" OUTPUT - "<response>" # input_label_type = passage+utterance, output_label_type = response
'''
ex = self.features[idx].data
for field in ['utterance', 'system_utterance']:
if field in ex:
ex["labels"][field] = ex[field]
if 'system_actions' in ex:
ex["labels"]['system_actions'] = DialogueS2SGenerationDataset.format_actions(
self.cfg.prompt_template, ex['system_actions']
)
input_sentence = self.format_prompt(ex)
output_sentence = ex["labels"][self.output_label_type]
_, input_ids, attn_masks = self.default_encode(input_sentence)
_, labels, _ = self.default_encode(output_sentence)
labels[labels == self.tokenizer.tokenizer.pad_token_id] = -100
return input_ids, attn_masks, labels
|
gkucsko/NeMo
|
scripts/dataset_processing/get_hi-mia_data.py
|
<gh_stars>0
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE: python get_hi-mia_data.py --data_root=<where to put data>
import argparse
import json
import logging as _logging
import os
import tarfile
import urllib.request
from glob import glob
import librosa as l
from sklearn.model_selection import StratifiedShuffleSplit
from tqdm import tqdm
parser = argparse.ArgumentParser(description="HI-MIA Data download")
parser.add_argument("--data_root", required=True, default=None, type=str)
parser.add_argument("--log_level", default=20, type=int)
args = parser.parse_args()
logging = _logging.getLogger(__name__)
logging.addHandler(_logging.StreamHandler())
logging.setLevel(args.log_level)
URL = {
"dev": "http://www.openslr.org/resources/85/dev.tar.gz",
"test": "http://www.openslr.org/resources/85/test.tar.gz",
"train": "http://www.openslr.org/resources/85/train.tar.gz",
}
def __retrieve_with_progress(source: str, filename: str):
"""
    Downloads the resource at `source` to a local file, displaying a progress bar.
    Args:
        source: url of the resource
        filename: local filepath to write to
Returns:
"""
with open(filename, "wb") as f:
response = urllib.request.urlopen(source)
total = response.length
if total is None:
            f.write(response.read())
else:
with tqdm(total=total, unit="B", unit_scale=True, unit_divisor=1024) as pbar:
for data in response:
f.write(data)
pbar.update(len(data))
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
"""
source = URL[source]
if not os.path.exists(destination) and not os.path.exists(os.path.splitext(destination)[0]):
logging.info("{0} does not exist. Downloading ...".format(destination))
__retrieve_with_progress(source, filename=destination + ".tmp")
os.rename(destination + ".tmp", destination)
logging.info("Downloaded {0}.".format(destination))
elif os.path.exists(destination):
logging.info("Destination {0} exists. Skipping.".format(destination))
elif os.path.exists(os.path.splitext(destination)[0]):
logging.warning(
"Assuming extracted folder %s contains the extracted files from %s. Will not download.",
os.path.basename(destination),
destination,
)
return destination
def __extract_all_files(filepath: str, data_root: str, data_dir: str):
if not os.path.exists(data_dir):
extract_file(filepath, data_root)
audio_dir = os.path.join(data_dir, "wav")
for subfolder, _, filelist in os.walk(audio_dir):
for ftar in filelist:
extract_file(os.path.join(subfolder, ftar), subfolder)
else:
logging.info("Skipping extracting. Data already there %s" % data_dir)
def extract_file(filepath: str, data_dir: str):
try:
tar = tarfile.open(filepath, encoding='utf-8')
tar.extractall(data_dir)
tar.close()
except Exception:
logging.info("Not extracting. Maybe already there?")
def __remove_tarred_files(filepath: str, data_dir: str):
if os.path.exists(data_dir) and os.path.isfile(filepath):
logging.info("Deleting %s" % filepath)
os.remove(filepath)
def write_file(name, lines, idx):
with open(name, "w") as fout:
for i in idx:
dic = lines[i]
json.dump(dic, fout)
fout.write("\n")
logging.info("wrote %s", name)
def __process_data(data_folder: str, data_set: str):
"""
    Generates manifest files for the given data split.
    Args:
        data_folder: source folder with wav files
        data_set: name of the split ("train", "dev" or "test")
    Returns:
"""
fullpath = os.path.abspath(data_folder)
filelist = glob(fullpath + "/**/*.wav", recursive=True)
out = os.path.join(fullpath, data_set + "_all.json")
utt2spk = os.path.join(fullpath, "utt2spk")
utt2spk_file = open(utt2spk, "w")
    id = -2  # index of the speaker folder within the file path
if os.path.exists(out):
logging.warning(
"%s already exists and is assumed to be processed. If not, please delete %s and rerun this script",
out,
out,
)
return
speakers = []
lines = []
with open(out, "w") as outfile:
for line in tqdm(filelist):
line = line.strip()
y, sr = l.load(line, sr=None)
if sr != 16000:
y, sr = l.load(line, sr=16000)
l.output.write_wav(line, y, sr)
dur = l.get_duration(y=y, sr=sr)
if data_set == "test":
speaker = line.split("/")[-1].split(".")[0].split("_")[0]
else:
speaker = line.split("/")[id]
speaker = list(speaker)
speaker = "".join(speaker)
speakers.append(speaker)
meta = {"audio_filepath": line, "duration": float(dur), "label": speaker}
lines.append(meta)
json.dump(meta, outfile)
outfile.write("\n")
utt2spk_file.write(line.split("/")[-1] + "\t" + speaker + "\n")
utt2spk_file.close()
if data_set != "test":
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=42)
for train_idx, test_idx in sss.split(speakers, speakers):
print(len(train_idx))
out = os.path.join(fullpath, "train.json")
write_file(out, lines, train_idx)
out = os.path.join(fullpath, "dev.json")
write_file(out, lines, test_idx)
def main():
data_root = args.data_root
for data_set in URL.keys():
# data_set = 'data_aishell'
logging.info("\n\nWorking on: {0}".format(data_set))
file_path = os.path.join(data_root, data_set + ".tgz")
logging.info("Getting {0}".format(data_set))
__maybe_download_file(file_path, data_set)
logging.info("Extracting {0}".format(data_set))
data_folder = os.path.join(data_root, data_set)
__extract_all_files(file_path, data_root, data_folder)
__remove_tarred_files(file_path, data_folder)
logging.info("Processing {0}".format(data_set))
__process_data(data_folder, data_set)
logging.info("Done!")
if __name__ == "__main__":
main()
|
gkucsko/NeMo
|
nemo/collections/nlp/models/dialogue/dialogue_zero_shot_intent_model.py
|
# Copyright 2018 The HuggingFace Inc. team.
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import defaultdict
from typing import Dict, List, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from nemo.collections.nlp.data.dialogue import DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.assistant_data_processor import DialogueAssistantDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.design_data_processor import DialogueDesignDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_zero_shot_intent_dataset import DialogueZeroShotIntentDataset
from nemo.collections.nlp.data.zero_shot_intent_recognition.zero_shot_intent_dataset import (
ZeroShotIntentInferenceDataset,
calc_class_weights_from_dataloader,
)
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueGenerationMetrics
from nemo.collections.nlp.models import TextClassificationModel
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
__all__ = ['DialogueZeroShotIntentModel']
class DialogueZeroShotIntentModel(TextClassificationModel):
"""TextClassificationModel to be trained on two- or three-class textual entailment data, to be used for zero shot intent recognition."""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.cfg = cfg
super().__init__(cfg=cfg, trainer=trainer)
if self.cfg.library == 'megatron':
# zero shot intent classification loading
# cannot directly load as .nemo uses the pre-refactor model
# therefore transfer its attributes over
if self.cfg.original_nemo_checkpoint is not None:
original_model = DialogueZeroShotIntentModel.restore_from(self.cfg.original_nemo_checkpoint)
self.classifier = original_model.classifier
self.bert_model = original_model.bert_model
self.loss = original_model.loss
self.classification_report = original_model.classification_report
elif self.cfg.library == "huggingface":
self.nli_model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli')
self.bert_model = self.nli_model.model
self.classifier = self.nli_model.classification_head
original_model = DialogueZeroShotIntentModel.restore_from(self.cfg.original_nemo_checkpoint)
self.loss = original_model.loss
self.classification_report = original_model.classification_report
self.tokenizer = AutoTokenizer.from_pretrained('facebook/bart-large-mnli')
self.tokenizer.max_seq_length = self.cfg.dataset.max_seq_length
def _setup_dataloader_from_config(self, cfg: DictConfig, dataset_split) -> 'torch.utils.data.DataLoader':
if self._cfg.dataset.task == "zero_shot":
self.data_processor = DialogueAssistantDataProcessor(
self.cfg.data_dir, self.tokenizer, cfg=self.cfg.dataset
)
elif self._cfg.dataset.task == "design":
self.data_processor = DialogueDesignDataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
elif self._cfg.dataset.task == 'sgd':
self.data_processor = DialogueSGDDataProcessor(
data_dir=self._cfg.dataset.data_dir,
dialogues_example_dir=self._cfg.dataset.dialogues_example_dir,
tokenizer=self.tokenizer,
cfg=self._cfg.dataset,
)
else:
raise ValueError("Only zero_shot, design and sgd supported for Zero Shot Intent Model")
dataset = DialogueZeroShotIntentDataset(
dataset_split,
self.data_processor,
self.tokenizer,
self.cfg.dataset, # this is the model.dataset cfg, which is diff from train_ds cfg etc
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
def forward(self, input_ids, attention_mask, token_type_ids):
if self.cfg.library == 'megatron':
hidden_states = self.bert_model(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)
if isinstance(hidden_states, tuple):
hidden_states = hidden_states[0]
logits = self.classifier(hidden_states=hidden_states)
elif self.cfg.library == 'huggingface':
output = self.nli_model(input_ids=input_ids, attention_mask=attention_mask)
logits = output['logits']
return logits
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config:
            logging.info(
                "Dataloader config or file_name for the training set is missing, so no data loader for training is created!"
            )
            self._train_dl = None
return
self._train_dl = self._setup_dataloader_from_config(train_data_config, "train")
# calculate the class weights to be used in the loss function
if self.cfg.dataset.class_balancing == 'weighted_loss':
self.class_weights = calc_class_weights_from_dataloader(
self._train_dl, self.cfg.dataset.num_classes, self.cfg.dataset.data_dir
)
else:
self.class_weights = None
# we need to create/update the loss module by using the weights calculated from the training data
self.create_loss_module()
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config:
            logging.info(
                "Dataloader config or file_path for the validation data set is missing, so no data loader for validation is created!"
            )
            self._validation_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(val_data_config, "dev")
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config:
logging.info(
f"Dataloader config or file_path for the test data set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(test_data_config, "test")
def _setup_infer_dataloader(
self,
queries: List[str],
candidate_labels: List[str],
        hypothesis_template: str,
batch_size=1,
max_seq_length: int = -1,
) -> 'torch.utils.data.DataLoader':
"""
Setup method for inference data loader. Here the premise-hypothesis pairs are made from queries and candidate labels.
Args:
queries: the queries to classify
candidate_labels: strings to be used as labels
hypothesis_template: the template used to turn each label into an NLI-style hypothesis. Must include a {}
or similar syntax for the candidate label to be inserted.
batch_size: batch size to use during inference
max_seq_length: maximum length of queries, default is -1 for no limit
Returns:
A pytorch DataLoader.
"""
dataset = ZeroShotIntentInferenceDataset(
queries=queries,
candidate_labels=candidate_labels,
tokenizer=self.tokenizer,
max_seq_length=max_seq_length,
hypothesis_template=hypothesis_template,
)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=2,
pin_memory=False,
drop_last=False,
collate_fn=dataset.collate_fn,
)
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_type_ids, input_mask, labels = batch
logits = self.forward(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)
val_loss = self.loss(logits=logits, labels=labels)
        preds = torch.argmax(logits, dim=-1)
tp, fn, fp, _ = self.classification_report(preds, labels)
return {
'val_loss': val_loss,
'tp': tp,
'fn': fn,
'fp': fp,
'logits': logits,
'input_ids': input_ids,
'labels': labels,
}
def validation_epoch_end(self, outputs):
"""
Get metrics based on the candidate label with the highest predicted likelihood and the ground truth label for intent
"""
output_logits = torch.cat([output['logits'] for output in outputs], dim=0)
output_input_ids = torch.cat([output['input_ids'] for output in outputs], dim=0)
output_labels = torch.cat([output['labels'] for output in outputs], dim=0)
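        # pick out the entailment logit for each premise-hypothesis pair:
        # the HuggingFace NLI head reads it from index 2, the Megatron head (below) from index 1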
if self.cfg.library == 'huggingface':
entail_logits = output_logits[..., 2]
decoded_input_ids = [self.tokenizer.decode(output_input_ids[i]) for i in range(len(output_input_ids))]
utterance_candidate_pairs = [i.split(self.tokenizer.sep_token) for i in decoded_input_ids]
utterances = [
i[0].replace(self.tokenizer.bos_token, '').replace(self.tokenizer.eos_token, '')
for i in utterance_candidate_pairs
]
elif self.cfg.library == 'megatron':
entail_logits = output_logits[..., 1]
decoded_input_ids = [
self.tokenizer.tokenizer.decode(output_input_ids[i]) for i in range(len(output_input_ids))
]
utterance_candidate_pairs = [i.split(self.tokenizer.tokenizer.sep_token) for i in decoded_input_ids]
utterances = [
i[0].replace(self.tokenizer.tokenizer.bos_token, '').replace(self.tokenizer.tokenizer.eos_token, '')
for i in utterance_candidate_pairs
]
# account for uncased tokenization
candidates = [
i[1]
.replace(self.cfg.dataset.prompt_template.lower(), '')
.replace(self.cfg.dataset.prompt_template, '')
.strip()
for i in utterance_candidate_pairs
]
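        # group the premise-hypothesis pair indices by utterance so the best-scoring candidate can be picked per utterance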
utterance_to_idx = defaultdict(list)
for idx, utterance in enumerate(utterances):
utterance_to_idx[utterance].append(idx)
predicted_labels = []
ground_truth_labels = []
utterances = []
for utterance, idxs in utterance_to_idx.items():
utterance_candidates = [candidates[idx] for idx in idxs]
logits = [entail_logits[idx].item() for idx in idxs]
labels = [output_labels[idx].item() for idx in idxs]
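            # the ground-truth candidate is the pair whose label is set; the prediction is the pair with the highest entailment logit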
correct_candidate = utterance_candidates[np.argmax(labels)]
predicted_candidate = utterance_candidates[np.argmax(logits)]
predicted_labels.append(predicted_candidate)
ground_truth_labels.append(correct_candidate)
utterances.append(utterance)
os.makedirs(self.cfg.dataset.dialogues_example_dir, exist_ok=True)
filename = os.path.join(self.cfg.dataset.dialogues_example_dir, "test_predictions.jsonl")
DialogueGenerationMetrics.save_predictions(
filename, predicted_labels, ground_truth_labels, utterances,
)
label_to_ids = {label: idx for idx, label in enumerate(list(set(predicted_labels + ground_truth_labels)))}
self.classification_report = ClassificationReport(
num_classes=len(label_to_ids), mode='micro', label_ids=label_to_ids, dist_sync_on_step=True
).to(output_logits[0].device)
predicted_label_ids = torch.tensor([label_to_ids[label] for label in predicted_labels]).to(
output_logits[0].device
)
ground_truth_label_ids = torch.tensor([label_to_ids[label] for label in ground_truth_labels]).to(
output_logits[0].device
)
tp, fn, fp, _ = self.classification_report(predicted_label_ids, ground_truth_label_ids)
precision, recall, f1, report = self.classification_report.compute()
label_acc = np.mean([int(predicted_labels[i] == ground_truth_labels[i]) for i in range(len(predicted_labels))])
avg_loss = torch.stack([x[f'val_loss'] for x in outputs]).mean()
logging.info(report)
self.log('unified_precision', precision)
self.log('unified_f1', f1)
self.log('unified_recall', recall)
        self.log('unified_accuracy', label_acc * 100)
self.log('val_loss', avg_loss, prog_bar=True)
self.classification_report.reset()
def predict(
self,
queries: Union[str, List[str]],
candidate_labels: Union[str, List[str]],
hypothesis_template='This example is {}.',
batch_size=1,
multi_label=True,
entailment_idx=1,
contradiction_idx=0,
) -> List[Dict]:
"""
Given a list of queries and a list of candidate labels, return a ranked list of labels and scores for each query.
Example usage:
queries = ["I'd like a veggie burger, fries, and a coke", "Turn off the lights in the living room",]
candidate_labels = ["Food order", "Change lighting"]
model.predict(queries, candidate_labels)
Example output:
[{'sentence': "I'd like a veggie burger, fries, and a coke",
'labels': ['Food order', 'Change lighting'],
'scores': [0.8557153344154358, 0.12036784738302231]},
{'sentence': 'Turn off the lights in the living room',
'labels': ['Change lighting', 'Food order'],
'scores': [0.8506497144699097, 0.06594637036323547]}]
Args:
queries: the query or list of queries to classify
candidate_labels: string or list of strings to be used as labels
hypothesis_template: the template used to turn each label into an NLI-style hypothesis. Must include a {}
or similar syntax for the candidate label to be inserted.
batch_size: the batch size to use for inference.
multi_label: whether or not multiple candidate labels can be true. If False, the scores are normalized
such that all class probabilities sum to 1. If True, the labels are
considered independent and probabilities are normalized for each candidate by doing a softmax of
the entailment score vs. the contradiction score.
entailment_idx: the index of the "entailment" class in the trained model; models trained on MNLI
using NeMo's glue_benchmark.py or zero_shot_intent_model.py use an index of 1 by default.
contradiction_idx: the index of the "contradiction" class in the trained model; models trained on MNLI
using NeMo's glue_benchmark.py or zero_shot_intent_model.py use an index of 0 by default.
Returns:
list of dictionaries; one dict per input query. Each dict has keys "sentence", "labels", "scores".
labels and scores are parallel lists (with each score corresponding to the label at the same index),
sorted from highest to lowest score.
"""
if not queries:
raise ValueError("No queries were passed for classification!")
if not candidate_labels:
raise ValueError("No candidate labels were provided!")
queries = [queries] if isinstance(queries, str) else queries
candidate_labels = [candidate_labels] if isinstance(candidate_labels, str) else candidate_labels
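        # with a single candidate label, normalizing across labels would always yield a score of 1.0,
        # so fall back to per-label entailment-vs-contradiction scoring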
if len(candidate_labels) == 1:
multi_label = True
mode = self.training
try:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Switch model to evaluation mode
self.eval()
self.to(device)
infer_datalayer = self._setup_infer_dataloader(
queries,
candidate_labels,
hypothesis_template=hypothesis_template,
batch_size=batch_size,
max_seq_length=self._cfg.dataset.max_seq_length,
)
all_batch_logits = []
for batch in infer_datalayer:
input_ids, input_type_ids, input_mask, _ = batch
logits = self.forward(
input_ids=input_ids.to(device),
token_type_ids=input_type_ids.to(device),
attention_mask=input_mask.to(device),
)
all_batch_logits.append(logits.detach().cpu().numpy())
all_logits = np.concatenate(all_batch_logits)
outputs = all_logits.reshape((len(queries), len(candidate_labels), -1))
if not multi_label:
# softmax the "entailment" logits over all candidate labels
entail_logits = outputs[..., entailment_idx]
scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
else:
# softmax over the entailment vs. contradiction dim for each label independently
entail_contr_logits = outputs[..., [contradiction_idx, entailment_idx]]
scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
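                # keep only the entailment probability from the per-label [contradiction, entailment] softmax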
scores = scores[..., 1]
result = []
for i in range(len(queries)):
sorted_idxs = list(reversed(scores[i].argsort()))
result.append(
{
"sentence": queries[i],
"labels": [candidate_labels[j] for j in sorted_idxs],
"scores": scores[i][sorted_idxs].tolist(),
}
)
finally:
# set mode back to its original value
self.train(mode=mode)
return result
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
result.append(
PretrainedModelInfo(
pretrained_model_name="zeroshotintent_en_bert_base_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/zeroshotintent_en_bert_base_uncased/versions/1.4.1/files/zeroshotintent_en_bert_base_uncased.nemo",
description="DialogueZeroShotIntentModel trained by fine tuning BERT-base-uncased on the MNLI (Multi-Genre Natural Language Inference) dataset, which achieves an accuracy of 84.9% and 84.8% on the matched and mismatched dev sets, respectively.",
)
)
result.append(
PretrainedModelInfo(
pretrained_model_name="zeroshotintent_en_megatron_uncased",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/zeroshotintent_en_megatron_uncased/versions/1.4.1/files/zeroshotintent_en_megatron_uncased.nemo",
description="DialogueZeroShotIntentModel trained by fine tuning Megatron-BERT-345m=M-uncased on the MNLI (Multi-Genre Natural Language Inference) dataset, which achieves an accuracy of 90.0% and 89.9% on the matched and mismatched dev sets, respectively",
)
)
return result
|
gkucsko/NeMo
|
nemo/collections/nlp/models/dialogue/dialogue_nearest_neighbour_model.py
|
# Copyright 2022 The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from transformers import AutoModel
from nemo.collections.nlp.data.dialogue import DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.assistant_data_processor import DialogueAssistantDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.design_data_processor import DialogueDesignDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_nearest_neighbour_dataset import (
DialogueNearestNeighbourDataset,
)
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueGenerationMetrics
from nemo.collections.nlp.models.nlp_model import NLPModel
from nemo.core.classes.common import PretrainedModelInfo
from nemo.utils import logging
__all__ = ['DialogueNearestNeighbourModel']
class DialogueNearestNeighbourModel(NLPModel):
"""Dialogue Nearest Neighbour Model identifies the intent of an utterance using the cosine similarity between sentence embeddings of the utterance and various label descriptions """
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
self.cfg = cfg
super().__init__(cfg=cfg, trainer=trainer)
if self.cfg.library == "huggingface":
self.language_model = AutoModel.from_pretrained(self.cfg.language_model.pretrained_model_name)
def _setup_dataloader_from_config(self, cfg: DictConfig, dataset_split) -> 'torch.utils.data.DataLoader':
if self._cfg.dataset.task == "zero_shot":
self.data_processor = DialogueAssistantDataProcessor(
self.cfg.data_dir, self.tokenizer, cfg=self.cfg.dataset
)
elif self._cfg.dataset.task == "design":
self.data_processor = DialogueDesignDataProcessor(
data_dir=self._cfg.dataset.data_dir, tokenizer=self.tokenizer, cfg=self._cfg.dataset
)
elif self._cfg.dataset.task == 'sgd':
self.data_processor = DialogueSGDDataProcessor(
data_dir=self._cfg.dataset.data_dir,
dialogues_example_dir=self._cfg.dataset.dialogues_example_dir,
tokenizer=self.tokenizer,
cfg=self._cfg.dataset,
)
else:
raise ValueError("Only zero_shot, design and sgd supported for Zero Shot Intent Model")
dataset = DialogueNearestNeighbourDataset(
dataset_split,
self.data_processor,
self.tokenizer,
self.cfg.dataset, # this is the model.dataset cfg, which is diff from train_ds cfg etc
)
return torch.utils.data.DataLoader(
dataset=dataset,
collate_fn=dataset.collate_fn,
batch_size=cfg.batch_size,
shuffle=cfg.shuffle,
num_workers=cfg.get("num_workers", 0),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
def forward(self, input_ids, attention_mask):
if self.cfg.library == 'huggingface':
output = self.language_model(input_ids=input_ids, attention_mask=attention_mask)
return output
def training_step(self, batch, batch_idx):
raise NotImplementedError
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx, mode='test')
@staticmethod
def mean_pooling(model_output, attention_mask):
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
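        # masked mean over the token dimension; the clamp guards against division by zero for fully-masked rows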
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
def validation_step(self, batch, batch_idx, mode='val'):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
input_ids, input_mask, labels = batch
preds = []
gts = []
inputs = []
for i in range(input_ids.size(0)):
output = self.forward(input_ids=input_ids[i], attention_mask=input_mask[i])
sentence_embeddings = DialogueNearestNeighbourModel.mean_pooling(output, input_mask[i])
sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)
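            # row 0 holds the utterance embedding and rows 1+ hold the candidate label embeddings,
            # hence the +1 offset when mapping the argmax back into input_ids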
cos_sim = F.cosine_similarity(sentence_embeddings[:1, :], sentence_embeddings[1:, :])
pred = torch.argmax(cos_sim).item() + 1
gt = torch.argmax(labels[i][1:]).item() + 1
preds.append(input_ids[i, pred])
gts.append(input_ids[i, gt])
inputs.append(input_ids[i, 0])
return {'preds': torch.stack(preds), 'labels': torch.stack(gts), 'inputs': torch.stack(inputs)}
def multi_test_epoch_end(self, outputs, dataloader_idx):
return self.validation_epoch_end(outputs)
def validation_epoch_end(self, outputs):
"""
Get metrics based on the candidate label with the highest predicted likelihood and the ground truth label for intent
"""
output_preds = torch.cat([output['preds'] for output in outputs], dim=0)
output_labels = torch.cat([output['labels'] for output in outputs], dim=0)
inputs = torch.cat([output['inputs'] for output in outputs], dim=0)
decoded_preds = self.tokenizer.tokenizer.batch_decode(output_preds, skip_special_tokens=True)
decoded_labels = self.tokenizer.tokenizer.batch_decode(output_labels, skip_special_tokens=True)
decoded_inputs = self.tokenizer.tokenizer.batch_decode(inputs, skip_special_tokens=True)
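        # decoded predictions and labels start with the prompt template, so strip that prefix to recover the bare label text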
prompt_len = len(self.cfg.dataset.prompt_template.strip())
predicted_labels = [i[prompt_len:].strip() for i in decoded_preds]
ground_truth_labels = [i[prompt_len:].strip() for i in decoded_labels]
os.makedirs(self.cfg.dataset.dialogues_example_dir, exist_ok=True)
filename = os.path.join(self.cfg.dataset.dialogues_example_dir, "test_predictions.jsonl")
DialogueGenerationMetrics.save_predictions(
filename, predicted_labels, ground_truth_labels, decoded_inputs,
)
label_to_ids = {label: idx for idx, label in enumerate(list(set(predicted_labels + ground_truth_labels)))}
self.classification_report = ClassificationReport(
num_classes=len(label_to_ids), mode='micro', label_ids=label_to_ids, dist_sync_on_step=True
).to(output_preds[0].device)
predicted_label_ids = torch.tensor([label_to_ids[label] for label in predicted_labels]).to(
output_preds[0].device
)
ground_truth_label_ids = torch.tensor([label_to_ids[label] for label in ground_truth_labels]).to(
output_preds[0].device
)
tp, fn, fp, _ = self.classification_report(predicted_label_ids, ground_truth_label_ids)
precision, recall, f1, report = self.classification_report.compute()
label_acc = np.mean([int(predicted_labels[i] == ground_truth_labels[i]) for i in range(len(predicted_labels))])
logging.info(report)
self.log('unified_precision', precision)
self.log('unified_f1', f1)
self.log('unified_recall', recall)
        self.log('unified_accuracy', label_acc * 100)
self.classification_report.reset()
def setup_training_data(self, train_data_config: Optional[DictConfig]):
if not train_data_config:
logging.info(
f"Dataloader config or file_name for the training set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._train_dl = self._setup_dataloader_from_config(train_data_config, "train")
# self.create_loss_module()
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
if not val_data_config:
logging.info(
f"Dataloader config or file_path for the validation data set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._validation_dl = self._setup_dataloader_from_config(val_data_config, "dev")
def setup_multiple_test_data(self, test_data_config: Optional[DictConfig]):
self.setup_test_data(test_data_config)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
if not test_data_config:
logging.info(
f"Dataloader config or file_path for the test data set is missing, so no data loader for test is created!"
)
self._test_dl = None
return
self._test_dl = self._setup_dataloader_from_config(test_data_config, "test")
@classmethod
    def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
return result
|
gkucsko/NeMo
|
nemo/collections/nlp/data/dialogue/data_processor/mellon_qa_data_processor.py
|
<filename>nemo/collections/nlp/data/dialogue/data_processor/mellon_qa_data_processor.py<gh_stars>0
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
__all__ = ['DialogueMellonQADataProcessor']
class DialogueMellonQADataProcessor(DialogueDataProcessor):
"""Data Processor for Mellon QA dialogues.
"""
def __init__(self, data_dir: str, tokenizer: object, cfg=None):
"""
        Constructs DialogueMellonQADataProcessor
Args:
data_dir: path to data directory
tokenizer: tokenizer object
cfg: cfg container for dataset
"""
self.data_dir = data_dir
self._tokenizer = tokenizer
self.cfg = cfg
def open_csv(self, filename):
"""
        Reads a CSV file into a dict of rows keyed by row index
"""
filename = os.path.join(self.data_dir, filename)
with open(filename, "r", encoding="UTF-8") as f:
            df = pd.read_csv(f)
return df.to_dict(orient='index')
def get_dialog_examples(self, dataset_split: str):
"""
Process raw files into DialogueInputExample
Args:
dataset_split: {train, dev, test}
            For the Mellon QA dataset, there is no explicit dev set (the test set doubles as the dev set).
            Therefore, this function creates a dev set and a new train set from the original train set.
            The dev set contains self.cfg.dev_proportion % of the samples, with the rest going into the train set.
            The test set contains the whole dataset (dev + train), as this dataset is small (~100 examples) and is primarily used in a zero-shot setting.
"""
examples = []
raw_examples = self.open_csv('mellon_qa_data.csv')
raw_examples = list(raw_examples.values())
        # filter out examples that have no answer
raw_examples = [
example
for example in raw_examples
if isinstance(example['Non Generative Question Answering '], str)
and isinstance(example['Generative Question Answering '], str)
]
n_samples = len(raw_examples)
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, self.cfg.dev_proportion)
for i in idxs:
utterance = str(raw_examples[i]['Question'])
answer = str(raw_examples[i]['Non Generative Question Answering '])
well_formed_answer = str(raw_examples[i]['Generative Question Answering '])
passage = raw_examples[i]['Passage']
input_example = {
"utterance": utterance,
"example_id": i,
"labels": {"response": answer, "fluent_response": well_formed_answer, "passage": passage,},
}
example = DialogueInputExample(input_example)
examples.append(example)
return examples
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
return self.get_dialog_examples("train")
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev")
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
return self.get_dialog_examples("test")
|