repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/NewsOrchestrator/app.py | <gh_stars>10-100
import os

from flask import Flask

from views import news

# Instantiate the Flask application.
app = Flask(__name__)

# Load the configuration class named by the APP_SETTINGS env var
# (e.g. 'config.DevelopmentConfig').
app.config.from_object(os.getenv('APP_SETTINGS'))

# All HTTP routes live on the news blueprint.
app.register_blueprint(news)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/NewsOrchestrator/views.py | import os
import json
import itertools
from flask import Blueprint, jsonify, request
from nameko.standalone.rpc import ClusterRpcProxy
from nameko.standalone.events import event_dispatcher
# Blueprint holding every orchestrator HTTP route.
news = Blueprint('news', __name__)
# AMQP broker address, shared by RPC calls and event dispatch below.
BROKER_CONFIG = {'AMQP_URI': os.environ.get('QUEUE_HOST')}
@news.route('/<string:news_type>/<int:news_id>', methods=['GET'])
def get_single_news(news_type, news_id):
    """Return one news item and notify the recommendation service."""
    try:
        payload = rpc_get_news(news_type, news_id)
        # Tell the recommendation service which news this user just read.
        dispatch = event_dispatcher(BROKER_CONFIG)
        dispatch('recommendation_sender', 'receiver', {
            'user_id': request.cookies.get('user_id'),
            'news': payload['news'],
        })
        return jsonify(payload), 200
    except Exception as e:
        return error_response(e, 500)
@news.route(
    '/all/<int:num_page>/<int:limit>',
    methods=['GET'])
def get_all_news(num_page, limit):
    """Aggregate one page of news from every news service."""
    try:
        merged = []
        # Query each service in turn; order matters for the merged list
        # (famous, then politics, then sports).
        for news_type in ('famous', 'politics', 'sports'):
            partial = rpc_get_all_news(news_type, num_page, limit)
            merged.extend(partial.get('news', []))
        response_object = {
            'status': 'success',
            'news': merged,
        }
        return jsonify(response_object), 200
    except Exception as e:
        return error_response(e, 500)
@news.route(
    '/<string:news_type>/<int:num_page>/<int:limit>',
    methods=['GET'])
def get_all_news_by_type(news_type, num_page, limit):
    """Return one page of news for a single news type."""
    try:
        page = rpc_get_all_news(news_type, num_page, limit)
        return jsonify(page), 200
    except Exception as e:
        return error_response(e, 500)
@news.route('/<string:news_type>', methods=['POST', 'PUT'])
def add_news(news_type):
    """Create or update a news item via the matching command service."""
    payload = request.get_json()
    if not payload:
        return error_response('Invalid payload', 400)
    try:
        result = rpc_command(news_type, payload)
        return jsonify(result), 201
    except Exception as e:
        return error_response(e, 500)
def error_response(e, code):
    """Build a JSON failure response with the given HTTP status code."""
    body = {
        'status': 'fail',
        'message': str(e),
    }
    return jsonify(body), code
def rpc_get_news(news_type, news_id):
    """Fetch one news item from the query service for *news_type*.

    Returns a dict with 'status' and the decoded 'news' payload.
    Raises ValueError for an unknown news type.
    """
    # Map URL news types to nameko query-service names.
    service_names = {
        'famous': 'query_famous',
        'sports': 'query_sports',
        'politics': 'query_politics',
    }
    if news_type not in service_names:
        # Bug fix: the original returned a Flask (response, code) tuple
        # here; callers then subscripted it like a dict and crashed with a
        # TypeError. Raising keeps this helper's return type uniform -- the
        # view functions already translate exceptions into error responses.
        raise ValueError('Invalid News type')
    with ClusterRpcProxy(BROKER_CONFIG) as rpc:
        news = getattr(rpc, service_names[news_type]).get_news(news_id)
    return {
        'status': 'success',
        'news': json.loads(news)
    }
def rpc_get_all_news(news_type, num_page, limit):
    """Fetch one page of news from the query service for *news_type*.

    Returns a dict with 'status' and the decoded 'news' list.
    Raises ValueError for an unknown news type.
    """
    service_names = {
        'famous': 'query_famous',
        'sports': 'query_sports',
        'politics': 'query_politics',
    }
    if news_type not in service_names:
        # Bug fix: the original returned a Flask (response, code) tuple,
        # which callers treated as a dict -- raise instead so the views'
        # except-blocks produce a proper error response.
        raise ValueError('Invalid News type')
    with ClusterRpcProxy(BROKER_CONFIG) as rpc:
        news = getattr(rpc, service_names[news_type]).get_all_news(
            num_page, limit)
    return {
        'status': 'success',
        'news': json.loads(news)
    }
def rpc_command(news_type, data):
    """Send an add_news command to the matching command service.

    Returns a dict with 'status' and the echoed 'news' payload.
    Raises ValueError for an unknown news type.
    """
    service_names = {
        'famous': 'command_famous',
        'sports': 'command_sports',
        'politics': 'command_politics',
    }
    if news_type not in service_names:
        # Bug fix: the original returned a Flask (response, code) tuple,
        # which the add_news view would then jsonify as a 201 success.
        # Raising lets the caller return its error response instead.
        raise ValueError('Invalid News type')
    with ClusterRpcProxy(BROKER_CONFIG) as rpc:
        news = getattr(rpc, service_names[news_type]).add_news(data)
    return {
        'status': 'success',
        'news': news,
    }
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/RecommendationService/tests.py | <reponame>PacktPublishing/Microservice-Patterns-and-Best-Practices
import os
import pytest
from .service import Command
from nameko.testing.services import worker_factory
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
@pytest.fixture
def session():
    """Provide a SQLAlchemy session bound to the command test database."""
    # COMMANDDB_TEST_HOST holds the full database URL.
    db_engine = create_engine(os.environ.get('COMMANDDB_TEST_HOST'))
    Session = sessionmaker(db_engine)
    return Session()


def test_command(session):
    """add_news stores version 1, then bumps the version on re-submit."""
    data = {
        "title": "title test",
        "author": "author test",
        "content": "content test",
        "tags": [
            "test tag1",
            "test tag2",
        ],
    }
    # worker_factory builds the nameko service with the test DB session.
    command = worker_factory(Command, db=session)
    result = command.add_news(data)
    assert result['title'] == "title test"
    assert result['version'] == 1
    # Re-submitting with the returned id must create version 2.
    data['id'] = result['id']
    data['version'] = result['version']
    command = worker_factory(Command, db=session)
    result = command.add_news(data)
    assert result['version'] == 2
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter06/NewsOrchestrator/tests.py | import os
import pytest
from .service import Command
from nameko.testing.services import worker_factory
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
@pytest.fixture
def session():
    """Provide a SQLAlchemy session bound to the command test database."""
    # COMMANDDB_TEST_HOST holds the full database URL.
    db_engine = create_engine(os.environ.get('COMMANDDB_TEST_HOST'))
    Session = sessionmaker(db_engine)
    return Session()


def test_command(session):
    """add_news stores version 1, then bumps the version on re-submit."""
    data = {
        "title": "title test",
        "author": "author test",
        "content": "content test",
        "tags": [
            "test tag1",
            "test tag2",
        ],
    }
    # worker_factory builds the nameko service with the test DB session.
    command = worker_factory(Command, db=session)
    result = command.add_news(data)
    assert result['title'] == "title test"
    assert result['version'] == 1
    # Re-submitting with the returned id must create version 2.
    data['id'] = result['id']
    data['version'] = result['version']
    command = worker_factory(Command, db=session)
    result = command.add_news(data)
    assert result['version'] == 2
class TestNewsService(BaseTestCase):
    # NOTE(review): this module does not import `json` and does not define
    # `BaseTestCase`; the class looks pasted from the Flask test suite --
    # confirm the intended imports/base class.

    def test_add_news(self):
        """Test to insert a News to the database."""
        with self.client:
            response = self.client.post(
                '/famous',
                data=json.dumps(dict(
                    title='My Test',
                    content='Just a service test',
                    author='unittest',
                    tags=['Test', 'Functional_test'],
                )),
                content_type='application/json',
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 201)
            self.assertIn('success', data['status'])
            self.assertIn('My Test', data['news']['title'])

    def test_get_all_news(self):
        """Test to get all News paginated from the database."""
        with self.client:
            # loop_counter is the index of the first item expected on the
            # requested page.
            test_cases = [
                {'page': 1, 'num_per_page': 10, 'loop_counter': 0},
                {'page': 2, 'num_per_page': 10, 'loop_counter': 10},
                {'page': 1, 'num_per_page': 20, 'loop_counter': 0},
            ]
            for tc in test_cases:
                response = self.client.get(
                    '/famous/{}/{}'.format(
                        tc['page'], tc['num_per_page'])
                )
                data = json.loads(response.data.decode())
                self.assertEqual(response.status_code, 200)
                self.assertIn('success', data['status'])
                # Bug fix: the original called assertEqual() with a single
                # boolean argument; assertTrue is what was intended.
                self.assertTrue(len(data['news']) > 0)
                for d in data['news']:
                    self.assertEqual(
                        d['title'],
                        'Title test-{}'.format(tc['loop_counter'])
                    )
                    self.assertEqual(
                        d['content'],
                        'Content test-{}'.format(tc['loop_counter'])
                    )
                    self.assertEqual(
                        d['author'],
                        'Author test-{}'.format(tc['loop_counter'])
                    )
                    tc['loop_counter'] += 1
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/PoliticsNewsService/dbmigrate.py | import os
from models import Base
from sqlalchemy import create_engine
def create_db():
    """Create the shared id sequence and all command-side tables."""
    engine = create_engine(os.environ.get("COMMANDDB_HOST"))
    # The sequence feeds CommandNewsModel ids across service instances.
    engine.execute('CREATE SEQUENCE IF NOT EXISTS news_id_seq START 1;')
    Base.metadata.create_all(engine)


if __name__ == '__main__':
    print('creating databases')
    create_db()
    print('databases created')
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter06/PoliticsNewsService/app.py | import os
from flask import Flask

from views import politics_news
from models import db

# Instantiate the application and load the configuration class named by
# the APP_SETTINGS environment variable.
app = Flask(__name__)
app.config.from_object(os.getenv('APP_SETTINGS'))

# Bind the SQLAlchemy handle and the politics blueprint to this app.
db.init_app(app)
app.register_blueprint(politics_news)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter06/SportsNewsService/config.py | class BaseConfig:
"""Base configuration"""
DEBUG = False
TESTING = False
MONGODB_SETTINGS = {}
class DevelopmentConfig(BaseConfig):
"""Development configuration"""
DEBUG = True
MONGODB_SETTINGS = {
'db': 'sports_dev',
'host': '{}{}'.format(
os.environ.get('DATABASE_HOST'),
'sports_dev',
),
}
class TestingConfig(BaseConfig):
"""Testing configuration"""
DEBUG = True
TESTING = True
MONGODB_SETTINGS = {
'db': 'sports_test',
'host': '{}{}'.format(
os.environ.get('DATABASE_HOST'),
'sports_test',
),
}
class ProductionConfig(BaseConfig):
"""Production configuration"""
DEBUG = False
MONGODB_SETTINGS = {
'db': 'sports',
'host': '{}{}'.format(
os.environ.get('DATABASE_HOST'),
'sports',
),
}
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/RecommendationService/user_client.py | <reponame>PacktPublishing/Microservice-Patterns-and-Best-Practices<filename>Chapter11/RecommendationService/user_client.py<gh_stars>10-100
import logging
import os
import grpc
import user_data_pb2
import user_data_pb2_grpc
class UserClient:
    """Context manager that fetches user data from UsersService via gRPC."""

    def __init__(self, user_id):
        self.user_id = int(user_id)
        # Open a communication channel with UsersService.
        # NOTE(review): the channel is never closed -- consider closing it
        # in __exit__.
        self.channel = grpc.insecure_channel(os.getenv('USER_SERVICE_HOST'))
        # Creating stub to get data
        self.stub = user_data_pb2_grpc.GetUserDataStub(self.channel)

    def __enter__(self):
        # Call the method shared by both microservices, passing the
        # request type with this client's user id.
        return self.stub.GetUser(
            user_data_pb2.UserDataRequest(id=self.user_id)
        )

    def __exit__(self, type, value, traceback):
        # Bug fix: the original passed the list as a positional logging
        # argument with no %s placeholder in the message, which makes the
        # logging machinery report a string-formatting error. Use a lazy
        # %s placeholder instead.
        logging.info('Received info using gRPC %s', [type, value, traceback])
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter13/NewsOrchestrator/tests.py | # -*- coding: utf-8 -*-
import json
import unittest
from mock import patch
from app import app
from views import error_response
from flask_testing import TestCase
class BaseTestCase(TestCase):
    """Common base: run each test against the Testing configuration."""

    def create_app(self):
        app.config.from_object('config.TestingConfig')
        return app
class TestDevelopmentConfig(TestCase):
    """Sanity-check the Development configuration flags."""

    def create_app(self):
        app.config.from_object('config.DevelopmentConfig')
        return app

    def test_app_is_development(self):
        self.assertTrue(app.config['DEBUG'] is True)


class TestTestingConfig(TestCase):
    """Sanity-check the Testing configuration flags."""

    def create_app(self):
        app.config.from_object('config.TestingConfig')
        return app

    def test_app_is_testing(self):
        self.assertTrue(app.config['DEBUG'])
        self.assertTrue(app.config['TESTING'])


class TestProductionConfig(TestCase):
    """Sanity-check the Production configuration flags."""

    def create_app(self):
        app.config.from_object('config.ProductionConfig')
        return app

    def test_app_is_production(self):
        self.assertFalse(app.config['DEBUG'])
        self.assertFalse(app.config['TESTING'])
class TestGetSingleNews(BaseTestCase):
    """GET /<news_type>/<news_id> with the RPC and event layers mocked."""

    # Bug fix: the original patched
    # 'nameko.standalone.events.event_dispatcher', but views.py binds the
    # name via 'from nameko.standalone.events import event_dispatcher', so
    # the view kept calling the real dispatcher and the test hit the
    # message broker. Patch the name where it is looked up instead
    # (see "where to patch" in the unittest.mock docs).
    @patch('views.rpc_get_news')
    @patch('views.event_dispatcher')
    def test_success(self, event_dispatcher_mock, rpc_get_news_mock):
        # The dispatcher is called with (source_service, event_type, payload).
        event_dispatcher_mock.return_value = lambda v1, v2, v3: None
        rpc_get_news_mock.return_value = {
            "news": [
                {
                    "_id": 1,
                    "author": "unittest",
                    "content": "Just a service test",
                    "created_at": {
                        "$date": 1514741833010
                    },
                    "news_type": "famous",
                    "tags": [
                        "Test",
                        "unit_test"
                    ],
                    "title": "My Test",
                    "version": 1
                }
            ],
            "status": "success"
        }
        response = self.client.get('/famous/1')
        data = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 200)
        self.assertIn('success', data['status'])
        self.assertTrue(len(data['news']) > 0)
        for d in data['news']:
            self.assertEqual(
                d['title'],
                'My Test',
            )
            self.assertEqual(
                d['content'],
                'Just a service test'
            )
            self.assertEqual(
                d['author'],
                'unittest'
            )

    @patch('views.rpc_get_news')
    @patch('views.event_dispatcher')
    def test_fail(self, event_dispatcher_mock, rpc_get_news_mock):
        event_dispatcher_mock.return_value = lambda v1, v2, v3: None
        # A None payload makes the view subscript None and fall into the
        # generic 500 error path.
        rpc_get_news_mock.return_value = None
        response = self.client.get('/famous/1')
        data = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 500)
        self.assertEqual('fail', data['status'])
        self.assertEqual("'NoneType' object is not subscriptable", data['message'])
class TestAddNews(BaseTestCase):
    """POST /<news_type> with the rpc_command helper mocked out."""

    @patch('views.rpc_command')
    def test_sucess(self, rpc_command_mock):  # NOTE(review): 'sucess' typo
        """Test to insert a News."""
        dict_obj = dict(
            title='My Test',
            content='Just a service test',
            author='unittest',
            tags=['Test', 'unit_test'],
        )
        rpc_command_mock.return_value = {
            'status': 'success',
            'news': dict_obj,
        }
        with self.client:
            response = self.client.post(
                '/famous',
                data=json.dumps(dict_obj),
                content_type='application/json',
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 201)
            self.assertEqual('success', data['status'])
            self.assertEqual('My Test', data['news']['title'])

    def test_fail_by_invalid_input(self):
        """An empty JSON body must be rejected with 400 before any RPC."""
        dict_obj = None
        with self.client:
            response = self.client.post(
                '/famous',
                data=json.dumps(dict_obj),
                content_type='application/json',
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 400)
            self.assertEqual('fail', data['status'])
            self.assertEqual('Invalid payload', data['message'])

    @patch('views.rpc_command')
    def test_fail_to_register(self, rpc_command_mock):
        """An RPC failure must surface as a 500 with the error message."""
        dict_obj = dict(
            title='My Test',
            content='Just a service test',
            author='unittest',
            tags=['Test', 'unit_test'],
        )
        rpc_command_mock.side_effect = Exception('Forced test fail')
        with self.client:
            response = self.client.post(
                '/famous',
                data=json.dumps(dict_obj),
                content_type='application/json',
            )
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 500)
            self.assertEqual('fail', data['status'])
            self.assertEqual('Forced test fail', data['message'])
class TestGetAllNewsPerType(BaseTestCase):
    """GET /<news_type>/<page>/<limit> with rpc_get_all_news mocked."""

    @patch('views.rpc_get_all_news')
    def test_sucess(self, rpc_get_all_news_mock):  # NOTE(review): 'sucess' typo
        """Test to get all News paginated."""
        rpc_get_all_news_mock.return_value = {
            "news": [
                {
                    "_id": 1,
                    "author": "unittest",
                    "content": "Just a service test 1",
                    "created_at": {
                        "$date": 1514741833010
                    },
                    "news_type": "famous",
                    "tags": [
                        "Test",
                        "unit_test"
                    ],
                    "title": "My Test 1",
                    "version": 1
                },
                {
                    "_id": 2,
                    "author": "unittest",
                    "content": "Just a service test 2",
                    "created_at": {
                        "$date": 1514741833010
                    },
                    "news_type": "famous",
                    "tags": [
                        "Test",
                        "unit_test"
                    ],
                    "title": "My Test 2",
                    "version": 1
                },
            ],
            "status": "success"
        }
        with self.client:
            response = self.client.get('/famous/1/10')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 200)
            self.assertIn('success', data['status'])
            self.assertEqual(2, len(data['news']))
            # The mocked items are numbered 1..2; check them in order.
            counter = 1
            for d in data['news']:
                self.assertEqual(
                    d['title'],
                    'My Test {}'.format(counter)
                )
                self.assertEqual(
                    d['content'],
                    'Just a service test {}'.format(counter)
                )
                self.assertEqual(
                    d['author'],
                    'unittest'
                )
                counter += 1

    @patch('views.rpc_get_all_news')
    def test_fail(self, rpc_get_all_news_mock):
        """Test to get all News paginated."""
        rpc_get_all_news_mock.side_effect = Exception('Forced test fail')
        with self.client:
            response = self.client.get('/famous/1/10')
            data = json.loads(response.data.decode())
            self.assertEqual(response.status_code, 500)
            self.assertEqual('fail', data['status'])
            self.assertEqual('Forced test fail', data['message'])
class TestUtilsFunctions(BaseTestCase):
    """Unit tests for the module-level helpers in views.py."""

    def test_error_message(self):
        # error_response returns a (flask_response, status_code) tuple.
        response = error_response('test message error', 500)
        data = json.loads(response[0].data.decode())
        self.assertEqual(response[1], 500)
        self.assertEqual('fail', data['status'])
        self.assertEqual('test message error', data['message'])


if __name__ == '__main__':
    unittest.main()
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/FamousNewsService/service.py | import mongoengine
from models import (
CommandNewsModel,
Base,
QueryNewsModel,
)
from sqlalchemy import Sequence
from nameko.events import EventDispatcher
from nameko.rpc import rpc
from nameko.events import event_handler
from nameko_sqlalchemy import DatabaseSession
class Command:
    """Write-side (CQRS command) nameko service for famous news."""

    name = 'command_famous'
    # Dispatches replication events consumed by the Query service below.
    dispatch = EventDispatcher()
    # SQLAlchemy session bound to the command (write) database.
    db = DatabaseSession(Base)

    @rpc
    def add_news(self, data):
        """Insert a news row (or a new version of one) and replicate it.

        A payload without 'id'/'version' creates version 1 with a fresh
        sequence id; a payload carrying them creates the next version.
        Returns the payload enriched with 'id' and 'version'.
        """
        try:
            version = 1
            if data.get('version'):
                version = (data.get('version') + 1)
            if data.get('id'):
                id = data.get('id')
            else:
                # New item: draw the next value from the shared sequence.
                id = self.db.execute(Sequence('news_id_seq'))
            news = CommandNewsModel(
                id=id,
                version=version,
                title=data['title'],
                content=data['content'],
                author=data['author'],
                published_at=data.get('published_at'),
                tags=data['tags'],
            )
            self.db.add(news)
            self.db.commit()
            data['id'] = news.id
            data['version'] = news.version
            # Notify the query side so it can mirror this write.
            self.dispatch('replicate_db_event', data)
            return data
        except Exception as e:
            self.db.rollback()
            # NOTE(review): the exception object is returned, not raised,
            # so RPC callers must type-check the result -- confirm this is
            # the intended contract.
            return e
class Query:
    """Read-side (CQRS query) nameko service backed by MongoDB."""

    name = 'query_famous'

    @event_handler('command_famous', 'replicate_db_event')
    def normalize_db(self, data):
        """Mirror a command-side write into the query database."""
        try:
            news = QueryNewsModel.objects.get(
                id=data['id']
            )
            # Existing document: apply only the fields present in the event.
            news.update(
                version=data.get('version', news.version),
                title=data.get('title', news.title),
                content=data.get('content', news.content),
                author=data.get('author', news.author),
                published_at=data.get('published_at', news.published_at),
                tags=data.get('tags', news.tags),
            )
            news.reload()
        except mongoengine.DoesNotExist:
            # First replication of this id: create the document.
            QueryNewsModel(
                id=data['id'],
                version=data['version'],
                title=data.get('title'),
                content=data.get('content'),
                author=data.get('author'),
                tags=data.get('tags'),
            ).save()
        except Exception as e:
            # NOTE(review): nameko discards event-handler return values, so
            # this error is effectively swallowed -- confirm logging intent.
            return e

    @rpc
    def get_news(self, id):
        """Return one news document as a JSON string (or the exception)."""
        try:
            news = QueryNewsModel.objects.get(id=id)
            return news.to_json()
        except mongoengine.DoesNotExist as e:
            return e
        except Exception as e:
            return e

    @rpc
    def get_all_news(self, num_page, limit):
        """Return one page of news documents as a JSON string."""
        try:
            if not num_page:
                num_page = 1
            # Page numbers are 1-based; convert to a document offset.
            offset = (num_page - 1) * limit
            news = QueryNewsModel.objects.skip(offset).limit(limit)
            return news.to_json()
        except Exception as e:
            return e
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/NewsOrchestrator/tests_integration.py | # -*- coding: utf-8 -*-
import json
import unittest
from app import app
from flask_testing import TestCase
class BaseTestCase(TestCase):
    """Run the integration tests against the Testing configuration."""

    def create_app(self):
        app.config.from_object('config.TestingConfig')
        return app
class TestIntegration(BaseTestCase):
    """End-to-end tests exercising the orchestrator with live services."""

    def setUp(self):
        # Insert one news item; each test reuses the stored response.
        dict_obj = dict(
            title='My Test',
            content='Just a service test',
            author='unittest',
            tags=['Test', 'unit_test'],
        )
        with self.client:
            self.response_post = self.client.post(
                '/famous',
                data=json.dumps(dict_obj),
                content_type='application/json',
            )
            self.data_post = json.loads(self.response_post.data.decode())

    def test_get_single_news(self):
        """The item inserted in setUp can be fetched back by its id."""
        response = self.client.get('famous/{id}'.format(id=self.data_post['news']['id']))
        data = json.loads(response.data.decode())
        self.assertEqual(response.status_code, 200)
        self.assertIn('success', data['status'])
        self.assertTrue(len(data['news']) > 0)
        self.assertEqual(
            data['news']['title'],
            'My Test',
        )
        self.assertEqual(
            data['news']['content'],
            'Just a service test'
        )
        self.assertEqual(
            data['news']['author'],
            'unittest'
        )

    def test_add_news(self):
        """Test to insert a News."""
        self.assertEqual(self.response_post.status_code, 201)
        self.assertEqual('success', self.data_post['status'])
        self.assertEqual('My Test', self.data_post['news']['title'])


if __name__ == '__main__':
    unittest.main()
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter05/SportsNewsService/views.py | import datetime
import mongoengine
from flask import Blueprint, jsonify, request
from models import News
sports_news = Blueprint('sports_news', __name__)
@sports_news.route('/sports/news/<string:news_id>', methods=['GET'])
def get_single_news(news_id):
    """Return a single sports news document by id."""
    failure = {
        'status': 'fail',
        'message': 'User does not exist'
    }
    try:
        news = News.objects.get(id=news_id)
    except mongoengine.DoesNotExist:
        return jsonify(failure), 404
    return jsonify({
        'status': 'success',
        'data': news,
    }), 200
@sports_news.route('/sports/news/<int:num_page>/<int:limit>', methods=['GET'])
def get_all_news(num_page, limit):
    """Return one page of sports news documents."""
    page = News.objects.paginate(page=num_page, per_page=limit)
    return jsonify({
        'status': 'success',
        'data': page.items,
    }), 200
@sports_news.route('/sports/news', methods=['POST'])
def add_news():
    """Create a sports news document from the JSON request body."""
    post_data = request.get_json()
    if not post_data:
        return jsonify({
            'status': 'fail',
            'message': 'Invalid payload.'
        }), 400
    document = News(
        title=post_data['title'],
        content=post_data['content'],
        author=post_data['author'],
        tags=post_data['tags'],
    ).save()
    return jsonify({
        'status': 'success',
        'news': document,
    }), 201
@sports_news.route('/sports/news/<string:news_id>/publish/', methods=['GET'])
def publish_news(news_id):
    """Stamp an existing news document with the publication time."""
    # Defined up front so the 404 branch can reference it -- the original
    # raised NameError on the DoesNotExist path because response_object was
    # only assigned on the success path.
    response_object = {
        'status': 'fail',
        'message': 'News does not exist',
    }
    try:
        news = News.objects.get(id=news_id)
        # Bug fix: call now() -- the original passed the function object
        # itself, not a datetime value.
        news.update(published_at=datetime.datetime.now())
        news.reload()
        response_object = {
            'status': 'success',
            'news': news,
        }
        return jsonify(response_object), 200
    except mongoengine.DoesNotExist:
        return jsonify(response_object), 404
@sports_news.route('/sports/news', methods=['PUT'])
def update_news():
    """Update fields of an existing news document from the JSON body."""
    # Defined up front so the 404 branch can reference it -- the original
    # raised NameError there because response_object was only assigned on
    # the success path.
    response_object = {
        'status': 'fail',
        'message': 'News does not exist',
    }
    try:
        post_data = request.get_json()
        news = News.objects.get(id=post_data['news_id'])
        # Fall back to the stored value for any field not supplied.
        news.update(
            title=post_data.get('title', news.title),
            content=post_data.get('content', news.content),
            author=post_data.get('author', news.author),
            tags=post_data.get('tags', news.tags),
        )
        news.reload()
        response_object = {
            'status': 'success',
            'news': news,
        }
        return jsonify(response_object), 200
    except mongoengine.DoesNotExist:
        return jsonify(response_object), 404
@sports_news.route('/sports/news/<string:news_id>', methods=['DELETE'])
def delete_news(news_id):
    """Delete a sports news document; always reports success."""
    News.objects(id=news_id).delete()
    return jsonify({
        'status': 'success',
        'news_id': news_id,
    }), 200
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/PoliticsNewsService/models.py | <filename>Chapter11/PoliticsNewsService/models.py
import os
from datetime import datetime
from mongoengine import (
connect,
Document,
DateTimeField,
ListField,
IntField,
StringField,
)
from sqlalchemy import (
Column,
String,
BigInteger,
DateTime,
Index,
)
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class CommandNewsModel(Base):
    """Write-side news table; (id, version) forms the composite key."""

    __tablename__ = 'news'
    # Composite primary key: each update is stored as a new version row.
    id = Column(BigInteger, primary_key=True)
    version = Column(BigInteger, primary_key=True)
    title = Column(String(length=200))
    content = Column(String)
    author = Column(String(length=50))
    created_at = Column(DateTime, default=datetime.utcnow)
    published_at = Column(DateTime)
    # NOTE(review): 'politcs' is misspelled, but the query side and any
    # stored rows use the same spelling -- confirm before correcting.
    news_type = Column(String, default='politcs')
    tags = Column(postgresql.ARRAY(String))
    __table_args__ = Index('index', 'id', 'version'),
# Connect mongoengine to the query-side database at import time
# ('politcs' spelling kept consistent with the rest of this service).
connect('politcs', host=os.environ.get('QUERYBD_HOST'))


class QueryNewsModel(Document):
    """Read-side news document replicated from the command database."""

    id = IntField(primary_key=True)
    version = IntField(required=True)
    title = StringField(required=True, max_length=200)
    content = StringField(required=True)
    author = StringField(required=True, max_length=50)
    created_at = DateTimeField(default=datetime.utcnow)
    published_at = DateTimeField()
    news_type = StringField(default="politcs")
    tags = ListField(StringField(max_length=50))
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/RecommendationService/user_data_pb2_grpc.py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import user_data_pb2 as user__data__pb2
class GetUserDataStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetUser = channel.unary_unary(
'/GetUserData/GetUser',
request_serializer=user__data__pb2.UserDataRequest.SerializeToString,
response_deserializer=user__data__pb2.UserDataResponse.FromString,
)
class GetUserDataServicer(object):
# missing associated documentation comment in .proto file
pass
def GetUser(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GetUserDataServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetUser': grpc.unary_unary_rpc_method_handler(
servicer.GetUser,
request_deserializer=user__data__pb2.UserDataRequest.FromString,
response_serializer=user__data__pb2.UserDataResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'GetUserData', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/RecommendationService/user_data_pb2.py | <filename>Chapter11/RecommendationService/user_data_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: user_data.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='user_data.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0fuser_data.proto\"\x1d\n\x0fUserDataRequest\x12\n\n\x02id\x18\x01 \x01(\x05\";\n\x10UserDataResponse\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\r\n\x05\x65mail\x18\x03 \x01(\t2?\n\x0bGetUserData\x12\x30\n\x07GetUser\x12\x10.UserDataRequest\x1a\x11.UserDataResponse\"\x00\x62\x06proto3')
)
_USERDATAREQUEST = _descriptor.Descriptor(
name='UserDataRequest',
full_name='UserDataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='UserDataRequest.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=48,
)
_USERDATARESPONSE = _descriptor.Descriptor(
name='UserDataResponse',
full_name='UserDataResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='UserDataResponse.id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='UserDataResponse.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='email', full_name='UserDataResponse.email', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=50,
serialized_end=109,
)
DESCRIPTOR.message_types_by_name['UserDataRequest'] = _USERDATAREQUEST
DESCRIPTOR.message_types_by_name['UserDataResponse'] = _USERDATARESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UserDataRequest = _reflection.GeneratedProtocolMessageType('UserDataRequest', (_message.Message,), dict(
DESCRIPTOR = _USERDATAREQUEST,
__module__ = 'user_data_pb2'
# @@protoc_insertion_point(class_scope:UserDataRequest)
))
_sym_db.RegisterMessage(UserDataRequest)
UserDataResponse = _reflection.GeneratedProtocolMessageType('UserDataResponse', (_message.Message,), dict(
DESCRIPTOR = _USERDATARESPONSE,
__module__ = 'user_data_pb2'
# @@protoc_insertion_point(class_scope:UserDataResponse)
))
_sym_db.RegisterMessage(UserDataResponse)
_GETUSERDATA = _descriptor.ServiceDescriptor(
name='GetUserData',
full_name='GetUserData',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=111,
serialized_end=174,
methods=[
_descriptor.MethodDescriptor(
name='GetUser',
full_name='GetUserData.GetUser',
index=0,
containing_service=None,
input_type=_USERDATAREQUEST,
output_type=_USERDATARESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_GETUSERDATA)
DESCRIPTOR.services_by_name['GetUserData'] = _GETUSERDATA
# @@protoc_insertion_point(module_scope)
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter10/RecommendationService/service.py | import json
import logging
import os
import requests
from nameko.web.handlers import http
from nameko.events import event_handler
from models import (
create_user_node,
create_label_node,
create_recommendation,
get_labels_by_user_id,
get_users_by_label,
)
class Recommendation:
    """Nameko service that records which news tags each user has read."""

    name = 'recommendation'

    # declaring the receiver method as a handler to message broker
    @event_handler('recommendation_sender', 'receiver')
    def receiver(self, data):
        """Handle a 'news read' event: link the user to the news tags."""
        try:
            # getting the URL to do a sequential HTTP request to UsersService
            user_service_route = os.getenv('USER_SERVICE_ROUTE')
            # consuming data from UsersService using the requests lib
            user = requests.get(
                "{}{}".format(
                    user_service_route,
                    data['user_id'],
                )
            )
            # serializing the UsersService data to JSON
            user = user.json()
            # creating user node on Neo4j
            create_user_node(user)
            # getting all tags read
            for label in data['news']['tags']:
                # creating label node on Neo4j
                create_label_node(label)
                # creating the recommendation on Neo4j
                create_recommendation(
                    user['id'],
                    label,
                )
        except Exception as e:
            # Errors are logged and swallowed so one bad event cannot
            # crash the worker.
            logging.error('RELATIONSHIP_ERROR: {}'.format(e))
class RecommendationApi:
    """HTTP facade exposing the recommendation graph."""

    # NOTE(review): the service name is misspelled ('recommnedation_api');
    # left unchanged because external callers address the service by this
    # exact string.
    name = 'recommnedation_api'

    @http('GET', '/user/<int:user_id>')
    def get_recommendations_by_user(self, request, user_id):
        """Get recommendations by user_id"""
        try:
            relationship_response = get_labels_by_user_id(user_id)
            http_response = [
                rel.end_node()
                for rel in relationship_response
            ]
            return 200, json.dumps(http_response)
        except Exception as ex:
            # Bug fix: the original dropped the 'return', so failures fell
            # through and the handler returned None instead of the error
            # payload.
            return error_response(500, ex)

    @http('GET', '/label/<string:label>')
    def get_users_recomendations_by_label(self, request, label):
        """Get users recommendations by label"""
        try:
            relationship_response = get_users_by_label(label)
            http_response = [
                rel.end_node()
                for rel in relationship_response
            ]
            return 200, json.dumps(http_response)
        except Exception as ex:
            # Bug fix: was missing 'return' (same defect as above).
            return error_response(500, ex)
def error_response(code, ex):
    """Build a (status_code, JSON body) failure tuple for nameko http."""
    body = json.dumps({
        'status': 'fail',
        'message': str(ex),
    })
    return code, body
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter11/RecommendationService/models.py | <gh_stars>10-100
import os
from py2neo import (
Graph,
Node,
Relationship,
)
# Node labels and the single relationship type used in the graph.
USERS_NODE = 'Users'
LABELS_NODE = 'Labels'
REL_TYPE = 'RECOMMENDATION'

# Module-level Neo4j connection, created once at import time.
graph = Graph(os.getenv('DATABASE_URL'))
def get_user_node(user_id):
    """Return the Users node with the given id, or None if absent."""
    return graph.find_one(USERS_NODE, property_key='id', property_value=user_id)


def get_label_node(label):
    """Return the Labels node for *label*, or None if absent."""
    return graph.find_one(LABELS_NODE, property_key='id', property_value=label)
def get_labels_by_user_id(user_id):
    """Yield RECOMMENDATION relationships starting at the user's node."""
    start = get_user_node(user_id)
    return graph.match(start_node=start, rel_type=REL_TYPE)


def get_users_by_label(label):
    """Yield RECOMMENDATION relationships starting at the label's node."""
    start = get_label_node(label)
    return graph.match(start_node=start, rel_type=REL_TYPE)
def create_user_node(user):
    """Create a Users node for *user* unless it already exists."""
    # get user info from UsersService
    # NOTE(review): this accesses attributes (user.id) while the Chapter10
    # caller passes a plain dict (user['id']) -- confirm which shape the
    # caller in this chapter provides.
    if not get_user_node(user.id):
        user_node = Node(
            USERS_NODE,
            id=user.id,
            name=user.name,
            email=user.email,
        )
        graph.create(user_node)


def create_label_node(label):
    """Create a Labels node for *label* unless it already exists."""
    # get user info from UsersService
    if not get_label_node(label):
        label_node = Node(LABELS_NODE, id=label)
        graph.create(label_node)
def create_recommendation(user_id, label):
    """Link an existing user node and label node with RECOMMENDATION edges.

    Two directed relationships are created (label->user and user->label)
    so the pair can be matched from either side via graph.match().
    """
    user_node = get_user_node(user_id)
    label_node = get_label_node(label)
    graph.create(Relationship(
        label_node,
        REL_TYPE,
        user_node,
    ))
    graph.create(Relationship(
        user_node,
        REL_TYPE,
        label_node,
    ))
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter05/FamousNewsService/tests.py | <reponame>PacktPublishing/Microservice-Patterns-and-Best-Practices<filename>Chapter05/FamousNewsService/tests.py<gh_stars>10-100
import json
import unittest
from app import app
from flask_testing import TestCase
class BaseTestCase(TestCase):
    """Shared base for service tests: runs the app under TestingConfig."""
    def create_app(self):
        app.config.from_object('config.TestingConfig')
        return app
class TestDevelopmentConfig(TestCase):
    """Verifies the development configuration enables DEBUG."""
    def create_app(self):
        app.config.from_object('config.DevelopmentConfig')
        return app
    def test_app_is_development(self):
        self.assertTrue(app.config['DEBUG'] is True)
class TestTestingConfig(TestCase):
    """Verifies the testing configuration enables DEBUG and TESTING."""
    def create_app(self):
        app.config.from_object('config.TestingConfig')
        return app
    def test_app_is_testing(self):
        self.assertTrue(app.config['DEBUG'])
        self.assertTrue(app.config['TESTING'])
class TestProductionConfig(TestCase):
    """Verifies the production configuration disables DEBUG and TESTING."""
    def create_app(self):
        app.config.from_object('config.ProductionConfig')
        return app
    def test_app_is_production(self):
        self.assertFalse(app.config['DEBUG'])
        self.assertFalse(app.config['TESTING'])
class TestNewsService(BaseTestCase):
    """Functional test for the famous-news POST endpoint."""
    def test_add_news(self):
        """Ensure a new news item can be added to the database."""
        with self.client:
            response = self.client.post(
                '/famous/news',
                data=json.dumps(dict(
                    title='My Test',
                    content='Just a service test',
                    author='unittest',
                    tags=['Test', 'Functional_test'],
                )),
                content_type='application/json',
            )
            data = json.loads(response.data.decode())
            # 201 Created plus an echo of the stored article is expected.
            self.assertEqual(response.status_code, 201)
            self.assertIn('success', data['status'])
            self.assertIn('My Test', data['news']['title'])
if __name__ == '__main__':
unittest.main()
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter05/SportsNewsService/models.py | import datetime
from flask_mongoengine import MongoEngine
db = MongoEngine()
class News(db.Document):
    """MongoDB document for a sports news article."""
    # Headline, capped at 200 characters.
    title = db.StringField(required=True, max_length=200)
    # Full article body.
    content = db.StringField(required=True)
    author = db.StringField(required=True, max_length=50)
    # Callable default: evaluated per document at creation time.
    created_at = db.DateTimeField(default=datetime.datetime.now)
    # Left unset until the article is published.
    published_at = db.DateTimeField()
    # Category discriminator; fixed to "sports" in this service.
    news_type = db.StringField(default="sports")
    tags = db.ListField(db.StringField(max_length=50))
|
PacktPublishing/Microservice-Patterns-and-Best-Practices | Chapter05/FamousNewsService/config.py | import os
class BaseConfig:
    """Base configuration"""
    DEBUG = False
    TESTING = False
    # Overridden per environment with the MongoDB connection settings.
    MONGODB_SETTINGS = {}
class DevelopmentConfig(BaseConfig):
    """Development configuration"""
    DEBUG = True
    MONGODB_SETTINGS = {
        'db': 'famous_dev',
        # NOTE(review): if DATABASE_HOST is unset, os.environ.get returns
        # None and format() yields the literal prefix "None" — confirm the
        # env var is always defined in every environment.
        'host': '{}{}'.format(
            os.environ.get('DATABASE_HOST'),
            'famous_dev',
        ),
    }
class TestingConfig(BaseConfig):
    """Testing configuration"""
    DEBUG = True
    TESTING = True
    MONGODB_SETTINGS = {
        'db': 'famous_test',
        # Host is DATABASE_HOST env var concatenated with the db name.
        'host': '{}{}'.format(
            os.environ.get('DATABASE_HOST'),
            'famous_test',
        ),
    }
class ProductionConfig(BaseConfig):
    """Production configuration"""
    DEBUG = False
    MONGODB_SETTINGS = {
        'db': 'famous',
        # Host is DATABASE_HOST env var concatenated with the db name.
        'host': '{}{}'.format(
            os.environ.get('DATABASE_HOST'),
            'famous',
        ),
    }
|
mmmuuuuua/FCOS | tests/test_coco.py | from PIL import Image
import os
import os.path
from pycocotools.coco import COCO
import unittest
annFile = "C:\\zhulei\\data\\instance_segmentation\\rock\\shapes\\train\\instances_leaf_train2017.json"
root = "C:\\zhulei\\data\\instance_segmentation\\rock\\shapes\\train\\shapes_train2017"
class TestCocoApi(unittest.TestCase):
    """Smoke-test of the pycocotools COCO API against a local dataset."""
    def test_coco_api(self):
        # Load annotations and pick the second image id (index 1).
        coco = COCO(annFile)
        ids = list(sorted(coco.imgs.keys()))
        print(ids)
        img_id = ids[1]
        print(img_id)
        ann_ids = coco.getAnnIds(imgIds=img_id)
        print(ann_ids)
        target = coco.loadAnns(ann_ids)
        # print(target)
        path = coco.loadImgs(img_id)[0]['file_name']
        print(path)
        imgs_path = os.path.join(root, path)
        # NOTE(review): nothing is asserted and `img` is overwritten each
        # iteration — this only checks the images open without error.
        for img_path in os.listdir(imgs_path):
            img = Image.open(os.path.join(imgs_path, img_path)).convert('RGB')
            # img = Image.open(os.path.join(root, path)).convert('RGB')
if __name__ == "__main__":
unittest.main()
|
peaceabuya/TesterSimulator | socket_prog.py | #!/usr/bin/python
# Import modules
import sys
import socket
import time
# Create socket object
s = socket.socket()
print s
def get_command(command):
    """Send *command* to the tester over the module socket `s` (Python 2).

    Protocol: send ENQ (0x05) until the peer ACKs (0x06), send the
    command, wait for the peer's ENQ, ACK it, then print the reply.
    """
    print s
    # Send ENQ and check ACK
    s.send('\x05')
    #time.sleep(.1)
    rsp = s.recv(1)
    while rsp != '\x06':
        # Keep retrying ENQ until the device acknowledges.
        s.send('\x05')
        #time.sleep(.1)
        rsp = s.recv(1)
    # Send command
    s.send(command)
    #time.sleep(.1)
    # Wait for ENQ
    rsp = s.recv(1)
    while rsp != '\x05':
        print 'Waiting for ENQ'
        rsp = s.recv(1)
    # Acknowledge, then read the device's response payload.
    s.send('\x06')
    time.sleep(.1)
    rsp = s.recv(1024)
    print rsp
    return 1
def main(unused):
# Host IP Address and Port Number
host_ip = '172.26.67.100'
host_port = 10001
print s
# Establish connection
s.connect((host_ip, host_port))
for i in xrange(5):
print i
start = time.time()
get_command('\x02CD\x03')
print time.time() - start
get_command('\x02DHT\x03')
return 0
# Close connection
s.close()
print s
if __name__ == '__main__':
sys.exit(main(sys.argv)) |
99wanglin/d2w_mini_projects | mp_calc/app/serverlibrary.py | <reponame>99wanglin/d2w_mini_projects<gh_stars>0
def merge(array, p, q, r, byfunc):
    """Stable in-place merge of the sorted runs array[p..q] and array[q+1..r].

    Elements are ordered by the key function ``byfunc``; ties take the
    left run first, preserving stability.
    """
    lo_run = array[p:q + 1]
    hi_run = array[q + 1:r + 1]
    i = j = 0
    for dest in range(p, r + 1):
        take_left = j >= len(hi_run) or (
            i < len(lo_run) and byfunc(lo_run[i]) <= byfunc(hi_run[j])
        )
        if take_left:
            array[dest] = lo_run[i]
            i += 1
        else:
            array[dest] = hi_run[j]
            j += 1
def mergesort_recursive(array, p, r,byfunc):
    """Sort array[p..r] in place, ordering elements by byfunc(element)."""
    q = (r+p)//2
    # Base case covers segments of length 1 or 2 (r - p <= 1); a single
    # merge() call finishes them.
    if r-p <= 1:
        merge(array, p, q, r, byfunc)
        return
    else:
        # Recurse on both halves, then merge them together.
        mergesort_recursive(array, p, q, byfunc)
        mergesort_recursive(array, q+1, r, byfunc)
        merge(array, p, q, r, byfunc)
def mergesort(array, byfunc=None):
    """Sort *array* in place with merge sort, optionally keyed by *byfunc*.

    Bug fix: the default ``byfunc=None`` previously crashed with
    TypeError for any list of two or more elements, because merge()
    calls the key function unconditionally. None now falls back to the
    identity key, so ``mergesort(xs)`` sorts by natural ordering.
    """
    if byfunc is None:
        byfunc = lambda item: item
    # Lists of length 0 or 1 are already sorted.
    if len(array) <= 1:
        return
    mergesort_recursive(array, 0, len(array) - 1, byfunc)
class Stack:
    """LIFO stack backed by a Python list (top of stack at the end)."""

    def __init__(self):
        self.__items = []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.__items.append(item)

    def pop(self):
        """Remove and return the top item, or None when empty."""
        if self.is_empty:
            return None
        return self.__items.pop()

    def peek(self):
        """Return the top item without removing it, or None when empty."""
        return None if self.is_empty else self.__items[-1]

    @property
    def is_empty(self):
        """True when the stack holds no items."""
        return not self.__items

    @property
    def size(self):
        """Number of items currently stored."""
        return len(self.__items)
class Queue:
    """Two stacks addressed by side ('left' selects the left stack,
    anything else the right one).

    NOTE(review): despite the name this is not a plain FIFO queue —
    items are pushed/popped on whichever stack the caller selects, and
    the size / emptiness / peek views consult only the right-hand
    stack, mirroring the original behaviour.
    """

    def __init__(self):
        self.left_stack = Stack()
        self.right_stack = Stack()

    @property
    def is_empty(self):
        return self.right_stack.is_empty

    @property
    def size(self):
        return self.right_stack.size

    def enqueue(self, item, side):
        target = self.left_stack if side == 'left' else self.right_stack
        target.push(item)

    def dequeue(self, side):
        source = self.left_stack if side == 'left' else self.right_stack
        return source.pop()

    def peek(self):
        return self.right_stack.peek()
class EvaluateExpression:
    """Parses and evaluates integer expressions with + - * / and parentheses."""
    # Characters permitted in an expression (digits, operators, parens, space).
    valid_char = '0123456789+-*/() '
    # Operator/parenthesis characters recognised while scanning.
    operators = '*/-+()'
    # Digit characters that start an operand.
    operand = '0123456789'
    def __init__(self, string=""):
        # Goes through the validating setter below.
        self.expression = string
    @property
    def expression(self):
        """The stored expression ('' if an invalid one was supplied)."""
        return self._expression
    @expression.setter
    def expression(self, new_expr):
        # Reject the whole expression when any character is not allowed.
        for val in new_expr:
            if val not in self.valid_char:
                self._expression = ''
                return
        else:
            # for-else: runs only when the loop completed without `return`.
            self._expression = new_expr
    def insert_space(self):
        """Return the expression with every operator padded by spaces."""
        temp = ''
        for i, val in enumerate(self._expression):
            if val in self.operators:
                temp = temp + ' ' + val + ' '
            else:
                temp += val
        return temp
    def applyop(self, opr, val1, val2):
        """Apply binary operator *opr* to integer operands (/ floors)."""
        val1 = int(val1)
        val2 = int(val2)
        if opr == '+':
            return val1 + val2
        if opr == '-':
            return val1 - val2
        if opr == '*':
            return val1 * val2
        if opr == '/':
            return val1 // val2
    def process_operator(self, operand_stack, operator_stack):
        """Pop one operator and two operands; push the computed result."""
        right = operand_stack.pop()
        left = operand_stack.pop()
        opr = operator_stack.pop()
        result = self.applyop(opr, left, right)
        operand_stack.push(result)
    def evaluate(self):
        """Evaluate the stored expression with a two-stack algorithm."""
        operand_stack = Stack()
        operator_stack = Stack()
        expression = self.insert_space()
        # NOTE(review): `tokens` is computed but never used — the scan
        # below walks self.expression character by character instead.
        tokens = expression.split()
        n = len(self.expression)
        i = 0
        while i < n:
            val = self.expression[i]
            if val in self.operand:
                # Consume a full multi-digit number.
                number = ''
                j = i
                while j < n and '0' <= self.expression[j] <= '9':
                    number += self.expression[j]
                    j += 1
                i = j-1
                # Operands are pushed as strings; applyop() converts them.
                operand_stack.push(number)
            elif val in '+-':
                # Lowest precedence: reduce everything except parentheses.
                # (Accesses Stack's name-mangled private list directly.)
                while not operator_stack.is_empty and \
                    operator_stack._Stack__items[-1] not in '()':
                    self.process_operator(operand_stack, operator_stack)
                operator_stack.push(val)
            elif val in '*/':
                # Same precedence: reduce pending * and / first.
                while not operator_stack.is_empty and \
                    operator_stack._Stack__items[-1] in '*/':
                    self.process_operator(operand_stack, operator_stack)
                operator_stack.push(val)
            elif val == '(':
                operator_stack.push('(')
            elif val == ')':
                # Reduce until the matching '(' then discard it.
                while operator_stack._Stack__items[-1] != '(':
                    self.process_operator(operand_stack, operator_stack)
                operator_stack.pop()
            i += 1
        # Reduce whatever operators remain.
        while not operator_stack.is_empty:
            self.process_operator(operand_stack, operator_stack)
        # NOTE(review): a bare single-number expression leaves a *string*
        # on the stack, so round() would raise TypeError — confirm intended.
        return round(operand_stack.pop(), 2)
def get_smallest_three(challenge):
    """Return the three challenge records with the shortest elapsed_time."""
    entries = list(challenge.records)
    mergesort(entries, lambda rec: rec.elapsed_time)
    return entries[:3]
|
99wanglin/d2w_mini_projects | mp_sort/app/static/library.py | <filename>mp_sort/app/static/library.py<gh_stars>0
from org.transcrypt.stubs.browser import *
import random
array = []
def gen_random_int(number, seed):
    """Return the integers 0..number-1 shuffled deterministically by *seed*."""
    random.seed(seed)
    values = list(range(number))
    random.shuffle(values)
    return values
def create_array_str():
    """Render the global ``array`` as "a, b, c." (comma-separated, dot-ended).

    Bug fix: elements are ints, so they must be converted with str()
    before concatenation — the original ``array_str += v`` relied on
    JavaScript-style coercion under Transcrypt and raises TypeError
    under CPython.
    """
    array_str = ''
    for i, v in enumerate(array):
        array_str += str(v)
        if i < len(array) - 1:
            array_str += ', '
        else:
            array_str += '.'
    return array_str
def generate():
    """Fill the global array with a shuffled list and show it in #generate."""
    global array
    count = 10
    rng_seed = 200
    # Deterministic shuffle of 0..count-1 stored in the module-level array.
    array = gen_random_int(count, rng_seed)
    # Render as "a, b, c." and inject into the page element with id
    # "generate" (Transcrypt browser DOM stub).
    rendered = create_array_str()
    document.getElementById("generate").innerHTML = rendered
def sortnumber1():
    ''' This function is used in Exercise 1.
        The function is called when the sort button is clicked.
        It sorts the global `array` in place with an insertion sort,
        renders it as a string, writes it into the #sorted element,
        and returns the rendered string.
    '''
    n = len(array)
    for i in range(n):
        index = i
        temporary = array[i]
        # Walk the current element leftwards while it is smaller than
        # its left neighbour (insertion sort by adjacent swaps).
        while index > 0 and temporary < array[index-1]:
            # NOTE(review): this inner test repeats the loop condition
            # and is always true here — redundant but harmless.
            if array[index] < array[index-1] :
                array[index], array[index - 1] = array[index-1], array[index]
            index -=1
            temporary = array[index]
    array_str = create_array_str()
    document.getElementById("sorted").innerHTML = array_str
    return array_str
def sortnumber2():
    """Exercise 2: sort the comma-separated numbers typed into the page.

    Reads the text input named "numbers", parses it into the global
    ``array``, sorts it via sortnumber1(), and writes the result into
    the #sorted element.
    """
    global array
    # Grab the raw text from the input field named "numbers".
    value = document.getElementsByName("numbers")[0].value
    # Throw alert and stop if nothing in the text input.
    if value == "":
        window.alert("Your textbox is empty")
        return
    # Parse "1, 2, 3" into a list of ints (whitespace is ignored).
    # (Removed a leftover dead `pass` statement and replaced the manual
    # int-conversion loop with a comprehension.)
    cleaned = value.replace(" ", '')
    array = [int(token) for token in cleaned.split(',')]
    # sortnumber1 sorts the global array in place, updates #sorted itself
    # and returns the rendered string; the assignment below repeats the
    # DOM update exactly as the original did.
    array_str = sortnumber1()
    document.getElementById("sorted").innerHTML = array_str
|
99wanglin/d2w_mini_projects | test.py | def fibonacci(index):
    """Return the index-th Fibonacci number (0, 1, 1, 2, ...) recursively."""
    # NOTE(review): `result` is assigned but never used — dead local.
    result = 0
    if index == 0:
        return 0
    elif index == 1:
        return 1
    else:
        # Naive exponential-time recursion; fine for tiny inputs only.
        return fibonacci(index - 1) + fibonacci(index - 2)
fibonacci(3) |
99wanglin/d2w_mini_projects | mp_calc/virtenv/Scripts/transcrypt-script.py | <reponame>99wanglin/d2w_mini_projects<gh_stars>0
#!"C:\Users\sim_w\Desktop\School\Term 3 DDW\d2w_mini_projects\mp_calc\virtenv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'Transcrypt==3.7.16','console_scripts','transcrypt'
__requires__ = 'Transcrypt==3.7.16'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Transcrypt==3.7.16', 'console_scripts', 'transcrypt')()
)
|
99wanglin/d2w_mini_projects | mergesort.py | <reponame>99wanglin/d2w_mini_projects
def merge(array, p, q, r):
    """Stable in-place merge of the sorted runs array[p..q] and array[q+1..r]."""
    lo_run = array[p:q + 1]
    hi_run = array[q + 1:r + 1]
    i = j = 0
    for dest in range(p, r + 1):
        # Take from the left run while it still has the smaller (or
        # equal — stability) element, or when the right run ran out.
        if j >= len(hi_run) or (i < len(lo_run) and lo_run[i] <= hi_run[j]):
            array[dest] = lo_run[i]
            i += 1
        else:
            array[dest] = hi_run[j]
            j += 1
def mergesort_recursive(array, p, r):
    """Sort array[p..r] in place by recursive merge sort.

    Bug fix: the base case is now ``r - p <= 1`` instead of ``== 1``.
    With ``== 1`` any segment of odd length eventually produced a
    single-element segment (r == p), which fell into the recursive
    branch and recursed on itself forever — only power-of-two sized
    inputs sorted successfully.
    """
    q = (p + r) // 2
    if r - p <= 1:
        # Segments of length 1 or 2: a single merge finishes them.
        merge(array, p, q, r)
    else:
        mergesort_recursive(array, p, q)
        mergesort_recursive(array, q + 1, r)
        merge(array, p, q, r)
def mergesort(array):
    """Sort *array* in place using merge sort.

    Robustness fix: lists of length 0 or 1 are already sorted and are
    returned immediately — this also avoids passing r = -1 into the
    recursion for an empty list.
    """
    if len(array) <= 1:
        return
    mergesort_recursive(array, 0, len(array) - 1)
input_array = [5, 2, 4, 7, 1, 3, 2, 6]
mergesort(input_array)
print(input_array) |
ddeangelis/unshorten | isurlshortener/unshortener.py | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Unshortener Documentation
This module unshortens URLs
"""
import re
import http
from urllib.parse import urlparse
from http import client
from isurlshortener.exceptions import PathMissing, UnhandledHTTPStatusCode, LocationHeaderMissing, ProtocolException
class Unshortener(object):
    """Resolves shortened URLs by requesting them and inspecting the HTTP status."""
    #FIXME: Most servers redirect http to https --> special handling for that?
    @staticmethod
    def unshorten_url(url: str) -> str:
        """Tries to unshorten an URL by requesting it and checking HTTP status

        Args:
            url: URL to check. The url MUST contain a protocol (e.g., http://), a domain (e.g., example.net), and a path
                (e.g., something/) --> http://example.net/something/

        Returns:
            Unshortened URL

        Raises:
            IsUrlShortener.PathMissing: URL has no path component
            IsUrlShortener.LocationHeaderMissing: Server did not return a Location
            IsUrlShortener.UnhandledHTTPStatusCode: Unsupported HTTP status code
        """
        url = Unshortener._prepare_url(url)
        # Bug fix: the original used `url.path is ''` / `is '/'`, which
        # tests object identity rather than equality (and triggers a
        # SyntaxWarning on CPython >= 3.8). Compare by value instead.
        if url.path in ('', '/'):
            raise PathMissing()
        server_connection = Unshortener._get_connection(url)
        server_connection.request('GET', url.path)
        response = server_connection.getresponse()
        if response.status in range(300, 309):
            # 3xx redirect: the Location header holds the real target.
            return Unshortener._get_location_from_header(response.getheaders())
        elif response.status in range(200, 201):
            # Exactly 200 OK: the URL was not shortened — return as-is.
            return url.geturl()
        else:
            raise UnhandledHTTPStatusCode(response.status)
    @staticmethod
    def _get_location_from_header(headers: list) -> str:
        """Returns the location information from the headers

        Args:
            headers: Header (name, value) pairs returned from the server

        Returns:
            Location information

        Raises:
            IsUrlShortener.LocationHeaderMissing: Location field missing in the header
        """
        for header_field in headers:
            # Header names are case-insensitive per RFC 7230.
            if header_field[0].lower() == 'location':
                return header_field[1]
        raise LocationHeaderMissing
    @staticmethod
    def _prepare_url(url: str) -> dict:
        """Prepares a given URL strict for the unshortener

        Args:
            url: URL to prepare

        Returns:
            Parsed URL (urllib.parse.ParseResult)

        Raises:
            IsUrlShortener.ProtocolException: http/https protocol prefix is missing
        """
        if not re.findall('^(http[s]?://)', url):
            raise ProtocolException('Invalid protocol or no protocol given')
        return urlparse(url)
    @staticmethod
    def _get_connection(url: dict) -> [http.client.HTTPConnection, http.client.HTTPSConnection]:
        """Prepares a connection to a given server

        Args:
            url: Parsed URL with server information

        Returns:
            Connection to the server

        Raises:
            IsUrlShortener.ProtocolException: Protocol not supported
        """
        if url.scheme == 'http':
            return http.client.HTTPConnection(url.netloc)
        elif url.scheme == 'https':
            return http.client.HTTPSConnection(url.netloc)
        else:
            raise ProtocolException('Protocol Exception: "{}"'.format(url.scheme))
|
ddeangelis/unshorten | isurlshortener/exceptions.py | <filename>isurlshortener/exceptions.py
class ProtocolException(Exception):
    """Raised when a URL has a missing or unsupported protocol scheme."""
    pass
class PathMissing(Exception):
    """Raised when a URL has no path component to resolve."""
    pass
class UnhandledHTTPStatusCode(Exception):
    """Raised when the server answers with a status this library cannot handle."""
    pass
class LocationHeaderMissing(Exception):
    """Raised when a redirect response carries no Location header."""
    pass
|
ddeangelis/unshorten | isurlshortener/tests/test_isurlshortener.py | import unittest
from isurlshortener.isurlshortener import IsUrlShortener
class TestIsUrlShortener(unittest.TestCase):
    """Unit tests for the IsUrlShortener lookup logic."""
    def test_empty_string(self):
        self.assertFalse(IsUrlShortener.is_url_shortener(''), 'Empty string error')
    def test_bitly_url(self):
        HTTPS_BIT_LY = 'https://bitly.is/1g3AhR6'
        self.assertTrue(IsUrlShortener.is_url_shortener(HTTPS_BIT_LY),
                        'Bit.ly not detected "{}"'.format(HTTPS_BIT_LY))
    def test_bitly_url_without_http_prefix(self):
        # The scheme prefix is optional in the matching regex.
        BIT_LY = 'bitly.is/1g3AhR6'
        self.assertTrue(IsUrlShortener.is_url_shortener(BIT_LY),
                        'Bit.ly not detected "{}"'.format(BIT_LY))
    def test_no_shortener(self):
        GOOGLE = 'https://www.google.com'
        self.assertFalse(IsUrlShortener.is_url_shortener(GOOGLE),
                         'Detected {} as shortener'.format(GOOGLE))
    def test_former_url_shortener(self):
        # fur.ly is listed as a defunct service; bitly.is is still active.
        expected = {'bitly.is/1g3AhR6': False, 'fur.ly/1g3AhR6': True}
        for k, v in expected.items():
            self.assertEqual(v, IsUrlShortener.is_disabled_url_shortener(k),
                             'Error for {}'.format(k))
    def test_is_or_was_shortener(self):
        expected = {'bitly.is/1g3AhR6': True,
                    'fur.ly/1g3AhR6': True,
                    'bitly.is': True,
                    'www.google.com': False}
        for k, v in expected.items():
            self.assertEqual(v, IsUrlShortener.is_or_was_from_url_shortener(k),
                             'Error for {}'.format(k))
    def test_raises_assertion_on_missing_data(self):
        BIT_LY = 'bitly.is/1g3AhR6'
        with self.assertRaises(FileNotFoundError):
            IsUrlShortener._is_in_servicelist(BIT_LY, 'this_file_does_not_exist')
|
ddeangelis/unshorten | setup.py | #!/usr/bin/env python
from distutils.core import setup
# NOTE(review): distutils is deprecated (removed in Python 3.12) —
# consider migrating to setuptools.
setup(name='isurlshortener',
      version='0.1.6',
      description='Provides information if a given url is from an url shortener service',
      author='<NAME>',
      author_email='<EMAIL>',
      packages=['isurlshortener'],
      # Ship the bundled shortener-domain lists alongside the package.
      data_files=[('isurlshortener', ['isurlshortener/data/shortener_services.txt',
                                      'isurlshortener/data/former_shortener_services.txt'])],
      license='MIT')
|
ddeangelis/unshorten | isurlshortener/tests/test_unshorten.py | import unittest
from isurlshortener.unshortener import Unshortener
from isurlshortener.exceptions import ProtocolException, PathMissing
class TestUnshortener(unittest.TestCase):
    """Unit tests for Unshortener.

    NOTE(review): test_unshorten and test_unshorten_already_unshortened
    perform live HTTP requests and depend on external services staying
    stable — they can fail without any code change.
    """
    def test_unshorten_without_protocol(self):
        BIT_LY = 'bitly.is/1g3AhR6'
        with self.assertRaises(ProtocolException):
            Unshortener.unshorten_url(BIT_LY)
    def test_unshorten_with_invalid_protocol(self):
        sample_set = ['ftp://bit.ly/1G3AhR6']
        for sample in sample_set:
            with self.assertRaises(ProtocolException):
                Unshortener.unshorten_url(sample)
    def test_unshorten_without_path(self):
        # Both the empty path and a bare '/' must be rejected.
        sample_set = ['http://bitly.is/', 'http://bitly.is']
        for sample in sample_set:
            with self.assertRaises(PathMissing):
                Unshortener.unshorten_url(sample)
    def test_unshorten(self):
        sample_set = {'http://bit.ly/1ixYuRi': 'http://www.microsoft.com/'}
        for short_url, long_url in sample_set.items():
            self.assertEqual(long_url, Unshortener.unshorten_url(short_url))
    def test_unshorten_already_unshortened(self):
        sample_set = {'https://www.microsoft.com/de-de/': 'https://www.microsoft.com/de-de/'}
        for short_url, long_url in sample_set.items():
            self.assertEqual(long_url, Unshortener.unshorten_url(short_url))
    def test_get_location_from_header(self):
        header = [('content-length', '0'),
                  ('date', 'Mon, 30 Jan 2017 18:48:49 GMT'),
                  ('location', 'https://twitter.com/abc'),
                  ('server', 'tsa_o')]
        expected = 'https://twitter.com/abc'
        self.assertEqual(expected, Unshortener._get_location_from_header(header))
|
ddeangelis/unshorten | isurlshortener/isurlshortener.py | <reponame>ddeangelis/unshorten
# -*- coding: utf-8 -*-
"""IsUrlShortener Documentation
This module answer the question if a given URL is a URL from an URL shortener service
"""
import os
import re
class IsUrlShortener(object):
    """IsUrlShortener

    Answers whether a given URL or domain belongs to a (current or
    former) URL-shortener service, using the bundled domain lists.
    """
    @staticmethod
    def is_url_shortener(url: str) -> bool:
        """Checks if a given URL is from an active URL shortener

        Args:
            url: URL to check

        Returns:
            True or False
        """
        return IsUrlShortener._is_in_servicelist(url, 'data/shortener_services.txt')
    @staticmethod
    def is_disabled_url_shortener(url: str) -> bool:
        """Checks if a given URL is from an inactive URL shortener

        Args:
            url: URL to check

        Returns:
            True or False
        """
        return IsUrlShortener._is_in_servicelist(url, 'data/former_shortener_services.txt')
    @staticmethod
    def is_or_was_from_url_shortener(url: str) -> bool:
        """Checks if a given URL or domain is from an in-/active URL shortener

        Args:
            url: URL to check

        Returns:
            True or False
        """
        return IsUrlShortener.is_disabled_url_shortener(url) or \
            IsUrlShortener.is_url_shortener(url)
    @staticmethod
    def _is_in_servicelist(service_domain: str, servicelist: str) -> bool:
        """Checks if a given URL is included in a given service list

        Args:
            service_domain: URL or domain to check
            servicelist: File (relative to this module) listing service domains

        Returns:
            True or False

        Raises:
            FileNotFoundError: If servicelist is not found
        """
        if not service_domain:
            return False
        list_path = os.path.join(os.path.dirname(__file__), servicelist)
        with open(list_path, 'r') as shortener_list:
            for shortener in shortener_list:
                # Bug fix: the original escaped only '.' via replace('.', '\.'),
                # an invalid escape sequence (DeprecationWarning) that also left
                # every other regex metacharacter unescaped. re.escape handles
                # all metacharacters correctly.
                domain = re.escape(shortener.rstrip())
                regex = '(http[s]?://)?{}(/.*)?'.format(domain)
                if re.search(regex, service_domain):
                    return True
        return False
|
ddeangelis/unshorten | isurlshortener/__init__.py | <reponame>ddeangelis/unshorten
from .isurlshortener import IsUrlShortener
from .unshortener import Unshortener
|
deni-zen/CSVelte | docs/_contrib/apigenrole.py | from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
# I cant figure out how the hell to import this so I'm just gonna forget it for now
def apigen_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Link to API Docs page.

    Returns 2 part tuple containing list of nodes to insert into the
    document and a list of system messages. Both are allowed to be
    empty.

    :param name: The role name used in the document.
    :param rawtext: The entire markup snippet, with role.
    :param text: The text marked with the role.
    :param lineno: The line number where rawtext appears in the input.
    :param inliner: The inliner instance that called us.
    :param options: Directive options for customization.
    :param content: The directive content for customization.

    NOTE(review): `options={}` and `content=[]` are mutable default
    arguments shared across calls — safe only while never mutated here.
    """
    try:
        # Convert PHP-style namespace separators to dots and strip a
        # leading separator if present.
        class_name = text.replace('\\', '.')
        if text[0:1] == '.':
            class_name = class_name[1:]
        if class_name == "":
            raise ValueError
    except ValueError:
        # Report the malformed role text as a docutils system message.
        msg = inliner.reporter.error(
            'Class name must be a valid fully qualified class name; '
            '"%s" is invalid.' % text, line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    app = inliner.document.settings.env.app
    node = make_link_node(rawtext, app, 'class', class_name, options)
    return [node], []
def make_link_node(rawtext, app, type, slug, options):
    """Create a link to an ApiGen API docs page.

    :param rawtext: Text being replaced with link node.
    :param app: Sphinx application context
    :param type: Item type (class, namespace, etc.)
    :param slug: ID of the thing to link to
    :param options: Options dictionary passed to role func.
    """
    #
    try:
        base = app.config.apigen_docs_uri
        if not base:
            raise AttributeError
    # NOTE(review): `except X, err` is Python 2-only syntax — this module
    # cannot run under Python 3 without porting to `except X as err`.
    except AttributeError, err:
        raise ValueError('apigen_docs_uri configuration value is not set (%s)' % str(err))
    # Build API docs link: <base>/<type>-<slug>.html, adding a slash
    # only when the configured base does not already end with one.
    slash = '/' if base[-1] != '/' else ''
    ref = base + slash + type + '-' + slug + '.html'
    set_classes(options)
    node = nodes.reference(rawtext, type + ' ' + utils.unescape(slug), refuri=ref,
                           **options)
    return node
def setup(app):
    """Install the plugin.

    Registers the `apiclass` role and the `apigen_docs_uri` config value
    with Sphinx.

    :param app: Sphinx application context.
    """
    app.info('Initializing Api Class plugin')
    app.add_role('apiclass', apigen_role)
    # app.add_role('apins', apigen_namespace_role)
    app.add_config_value('apigen_docs_uri', None, 'env')
    return
|
marsild/ProgettoReti-2021 | spahiuWebServer.py | <reponame>marsild/ProgettoReti-2021<filename>spahiuWebServer.py
# -*- coding: utf-8 -*-
"""
Progetto di Programmazione di Reti a.a. 2020-21
Cognome e Nome: <NAME>
Matricola: 916048
Traccia 2: Python Web Server
"""
import sys
import signal
import http.server
import socketserver
import threading
# Event used to wait without busy-waiting.
waiting_refresh = threading.Event()
# Read the command-line arguments: when argv[1] is present it carries
# the port number to listen on.
if sys.argv[1:]:
    try:
        port = int(sys.argv[1])
    # NOTE(review): bare `except:` also swallows SystemExit and
    # KeyboardInterrupt — `except ValueError:` would be safer here.
    except:
        print("Errore nell'inserimento della porta.")
        print("Porta inserita: " + sys.argv[1])
        print("Chiusura del programma.\n")
        sys.exit(0)
else: # Fall back to the default port when none was given.
    port = 8080
print('Porta inserita: {0}\n'.format(port))
print("Combinazione per interrompere da tastiera: Ctrl+C")
# classe che mantiene le funzioni di SimpleHTTPRequestHandler e implementa
# il metodo get nel caso in cui si voglia fare un refresh
class ServerHandler(http.server.SimpleHTTPRequestHandler):
    """Request handler that logs every GET and supports a /refresh endpoint."""
    def do_GET(self):
        # Append each client request (path + headers) to an audit log file.
        with open("AllRequestsGET.txt", "a") as out:
            info = "GET request,\nPath: " + str(self.path) + "\nHeaders:\n" + str(self.headers) + "\n"
            out.write(str(info))
        if self.path == '/refresh':
            # NOTE(review): `resfresh_contents` is not defined anywhere in
            # this chunk (likely a typo for `refresh_contents`); hitting
            # /refresh would raise NameError — confirm the intended helper.
            resfresh_contents()
            # After refreshing, serve the home page.
            self.path = '/'
        http.server.SimpleHTTPRequestHandler.do_GET(self)
# ThreadingTCPServer per gestire più richieste
server = socketserver.ThreadingTCPServer(('127.0.0.1',port), ServerHandler)
# La composizione è la stessa per tutti i servizi:
# message = header_html + title + navigation_bar + end_page + footer_html
# la parte iniziale (header_html) è identica per tutti
header_html = """
<html>
<head>
<style>
h1 {
text-align: center;
margin: 0;
font-family: Monospace;
}
h2 {
text-align: center;
margin: 0;
font-family: Monospace;
}
table {width:70%;}
img {
max-width:300;
max-height:200px;
width:auto;
}
td {width: 33%;}
p {
text-align:justify;
font-family: Monospace;
font-size: 20px;
}
td {
padding: 20px;
text-align: center;
}
.topnav {
overflow: hidden;
background-color: #F2F3F4;
}
.topnav a {
float: left;
color: #333;
text-align: center;
padding: 20px 16px;
text-decoration: none;
font-size: 17px;
}
.topnav a:hover {
background-color: #4CAF50;
color: white;
}
.topnav a.active {
background-color: #ddd;
color: black;
}
</style>
</head>
<body>
"""
# [message = header_html + title + navigation_bar + end_page + footer_html]
# la barra di navigazione (navigation_bar) è identica
navigation_bar = """
<div class="topnav">
<a class="active" href="http://127.0.0.1:{port}">Home</a>
<a href="http://127.0.0.1:{port}/118.html">118</a>
<a href="http://127.0.0.1:{port}/pronto-soccorso.html">Pronto soccorso</a>
<a href="http://127.0.0.1:{port}/medici-famiglia.html">Medici e Pediatri di famiglia</a>
<a href="http://127.0.0.1:{port}/guardia-medica.html">Guardia Medica (ex)</a>
<a href="http://127.0.0.1:{port}/farmacie-turno.html">Farmacie di turno</a>
<a href="http://127.0.0.1:{port}/FSE.html">FSE</a>
<a href="http://127.0.0.1:{port}/refresh" style="float: right">Aggiorna contenuti</a>
<a href="http://127.0.0.1:{port}/info.pdf" download="info.pdf" style="float: right">Download info</a>
<a href="https://www.auslromagna.it/covid-19-aggiornamenti" style="float: right; font-family: Monospace; color: red;">COVID-19 🡭</a>
</div>
<table align="center">
""".format(port=port)
# [message = header_html + title + navigation_bar + end_page + footer_html]
# la parte finale (footer_html) è identica per tutti i servizi
footer_html= """
</table>
</body>
</html>
"""
# [message = header_html + title + navigation_bar + end_page + footer_html]
# Singole parti finali (end_page)
# (end_page) home:
end_page_index = """
<form action="http://127.0.0.1:{port}" method="post" style="text-align: center;">
<hr>
<img src='resources/hospital.jpg'
style="float:left; left:0px; top:0px; width:180px; height:90px; border:none;" />
<img src='resources/hospital.jpg'
style="float:right; right:0px; top:0px; width:180px; height:90px; border:none;" />
<h1><strong>Servizi Ospedalieri</strong></h1><br>
<h2>Servizi AUSL della Romagna</h1><br>
<hr>
<p>L'Azienda Unità Sanitaria Locale della Romagna (<a href="https://www.auslromagna.it/">AUSL Romagna</a>), istituita con Legge regionale n. 22 del 21 novembre 2013,
è l'ente strumentale attraverso il quale la Regione assicura i livelli essenziali ed uniformi di assistenza dell'ambito territoriale della Romagna.
L'Azienda è dotata di personalità giuridica pubblica e di autonomia imprenditoriale ai sensi delle disposizioni legislative nazionali e regionali di regolamentazione del Servizio Sanitario Regionale.<br><br>
Attraverso la barra di navigazione sovrastante, oppure attraverso i collegamenti riportati qui di seguito, potrai accedere ai principali servizi offerti. È anche possibile scaricare l'elenco di questi ultimi da "Download info" nella barra di navigazione.</p>
<a href="http://127.0.0.1:{port}/118.html"><h2>Passa al servizio 118</a></h2>
<a href="http://127.0.0.1:{port}/pronto-soccorso.html"><h2>Passa al servizio Pronto Soccorso</a></h2>
<a href="http://127.0.0.1:{port}/medici-famiglia.html"><h2>Passa al servizio Medici e Pediatri di famiglia</a></h2>
<a href="http://127.0.0.1:{port}/guardia-medica.html"><h2>Passa al servizio Continuità Assistenziale (Guardia Medica)</a></h2>
<a href="http://127.0.0.1:{port}/farmacie-turno.html"><h2>Passa al servizio Farmacie di turno</a></h2>
<a href="http://127.0.0.1:{port}/FSE.html"><h2>Passa al servizio FSE</a></h2><br>
<hr>
<p style="text-align: center; font-size: 15px"> Per qualsiasi informazione, dubbio o segnalazione riguardante i Servizi <NAME> <a href="mailto:<EMAIL>">contattaci qui</a>.</p><br>
</form>
<br>
</body>
</html>
""".format(port=port)
# (end_page) 118:
end_page_118= """
<form action="http://1172.16.17.32:{port}/118.html" method="post" style="text-align: center;">
<hr>
<img src='resources/118.png'
style="float:left; left:0px; top:0px; width:180px; height:90px; border:none;" />
<img src='resources/118.png'
style="float:right; right:0px; top:0px; width:180px; height:90px; border:none;" />
<h1><strong>118 Emilia-Romagna</strong></h1><br>
<h2>118 Emilia-Romagna Pronto Soccorso Emergenza</h1><br>
<hr>
<a href="https://www.118er.it/istruzioni.asp"><h1>ISTRUZIONI - <NAME> <NAME></a></h1><br>
<p><strong>Chi è il 118:</strong> Il 118 è un servizio pubblico e gratuito di pronto intervento sanitario, attivo 24 ore su 24, coordinato da una centrale operativa che gestisce tutte le chiamate per necessità urgenti e di emergenza sanitaria.
<a href="https://www.118er.it/118.asp"> [+]</a></p><br>
<a href="https://www.118er.it/"><h2>Raggiungi il sito ufficiale del 118</a></h2><br>
<hr>
<p style="text-align: center; font-size: 15px">Torna alla <a href="http://127.0.0.1:{port}">home</a>.</p><br>
</form>
<br>
</body>
</html>
""".format(port=port)
# (end_page) pronto soccorso: body of the emergency-room info page.
# Fixes: <h2> was closed with </h1>; headings were nested inside their <a>
# instead of wrapping it; the form action pointed at a LAN address
# inconsistent with the 127.0.0.1 links used elsewhere — normalized to
# 127.0.0.1 (NOTE(review): confirm against the deployment host).
end_page_pronto_soccorso = """
<form action="http://127.0.0.1:{port}/pronto-soccorso.html" method="post" style="text-align: center;">
<hr>
<img src='resources/pronto_soccorso.png'
style="float:left; left:0px; top:0px; width:90px; height:90px; border:none;" />
<img src='resources/pronto_soccorso.png'
style="float:right; right:0px; top:0px; width:90px; height:90px; border:none;" />
<h1><strong>Pronto Soccorso</strong></h1><br>
<h2>Pronto Soccorso e pronto intervento</h2><br>
<hr>
<h1><a href="https://www.auslromagna.it/luoghi/pronto-soccorso">MAPPA DEI PUNTI DI PRONTO SOCCORSO E PRONTO INTERVENTO</a></h1><br>
<p>Il servizio di Pronto Soccorso è rivolto a persone che hanno di bisogno di cure urgenti.
Per situazioni non urgenti è opportuno rivolgersi direttamente al proprio <a href="http://127.0.0.1:{port}/medici-famiglia.html"> medico di famiglia</a> od al <a href="http://127.0.0.1:{port}/guardia-medica.html"> servizio sostitutivo di guardia medica</a>.</p><br>
<h2><a href="https://www.auslromagna.it/servizi/pronto-soccorso">Raggiungi il sito ufficiale del Pronto Soccorso</a></h2><br>
<hr>
<p style="text-align: center; font-size: 15px">Torna alla <a href="http://127.0.0.1:{port}">home</a>.</p><br>
</form>
<br>
</body>
</html>
""".format(port=port)
# (end_page) medici_famiglia: body of the family-doctors page.
# Fixes: <h2> was closed with </h1>; the heading was nested inside its <a>.
end_page_medici_famiglia = """
<form action="http://127.0.0.1:{port}/medici-famiglia.html" method="post" style="text-align: center;">
<hr>
<img src='resources/famiglia.jpg'
style="float:left; left:0px; top:0px; width:150px; height:90px; border:none;" />
<img src='resources/famiglia.jpg'
style="float:right; right:0px; top:0px; width:150px; height:90px; border:none;" />
<h1><strong>Medici e Pediatri di famiglia</strong></h1><br>
<h2>Medici e Pediatri di famiglia in Romagna</h2><br>
<hr>
<p>Medico e pediatra di famiglia sono il primo riferimento per problemi di salute, per avere confronti e counselling. Alla base di ogni rapporto di collaborazione tra il medico e la famiglia ci deve essere naturalmente fiducia reciproca. La scelta del medico avviene contemporaneamente all'iscrizione al Servizio sanitario nazionale.</p><br>
<h2><a href="https://www.auslromagna.it/servizi/medici-famiglia">Raggiungi il sito ufficiale riguardante Medici e Pediatri di famiglia</a></h2><br>
<hr>
<p style="text-align: center; font-size: 15px">Torna alla <a href="http://127.0.0.1:{port}">home</a>.</p><br>
</form>
<br>
</body>
</html>
""".format(port=port)
# (end_page) guardia_medica: body of the out-of-hours medical-service page.
# Fixes: <h2> was closed with </h1>; headings were nested inside their <a>;
# the phone-number list relied on source newlines (which HTML collapses),
# so each entry now ends with <br>.
end_page_guardia_medica = """
<form action="http://127.0.0.1:{port}/guardia-medica.html" method="post" style="text-align: center;">
<hr>
<img src='resources/guardia-medica.jpg'
style="float:left; left:0px; top:0px; width:90px; height:90px; border:none;" />
<img src='resources/guardia-medica.jpg'
style="float:right; right:0px; top:0px; width:90px; height:90px; border:none;" />
<h1><strong>Continuità assistenziale</strong></h1><br>
<h2>Servizio di Continuità assistenziale (ex Guardia medica)</h2><br>
<hr>
<h1><a href="https://www.auslromagna.it/servizi/guardia-medica/mappa-ambulatori-continuita-assistenziale">MAPPA DEGLI AMBULATORI DI CONTINUITÀ ASSISTENZIALE</a></h1><br>
<p>È il servizio che, in assenza del medico di famiglia, garantisce l'assistenza medica di base per situazioni che rivestono carattere di non differibilità, cioè per quei problemi sanitari per i quali non si può aspettare fino all'apertura dell'ambulatorio del proprio medico curante o pediatra di libera scelta.<br><br>
<strong>NUMERO TELEFONICO COMPETENTE</strong>: <br>
<strong>Comprensorio di Forlì</strong>: 800533118 <br>
<strong>Comprensorio di Cesena</strong>: 800050909 <br>
<strong>Comprensorio di Ravenna</strong>: 800244244 <br>
<strong>Comprensorio di Rimini</strong>: 0541787461
</p><br>
<h2><a href="https://www.auslromagna.it/servizi/guardia-medica">Raggiungi il sito ufficiale del servizio di Continuità assistenziale</a></h2><br>
<hr>
<p style="text-align: center; font-size: 15px">Torna alla <a href="http://127.0.0.1:{port}">home</a>.</p><br>
</form>
<br>
</body>
</html>
""".format(port=port)
# (end_page) farmacie_turno: body of the on-duty pharmacies page.
# Fixes: <h2> closed with </h1>; heading nested inside its <a>; "home" link
# pointed at a LAN address inconsistent with the rest of the page —
# normalized to 127.0.0.1 (NOTE(review): confirm against the deployment host).
end_page_farmacie_turno = """
<form action="http://127.0.0.1:{port}/farmacie-turno.html" method="post" style="text-align: center;">
<hr>
<img src='resources/farmacia.png'
style="float:left; left:0px; top:0px; width:90px; height:90px; border:none;" />
<img src='resources/farmacia.png'
style="float:right; right:0px; top:0px; width:90px; height:90px; border:none;" />
<h1><strong>Farmacie di turno</strong></h1><br>
<h2>Farmacie di turno in Romagna</h2><br>
<hr>
<p>La Guardia Farmaceutica (Farmacia di Turno) è un servizio che ogni farmacia aperta al pubblico deve fornire in base a una specifica Legge Regionale (n. 33 del 30 dicembre 2009).
Tale servizio ha due caratteristiche fondamentali: la presenza e l'accessibilità su tutto il territorio e la disponibilità 24 ore su 24 per 365 giorni all'anno.
Il servizio viene svolto da un farmacista nella farmacia di turno ed assicura la distribuzione dei farmaci durante gli orari di chiusura delle farmacie.
Le farmacie di turno svolgono il servizio di norma fino alle ore 20.00 a battenti aperti e successivamente a battenti chiusi fino all'ora di apertura antimeridiana.</p><br>
<h2><a href="https://www.auslromagna.it/servizi/farmacie">Trova la farmacia di turno più vicina a te</a></h2><br>
<hr>
<p style="text-align: center; font-size: 15px">Torna alla <a href="http://127.0.0.1:{port}">home</a>.</p><br>
</form>
<br>
</body>
</html>
""".format(port=port)
# (end_page) FSE: body of the electronic health record (FSE) page.
# Fixes: <h2> closed with </h1>; headings nested inside their <a>; form
# action pointed at a LAN address inconsistent with the 127.0.0.1 links —
# normalized to 127.0.0.1 (NOTE(review): confirm against the deployment host).
end_page_FSE = """
<form action="http://127.0.0.1:{port}/FSE.html" method="post" style="text-align: center;">
<hr>
<img src='resources/fse.png'
style="float:left; left:0px; top:0px; width:90px; height:90px; border:none;" />
<img src='resources/fse.png'
style="float:right; right:0px; top:0px; width:90px; height:90px; border:none;" />
<h1><strong>FSE</strong></h1><br>
<h2>Fascicolo Sanitario Elettronico</h2><br>
<hr>
<p>Il <a href="https://www.auslromagna.it/servizi-on-line/fse-fascicolo-sanitario-elettronico">Fascicolo Sanitario Elettronico (FSE)</a> consente l'archiviazione e la consultazione da pc e da smartphone dei propri dati e documenti di tipo sanitario e socio-sanitario, in forma riservata e protetta.
Tramite il Fascicolo Sanitario Elettronico è anche possibile ricevere i referti delle prestazioni sanitarie, evitando di recarsi agli sportelli per il ritiro del documento cartaceo.
Inoltre, il cittadino può arricchire il proprio FSE con dati e documenti medici in suo possesso - ad esempio appunti sulle cure, agenda degli appuntamenti sanitari, referti di strutture non convenzionate o rilasciati da altri specialisti - ed ha accesso ad ulteriori servizi online.
Possono attivare il FSE tutte le persone maggiorenni iscritte al Servizio Sanitario Nazionale, che possono farlo anche per i figli minori.</p><br>
<h2><a href="https://support.fascicolo-sanitario.it/">Come attivare FSE</a></h2><br>
<h2><a href="https://www.fascicolo-sanitario.it/fse/;jsessionid=90F1C30D12B671EF7203450B71876F41?0">Accedi al tuo FSE</a></h2><br>
<hr>
<p style="text-align: center; font-size: 15px">Torna alla <a href="http://127.0.0.1:{port}">home</a>.</p><br>
</form>
<br>
</body>
</html>
""".format(port=port)
# Assemble one complete service page and write it to disk.
def create_page_servizio(title, file_html, end_page):
    """Concatenate the shared header/nav/footer around `title` + `end_page`
    and write the result to `file_html` (UTF-8).

    BUG FIX: the old code wrapped only the string concatenation in a bare
    `except: pass`; if it failed, `message` was unbound and `f.write(message)`
    raised NameError with the file left open. Build the message first, then
    write inside a context manager so the file is always closed.
    """
    message = header_html + title + navigation_bar + end_page + footer_html
    with open(file_html, 'w', encoding="utf-8") as f:
        f.write(message)
# Heading-font fragment prepended to every page title (un-Pythonic trailing
# semicolons removed; values unchanged).
intro_page = "<hr><h1 style='font-family: Trebuchet MS;'>"
# Per-page <title> tag, each followed by the shared heading fragment.
bar_title_index = "<title>Servizi Ospedalieri</title>" + intro_page
bar_title_118 = "<title>Servizi Ospedalieri - 118</title>" + intro_page
bar_title_pronto_soccorso = "<title>Servizi Ospedalieri - Pronto Soccorso</title>" + intro_page
bar_title_medici_famiglia = "<title>Servizi Ospedalieri - Medici e Pediatri di famiglia</title>" + intro_page
bar_title_guardia_medica = "<title>Servizi Ospedalieri - Continuità assitenziale</title>" + intro_page
bar_title_farmacie_turno = "<title>Servizi Ospedalieri - Farmacie di turno</title>" + intro_page
bar_title_FSE = "<title>Servizi Ospedalieri - FSE</title>" + intro_page
# Build the dedicated page for the 118 emergency number.
def create_page_118():
    create_page_servizio(
        title=bar_title_118 + "118 Emilia-Romagna</h1><hr>",
        file_html='118.html',
        end_page=end_page_118,
    )
# Build the dedicated Pronto Soccorso (emergency room) page.
def create_page_pronto_soccorso():
    create_page_servizio(
        title=bar_title_pronto_soccorso + "Pronto Soccorso</h1><hr>",
        file_html='pronto-soccorso.html',
        end_page=end_page_pronto_soccorso,
    )
# Build the dedicated family doctors / pediatricians page.
def create_page_medici_famiglia():
    create_page_servizio(
        title=bar_title_medici_famiglia + "Medici e Pediatri di famiglia</h1><hr>",
        file_html='medici-famiglia.html',
        end_page=end_page_medici_famiglia,
    )
# Build the dedicated Continuità assistenziale (ex guardia medica) page.
def create_page_guardia_medica():
    # BUG FIX: the title previously reused bar_title_medici_famiglia, so the
    # browser tab on this page read "Medici e Pediatri di famiglia"; use the
    # dedicated bar_title_guardia_medica constant instead.
    create_page_servizio(bar_title_guardia_medica + "Continuità assitenziale</h1><hr>" , 'guardia-medica.html', end_page_guardia_medica )
# Build the dedicated on-duty pharmacies page.
def create_page_farmacie_turno():
    create_page_servizio(
        title=bar_title_farmacie_turno + "Farmacie di turno</h1><hr>",
        file_html='farmacie-turno.html',
        end_page=end_page_farmacie_turno,
    )
# Build the dedicated FSE (electronic health record) page.
def create_page_FSE():
    create_page_servizio(
        title=bar_title_FSE + "FSE - Fascicolo Sanitario Elettronico</h1><hr>",
        file_html='FSE.html',
        end_page=end_page_FSE,
    )
# Build index.html, the landing page of the hospital-services site.
def create_index_page():
    create_page_servizio(
        title=bar_title_index + "Servizi Ospedalieri Spahiu</h1><hr>",
        file_html='index.html',
        end_page=end_page_index,
    )
# Regenerate every HTML file the site serves.
def resfresh_contents():
    print("Aggiornamento contenuti...\n")
    builders = (
        create_index_page,
        create_page_118,
        create_page_pronto_soccorso,
        create_page_medici_famiglia,
        create_page_guardia_medica,
        create_page_farmacie_turno,
        create_page_FSE,
    )
    for build in builders:
        build()
    print("Aggiornamento terminato.\n")
# Launch a background daemon thread that refreshes the page contents.
def launch_thread_resfresh():
    # BUG FIX: `target=resfresh_contents()` CALLED the function immediately
    # (synchronously, on this thread) and handed Thread a target of None, so
    # the spawned thread did nothing. Pass the callable itself.
    # NOTE(review): the surrounding comments mention a 300 s refresh cycle,
    # but resfresh_contents runs only once — confirm whether a periodic loop
    # was intended.
    t_refresh = threading.Thread(target=resfresh_contents)
    t_refresh.daemon = True
    t_refresh.start()
# SIGINT handler so Ctrl-C shuts the HTTP server down cleanly.
def signal_handler(signal, frame):
    """Close the server (if running), release the refresh-thread wait, exit."""
    print('Combinazione di interruzione (Ctrl+C) premuta: uscita dal server http.')
    try:
        if(server):
            server.server_close()
    finally:
        # Stop the refresh thread without busy waiting, then terminate.
        waiting_refresh.set()
        sys.exit(0)
# Entry point invoked when the server is launched.
def main():
    # Loop until valid credentials are typed; `cond` stays True on failure.
    cond = True
    while cond:
        try:
            username = input("Inserire l'username: ")  # username from stdin
            password = input("Inserire la password: ")  # password from stdin
        # e.g. Ctrl+C pressed while entering the credentials
        except:
            print("\nErrore nell'inserimento delle credenziali.")
            print("Chiusura del programma.")
            server.server_close()  # avoid a stale socket on the next start
            sys.exit(0)
        if username != 'admin' or password != '<PASSWORD>':
            print("\nUsername e/o password incorretta. Ritenta")
        else:
            cond = False  # credentials accepted: leave the loop
            print("\nAccesso eseguito correttamente.\n")
    # Spawn the thread that regenerates the page contents.
    launch_thread_resfresh()
    # Ensure Ctrl-C also terminates any threads the server spawned.
    server.daemon_threads = True
    # Allow rebinding the address even if the old socket lingers in TIME_WAIT.
    server.allow_reuse_address = True
    # Install the clean-shutdown handler for Ctrl-C.
    signal.signal(signal.SIGINT, signal_handler)
    # Truncate the GET-request log each time the server starts.
    f = open('AllRequestsGET.txt','w', encoding="utf-8")
    f.close()
    # Serve forever until interrupted.
    try:
        while True:
            server.serve_forever()
    except KeyboardInterrupt:
        pass
    server.server_close()
# Run the server only when this file is executed directly (not imported).
if __name__ == "__main__":
    main()
|
coleygroup/pyscreener | pyscreener/docking/dock/runner.py | <reponame>coleygroup/pyscreener
import os
from pathlib import Path
import re
import subprocess as sp
import sys
from typing import Mapping, Optional, Tuple, Union
from openbabel import pybel
from rdkit.Chem import AllChem as Chem
import ray
from pyscreener.exceptions import MisconfiguredDirectoryError, MissingEnvironmentVariableError
from pyscreener.utils import calc_score
from pyscreener.docking import CalculationData, DockingRunner, Result
from pyscreener.docking.dock import utils
from pyscreener.docking.dock.metadata import DOCKMetadata
# Resolve the DOCK6 installation from the environment at import time so a
# misconfigured environment fails fast with an actionable message.
try:
    DOCK6 = Path(os.environ["DOCK6"])
except KeyError:
    raise MissingEnvironmentVariableError(
        "DOCK6 environment variable not set! "
        "See https://github.com/coleygroup/pyscreener#specifying-an-environment-variable "
        "for more information."
    )

# Standard parameter files and the dock6 executable shipped with DOCK6.
VDW_DEFN_FILE = DOCK6 / "parameters" / "vdw_AMBER_parm99.defn"
FLEX_DEFN_FILE = DOCK6 / "parameters" / "flex.defn"
FLEX_DRIVE_FILE = DOCK6 / "parameters" / "flex_drive.tbl"
DOCK = DOCK6 / "bin" / "dock6"

for f in (VDW_DEFN_FILE, FLEX_DEFN_FILE, FLEX_DRIVE_FILE, DOCK):
    if not f.exists():
        raise MisconfiguredDirectoryError(
            f'$DOCK6 directory not configured properly! DOCK6 path is set as "{DOCK6}", but there '
            # BUG FIX: was `f.parents` (a sequence of all ancestors), which
            # rendered as an opaque object in the message; `f.parent` is the
            # containing directory intended here.
            f'is no "{f.name}" located under the "{f.parent}" directory. '
            "See https://github.com/coleygroup/pyscreener#specifying-an-environment-variable for more information."
        )
class DOCKRunner(DockingRunner):
    """DockingRunner implementation for the DOCK6 docking program."""

    @staticmethod
    def prepare(data: CalculationData) -> CalculationData:
        """Prepare both the receptor and ligand input files for `data`."""
        data = DOCKRunner.prepare_receptor(data)
        data = DOCKRunner.prepare_ligand(data)
        return data
@staticmethod
def prepare_receptor(data: CalculationData) -> CalculationData:
    """Prepare the files necessary to dock ligands against the input receptor
    using this Screener's parameters.

    Pipeline: MOL2 + PDB conversion -> DMS molecular surface -> sphere
    generation and selection -> docking box -> scoring grid. If any stage
    fails (a helper returns None), `data` is returned unchanged with no
    prepared receptor set.

    Parameter
    ---------
    data : CalculationData
        the calculation whose receptor should be prepared

    Returns
    -------
    CalculationData
        `data`, with metadata.prepared_receptor set to (rec_sph, grid_stem) —
        the selected-spheres file and the prefix of the prepared grid files —
        on success, and left unset if preparation fails at any point
    """
    rec_mol2 = utils.prepare_mol2(data.receptor, data.in_path)
    rec_pdb = utils.prepare_pdb(data.receptor, data.in_path)
    if rec_mol2 is None or rec_pdb is None:
        return data

    # molecular surface computed from the PDB with the configured probe
    rec_dms = utils.prepare_dms(rec_pdb, data.metadata.probe_radius, data.in_path)
    if rec_dms is None:
        return data

    rec_sph = utils.prepare_sph(
        rec_dms,
        data.metadata.steric_clash_dist,
        data.metadata.min_radius,
        data.metadata.max_radius,
        data.in_path,
    )
    if rec_sph is None:
        return data

    # keep only the spheres relevant to the docking box / reference ligand
    rec_sph = utils.select_spheres(
        rec_sph,
        data.metadata.sphere_mode,
        data.center,
        data.size,
        data.metadata.docked_ligand_file,
        data.metadata.buffer,
        data.in_path,
    )

    rec_box = utils.prepare_box(
        rec_sph,
        data.center,
        data.size,
        data.metadata.enclose_spheres,
        data.metadata.buffer,
        data.in_path,
    )
    if rec_box is None:
        return data

    grid_stem = utils.prepare_grid(rec_mol2, rec_box, data.in_path)
    if grid_stem is None:
        return data

    data.metadata.prepared_receptor = rec_sph, grid_stem
    return data
@staticmethod
def prepare_and_run(data: CalculationData) -> CalculationData:
    """Prepare the ligand for `data`, run the docking, and return `data`."""
    for step in (DOCKRunner.prepare_ligand, DOCKRunner.run):
        step(data)
    return data
@staticmethod
def prepare_ligand(data: CalculationData) -> CalculationData:
    """Prepare the ligand input file, dispatching on whether `data` carries
    a SMILES string or an input file."""
    preparer = (
        DOCKRunner.prepare_from_smi if data.smi is not None else DOCKRunner.prepare_from_file
    )
    preparer(data)
    return data
@staticmethod
def prepare_from_smi(data: CalculationData) -> CalculationData:
    """Prepare an input ligand file from the ligand's SMILES string.

    Embeds a 3D conformer with RDKit (MMFF-optimized), then converts it via
    Open Babel to add Gasteiger charges and writes a MOL2 file into
    data.in_path.

    Parameters
    ----------
    data: CalculationData

    Returns
    -------
    CalculationData
        `data`, with metadata.prepared_ligand set to the MOL2 filepath
    """
    mol2 = Path(data.in_path) / f"{data.name}.mol2"

    # NOTE(review): MolFromSmiles returns None for an invalid SMILES and
    # EmbedMolecule can fail (returns -1) — neither case is handled here;
    # confirm upstream validation guarantees an embeddable SMILES.
    mol = Chem.AddHs(Chem.MolFromSmiles(data.smi))
    Chem.EmbedMolecule(mol)
    Chem.MMFFOptimizeMolecule(mol)
    try:
        mol = pybel.readstring("mol", Chem.MolToMolBlock(mol))
        mol.calccharges(model="gasteiger")
    except Exception:
        # best-effort charging: fall through and write `mol` as-is
        pass

    mol.write(format="mol2", filename=str(mol2), overwrite=True, opt={"h": None})
    data.metadata.prepared_ligand = mol2
    return data
@staticmethod
def prepare_from_file(data: CalculationData) -> CalculationData:
    """Convert a single input ligand file to charged MOL2 format.

    Reads the first molecule from data.input_file, records its SMILES on
    `data`, adds hydrogens and Gasteiger charges (best-effort), and writes
    a MOL2 file into data.in_path.

    Returns
    -------
    CalculationData
        `data`, with metadata.prepared_ligand set to the MOL2 filepath.
        (FIX: the previous annotation `Optional[Tuple]` did not match the
        actual return value.)
    """
    fmt = Path(data.input_file).suffix.strip(".")
    mols = list(pybel.readfile(fmt, data.input_file))
    mol = mols[0]
    mol2 = Path(data.in_path) / f"{mol.title or data.name}.mol2"

    data.smi = mol.write()
    try:
        mol.addh()
        mol.calccharges(model="gasteiger")
    except Exception:
        # best-effort charging: fall through and write `mol` as-is
        pass

    # consistency/robustness fix: pass a string filename as prepare_from_smi
    # does — pybel hands the name to Open Babel, which expects str, not Path.
    mol.write(format="mol2", filename=str(mol2), overwrite=True, opt={"h": None})
    data.metadata.prepared_ligand = mol2

    return data
@staticmethod
def run(data: CalculationData) -> Optional[list]:
    """Dock the prepared ligand against the prepared receptor with DOCK6.

    Returns
    -------
    Optional[list]
        the parsed Grid_Score of each scored conformation, or None if DOCKing
        failed or no scores could be parsed. (The reduced overall score is
        stored on data.result, not returned.)
    """
    p_ligand = Path(data.metadata.prepared_ligand)
    ligand_name = p_ligand.stem
    sph_file, grid_prefix = data.metadata.prepared_receptor

    name = f"{Path(sph_file).stem}_{ligand_name}"
    infile, outfile_prefix = DOCKRunner.prepare_input_file(
        p_ligand, sph_file, grid_prefix, name, data.in_path, data.out_path
    )
    logfile = Path(outfile_prefix).parent / f"{name}.log"

    argv = [str(DOCK), "-i", str(infile), "-o", str(logfile)]
    ret = sp.run(argv, stdout=sp.PIPE, stderr=sp.PIPE)
    try:
        ret.check_returncode()
    except sp.SubprocessError:
        # a failed run is logged but not fatal; parsing below yields None
        print(f"ERROR: docking failed. argv: {argv}", file=sys.stderr)
        print(f'Message: {ret.stderr.decode("utf-8")}', file=sys.stderr)

    scores = DOCKRunner.parse_logfile(logfile)
    # reduce the per-conformation scores per the calculation's score mode
    score = None if scores is None else calc_score(scores, data.score_mode, data.k)

    # the node id is sanitized (":", ",", "." stripped) for the Result record
    data.result = Result(
        data.smi, name, re.sub("[:,.]", "", ray.state.current_node_id()), score
    )

    return scores
@staticmethod
def validate_metadata(metadata: DOCKMetadata):
    """No DOCK-specific validation is performed; any metadata is accepted."""
    return
@staticmethod
def parse_logfile(outfile: Union[str, Path]) -> Optional[list]:
    """Parse a DOCK log file for the grid scores of the conformations.

    Parameters
    ----------
    outfile : Union[str, Path]
        the filepath of a scored log file generated by DOCK6

    Returns
    -------
    Optional[list]
        the Grid_Score of each scored conformation. None if the log file
        could not be opened or no scores were parsed. (FIX: the previous
        annotation `Optional[float]` did not match the list return value.)
    """
    try:
        with open(outfile) as fid:
            score_lines = [line for line in fid if "Grid_Score" in line]
    except OSError:
        # BUG FIX: the old code swallowed the OSError and fell through with
        # `score_lines` unbound, raising NameError below. An unreadable log
        # simply means there are no scores.
        return None

    scores = []
    for line in score_lines:
        try:
            scores.append(float(line.split()[1]))
        # IndexError added: guard against a "Grid_Score" line with no value
        except (ValueError, IndexError):
            continue

    return scores or None
@staticmethod
def prepare_input_file(
    ligand_file: Union[str, Path],
    sph_file: str,
    grid_prefix: str,
    name: Optional[str] = None,
    in_path: Union[str, Path] = ".",
    out_path: Union[str, Path] = ".",
    **kwargs,
) -> Tuple[Path, Path]:
    """Prepare an input file with which to run DOCK

    Parameters
    ----------
    ligand_file : Union[str, Path]
        the MOL2 file corresponding to the ligand that will be docked
    sph_file : str
        the SPH file containing the DOCK spheres of the receptor
    grid_prefix : str
        the prefix of the prepared grid files (as was passed to the grid program)
    name : Optional[str], default=None
        the name to use for the input file and output file
    in_path : Union[str, os.PathLike], default="."
        the path under which to write the input file
    out_path : Union[str, os.PathLike], default="."
        the path under which to write the output files
    **kwargs
        overrides for individual DOCKing parameters, keyed by parameter name

    Returns
    -------
    infile: Path
        the filepath of the input file
    outfile_prefix: Path
        the prefix of the outfile name. DOCK will automatically name outfiles
        as <outfile_prefix>_scored.mol2
    """
    name = name or f"{Path(sph_file).stem}_{Path(ligand_file).stem}"
    # robustness: accept plain strings for the directory arguments
    infile = Path(in_path) / f"{name}.in"
    outfile_prefix = Path(out_path) / name

    # (parameter, default) pairs in the exact order DOCK6 expects. Entries
    # with parameter=None are fixed, file-specific lines that cannot be
    # overridden via kwargs. (The old inline commented-out template block
    # duplicating this list has been removed.)
    params = [
        ("conformer_search_type", "flex"),
        ("write_fragment_libraries", "no"),
        ("user_specified_anchor", "no"),
        ("limit_max_anchors", "no"),
        ("min_anchor_size", "5"),
        ("pruning_use_clustering", "yes"),
        ("pruning_max_orients", "100"),
        ("pruning_clustering_cutoff", "100"),
        ("pruning_conformer_score_cutoff", "100"),
        ("pruning_conformer_score_scaling_factor", "1"),
        ("use_clash_overlap", "no"),
        ("write_growth_tree", "no"),
        ("use_internal_energy", "yes"),
        ("internal_energy_rep_exp", "12"),
        ("internal_energy_cutoff", "100"),
        (None, f"ligand_atom_file {ligand_file}"),
        ("limit_max_ligands", "no"),
        ("skip_molecule", "no"),
        ("read_mol_solvation", "no"),
        ("calculate_rmsd", "no"),
        ("use_rmsd_reference_mol", "no"),
        ("use_database_filter", "no"),
        ("orient_ligand", "yes"),
        ("automated_matching", "yes"),
        (None, f"receptor_site_file {sph_file}"),
        ("max_orientations", "1000"),
        ("critical_points", "no"),
        ("chemical_matching", "no"),
        ("use_ligand_spheres", "no"),
        ("bump_filter", "no"),
        ("score_molecules", "yes"),
        ("contact_score_primary", "no"),
        ("contact_score_secondary", "no"),
        ("grid_score_primary", "yes"),
        ("grid_score_secondary", "no"),
        ("grid_score_rep_rad_scale", "1"),
        ("grid_score_vdw_scale", "1"),
        ("grid_score_es_scale", "1"),
        (None, f"grid_score_grid_prefix {grid_prefix}"),
        ("multigrid_score_secondary", "no"),
        # BUG FIX: this parameter had been truncated to "5_score_secondary",
        # which DOCK6 does not recognize; the full name matches the previously
        # commented-out template line ("dock3.5_score_secondary no").
        ("dock3.5_score_secondary", "no"),
        ("continuous_score_secondary", "no"),
        ("footprint_similarity_score_secondary", "no"),
        ("pharmacophore_score_secondary", "no"),
        ("descriptor_score_secondary", "no"),
        ("gbsa_zou_score_secondary", "no"),
        ("gbsa_hawkins_score_secondary", "no"),
        ("SASA_score_secondary", "no"),
        ("amber_score_secondary", "no"),
        ("minimize_ligand", "yes"),
        ("minimize_anchor", "yes"),
        ("minimize_flexible_growth", "yes"),
        ("use_advanced_simplex_parameters", "no"),
        ("simplex_max_cycles", "1"),
        ("simplex_score_converge", "0.1"),
        ("simplex_cycle_converge", "1.0"),
        ("simplex_trans_step", "1.0"),
        ("simplex_rot_step", "0.1"),
        ("simplex_tors_step", "10"),
        ("simplex_anchor_max_iterations", "500"),
        ("simplex_grow_max_iterations", "500"),
        ("simplex_grow_tors_premin_iterations", "0"),
        ("simplex_random_seed", "0"),
        ("simplex_restraint_min", "no"),
        ("atom_model", "all"),
        (None, f"vdw_defn_file {VDW_DEFN_FILE}"),
        (None, f"flex_defn_file {FLEX_DEFN_FILE}"),
        (None, f"flex_drive_file {FLEX_DRIVE_FILE}"),
        (None, f"ligand_outfile_prefix {outfile_prefix}"),
        ("write_orientations", "no"),
        ("num_scored_conformers", "5"),
        ("write_conformations", "no"),
        ("rank_ligands", "no"),
    ]

    # `with` ensures the infile is flushed/closed before DOCK reads it
    with open(infile, "w") as fid:
        for param, value in params:
            if param is None:
                fid.write(f"{value}\n")
            else:
                fid.write(DOCKRunner.infile_line(kwargs, param, value))

    return infile, outfile_prefix
@staticmethod
def infile_line(options: Mapping, param: str, default: str) -> str:
    """Generate one infile line ("<param> <value>\\n") for a DOCK parameter.

    If `param` is present in `options`, its value is used; otherwise the
    supplied default. FIX: decorated as @staticmethod — the method takes no
    `self` and is always invoked as DOCKRunner.infile_line(...); without the
    decorator, calling it on an instance would misbind `options` to the
    instance.
    """
    return f"{param} {options.get(param, default)}\n"
|
coleygroup/pyscreener | pyscreener/docking/runner.py | <filename>pyscreener/docking/runner.py
from abc import ABC, abstractmethod
from typing import Optional, Sequence
from pyscreener.docking.data import CalculationData
from pyscreener.docking.metadata import CalculationMetadata
class DockingRunner(ABC):
    """Abstract interface implemented by each backend-specific docking runner."""

    @staticmethod
    @abstractmethod
    def prepare_receptor(data: CalculationData) -> CalculationData:
        """Prepare the receptor input files for this calculation."""
        pass

    @staticmethod
    @abstractmethod
    def prepare_ligand(data: CalculationData) -> CalculationData:
        """Prepare the ligand input files for this calculation."""
        pass

    @staticmethod
    @abstractmethod
    def run(data: CalculationData) -> Optional[Sequence[float]]:
        """Run the docking calculation, returning the scores (if any)."""
        pass

    @staticmethod
    @abstractmethod
    def prepare_and_run(data: CalculationData) -> CalculationData:
        """Prepare the ligand and then run the calculation."""
        pass

    @staticmethod
    def validate_metadata(metadata: CalculationMetadata):
        """Validate backend-specific metadata; the default accepts anything."""
        return
coleygroup/pyscreener | pyscreener/utils/utils.py | <reponame>coleygroup/pyscreener<gh_stars>10-100
__all__ = [
"AutoName",
"ScoreMode",
"FileFormat",
"chunks",
"calc_score",
"reduce_scores",
"run_on_all_nodes",
]
from enum import Enum, auto
import functools
from itertools import islice
from typing import Callable, Iterable, Iterator, List, Optional, Sequence
import warnings
import numpy as np
import ray
class AutoName(Enum):
    """Enum base whose ``auto()`` values are the member names themselves."""

    def _generate_next_value_(name, start, count, last_values):
        # auto() asks for the next value: reuse the member's own name
        return name

    @classmethod
    def from_str(cls, s):
        """Look up a member from a case-insensitive, dash-tolerant string."""
        key = s.replace("-", "_").upper()
        return cls[key]
class ScoreMode(AutoName):
    """The method by which to calculate a score from multiple possible scores.
    Used when calculating an overall docking score from multiple conformations,
    multiple repeated runs, or docking against an ensemble of receptors."""

    AVG = auto()        # arithmetic (nan-aware) mean of the scores
    BEST = auto()       # the minimum score (lower docking score = better)
    BOLTZMANN = auto()  # Boltzmann-weighted average (exp(-score) weights)
    TOP_K = auto()      # mean of the k best (lowest) scores
class FileFormat(AutoName):
    """The format of a molecular suppy file. FILE represents the format of all molecular supply
    files with no explicit support (i.e., CSV, SDF, and SMI.)"""

    CSV = auto()   # comma-separated values with a SMILES column
    FILE = auto()  # generic fallback for unrecognized formats
    SDF = auto()   # structure-data file
    SMI = auto()   # SMILES file
def chunks(it: Iterable, size: int) -> Iterator[List]:
    """chunk an iterable into chunks of given size, with the last chunk being potentially smaller"""
    iterator = iter(it)
    while True:
        chunk = list(islice(iterator, size))
        if not chunk:
            return
        yield chunk
def calc_score(
    scores: Sequence[float], score_mode: ScoreMode = ScoreMode.BEST, k: int = 1
) -> float:
    """Calculate an overall score from a sequence of scores

    Parameters
    ----------
    scores : Sequence[float]
    score_mode : ScoreMode, default=ScoreMode.BEST
        the method used to calculate the overall score. See ScoreMode for
        choices
    k : int, default=1
        the number of top scores to average, if using ScoreMode.TOP_K

    Returns
    -------
    float

    Raises
    ------
    ValueError
        if score_mode is not a recognized ScoreMode
    """
    Y = np.array(scores)

    if score_mode == ScoreMode.BEST:
        # NOTE(review): unlike the other branches this is not nan-aware
        # (Y.min() propagates NaN) — confirm whether np.nanmin was intended.
        return Y.min()
    elif score_mode == ScoreMode.AVG:
        return np.nanmean(Y)
    elif score_mode == ScoreMode.BOLTZMANN:
        Y_e = np.exp(-Y)
        Z = Y_e / np.nansum(Y_e)
        return np.nansum(Y * Z)
    elif score_mode == ScoreMode.TOP_K:
        # BUG FIX: ndarray.sort() sorts in place and returns None, so the old
        # `Y.sort()[:k]` raised TypeError. np.sort returns a sorted copy.
        return np.nanmean(np.sort(Y)[:k])

    raise ValueError(f"Invalid ScoreMode! got: {score_mode}")
def reduce_scores(
    S: np.ndarray,
    repeat_score_mode: ScoreMode = ScoreMode.BEST,
    ensemble_score_mode: ScoreMode = ScoreMode.BEST,
    k: int = 1,
) -> Optional[float]:
    """Calculate the overall score of each ligand given all of its simulations

    Parameters
    ----------
    S : np.ndarray
        an `n x r x t` array of docking scores, where n is the number of ligands that were docked,
        r is the number of receptors each ligand was docked against, and t is the number of repeated
        docking attempts against each receptor, and each value is the docking score calculated for
        the given run
    repeat_score_mode : ScoreMode, default=ScoreMode.BEST,
        the mode used to calculate the overall score for from repeated runs
    ensemble_score_mode : ScoreMode, default=ScoreMode.BEST,
        the mode used to calculate the overall score for a given ensemble of receptors
    k : int, default=1
        the number of scores to consider, if averaging the top-k

    Returns
    -------
    S : np.ndarray
        an array of shape `n`, containing the reduced docking score for each ligand
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")

        # first reduce over the repeat axis (t): n x r x t -> n x r
        if repeat_score_mode == ScoreMode.BEST:
            S = np.nanmin(S, axis=2)
        elif repeat_score_mode == ScoreMode.AVG:
            S = np.nanmean(S, axis=2)
        elif repeat_score_mode == ScoreMode.BOLTZMANN:
            S_e = np.exp(-S)
            Z = S_e / np.nansum(S_e, axis=2)[:, :, None]
            S = np.nansum((S * Z), axis=2)
        elif repeat_score_mode == ScoreMode.TOP_K:
            # BUG FIX: the slice was `[:, :k]`, which truncated the RECEPTOR
            # axis instead of keeping the k best repeats; slice axis 2.
            S = np.nanmean(np.sort(S, axis=2)[:, :, :k], axis=2)

        # then reduce over the receptor-ensemble axis (r): n x r -> n
        if ensemble_score_mode == ScoreMode.BEST:
            S = np.nanmin(S, axis=1)
        elif ensemble_score_mode == ScoreMode.AVG:
            S = np.nanmean(S, axis=1)
        elif ensemble_score_mode == ScoreMode.BOLTZMANN:
            S_e = np.exp(-S)
            Z = S_e / np.nansum(S_e, axis=1)[:, None]
            S = np.nansum((S * Z), axis=1)
        elif ensemble_score_mode == ScoreMode.TOP_K:
            # BUG FIX: the slice was `[:, :, :k]`, an IndexError on the 2-D
            # array produced by the first reduction; slice axis 1.
            S = np.nanmean(np.sort(S, axis=1)[:, :k], axis=1)

    return S
def run_on_all_nodes(func: Callable) -> Callable:
    """Decorate *func* so that calling it launches one remote copy on every
    node currently in the ray cluster, returning the last node's result.

    Each task is pinned to its node via a fractional custom resource of the
    form ``node:<address>``.
    """
    @functools.wraps(func)
    def wrapper_run_on_all_nodes(*args, **kwargs):
        object_refs = []
        for node_info in ray.nodes():
            node_address = node_info["NodeManagerAddress"]
            pinned_func = ray.remote(resources={f"node:{node_address}": 0.1})(func)
            object_refs.append(pinned_func.remote(*args, **kwargs))
        # NOTE(review): ray.wait() with default num_returns=1 only waits for a
        # single task; ray.get() below blocks on the *last* ref, so earlier
        # tasks may still be running when this returns — confirm intended.
        ray.wait(object_refs)
        return ray.get(object_refs[-1])
    return wrapper_run_on_all_nodes
|
bsholy/learn_flask_email | app.py | import os
from threading import Thread
from flask import Flask, redirect, url_for
from flask_mail import Mail, Message
from mail_html import get_mail_msg
# Flask application and mail-extension setup.
app = Flask(__name__)
# Tighten Jinja whitespace handling: drop the newline after block tags and
# strip leading whitespace before them.
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
# Mail credentials/server come from the environment; port 465 with SSL is the
# implicit-TLS SMTP configuration.
app.config.update(
    SECRET_KEY=os.getenv('SECRET_KEY', 'secret string'),
    MAIL_SERVER=os.getenv('MAIL_SERVER'),
    MAIL_PORT=465,
    MAIL_USE_SSL=True,
    MAIL_USERNAME=os.getenv('MAIL_USERNAME'),
    MAIL_PASSWORD=os.getenv('MAIL_PASSWORD'),
    MAIL_DEFAULT_SENDER=('bsholy', os.getenv('MAIL_USERNAME'))
)
# Flask-Mail instance bound to this app; used by the send helpers below.
mail = Mail(app)
def send_smtp_mail(subject, to, body):
    """Build a plain-text message for a single recipient and send it
    synchronously (blocks until the SMTP exchange finishes)."""
    outgoing = Message(subject, recipients=[to], body=body)
    mail.send(outgoing)
def _send_async_mail(app_, message):
    """Worker-thread target: send *message* inside *app_*'s application context.

    The context push is required because this runs outside a request, and
    Flask-Mail reads its configuration from the current app.
    """
    with app_.app_context():
        mail.send(message)
def send_async_mail(subject, to, body, html):
    """Send an email (text + HTML parts) on a background thread so the
    request handler does not block on SMTP; return the started Thread."""
    message = Message(subject, recipients=[to], body=body, html=html)
    sender_thread = Thread(target=_send_async_mail, args=[app, message])
    sender_thread.start()
    return sender_thread
@app.route('/')
def index():
    """Landing page: a fixed plain-text greeting."""
    greeting = "Hello, world!"
    return greeting
@app.route('/send_test_mail')
def send_test_mail():
    """Send a fixed test email synchronously, then redirect to the index page."""
    subject = 'Hello, world!'
    to = '<EMAIL>'
    body = 'Across the Great Wall we can reach every corner in the world.'
    send_smtp_mail(subject, to, body)
    # BUG FIX: redirect() takes a URL, not an endpoint name. The bare string
    # 'index' produced a relative redirect to /index (a 404); resolve the
    # endpoint with url_for() as send_async_test_mail() already does.
    return redirect(url_for('index'))
@app.route('/send_async_test_mail')
def send_async_test_mail():
    """Fire off a test email on a background thread and redirect home."""
    send_async_mail(
        'Hello, world!',
        '<EMAIL>',
        'This is a async mail.',
        get_mail_msg(),
    )
    return redirect(url_for('index'))
|
bsholy/learn_flask_email | mail_html/get_mail_message.py | <reponame>bsholy/learn_flask_email<filename>mail_html/get_mail_message.py
mail_msg = """
<html>
<head>
<meta charset="utf-8" />
</head>
<body>
<table width="100%">
<tbody>
<tr>
<td style="width: 100%;">
<center>
<table class="content-wrap" style="margin: 0px auto; width: 600px;">
<tbody>
<tr>
<td style="margin: 0px auto; overflow: hidden; padding: 0px; border: 0px dotted rgb(238, 238, 238);">
<!---->
<div class="full" tindex="1" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 600px;">
<tbody>
<tr>
<td style="direction: ltr; font-size: 0px; padding-top: 0px; text-align: center; vertical-align: top;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td align="center" vertical-align="middle" style="line-height: 0; padding-top: 85px; background-color: rgb(18, 44, 90); width: 600px; background-image: url("https://www.drageasy.com/3be88a385faa07f990b2f26e57d72da3.png?imageslim"); background-size: 450px; background-position: 50% 50%; background-repeat: no-repeat;"></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div>
<div class="full" tindex="2" style="margin: 0px auto; line-height: 0px; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 600px;">
<tbody>
<tr>
<td align="center" class="fullTd" style="direction: ltr; font-size: 0px; padding: 10px 20px; text-align: center; vertical-align: top; word-break: break-word; width: 600px; background-color: rgb(18, 44, 90); background-image: url(""); background-repeat: no-repeat; background-size: 100px; background-position: 10% 50%;">
<table align="center" border="0" cellpadding="0" cellspacing="0" style="border-collapse: collapse; border-spacing: 0px;">
<tbody>
<tr>
<td style="width: 600px; border-top: 1px solid rgb(255, 255, 255);"></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div>
<div class="full" tindex="3" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 600px;">
<tbody>
<tr>
<td class="fullTd" style="direction: ltr; width: 600px; font-size: 0px; padding-bottom: 0px; text-align: center; vertical-align: top; background-color: rgb(18, 44, 90); background-image: url(""); background-repeat: no-repeat; background-size: 100px; background-position: 10% 50%;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td align="left" style="font-size: 0px; padding: 24px 20px;">
<div class="text" style="font-family: 微软雅黑, "Microsoft YaHei"; overflow-wrap: break-word; margin: 0px; text-align: left; line-height: 1.6; color: rgb(255, 255, 255); font-size: 14px; font-weight: normal;">
<div>
<p style="text-align: center; text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;"><span style="font-size: 20px; color: #ecf0f1;"><strong>【数据监测】网络数据出现异常,请及时查看!</strong></span></p>
<p style="text-align: center; text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;"> </p>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;"><span style="color: #ecf0f1;">尊敬的用户,您好!</span></p>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;">你的设备监控模块出现问题,详细如下:</p>
<p style="text-size-adjust: none; word-break: break-word; line-height: 0.8; font-size: 14px; margin: 0px;"> </p>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;">数据健康度分析中,漫游达标率不合格。</p>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;"> </p>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;">此致</p>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;"><strong>网络数据监测平台</strong></p>
</div>
</div></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div>
<div class="full" tindex="4" style="margin: 0px auto; line-height: 0px; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 600px;">
<tbody>
<tr>
<td align="center" class="fullTd" style="direction: ltr; font-size: 0px; padding: 0px 20px; text-align: center; vertical-align: top; word-break: break-word; width: 600px; background-color: rgb(18, 44, 90); background-image: url(""); background-repeat: no-repeat; background-size: 100px; background-position: 10% 50%;">
<table align="center" border="0" cellpadding="0" cellspacing="0" style="border-collapse: collapse; border-spacing: 0px;">
<tbody>
<tr>
<td style="width: 600px; border-top: 1px dashed rgb(217, 208, 208);"></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div>
<div tindex="5" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" style="background-color: rgb(18, 44, 90); background-image: url(""); background-repeat: no-repeat; background-size: 75px; background-position: 7% 50%;">
<tbody>
<tr>
<td style="direction: ltr; font-size: 0px; text-align: center; vertical-align: top; width: 600px;">
<table width="100%" border="0" cellpadding="0" cellspacing="0" style="vertical-align: top;">
<tbody>
<tr>
<td class="fourColumn column1" style="width: 25%; max-width: 25%; min-height: 1px; font-size: 13px; text-align: left; direction: ltr; vertical-align: top; padding: 0px;">
<div class="full" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 150px;">
<tbody>
<tr>
<td class="fullTd" style="direction: ltr; width: 150px; font-size: 0px; padding-bottom: 0px; text-align: center; vertical-align: top;">
<div style="display: inline-block; vertical-align: top; width: 100%;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td style="font-size: 0px; word-break: break-word; width: 130px; text-align: center; padding: 10px;">
<div>
<img height="auto" alt="拖拽生成HTML邮件-拉易网-3" width="auto" src="https://www.drageasy.com/7b3ee4e89b329442c3806e675ff6fe7d.png?imageslim" style="box-sizing: border-box; border: 0px; display: inline-block; outline: none; text-decoration: none; height: auto; width: auto; max-width: 100%; padding: 0px;" />
</div></td>
</tr>
</tbody>
</table>
</div></td>
</tr>
</tbody>
</table>
</div></td>
<td class="fourColumn column2" style="width: 25%; max-width: 25%; min-height: 1px; font-size: 13px; text-align: left; direction: ltr; vertical-align: top; padding: 0px;">
<div class="full" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 150px;">
<tbody>
<tr>
<td class="fullTd" style="direction: ltr; width: 150px; font-size: 0px; padding-bottom: 0px; text-align: center; vertical-align: top;">
<div style="display: inline-block; vertical-align: top; width: 100%;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td style="font-size: 0px; word-break: break-word; width: 130px; text-align: center; padding: 10px;">
<div>
<img height="auto" alt="拖拽生成HTML邮件-拉易网-4" width="auto" src="https://www.drageasy.com/d04ac0cfc056c4609c4ae5002bb29944.png?imageslim" style="box-sizing: border-box; border: 0px; display: inline-block; outline: none; text-decoration: none; height: auto; width: auto; max-width: 100%; padding: 0px;" />
</div></td>
</tr>
</tbody>
</table>
</div></td>
</tr>
</tbody>
</table>
</div></td>
<td class="fourColumn column3" style="width: 25%; max-width: 25%; min-height: 1px; font-size: 13px; text-align: left; direction: ltr; vertical-align: top; padding: 0px;">
<div class="full" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 150px;">
<tbody>
<tr>
<td class="fullTd" style="direction: ltr; width: 150px; font-size: 0px; padding-bottom: 0px; text-align: center; vertical-align: top;">
<div style="display: inline-block; vertical-align: top; width: 100%;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td style="font-size: 0px; word-break: break-word; width: 130px; text-align: center; padding: 10px;">
<div>
<img height="auto" alt="拖拽生成HTML邮件-拉易网-1" width="auto" src="https://www.drageasy.com/90a0860cda107b2a2fc477a4daf15b08.png?imageslim" style="box-sizing: border-box; border: 0px; display: inline-block; outline: none; text-decoration: none; height: auto; width: auto; max-width: 100%; padding: 0px;" />
</div></td>
</tr>
</tbody>
</table>
</div></td>
</tr>
</tbody>
</table>
</div></td>
<td class="fourColumn column4" style="width: 25%; max-width: 25%; min-height: 1px; font-size: 13px; text-align: left; direction: ltr; vertical-align: top; padding: 0px;">
<div class="full" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 150px;">
<tbody>
<tr>
<td class="fullTd" style="direction: ltr; width: 150px; font-size: 0px; padding-bottom: 0px; text-align: center; vertical-align: top;">
<div style="display: inline-block; vertical-align: top; width: 100%;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td style="font-size: 0px; word-break: break-word; width: 130px; text-align: center; padding: 10px;">
<div>
<img height="auto" alt="拖拽生成HTML邮件-拉易网-10" width="auto" src="https://www.drageasy.com/38a8da1aa836fce1c6ad1959deedc2f4.png?imageslim" style="box-sizing: border-box; border: 0px; display: inline-block; outline: none; text-decoration: none; height: auto; width: auto; max-width: 100%; padding: 0px;" />
</div></td>
</tr>
</tbody>
</table>
</div></td>
</tr>
</tbody>
</table>
</div></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div>
<div tindex="6" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" style="background-color: rgb(18, 44, 90); background-image: url(""); background-repeat: no-repeat; background-size: 100px; background-position: 1% 50%;">
<tbody>
<tr>
<td style="direction: ltr; font-size: 0px; text-align: center; vertical-align: top; width: 600px;">
<table width="100%" border="0" cellpadding="0" cellspacing="0" style="vertical-align: top;">
<tbody>
<tr>
<td class="fourColumn column1" style="width: 25%; max-width: 25%; min-height: 1px; font-size: 13px; text-align: left; direction: ltr; vertical-align: top; padding: 0px;">
<div class="full" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 150px;">
<tbody>
<tr>
<td class="fullTd" style="direction: ltr; width: 150px; font-size: 0px; padding-bottom: 0px; text-align: center; vertical-align: top; background-image: url(""); background-repeat: no-repeat; background-size: 100px; background-position: 10% 50%;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td align="left" style="font-size: 0px; padding: 0px 20px;">
<div class="text" style="font-family: 微软雅黑, "Microsoft YaHei"; overflow-wrap: break-word; margin: 0px; text-align: center; line-height: 1.6; color: rgb(102, 102, 102); font-size: 14px; font-weight: normal;">
<div>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;"><span style="color: #ecf0f1;">返回主页</span></p>
</div>
</div></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div></td>
<td class="fourColumn column2" style="width: 25%; max-width: 25%; min-height: 1px; font-size: 13px; text-align: left; direction: ltr; vertical-align: top; padding: 0px;">
<div class="full" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 150px;">
<tbody>
<tr>
<td class="fullTd" style="direction: ltr; width: 150px; font-size: 0px; padding-bottom: 0px; text-align: center; vertical-align: top; background-image: url(""); background-repeat: no-repeat; background-size: 100px; background-position: 10% 50%;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td align="left" style="font-size: 0px; padding: 0px 20px;">
<div class="text" style="font-family: 微软雅黑, "Microsoft YaHei"; overflow-wrap: break-word; margin: 0px; text-align: center; line-height: 1.6; color: rgb(102, 102, 102); font-size: 14px; font-weight: normal;">
<div>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;"><span style="color: #ecf0f1;">与我联系</span></p>
</div>
</div></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div></td>
<td class="fourColumn column3" style="width: 25%; max-width: 25%; min-height: 1px; font-size: 13px; text-align: left; direction: ltr; vertical-align: top; padding: 0px;">
<div class="full" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 150px;">
<tbody>
<tr>
<td class="fullTd" style="direction: ltr; width: 150px; font-size: 0px; padding-bottom: 0px; text-align: center; vertical-align: top; background-image: url(""); background-repeat: no-repeat; background-size: 100px; background-position: 10% 50%;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td align="left" style="font-size: 0px; padding: 0px 20px;">
<div class="text" style="font-family: 微软雅黑, "Microsoft YaHei"; overflow-wrap: break-word; margin: 0px; text-align: center; line-height: 1.6; color: rgb(102, 102, 102); font-size: 14px; font-weight: normal;">
<div>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;"><span style="color: #ecf0f1;">连接微信</span></p>
</div>
</div></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div></td>
<td class="fourColumn column4" style="width: 25%; max-width: 25%; min-height: 1px; font-size: 13px; text-align: left; direction: ltr; vertical-align: top; padding: 0px;">
<div class="full" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 150px;">
<tbody>
<tr>
<td class="fullTd" style="direction: ltr; width: 150px; font-size: 0px; padding-bottom: 0px; text-align: center; vertical-align: top; background-image: url(""); background-repeat: no-repeat; background-size: 100px; background-position: 10% 50%;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td align="left" style="font-size: 0px; padding: 0px 20px;">
<div class="text" style="font-family: 微软雅黑, "Microsoft YaHei"; overflow-wrap: break-word; margin: 0px; text-align: center; line-height: 1.6; color: rgb(102, 102, 102); font-size: 14px; font-weight: normal;">
<div>
<p style="text-size-adjust: none; word-break: break-word; line-height: 1.6; font-size: 14px; margin: 0px;"><span style="color: #ecf0f1;">连接QQ</span></p>
</div>
</div></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div>
<div class="full" tindex="7" style="margin: 0px auto; max-width: 600px;">
<table align="center" border="0" cellpadding="0" cellspacing="0" class="fullTable" style="width: 600px;">
<tbody>
<tr>
<td style="direction: ltr; font-size: 0px; padding-top: 0px; text-align: center; vertical-align: top;">
<table border="0" cellpadding="0" cellspacing="0" width="100%" style="vertical-align: top;">
<tbody>
<tr>
<td align="center" vertical-align="middle" style="line-height: 0; padding-top: 13px; background-color: rgb(18, 44, 90); width: 600px; background-image: url(""); background-size: 100px; background-position: 10% 50%; background-repeat: no-repeat;"></td>
</tr>
</tbody>
</table></td>
</tr>
</tbody>
</table>
</div></td>
</tr>
</tbody>
</table>
</center></td>
</tr>
</tbody>
</table>
<!---->
<center style="text-align:center;font-size: 12px;margin:5px;color:rgb(102, 102, 102);transform: scale(.9);-webkit-transform: scale(.9);">
</center>
</body>
</html>
"""
def get_mail_msg():
    """Return the canned HTML alert-email body (module-level ``mail_msg``)."""
    return mail_msg
bsholy/learn_flask_email | mail_html/__init__.py | <reponame>bsholy/learn_flask_email
from mail_html.get_mail_message import get_mail_msg |
hortinstein/NOSETEST | PRETTYGOOD/test/encryption_test.py | """
Test the monocypher python binding.
"""
import unittest
import monocypher
import hashlib
import numpy as np
import binascii
import os
import json
import warnings
#!COMMENT Take a look how the monocypher library is tested
#!COMMENT This should help in understanding the crypto wrappers that I wrote
#!COMMENT and how to test them
class TestMonocypher(unittest.TestCase):
    """Exercise the monocypher binding: authenticated encryption (with and
    without associated data), signatures, key exchange, key generation, and
    the deprecation shims."""

    def test_symmetric(self):
        """Round-trip random messages through lock()/unlock()."""
        # fixed seed keeps the randomized inputs reproducible across runs
        random = np.random.RandomState(seed=1)
        for i in range(10):
            length = random.randint(1, 4096)
            key = bytes(random.randint(0, 256, 32, dtype=np.uint8))
            nonce = bytes(random.randint(0, 256, 24, dtype=np.uint8))
            msg = bytes(random.randint(0, 256, length, dtype=np.uint8))
            mac, c = monocypher.lock(key, nonce, msg)
            msg2 = monocypher.unlock(key, nonce, mac, c)
            self.assertNotEqual(msg, c)  # ciphertext must differ from plaintext
            self.assertEqual(msg, msg2)  # decryption restores the original
    def test_symmetric_aead(self):
        """Round-trip with associated data supplied to both lock() and unlock()."""
        random = np.random.RandomState(seed=1)
        for i in range(10):
            message_length = random.randint(1, 4096)
            aead_length = random.randint(1, 128)
            key = bytes(random.randint(0, 256, 32, dtype=np.uint8))
            nonce = bytes(random.randint(0, 256, 24, dtype=np.uint8))
            aead = bytes(random.randint(0, 256, aead_length, dtype=np.uint8))
            msg = bytes(random.randint(0, 256, message_length, dtype=np.uint8))
            mac, c = monocypher.lock(key, nonce, msg, associated_data=aead)
            msg2 = monocypher.unlock(key, nonce, mac, c, associated_data=aead)
            self.assertEqual(msg, msg2)
    def test_sign(self):
        """Valid signatures verify; tampered messages or signatures do not."""
        random = np.random.RandomState(seed=1)
        for i in range(10):
            length = random.randint(1, 4096)
            secret_key = bytes(random.randint(0, 256, 32, dtype=np.uint8))
            msg = bytes(random.randint(0, 256, length, dtype=np.uint8))
            public_key = monocypher.compute_signing_public_key(secret_key)
            sig = monocypher.signature_sign(secret_key, msg)
            self.assertTrue(monocypher.signature_check(sig, public_key, msg))
            self.assertFalse(monocypher.signature_check(sig, public_key, msg + b'0'))
            # flip one byte of the signature; verification must fail
            sig2 = sig[:10] + bytes([sig[10] + 1]) + sig[11:]
            self.assertFalse(monocypher.signature_check(sig2, public_key, msg))
    def test_key_exchange_static(self):
        """Known-answer test for key_exchange() with fixed key material."""
        # NOTE(review): b'\<KEY>' looks like a redacted placeholder — the real
        # expected shared-secret bytes are needed for this test to pass; confirm.
        expect = b'\<KEY>'
        your_secret_key = bytes(range(32))
        their_public_key = bytes(range(32, 64))
        shared_key = monocypher.key_exchange(your_secret_key, their_public_key)
        self.assertEqual(expect, shared_key)
    def test_key_exchange_random(self):
        """Both sides of a fresh key exchange derive the same shared secret."""
        a_private_secret, a_public_secret = monocypher.generate_key_exchange_key_pair()
        b_private_secret, b_public_secret = monocypher.generate_key_exchange_key_pair()
        b_shared_secret = monocypher.key_exchange(b_private_secret, a_public_secret)
        a_shared_secret = monocypher.key_exchange(a_private_secret, b_public_secret)
        self.assertEqual(a_shared_secret, b_shared_secret)
    def test_generate_key(self):
        """generate_key() returns 32 bytes (a 256-bit key)."""
        self.assertEqual(32, len(monocypher.generate_key()))
    def test_deprecation_public_key_compute(self):
        """The legacy public_key_compute() emits exactly one DeprecationWarning."""
        with warnings.catch_warnings(record=True) as w:
            monocypher.public_key_compute(bytes(range(32)))
            self.assertEqual(1, len(w))
            self.assertIn('deprecated', str(w[0].message))
    def test_deprecation_generate_key_pair(self):
        """The legacy generate_key_pair() emits exactly one DeprecationWarning."""
        with warnings.catch_warnings(record=True) as w:
            monocypher.generate_key_pair()
            self.assertEqual(1, len(w))
            self.assertIn('deprecated', str(w[0].message))
hortinstein/NOSETEST | STOPANDCHAT/4_understanding_testoutput.py | <reponame>hortinstein/NOSETEST
#!COMMENT Warmup Exercise #4
def sum(arg):
    """Add up the values in iterable *arg* and return the total.

    Tutorial re-implementation of the builtin ``sum`` (intentionally shadows
    it so the tests below exercise this version).
    """
    result = 0
    for item in arg:
        result = result + item
    return result
# Understanding Test Output
# That was a very simple example where everything passes, so now you’re going to try a failing test and interpret the output.
# sum() should be able to accept other lists of numeric types, like fractions.
# At the top of the test.py file, add an import statement to import the Fraction type from the fractions module in the standard library:
# Now add a test with an assertion expecting the incorrect value, in this case expecting the sum of 1/4, 1/4, and 2/5 to be 1:
import unittest
from fractions import Fraction
class TestSum(unittest.TestCase):
    # Tutorial test case: one passing test and one deliberately failing one.
    def test_list_int(self):
        """
        Test that it can sum a list of integers
        """
        data = [1, 2, 3]
        result = sum(data)
        self.assertEqual(result, 6)
    # NOTE: this test asserts the WRONG value on purpose — the actual sum is
    # Fraction(9, 10) — so the tutorial can walk through unittest's failure
    # output below. Do not "fix" the expected value.
    def test_list_fraction(self):
        """
        Test that it can sum a list of fractions
        """
        data = [Fraction(1, 4), Fraction(1, 4), Fraction(2, 5)]
        result = sum(data)
        self.assertEqual(result, 1)
# Run the test case above when this tutorial file is executed directly.
if __name__ == '__main__':
    unittest.main()
# If you execute the tests again with python -m unittest test, you should see the following output:
# $ python -m unittest test
# F.
# ======================================================================
# FAIL: test_list_fraction (test.TestSum)
# ----------------------------------------------------------------------
# Traceback (most recent call last):
# File "test.py", line 21, in test_list_fraction
# self.assertEqual(result, 1)
# AssertionError: Fraction(9, 10) != 1
# ----------------------------------------------------------------------
# Ran 2 tests in 0.001s
# FAILED (failures=1)
# In the output, you’ll see the following information:
# The first line shows the execution results of all the tests, one failed (F) and one passed (.).
# The FAIL entry shows some details about the failed test:
# The test method name (test_list_fraction)
# The test module (test) and the test case (TestSum)
# A traceback to the failing line
# The details of the assertion with the expected result (1) and the actual result (Fraction(9, 10))
# Remember, you can add extra information to the test output by adding the -v flag to the python -m unittest command.
|
hortinstein/NOSETEST | STOPANDCHAT/3_writing_first_test.py | <reponame>hortinstein/NOSETEST
#!COMMENT Warmup Exercise #3
# Writing Your First Test
# Let’s bring together what you’ve learned so far and, instead of testing the built-in sum() function,
# test a simple implementation of the same requirement.
# Create a new project folder and, inside that, create a new folder called my_sum. Inside my_sum,
# create an empty file called __init__.py. Creating the __init__.py file means that the my_sum folder
# can be imported as a module from the parent directory.
# Your project folder should look like this:
# project/
# │
# └── my_sum/
# └── __init__.py
# Open up my_sum/__init__.py and create a new function called sum(), which takes an
# iterable (a list, tuple, or set) and adds the values together:
def sum(arg, start=0):
    """Return the total of the values in iterable *arg*.

    Generalized to mirror the builtin ``sum``: the optional *start* value
    seeds the accumulation (default 0), so existing one-argument callers are
    unaffected.

    Parameters
    ----------
    arg : an iterable (list, tuple, or set) of addable values
    start : the initial value of the running total, default 0

    Returns
    -------
    The accumulated total once the iterable has been exhausted.
    """
    total = start
    for val in arg:
        total += val
    return total
# This code example creates a variable called total, iterates over all the values in arg,
# and adds them to total. It then returns the result once the iterable has been exhausted.
# How to Structure a Simple Test
# Before you dive into writing tests, you’ll want to first make a couple of decisions:
# What do you want to test?
# Are you writing a unit test or an integration test?
# Then the structure of a test should loosely follow this workflow:
# Create your inputs
# Execute the code being tested, capturing the output
# Compare the output with an expected result
# For this application, you’re testing sum(). There are many behaviors in sum() you could check, such as:
# Can it sum a list of whole numbers (integers)?
# Can it sum a tuple or set?
# Can it sum a list of floats?
# What happens when you provide it with a bad value, such as a single integer or a string?
# What happens when one of the values is negative?
# The most simple test would be a list of integers. Create a file, test.py with the following Python code:
import unittest
class TestSum(unittest.TestCase):
    # Tutorial test case exercising the sum() implementation defined above.
    def test_list_int(self):
        """
        Test that it can sum a list of integers
        """
        data = [1, 2, 3]
        result = sum(data)
        self.assertEqual(result, 6)
# Run the test case above when this tutorial file is executed directly.
if __name__ == '__main__':
    unittest.main()
# How to Write Assertions
# The last step of writing a test is to validate the output against a known response.
# This is known as an assertion. There are some general best practices around how to write assertions:
# Make sure tests are repeatable and run your test multiple times to make sure it gives the same result every time
# Try and assert results that relate to your input data, such as checking that the result is the actual sum of values in the sum() example
# unittest comes with lots of methods to assert on the values, types, and existence of variables. Here are some of the most commonly used methods:
# Method Equivalent to
# .assertEqual(a, b) a == b
# .assertTrue(x) bool(x) is True
# .assertFalse(x) bool(x) is False
# .assertIs(a, b) a is b
# .assertIsNone(x) x is None
# .assertIn(a, b) a in b
# .assertIsInstance(a, b) isinstance(a, b)
# .assertIs(), .assertIsNone(), .assertIn(), and .assertIsInstance() all have opposite methods, named .assertIsNot(), and so forth.
# Side Effects
# When you’re writing tests, it’s often not as simple as looking at the return value of a function. Often, executing a piece of code
# will alter other things in the environment, such as the attribute of a class, a file on the filesystem, or a value in a database.
# These are known as side effects and are an important part of testing. Decide if the side effect is being tested before including
# it in your list of assertions.
# If you find that the unit of code you want to test has lots of side effects, you might be breaking the Single Responsibility Principle.
# Breaking the Single Responsibility Principle means the piece of code is doing too many things and would be better off being refactored.
# Following the Single Responsibility Principle is a great way to design code that it is easy to write repeatable and simple unit tests
# for, and ultimately, reliable applications.
# Run the tests:
# $ python -m unittest test
# You can provide additional options to change the output. One of those is -v for verbose. Try that next:
# $ python -m unittest -v test
# Instead of providing the name of a module containing tests, you can request an auto-discovery using the following:
# $ python -m unittest discover
# This will search the current directory for any files named test*.py and attempt to test them.
|
hortinstein/NOSETEST | STOPANDCHAT/1_basic_testing.py | #!COMMENT Warmup Exercise #1
# Testing Your Code
# There are many ways to test your code. In this tutorial, you’ll learn the techniques from the most basic steps and work towards advanced methods.
# Automated vs. Manual Testing
# The good news is, you’ve probably already created a test without realizing it. Remember when you ran your application and used it for the first time?
# Did you check the features and experiment using them? That’s known as exploratory testing and is a form of manual testing.
# Exploratory testing is a form of testing that is done without a plan. In an exploratory test, you’re just exploring the application.
# To have a complete set of manual tests, all you need to do is make a list of all the features your application has, the different types
# of input it can accept, and the expected results. Now, every time you make a change to your code, you need to go through every single item on that list and check it.
# That doesn’t sound like much fun, does it?
# This is where automated testing comes in. Automated testing is the execution of your test plan (the parts of your application you want to test,
# the order in which you want to test them, and the expected responses) by a script instead of a human. Python already comes with a set of tools
# and libraries to help you create automated tests for your application. We’ll explore those tools and libraries in this tutorial.
# Unit Tests vs. Integration Tests
# The world of testing has no shortage of terminology, and now that you know the difference between automated and manual testing, it’s time to go a level deeper.
# Think of how you might test the lights on a car. You would turn on the lights (known as the test step) and go outside the car or
# ask a friend to check that the lights are on (known as the test assertion). Testing multiple components is known as integration testing.
# Think of all the things that need to work correctly in order for a simple task to give the right result. These components are like the parts to your
# application, all of those classes, functions, and modules you’ve written.
# A major challenge with integration testing is when an integration test doesn’t give the right result. It’s very hard to diagnose the issue without being
# able to isolate which part of the system is failing. If the lights didn’t turn on, then maybe the bulbs are broken. Is the battery dead?
# What about the alternator? Is the car’s computer failing?
# If you have a fancy modern car, it will tell you when your light bulbs have gone. It does this using a form of unit test.
# A unit test is a smaller test, one that checks that a single component operates in the right way. A unit test helps you to isolate what is
# broken in your application and fix it faster.
# You have just seen two types of tests:
# An integration test checks that components in your application operate with each other.
# A unit test checks a small component in your application.
# You can write both integration tests and unit tests in Python. To write a unit test for the built-in function sum(), you would
# check the output of sum() against a known output.
# For example, here’s how you check that the sum() of the numbers (1, 2, 3) equals 6:
assert sum([1, 2, 3]) == 6 , "Should be 6"
# This will not output anything on the REPL because the values are correct.
# Run the statement again with the wrong values to see an AssertionError:
# NOTE: the next assertion is intentionally false (the sum is 3, not 6) and
# raises AssertionError as soon as this tutorial module is executed.
assert sum([1, 1, 1]) == 6, "Should be 6"
def test_sum():
assert sum([1, 2, 3]) == 6, "Should be 6"
def test_sum_tuple():
assert sum((1, 2, 2)) == 6, "Should be 6"
# When you execute test_sum_2.py, the script will give an error because the sum() of (1, 2, 2) is 5, not 6.
if __name__ == "__main__":
test_sum()
test_sum_tuple()
print("Everything passed")
|
hortinstein/NOSETEST | SWANKILLER.py | <filename>SWANKILLER.py
import os
import sys
import fileinput
import shutil, errno
def copyanything(src, dst):
    """Copy *src* to *dst*, whether src is a directory tree or a single file.

    A recursive directory copy is attempted first; when src turns out not to
    be a directory (ENOTDIR), a plain file copy is used instead.  Any other
    OS error propagates to the caller.
    """
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        if exc.errno != errno.ENOTDIR:
            raise
        shutil.copy(src, dst)
# Top-level driver: clone the assignment tree into "student_<dir>", strip the
# marked solution regions from every source file in place, and collect the
# removed code into a SOLUTION.md for the instructor.
old_dir = sys.argv[1]
#creates a new copy of the data
walk_dir = "student_"+old_dir
copyanything(old_dir,walk_dir)
# processed file path -> markdown fragment holding its cut-out solution code
solution_dict = {}
print('walk_dir = ' + walk_dir)
# If your current working directory may change during script execution, it's recommended to
# immediately convert program arguments to an absolute path. Then the variable root below will
# be an absolute path as well. Example:
# walk_dir = os.path.abspath(walk_dir)
print('walk_dir (absolute) = ' + os.path.abspath(walk_dir))
for root, subdirs, files in os.walk(walk_dir):
    # never touch version-control internals
    if (".git" in root): continue
    print('--\nroot = ' + root)
    for subdir in subdirs:
        print('\t- subdirectory ' + subdir)
    for filename in files:
        file_path = os.path.join(root, filename)
        # skip caches, coverage artifacts, this script itself, and object files
        if ("__pycache__" in file_path) or \
           (".gc" in filename) or \
           ("SWANKILLER" in filename) or \
           (".o" in filename):
            continue
        print('\t- file %s (full path: %s)' % (filename, file_path))
        # CUT is True while we are inside a !CUT_START / !CUT_END solution region
        CUT = False
        solution_dict[file_path] = ""
        # fileinput with inplace=True redirects print() back into the file, so
        # each branch below decides what the *student* copy of the line becomes
        for line in fileinput.input(file_path, inplace=True):
            #preserve leading whitespace
            startwhites = line[:len(line)-len(line.lstrip())]
            #PYTHON
            if ".py" in filename:
                if "#!CUT_END" in line:
                    CUT = False
                    solution_dict[file_path] += '{}#END CODE HERE\n```\n'.format(startwhites)
                    print('{}#END CODE HERE\n'.format(startwhites), end='')
                elif CUT == True:
                    # inside a solution region: record the line (prefixed by its
                    # original line number) and blank it out for students
                    solution_dict[file_path] += "{} {}".format(fileinput.filelineno(),line)
                    print('{}\n'.format(startwhites), end='')
                elif "#!CUT_START" in line:
                    CUT = True
                    solution_dict[file_path] += '``` python\n{}#START CODE HERE\n'.format(startwhites)
                    print('{}#START CODE HERE\n'.format(startwhites), end='')
                elif "#!COMMENT" in line:
                    # instructor-only remark: keep it in the solution, turn it
                    # into a plain comment in the student copy
                    solution_dict[file_path] += '{} {}\n'.format(fileinput.filelineno(),line.replace("#!COMMENT",""))
                    print('{}'.format(line.replace("#!COMMENT","#")), end='')
                else:
                    print('{}'.format(line), end='')
            #C and C++
            # NOTE(review): the "//!CUT_END" branch below never print()s, so that
            # marker line is dropped from C/C++ student copies (the .py branch
            # keeps an "#END CODE HERE" line) -- confirm the asymmetry is intended.
            if (".c" in filename) or (".cpp" in filename):
                if "//!CUT_END" in line:
                    CUT = False
                    solution_dict[file_path] += '{}//END CODE HERE\n```\n'.format(startwhites)
                elif CUT == True:
                    solution_dict[file_path] += "{} {}".format(fileinput.filelineno(),line)
                    print('{}\n'.format(startwhites), end='')
                elif "//!CUT_START" in line:
                    CUT = True
                    solution_dict[file_path] += '``` c\n{}//START CODE HERE\n'.format(startwhites)
                    print('{}//START CODE HERE\n'.format(startwhites), end='')
                elif "//!COMMENT" in line:
                    solution_dict[file_path] += '{} {}\n'.format(fileinput.filelineno(),line.replace("//!COMMENT",""))
                    print('{}'.format(line.replace("//!COMMENT","//")), end='')
                else:
                    print('{}'.format(line), end='')
            else:
                # NOTE(review): this else pairs with the C/C++ `if` above, so a
                # .py line already emitted by the Python chain is printed a
                # second time here.  Reconstructed from a flattened listing --
                # the original likely used an if/elif/else chain; confirm
                # against upstream before relying on this behavior.
                print('{}'.format(line), end='')
#this creates the solution file
f = open("{}/SOLUTION.md".format(walk_dir), "w")
# files are emitted into SOLUTION.md in this fixed, hand-curated order
ordered_dict = [
    "student_NOSETEST/STOPANDCHAT/1_basic_testing.py",
    "student_NOSETEST/STOPANDCHAT/2_automated_testing.py",
    "student_NOSETEST/STOPANDCHAT/3_writing_first_test.py",
    "student_NOSETEST/STOPANDCHAT/4_understanding_testoutput.py",
    "student_NOSETEST/PRETTYGOOD/test/encryption_test.py",
    "student_NOSETEST/PRETTYGOOD/data_utils.py",
    "student_NOSETEST/src/tests.cpp",
    "student_NOSETEST/src/monocypher.c",
    "student_NOSETEST/src/SPITESTORE.c",
    "student_NOSETEST/src/ll.c",
    "student_NOSETEST/src/encryption.c",
    "student_NOSETEST/PRETTYGOOD/PRETTYGOOD.py",
]
f.write("\n\n")
# first pass: table of contents with markdown anchor links
for item in ordered_dict:
    if solution_dict[item] == "":
        continue
    f.write("- [{}](#{})\n".format(item,item.replace("/","").replace(".","")))
# second pass: one section per file containing its recorded solution code
for item in ordered_dict:
    if solution_dict[item] == "":
        continue
    f.write("## {}\n".format(item))
    f.write(solution_dict[item])
f.close()
|
hortinstein/NOSETEST | PRETTYGOOD/PRETTYGOOD.py | """
Very simple HTTP server in python (Updated for Python 3.7)
Usage:
./PRETTYGOOD.py -h
./PRETTYGOOD.py -l localhost -p 8000
Send a GET request:
curl http://localhost:8000
Send a HEAD request:
curl -I http://localhost:8000
Send a POST request:
curl -d "foo=bar&bin=baz" http://localhost:8000
"""
import argparse
from http.server import HTTPServer, BaseHTTPRequestHandler
from data_utils import *
#imports the python encryption library
import monocypher
import unittest
import base64
#imports the task queue that will be used by the test server
import threading, queue
# Work queues shared between the HTTP handler and the test code:
# q_task holds tasks awaiting delivery to the client, q_resp holds the
# deserialized responses the client posts back.
q_task = queue.Queue()
q_resp = queue.Queue()
#these are the task numbers!
ECHO = 0
TIMEOUT = 1
EXIT = 2
#encryption globals -- sizes in bytes, matching the struct packing in data_utils
KEY_LEN = 32
NONCE_LEN = 24
MAC_LEN = 16
# Server keypair generated at import time; SHARED_KEY is derived later, once
# the client POSTs its public key to /key.
PRIV_KEY, PUB_KEY = monocypher.generate_key_exchange_key_pair()
SHARED_KEY = ""
assert(len(PRIV_KEY) == 32)
assert(len(PUB_KEY) == 32)
###############################################################################
# Task Server
###############################################################################
class S(BaseHTTPRequestHandler):
    """HTTP handler implementing a tiny encrypted tasking protocol.

    Routes:
      GET  /key  -> send the server's public key (base64)
      POST /key  -> receive the client's public key, derive SHARED_KEY
      GET  /task -> pop a task from q_task, encrypt and send it (base64)
      POST /task -> decrypt a task response and push it onto q_resp
    """
    def _set_headers(self):
        # Minimal 200 / text-html header used by every route.
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
    def _html(self, message):
        """This just generates an HTML document that includes `message`
        in the body. Override, or re-write this to do more interesting stuff.
        """
        content = message
        return content.encode("utf8")  # NOTE: must return a bytes object!
    #this provides the public key so the client can derive the shared session key
    def do_get_key(self):
        print("GET key, sending ", base64.b64encode(PUB_KEY))
        self.wfile.write(base64.b64encode(PUB_KEY)) #sends servers public key
    #this sets the remote clients public key and allows for the shared session key
    def do_post_key(self,key):
        global SHARED_KEY
        print("POST b64 key: ",key)
        their_key = bytes(base64.b64decode(key))
        print(len(their_key), their_key)
        print("POST decoded key: ",their_key)
        # Diffie-Hellman style: our private key + their public key -> shared key
        SHARED_KEY = bytes(monocypher.key_exchange(PRIV_KEY,their_key))
        print("shared key: ",SHARED_KEY.hex())
    #this provides a task or the client
    def do_get_task(self):
        global SHARED_KEY
        print("GET task")
        # Blocks until a task is queued by the test/driver code.
        item = q_task.get()
        q_task.task_done()
        print ("got item: ",item)
        enc = encrypyt_wrapper(SHARED_KEY, PUB_KEY, item.serialize_task())
        print("enc bytes: ",enc.hex())
        b64 = base64.b64encode(enc)
        print("sending {} bytes: {}".format(len(b64),b64))
        self.wfile.write(b64) #sends the encrypted, base64 encoded task
    #this gets a response based off of a task
    def do_post_task_resp(self,resp):
        global SHARED_KEY
        print("POST task: ",resp.hex())
        ctext = bytes(base64.b64decode(resp))
        print (ctext.hex())
        dec = decrypt_wrapper(SHARED_KEY,ctext)
        print ("decrypted bin",dec)
        #get the task num to check type of response (first unsigned short)
        task_num = struct.unpack("H",dec[:2])
        task_num = task_num[0]
        #check the type of response and enqueue a resp object
        if (ECHO == task_num):
            print("recieved ECHO resposne")
            task = TaskEcho("")
            task.deserialize_response(dec)
            q_resp.put(task)
        elif (TIMEOUT == task_num):
            print("recieved TIMEOUT resposne")
            # NOTE(review): TaskTimeout.__init__ requires a `timeout` argument,
            # so this no-arg call raises TypeError -- confirm intended usage.
            task = TaskTimeout()
            task.deserialize_response(dec)
            q_resp.put(task)
        elif (EXIT == task_num):
            print("recieved EXIT resposne")
            task = TaskExit()
            task.deserialize_response(dec)
            q_resp.put(task)
        else:
            print("unknown response!")
        self.wfile.write(self._html("POST!"))
    #routing for get requests
    def do_GET(self):
        self._set_headers()
        if self.path == '/key':
            self.do_get_key()
        elif self.path == '/task':
            self.do_get_task()
        else:
            self.wfile.write(self._html("hi!"))
    def do_HEAD(self):
        # HEAD gets the same headers as GET, with no body.
        self._set_headers()
    #routing for post requests
    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        print (self.path,post_data)
        self._set_headers()
        if self.path == '/key':
            self.do_post_key(post_data.strip())
        elif self.path == '/task':
            print("RECV RESP")
            self.do_post_task_resp(post_data.strip())
        else:
            self.wfile.write(self._html("POST!"))
def run(server_class=HTTPServer, handler_class=S, addr="localhost", port=8000):
    """Start an HTTP server bound to (addr, port) and block serving forever.

    server_class / handler_class are injectable mainly so tests can swap in
    alternatives; defaults start the tasking server defined above.
    """
    httpd = server_class((addr, port), handler_class)
    print("priv key:", PRIV_KEY, "pub key:", PUB_KEY)
    print(f"Starting httpd server on {addr}:{port}")
    httpd.serve_forever()
###############################################################################
# Task Server tests
###############################################################################
#LOL this is a terrible way to do this
class TestSPITESTORE(unittest.TestCase):
    """End-to-end tests driven through the shared q_task / q_resp queues.

    The #!COMMENT / #!CUT_START / #!CUT_END markers below are consumed by
    SWANKILLER.py to build the student copy -- do not remove them.
    """
    def test_echo(self):
        #!COMMENT just use the solution on this one, but lets talk better ways
        #!COMMENT i got really lazy...
        #!CUT_START
        # Queue an ECHO task for the client, then block until its response
        # arrives back on q_resp.
        print("starting echo test")
        my_echo_string = "testing my echo string"
        q_task.put(TaskEcho(my_echo_string))
        print("awaiting echo test")
        task_resp = q_resp.get()
        q_resp.task_done()
        num, res = task_resp.return_res()
        self.assertEqual(ECHO,num)
        # NOTE(review): return_res() yields the struct-unpacked payload
        # (bytes, NUL-terminated) while my_echo_string is str -- confirm the
        # client round-trip makes these compare equal.
        self.assertEqual(my_echo_string,res)
        #!CUT_END
    def test_timeout(self):
        # placeholder -- not yet implemented
        print("timeout")
    def test_exit(self):
        # placeholder -- not yet implemented
        print("exit")
###############################################################################
# Main
###############################################################################
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run a simple HTTP server")
    parser.add_argument(
        "-l",
        "--listen",
        default="localhost",
        help="Specify the IP address on which the server listens",
    )
    parser.add_argument(
        "-p",
        "--port",
        type=int,
        default=8000,
        help="Specify the port on which the server listens",
    )
    args = parser.parse_args()
    # Run the HTTP server in a daemon-less background thread so unittest can
    # drive it from this thread through the shared queues.
    # NOTE(review): args is parsed but unused -- the args= tuple is commented
    # out, so run() always binds to its localhost:8000 defaults.
    http_server_thread = threading.Thread(name='http_server',
                                          target = run,
                                          )#args=(args.listen,args.port))
    http_server_thread.start()
    # turn-on the worker thread
    unittest.main()
hortinstein/NOSETEST | STOPANDCHAT/2_automated_testing.py | #!COMMENT Warmup Exercise #2
# Writing tests in this way is okay for a simple check, but what if more than one fails? This is where test runners come in.
# The test runner is a special application designed for running tests, checking the output,
# and giving you tools for debugging and diagnosing tests and applications.
# Choosing a Test Runner
# There are many test runners available for Python. The one built into the Python standard library is called unittest.
# In this tutorial, you will be using unittest test cases and the unittest test runner.
# The principles of unittest are easily portable to other frameworks. The three most popular test runners are:
# unittest
# nose or nose2 [this is pure irony for our course]
# pytest
# Choosing the best test runner for your requirements and level of experience is important.
# unittest
# unittest has been built into the Python standard library since version 2.1. We will be using it
# unittest contains both a testing framework and a test runner. unittest has some important requirements for writing and executing tests.
# unittest requires that:
# You put your tests into classes as methods
# You use a series of special assertion methods in the unittest.TestCase class instead of the built-in assert statement
# To convert the earlier example to a unittest test case, you would have to:
# Import unittest from the standard library
# Create a class called TestSum that inherits from the TestCase class
# Convert the test functions into methods by adding self as the first argument
# Change the assertions to use the self.assertEqual() method on the TestCase class
# Change the command-line entry point to call unittest.main()
# Follow those steps by creating a new file test_sum_unittest.py with the following code:
import unittest
class TestSum(unittest.TestCase):
    """unittest translation of the earlier hand-rolled sum() checks."""
    def test_sum(self):
        """sum() of a list of ints matches the expected total."""
        self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")
    def test_sum_tuple(self):
        """Deliberately failing case for the tutorial: sum((1, 2, 2)) is 5, not 6."""
        self.assertEqual(sum((1, 2, 2)), 6, "Should be 6")
if __name__ == '__main__':
    unittest.main()
|
hortinstein/NOSETEST | PRETTYGOOD/data_utils.py | <gh_stars>1-10
import unittest
import monocypher
import random
import numpy as np
import unittest
import monocypher
import hashlib
import numpy as np
import binascii
import os
import json
import warnings
import struct
#encryption globals -- field sizes in bytes, used by the struct packing in the
#encrypt/decrypt wrappers below
KEY_LEN = 32
NONCE_LEN = 24
MAC_LEN = 16
###############################################################################
# Task Wrappers
###############################################################################
#these are the task numbers! (first unsigned short of every serialized task)
ECHO = 0
TIMEOUT = 1
EXIT = 2
class TaskEcho():
    """Wire-format wrapper for the ECHO task.

    Layout is a little-endian `=HH` header (task number, payload length)
    followed by the raw payload bytes.
    """
    # most recently deserialized response payload (bytes after unpacking)
    res = ""
    # init method or constructor
    def __init__(self, echo_string):
        self.echo_string = echo_string
        #TODO test something here
        # NUL-terminate the payload -- presumably for the C client; confirm.
        self.echo_string += '\0'
    def deserialize_response(self,res):
        # 4 bytes is the size of the header (`=HH`: task number + length);
        # the original comment said 5, which does not match the format.
        self.task_num, size, self.res = struct.unpack("=HH{}s".format(len(res[4:])),res)
    def serialize_task(self):
        # NOTE(review): the length field is the *character* count while the
        # payload is UTF-8 encoded -- non-ASCII input would be silently
        # truncated by struct.pack's `s` padding; confirm inputs are ASCII.
        return struct.pack("=HH{}s".format(len(self.echo_string)),
                           ECHO,
                           len(self.echo_string),
                           self.echo_string.encode('utf-8'))
    def return_res(self):
        # Returns (task number, last deserialized payload).
        return ECHO, self.res
class TaskExit():
    """Wire-format wrapper for the EXIT task (header only, no payload)."""
    # most recently deserialized response payload (bytes after unpacking)
    res = ""
    def deserialize_response(self, res):
        """Unpack an EXIT response: 4-byte `=HH` header, then any payload.

        BUG FIX: `self` was missing from the signature, so calling
        task.deserialize_response(data) raised TypeError and the `self.*`
        assignments in the body could never have worked.
        """
        self.task_num, size, self.res = struct.unpack("=HH{}s".format(len(res[4:])), res)
    def serialize_task(self):
        """Pack the EXIT task: task number EXIT with a zero payload length."""
        return struct.pack("=HH", EXIT, 0)
    def return_res(self):
        """Return (task number, last deserialized payload)."""
        return EXIT, self.res
class TaskTimeout():
    """Wire-format wrapper for the TIMEOUT task (`=HHH`: number, size, value)."""
    # most recently deserialized response payload (bytes after unpacking)
    res = ""
    def __init__(self, timeout):
        # timeout value (unsigned short) forwarded to the client
        self.timeout = timeout
    # init method or constructor
    def deserialize_response(self, res):
        # 4-byte `=HH` header (task number + payload length), then payload.
        self.task_num, size, self.res = struct.unpack("=HH{}s".format(len(res[4:])), res)
    def serialize_task(self):
        # NOTE(review): the size field is packed as 4 but the payload is one
        # unsigned short (2 bytes) -- confirm which the client expects.
        return struct.pack("=HHH", TIMEOUT, 4, self.timeout)
    def return_res(self):
        """Return (task number, last deserialized payload).

        BUG FIX: previously returned `self.task_nun` (typo), which raised
        AttributeError on every call.
        """
        return self.task_num, self.res
###############################################################################
# Encrpytion Wrappers
###############################################################################
def encrypyt_wrapper(SHARED_KEY, PUB_KEY, plaintext):
    """Encrypt *plaintext* under SHARED_KEY and pack it for the wire.

    Wire layout (native-endian struct): sender public key, nonce, MAC,
    unsigned long ciphertext length, ciphertext.  The nonce travels inside
    the message, so decrypt_wrapper never needs to reproduce it -- which is
    what makes the fix below interface-compatible.

    BUG FIX (security): the nonce used to come from
    np.random.RandomState(seed=1), i.e. the *same* nonce on every call.
    Nonce reuse under one key is catastrophic for authenticated ciphers, so
    the nonce is now drawn from the OS CSPRNG instead.
    """
    nonce = os.urandom(NONCE_LEN)
    mac, c = monocypher.lock(SHARED_KEY, nonce, plaintext)
    print("encrypt: SHARED_KEY {}\nNONCE {}\nMAC {}".format(SHARED_KEY.hex(), nonce.hex(), mac.hex()))
    print(len(c), c.hex())
    # sender pub key [unnecessary on the wire, kept for format compatibility],
    # nonce, mac, ciphertext length, ciphertext
    return struct.pack("{}s{}s{}sL{}s".format(KEY_LEN, NONCE_LEN, MAC_LEN, len(c)), PUB_KEY, nonce, mac, len(c), c)
def decrypt_wrapper(SHARED_KEY, enc_msg):
    """Unpack a message produced by encrypyt_wrapper and decrypt it.

    Expects, in one native-endian struct: sender public key, nonce, MAC,
    unsigned long ciphertext length, ciphertext.  Returns whatever
    monocypher.unlock yields for the recovered fields.
    """
    # Fixed-size prefix: key + nonce + MAC + 8 bytes for the native `L`
    # length field (matches the packer on a 64-bit platform).
    header_len = NONCE_LEN + KEY_LEN + MAC_LEN + 8
    layout = "{}s{}s{}sL{}s".format(KEY_LEN, NONCE_LEN, MAC_LEN, (len(enc_msg) - header_len))
    their_key, nonce, mac, clen, cipher = struct.unpack(layout, enc_msg)
    print("decrypt: SHARED_KEY {}\nNONCE {}\nMAC {}".format(SHARED_KEY.hex(), nonce.hex(), mac.hex()))
    print(clen, cipher)
    msg = monocypher.unlock(SHARED_KEY, nonce, mac, cipher)
    print(msg)
    return msg
###############################################################################
# Encrpytion Wrappers testing
###############################################################################
class TestEncWrappers(unittest.TestCase):
    """Round-trip test for the key exchange plus encrypt/decrypt wrappers.

    The #!COMMENT / #!CUT_START / #!CUT_END markers are consumed by
    SWANKILLER.py when producing the student copy -- do not remove them.
    """
    def test_key_exchange_random(self):
        #!COMMENT write a test for the above encrypt and decrypt library
        #!CUT_START
        # Both sides derive the same shared secret from their own private key
        # plus the other side's public key (Diffie-Hellman style exchange).
        a_private_secret, a_public_secret = monocypher.generate_key_exchange_key_pair()
        b_private_secret, b_public_secret = monocypher.generate_key_exchange_key_pair()
        b_shared_secret = monocypher.key_exchange(b_private_secret, a_public_secret)
        a_shared_secret = monocypher.key_exchange(a_private_secret, b_public_secret)
        self.assertEqual(a_shared_secret, b_shared_secret)
        dumb_message = bytes("this is my message",encoding='utf8')
        #CODE CAVING
        # Encrypt with one side's copy of the shared key and decrypt with the
        # other's: the plaintext must survive the round trip unchanged.
        self.assertEqual(dumb_message,
                         decrypt_wrapper(a_shared_secret,
                                         encrypyt_wrapper(b_shared_secret, a_public_secret,
                                                          dumb_message))
                         )
        #!CUT_END
if __name__ == "__main__":
    # NOTE(review): `global` is a no-op at module scope -- PRIV_KEY/PUB_KEY
    # are already module globals; the statement can simply be removed.
    global PRIV_KEY, PUB_KEY
    PRIV_KEY, PUB_KEY = monocypher.generate_key_exchange_key_pair()
    unittest.main()
gclaassen/extension-scraper | pyScraper.py |
import os
import sys
import getopt
import shutil
import time
from datetime import datetime
# Indices into the scraperParams list returned by argumentExtraction().
EXT_TYPE = 0
MOVE_TYPE = 1
SCR_DIR = 2
DEST_DIR = 3
# Values for scraperParams[MOVE_TYPE].
COPY = 0
MOVE = 1
# Report file appended to in the destination directory on every run.
REPORT_NAME = "_report.txt"
# Any directory whose path contains one of these substrings is skipped
# during the walk (system / OS directories).
ignoreDirKeyWords = [
    'Recycle',
    'Windows',
    'System Volume Information',
    'AppData'
]
def argumentExtraction(argv):
    """Parse CLI arguments into [extType, moveType, srcFile, destFile].

    extType is a list of extensions (the -t value split on spaces), moveType
    is COPY/MOVE (or None when neither flag was given), srcFile/destFile are
    the source and destination directories.  Returns None (after printing
    usage) when the arguments cannot be parsed.
    """
    destFile = None
    srcFile = None
    extType = None
    moveType = None
    try:
        [opts, argv] = getopt.getopt(
            argv, "ht:s:d:cm", ["help", "type=", "srcfile=", "destfile=", "copy", "move"])
    except getopt.GetoptError:
        helpPrints()
        return None
    for opt, arg in opts:
        # BUG FIX: "--help" was declared in the long options above but the
        # old test `opt == '-h'` never matched it, so --help did nothing.
        if opt in ("-h", "--help"):
            helpPrints()
            exit()
        elif opt in ("-t", "--type"):
            # space-separated list of extensions, e.g. -t ".jpg .png"
            extType = arg.split(' ')
            print('File type is {0}'.format(extType))
        elif opt in ("-s", "--srcfile"):
            srcFile = arg
            print("Source Directory is {0}".format(srcFile))
        elif opt in ("-d", "--destfile"):
            destFile = arg
            print("Destination Directory is {0}".format(destFile))
        elif opt in ("-c", "--copy"):
            moveType = COPY
            # NOTE: shows real paths only if -s/-d preceded -c on the command line
            print('Copy the files from {0} to {1}'.format(srcFile, destFile))
        elif opt in ("-m", "--move"):
            moveType = MOVE
            print('Move the files from {0} to {1}'.format(srcFile, destFile))
    return [extType, moveType, srcFile, destFile]
def helpPrints():
    """Print the command-line usage summary for pyScraper."""
    usage_lines = (
        '\npyScraper.py <arguments> \n',
        '~~~ARGUMENT LIST~~~\n',
        '-t:\tfile extension to be moved/copied\t-t <type>\n',
        '-s:\tSource Directory\t-s <srcpath>\n',
        '-d:\tDestination Path\t-d <destpath>\n',
        '-c:\tCopy the file to the destination\n',
        '-m:\tMove the file from the source to the destination\n',
    )
    for text in usage_lines:
        print(text)
def main(argv):
    """Entry point: parse the CLI arguments and, when valid, run the scraper."""
    params = argumentExtraction(argv)
    if params is not None:
        scraper(params)
def scraper(scraperParams):
    """Walk the source tree and copy/move every file whose extension matches.

    scraperParams is the [extType, moveType, srcFile, destFile] list built by
    argumentExtraction().  Every transfer is appended to a _report.txt in the
    destination directory, along with a timestamp and summary statistics.
    """
    fileMoved = 0
    # Append mode: repeated runs accumulate history in the same report file.
    reportFile = open(os.path.join(scraperParams[DEST_DIR], REPORT_NAME), "a+")
    reportFile.write("{0}\n".format(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
    startTime = time.time()
    for root, _, files in os.walk(scraperParams[SCR_DIR], topdown=True):
        # Skip system/OS directories (Recycle, Windows, AppData, ...).
        ignoreDir = any(word for word in ignoreDirKeyWords if word in root)
        if(ignoreDir == False):
            print("root directory: {0}".format(root))
            for filename in files:
                print("filename: {0}".format(filename))
                _, extension = os.path.splitext(filename)
                # Case-insensitive match against the requested extensions --
                # assumes the -t values were supplied lower-case.
                extensionExists = extension.lower() in scraperParams[EXT_TYPE]
                if extensionExists:
                    moveFile = os.path.join(root, filename)
                    if(scraperParams[MOVE_TYPE] == COPY):
                        print('copy file {0}'.format(moveFile))
                        try:
                            dest = shutil.copy2(
                                moveFile, scraperParams[DEST_DIR])
                            print('file copied to {0}'.format(dest))
                            reportFile.write('{0}\t--->\t{1}\n'.format(moveFile, dest))
                            fileMoved += 1
                        # NOTE(review): the bare except keeps the walk going
                        # past any failed file, but it also swallows
                        # KeyboardInterrupt/SystemExit; `except (OSError,
                        # shutil.Error):` would be safer.
                        except:
                            print('FAILED to copy file {0}'.format(moveFile))
                    elif(scraperParams[MOVE_TYPE] == MOVE):
                        print('move file {0}'.format(moveFile))
                        try:
                            dest = shutil.move(
                                moveFile, scraperParams[DEST_DIR])
                            print('file moved to {0}'.format(dest))
                            reportFile.write('{0}\t--->\t{1}\n'.format(moveFile, dest))
                            fileMoved += 1
                        except:
                            print('FAILED to move file {0}'.format(moveFile))
    endTime = time.time()
    totalTime = endTime - startTime
    print("Time Taken: {0}".format(totalTime))
    print("Total Files Moved: {0}".format(fileMoved))
    reportFile.write("Time Taken: {0}\n".format(totalTime))
    reportFile.write("Total Files Moved: {0}".format(fileMoved))
    reportFile.close()
if __name__ == "__main__":
    # Drop the script name; pass only the real CLI arguments.
    main(sys.argv[1:])
|
elhachimi-ch/dst | src/data_science_toolkit/rl.py | <filename>src/data_science_toolkit/rl.py
import gym
import numpy as np
class Environment:
    """Convenience wrapper around an OpenAI Gym environment.

    Written against the classic Gym API where reset() returns the initial
    state and step() returns the 4-tuple (next_state, reward, done, info).
    Includes tabular value iteration / policy extraction for environments
    that expose a transition matrix P (e.g. FrozenLake).
    """
    def __init__(self, environment_name="FrozenLake-v0", is_slippery=True):
        """Create the underlying gym environment and reset it."""
        if environment_name == "FrozenLake-v0":
            # is_slippery only exists on FrozenLake, so pass it selectively.
            self.__environment = gym.make(environment_name, is_slippery=is_slippery)
        else:
            self.__environment = gym.make(environment_name)
        # BUG FIX: reset() used to return None, so this assignment wiped the
        # status that reset() had just stored; reset() now returns it.
        self.__status = self.reset()
    def describe(self):
        """Print a short description of the state and action spaces."""
        print("Environement states: ", self.__environment.observation_space)
        print("Availble actions: ", self.__environment.action_space)
        # env.P[state][action] to get one transition probability
        # The result is in the form of: [(transition probability, next state, reward, Is terminal state?)]
        print(gym.spaces)
        print(isinstance(self.__environment.observation_space, gym.spaces.box.Box))
        #print("Transition probabilities matrix ", self.__environment.P)
    def get_transition_probability_state_action(self, state, action):
        """[(transition probability, next state, reward, Is terminal state?)]"""
        return self.__environment.P[state][action]
    def render(self):
        """Show the environment."""
        self.__environment.render()
    def reset(self):
        """Put the agent back into the initial state and return the status.

        BUG FIX: previously stored the status without returning it, which
        broke `self.__status = self.reset()` in __init__.
        """
        self.__status = self.__environment.reset()
        return self.__status
    def close(self):
        """Close the environment and release its resources."""
        self.__environment.close()
    def step(self, action=None):
        """Take `action` (or a random sample when None) and store the result.

        The stored status is the gym 4-tuple (next_state, reward, done, info).
        """
        if action is None:
            action = self.__environment.action_space.sample()
        self.__status = self.__environment.step(action)
    def status(self):
        """The status is in the form: (next_state, reward, done, info)."""
        return self.__status
    def is_terminal_state(self):
        """Check whether the last step() landed in a terminal state.

        Only meaningful after step(); right after reset() the stored status
        is a bare state, not the 4-tuple.
        """
        if self.__status[2]:
            return True
        return False
    def random_episode(self, number_of_times_steps=10):
        """Run one random trajectory from the initial state to a final state.

        Possible only in an episodic environment, not a continuous one.
        NOTE(review): number_of_times_steps is currently unused -- the
        episode runs until a terminal state is reached.
        """
        t = 0
        print('Time Step 0 :')
        self.render()
        self.step()
        while not self.is_terminal_state():
            self.render()
            print('Time Step {} :'.format(t+1))
            self.step()
            t += 1
    def random_horizion(self, number_of_times_steps=10):
        """Random trajectory from the initial state for a fixed number of steps."""
        for i in range(number_of_times_steps):
            self.render()
            self.step()
    def all_environments(self):
        """Print every environment id registered with gym."""
        print(gym.envs.registry.all())
    def action_space_cardinal(self):
        """Number of discrete actions."""
        return self.__environment.action_space.n
    def state_space_cardinal(self):
        """Number of discrete states."""
        return self.__environment.observation_space.n
    def value_iteration(self, num_iterations=1000, threshold=1e-20, gamma=1.0):
        """Classic value iteration over the environment's transition matrix.

        Returns the state-value table; stops early once the total update
        delta drops below `threshold`.
        """
        value_table = np.zeros(self.state_space_cardinal())
        for i in range(num_iterations):
            updated_value_table = np.copy(value_table)
            for s in range(self.state_space_cardinal()):
                # Q(s, a) = sum over outcomes of p * (r + gamma * V(s'))
                Q_values = [sum([prob*(r + gamma * updated_value_table[s_])
                                 for prob, s_, r, _ in self.get_transition_probability_state_action(s, a)])
                            for a in range(self.action_space_cardinal())]
                value_table[s] = max(Q_values)
            if (np.sum(np.fabs(updated_value_table - value_table)) <= threshold):
                break
        return value_table
    def extract_policy(self, value_table, gamma=1.0):
        """Greedy policy (argmax over Q) for a given state-value table."""
        policy = np.zeros(self.state_space_cardinal())
        for s in range(self.state_space_cardinal()):
            Q_values = [sum([prob*(r + gamma * value_table[s_])
                             for prob, s_, r, _ in self.get_transition_probability_state_action(s, a)])
                        for a in range(self.action_space_cardinal())]
            policy[s] = np.argmax(np.array(Q_values))
        return policy
    def run_policy(self, policy):
        """Follow `policy` from a fresh reset for up to 50 steps, rendering."""
        self.reset()
        state = self.status()
        print(policy)
        for p in range(50):
            self.step(int(policy[state]))
            state = self.status()[0]
            self.render()
|
elhachimi-ch/dst | src/data_science_toolkit/gis.py | <filename>src/data_science_toolkit/gis.py
import numpy as np
from numpy.linalg import matrix_power
from dataframe import DataFrame
from rl import *
import pandas as pd
from .lib import Lib
import contextily as cx
from matplotlib import pyplot as plt
import geopandas as gpd
from shapely.geometry import Point
import xarray as xr
class GIS:
"""
GIS class
"""
def __init__(self):
self.data_layers = {}
self.fig, self.ax = plt.subplots(figsize=(17,17))
def add_data_layer(self, layer_path, layer_name, data_source='sf'):
if data_source == 'df':
self.data_layers[layer_name] = gpd.GeoDataFrame(layer_path)
elif data_source == 'sf':
self.data_layers[layer_name] = gpd.read_file(layer_path)
def get_data_layer(self, layer_name):
return self.data_layers.get(layer_name)
def get_shape(self, layer_name):
return self.data_layers[layer_name].shape
def add_random_series_column(self, layer_name, column_name='random',min=0, max=100, distrubution_type='random', mean=0, sd=1):
if distrubution_type == 'random':
series = pd.Series(np.random.randint(min, max, self.get_shape(layer_name)[0]))
elif distrubution_type == 'standard_normal':
series = pd.Series(np.random.standard_normal(self.get_shape(layer_name)[0]))
elif distrubution_type == 'normal':
series = pd.Series(np.random.normal(mean, sd, self.get_shape(layer_name)[0]))
else:
series = pd.Series(np.random.randn(self.get_shape(layer_name)[0]))
self.add_column(layer_name, series, column_name)
def join_layer(self, layer_name, geo_datframe_to_join, on):
self.data_layers[layer_name] = self.data_layers.get(layer_name).merge(geo_datframe_to_join, on=on)
def plot(self, layer_name, column4color=None, color=None, alpha=0.5, legend=False,
figsize_tuple=(15,10), cmap=None, ):
"""_summary_
Args:
layer_name (_type_): _description_
column4color (_type_, optional): _description_. Defaults to None.
color (_type_, optional): _description_. Defaults to None.
alpha (float, optional): _description_. Defaults to 0.5.
legend (bool, optional): _description_. Defaults to False.
figsize_tuple (tuple, optional): _description_. Defaults to (15,10).
cmap (str, optional): exmaple: 'Reds' for heatmaps. Defaults to None.
"""
layer = self.data_layers.get(layer_name).to_crs(epsg=3857)
layer.plot(ax=self.ax, alpha=alpha, edgecolor='k', color=color, legend=legend,
column=column4color, figsize=figsize_tuple, cmap=cmap)
cx.add_basemap(ax=self.ax, source=cx.providers.Esri.WorldImagery)
def show(self, layer_name=None, interactive_mode=False):
if interactive_mode is True:
return self.data_layers.get(layer_name).explore()
else:
self.ax.set_aspect('equal')
plt.show()
def get_crs(self, layer_name):
"""
Cordonate Reference System
EPSG: european petroleum survey group
"""
return self.get_data_layer(layer_name).crs
def reorder_columns(self, layer_name, new_order_as_list):
self.data_layers[layer_name].reindex_axis(new_order_as_list, axis=1)
def export(self, layer_name, file_name, file_format='geojson'):
if file_format == 'geojson':
self.data_layers[layer_name].to_file(file_name + '.geojson', driver='GeoJSON')
elif file_format == 'shapefile':
self.data_layers[layer_name].to_file(file_name + '.shp')
def to_crs(self, layer_name, epsg="3857"):
self.data = self.data_layers[layer_name].to_crs(epsg)
def set_crs(self, layer_name, epsg="3857"):
self.data = self.data_layers[layer_name].set_crs(epsg)
def show_points(self, x_y_csv_path, crs="3857"):
pass
def show_point(self, x_y_tuple, crs="3857"):
pass
def add_point(self, x_y_tuple, layer_name, crs="3857"):
point = Point(0.0, 0.0)
#self.__dataframe = self.get_dataframe().append(row_as_dict, ignore_index=True)
row_as_dict = {'geometry': point}
self.data_layers[layer_name].append(row_as_dict, ignore_index=True)
def new_data_layer(self, layer_name, crs="EPSG:3857"):
self.data_layers[layer_name] = gpd.GeoDataFrame(crs=crs)
self.data_layers[layer_name].crs = crs
def add_column(self, layer_name, column, column_name):
y = column
if (not isinstance(column, pd.core.series.Series or not isinstance(column, pd.core.frame.DataFrame))):
y = np.array(column)
y = np.reshape(y, (y.shape[0],))
y = pd.Series(y)
self.data_layers[layer_name][column_name] = y
def show_data_layer(self, layer_name, number_of_row=None):
if number_of_row is None:
print(self.get_data_layer(layer_name))
elif number_of_row < 0:
return self.get_data_layer(layer_name).tail(abs(number_of_row))
else:
return self.get_data_layer(layer_name).head(number_of_row)
def add_row(self, layer_name, row_as_dict):
self.data_layers[layer_name] = self.get_data_layer(layer_name).append(row_as_dict, ignore_index=True)
def get_row(self, layer_name, row_index, column=None):
if column is not None:
return self.data_layers[layer_name].loc[self.data_layers[layer_name][column] == row_index].reset_index(drop=True)
return self.data_layers[layer_name].iloc[row_index]
def get_layer_shape(self, layer_name):
"""
return (Number of lines, number of columns)
"""
return self.data_layers[layer_name].shape
def get_columns_names(self, layer_name):
header = list(self.data_layers[layer_name].columns)
return header
def drop_column(self, layer_name, column_name):
"""Drop a given column from the dataframe given its name
Args:
column (str): name of the column to drop
Returns:
[dataframe]: the dataframe with the column dropped
"""
self.data_layers[layer_name] = self.data_layers[layer_name].drop(column_name, axis=1)
return self.data_layers[layer_name]
def keep_columns(self, layer_name, columns_names_as_list):
for p in self.get_columns_names(layer_name):
if p not in columns_names_as_list:
self.data_layers[layer_name] = self.data_layers[layer_name].drop(p, axis=1)
def get_area_column(self, layer_name):
return self.get_data_layer(layer_name).area
def get_perimeter_column(self, layer_name):
return self.get_data_layer(layer_name).length
def get_row_area(self, layer_name, row_index):
return self.data_layers[layer_name].area.iloc[row_index]
def get_distance(self, layer_name, index_column, row_index_a, row_index_b):
if 1 == 1:
other = self.get_row(layer_name, row_index_b, index_column)
return self.get_row(layer_name, row_index_a, index_column).distance(other)
def filter_dataframe(self, layer_name, column, func_de_decision, in_place=True, *args):
if in_place is True:
if len(args) == 2:
self.set_dataframe(
self.data_layers[layer_name].loc[self.get_column(column).apply(func_de_decision, args=(args[0], args[1]))])
else:
self.set_dataframe(
self.data_layers[layer_name].loc[self.get_column(column).apply(func_de_decision)])
else:
if len(args) == 2:
return self.data_layers[layer_name].loc[self.get_column(column).apply(func_de_decision, args=(args[0], args[1]))]
else:
return self.data_layers[layer_name].loc[self.get_column(column).apply(func_de_decision)]
def transform_column(self, layer_name, column_to_trsform, column_src, fun_de_trasformation, in_place= True,*args):
if in_place is True:
if (len(args) != 0):
self.set_column(layer_name, column_to_trsform, self.get_column(layer_name, column_src).apply(fun_de_trasformation, args=(args[0],)))
else:
self.set_column(layer_name, column_to_trsform, self.get_column(layer_name, column_src).apply(fun_de_trasformation))
else:
if (len(args) != 0):
return self.get_column(layer_name, column_src).apply(fun_de_trasformation, args=(args[0],))
else:
return self.get_column(layer_name, column_src).apply(fun_de_trasformation)
def set_column(self, layer_name, column_name, new_column):
self.data_layers[layer_name][column_name] = new_column
def get_column(self, layer_name, column):
return self.data_layers[layer_name][column]
def reindex_dataframe(self, layer_name, index_as_liste=None, index_as_column_name=None):
if index_as_liste is not None:
new_index = new_index = index_as_liste
self.data_layers[layer_name].index = new_index
if index_as_column_name is not None:
self.data_layers[layer_name].set_index(index_as_column_name, inplace=True)
if index_as_column_name is None and index_as_liste is None:
new_index = pd.Series(np.arange(self.get_shape()[0]))
self.data_layers[layer_name].index = new_index
def get_era5_land_grib_as_dataframe(self, file_path, layer_name):
grip_path = file_path
ds = xr.load_dataset(grip_path, engine="cfgrib")
self.data_layers[layer_name] = DataFrame()
self.data_layers[layer_name].set_dataframe(ds.to_dataframe())
return ds.to_dataframe()
def rename_columns(self, layer_name, column_dict_or_all_list, all_columns=False):
if all_columns is True:
types = {}
self.data_layers[layer_name].columns = column_dict_or_all_list
for p in column_dict_or_all_list:
types[p] = str
self.data_layers[layer_name] = self.get_dataframe().astype(types)
else:
self.data_layers[layer_name].rename(columns=column_dict_or_all_list, inplace=True)
def calculate_area_as_column(self, layer_name):
self.add_column(layer_name, self.get_area_column(layer_name), 'area')
def calculate_perimeter_as_column(self, layer_name):
self.add_column(layer_name, self.get_perimeter_column(layer_name), 'perimeter')
def count_occurence_of_each_row(self, layer_name, column_name):
return self.data_layers[layer_name].pivot_table(index=[column_name], aggfunc='size')
def add_transformed_columns(self, layer_name, dest_column_name="new_column", transformation_rule="okk*2"):
    """Add a column computed by evaluating `transformation_rule` over existing columns.

    Any column whose name appears (as a substring) in the rule is exposed to
    the expression, e.g. rule "price*2" with a 'price' column.
    """
    # SECURITY NOTE(review): `eval` executes arbitrary code -- only pass
    # trusted expressions here.
    columns_names = self.get_columns_names(layer_name)
    columns_dict = {}
    for column_name in columns_names:
        if column_name in transformation_rule:
            columns_dict.update({column_name: self.get_column(layer_name, column_name)})
    y_transformed = eval(transformation_rule, columns_dict)
    self.data_layers[layer_name][dest_column_name] = y_transformed
@staticmethod
def new_geodaraframe_from_points():
    # Builds a 'valves' point layer from the start/end coordinates of each
    # 'pipelines' row (two valve points per pipeline).
    # NOTE(review): relies on a module-level `map` object (shadowing the
    # builtin) and a `Point` class that are not defined in this file --
    # confirm where they come from; looks like a script left inside the class.
    map.new_data_layer('valves', crs="ESRI:102191")
    for p in range(map.get_layer_shape('pipelines')[0]):
        #print(map.get_row('pipelines', p))
        vi = Point(map.get_row('pipelines', p)['X_Start'], map.get_row('pipelines', p)['Y_Start'])
        vf = Point(map.get_row('pipelines', p)['X_End'], map.get_row('pipelines', p)['Y_End'])
        id_pipeline = map.get_row('pipelines', p)['Nom_CANAL']
        row_as_dict = {'id_pipeline': id_pipeline,
                       'geometry': vi}
        map.add_row('valves', row_as_dict)
        row_as_dict = {'id_pipeline': id_pipeline,
                       'geometry': vf}
        map.add_row('valves', row_as_dict)
elhachimi-ch/dst | test/test.py | import data_science_toolkit.dataframe as dst
# Smoke test: construct an empty DataFrame wrapper and print its contents.
data = dst.DataFrame()
data.show()
elhachimi-ch/dst | setup.py | import setuptools
# Read the README so PyPI shows the full project description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name="data-science-toolkit",
    version="0.0.980",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Data Science Toolkit (DST) is a Python library that helps implement data science related project with ease.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/elhachimi-ch/dst",
    project_urls={
        "Bug Tracker": "https://github.com/elhachimi-ch/dst/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    # NOTE(review): >=3.5 looks optimistic -- tensorflow/xgboost below need a
    # much newer interpreter; confirm the real floor.
    python_requires=">=3.5",
    install_requires=[
        "setuptools>=42",
        "wheel",
        "pandas",
        "numpy",
        "tensorflow",
        "scikit-learn",
        "seaborn",
        "matplotlib",
        "wordcloud",  # fixed: was listed twice
        "keras",
        "plotly",
        "xgboost",
        "opencv-python",
        "scikit-image",
        "unidecode",
        "emoji",
        "textblob",
        "nltk",
    ],
)
|
elhachimi-ch/dst | src/data_science_toolkit/imagefactory.py | <gh_stars>1-10
import cv2
import imutils
import imutils.perspective
import numpy as np
from matplotlib import pyplot as plt
from skimage.filters import threshold_local
class ImageFactory:
    """Small OpenCV/imutils facade around a single BGR image."""

    def __init__(self, img_link=None, url=False):
        """Load an image from a local path, or from a URL when url=True.

        Fixed: with img_link=None the old code still called cv2.imread(None),
        which fails on modern OpenCV; an empty factory is now allowed (several
        helpers below build one and then call set_image()).
        """
        self.__img = None
        if img_link is not None:
            if url:
                self.__img = imutils.url_to_image(img_link)
            else:
                self.__img = cv2.imread(img_link)

    def set_image(self, img):
        self.__img = img

    def get_image(self):
        return self.__img

    def reshape(self, new_shape_tuple):
        """Reshape the raw pixel array to `new_shape_tuple`."""
        self.set_image(np.reshape(self.get_image(), new_shape_tuple))

    def show(self):
        """Display the image with matplotlib (converts BGR to RGB first)."""
        plt.figure("Image")
        plt.imshow(imutils.opencv2matplotlib(self.__img))
        plt.show()

    def resize(self, new_size_tuple, auto=False):
        """Resize to (height, width); auto=True keeps aspect ratio from the width."""
        if auto:
            self.__img = imutils.resize(self.__img, width=new_size_tuple[1])
        else:
            self.__img = cv2.resize(self.__img, new_size_tuple)

    def crop(self, top_left_tuple=(0, 0), bottom_right_tuple=(50, 50)):
        """Crop to the rectangle between two (x, y) corners."""
        self.set_image(self.__img[top_left_tuple[1]:bottom_right_tuple[1], top_left_tuple[0]:bottom_right_tuple[0]])

    def to_gray_scale(self):
        self.set_image(cv2.cvtColor(self.__img, cv2.COLOR_BGR2GRAY))

    def rotat_image(self, angle):
        # (misspelled name kept for backward compatibility with callers)
        self.set_image(imutils.rotate(self.__img, angle))

    def find_contour(self, seuil=75, auto_threshold=True, all_points=False, auto_canny=True):
        """Find foreground contours, draw them on the image and return their points.

        all_points=False: returns the best 4-point ("screen") approximation;
        all_points=True: returns every contour point.
        """
        if auto_threshold:
            thresh = ImageFactory()
            thresh.set_image(self.get_image())
            thresh.set_image(thresh.get_contour_canny(auto=auto_canny, seuil=seuil))
        else:
            thresh = ImageFactory()
            thresh.set_image(self.get_image())
            thresh.to_gray_scale()
            thresh.gaussian_blur()
            thresh.adaptive_treshold()
        screen_cnt = []
        if not all_points:
            cnts = cv2.findContours(thresh.get_image(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]
            for c in cnts:
                # contour perimeter
                peri = cv2.arcLength(c, True)
                # approximate the contour with fewer vertices
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                # a 4-vertex approximation is assumed to be the target screen
                if len(approx) == 4:
                    screen_cnt = approx
                    break
        else:
            screen_cnt = cv2.findContours(thresh.get_image(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            screen_cnt = imutils.grab_contours(screen_cnt)
        # draw each contour with a 3px purple outline
        for c in screen_cnt:
            cv2.drawContours(self.__img, [c], -1, (240, 0, 159), 3)
        pts = [tuple(p[0]) for p in screen_cnt]
        return pts

    def get_contour_canny(self, auto=True, seuil=75):
        """Return a Canny edge map of a blurred grayscale copy."""
        gray = ImageFactory()
        gray.set_image(self.get_image())
        gray.to_gray_scale()
        gray.gaussian_blur()
        if auto:
            return imutils.auto_canny(gray.get_image())
        return cv2.Canny(gray.get_image(), seuil, 200)

    def gaussian_blur(self, taille_mask=(5, 5)):
        self.set_image(cv2.GaussianBlur(self.get_image(), taille_mask, 0))

    def get_median_blur(self):
        # NOTE(review): despite the `get_` name this mutates the image in place.
        self.set_image(cv2.medianBlur(self.get_image(), 5))

    def scan(self):
        """Give the image a scanned, black-and-white paper effect."""
        self.to_gray_scale()
        t = threshold_local(self.__img, 11, offset=10, method="gaussian")
        warped = (self.__img > t).astype("uint8") * 255
        self.set_image(warped)

    def perspective_vue_from_4_points(self, points_list=None):
        """Apply a four-point transform for a birds-eye view; defaults to the image corners."""
        if points_list is None:
            pts = [(0, 0), (self.get_shape()[1], 0), (self.get_shape()[1], self.get_shape()[0]), (0,
                   self.get_shape()[0])]
        else:
            pts = points_list
        pts = np.array(pts)
        self.set_image(imutils.perspective.four_point_transform(self.get_image(), pts))

    def find_skeleton(self):
        pass

    def get_shape(self):
        return self.get_image().shape

    def write_text(self, text_to_write, text_position_tuple=(0, 0), color_gbr_tuple=(0, 255, 0), font_size=0.7):
        cv2.putText(self.__img, text_to_write, text_position_tuple,
                    cv2.FONT_HERSHEY_SIMPLEX, font_size, color_gbr_tuple, 2)

    def merge(self, image_link, alpha=None):
        """Blend another image in: plain add, or weighted by `alpha` when given."""
        img = ImageFactory(image_link)
        img_shape = self.get_shape()
        if img.get_shape() != img_shape:
            # resize() expects (height, width) -- NOTE(review): the swap below
            # mirrors the original code; confirm the intended axis order.
            img_shape = (img_shape[1], img_shape[0])
            img.resize(img_shape[0:2])
        if alpha is None:
            self.set_image(cv2.add(self.get_image(), img.get_image()))
        else:
            self.set_image(cv2.addWeighted(self.get_image(), alpha, img.get_image(), 1 - alpha, 0))

    def rectangle(self, top_left_tuple=(0, 0), bottom_right_tuple=(10, 10), rect_color_tuple=(0, 0, 255)):
        cv2.rectangle(self.__img, top_left_tuple, bottom_right_tuple, rect_color_tuple, 2)

    def cercle(self, centre_tuple=(0, 0), rayon=20, cer_color_tuple=(255, 0, 0)):
        cv2.circle(self.__img, centre_tuple, rayon, cer_color_tuple, -1)

    def line(self, point_a=(0, 0), point_b=(10, 10), line_color=(0, 0, 255)):
        cv2.line(self.__img, point_a, point_b, line_color, 5)

    def threshold(self, seuil=200, inversed_binary=True):
        """Binarize at `seuil`, inverted when inversed_binary is True.

        Fixed: THRESH_BINARY was applied unconditionally after the INV branch,
        silently clobbering the inverted result.
        """
        if inversed_binary:
            self.set_image(cv2.threshold(self.__img, seuil, 255, cv2.THRESH_BINARY_INV)[1])
        else:
            self.set_image(cv2.threshold(self.__img, seuil, 255, cv2.THRESH_BINARY)[1])

    def adaptive_treshold(self):
        self.set_image(cv2.adaptiveThreshold(self.__img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2))

    def save(self, image_name_path='image.jpg'):
        cv2.imwrite(image_name_path, self.get_image())

    def get_bgr_layers(self):
        # Fixed: numpy arrays have no .split(); cv2.split returns the channels.
        return cv2.split(self.get_image())

    def get_histogramme(self):
        return cv2.calcHist([self.get_image()], [0], None, [256], [0, 256])

    def show_histograme(self, rgb=True):
        """Plot the image next to its per-channel (rgb=True) or gray histogram."""
        if rgb:
            b, g, r = cv2.split(self.get_image())
            fig = plt.figure(figsize=(8, 4))
            ax = fig.add_subplot(121)
            ax.imshow(self.get_image()[..., ::-1])
            ax = fig.add_subplot(122)
            for x, c in zip([b, g, r], ["b", "g", "r"]):
                xs = np.arange(256)
                ys = cv2.calcHist([x], [0], None, [256], [0, 256])
                ax.plot(xs, ys.ravel(), color=c)
        else:
            fig = plt.figure(figsize=(8, 4))
            ax = fig.add_subplot(121)
            img = ImageFactory()
            img.set_image(self.get_image())
            img.to_rgb()
            ax.imshow(img.get_image())
            ax = fig.add_subplot(122)
            xs = np.arange(256)
            ys = cv2.calcHist([self.get_image()], [0], None, [256], [0, 256])
            ax.plot(xs, ys.ravel(), color='black')
            ax.set_xlabel('X')
            ax.set_ylabel('Y')
        plt.show()

    def to_rgb(self):
        self.__img = cv2.cvtColor(self.get_image(), cv2.COLOR_BGR2RGB)

    @staticmethod
    def find_function(func):
        print(imutils.find_function(func))
elhachimi-ch/dst | src/data_science_toolkit/dataframe.py | from datetime import timedelta
from math import ceil
import pandas as pd
from pyparsing import col
import scipy.sparse
from sklearn.decomposition import PCA
from sklearn.feature_extraction import DictVectorizer
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from .lib import Lib
from .vectorizer import Vectorizer
from wordcloud import WordCloud, STOPWORDS
from matplotlib import pyplot as plt
from sklearn.preprocessing import minmax_scale
from sklearn.compose import ColumnTransformer
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator as SG
from sklearn.datasets import load_iris, load_boston
from collections import Counter
from .chart import Chart
import numpy as np
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
class DataFrame:
    """Convenience wrapper around a pandas DataFrame with loading, cleaning,
    scaling and NLP helpers."""
    # NOTE(review): these are class attributes (shared until shadowed by an
    # instance assignment) -- confirm the sharing is intentional.
    __vectorizer = None
    __generator = None
def __init__(self, data_link=None, columns_names_as_list=None, data_types_in_order=None, delimiter=',',
             data_type='csv', has_header=True, line_index=None, skip_empty_line=False, sheet_name='Sheet1'):
    """Build the wrapper from a path, dict, matrix, list or existing frame.

    data_type selects the loader: 'csv', 'json', 'xls', 'pkl', 'dict',
    'matrix', 'list' or 'df' (data_link already a pandas DataFrame).
    columns_names_as_list / data_types_in_order optionally rename and retype
    the loaded columns; line_index replaces the row index.
    With data_link=None an empty frame is created.
    NOTE(review): `skip_empty_line` is accepted but never used -- confirm intent.
    """
    if data_link is not None:
        if data_type == 'csv':
            # NOTE(review): error_bad_lines was removed in pandas>=2.0
            # (replaced by on_bad_lines); kept as-is for the pinned pandas.
            if has_header is True:
                self.__dataframe = pd.read_csv(data_link, encoding='utf-8', delimiter=delimiter,
                                               low_memory=False, error_bad_lines=False, skip_blank_lines=False)
            else:
                self.__dataframe = pd.read_csv(data_link, encoding='utf-8', delimiter=delimiter,
                                               low_memory=False, error_bad_lines=False, skip_blank_lines=False,
                                               header=None)
        elif data_type == 'json':
            self.__dataframe = pd.read_json(data_link, encoding='utf-8')
        elif data_type == 'xls':
            self.__dataframe = pd.read_excel(data_link, sheet_name=sheet_name)
        elif data_type == 'pkl':
            self.__dataframe = pd.read_pickle(data_link)
        elif data_type == 'dict':
            self.__dataframe = pd.DataFrame.from_dict(data_link)
        elif data_type == 'matrix':
            index_name = [i for i in range(len(data_link))]
            colums_name = [i for i in range(len(data_link[0]))]
            self.__dataframe = pd.DataFrame(data=data_link, index=index_name, columns=colums_name)
        elif data_type == 'list':
            y = data_link
            # Fixed: parentheses were misplaced -- `isinstance(y, Series or
            # not isinstance(y, DataFrame))` only ever tested for Series.
            if not isinstance(y, (pd.Series, pd.DataFrame)):
                y = np.array(y)
                y = np.reshape(y, (y.shape[0],))
                y = pd.Series(y)
            self.__dataframe = pd.DataFrame()
            if columns_names_as_list is not None:
                self.__dataframe[columns_names_as_list[0]] = y
            else:
                self.__dataframe['0'] = y
        elif data_type == 'df':
            self.__dataframe = data_link
        types = {}
        if data_types_in_order is not None and columns_names_as_list is not None:
            self.__dataframe.columns = columns_names_as_list
            for i in range(len(columns_names_as_list)):
                types[columns_names_as_list[i]] = data_types_in_order[i]
        elif columns_names_as_list is not None:
            # names only: rename and cast everything to str (original behavior)
            self.__dataframe.columns = columns_names_as_list
            for p in columns_names_as_list:
                types[p] = str
        self.__dataframe = self.get_dataframe().astype(types)
        if line_index is not None:
            self.__dataframe.index = line_index
    else:
        self.__dataframe = pd.DataFrame()
def get_generator(self):
    """Return the stored timeseries generator (see set_generator)."""
    return self.__generator

def remove_stopwords(self, column, language_or_stopwords_list='english', in_place=True):
    """Strip stopwords from every document in `column`.

    `language_or_stopwords_list` is either an explicit list of stopwords, the
    string 'arabic' (loaded from a bundled CSV), or an NLTK language name.
    """
    if isinstance(language_or_stopwords_list, list) is True:
        stopwords = language_or_stopwords_list
    elif language_or_stopwords_list == 'arabic':
        stopwords = Lib.read_text_file_as_list('data/arabic_stopwords.csv')
    else:
        # downloads the NLTK corpus on first use (network access)
        nltk.download('stopwords')
        stopwords = nltk.corpus.stopwords.words(language_or_stopwords_list)
    self.transform_column(column, column, DataFrame.remove_stopwords_lambda, in_place, stopwords)
    return self.__dataframe
@staticmethod
def remove_stopwords_lambda(document, stopwords_list):
    """Lowercase `document` and drop every token present in `stopwords_list`."""
    lowered = str.lower(document)
    tokens = word_tokenize(lowered)
    kept = [token for token in tokens if token not in stopwords_list]
    return ' '.join(kept)
def add_random_series_column(self, column_name='random', min=0, max=100, distrubution_type='random', mean=0, sd=1):
    """Append a random column: 'random' (uniform ints in [min, max)),
    'standard_normal', 'normal' (mean/sd), otherwise randn."""
    # NOTE: `min`/`max` shadow builtins; kept for interface compatibility.
    if distrubution_type == 'random':
        series = pd.Series(np.random.randint(min, max, self.get_shape()[0]))
    elif distrubution_type == 'standard_normal':
        series = pd.Series(np.random.standard_normal(self.get_shape()[0]))
    elif distrubution_type == 'normal':
        series = pd.Series(np.random.normal(mean, sd, self.get_shape()[0]))
    else:
        series = pd.Series(np.random.randn(self.get_shape()[0]))
    self.add_column(series, column_name)
    return self.__dataframe

def drop_full_nan_columns(self):
    """Drop every column whose values are 100% NaN."""
    for c in self.__dataframe.columns:
        miss = self.__dataframe[c].isnull().sum()
        missing_data_percent = round((miss/self.get_shape()[0])*100, 2)
        if missing_data_percent == 100:
            self.drop_column(c)

def drop_columns_with_nan_threshold(self, threshold=0.5):
    """Drop columns whose NaN fraction is >= `threshold` (0..1)."""
    for c in self.__dataframe.columns:
        miss = self.__dataframe[c].isnull().sum()
        missing_data_percent = round((miss/self.get_shape()[0])*100, 2)
        if missing_data_percent >= threshold*100:
            self.drop_column(c)

def get_index(self, as_list=True):
    """Return the row index, as a plain list by default."""
    if as_list is True:
        return self.__dataframe.index.to_list()
    return self.__dataframe.index

def add_time_serie_row(self, date_column, value_column, value, date_format='%Y'):
    """Append one observation dated one day after the current last index entry."""
    last_date = self.get_index()[-1] + timedelta(days=1)
    dataframe = DataFrame([{value_column: value, date_column: last_date}], data_type='dict')
    dataframe.to_time_series(date_column, value_column, one_row=True, date_format=date_format)
    self.append_dataframe(dataframe.get_dataframe())

def set_generator(self, generator):
    """Store a timeseries generator for later retrieval via get_generator."""
    self.__generator = generator

def set_dataframe(self, data, data_type='df'):
    """Replace the wrapped frame; data_type 'matrix' builds one from a 2D list."""
    if data_type == 'matrix':
        index_name = [i for i in range(len(data))]
        colums_name = [i for i in range(len(data[0]))]
        self.__dataframe = pd.DataFrame(data=data, index=index_name, columns=colums_name)
    elif data_type == 'df':
        self.__dataframe = data

def get_columns_types(self, show=True):
    """Return (and optionally print) per-column dtypes."""
    types = self.get_dataframe().dtypes
    if show:
        print(types)
    return types

def set_data_types(self, column_dict_types):
    """Cast columns according to a {column: dtype} mapping."""
    self.__dataframe = self.get_dataframe().astype(column_dict_types)

def set_same_type(self, same_type='float64'):
    """Cast every column to the same dtype (e.g. 'float64', 'object')."""
    for p in self.get_columns_names():
        self.set_column_type(p, same_type)

def describe(self, show=True):
    """Return (and optionally print) pandas' summary statistics."""
    description = self.get_dataframe().describe()
    if show:
        print(description)
    return description

def reset_index(self, drop=True):
    """Reset the index; drop=False keeps the old index as a column."""
    if drop is True:
        self.set_dataframe(self.__dataframe.reset_index(drop=True))
    else:
        self.set_dataframe(self.__dataframe.reset_index())

def get_dataframe_as_sparse_matrix(self):
    """Return the frame as a scipy CSR sparse matrix."""
    return scipy.sparse.csr_matrix(self.__dataframe.to_numpy())

def get_column_as_list(self, column):
    """Return the values of `column` as a plain list."""
    return list(self.get_column(column))

def get_column_as_joined_text(self, column):
    """Join all values of a string column with spaces into one document."""
    return ' '.join(list(self.get_column(column)))

def rename_index(self, new_name):
    """Rename the index itself (not the row labels)."""
    self.__dataframe.index.rename(new_name, inplace=True)
    return self.get_dataframe()

def get_term_doc_matrix_as_df(self, text_column_name, vectorizer_type='count'):
    """Replace the frame with a term-document matrix built from a text column."""
    corpus = list(self.get_column(text_column_name))
    indice = ['doc' + str(i) for i in range(len(corpus))]
    v = Vectorizer(corpus, vectorizer_type=vectorizer_type)
    self.set_dataframe(DataFrame(v.get_sparse_matrix().toarray(), v.get_features_names(),
                                 line_index=indice, data_type='matrix').get_dataframe())
def get_dataframe_from_dic_list(self, dict_list):
    """Vectorize a list of dicts (one row each) into the wrapped frame."""
    v = DictVectorizer()
    matrice = v.fit_transform(dict_list)
    self.__vectorizer = v
    # Fixed: the dense matrix must be loaded with data_type='matrix' (the
    # default 'csv' path tries to read it as a file); also switched to the
    # non-deprecated get_feature_names_out() spelling.
    self.set_dataframe(DataFrame(matrice.toarray(), list(v.get_feature_names_out()),
                                 data_type='matrix').get_dataframe())
def check_decision_function_on_column(self, column, decision_func):
    """Return True iff `decision_func` holds for every value of `column`."""
    results = self.get_column(column).apply(decision_func)
    return all(results)
def show_word_occurrences_plot(self, column_name, most_common=50):
    """Generating word occurrences plot from a column
    Args:
        column_name (_type_): column to be used
        most_common (int, optional): number of most frequent term to use. Defaults to 50.
    """
    text = self.get_column_as_joined_text(column_name)
    counter = Counter(text.split(' '))
    data = DataFrame(counter.most_common(most_common), ['term', 'count'], data_type='dict', data_types_in_order=[str,int])
    chart = Chart(data.get_dataframe(), column4x='term', chart_type='bar')
    chart.add_data_to_show('count')
    chart.config('Term occurrences bar chart', 'Terms', 'Occurrences', titile_font_size=30,)
    chart.show()

def set_dataframe_index(self, liste_indices):
    """Replace the row index with `liste_indices`."""
    self.__dataframe.index = liste_indices

def get_shape(self):
    """Return (rows, columns)."""
    return self.__dataframe.shape

def set_column(self, column_name, new_column):
    """Assign `new_column` under `column_name` (created if missing)."""
    self.__dataframe[column_name] = new_column

def set_column_type(self, column, column_type):
    """Cast one column to `column_type`."""
    self.__dataframe[column] = self.__dataframe[column].astype(column_type)

def get_lines_columns(self, lines, columns):
    """Select a sub-frame: label-based (.loc) when `columns` are all strings,
    positional (.iloc) otherwise."""
    if Lib.check_all_elements_type(columns, str):
        return self.get_dataframe().loc[lines, columns]
    return self.get_dataframe().iloc[lines, columns]
def get_n_rows_as_dataframe(self, number_of_row=10):
    """Return the first `number_of_row` rows, or the last |n| rows when negative."""
    frame = self.get_dataframe()
    if number_of_row >= 0:
        return frame.head(number_of_row)
    return frame.tail(-number_of_row)
def get_column(self, column):
    """Return one column as a Series."""
    return self.get_dataframe()[column]

def get_columns(self, columns_names_as_list):
    """Return a sub-frame with the given columns."""
    return self.get_dataframe()[columns_names_as_list]

def rename_columns(self, column_dict_or_all_list, all_columns=False):
    """Rename columns: full ordered list (all_columns=True, also casts all to
    str) or an {old: new} mapping applied in place."""
    if all_columns is True:
        types = {}
        self.__dataframe.columns = column_dict_or_all_list
        for p in column_dict_or_all_list:
            types[p] = str
        self.__dataframe = self.get_dataframe().astype(types)
    else:
        self.get_dataframe().rename(columns=column_dict_or_all_list, inplace=True)
def add_column(self, column, column_name):
    """Attach `column` under `column_name`, aligning on the current index.

    Fixed: the isinstance test had misplaced parentheses
    (`isinstance(y, Series or not isinstance(y, DataFrame))`) and only ever
    checked for Series; both Series and DataFrame now skip the numpy round-trip.
    """
    y = column
    if not isinstance(y, (pd.Series, pd.DataFrame)):
        y = np.array(y)
        y = np.reshape(y, (y.shape[0],))
        y = pd.Series(y, self.get_index())
    self.__dataframe[column_name] = y
def add_transformed_columns(self, dest_column_name="new_column", transformation_rule="okk*2"):
    """Add a column computed by evaluating `transformation_rule` over the
    columns whose names appear (as substrings) in the rule."""
    # SECURITY NOTE(review): `eval` executes arbitrary code -- only pass
    # trusted expressions here.
    columns_names = self.get_columns_names()
    columns_dict = {}
    for column_name in columns_names:
        if column_name in transformation_rule:
            columns_dict.update({column_name: self.get_column(column_name)})
    y_transformed = eval(transformation_rule, columns_dict)
    self.__dataframe[dest_column_name] = y_transformed

def add_one_value_column(self, column_name, value, length=None):
    """Add a constant (float) column of `length` rows, defaulting to the frame's row count."""
    if length is not None:
        y = np.zeros(length)
        y.fill(value)
    else:
        y = np.zeros((self.get_shape()[0]))
        y.fill(value)
    self.__dataframe[column_name] = y
    return self.get_dataframe()

def get_dataframe(self):
    """Return the wrapped pandas DataFrame."""
    return self.__dataframe

def request(self, select, order_by=None, ascending=None):
    """SQL-ish select: optionally sort (in place!) by `order_by`, then project `select`."""
    if order_by is not None:
        self.__dataframe = self.__dataframe.sort_values(order_by, ascending=ascending)
    return self.__dataframe[select]

def contains(self, column, regex):
    """Boolean mask of rows whose `column` matches `regex`."""
    return self.get_dataframe()[column].str.contains(regex)

def to_upper_column(self, column):
    """Uppercase a string column in place."""
    self.set_column(column, self.get_column(column).str.upper())

def to_lower_column(self, column):
    """Lowercase a string column in place."""
    self.set_column(column, self.get_column(column).str.lower())
def sub(self, column, pattern, replacement):
    """Replace `pattern` with `replacement` inside the string column `column`.

    Fixed: the result was assigned to the whole internal frame (turning the
    DataFrame into a single Series); it now updates just the target column.
    """
    self.set_column(column, self.get_dataframe()[column].str.replace(pattern, replacement))
def drop_column(self, column_name):
    """Drop a given column from the dataframe given its name
    Args:
        column_name (str): name of the column to drop
    Returns:
        [dataframe]: the dataframe with the column dropped
    """
    self.__dataframe = self.__dataframe.drop(column_name, axis=1)
    return self.__dataframe

def index_to_column(self):
    """Demote the current index to a regular column and reset to 0..n-1."""
    self.__dataframe.reset_index(drop=False, inplace=True)

def drop_columns(self, columns_names_as_list):
    """Drop each of the listed columns."""
    for p in columns_names_as_list:
        self.__dataframe = self.__dataframe.drop(p, axis=1)
    return self.__dataframe
def reorder_columns(self, new_order_as_list):
    """Reorder (and subset) columns to `new_order_as_list`.

    Fixed: `reindex_axis` was removed from pandas and the result was being
    discarded, so this method previously had no effect.
    """
    self.__dataframe = self.__dataframe.reindex(columns=new_order_as_list)
    return self.__dataframe
def keep_columns(self, columns_names_as_list):
    """Drop every column NOT in `columns_names_as_list`."""
    for p in self.get_columns_names():
        if p not in columns_names_as_list:
            self.__dataframe = self.__dataframe.drop(p, axis=1)
    return self.__dataframe
def add_row(self, row_as_dict):
    """Append one row (given as {column: value}) to the frame.

    Uses pd.concat because DataFrame.append was removed in pandas 2.0;
    matches the old append(..., ignore_index=True) behavior.
    """
    new_row = pd.DataFrame([row_as_dict])
    self.__dataframe = pd.concat([self.get_dataframe(), new_row], ignore_index=True)
def pivot(self, index_columns_as_list, column_columns_as_list, column_of_values, agg_func):
    """Return a pivot table aggregating `column_of_values` with `agg_func`."""
    return self.get_dataframe().pivot_table(index=index_columns_as_list, columns=column_columns_as_list, values=column_of_values, aggfunc=agg_func)

def group_by(self, column):
    """Replace the frame with per-group counts grouped by `column`."""
    self.set_dataframe(self.get_dataframe().groupby(column).count())

def missing_data_checking(self, column=None):
    """Print a missing-data report, for one column or for every column."""
    if column is not None:
        if any(pd.isna(self.get_dataframe()[column])) is True:
            print("Missed data found in column " + column)
        else:
            print("No missed data in column " + column)
    else:
        for c in self.__dataframe.columns:
            miss = self.__dataframe[c].isnull().sum()
            if miss>0:
                missing_data_percent = round((miss/self.get_shape()[0])*100, 2)
                print("{} has {} missing value(s) which represents {}% of dataset size".format(c,miss, missing_data_percent))
            else:
                print("{} has NO missing value!".format(c))

def missing_data_column_percent(self, column_name):
    """Return the NaN fraction (0..1) of one column."""
    return self.__dataframe[column_name].isnull().sum()/self.get_shape()[0]

def missing_data(self, filling_dict_colmn_val=None, drop_row_if_nan_in_column=None):
    """Either drop rows with NaN in one column, or fill NaNs from a {column: value} dict."""
    if drop_row_if_nan_in_column is not None:
        self.set_dataframe(self.__dataframe[self.__dataframe[drop_row_if_nan_in_column].notna()])
    else:
        self.get_dataframe().fillna(filling_dict_colmn_val, inplace=True)

def get_row(self, row_index):
    """Return one row: positionally (.iloc) for an int index, by label (.loc) otherwise."""
    if isinstance(row_index, int):
        return self.get_dataframe().iloc[row_index]
    return self.get_dataframe().loc[row_index]
def set_row(self, column_name, row_index, new_value):
    """Set one cell of `column_name`: positionally for an int `row_index`, by label otherwise.

    Fixed: the label-based write previously ran unconditionally after the
    positional one, which could raise or hit the wrong row for int positions.
    NOTE(review): this is chained indexing (df[col].iloc[i] = v); it may warn
    and not propagate on copy-on-write pandas -- consider df.loc/iloc 2-D sets.
    """
    if isinstance(row_index, int):
        self.__dataframe[column_name].iloc[row_index] = new_value
    else:
        self.__dataframe[column_name].loc[row_index] = new_value
def replace_column(self, column, pattern, replacement, regex=False, number_of_time=-1, case_sensetivity=False):
    """String-replace inside `column`; `number_of_time` = -1 replaces all occurrences."""
    self.set_column(column, self.get_column(column).str.replace(pattern, replacement, regex=regex, n=number_of_time,
                                                                case=case_sensetivity))

def replace_num_data(self, val, replacement):
    """Replace every occurrence of `val` across the whole frame."""
    self.get_dataframe().replace(val, replacement, inplace=True)

def map_function(self, func, **kwargs):
    """Apply `func` element-wise to the whole frame."""
    # NOTE(review): applymap is deprecated in pandas>=2.1 (use DataFrame.map).
    self.__dataframe = self.get_dataframe().applymap(func, **kwargs)
def apply_fun_to_column(self, column, func, in_place=True):
    """Apply `func` element-wise to `column`; write back when in_place is True,
    otherwise return the transformed Series."""
    transformed = self.get_column(column).apply(func)
    if in_place is True:
        self.set_column(column, transformed)
        return None
    return transformed
def convert_column_type(self, column_name, new_type='float64'):
    """Convert the type of the column
    Args:
        column_name (str): Name of the column to convert
    Retruns (dataframe): New dataframe after conversion
    """
    self.set_column(column_name, self.get_column(column_name).astype(new_type))
    return self.get_columns_types()

def convert_dataframe_type(self, new_type='float64'):
    """Cast every column to `new_type`."""
    for p in self.get_columns_names():
        self.convert_column_type(p, new_type)
    return self.get_columns_types()

def concatinate(self, dataframe, ignore_index=False, join='outer'):
    """conacatenate horizontally two dataframe
    Args:
        dataframe (dataframe): the destination dataframe
        ignore_index (bool, optional): If True, do not use the index values along the concatenation axis. Defaults to False.
    """
    self.__dataframe = pd.concat([self.get_dataframe(), dataframe], axis=1, ignore_index=ignore_index, join=join)
def append_dataframe(self, dataframe):
    """Concatenate `dataframe` (same columns expected) below the current rows.

    Uses pd.concat since DataFrame.append was removed in pandas 2.0; matches
    the old append() behavior (indexes preserved, not reset).
    """
    self.__dataframe = pd.concat([self.__dataframe, dataframe])
def intersection(self, dataframe, column):
    """Inner-join the frame with `dataframe` on `column`."""
    self.__dataframe = pd.merge(self.__dataframe, dataframe, on=column, how='inner')

def left_join(self, dataframe, column):
    """Left-join the frame with `dataframe` on `column`."""
    self.__dataframe = pd.merge(self.__dataframe, dataframe, on=column, how='left')

def right_join(self, dataframe, column):
    """Right-join the frame with `dataframe` on `column`."""
    self.__dataframe = pd.merge(self.__dataframe, dataframe, on=column, how='right')

def eliminate_outliers_neighbors(self, n_neighbors=20, contamination=.05):
    """Drop outlier rows flagged by LocalOutlierFactor (keeps inliers only)."""
    outliers = LocalOutlierFactor(n_neighbors=n_neighbors, contamination=contamination)
    self.__dataframe['inlier'] = outliers.fit_predict(self.get_dataframe())
    self.__dataframe = self.get_dataframe().loc[self.get_dataframe().inlier == 1,
                                                self.get_dataframe().columns.tolist()]

def get_pca(self, new_dim):
    """Project the frame down to `new_dim` principal components."""
    # pca.explained_variance_ratio_ gives the information gain per component
    pca_model = PCA(n_components=new_dim)
    return pca_model.fit_transform(self.get_dataframe())

def get_centre_reduite(self):
    """Return the frame standardized to zero mean / unit variance."""
    sc = StandardScaler()
    return sc.fit_transform(X=self.get_dataframe())

def column_to_standard_scale(self, column):
    """Standard-scale one column in place."""
    # NOTE(review): `dataframe_copy = self` is an alias, not a copy, and
    # reindex_dataframe() resets the index as a side effect -- confirm both
    # are intended.
    sc = StandardScaler()
    columns_names = self.get_columns_names()
    dataframe_copy = self
    dataframe = DataFrame(sc.fit_transform(X=self.get_dataframe()), columns_names_as_list=columns_names, data_type='matrix')
    self.reindex_dataframe()
    dataframe_copy.set_column(column, dataframe.get_column(column))
    self.set_dataframe(dataframe_copy.get_dataframe())

def s__column_to_min_max_scale(self, column):
    """Min-max scale one column in place (stateless variant)."""
    self.set_column(column, minmax_scale(self.get_column(column)))

def column_to_min_max_scale(self, column):
    """Min-max scale one column in place, keeping the fitted scaler for inverse transforms."""
    self.__vectorizer = MinMaxScaler()
    dataframe_copy = self.get_dataframe()
    self.keep_columns([column])
    self.__vectorizer.fit(self.get_dataframe())
    scaled_column = self.__vectorizer.transform(self.get_dataframe())
    self.set_dataframe(dataframe_copy)
    self.set_column(column, scaled_column)
    return scaled_column

def get_min_max_scaled_columns(self, columns_names_as_list):
    """Return the listed columns min-max scaled (frame itself restored afterwards)."""
    self.__vectorizer = MinMaxScaler()
    dataframe_copy = self.get_dataframe()
    self.keep_columns(columns_names_as_list)
    self.__vectorizer.fit(self.get_dataframe())
    scaled_column = self.__vectorizer.transform(self.get_dataframe())
    self.set_dataframe(dataframe_copy)
    return scaled_column

def k_get_min_max_scaled_dataframe(self):
    """Return a min-max scaled copy as a pandas frame (column names preserved)."""
    self.__vectorizer = MinMaxScaler()
    self.__vectorizer.fit(self.get_dataframe())
    scaled_dataframe = DataFrame(self.__vectorizer.transform(self.get_dataframe()),
                                 data_type='matrix',
                                 columns_names_as_list=self.get_columns_names())
    return scaled_dataframe.get_dataframe()

def get_min_max_scaled_dataframe(self):
    """Return the whole frame min-max scaled as a numpy array."""
    self.__vectorizer = MinMaxScaler()
    self.__vectorizer.fit(self.get_dataframe())
    return self.__vectorizer.transform(self.get_dataframe())

def dataframe_to_min_max_scale(self):
    """Min-max scale the frame in place (replaces it with a numpy array)."""
    self.__vectorizer = MinMaxScaler()
    self.set_dataframe(self.__vectorizer.fit_transform(X=self.get_dataframe()))

def get_inverse_transform(self, scaled_list):
    """Invert the last fitted scaler over a 1-D list of scaled values."""
    scaled_list = np.reshape(scaled_list, (len(scaled_list), 1))
    return self.__vectorizer.inverse_transform(scaled_list)

def get_last_window_for_time_serie_as_list(self, column, window_size=3):
    """Return the last `window_size` values of `column`, scaled by the stored scaler."""
    return self.__vectorizer.transform(np.reshape(self.get_column(column).iloc[-window_size:].to_numpy(), (window_size, 1)))

def write_column_in_file(self, column, path='data/out.csv'):
    """Write one column to a text file, one value per line."""
    Lib.write_liste_in_file(path, self.get_column(column).apply(str))

def check_duplicated_rows(self):
    """Return True if any full row is duplicated."""
    return any(self.get_dataframe().duplicated())

def check_duplicated_in_column(self, column):
    """Return True if any value of `column` is duplicated."""
    return any(self.get_column(column).duplicated())

def write_check_duplicated_column_result_in_file(self, column, path='data/latin_comments.csv'):
    """Write the per-row duplicate flags of `column` to a file."""
    Lib.write_liste_in_file(path, self.get_column(column).duplicated().apply(str))

def write_files_grouped_by_column(self, column_index, dossier):
    """Append each row's `column_index` value to a per-first-column file under `dossier`."""
    for p in self.get_dataframe().values:
        Lib.write_line_in_file(dossier + str(p[0]).lower() + '.csv', p[column_index])

def filter_dataframe(self, column, func_de_decision, in_place=True, *args):
    """Keep only rows where `func_de_decision(column_value, *args)` is truthy.

    NOTE(review): extra args are only forwarded when exactly two are given --
    confirm that limitation is intended.
    """
    if in_place is True:
        if len(args) == 2:
            self.set_dataframe(
                self.get_dataframe().loc[self.get_column(column).apply(func_de_decision, args=(args[0], args[1]))])
        else:
            self.set_dataframe(
                self.get_dataframe().loc[self.get_column(column).apply(func_de_decision)])
    else:
        if len(args) == 2:
            return self.get_dataframe().loc[self.get_column(column).apply(func_de_decision, args=(args[0], args[1]))]
        else:
            return self.get_dataframe().loc[self.get_column(column).apply(func_de_decision)]

def transform_column(self, column_to_trsform, column_src, fun_de_trasformation, in_place=True, *args):
    """Write fun(column_src) into column_to_trsform (or return it when not in place).

    Args:
        column_to_trsform: column to transform
        column_src: column used as the source of the transformation
        fun_de_trasformation: transformation function; extra args example:
            data.transform_column(column, column, Lib.remove_stopwords, True, stopwords)
        in_place (bool, optional): if True the change affects the wrapped frame. Defaults to True.

    NOTE(review): only the first extra arg is forwarded (args=(args[0],)).
    """
    if in_place is True:
        if (len(args) != 0):
            self.set_column(column_to_trsform, self.get_column(column_src).apply(fun_de_trasformation, args=(args[0],)))
        else:
            self.set_column(column_to_trsform, self.get_column(column_src).apply(fun_de_trasformation))
    else:
        if (len(args) != 0):
            return self.get_column(column_src).apply(fun_de_trasformation, args=(args[0],))
        else:
            return self.get_column(column_src).apply(fun_de_trasformation)
def to_no_accent_column(self, column):
    """Strip accents from every value of the string column `column`.

    Fixed: called the misspelled `self.trasform_column` (AttributeError at
    runtime); the redundant read-back set_column no-op was dropped.
    """
    self.transform_column(column, column, Lib.no_accent)
def write_dataframe_in_file(self, out_file='data/out.csv', delimiter=','):
    """Dump the frame's values to a delimited text file via Lib."""
    Lib.write_liste_csv(self.get_dataframe().values, out_file, delimiter)

def sort(self, by_columns_list, ascending=False):
    """Sort the frame in place by the given columns, NaNs first."""
    self.set_dataframe(self.get_dataframe().sort_values(by=by_columns_list, ascending=ascending,
                                                        na_position='first'))

def count_occurence_of_each_row(self, column):
    """Return a Series mapping each distinct value of `column` to its row count."""
    return self.get_dataframe().pivot_table(index=[column], aggfunc='size')

def get_distinct_values_as_list(self, column):
    """Return the distinct values of `column` as a list."""
    return list(self.get_dataframe().pivot_table(index=[column], aggfunc='size').index)

def column_to_numerical_values(self, column):
    """Encode `column` as integer codes in place; returns the code->label mapping list."""
    maping = list(self.get_dataframe().pivot_table(index=[column], aggfunc='size').index)
    self.transform_column(column, column, lambda o : maping.index(o))
    return maping
def reverse_column_from_numerical_values(self, column, maping):
    """Map integer codes in `column` back to their original labels via `maping`.

    Fixed: called the misspelled `self.trasform_column` (AttributeError).
    """
    self.transform_column(column, column, lambda o: maping[int(o)])
def count_occurence_of_row_as_count_column(self, column):
    """Add a 'count' column holding, for each row, how often its `column` value occurs.

    Fixed: called the misspelled `self.trasform_column` (AttributeError).
    """
    column_name = 'count'
    self.set_column(column_name, self.get_column(column).value_counts())
    self.transform_column(column_name, column, lambda x: self.get_column(column).value_counts().get(x))
def get_count_number_of_all_words(self, column):
self.apply_fun_to_column(column, lambda x: len(x.split(' ')))
return self.get_column(column).sum()
def get_count_occurrence_of_value(self, column, value, case_sensitive=True):
if case_sensitive:
self.apply_fun_to_column(column, lambda x: x.split(' ').count(value))
return self.get_column(column).sum()
else:
self.apply_fun_to_column(column, lambda x: list(map(str.lower, x.split(' '))).count(value))
return self.get_column(column).sum()
def count_true_decision_function_rows(self, column, decision_function):
self.filter_dataframe(column, decision_function)
    def show_wordcloud(self, column):
        """Render a word cloud of *column*'s joined text (requires wordcloud and matplotlib)."""
        wordcloud = WordCloud(
            background_color='white',
            max_words=100,
            max_font_size=30,
            scale=3,
            random_state=1)
        wordcloud = wordcloud.generate(self.get_column_as_joined_text(column))
        # figure handle kept only so the same figure (id 1) is reused across calls
        fig = plt.figure(1, figsize=(12, 12))
        plt.axis('off')
        plt.imshow(wordcloud)
        plt.show()
def reindex_dataframe(self, index_as_column_name=None, index_as_liste=None):
if index_as_liste is not None:
new_index = new_index = index_as_liste
self.get_dataframe().index = new_index
if index_as_column_name is not None:
self.__dataframe.set_index(index_as_column_name, inplace=True)
if index_as_column_name is None and index_as_liste is None:
new_index = pd.Series(np.arange(self.get_shape()[0]))
self.get_dataframe().index = new_index
def get_columns_names(self):
header = list(self.get_dataframe().columns)
return header
def export(self, destination_path='data/json_dataframe.csv', type='csv'):
if type == 'json':
destination_path='data/json_dataframe.json'
self.get_dataframe().to_json(destination_path)
return 0
self.get_dataframe().to_csv(destination_path)
def sample(self, n=10, frac=None):
if frac is not None:
return self.get_dataframe().sample(n=frac)
return self.get_dataframe().sample(n=n)
"""
filter lines == WHERE
print(data.get_dataframe().loc[data.get_dataframe().Gender == 'H', 'Name']) nom des gen qui ont Gender == 'H'
data.set_column('Gender', data.get_column('Gender').apply(okkk))
def okkk(o):
if o == '0':
return 'OKK'
return 'NOK'
print(data.get_dataframe().loc[data.get_dataframe().Gender == 'H', 'Name'])
data.set_column('Gender', data.get_column('Gender').apply(okkk)) select en respectant la fun okkk
if o == '0':
return True
return False
filter = data["Age"]=="Twenty five"
# printing only filtered columns
data.where(filter).dropna()
In [13]: df.iloc[0] # first row in a DataFrame
Out[13]:
A 1
B 2
Name: a, dtype: int64
In [14]: df['A'].iloc[0] # first item in a Series (Column)
Out[14]: 1
"""
def show(self, number_of_row=None):
if number_of_row is None:
print(self.get_dataframe())
elif number_of_row < 0:
return self.get_dataframe().tail(abs(number_of_row))
else:
return self.get_dataframe().head(number_of_row)
    def get_sliced_dataframe(self, line_tuple, columns_tuple):
        """Return a label-based slice: rows line_tuple[0]..[1], columns columns_tuple[0]..[1] (inclusive)."""
        return self.get_dataframe().loc[line_tuple[0]:line_tuple[1], columns_tuple[0]: columns_tuple[1]]
    def eliminate_outliers_quantile(self, column, min_quantile, max_quantile):
        """Drop rows whose *column* value falls outside the [min_quantile, max_quantile] quantiles."""
        min_q, max_q = self.get_column(column).quantile(min_quantile), self.get_column(column).quantile(max_quantile)
        self.filter_dataframe(column, self.outliers_decision_function, min_q, max_q)
    def scale_column(self, column):
        """Scale *column* in place by dividing every value by the column maximum."""
        max_column = self.get_column(column).describe()['max']
        self.transform_column(column, column, self.scale_trasform_fun, max_column)
    def drop_duplicated_rows(self, column):
        """Drop rows duplicated on *column*, keeping the first occurrence."""
        self.set_dataframe(self.__dataframe.drop_duplicates(subset=column, keep='first'))
    def plot_column(self, column):
        """Plot a single column with matplotlib and block on plt.show()."""
        self.get_column(column).plot()
        plt.show()
    def plot_dataframe(self):
        """Plot every column of the dataframe with matplotlib and block on plt.show()."""
        self.get_dataframe().plot()
        plt.show()
    def to_numpy(self):
        """Return the underlying data as a NumPy array (pandas .values)."""
        return self.get_dataframe().values
    def info(self):
        """Proxy for pandas DataFrame.info() (prints a summary; returns None)."""
        return self.get_dataframe().info()
def drop_rows_by_year(self, year=2020, in_place=True):
year = int(year)
if in_place is True:
self.set_dataframe(self.get_dataframe()[self.get_index(as_list=False).year != year])
else:
return self.get_dataframe()[self.get_index(as_list=False).year != year]
def keep_rows_by_year(self, year=2020, in_place=True):
year = int(year)
if in_place is True:
self.set_dataframe(self.get_dataframe()[self.get_index(as_list=False).year == year])
else:
return self.get_dataframe()[self.get_index(as_list=False).year == year]
    def train_test_split(self, train_percent=0.8):
        """Split the dataframe sequentially (no shuffle) into (train, test) at ceil(n*train_percent)."""
        seuil = ceil(self.get_shape()[0]*train_percent)
        train = self.get_dataframe().iloc[:seuil]
        test = self.get_dataframe().iloc[seuil:]
        return train, test
def train_test_split_column(self, column, train_percent=0.8):
seuil = ceil(self.get_shape()[0]*train_percent)
train = self.get_column(column).iloc[:seuil]
test = self.get_column(column).iloc[seuil:]
return train, test
    def column_to_date(self, column, format='%Y-%m-%d %H:%M'):
        """Parse *column* to datetimes, truncated/normalized to *format*.

        The parse -> strftime -> re-parse round trip discards components finer
        than the given format.
        """
        self.set_column(column, pd.to_datetime(self.get_column(column)))
        self.set_column(column, self.get_column(column).dt.strftime(format))
        self.set_column(column, pd.to_datetime(self.get_column(column)))
    def date_time_formate(self, date_time_column_name, new_format='%Y-%m-%d %H:%M'):
        """Reformat a datetime column to strings in *new_format*; returns the dataframe."""
        self.set_column(date_time_column_name, self.get_column(date_time_column_name).dt.strftime(new_format))
        return self.get_dataframe()
    def resample_timeseries(self, date_column_name='date', frequency='d', agg='mean', skip_rows=None, intitial_index=0, final_index=365, reset_index=False):
        """Resample the time series to *frequency* ('sum' or 'mean' aggregation).

        When skip_rows is given, rows [intitial_index:final_index:skip_rows] are
        sliced first; otherwise *date_column_name* is promoted to the index.
        NOTE(review): resampling requires a datetime index — confirm it holds on
        the skip_rows path.
        """
        if skip_rows is not None:
            self.set_dataframe(self.get_dataframe().loc[intitial_index:final_index:skip_rows])
        else:
            self.reindex_dataframe(date_column_name)
        if agg == 'sum':
            self.set_dataframe(self.__dataframe.resample(frequency).sum())
        if agg == 'mean':
            self.set_dataframe(self.__dataframe.resample(frequency).mean())
        if reset_index is True:
            self.reset_index()
        return self.get_dataframe()
    def to_time_series(self, date_column, value_column, date_format='%Y-%m-%d', window_size=2, one_row=False):
        """Turn (date, value) columns into a windowed time-series generator.

        Promotes *date_column* to the index, keeps only *value_column*, then (for
        one_row=False) builds a generator of min-max-scaled windows of length
        *window_size*. Returns whatever get_generator() holds.
        NOTE(review): the one_row=True path builds no generator — confirm intended.
        """
        # when working with train test generators
        """def to_time_series_generators(self, date_column, time_series_column, date_format='%Y-%m-%d', window_size=2, train_percent=0.8):
        self.column_to_date(date_column, format=date_format)
        self.reindex_dataframe(self.get_column(date_column))
        self.drop_column(date_column)
        #dataframa.asfreq('d') # h hourly w weekly d normal daily b business day m monthly a annualy
        train, test = self.train_test_split_column(time_series_column, 0.8)
        self.set_train_generator(TimeseriesGenerator(np.reshape(train.values,
        (len(train),1)),
        np.reshape(train.values,
        (len(train),1)),
        length=window_size,
        batch_size=1,
        ))
        self.set_test_generator(TimeseriesGenerator(np.reshape(test.values,
        (len(test),1)),
        np.reshape(test.values,
        (len(test),1)),
        length=window_size,
        batch_size=1,
        ))
        return self.get_train_generator(), self.get_test_generator()"""
        self.column_to_date(date_column, format=date_format)
        self.reindex_dataframe(self.get_column(date_column))
        self.keep_columns(value_column)
        if one_row is False:
            #dataframa.asfreq('d') # h hourly w weekly d normal daily b business day m monthly a annualy
            self.set_generator(SG(self.get_min_max_scaled_dataframe(),
                                  self.get_min_max_scaled_dataframe(),
                                  length=window_size,
                                  batch_size=1,))
        """self.set_generator(
        TimeseriesGenerator(self.get_min_max_scaled_dataframe(),
        self.get_min_max_scaled_dataframe(),
        length=window_size,
        length_output=7,
        batch_size=1)"""
        return self.get_generator()
    def drop_rows(self, nbr_rows=1):
        """Drop the first *nbr_rows* rows from the dataframe.

        Args:
            nbr_rows (int, optional): if negative, drops the last |nbr_rows|
                rows instead. Defaults to 1.

        Returns:
            None
        """
        if nbr_rows < 0:
            self.set_dataframe(self.get_dataframe().iloc[:self.get_shape()[0]+nbr_rows])
        else:
            self.set_dataframe(self.get_dataframe().iloc[nbr_rows:])
def drop_rows_by_indices(self, indexes_as_list=[0]):
"""Drop rows given their indexes
Args:
indexes_as_list (list, optional): [description]. Defaults to [0].
"""
self.set_dataframe(self.get_dataframe().drop(indexes_as_list))
def save(self, path=None):
"""Save a dataframe in pkl format for future use
Args:
path ([type], optional): link and name of storage file. If set to None, it will be dataframe.pkl.
"""
if path is None:
self.get_dataframe().to_pickle("dataframe.pkl")
else:
self.get_dataframe().to_pickle(path)
    def dataframe_skip_columns(self, intitial_index, final_index, step=2):
        """Keep every *step*-th row between the two index labels.

        NOTE(review): despite the name this slices ROWS (.loc on the index),
        not columns — confirm callers expect row slicing.
        """
        self.set_dataframe(self.get_dataframe().loc[intitial_index:final_index:step])
    def shuffle_dataframe(self):
        """Shuffle all rows in place and reset to a fresh 0..n-1 index."""
        self.set_dataframe(self.get_dataframe().sample(frac=1).reset_index(drop=True))
    def scale_columns(self, columns_names_as_list, scaler_type='min_max', in_place=True):
        """A method to standardize the independent features present in the concerned columns in a fixed range.

        Args:
            columns_names_as_list (list): columns to scale.
            scaler_type (str, optional): ['min_max', 'standard', 'adjusted_log']. Defaults to 'min_max'.
            in_place (bool, optional): NOTE(review): currently unused — every
                branch mutates the dataframe; confirm intended.

        For 'min_max'/'standard' the columns are dropped and re-concatenated in
        scaled form (so column order may change); 'adjusted_log' applies
        log(1 + x - min(column)) per column. Unknown scaler types return None.
        """
        if scaler_type == 'min_max':
            self.__vectorizer = MinMaxScaler()
            dest_columns = self.get_columns(columns_names_as_list)
            dest_dataframe = DataFrame(self.__vectorizer.fit_transform(X=dest_columns),
                                       line_index=self.get_index(),
                                       columns_names_as_list=columns_names_as_list,
                                       data_type='matrix')
            self.drop_columns(columns_names_as_list)
            self.concatinate(dest_dataframe.get_dataframe())
            return dest_dataframe.get_dataframe()
        elif scaler_type == 'standard':
            self.__vectorizer = StandardScaler()
            dest_columns = self.get_columns(columns_names_as_list)
            dest_dataframe = DataFrame(self.__vectorizer.fit_transform(X=dest_columns),
                                       line_index=self.get_index(),
                                       columns_names_as_list=columns_names_as_list,
                                       data_type='matrix')
            self.drop_columns(columns_names_as_list)
            self.concatinate(dest_dataframe.get_dataframe())
            return dest_dataframe.get_dataframe()
        elif scaler_type == 'adjusted_log':
            def log_function(o, min_column):
                return np.log(1 + o - min_column)
            for name in columns_names_as_list:
                min_column = self.get_column(name).min()
                self.transform_column(name, name, log_function, min_column)
            return self.get_columns(columns_names_as_list)
    def scale_dataframe(self, scaler_type='min_max', in_place=True):
        """A method to standardize all features of the dataframe in a fixed range.

        Args:
            scaler_type (str, optional): ['min_max', 'standard', 'adjusted_log']. Defaults to 'min_max'.
            in_place (bool, optional): NOTE(review): currently unused — every
                branch mutates the dataframe; confirm intended.

        Returns:
            the scaled dataframe.
        """
        if scaler_type == 'min_max':
            self.__vectorizer = MinMaxScaler()
            column_names = self.get_columns_names()
            self.set_dataframe(DataFrame(self.__vectorizer.fit_transform(X=self.get_dataframe()),
                                         line_index=self.get_index(),
                                         columns_names_as_list=column_names,
                                         data_type='matrix').get_dataframe())
        elif scaler_type == 'standard':
            self.__vectorizer = StandardScaler()
            self.set_dataframe(self.__vectorizer.fit_transform(X=self.get_dataframe()))
        elif scaler_type == 'adjusted_log':
            def log_function(o, min_column):
                return np.log(1 + o - min_column)
            for name in self.get_columns_names():
                min_column = self.get_column(name).min()
                self.transform_column(name, name, log_function, min_column)
            self.convert_dataframe_type()
        return self.get_dataframe()
    def load_dataset(self, dataset='iris'):
        """Load a scikit-learn toy dataset into this wrapper.

        boston: house-prices regression set (target column 'house_price').
        iris: classification set (target column 'target').

        NOTE(review): sklearn removed load_boston in 1.2 — the 'boston' branch
        will fail on recent versions; confirm the pinned sklearn release.
        """
        if dataset == 'boston':
            data = load_boston()
            x = data.data
            y = data.target
            features_names = data.feature_names
            self.set_dataframe(x, data_type='matrix')
            self.rename_columns(features_names, all_columns=True)
            self.add_column(y,'house_price')
        elif dataset == 'iris':
            data = load_iris(as_frame=True)
            x = data.data
            y = data.target
            self.set_dataframe(x)
            self.add_column(y,'target')
@staticmethod
def outliers_decision_function(o, min_quantile, max_quantile):
if min_quantile < o < max_quantile:
return True
return False
    @staticmethod
    def generate_datetime_range(starting_datetime='2013-01-01', periods=365, freq='1H'):
        """Return a pandas DatetimeIndex of *periods* timestamps spaced by *freq*."""
        return pd.date_range(starting_datetime, periods=periods, freq=freq)
    @staticmethod
    def scale_trasform_fun(o, max_column):
        """Scale a single value by the column maximum (used by scale_column)."""
        return o / max_column
elhachimi-ch/dst | test/kitchen.py | import time
import os
import sys
def main():
    """Smoke-test the toolkit: train and evaluate a decision tree on iris.

    NOTE(review): DataFrame and Model are not imported in this file's visible
    imports (time/os/sys only) — confirm how they enter scope.
    """
    ti = time.time()
    data = DataFrame()
    data.load_dataset('iris')
    y = data.get_column('target')
    data.drop_column('target')
    # decision tree model
    model = Model(data_x=data.get_dataframe(), data_y=y, model_type='dt', training_percent=0.8)
    # train the model
    model.train()
    # get all classification evaluation metrics
    model.report()
    # get the cross validation
    model.cross_validation(5)
    # report elapsed wall-clock time for the whole run
    print(time.time() - ti)
if __name__ == '__main__':
    main()
|
elhachimi-ch/dst | src/data_science_toolkit/lib.py | <reponame>elhachimi-ch/dst
import csv
import pickle
import re
from os import listdir
from os.path import isfile, join
from time import sleep
import math
import numpy as np
import unidecode
from emoji import UNICODE_EMOJI
from textblob import TextBlob
#from stringdist.pystringdist.levenshtein import levenshtein as ed
import nltk
import calendar
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
class Lib:
@staticmethod
def substring(string_in):
return set([string_in[i: j] for i in range(len(string_in))
for j in range(i + 1, len(string_in) + 1) if len(string_in[i: j]) > 0])
@staticmethod
def write_liste_csv(liste_ligne_csv, file_name='data/out.csv', delimiter=',', quotechar='`'):
f = open(file_name, 'w+', newline='', encoding='utf-8')
writer = csv.writer(f, delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC, quotechar=quotechar)
for p in liste_ligne_csv:
writer.writerow(p)
f.close()
    @staticmethod
    def julian_date_to_mmddyyy(year,julian_day):
        """Convert a day-of-year to (month, day, year) and print it.

        NOTE(review): the result is printed, not returned, and the month <= 12
        guard is evaluated after the subtraction test — confirm day-366 behavior.
        """
        month = 1
        while julian_day - calendar.monthrange(year,month)[1] > 0 and month <= 12:
            julian_day = julian_day - calendar.monthrange(year,month)[1]
            month = month + 1
        print(month,julian_day,year)
    @staticmethod
    def load_model(model_path):
        """Load a joblib-serialized model from *model_path*.

        NOTE(review): joblib is not imported in this module — this raises
        NameError unless joblib is provided elsewhere; confirm.
        """
        return joblib.load(open(model_path, 'rb'))
@staticmethod
def save_object(o, object_path):
pickle.dump(o, open(object_path, 'wb'))
@staticmethod
def load_object(obj_path):
return pickle.load(open(obj_path, 'rb'))
# verify condition on all list elements all(map(is_arabic, city))
# nCk
@staticmethod
def c_n_k(n, k):
"""
A fast way to calculate binomial coefficients by <NAME> (contrib).
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in range(1, min(k, n - k) + 1):
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0
# factorielle
@staticmethod
def factorial(x):
x = int(x)
result = 1
while x > 1:
result = result * x
x = x - 1
return result
@staticmethod
def stemming(document, language_or_custom_stemmer_as_lambda='en'):
tokens = word_tokenize(document)
result = []
if language_or_custom_stemmer_as_lambda == 'en':
for p in tokens:
result.append(nltk.stem.PorterStemmer().stem(p))
elif language_or_custom_stemmer_as_lambda == 'fr':
for p in tokens:
result.append(nltk.stem.SnowballStemmer('french').stem(p))
elif language_or_custom_stemmer_as_lambda == 'ar':
for p in tokens:
result.append(nltk.stem.SnowballStemmer('arabic').stem(p))
else:
result.append(language_or_custom_stemmer_as_lambda(p))
return ' '.join(result)
# Fonction partie entière E()
@staticmethod
def partie_entiere(x):
if x == int(x):
return x
elif x >= 0:
return int(x)
else:
return -Lib.partie_entiere(-x) - 1
# Algorithme d'Euclide pour le pgcd
@staticmethod
def pgcd_iterative(a, b):
while a % b != 0:
a, b = b, a % b
return b
@staticmethod
def pgcd_recursive(a, b):
if a % b == 0:
return b
else:
return Lib.pgcd_recursive(b, a % b)
# plus petit commun multiple
def ppmc(a, b):
return (a * b) / Lib.pgcd_recursive(a, b)
# verifier premier
@staticmethod
def is_premier(n):
if n == 0 or n == 1:
return False
else:
for i in range(2, int(math.sqrt(n))):
if n % i == 0:
return False
return True
    # decomposition en nombre premier
    @staticmethod
    def decompsition_premier(n):
        """Prime factorization of n as a list of (prime, exponent) tuples.

        Primes, 0 and 1 are returned as [(n, 1)].
        NOTE(review): depends on Lib.is_premier, whose sqrt bound misclassifies
        perfect squares — verify results for such inputs.
        """
        liste = []
        if Lib.is_premier(n) or n == 1 or n == 0:
            liste.append((n, 1))
        else:
            i = 2
            while n // i != 0:
                j = 0
                if n % i == 0:
                    while n % i == 0:
                        j += 1
                        n = n // i
                    liste.append((i, j))
                else:
                    i += 1
        return liste
# from scipy.comb(), but MODIFIED!
@staticmethod
def c_n_k_scipy(n, k):
if (k > n) or (n < 0) or (k < 0):
return 0
top = n
val = 1
while top > (n - k):
val *= top
top -= 1
n = 1
while n < k + 1:
val /= n
n += 1
return val
    @staticmethod
    def is_arabic(string_in):
        """True when *string_in* uses the Arabic alphabet.

        NOTE(review): AlphabetDetector is not imported in this module — NameError
        at call time unless injected elsewhere; confirm.
        """
        ad = AlphabetDetector()
        return ad.is_arabic(string_in)
    @staticmethod
    def is_latin(string_in):
        """True when *string_in* uses the Latin alphabet.

        NOTE(review): AlphabetDetector is not imported in this module — confirm.
        """
        ad = AlphabetDetector()
        return ad.is_latin(string_in)
    @staticmethod
    def replace2or_more_char_by_2(string_in):
        """Collapse runs of 2+ identical letters/digits (Latin or Arabic) to exactly two."""
        return re.sub(r'([a-zA-Z1-9ء-ۏ])\1+', r'\1\1', string_in)
    @staticmethod
    def replace2or_more_char_by_1(string_in):
        """Collapse runs of 2+ identical letters/digits (Latin or Arabic) to a single one."""
        return re.sub(r'([a-zA-Z1-9ء-ۏ])\1+', r'\1', string_in)
@staticmethod
def is_fr_wolf(string_in, french_dict_instance):
try:
is_in = french_dict_instance.synsets(string_in)
if len(is_in) > 0:
return True
except NameError:
return False
    @staticmethod
    def is_fr_or_en(string_in):
        """True when TextBlob detects the text as French or English.

        NOTE(review): detect_language() performs a network call and was removed
        in recent TextBlob releases — confirm the pinned version.
        """
        if len(string_in) < 3:
            string_in += " "
        return TextBlob(string_in).detect_language() == 'fr' or TextBlob(string_in).detect_language() == 'en'
@staticmethod
def is_single_word(string_in):
if re.match(r".+\s.+", string_in):
return False
return True
@staticmethod
def eliminate_multiple_whitespace(string_in):
s = string_in.strip()
return re.sub(r"\s+", " ", s)
    @staticmethod
    def eliminate_punctuation(string_in):
        """Remove ASCII punctuation (quotes/dots become spaces first) and normalize whitespace."""
        s = string_in.strip()
        s = Lib.replace_apostrophes_and_points_by_space(s)
        s = re.sub(r'[!"#$%&()*+,-./\\:;<=>?@[\]^_`{|}~]+', '', s)
        return Lib.eliminate_multiple_whitespace(s)
@staticmethod
def eliminate_all_whitespaces(string_in):
string_in = str(string_in)
s = string_in.strip()
return re.sub(r"\s+", "", s)
@staticmethod
def eliminate_all_digits(string_in):
s = string_in.strip()
return re.sub(r"\d+", "", s)
@staticmethod
def read_text_file_as_list(path, with_anti_slash=False):
f = open(path, "r+", encoding='utf-8')
data = f.readlines()
if not with_anti_slash:
for i in range(len(data)):
data[i] = re.sub(r"\n", "", data[i]).strip()
return data
@staticmethod
def write_liste_in_file(liste, path='data/out.txt'):
f = open(path, 'w+', encoding='utf-8')
liste = list(map(str, liste))
for i in range(len(liste)-1):
liste[i] = str(liste[i]) + "\n"
f.writelines(liste)
@staticmethod
def strip_and_split(string_in):
return string_in.strip().split()
@staticmethod
def to_upper_file_text(path_source, path_destination):
data = Lib.read_text_file(path_source)
la = []
for line in data:
la.append(line.upper())
Lib.write_liste_in_file(path_destination, la)
@staticmethod
def no_accent_file_text(path_source, path_destination):
data = Lib.read_text_file(path_source)
la = []
for line in data:
la.append(Lib.no_accent(line))
Lib.write_liste_in_file(path_destination, la)
@staticmethod
def edit_distance(term_a, term_b):
term_a = Lib.replace2or_more_char_by_2(term_a)
term_b = Lib.replace2or_more_char_by_2(term_b)
return Lib.edit_dist_dp(term_a, term_b)
    @staticmethod
    def edit_distance_without_voyelle(term_a, term_b):
        """Edit distance after collapsing repeats and removing Latin vowels (incl. y).

        NOTE(review): delegates to Lib.edit_distance, which forwards to
        edit_dist_dp — verify its argument list matches that helper's signature.
        """
        term_a = Lib.replace2or_more_char_by_2(term_a)
        term_b = Lib.replace2or_more_char_by_2(term_b)
        term_a = re.sub(r'[aeiouy]', '', term_a)
        term_b = re.sub(r'[aeiouy]', '', term_b)
        return Lib.edit_distance(term_a, term_b)
@staticmethod
def edit_dist_dp(str1, str2, m, n):
# Create a table to store results of sub problems
dp = np.zeros((n + 1, m + 1))
# Fill d[][] in bottom up manner
for i in range(m + 1):
for j in range(n + 1):
# If first string is empty, only option is to
# insert all characters of second string
if i == 0:
dp[i][j] = j # Min. operations = j
# If second string is empty, only option is to
# remove all characters of second string
elif j == 0:
dp[i][j] = i # Min. operations = i
# If last characters are same, ignore last char
# and recur for remaining string
elif str1[i - 1] == str2[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
# If last character are different, consider all
# possibilities and find minimum
else:
dp[i][j] = 1 + min(dp[i][j - 1], # Insert
dp[i - 1][j], # Remove
dp[i - 1][j - 1]) # Replace
return dp[m][n]
@staticmethod
def write_line_in_file(line, path='data/latin_comments.csv', with_anti_slash=True):
f = open(path, "a+", encoding='utf-8')
if with_anti_slash:
f.write(str(line) + "\n")
else:
f.write(line)
    # nltk.download('stopwords')
    @staticmethod
    def load_arabic_stop_words():
        """Load the Arabic stop-word list from the local data file.

        NOTE(review): Lib.read_text_file is not defined in this module —
        likely should be read_text_file_as_list; confirm.
        """
        liste = Lib.read_text_file('data/arabic_stop_words.csv')
        return liste
    @staticmethod
    def load_stop_words(language):
        """Return nltk's stop-word list for *language* (corpus must be downloaded)."""
        return stopwords.words(language)
    @staticmethod
    def remove_stopwords(document, language_or_stopwords_list='english'):
        """Lower-case *document* and drop stop words.

        Accepts a custom stop-word list, 'arabic'/'french' (loaded from local
        CSV files), or any nltk stopwords language name.
        """
        document = str.lower(document)
        # NOTE: the local name shadows the module-level `stopwords` import
        if isinstance(language_or_stopwords_list, list) is True:
            stopwords = language_or_stopwords_list
        elif language_or_stopwords_list == 'arabic':
            stopwords = Lib.read_text_file_as_list('data/arabic_stopwords.csv')
        elif language_or_stopwords_list == 'french':
            stopwords = Lib.read_text_file_as_list('data/french_stopwords.csv')
        else:
            nltk.download('stopwords')
            stopwords = nltk.corpus.stopwords.words(language_or_stopwords_list)
        words = word_tokenize(document)
        clean_words = []
        for w in words:
            if w not in stopwords:
                clean_words.append(w)
        return ' '.join(clean_words)
    @staticmethod
    def no_accent(string_in):
        """Strip accents from non-Arabic text via unidecode.

        NOTE(review): this definition is shadowed by an identical redefinition
        of no_accent immediately after it in this class — one copy should be
        removed.
        """
        if not Lib.is_arabic(string_in):
            return unidecode.unidecode(string_in)
        return string_in
    @staticmethod
    def no_accent(string_in):
        """Strip accents from non-Arabic text via unidecode; Arabic text passes through."""
        # \s*[A-Za-z\u00C0-\u00FF]+
        if not Lib.is_arabic(string_in):
            return unidecode.unidecode(string_in)
        return string_in
@staticmethod
def list_to_string(liste):
liste_b = []
for p in liste:
if type(p) is not str:
liste_b.append(str(p))
else:
liste_b.append(p)
return "".join(liste_b)
@staticmethod
def check_all_elements_type(list_to_check, types_tuple):
return all(isinstance(p, types_tuple) for p in list_to_check)
    @staticmethod
    def list_all_files_in_folder(folder_path):
        """Return the names of regular files (non-recursive) inside *folder_path*."""
        return [f for f in listdir(folder_path) if isfile(join(folder_path, f))]
    @staticmethod
    def translate(string_in, langue_dest='fr'):
        """Translate *string_in* to *langue_dest*.

        NOTE(review): Translator is not imported in this module (googletrans?) —
        NameError at call time unless provided elsewhere; confirm.
        """
        translator = Translator()
        translated = translator.translate(string_in, dest=langue_dest)
        return translated.text
    @staticmethod
    def get_mnist_as_dataframe():
        """Placeholder — currently a no-op that returns None.

        NOTE(review): the body is a stray string literal of scratch code, not an
        implementation.
        """
        """image_list = ch.get_reshaped_matrix(np.array([ch.get_reshaped_matrix(p, (1, 28 * 28)) for p in x_train]),
        (x_train.shape[0], 28 * 28))"""
@staticmethod
def is_only_digits_filter(comment):
comment = str(comment)
if re.match(r'^\d+$', comment):
return True
return False
@staticmethod
def is_empty_world(string_in):
if re.match(r'^\s+$', string_in):
return True
return False
@staticmethod
def is_empty_line(string_in):
string_in = str(string_in)
if re.match(r'^\s*$', string_in):
return True
return False
    @staticmethod
    def is_only_emojis_filter(comment):
        """Return True when at least one character is NOT an emoji.

        NOTE(review): the name suggests the opposite polarity (the result is
        negated with ``not all``) — confirm call sites.
        """
        return not all([p in UNICODE_EMOJI for p in comment])
    @staticmethod
    def is_digit_with_emojis_filter(comment):
        """True when, after removing whitespace, every character is an emoji, digit, or non-word char."""
        comment = str(comment)
        comment = Lib.eliminate_all_whitespaces(comment)
        return all([p in UNICODE_EMOJI or re.match(r'\d', p) or re.match(r'\W', p) for p in comment])
    @staticmethod
    def eliminate_stop_digits(comment):
        """Remove standalone digit tokens (word-bounded runs of digits) from *comment*."""
        return re.sub(r'\b\d+\b', '', comment)
    @staticmethod
    def is_all_arabic(document):
        """True when every remaining character (whitespace/digits removed) is Arabic."""
        document = Lib.eliminate_all_whitespaces(document)
        document = Lib.eliminate_all_digits(document)
        return all([Lib.is_arabic(p) for p in document])
    @staticmethod
    def get_all_words(comment):
        """Split *comment* into words after removing punctuation."""
        # comment = eliminate_stop_digits(comment)
        comment = Lib.eliminate_punctuation(comment)
        return comment.split(' ')
    @staticmethod
    def fr_or_eng_filter(word):
        """Thin wrapper around Lib.is_fr_or_en (TextBlob language detection)."""
        return Lib.is_fr_or_en(word)
    @staticmethod
    def load(path_data):
        """Load a joblib-serialized object from *path_data*.

        NOTE(review): joblib is not imported in this module — confirm it is
        available at call time.
        """
        return joblib.load(open(path_data, 'rb'))
@staticmethod
def binary_search(input_list, item):
first = 0
last = len(input_list) - 1
while(first <= last):
mid = (first + last) // 2
if input_list[mid] == item :
return True
elif item < input_list[mid]:
last = mid - 1
else:
first = mid + 1
return False
@staticmethod
def fib(n):
if n== 0 or n== 1:
return n
return Lib.fib (n- 1) + Lib.fib (n- 2)
    @staticmethod
    def replace2or_more_char_by_1_sauf_hh(string_in):
        """Collapse repeated characters to one, except 'h'/'ه' (kept for laughter markers).

        NOTE(review): the character class mixes ranges like 0-g and Arabic
        subranges — confirm the intended coverage.
        """
        return re.sub(r'([0-gi-zو-ۏء-ن])\1+', r'\1', string_in)
    @staticmethod
    def replace2or_more_h_by_2h(string_in):
        """Collapse runs of 2+ 'h' (Latin) or 'ه' (Arabic) to exactly two."""
        return re.sub(r'([hه])\1+', r'\1\1', string_in)
    @staticmethod
    def is_stop_digits(string_in):
        """True when *string_in* starts with a word-bounded run of digits."""
        return re.match(r'\b\d+\b', string_in) is not None
@staticmethod
def eliminate_emoji(string_in):
RE_EMOJI = re.compile('[\U00010000-\U0010ffff]+', flags=re.UNICODE)
word = RE_EMOJI.sub(r'', string_in)
return re.sub(r'♥+|❤️+|❤+', '', word)
@staticmethod
def eliminate_d2a_stop_words(string_in):
D2A_STOP_WORDS = ['el', 'al', 'mi', 'ya', 'rah', 'ylh', 'hada', 'wa', 'ila', 'l', 'hadchi', 'ana', 'nti', 'howa', 'ntoma', 'lina', 'likom', 'lihom', 'gha', 'ghi', 'dial', 'dialo', 'dyl', 'diyalkom', 'dialhom', 'dyal', 'deyal']
words = string_in.split(' ')
for w in words:
if w in D2A_STOP_WORDS:
words.remove(w)
return ' '.join(words)
@staticmethod
def replace_apostrophes_and_points_by_space(string_in):
string_in = Lib.replace2or_more_appostrophe_and_point_by_1(string_in)
string_in = re.sub(r'\'', ' ', string_in)
string_in = re.sub(r'"', ' ', string_in)
string_in = re.sub(r'\.', ' ', string_in)
string_in = re.sub(r'…', ' ', string_in)
return string_in
@staticmethod
def write_row_csv(row_liste, file_name='data/latin_comments.csv', delimiter=',', quotechar='`'):
file = open(file_name, 'a+', newline='', encoding='utf-8')
writer = csv.writer(file, delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC, quotechar=quotechar)
writer.writerow(row_liste)
    @staticmethod
    def read_csv(file_path, delimiter=','):
        """Return a csv.reader over *file_path*.

        NOTE: the handle is deliberately left open because the reader is lazy;
        it is only released when garbage-collected.
        """
        f = open(file_path, 'r+', encoding='utf-8')
        reader = csv.reader(f, delimiter=delimiter)
        return reader
    @staticmethod
    def detect_lang(text):
        """Detect the language of *text* with TextBlob (network call; removed in newer TextBlob)."""
        return TextBlob(text).detect_language()
    @staticmethod
    def replace2or_more_appostrophe_and_point_by_1(string_in):
        """Collapse runs of 2+ identical quote/dot characters to a single one."""
        return re.sub(r'([\'\."])\1+', r'\1', string_in)
    @staticmethod
    def country_name_to_iso3(o):
        """Map a country name to its ISO-3 code; returns the input unchanged on lookup failure.

        NOTE(review): Lib.country_name_to_country_alpha3 is not defined in this
        module (country_converter/pycountry helper?) — confirm its origin.
        """
        iso = o
        try:
            iso = Lib.country_name_to_country_alpha3(o)
        except KeyError:
            return iso
        return iso
elhachimi-ch/dst | src/data_science_toolkit/vectorizer.py | <reponame>elhachimi-ch/dst<gh_stars>1-10
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import joblib
from tensorflow.keras.utils import to_categorical
from .lib import Lib
from sklearn.preprocessing import MinMaxScaler
import nltk
class Vectorizer:
__vectorizer = None
__matrice = None
    def __init__(self, documents_as_list=None, vectorizer_type='count', ngram_tuple=(1,1), space_dimension=None, dataframe=None, preprocessing=None):
        """Fit a text/feature vectorizer over *documents_as_list* (or *dataframe* for 'min_max').

        Args:
            documents_as_list (list, optional): corpus of documents to vectorize.
            vectorizer_type (str): 'count', 'tfidf', 'custom' (prefix/suffix dict
                features), or 'min_max' (scales *dataframe*).
            ngram_tuple (tuple): ngram_range for the count vectorizer.
            space_dimension (int, optional): max_features for tfidf.
            dataframe: data for the 'min_max' scaler path.
            preprocessing (callable, optional): custom tfidf preprocessor;
                defaults to the class stemming preprocessor.
        """
        if documents_as_list is not None:
            if vectorizer_type == 'count':
                cv = CountVectorizer(ngram_range=ngram_tuple)
                matrice = cv.fit_transform(documents_as_list)
                self.__vectorizer = cv
                self.__matrice = matrice
            elif vectorizer_type == 'tfidf':
                if preprocessing is None:
                    tfidfv = TfidfVectorizer(max_features=space_dimension, preprocessor=self.preprocessor)
                    matrice = tfidfv.fit_transform(documents_as_list)
                    self.__vectorizer = tfidfv
                    self.__matrice = matrice
                else:
                    tfidfv = TfidfVectorizer(max_features=space_dimension, preprocessor=preprocessing)
                    matrice = tfidfv.fit_transform(documents_as_list)
                    self.__vectorizer = tfidfv
                    self.__matrice = matrice
            elif vectorizer_type == 'custom':
                features = np.vectorize(Vectorizer.get_custom_features)
                data = features(documents_as_list)
                v = DictVectorizer()
                matrice = v.fit_transform(data)
                self.__vectorizer = v
                self.__matrice = matrice
            elif vectorizer_type == 'min_max':
                self.__vectorizer = MinMaxScaler()
                self.__matrice = self.__vectorizer.fit_transform(dataframe)
            else:
                pass
    def get_sparse_matrix(self):
        """Return the fitted document-term matrix in its native (usually sparse) form."""
        return self.__matrice
    def get_matrix(self):
        """Return the document-term matrix densified to a NumPy array."""
        return self.__matrice.toarray()
    def get_vectorizer(self):
        """Return the underlying fitted sklearn vectorizer/scaler object."""
        return self.__vectorizer
    def get_features_names(self):
        """Return the vocabulary feature names.

        NOTE(review): get_feature_names() was removed in sklearn 1.2 in favor of
        get_feature_names_out() — confirm the pinned version.
        """
        return self.__vectorizer.get_feature_names()
@staticmethod
def tokenizer(doc):
return doc.split()
    @staticmethod
    def preprocessor(doc):
        """Default tfidf preprocessor: Porter-stem each space-separated token and re-join.

        NOTE(review): a new PorterStemmer is constructed per token — consider
        hoisting if this becomes hot.
        """
        """def my_tokenizer(s):
        return s.split()
        vectorizer = CountVectorizer(tokenizer=my_tokenizer)
        """
        tokens = doc.split(' ')
        result = []
        for p in tokens:
            result.append(nltk.stem.PorterStemmer().stem(p))
        return ' '.join(result)
@staticmethod
def get_custom_features(e):
e = e.lower()
return {
'f1': e[0], # First letter
'f2': e[0:2], # First 2 letters
'f3': e[0:3], # First 3 letters
'l1': e[-1],
'l2': e[-2:],
'l3': e[-3:],
}
    def get_docs_projections_as_sparse(self, documents_as_liste, projection_type='normal'):
        """Project new documents with the fitted vectorizer; non-'normal' applies the custom dict features first."""
        if projection_type != 'normal':
            documents_as_liste = np.vectorize(Vectorizer.get_custom_features)(documents_as_liste)
        return self.__vectorizer.transform(documents_as_liste)
    def save_vectorizer(self, vectorizer_path='data/vectorizer.data'):
        """Serialize the fitted vectorizer to *vectorizer_path* with joblib."""
        out_vectorizer_file = open(vectorizer_path, 'wb')
        joblib.dump(self.__vectorizer, out_vectorizer_file)
        out_vectorizer_file.close()
    def load_vectorizer(self, vectorizer_path='data/vectorizer.data'):
        """Load a previously saved vectorizer from *vectorizer_path* (joblib format)."""
        self.__vectorizer = joblib.load(open(vectorizer_path, 'rb'))
    def reshape(self, new_shpae_tuple):
        """Reshape the stored matrix in place (result is materialized as a NumPy array)."""
        self.__matrice = np.array(self.__matrice.reshape(new_shpae_tuple))
    def get_sum_by_columns_as_list(self):
        """Return per-column (per-feature) sums of the matrix as a flat 1-D array."""
        count_list = np.array(self.get_sparse_matrix().sum(axis=0))
        count_list = count_list.reshape(self.get_shape()[1])
        return count_list
    def get_sum_by_rows_as_list(self):
        """Return per-row (per-document) sums of the matrix as a flat 1-D array."""
        count_list = np.array(self.get_sparse_matrix().sum(axis=1))
        count_list = count_list.reshape(self.get_shape()[0])
        return count_list
    def get_shape(self):
        """Return the (rows, features) shape of the stored matrix."""
        return self.__matrice.shape
    @staticmethod
    def to_one_hot(vecteur_of_categories):
        """Convert an integer-label vector to one-hot encoding (e.g. label 2 -> [0 0 1]).

        NOTE(review): Lib has no to_categorical attribute in this codebase —
        this likely should call the keras to_categorical imported at the top of
        the module; confirm.
        """
        return Lib.to_categorical(vecteur_of_categories)
@staticmethod
def get_reshaped_matrix(matrix, new_shape_tuple):
print(new_shape_tuple)
new_matrix = matrix.reshape(new_shape_tuple)
print('okkkk {}'.format(new_matrix.shape))
return new_matrix
@staticmethod
def reshape_images_for_cnn(images_as_liste):
images_as_liste.reshape(images_as_liste.shape[0], images_as_liste.shape[1], images_as_liste.shape[1], 1) \
.astype('float32')
|
elhachimi-ch/dst | src/data_science_toolkit/r3.py | <reponame>elhachimi-ch/dst
import gym
import numpy as np
from .dataframe import DataFrame
from gis import GIS
import re
from csm import CSM
class R3(gym.Env):
#
MAX_SOWING_DAY = 100
# action space definition
ADD_DAY = 0
SUB_DAY = 1
SAME_DAY = 2
action_space = gym.spaces.Discrete(2)
actions = [ADD_DAY, SUB_DAY]
# observation space definition
observation_space = gym.spaces.Box(low = 0,
high = 100,
shape = (33,),
dtype = np.int)
AGENT = -1
    def __init__(self, stochastic=True, fitness_threshold=-1000):
        """Build the environment: load plot/pipeline GIS layers, create the crop
        simulation model, and sow random dates.

        NOTE(review): *stochastic* is currently unused — confirm intended.
        """
        r3_plots_path = "plots.shp"
        r3_pipelines_path = "pipelines.shp"
        self.layers = GIS()
        self.layers.add_data_layer(r3_plots_path, 'plots')
        self.layers.add_data_layer(r3_pipelines_path, 'pipelines')
        self.csm = CSM()
        self.sow()
        self.fitness_threshold = fitness_threshold
    def sow(self, sowing_dates_series=None):
        """Attach sowing dates to the pipelines layer (random when no series given)."""
        if sowing_dates_series is None:
            self.layers.add_random_series_column('pipelines', 'sowing_dates')
        else:
            self.layers.add_column('pipelines', sowing_dates_series, 'sowing_dates')
    def get_state(self):
        """Return the current sowing-dates column as the environment state.

        NOTE(review): get_column is called with one argument here but with
        (layer, column) elsewhere — confirm the GIS API.
        """
        """start_state = np.where(self.grid_state == self.AGENT)
        start_not_found = not (start_state[0] and goal_state[0])
        if start_not_found:
        print("Start state not present in the Gridworld. Check the Grid layout")
        #start_state = (start_state[0][0])
        start_state = 0"""
        return self.layers.get_column('sowing_dates')
    def step(self, action):
        """Run one step of the environment.

        Args:
            action (int): ADD_DAY / SUB_DAY / SAME_DAY.

        Returns:
            Tuple: (next state, reward, done, info).

        NOTE(review): the fitness-based reward from get_state_reward is used only
        for the done test and is then overwritten with +1.0 / -2.0 — confirm
        this reward shaping is intended.
        """
        action = int(action)
        info = {"success": True}
        self.grid_state, reward = self.get_state_reward(action)
        done = (reward > self.fitness_threshold)
        if done is False:
            reward = 1.0
        else:
            reward = -2.0
        return self.grid_state, reward, done, info
    def get_state_reward(self, action):
        """Apply *action* to one random plot's sowing date and return
        (new sowing dates, fitness delta).

        The reward is the change in cluster fitness produced by the mutation.
        """
        actual_fitness = self.fitness_sowing_dates_distribution()
        random_plot = np.random.randint(0, 33)
        actual_sowing_dates = self.layers.get_column('pipelines', 'sowing_dates').to_numpy()
        #print(actual_sowing_dates)
        if action == self.ADD_DAY:
            if actual_sowing_dates[random_plot] <= 100:
                actual_sowing_dates[random_plot] += 1
        elif action == self.SUB_DAY:
            if actual_sowing_dates[random_plot] != 0:
                actual_sowing_dates[random_plot] += -1
        elif action == self.SAME_DAY:
            pass
        #print("Taken action:", action)
        self.sow(actual_sowing_dates)
        #self.layers.set_row('pipelines', 'sowing_dates', random_plot, actual_sowing_dates)
        self.estimated_cluster_yield()
        reward = self.fitness_sowing_dates_distribution() - actual_fitness
        return self.layers.get_column('pipelines', 'sowing_dates').to_numpy(), reward
    def render(self):
        """Gym rendering hook: delegate to show() (plots the GIS layers)."""
        return self.show()
    def reset(self):
        """Re-sow random dates and return the fresh state as a NumPy array."""
        self.sow()
        return self.layers.get_column('pipelines', 'sowing_dates').to_numpy()
        #return self.set_sowing_dates(np.ones((116, 1)))
def get_sowing_dates(self):
    """Return the 'sowing_dates' column from the layers store."""
    return self.layers.get_column('sowing_dates')
def get_delta_list(self, delta=7):
    """Build, for every pipeline row, the neighbour list of canal ids whose
    sowing date lies within +/- *delta* days of that row's sowing date.

    Args:
        delta (int): Tolerance window in days. Defaults to 7.

    Returns:
        list[list]: One list of 'canal_id' values per pipeline row.
    """
    # Fetch the layer once instead of on every iteration of both loops.
    pipelines = self.layers.get_data_layer('pipelines')
    network_constraints_matrix = []
    for _, row_q in pipelines.iterrows():
        lower = row_q['sowing_dates'] - delta
        upper = row_q['sowing_dates'] + delta
        neighbours = [
            row_p['canal_id']
            for _, row_p in pipelines.iterrows()
            if lower <= row_p['sowing_dates'] <= upper
        ]
        network_constraints_matrix.append(neighbours)
    return network_constraints_matrix
def estimated_cluster_yield(self):
    """Simulate each plot's season with the crop model, store per-plot
    yields in the 'yield' column, and return their sum."""
    per_plot_yield = []
    for sowing_date in self.layers.get_column('pipelines', 'sowing_dates'):
        self.csm.simulate_canopy_cover(sowing_date)
        self.csm.simulate_ndvi()
        per_plot_yield.append(self.csm.estimate_yield() / 100)
    self.layers.add_column('pipelines', per_plot_yield, 'yield')
    return sum(self.layers.get_column('pipelines', 'yield'))
def show(self):
    """Plot the plots layer colored by canal id and dump both data layers."""
    self.layers.plot(layer_name='plots', column4color='canal_id', alpha=0.8)
    self.layers.show_data_layer(layer_name='plots')
    self.layers.show_data_layer(layer_name='pipelines')
    self.layers.show(layer_name='plots')
def fitness_sowing_dates_distribution(self):
    """Fitness = estimated total yield minus the irrigation-capacity penalty.

    Refreshes the 'remaining' column first so the penalty reflects the
    current sowing-dates distribution.
    """
    self.verify_irrigation_network_constraints()
    total_yield = self.estimated_cluster_yield()
    capacity_penalty = sum(self.layers.get_column('pipelines', 'remaining'))
    return total_yield - capacity_penalty
def verify_irrigation_network_constraints(self):
    """Compute, for each pipeline row, a 'remaining' capacity score over its
    sowing-date neighbourhood and store it in the 'remaining' column.

    For every neighbour list returned by get_delta_list(), each canal id is
    split into branch segments (e.g. 'A1-B2' -> ['A1', 'B2']); for each
    cumulative branch prefix, the branch capacity minus the summed capacity
    of simultaneously activated sub-canals is accumulated into the score.
    """
    # Fetch the layer once; it is re-queried many times below.
    pipelines = self.layers.get_data_layer('pipelines')
    remaining_list = []
    for list_voisin in self.get_delta_list():
        list_score = 0
        for canal in list_voisin:
            # Fix: use a raw string so '\d' is not treated as an invalid
            # string escape (SyntaxWarning on modern Python).
            splited_canal = re.findall(r'[A-Z]*\d+', canal)
            next_branch = ""
            total_remaining = 0
            for p in splited_canal:
                # Neighbour ids with dashes stripped, rebuilt per prefix so
                # removals do not leak between iterations.
                temp_canals_list = [re.sub('-', '', x) for x in list_voisin]
                remaining = 0
                next_branch += p
                branch_rows = pipelines[self.layers.get_column('pipelines', 'canal_id_i') == next_branch]
                if branch_rows['capacity'].shape[0] > 0:
                    if next_branch in temp_canals_list:
                        temp_canals_list.remove(next_branch)
                    common_canal_capacity = branch_rows.capacity.to_numpy()[0]
                    activated_canals_sum = 0
                    for q in temp_canals_list:
                        if next_branch in q:
                            activated_canals_sum += pipelines[self.layers.get_column('pipelines', 'canal_id_i') == q].capacity.to_numpy()[0]
                    remaining = common_canal_capacity - activated_canals_sum
                total_remaining += remaining
            list_score += total_remaining
        remaining_list.append(list_score)
    self.layers.add_column('pipelines', remaining_list, 'remaining')
elhachimi-ch/dst | src/data_science_toolkit/rl_dqn.py | <reponame>elhachimi-ch/dst<filename>src/data_science_toolkit/rl_dqn.py
import time
import os
import re
from stable_baselines3 import PPO, DQN
import gym
from r3 import R3
from stable_baselines3.common.env_checker import check_env
def main():
    """Train a DQN agent on the R3 sowing-date environment.

    Creates the log/model directories, trains for EPISODES x TIMESTEPS
    steps, checkpoints the model after every episode, and prints the total
    wall-clock training time.
    """
    ti = time.time()
    env = R3(fitness_threshold=1000)
    models_dir = "models/DQN"
    logdir = "logs"
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(logdir, exist_ok=True)
    os.makedirs(models_dir, exist_ok=True)
    model = DQN("MlpPolicy", env, verbose=1, tensorboard_log=logdir)
    EPISODES = 10
    TIMESTEPS = 100
    for episode in range(EPISODES):
        # reset_num_timesteps=False keeps a single continuous tensorboard run.
        model.learn(total_timesteps=TIMESTEPS, reset_num_timesteps=False, tb_log_name="DQN")
        # Checkpoint named after the cumulative timestep count.
        model.save(f'{models_dir}/{TIMESTEPS * episode}')
    env.close()
    print(time.time() - ti)
# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()
elhachimi-ch/dst | src/data_science_toolkit/chart.py | <gh_stars>1-10
import seaborn as sns
from matplotlib import pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import matplotlib.ticker as plticker
import numpy as np
import pandas as pd
class Chart:
    """Unified plotting facade over seaborn/matplotlib and plotly express.

    The chart style is selected by name via *chart_type* (one of
    ``chart_type_list``); the *plotly* flag switches between the interactive
    plotly backend and the static seaborn/matplotlib backend.
    """

    chart_type_list = ['line', 'bar', 'box', 'swarm', 'strip_swarm', 'count', 'scatter', 'dist', 'point', 'pair',
                       'correlation_map', 'reg', 'heat_map']

    def __init__(self, dataframe=None, column4x=None, chart_type='pair', group_by=None, columns_names_list=None, plotly=False):
        """Store data and chart configuration.

        Args:
            dataframe: pandas DataFrame holding the data to plot.
            column4x: column used for the x axis; the index when None.
            chart_type: one of ``chart_type_list``.
            group_by: column used for hue/color grouping.
            columns_names_list: subset of columns for pair plots.
            plotly: use plotly express instead of seaborn when True.
        """
        self.dataframe = dataframe
        if column4x is None:
            self.column4x = dataframe.index
        else:
            self.column4x = column4x
        self.chart_type = chart_type
        self.group_by = group_by
        self.columns_names_list = columns_names_list
        self.plotly = plotly
        sns.set_theme(color_codes=True)

    def add_data_to_show(self, data_column=None, column4hover=None, column4size=None, y_column=None, color=None):
        """Build the chart for *data_column* with the configured backend.

        Returns the plotly figure or the seaborn axes/grid that was created.
        """
        print(self.chart_type)
        if self.plotly == True:
            if self.chart_type == self.chart_type_list[0]:
                self.fig = px.line(self.dataframe, x=self.column4x, y=data_column, color=self.group_by, hover_name=column4hover)
            elif self.chart_type == self.chart_type_list[1]:
                # NOTE(review): this branch still uses seaborn even in plotly
                # mode — presumably intentional; confirm with callers.
                self.ax = sns.barplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
                loc = plticker.MultipleLocator(base=1.0)  # ticks at regular intervals
                self.ax.xaxis.set_major_locator(loc)
            elif self.chart_type == self.chart_type_list[2]:
                sns.boxplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[3]:
                sns.swarmplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[4]:
                sns.stripplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[5]:
                sns.countplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[6]:
                # Fix: sns.regplot does not accept a 'hue' keyword (TypeError).
                sns.regplot(data=self.dataframe, x=self.column4x, y=data_column)
            elif self.chart_type == self.chart_type_list[7]:
                # Fix: '.column' was an invalid attribute access and distplot
                # has no 'hue' keyword.
                sns.distplot(self.dataframe[data_column], kde=False)
            elif self.chart_type == self.chart_type_list[8]:
                sns.pointplot(x="class", y="survived", hue=self.group_by, data=self.dataframe, palette={"male": "g", "female": "m"},
                              markers=["^", "o"], linestyles=["-", "--"], capsize=.2)
            elif self.chart_type == self.chart_type_list[9]:
                # Fix: the original instantiated an undefined 'DataFrame'
                # helper here; the column names are available directly.
                self.fig = px.scatter_matrix(self.dataframe, dimensions=list(self.dataframe.columns),
                                             color=self.group_by)
            elif self.chart_type == self.chart_type_list[10]:
                sns.clustermap(self.dataframe.corr(), cmap=sns.diverging_palette(230, 20, as_cmap=True), annot=True,
                               fmt='1%',
                               center=0.0)
            elif self.chart_type == self.chart_type_list[11]:
                self.fig = px.scatter(self.dataframe, x=self.column4x, y=data_column, color=self.group_by, size=column4size, hover_name=column4hover)
        else:
            #sns.set_style("whitegrid")
            if self.chart_type == self.chart_type_list[0]:
                self.ax = sns.lineplot(data=self.dataframe, x=self.column4x, y=data_column, markers=True, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[1]:
                sns.set_theme(style="whitegrid")
                self.ax = sns.barplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[2]:
                self.ax = sns.boxplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[3]:
                self.ax = sns.swarmplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[4]:
                self.ax = sns.stripplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[5]:
                self.ax = sns.countplot(data=self.dataframe, x=self.column4x, y=data_column, hue=self.group_by)
            elif self.chart_type == self.chart_type_list[6]:
                # Fix: sns.regplot does not accept a 'hue' keyword (TypeError).
                self.ax = sns.regplot(data=self.dataframe, x=self.column4x, y=data_column)
            elif self.chart_type == self.chart_type_list[7]:
                # Fix: '.column' was an invalid attribute access and distplot
                # has no 'hue' keyword.
                self.ax = sns.distplot(self.dataframe[data_column], kde=False)
            elif self.chart_type == self.chart_type_list[8]:
                self.ax = sns.pointplot(x="class", y="survived", hue=self.group_by, data=self.dataframe, palette={"male": "g", "female": "m"},
                                        markers=["^", "o"], linestyles=["-", "--"], capsize=.2)
            elif self.chart_type == self.chart_type_list[9]:
                self.ax = sns.pairplot(self.dataframe, hue=self.group_by, vars=self.columns_names_list)
            elif self.chart_type == self.chart_type_list[10]:
                self.ax = sns.clustermap(self.dataframe.corr(), annot=True, center=0.0)
            elif self.chart_type == self.chart_type_list[11]:
                self.ax = sns.jointplot(x=data_column, y=y_column, data=self.dataframe, kind="reg", color=color)
                #self.ax = sns.scatterplot(data=self.dataframe, x=data_column, y=y_column)
            elif self.chart_type == self.chart_type_list[12]:
                # Compute the correlation matrix
                corr = self.dataframe.corr()
                # Generate a mask for the upper triangle
                mask = np.triu(np.ones_like(corr, dtype=bool))
                # Generate a custom diverging colormap
                cmap = sns.diverging_palette(230, 20, as_cmap=True)
                # Draw the heatmap with the mask and correct aspect ratio
                self.ax = sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
                                      square=True, linewidths=.5, cbar_kws={"shrink": .5})
        # Fix: plotly branches populate self.fig (not self.ax); return the
        # object that was actually created instead of always self.ax.
        if self.plotly and hasattr(self, 'fig'):
            return self.fig
        return self.ax

    def plot_on_map(self,
                    iso_locations_column=None,
                    circle_size_column=None,
                    animation_frame_column=None,
                    hover_name_column=None,
                    projection='natural earth',
                    scope='world'):
        """Build a plotly scatter-geo map (one circle per row)."""
        self.fig = px.scatter_geo(
            self.dataframe,
            locations=iso_locations_column,
            size=circle_size_column,
            animation_frame=animation_frame_column,
            hover_name=hover_name_column,
            color=self.group_by,
            projection=projection,
            scope=scope,
        )

    def plot_colored_map(self,
                         iso_locations_column=None,
                         color_column=None,
                         animation_frame_column="Year",
                         scope='world',
                         hover_name_column=None,  # column to add to hover information
                         ):
        """Build a plotly choropleth map colored by *color_column*."""
        self.fig = px.choropleth(
            self.dataframe,
            locations=iso_locations_column,
            scope=scope,
            color=color_column,
            hover_name=hover_name_column,
            color_continuous_scale=px.colors.sequential.Plasma,
            animation_frame=animation_frame_column,  # column on which to animate
            projection='natural earth'
        )

    def show(self):
        """Display the chart with the backend it was built for."""
        if self.plotly:
            self.fig.show()
        else:
            plt.show()

    def config(self,
               title="",
               x_label="X",
               y_label="Y",
               x_limit_i=None,
               x_limit_f=None,
               y_limit_i=None,
               y_limit_f=None,
               interval=None,
               x_rotation_angle=90,
               y_rotation_angle=0,
               titile_font_size=29,
               x_label_font_size=13,
               y_label_font_size=13,
               x_font_size=11,
               y_font_size=11,
               ):
        """Configure titles, labels, limits and tick styling of the chart."""
        if self.plotly:
            self.fig.update_layout(
                # add a title text for the plot
                title_text=title,
                # set projection style for the plot
                #geo = dict(projection={'type':'natural earth'}
            )  # by default, projection type is set to 'equirectangular'
        else:
            plt.title(title)
            plt.xlim(x_limit_i, x_limit_f)
            plt.ylim(y_limit_i, y_limit_f)
            plt.xticks(rotation=x_rotation_angle, fontsize=x_font_size)
            plt.yticks(rotation=y_rotation_angle, fontsize=y_font_size)
            self.ax.set_title(title, fontsize=titile_font_size)
            self.ax.set_xlabel(x_label, fontsize=x_label_font_size)
            self.ax.set_ylabel(y_label, fontsize=y_label_font_size)
            if interval is not None:
                loc = plticker.MultipleLocator(base=interval)  # ticks at regular intervals
                self.ax.xaxis.set_major_locator(loc)

    def save(self, chart_path="output.png", transparent=False):
        """Save the current chart to *chart_path*.

        Fix: plotly figures are exported with write_image() (plotly Figure
        has no savefig method), and for the seaborn/matplotlib backend the
        current pyplot figure is saved (Axes objects have no savefig either).
        """
        if self.plotly is True:
            self.fig.write_image(chart_path)
        else:
            plt.savefig(chart_path, transparent=transparent, bbox_inches='tight', dpi=600)
|
elhachimi-ch/dst | src/data_science_toolkit/model.py | <gh_stars>1-10
"""
Under MIT License by <NAME>
"""
import tensorflow.keras.losses
import tensorflow.keras.optimizers
from tensorflow.keras import backend as K
import sklearn.tree as tree
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Dropout, MaxPool1D, Conv1D, Reshape, LSTM
from tensorflow.keras.models import Sequential
from matplotlib import pyplot as plt
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score, median_absolute_error, mean_squared_log_error, mean_absolute_error, classification_report
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from math import floor, ceil
from collections import deque
from math import sqrt
from sklearn.model_selection import cross_val_score
from joblib import dump, load
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
import pandas as pd
from xgboost import XGBClassifier, XGBRegressor, plot_importance
from sklearn.naive_bayes import GaussianNB
import graphviz
import tensorflow as tf
import numpy as np
from .chart import Chart
from .dataframe import DataFrame
import seaborn as sns
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
class Model:
    """Thin facade unifying scikit-learn, XGBoost and Keras models.

    *model_type* selects the backend ('dt', 'svm', 'lr', 'nb', 'rf', 'xb',
    'dl', 'knn', 'gb'); *task* selects classification ('c') vs regression
    ('r', or 'ts' for time series with the 'dl' backend).
    """

    def __init__(
            self,
            data_x=None,
            data_y=None,
            model_type='knn',
            task='c',
            training_percent=1,
            epochs=50,
            batch_size=32,
            generator=None,
            validation_percentage=0.2
    ):
        # When training_percent < 1, hold out a test split; otherwise all
        # data is used for training.
        if training_percent != 1:
            self.__x_train, self.__x_test, self.__y_train, self.__y_test = train_test_split(data_x,
                                                                                           data_y,
                                                                                           train_size=training_percent,
                                                                                           test_size=1-training_percent)
        else:
            self.x = data_x
            self.y = data_y
        # NOTE(review): this unconditional re-assignment duplicates the else
        # branch above and also runs in the split case — confirm intent.
        self.x = data_x
        self.y = data_y
        self.__y_pred = None
        self.__epochs = epochs
        self.__batch_size = batch_size
        self.__model_type = model_type
        self.__boosted_model = None
        self.__generator = generator
        # NOTE(review): string sentinel 'None' (not the None object).
        self.history = 'None'
        self.__c_or_r_ts = task
        self.__validation_percentage = validation_percentage
        # Instantiate the backend estimator for the requested type/task.
        if model_type == 'dt':
            if task == 'c':
                self.__model = tree.DecisionTreeClassifier()
            else:
                self.__model = tree.DecisionTreeRegressor()
        elif model_type == 'svm':
            if task == 'c':
                self.__model = svm.SVC()
            else:
                self.__model = svm.SVR()
        elif model_type == 'lr':
            if task == 'c':
                self.__model = LogisticRegression(random_state=2)
            else:
                self.__model = LinearRegression()
        elif model_type == 'nb':
            if task == 'c':
                self.__model = MultinomialNB()
            else:
                self.__model = GaussianNB()
        elif model_type == 'rf':
            if task == 'c':
                self.__model = RandomForestClassifier()
            else:
                self.__model = RandomForestRegressor()
        elif model_type == 'xb':
            if task == 'c':
                self.__model = XGBClassifier()
            else:
                self.__model = XGBRegressor()
        elif model_type == 'dl':
            self.__model = Sequential()
        elif model_type == 'knn':
            if task == 'c':
                self.__model = KNeighborsClassifier(n_neighbors=5)
            else:
                self.__model = KNeighborsRegressor(n_neighbors=5)
        elif model_type == 'gb':
            if task == 'c':
                self.__model = GradientBoostingClassifier()
            else:
                self.__model = GradientBoostingRegressor()
        else:
            self.__model = None

    def get_generator(self):
        """Return the Keras data generator, if any."""
        return self.__generator

    def set_generator(self, generator):
        """Set the Keras data generator used by train()."""
        self.__generator = generator

    def get_model(self):
        """Return the underlying backend estimator/Sequential model."""
        return self.__model

    def set_model(self, model):
        """Replace the underlying backend model."""
        self.__model = model

    def add_layer(self, connections_number=2, activation_function='relu', input_dim=None):
        """Add a dense layer to the model architecture
        Args:
            connections_number (int, optional): number of neurons to add. Defaults to 2.
            activation_function (str, optional): function to apply on sum of wi.xi. examples: ['linear', 'relu', 'softmax']. Defaults to 'relu'.
            input_dim (int, optional): number of features in X matrix. Defaults to None.
        """
        if input_dim:
            self.__model.add(Dense(connections_number, activation=activation_function, input_dim=input_dim))
        else:
            self.__model.add(Dense(connections_number, activation=activation_function))

    def add_lstm_layer(self, connections_number=2, activation_function='relu', input_shape=None, return_sequences=True):
        """Add a lstm layer
        Args:
            connections_number (int, optional): [description]. Defaults to 2.
            activation_function (str, optional): [description]. Defaults to 'relu'.
            input_shape ([type], optional): example: (weather_window,1). Defaults to None.
        """
        if input_shape is not None:
            self.__model.add(LSTM(units=connections_number, activation=activation_function, input_shape=input_shape, return_sequences=return_sequences))
        else:
            self.__model.add(LSTM(units=connections_number, activation=activation_function, return_sequences=return_sequences))

    def add_conv_2d_layer(self, filter_nbr=1, filter_shape_tuple=(3, 3), input_shape=None, activation_function='relu'):
        """Add a 2-D convolution layer (pass input_shape for the first layer)."""
        if input_shape:
            self.__model.add(Conv2D(filters=filter_nbr, kernel_size=filter_shape_tuple, input_shape=input_shape,
                                    activation=activation_function))
        else:
            self.__model.add(Conv2D(filters=filter_nbr, kernel_size=filter_shape_tuple,
                                    activation=activation_function))

    def add_conv_1d_layer(self, filter_nbr=1, filter_shape_int=3, input_shape=None, activation_function='relu', strides=10):
        """Add a 1-D convolution layer.

        NOTE(review): the 'strides' parameter is accepted but never passed
        to Conv1D — confirm whether it should be forwarded.
        """
        if input_shape:
            #Input size should be (n_features, 1) == (data_x.shape[1], 1)
            self.__model.add(Conv1D(filters=filter_nbr, kernel_size=filter_shape_int, input_shape=input_shape,
                                    activation=activation_function))
        else:
            self.__model.add(Conv1D(filters=filter_nbr, kernel_size=filter_shape_int,
                                    activation=activation_function))

    def add_pooling_2d_layer(self, pool_size_tuple=(2, 2)):
        """Add a 2-D max-pooling layer."""
        self.__model.add(MaxPooling2D(pool_size=pool_size_tuple))

    def add_pooling_1d_layer(self, pool_size_int=2):
        """Add a 1-D max-pooling layer."""
        self.__model.add(MaxPool1D(pool_size=pool_size_int))

    def add_flatten_layer(self):
        """Add a flatten layer (2-D feature maps -> 1-D vector)."""
        self.__model.add(Flatten())

    def add_reshape_layer(self, input_dim):
        """
        for 1dcnn and 2dcnn use this layer as first layer
        """
        self.__model.add(Reshape((input_dim, 1), input_shape=(input_dim, )))

    """def add_reshape_layer(self, target_shape=None, input_shape=None):
        self.__model.add(Reshape(target_shape=target_shape, input_shape=input_shape))"""

    def add_dropout_layer(self, rate_to_keep_output_value=0.2):
        """ dropout default initial value """
        self.__model.add(Dropout(rate_to_keep_output_value))

    def train(self, loss=tensorflow.keras.losses.mse, optimizer=tensorflow.keras.optimizers.Adam(learning_rate=0.001),
              metrics_as_list_of_functions_or_str=['accuracy']):
        """
        losses and metrics for regresion:
            tensorflow.keras.losses.mse
            r2_keras
        losses and metrics for classification:
            multi classes: tensorflow.keras.losses.categorical_crossentropy
            two classes: tensorflow.keras.losses.binary_crossentropy
        Optimizers:
            tensorflow.keras.optimizers.SGD(learning_rate=0.01)
            tensorflow.keras.optimizers.Adam(learning_rate=0.01)
            ...
        if you pass y as integers use loss='sparse_categorical_crossentropy'
        class Adadelta: Optimizer that implements the Adadelta algorithm.
        class Adagrad: Optimizer that implements the Adagrad algorithm.
        class Adam: Optimizer that implements the Adam algorithm.
        class Adamax: Optimizer that implements the Adamax algorithm.
        class Ftrl: Optimizer that implements the FTRL algorithm.
        class Nadam: Optimizer that implements the NAdam algorithm.
        class Optimizer: Base class for Keras optimizers.
        class RMSprop: Optimizer that implements the RMSprop algorithm.
        class SGD: Gradient descent (with momentum) optimizer.
        """
        if self.__model_type == 'dl':
            # Swap the string placeholder for the actual R2 metric callable.
            if 'r2_keras' in metrics_as_list_of_functions_or_str:
                metrics_as_list_of_functions_or_str.remove('r2_keras')
                metrics_as_list_of_functions_or_str.append(self.r2_keras)
            self.__model.compile(loss=loss, optimizer=optimizer, metrics=metrics_as_list_of_functions_or_str)
            if self.__generator is not None:
                self.history = self.__model.fit(self.get_generator(), epochs=self.__epochs, batch_size=self.__batch_size)
                print(self.history.history)
            else:
                self.history = self.__model.fit(self.x, self.y, epochs=self.__epochs,
                                                batch_size=self.__batch_size, validation_split=self.__validation_percentage)
                print(self.history.history)
                # NOTE(review): self.__x_test only exists when
                # training_percent != 1 — confirm this path is guarded by callers.
                self.__y_pred = self.__model.predict(self.__x_test)
        else:
            self.__model.fit(self.__x_train, self.__y_train)
            self.__y_pred = self.__model.predict(self.__x_test)
        """history_dict = history.history
        loss_values = history_dict['loss']
        val_loss_values = history_dict['val_loss']
        acc_values = history_dict['acc']
        val_acc_values = history_dict['val_acc']"""

    def summary(self):
        """Print the Keras model summary."""
        print(self.__model.summary())

    # binary classification
    def predict(self, x_to_pred):
        """Predict targets for *x_to_pred* with the trained model."""
        return self.__model.predict(x_to_pred)

    def forcast_next_step(self, window):
        """Forecast one timestep ahead from a 1-D history *window*."""
        current_batch = window.reshape((1, window.shape[0], 1))
        # One timestep ahead of historical 12 points
        return self.predict(current_batch)[0]

    def predict_proba(self, x_to_pred):
        """Return class probabilities for *x_to_pred*."""
        return self.__model.predict_proba(x_to_pred)

    def accuracy(self):
        # NOTE(review): accuracy_score is not imported in this module —
        # calling this raises NameError; add the sklearn.metrics import.
        return accuracy_score(self.__y_test, self.__y_pred)

    def precision(self, binary_classification=False):
        # NOTE(review): precision_score is not imported in this module.
        if binary_classification:
            return precision_score(self.__y_test, self.__y_pred)
        return precision_score(self.__y_test, self.__y_pred, average=None)

    def recall(self):
        # NOTE(review): recall_score is not imported in this module.
        return recall_score(self.__y_test, self.__y_pred)

    def f1_score(self):
        # NOTE(review): f1_score (sklearn) is not imported in this module.
        return f1_score(self.__y_test, self.__y_pred)

    def regression_report(self, y_test=None, y_predicted=None, savefig=False):
        """
        pass y_test and y_predected as pandas serie is get_column
        """
        if y_test is not None and y_predicted is not None:
            self.__y_test = y_test
            self.__y_pred = y_predicted
        data = DataFrame(self.__y_test, data_type='list', columns_names_as_list=['y_test'], data_types_in_order=[float])
        data.add_column(self.__y_pred, 'y_predicted')
        data.reset_index(drop=True)
        sns.set_theme(color_codes=True)
        x_plot = np.linspace(0, int(max(self.__y_test)))
        g = sns.FacetGrid(data.get_dataframe(), size=7)
        g = g.map(plt.scatter, "y_test", "y_predicted", edgecolor='w')
        plt.plot(x_plot, x_plot, color='red', label='Identity line')
        g.set_xlabels('Real values')
        g.set_ylabels('Estimated values')
        plt.legend()
        plt.show()
        if savefig is True:
            g.savefig('regression_scatter.png', dpi=600)
        # MSLE is undefined for non-positive values, so it is only included
        # when all values are strictly positive.
        if not np.any(self.__y_test <= 0) or not np.any(self.__y_pred <= 0):
            return {
                'R2': r2_score(self.__y_test, np.squeeze(self.__y_pred)),
                'MSE': mean_squared_error(self.__y_test, np.squeeze(self.__y_pred)),
                'RMSE': sqrt(mean_squared_error(self.__y_test, np.squeeze(self.__y_pred))),
                'MAE': mean_absolute_error(self.__y_test, np.squeeze(self.__y_pred)),
                'MEDAE': median_absolute_error(self.__y_test, np.squeeze(self.__y_pred)),
            }
        return {
            'R2': r2_score(self.__y_test, np.squeeze(self.__y_pred)),
            'MSE': mean_squared_error(self.__y_test, np.squeeze(self.__y_pred)),
            'RMSE': sqrt(mean_squared_error(self.__y_test, np.squeeze(self.__y_pred))),
            'MAE': mean_absolute_error(self.__y_test, np.squeeze(self.__y_pred)),
            'MEDAE': median_absolute_error(self.__y_test, np.squeeze(self.__y_pred)),
            'MSLE': mean_squared_log_error(self.__y_test, np.squeeze(self.__y_pred))
        }

    def classification_report(self, y_test=None, y_predicted=None):
        """
        pass y_test and y_predected as pandas serie is get_column
        """
        if y_test is not None and y_predicted is not None:
            self.__y_test = y_test
        return classification_report(self.__y_test, self.__y_pred)

    def roc_curve(self):
        # NOTE(review): roc_curve/auc (sklearn) are not imported in this
        # module — calling this raises NameError.
        fpr, tpr, thresholds = roc_curve(self.__y_test, self.__y_pred)
        roc_auc = auc(fpr, tpr)
        print("Air sous la courbe" + str(roc_auc))
        plt.figure()
        plt.plot(fpr, tpr, color='orange', lw=2, label='ROC curve(area under curve = % 0.2f)' % roc_auc)
        plt.plot([0, 1], [0, 1], color='darkgrey', lw=2, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.0])
        plt.xlabel('False Positive Rate(1 - Specificity)')
        plt.ylabel('True Positive Rate(Sensitivity)')
        plt.title('ROC Curve')
        plt.legend(loc='upper left')
        plt.show()

    def boost_model(self):
        """Wrap the current estimator in AdaBoost and fit on the train split."""
        ada_boost = AdaBoostClassifier(n_estimators=100, base_estimator=self.__model, learning_rate=0.1, random_state=0)
        self.__boosted_model = ada_boost
        self.__boosted_model.fit(self.__x_train, self.__y_train)

    def predict_with_boosted_model(self, x_to_pred):
        """Predict with the AdaBoost-wrapped model (after boost_model())."""
        return self.__boosted_model.predict(x_to_pred)

    def save_model(self, model_path='data/model.data'):
        """Persist the backend model with joblib."""
        dump(self.__model, model_path)

    def load_model(self, model_path):
        """Load a joblib-persisted backend model."""
        self.__model = load(model_path)

    def report(self):
        """Plot training curves (dl) or print a metrics report (sklearn)."""
        if self.__model_type == 'dl':
            if self.__c_or_r_ts == 'ts' or self.__c_or_r_ts == 'r':
                if self.__validation_percentage == 0:
                    loss = self.history.history['loss']
                    x = range(1, len(loss) + 1)
                    plt.figure(figsize=(12, 5))
                    plt.subplot(1, 2, 1)
                    plt.plot(x, loss, 'b', label='Training loss')
                    plt.title('Training and validation loss')
                    plt.legend()
                    plt.show()
                else:
                    loss = self.history.history['loss']
                    val_loss = self.history.history['val_loss']
                    r2 = self.history.history['r2_keras']
                    val_r2 = self.history.history['val_r2_keras']
                    x = range(1, len(loss) + 1)
                    # (1,2) one row and 2 columns
                    fig, (ax1, ax2) = plt.subplots(1, 2)
                    fig.suptitle('Training and validation monitoring of loss and R2')
                    ax1.plot(x, loss, 'b', label='Training loss')
                    ax1.plot(x, val_loss, 'r', label='Validation loss')
                    ax1.set_title('Loss monitoring')
                    ax1.legend()
                    ax2.plot(x, r2, 'b', label='Training R2')
                    ax2.plot(x, val_r2, 'r', label='Validation R2')
                    ax2.set_title('R2 monitoring')
                    ax2.legend()
                    print(self.regression_report())
                    plt.show()
            elif self.__c_or_r_ts == 'c':
                if self.__validation_percentage == 0:
                    acc = self.history.history['accuracy']
                    loss = self.history.history['loss']
                    x = range(1, len(acc) + 1)
                    plt.figure(figsize=(12, 5))
                    plt.subplot(1, 2, 1)
                    plt.title('Accuracy ')
                    plt.plot(x, acc, 'r', label='Accuracy')
                    plt.legend()
                    plt.subplot(1, 2, 2)
                    plt.plot(x, loss, 'b', label='Loss')
                    plt.title('Loss')
                    plt.legend()
                else:
                    acc = self.history.history['accuracy']
                    val_acc = self.history.history['val_accuracy']
                    loss = self.history.history['loss']
                    val_loss = self.history.history['val_loss']
                    x = range(1, len(acc) + 1)
                    plt.figure(figsize=(12, 5))
                    plt.subplot(2, 2, 1)
                    plt.plot(x, acc, 'b', label='Training accuracy')
                    plt.plot(x, val_acc, 'r', label='Validation accuracy')
                    plt.title('Training and validation accuracy')
                    plt.legend()
                    plt.subplot(2, 2, 2)
                    plt.plot(x, loss, 'b', label='Training loss')
                    plt.plot(x, val_loss, 'r', label='Validation loss')
                    plt.title('Training and validation loss')
                    plt.legend()
        else:
            if self.__c_or_r_ts == 'r':
                print(self.regression_report())
            else:
                print(self.classification_report())

    def cross_validation(self, k):
        """https://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter"""
        # scoring = "neg_mean_squared_error"
        if self.__c_or_r_ts == 'r':
            scoring = "r2"
        elif self.__c_or_r_ts == 'c':
            scoring = "accuracy"
        print(cross_val_score(self.__model, self.x, self.y, cv=k, scoring=scoring))

    def get_features_importance(self):
        """Fit an ExtraTrees model on (x, y), plot and return importances."""
        if self.__c_or_r_ts == 'r':
            etr_model = ExtraTreesRegressor()
            etr_model.fit(self.x, self.y)
            feature_imp = pd.Series(etr_model.feature_importances_, index=self.x.columns)
            feature_imp.nlargest(10).plot(kind='barh')
            plt.show()
        else:
            etr_model = ExtraTreesClassifier()
            etr_model.fit(self.x, self.y)
            feature_imp = pd.Series(etr_model.feature_importances_, index=[i for i in range(self.x.shape[1])])
            feature_imp.nlargest(10).plot(kind='barh')
            plt.show()
        """model = self.__model # or XGBRegressor
        plot_importance(model, importance_type = 'gain') # other options available
        plt.show()
        # if you need a dictionary
        model.get_booster().get_score(importance_type = 'gain')"""
        return etr_model.feature_importances_

    def dt_text_representation(self):
        """Return the fitted decision tree as a plain-text rule listing."""
        return tree.export_text(self.__model)

    def plot_dt_representation(self, viz_type='graph_viz'):
        """Render the fitted decision tree via graphviz or matplotlib."""
        if viz_type == 'graph_viz':
            # DOT data
            dot_data = tree.export_graphviz(self.__model, out_file=None,
                                            feature_names=self.x.columns.values,
                                            class_names=self.y.name,
                                            filled=True)
            # Draw graph
            graph = graphviz.Source(dot_data, format="png")
            return graph
        elif viz_type == 'matplotlib':
            fig = plt.figure(figsize=(25, 20))
            _ = tree.plot_tree(self.__model,
                               feature_names=self.x.columns.values,
                               class_names=self.y.name,
                               filled=True)
            fig.savefig("decistion_tree.png")
            plt.show()

    def viz_reporter(self):
        """Plot ground truth vs predictions for the held-out test set."""
        plt.figure(figsize=(10, 6))
        plt.plot(self.__y_test, linewidth=3, label='ground truth')
        plt.plot(self.__y_pred, linewidth=3, label='predicted')
        plt.legend(loc='best')
        plt.xlabel('X')
        plt.ylabel('target value')

    @staticmethod
    def r2_keras(y_true, y_pred):
        """Keras-compatible R2 metric: 1 - SS_res / SS_tot."""
        SS_res = K.sum(K.square(y_true - y_pred))
        SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
        return (1 - SS_res / (SS_tot + K.epsilon()))
|
elhachimi-ch/dst | src/data_science_toolkit/csm.py | from math import exp
from .dataframe import DataFrame
import numpy as np
import matplotlib.pyplot as plt
from pandas import Series
from .chart import Chart
import seaborn as sns
class CSM:
def __init__(self):
self.crop_type = 'Wheat'
self.season_length = 242
self.climate_dataframe = self.read_climate_dataframe()
self.monitoring = DataFrame(Series([i+1 for i in range(self.season_length)]), ['day'], data_type='list')
self.CC0 = 4.5
self.CCx = 89.33
self.CGC = 0.0089
self.CDC = 0.145
self.CCx_2 = self.CCx / 2
self.Tupper = 33
self.Tbase = 5
self.CGDD_sowing = 82
# hectare
self.area = 100
# crop characteristics #
# 0.32 m-3/3-3 ou % theta_fc
self.wc_field_capacity = 0.32
# 0.17 m-3/3-3 ou % theta_fc
# it is the water quantity below which the crop can no longer extract water, it is the separtion of
# AW or TAW and NAW
self.wc_wilting_point = 0.17
def cc_equation1(self, t):
return self.CC0 * exp(t * self.CGC)
def cc_equation2(self, t):
return self.CCx - (0.25 * exp(-t * self.CGC) * (self.CCx**2)/(self.CC0))
def cc_equation3(self, t):
return self.CCx * (1 - (0.05 * (exp((3.33 * self.CDC) * t/(self.CCx + 2.29)) - 1)))
def simulate_canopy_cover(self, offset=0):
Tmb = np.zeros((self.season_length,))
ti = np.zeros((self.season_length,))
CC = np.zeros((self.season_length,))
Eq1 = np.zeros((self.season_length,))
Eq2 = np.zeros((self.season_length,))
Eq3 = np.zeros((self.season_length,))
for day in range(offset, self.season_length):
if (self.climate_dataframe[day] < 5):
Tmb[day] = 0
else:
if (self.climate_dataframe[day] >= self.Tupper):
Tmb[day] = self.Tupper - self.Tbase
else:
Tmb[day] = self.climate_dataframe[day] - self.Tbase
ti[offset] = Tmb[offset]
for k in range((offset + 1), 242):
ti[k] = Tmb[k] + ti[k - 1]
t0_all = np.argwhere(ti >= self.CGDD_sowing)
t0 = t0_all[0]
for i in range(offset, t0[0]):
CC[i] = 0
CC[t0[0]] = self.CC0
ti[t0[0]] = 0
for p in range((t0[0] + 1), 242):
ti[p] = Tmb[p] + ti[p - 1]
for m in range((t0[0] + 1), 242):
Eq1[m] = self.cc_equation1(ti[m])
for m in range((t0[0] + 1),242):
Eq1[m] = self.cc_equation1(ti[m])
Eq2[m] = self.cc_equation2(ti[m])
Eq2[m] = Eq2[m].round(2)
p1 = np.argwhere(Eq1 >= self.CCx_2)
phase1 = p1[0][0]
for ii in range((t0[0] + 1), phase1):
CC[ii] = Eq1[ii]
p2 = np.argwhere(Eq2 >= self.CCx)
phase2 = p2[0][0]
for jj in range(phase1, phase2):
CC[jj] = Eq2[jj]
ti[phase2] = 0
CC[phase2] = self.CCx
for kk in range((phase2 + 1), 242):
ti[kk] = Tmb[kk] + ti[kk - 1]
Eq3[kk] = self.cc_equation3(ti[kk])
if (Eq3[kk] >= 0):
CC[kk] = Eq3[kk]
else:
CC[kk] = 0
for kk in range((phase2 + 1), 242):
if (Eq3[kk] < self.CCx_2):
day_final = kk - 1
break
self.monitoring.add_column(CC, 'cc')
return CC
def simulate_fc(self):
self.monitoring.add_transformed_columns('fc', 'cc/100')
def simulate_ndvi(self):
self.monitoring.add_transformed_columns('ndvi', '(cc/118)+0.14')
def simulate_kcb(self):
self.monitoring.add_transformed_columns('k_cb', '(1.64*ndvi)-0.2296')
def simulate_ke(self):
self.monitoring.add_transformed_columns('k_e', '[0.2 (1−fc)]')
def simulate_et0(self, method='pm'):
self.monitoring.add_transformed_columns('et_0', '(1.64*ndvi)-0.2296')
def simulate_etc(self, method='double'):
self.monitoring.add_transformed_columns('et_c', '[(1.64 * NDVI)-0.2296]+[0.2 * (1 - fc)]*et_0')
def simulate_p(self, method='pm'):
self.monitoring.add_transformed_columns('p', '0.55+0.04*(5-et_c)')
def simulate_raw(self, method='pm'):
self.monitoring.add_transformed_columns('raw', '0.55+0.04*(5-et_c)')
def simulate_taw(self, method='pm'):
self.monitoring.add_transformed_columns('taw', '1000*(0.32-0.17)*zr')
def estimate_yield(self, method='last_10_ndvi'):
ndvi_list = self.monitoring.get_column('ndvi')
if method == 'max_ndvi':
ndvi_max = float(max(ndvi_list))
estimated_yield = 23.69*ndvi_max - 13.87
elif method == 'last_10_ndvi':
ndvi_list = list(ndvi_list)
sum_of_last_10_ndvi = sum([float(ndvi_list[153-i]) for i in range(10)])
estimated_yield = 1.79*sum_of_last_10_ndvi - 8.62
return estimated_yield*self.area
    def monitor(self):
        """Print the monitoring table and show a two-panel plot of CC and NDVI.

        Side effects only: opens a matplotlib window via plt.show().
        """
        self.monitoring.show()
        fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,5))
        fig.suptitle('Visual simulation')
        # CC
        sns.lineplot(ax=axes[0], x=self.monitoring.get_dataframe().index, y=self.monitoring.get_column('cc').values)
        axes[0].set_title(self.monitoring.get_column('cc').name)
        # fc — disabled: only two axes exist, and NDVI occupies axes[1] below.
        #sns.lineplot(ax=axes[1], x=self.monitoring.get_dataframe().index, y=self.monitoring.get_column('fc').values)
        #axes[1].set_title(self.monitoring.get_column('fc').name)
        # NDVI
        sns.lineplot(ax=axes[1], x=self.monitoring.get_dataframe().index, y=self.monitoring.get_column('ndvi').values)
        axes[1].set_title(self.monitoring.get_column('ndvi').name)
        plt.show()
def read_climate_dataframe(self):
data = DataFrame('mean_temperature.csv')
data.keep_columns(['t_mean'])
return data.get_column_as_list('t_mean') |
nickwanninger/helion | tools/scripts/generate_tokens.py | <filename>tools/scripts/generate_tokens.py<gh_stars>1-10
"""Generate include/helion/tokens.inc, the X-macro token table for the lexer.

Each name below becomes one line: TOKEN(tok_<name>, <index>, "<name>"),
where <index> is the token's position in the list.
"""

# Token names in enum order; the list index is the token's numeric id.
tokens = [
    "eof",
    "num",
    "var",
    "type",
    "self_var",
    "str",
    "keyword",
    "left_curly",
    "right_curly",
    "left_angle",
    "right_angle",
    "left_square",
    "right_square",
    "left_paren",
    "right_paren",
    "typedef",  # actually the 'type' symbol, but that's taken
    "extends",
    "def",
    "term",
    "indent",
    "dedent",
    "or",
    "and",
    "not",
    "let",
    "global",
    "const",
    "some",
    "is_type",
    "colon",
    "do",
    "if",
    "then",
    "else",
    "elif",
    "for",
    "while",
    "return",
    "nil",
    "assign",
    "arrow",
    "fat_arrow",
    "pipe",
    "equal",
    "notequal",
    "gt",
    "gte",
    "lt",
    "lte",
    "add",
    "sub",
    "mul",
    "div",
    "mod",
    "dot",
    "comma",
    "comment",
    "end",
    "question",
]


def generate_tokens(path="include/helion/tokens.inc"):
    """Write the TOKEN(...) table to *path*, creating parent directories as needed.

    Previously the script crashed if include/helion/ did not already exist.
    """
    import os

    directory = os.path.dirname(path)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(path, "w") as f:
        for i, tok in enumerate(tokens):
            f.write(f'TOKEN(tok_{tok}, {i}, "{tok}")\n')


if __name__ == "__main__":
    # Guarded so importing this codegen module no longer clobbers files.
    generate_tokens()
|
DalavanCloud/alluxio-py | alluxio/__init__.py | <reponame>DalavanCloud/alluxio-py<gh_stars>1-10
# -*- coding: utf-8 -*-
from .client import Client
import option
import wire
__version__ = '0.1.1'
|
radjivC/interaction-node-riak | ExampleZukaiRestApi/node_modules/riak-js/node_modules/riakpbc/node_modules/riakproto/riak_pb/msgcodegen.py | <reponame>radjivC/interaction-node-riak<gh_stars>1-10
# Copyright 2013 Basho Technologies, Inc.
#
# This file is provided to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
distutils commands for generating protocol message-code mappings.
"""
__all__ = ['build_messages', 'clean_messages']
import re
import csv
import os
from os.path import isfile
from distutils import log
from distutils.core import Command
from distutils.file_util import write_file
from datetime import date
LICENSE = """# Copyright {0} Basho Technologies, Inc.
#
# This file is provided to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""".format(date.today().year)
class MessageCodeMapping(object):
    """One row of the message-code CSV: numeric code, message name, .proto module.

    The protobuf class for the message is resolved eagerly at construction;
    ``message_class`` is None when the generated pb2 module is unavailable.
    """

    def __init__(self, code, message, proto):
        self.code = int(code)
        self.message = message
        self.proto = proto
        # e.g. 'RpbGetReq' -> 'MSG_CODE_GET_REQ'
        self.message_code_name = self._message_code_name()
        # e.g. 'riak_pb.riak_kv_pb2'
        self.module_name = "riak_pb.{0}_pb2".format(self.proto)
        self.message_class = self._message_class()

    def __cmp__(self, other):
        # NOTE(review): __cmp__ and the cmp() builtin are Python 2 only;
        # Python 3 would need __lt__/__eq__ for these to sort.
        return cmp(self.code, other.code)

    def _message_code_name(self):
        """Derive the MSG_CODE_* constant name from the CamelCase message name."""
        strip_rpb = re.sub(r"^Rpb", "", self.message)
        # Insert underscores at CamelCase word boundaries, then upper-case.
        word = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', strip_rpb)
        word = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', word)
        word = word.replace("-", "_")
        return "MSG_CODE_" + word.upper()

    def _message_class(self):
        """Import the generated pb2 module and look up the message class.

        Returns None (logging at debug level) when the module or the class
        inside it is missing.
        """
        try:
            pbmod = __import__(self.module_name, globals(), locals(),
                               [self.message])
            klass = pbmod.__dict__[self.message]
            return klass
        except KeyError:
            log.debug("Did not find '{0}' message class in module '{1}'",
                      self.message, self.module_name)
        except ImportError:
            log.debug("Could not import module '{0}'", self.module_name)
        return None
class clean_messages(Command):
    """
    Cleans generated message code mappings. Add to the build process
    using::
        setup(cmd_class={'clean_messages': clean_messages})
    """
    description = "clean generated protocol message code mappings"
    user_options = [
        ('destination', None, 'destination Python source file')
    ]

    def initialize_options(self):
        # distutils hook: declare options before resolution.
        self.destination = None

    def finalize_options(self):
        # Borrow the destination from build_messages so both commands agree.
        self.set_undefined_options('build_messages',
                                   ('destination', 'destination'))

    def run(self):
        # Delete the generated file if present; no-op otherwise.
        if isfile(self.destination):
            self.execute(os.remove, [self.destination],
                         msg="removing {0}".format(self.destination))
class build_messages(Command):
    """
    Generates message code mappings. Add to the build process using::
        setup(cmd_class={'build_messages': build_messages})
    """
    description = "generate protocol message code mappings"
    user_options = [
        ('source=', None, 'source CSV file containing message code mappings'),
        ('destination=', None, 'destination Python source file')
    ]

    # Used in loading and generating.
    # NOTE(review): these are class-level (shared) containers; running the
    # command twice in one process would accumulate entries across runs.
    _pb_imports = set()
    _messages = set()
    _linesep = os.linesep
    _indented_item_sep = ',{0} '.format(_linesep)
    # Header lines written after the license in the generated module.
    # Fix: a comma was missing after the first '' element, so it implicitly
    # concatenated with the next string literal and the intended blank line
    # between the license and the DO-NOT-EDIT banner was silently dropped.
    _docstring = [
        '',
        '# This is a generated file. DO NOT EDIT.',
        '',
        '"""',
        'Constants and mappings between Riak protocol codes and messages.',
        '"""',
        ''
    ]

    def initialize_options(self):
        self.source = None
        self.destination = None

    def finalize_options(self):
        # Default locations relative to the package root.
        if self.source is None:
            self.source = 'src/riak_pb_messages.csv'
        if self.destination is None:
            self.destination = 'riak_pb/messages.py'

    def run(self):
        # make_file skips regeneration when the destination is newer than the source.
        self.make_file(self.source, self.destination,
                       self._load_and_generate, [])

    def _load_and_generate(self):
        self._load()
        self._generate()

    def _load(self):
        """Read the CSV and collect message mappings plus the pb2 modules to import."""
        # NOTE(review): 'rb' + csv.reader is Python 2 usage; Python 3 needs
        # mode 'r' with newline=''. Left as-is — this file targets Python 2.
        with open(self.source, 'rb') as csvfile:
            reader = csv.reader(csvfile)
            for row in reader:
                message = MessageCodeMapping(*row)
                self._messages.add(message)
                self._pb_imports.add(message.module_name)

    def _generate(self):
        """Assemble the generated module line-by-line, then write it out."""
        self._contents = []
        self._generate_doc()
        self._generate_imports()
        self._generate_codes()
        self._generate_classes()
        write_file(self.destination, self._contents)

    def _generate_doc(self):
        # Write the license and docstring header.
        self._contents.append(LICENSE)
        self._contents.extend(self._docstring)

    def _generate_imports(self):
        # Write imports, sorted for a stable diff between regenerations.
        for im in sorted(self._pb_imports):
            self._contents.append("import {0}".format(im))

    def _generate_codes(self):
        # Write one MSG_CODE_* constant per message, ordered by code.
        self._contents.extend(['', "# Protocol codes"])
        for message in sorted(self._messages):
            self._contents.append("{0} = {1}".format(message.message_code_name,
                                                     message.code))

    def _generate_classes(self):
        # Write the code -> protobuf-class dict literal.
        classes = [self._generate_mapping(message)
                   for message in sorted(self._messages)]
        classes = self._indented_item_sep.join(classes)
        self._contents.extend(['',
                               "# Mapping from code to protobuf class",
                               'MESSAGE_CLASSES = {',
                               '    ' + classes,
                               '}'])

    def _generate_mapping(self, m):
        """Render one 'MSG_CODE_X: module.Class' entry, wrapping long lines."""
        if m.message_class is not None:
            klass = "{0}.{1}".format(m.module_name,
                                     m.message_class.__name__)
        else:
            klass = "None"
        pair = "{0}: {1}".format(m.message_code_name, klass)
        if len(pair) > 76:
            # Try to satisfy PEP8, lulz
            pair = (self._linesep + '    ').join(pair.split(' '))
        return pair
|
radjivC/interaction-node-riak | ExampleZukaiRestApi/node_modules/riak-js/node_modules/riakpbc/node_modules/riakproto/riak_pb/riak_pb/__init__.py | from riak_pb2 import *
from riak_kv_pb2 import *
from riak_search_pb2 import *
from riak_dt_pb2 import *
from riak_yokozuna_pb2 import *
|
senzil/cabot-alert-hipchat | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
# Single source of truth for the package version; also used to build the
# GitHub archive download URL below.
VERSION = '2.0.3'
setup(name='cabot-alert-hipchat',
      version=VERSION,
      description='A Hipchat alert plugin for Cabot by Arachnys',
      author='Arachnys',
      author_email='<EMAIL>',
      url='http://cabotapp.com',
      packages=find_packages(),
      download_url='https://github.com/cabotapp/cabot-alert-hipchat/archive/{}.zip'.format(VERSION),
      )
|
kuduta/QueueSystem | QueueWebProject/QueueWebProject/app.py | """
This script runs the application using a development server.
It contains the definition of routes and views for the application.
command >>> from api import db
command >>> db.create_all()
"""
import os
import queue
import sys
import time
from datetime import datetime

import pygame
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
# Resolve paths relative to this module so the SQLite file sits next to it.
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+os.path.join(basedir, 'queue.sqlite')
# Disable the modification-tracking system (unused here; saves overhead).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
##### class play sound
class playsound:
    """Namespace wrapper for pygame audio playback (used as playsound.playsound(n))."""

    @staticmethod
    def playsound(num):
        """Play sound/<num>.wav and block ~1.1 s while it plays.

        Declared @staticmethod: the original had no 'self' parameter, so any
        instance call would have crashed. 'time' is imported at module top.
        """
        # NOTE(review): pygame.init() on every call is wasteful but harmless.
        pygame.init()
        pygame.mixer.music.load("sound/" + str(num) + ".wav")
        pygame.mixer.music.play()
        time.sleep(1.1)
######## end classs playsound
###### Start classs queue #######
class Queue:
    """FIFO queue over a plain list; the newest element sits at index 0."""

    def __init__(self):
        # Backing store for queued items.
        self.queue = []

    def enqueue(self, data):
        """Insert *data* at the front unless already queued.

        Returns True when added, False for a duplicate.
        """
        if data in self.queue:
            return False
        self.queue.insert(0, data)
        return True

    def dequeue(self):
        """Remove and return the oldest item (list tail); message string if empty."""
        if not self.queue:
            return "Queue Empty!"
        return self.queue.pop()

    def size(self):
        """Number of items currently queued."""
        return len(self.queue)

    def printQueue(self):
        """Expose the backing list (newest first)."""
        return self.queue
###### end classs queue #######
# Make the WSGI interface available at the top level so wfastcgi can get it.
wsgi_app = app.wsgi_app
class Data(db.Model):
    """Persisted queue ticket (one row per issued queue number)."""
    #Table name
    __tablename__ = "tbqueue"
    id = db.Column(db.Integer, primary_key=True)
    # Ticket number (presumably type digit + zero-padded counter — verify against api.py).
    numQue_ = db.Column(db.String(10))
    # Queue/service type.
    numType_ = db.Column(db.String(10))
    # Counter/desk that serves the ticket.
    counter_ = db.Column(db.String(10))
    # NOTE(review): 'dtReqest_' is a typo for 'dtRequest_' but is part of the
    # schema and of queries elsewhere — renaming requires a migration.
    dtReqest_ = db.Column(db.String(30))
    dtStart_ = db.Column(db.String(30))
    dtStop_ = db.Column(db.String(30))

    def __init__(self, numQue_, numType_, counter_, dtReqest_, dtStart_, dtStop_ ):
        self.numQue_ = numQue_
        self.numType_ = numType_
        self.counter_ = counter_
        self.dtReqest_ = dtReqest_
        self.dtStart_ = dtStart_
        self.dtStop_ = dtStop_
@app.route('/addque/<numtype>',methods=['GET'])
def addqueue(numtype):
#numtype = request.args.get('numtype')
#dtnow = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return "Add Queue"
@app.route('/reqque')
def reqqueue():
    """Placeholder endpoint for requesting a queue."""
    return "Reqest Queue"
@app.route('/startque')
def startqueue():
    """Placeholder endpoint for starting service of a ticket."""
    return "Start Queue"
@app.route('/endque')
def endqueue():
    """Placeholder endpoint for finishing service of a ticket."""
    return "End Queue"
@app.route('/')
def hello():
    """Renders a sample page."""
    return "Hello World!"
if __name__ == '__main__':
    import os
    # Host/port come from the environment (IIS/wfastcgi convention), with
    # localhost:5555 as the development fallback.
    HOST = os.environ.get('SERVER_HOST', 'localhost')
    try:
        PORT = int(os.environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # Non-numeric SERVER_PORT: fall back to the default.
        PORT = 5555
    app.run(HOST, PORT)
|
kuduta/QueueSystem | QueueWebProject/QueueWebProject/playsound.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 15 18:35:06 2019
@author: nexpose
"""
import pygame
import time
import sys
def playsound(num):
    """Play sound/<num>.wav via pygame and block ~1.1 s while it plays."""
    pygame.init()
    #pygame.mixer.music.load("sounds/"+ str(num)+".mp3")
    pygame.mixer.music.load("sound/"+ str(num)+".wav")
    pygame.mixer.music.play()
    time.sleep(1.1)
#playsound(8)
#playsound(8)
# NOTE(review): 'input' shadows the builtin input() and this literal value is
# dead code — it is overwritten from argv below.
input = '765645'
#seconds = int(input('How many seconds to wait ? '))
#for i in range(seconds):
#print (str(seconds - i )+ ' seconds remain' )
# print (str(i+1 )+ ' seconds remain' )
# time.sleep(1)
print ('Argument List:', str(sys.argv[1]))
# Queue number to announce, taken from the first CLI argument (crashes if missing).
input=str(sys.argv[1])
#x = list(input)
# Split the queue number into its individual digit characters.
x = list(input)
# Sound-clip base names: intro phrase, counter announcement, closing chime.
f = 'invit'
a = 'atport1'
e = 'end'
playsound(f)
time.sleep(0.3)
# Speak each digit of the queue number in order.
for i in x:
    playsound(i)
playsound(a)
playsound(3)
playsound(e)
|
kuduta/QueueSystem | QueueWebProject/QueueWebProject/api.py | <reponame>kuduta/QueueSystem
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 29 17:25:09 2019
@author: nexpose
"""
import sys
import time
import sqlalchemy as db
import pandas as pd
from datetime import datetime
from flask import Flask , request ,jsonify
app = Flask(__name__)
# Make the WSGI interface available at the top level so wfastcgi can get it.
wsgi_app = app.wsgi_app
# NOTE(review): the engine/connection are created at import time; a failure
# opening queue.sqlite makes this module unimportable.
engine = db.create_engine('sqlite:///queue.sqlite')
connection = engine.connect()
metadata=db.MetaData()
# Reflect the existing tbqueue table definition from the database.
tbqueue = db.Table('tbqueue', metadata, autoload=True, autoload_with=engine)
'''
result = session.query(tbqueue).all()
for row in result:
print (row)
type(result)
'''
###### Start classs queue #######
class Queue:
    """In-memory FIFO queue backed by a list (front of the list = newest)."""

    def __init__(self):
        # Items live here; index 0 is the most recently enqueued.
        self.queue = []

    def enqueue(self, data):
        """Queue *data* once; duplicates are rejected.

        Returns True on insertion, False when *data* was already present.
        """
        if data in self.queue:
            return False
        self.queue.insert(0, data)
        return True

    def dequeue(self):
        """Pop the oldest element (tail of the list), or a message when empty."""
        if not self.queue:
            return "Queue Empty!"
        return self.queue.pop()

    def size(self):
        """Current element count."""
        return len(self.queue)

    def printQueue(self):
        """Return the underlying list, newest element first."""
        return self.queue
###### end classs queue #######
# One in-memory queue per service type ('1'..'5').
que1 = Queue()
que2 = Queue()
que3 = Queue()
que4 = Queue()
que5 = Queue()
# NOTE(review): leftover manual test calls — they run on every import
# (que5 ends up empty again) and should probably be deleted.
que5.enqueue('31231242')
que5.printQueue()
que5.dequeue()
'''
query=db.select([tbqueue])
result = connection.execute(query)
for row in result:
print(row)
'''
def dbquery(typenumber):
    """Build the next ticket number for *typenumber* from today's tbqueue rows.

    Returns (quenumber, dtnow): the ticket string and the current timestamp.

    NOTE(review): several likely bugs here, left untouched pending intent:
    - if today has no rows, pd.DataFrame(result) is empty and df[1] raises KeyError;
    - `df[1] != ""` compares a whole pandas column — truth-testing the result
      raises "truth value of a Series is ambiguous" whenever it has != 1 element;
    - df[1] is the numQue_ column (full ticket string), so int(df[1]) is not the
      bare counter, and it is never incremented — duplicate tickets would result.
    """
    dtsearch = datetime.now().strftime('%Y-%m-%d')
    dtnow = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Most recent ticket of this type issued today.
    query=db.select([tbqueue]).where(db.and_(tbqueue.columns.dtReqest_.like(dtsearch+'%'),\
        tbqueue.columns.numType_ == typenumber)).order_by(tbqueue.columns.id.desc()).limit(1)
    result = connection.execute(query)
    df = pd.DataFrame(result)
    # df[1] is number queue
    if df[1] != "":
        number = int(df[1])
        quenumber = typenumber+str(number).zfill(4)
    else :
        number = 1
        quenumber = typenumber+str(number).zfill(4)
    return quenumber, dtnow
@app.route('/addque/<numtype>',methods=['GET'])
def addqueue(numtype):
    """Issue the next ticket for queue type *numtype* ('1'..'5').

    Generates the ticket via dbquery(), records it in the matching in-memory
    queue and in the tbqueue table, and returns the ticket as JSON.
    Returns 0 (unchanged from before) for an unknown type.

    Fixes over the previous version:
    - types '4' and '5' now enqueue into que4/que5 (both used que2 before);
    - type '5' no longer crashes on an undefined name (the 'pquenum'/'quenum'
      typo) — the five copy-pasted branches are replaced by a dispatch table.
    """
    queues = {'1': que1, '2': que2, '3': que3, '4': que4, '5': que5}
    target = queues.get(numtype)
    if target is None:
        # Preserved behavior: unknown types return 0.
        return 0
    quenum, datenow = dbquery(numtype)
    target.enqueue(quenum)
    # Persist the ticket; dtStart_/dtStop_ stay empty until the ticket is served.
    ins = tbqueue.insert().values(numQue_=quenum, numType_=numtype, dtReqest_=datenow)
    connection.execute(ins)
    return jsonify( quenumber=quenum, sizeque=target.size(), daterequest= datenow )
@app.route('/reqque')
def reqqueue():
    """Placeholder endpoint for requesting a queue."""
    return "Reqest Queue"
@app.route('/startque')
def startqueue():
    """Placeholder endpoint for starting service of a ticket."""
    return "Start Queue"
@app.route('/endque')
def endqueue():
    """Placeholder endpoint for finishing service of a ticket."""
    return "End Queue"
@app.route('/')
def hello():
    """Renders a sample page."""
    return "Hello World!"
if __name__ == '__main__':
    import os
    # Host/port come from the environment (IIS/wfastcgi convention), with
    # localhost:5555 as the development fallback.
    HOST = os.environ.get('SERVER_HOST', 'localhost')
    try:
        PORT = int(os.environ.get('SERVER_PORT', '5555'))
    except ValueError:
        # Non-numeric SERVER_PORT: fall back to the default.
        PORT = 5555
    app.run(HOST, PORT)
|
ANDRESROMEROH/aws-solutions-architect | rekognition-app/rekognitionlambda/index.py | <filename>rekognition-app/rekognitionlambda/index.py<gh_stars>0
#
# Lambda function to detect labels in image using Amazon Rekognition
#
import logging
import boto3
from botocore.exceptions import ClientError
import os
from urllib.parse import unquote_plus
from boto3.dynamodb.conditions import Key, Attr
# Set the minimum confidence for Amazon Rekognition
minConfidence = 50
"""MinConfidence parameter (float) -- Specifies the minimum confidence level for the labels to return.
Amazon Rekognition doesn't return any labels with a confidence lower than this specified value.
If you specify a value of 0, all labels are returned, regardless of the default thresholds that the
model version applies."""
## Instantiate service clients outside of handler for context reuse / performance
# Constructor for our s3 client object
s3_client = boto3.client("s3")
# Constructor to create rekognition client object
rekognition_client = boto3.client("rekognition")
# Constructor for DynamoDB resource object
dynamodb = boto3.resource("dynamodb")
def handler(event, context):
    """Lambda entry point: run label detection for every S3 record in the event."""
    print("Lambda processing event: ", event)
    for rec in event["Records"]:
        s3_info = rec["s3"]
        # Hand each uploaded object's bucket/key to the Rekognition pipeline.
        rekFunction(s3_info["bucket"]["name"], s3_info["object"]["key"])
    return
def rekFunction(ourBucket, ourKey):
    """Detect labels for s3://ourBucket/ourKey and store them in DynamoDB.

    Writes one item keyed by the image name, with attributes object1..objectN
    for each detected label. Requires the TABLE environment variable.
    """
    # Clean the string to add the colon back into requested name which was substitued by Amplify Library.
    safeKey = replaceSubstringWithColon(ourKey)
    print("Currently processing the following image")
    print("Bucket: " + ourBucket + " key name: " + safeKey)
    # Try and retrieve labels from Amazon Rekognition, using the confidence level we set in minConfidence var
    try:
        detectLabelsResults = rekognition_client.detect_labels(
            Image={"S3Object": {"Bucket": ourBucket, "Name": safeKey}},
            MaxLabels=10,
            MinConfidence=minConfidence,
        )
        # Create our array and dict for our label construction
        objectsDetected = []
        imageLabels = {"image": safeKey}
        # Add all of our labels into imageLabels by iterating over response['Labels']
        for label in detectLabelsResults["Labels"]:
            newItem = label["Name"]
            objectsDetected.append(newItem)
            objectNum = len(objectsDetected)
            itemAtt = f"object{objectNum}"
            # We now have our shiny new item ready to put into DynamoDB
            imageLabels[itemAtt] = newItem
        # Instantiate a table resource object of our environment variable
        imageLabelsTable = os.environ["TABLE"]
        table = dynamodb.Table(imageLabelsTable)
        table.put_item(Item=imageLabels)
    except ClientError as e:
        # NOTE(review): AWS errors are logged and swallowed, so a failed image
        # is silently skipped — the S3 event is still considered handled.
        logging.error(e)
    return
# Clean the string to add the colon back into the requested name.
def replaceSubstringWithColon(txt):
    """Return *txt* with percent-encoded colons ("%3A"/"%3a") restored to ':'.

    The Amplify upload library substitutes ':' in object keys; this reverses
    it so the S3 key matches what Rekognition/DynamoDB should see. Lowercase
    "%3a" is now handled too, since percent-encoding hex digits are
    case-insensitive.
    """
    return txt.replace("%3A", ":").replace("%3a", ":")
jeffkess/excel2gtfs | excel2gtfs.py | <gh_stars>0
""""------------------------------------------------------------------
Excel2GTFS v0.0.8
(c) <NAME>, 2021-12-12-1620
0.0.1 Initial Commit
0.0.2 Schedule data processing
0.0.3 Support for calendar dates and overrides
0.0.4 GTFS specification conformity adjustments
0.0.5 Post-midnight trip support & config sheets
0.0.6 Adds feed info support and attribution
0.0.7 Converts to a function for variable filename operation
0.0.8 Suppresses openpyxl warnings
------------------------------------------------------------------"""
import openpyxl
import csv
import os
import datetime
import sys
import warnings
# Suppress openpyxl warnings
warnings.filterwarnings('ignore', category=UserWarning, module='openpyxl')
def excel2gtfs(filename=None):
"""Function to convert an excel template to GTFS"""
# Select Workbook and Load
wb = openpyxl.load_workbook(filename if filename else "excel2gtfsTemplate.xlsm", data_only=True)
# Identify applicable sheets
config_sheets = {"Agency", "Routes", "Stops", "Fare Rules", "Fare Attributes", "Shapes", "Calendar", "Calendar Dates", "Feed Info"}
skip_sheets = {"Settings & Checks"}
services = set(wb.sheetnames) - config_sheets - skip_sheets
config_sheets = config_sheets.intersection(wb.sheetnames) - skip_sheets
# Create Output Directory
fp = "Excel2GTFS Output Created " + datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
os.makedirs(fp)
# ------------------------------------------------------------------------------
# Process and Write Configuration Sheets
# ------------------------------------------------------------------------------
for sheet_name in config_sheets:
# Parse Dates in GTFS Format
data = wb[sheet_name].values
data = [[(val.strftime("%Y%m%d") if type(val)==datetime.datetime else val) for val in row] for row in data]
# Process Calendar Overrides
if sheet_name == "Calendar Dates" and data[1:]:
override_dates = {}
calendar_dates = []
# Process Override Entries and Extract Type 3s
for row in data[1:]:
# Convert to List of Dicts
row = {data[0][index]: val for index, val in enumerate(row)}
# Extract Type 3s or Add Generic Calendar Dates
if str(row["exception_type"])=="3":
override_dates[row["date"]].append(row["service_id"]) if row["date"] in override_dates else override_dates.update({row["date"]: [row["service_id"]]})
else:
calendar_dates.append(row)
# Covert Override Entries to Type 1/2s
for date, svcs in override_dates.items():
[calendar_dates.append({"service_id": svc, "date": date, "exception_type": ("1" if svc in svcs else "2")}) for svc in services]
# Covert Calendar Dates back to List vs Dict
data = [list(calendar_dates[0])] + [[row[key] for key in list(calendar_dates[0])] for row in calendar_dates[1:]]
# Append excel2gtfs Attribution
if sheet_name == "Feed Info" and data[1:]:
for row in data[1:]:
row[data[0].index("feed_publisher_name")] += " (Created via the excel2gtfs tool)"
# Save GTFS Configuration File
with open(f'{fp}/{sheet_name.lower().replace(" ", "_")}.txt', "w") as file:
writer = csv.writer(file)
writer.writerows(data)
# ------------------------------------------------------------------------------
# Process Schedule Data
# ------------------------------------------------------------------------------
# Initialize Services
special_keys = ["route_id", "direction_id", "shape_id", "headsign", "wheelchair_accessible", "bikes_allowed", "Then Every", "Until"]
gtfs_entries = {"trips": [], "stop_times": [], "frequencies": []}
# Process Schedule Sheets
for service in services:
svc_trips = list(wb[service].values)
svc_trip_dicts = [{svc_trips[1][index]: val for index, val in enumerate(row)} for row in svc_trips[2:] if any(row)]
for trip in svc_trip_dicts:
# Identify Stops and Define trip_id by Origin and Departure Time
trip_stop_times = sorted([(key, (str(val.day*24 + val.hour) if type(val)==datetime.datetime else val.strftime("%H")) + val.strftime(":%M:%S")) for key, val in trip.items() if key not in special_keys and val], key=lambda x: x[-1])
trip_id = "-".join(str(item) for item in [service, *trip_stop_times[0]])
# Append trips.txt Entries
gtfs_entries["trips"].append({
"service_id": service,
"trip_id": trip_id,
"route_id": trip.get("route_id"),
"direction_id": trip.get("direction_id"),
"shape_id": trip.get("shape_id", ""),
"trip_headsign": trip.get("headsign", ""),
"wheelchair_accessible": trip.get("wheelchair_accessible", ""),
"bikes_allowed": trip.get("bikes_allowed", ""),
})
# Append stop_time.txt Entries
[gtfs_entries["stop_times"].append({
"trip_id": trip_id,
"arrival_time": val[1],
"departure_time": val[1],
"stop_id": str(val[0]),
"stop_sequence": index,
"pickup_type": "1" if index==len(trip_stop_times)-1 else "0",
"drop_off_type": "1" if index==0 else "0"
}) for index, val in enumerate(trip_stop_times)]
# Append frequencies.txt Entries
if trip.get("Then Every") and trip.get("Until"):
gtfs_entries["frequencies"].append({
"trip_id": trip_id,
"start_time": trip_stop_times[0][-1],
"end_time": trip["Until"].strftime("%H:%M:%S"),
"headway_secs": (trip["Then Every"].hour*60*60 + trip["Then Every"].minute*60 + trip["Then Every"].second),
"exact_times": "1"
})
# ------------------------------------------------------------------------------
# Write Schedule Data
# ------------------------------------------------------------------------------
for key in gtfs_entries:
if gtfs_entries[key]:
with open(f'{fp}/{key}.txt', "w") as file:
writer = csv.DictWriter(file, fieldnames=list(gtfs_entries[key][0]))
writer.writeheader()
writer.writerows(gtfs_entries[key])
if __name__=="__main__":
filepath = input("Enter the filepath for excel2gtfs conversion:\n> ") if len(sys.argv) < 2 else sys.argv[1]
excel2gtfs(filepath)
|
jivii/connect-express-oauth | server/python/server.py | <gh_stars>0
#! /usr/bin/env python3.6
"""
server.py
Stripe Sample.
Python 3.6 or newer required.
"""
import json
import os
import secrets
import string
import stripe
from dotenv import load_dotenv, find_dotenv
from flask import Flask, jsonify, render_template, redirect, request, session, send_from_directory
import urllib
# Setup Stripe python client library
load_dotenv(find_dotenv())
stripe.api_key = os.getenv('STRIPE_SECRET_KEY')
stripe.api_version = os.getenv('STRIPE_API_VERSION', '2019-12-03')
static_dir = str(os.path.abspath(os.path.join(__file__ , "..", os.getenv("STATIC_DIR"))))
app = Flask(__name__, static_folder=static_dir,
static_url_path="", template_folder=static_dir)
# Set the secret key to some random bytes. Keep this really secret!
# This enables Flask sessions.
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
@app.route('/', methods=['GET'])
def get_example():
return render_template('index.html')
@app.route("/get-oauth-link", methods=["GET"])
def construct_oauth_link():
state = ''.join([secrets.choice(string.ascii_letters + string.digits) for n in range(16)])
session['state'] = state
args = {"client_id": os.getenv('STRIPE_CLIENT_ID'), "state": state}
url = "https://connect.stripe.com/express/oauth/authorize?{}".format(urllib.parse.urlencode(args))
return jsonify({'url': url})
@app.route("/authorize-oauth", methods=["GET"])
def handle_oauth_redirect():
if request.args.get("state") != session['state']:
return json.dumps({"error": "Incorrect state parameter: " + request.args.get("state")}), 403
# Send the authorization code to Stripe's API.
code = request.args.get("code")
try:
response = stripe.OAuth.token(grant_type="authorization_code", code=code,)
except stripe.oauth_error.OAuthError as e:
return json.dumps({"error": "Invalid authorization code: " + code}), 400
except Exception as e:
return json.dumps({"error": "An unknown error occurred."}), 500
connected_account_id = response["stripe_user_id"]
save_account_id(connected_account_id)
# Render some HTML or redirect to a different page.
return redirect("/success.html")
def save_account_id(id):
# Save the connected account ID from the response to your database.
print("Connected account ID: ", id) |
hexops/Azure-Kinect-Python | k4a/__init__.py | from .pyk4a import *
from .pyk4abt import * |
hexops/Azure-Kinect-Python | k4a/pyk4a.py | import ctypes
import enum
import sys
import os
try:
    # Prefer the vendored Azure Kinect SDK DLL shipped with this repo.
    dirPath = os.path.dirname(os.path.abspath(__file__))+r'/../vendor/azure_kinect/windows/amd64/'
    # NOTE(review): debug print at import time — consider removing or logging.
    print(dirPath)
    _k4a = ctypes.CDLL(dirPath+r'k4a.dll')
    # Prepend to PATH so dependent DLLs (depthengine, etc.) resolve.
    os.environ['PATH'] = dirPath+';'+os.environ['PATH']
except Exception as e1:
    try:
        # Fall back to the default Windows SDK install location.
        dirPath = r'C:/Program Files/Azure Kinect SDK v1.4.1/sdk/windows-desktop/amd64/release/bin/'
        _k4a = ctypes.CDLL(dirPath+r'k4a.dll')
        os.environ['PATH'] = dirPath+';'+os.environ['PATH']
    except Exception as e2:
        try:
            # Last resort: rely on the system loader (Linux shared object).
            _k4a = ctypes.CDLL('k4a.so')
        except Exception as e3:
            # NOTE(review): sys.exit at import time kills the host process;
            # raising ImportError would be friendlier to embedding code.
            print("Failed to load library", e1, e2, e3)
            sys.exit(1)
# Opaque handle types mirroring the C K4A_DECLARE_HANDLE macro: each is a
# struct holding one pointer-sized reserved field, and the public *_t name
# is a ctypes POINTER to it, matching the SDK's handle-by-pointer ABI.
# K4A_DECLARE_HANDLE(k4a_device_t);
class _handle_k4a_device_t(ctypes.Structure):
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]
k4a_device_t = ctypes.POINTER(_handle_k4a_device_t)
# K4A_DECLARE_HANDLE(k4a_capture_t);
class _handle_k4a_capture_t(ctypes.Structure):
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]
k4a_capture_t = ctypes.POINTER(_handle_k4a_capture_t)
# K4A_DECLARE_HANDLE(k4a_image_t);
class _handle_k4a_image_t(ctypes.Structure):
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]
k4a_image_t = ctypes.POINTER(_handle_k4a_image_t)
# K4A_DECLARE_HANDLE(k4a_transformation_t);
class _handle_k4a_transformation_t(ctypes.Structure):
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]
k4a_transformation_t = ctypes.POINTER(_handle_k4a_transformation_t)
# Plain int constants mirroring the k4a C enums. The commented-out
# "#class ...(CtypeIntEnum):" headers mark where each C enum begins; bare
# ints are used so values pass straight through ctypes without wrapping.
#class k4a_result_t(CtypeIntEnum):
K4A_RESULT_SUCCEEDED = 0
K4A_RESULT_FAILED = 1
#class k4a_buffer_result_t(CtypeIntEnum):
K4A_BUFFER_RESULT_SUCCEEDED = 0
K4A_BUFFER_RESULT_FAILED = 1
K4A_BUFFER_RESULT_TOO_SMALL = 2
#class k4a_wait_result_t(CtypeIntEnum):
K4A_WAIT_RESULT_SUCCEEDED = 0
K4A_WAIT_RESULT_FAILED = 1
K4A_WAIT_RESULT_TIMEOUT = 2
#class k4a_log_level_t(CtypeIntEnum):
K4A_LOG_LEVEL_CRITICAL = 0
K4A_LOG_LEVEL_ERROR = 1
K4A_LOG_LEVEL_WARNING = 2
K4A_LOG_LEVEL_INFO = 3
K4A_LOG_LEVEL_TRACE = 4
K4A_LOG_LEVEL_OFF = 5
#class k4a_depth_mode_t(CtypeIntEnum):
K4A_DEPTH_MODE_OFF = 0
K4A_DEPTH_MODE_NFOV_2X2BINNED = 1
K4A_DEPTH_MODE_NFOV_UNBINNED = 2
K4A_DEPTH_MODE_WFOV_2X2BINNED = 3
K4A_DEPTH_MODE_WFOV_UNBINNED = 4
K4A_DEPTH_MODE_PASSIVE_IR = 5
#class k4a_color_resolution_t(CtypeIntEnum):
K4A_COLOR_RESOLUTION_OFF = 0
K4A_COLOR_RESOLUTION_720P = 1
K4A_COLOR_RESOLUTION_1080P = 2
K4A_COLOR_RESOLUTION_1440P = 3
K4A_COLOR_RESOLUTION_1536P = 4
K4A_COLOR_RESOLUTION_2160P = 5
K4A_COLOR_RESOLUTION_3072P = 6
#class k4a_image_format_t(CtypeIntEnum):
K4A_IMAGE_FORMAT_COLOR_MJPG = 0
K4A_IMAGE_FORMAT_COLOR_NV12 = 1
K4A_IMAGE_FORMAT_COLOR_YUY2 = 2
K4A_IMAGE_FORMAT_COLOR_BGRA32 = 3
K4A_IMAGE_FORMAT_DEPTH16 = 4
K4A_IMAGE_FORMAT_IR16 = 5
K4A_IMAGE_FORMAT_CUSTOM8 = 6
K4A_IMAGE_FORMAT_CUSTOM16 = 7
K4A_IMAGE_FORMAT_CUSTOM = 8
#class k4a_transformation_interpolation_type_t(CtypeIntEnum):
K4A_TRANSFORMATION_INTERPOLATION_TYPE_NEAREST = 0
K4A_TRANSFORMATION_INTERPOLATION_TYPE_LINEAR = 1
#class k4a_fps_t(CtypeIntEnum):
K4A_FRAMES_PER_SECOND_5 = 0
K4A_FRAMES_PER_SECOND_15 = 1
K4A_FRAMES_PER_SECOND_30 = 2
#class k4a_color_control_command_t(CtypeIntEnum):
K4A_COLOR_CONTROL_EXPOSURE_TIME_ABSOLUTE = 0
K4A_COLOR_CONTROL_AUTO_EXPOSURE_PRIORITY = 1
K4A_COLOR_CONTROL_BRIGHTNESS = 2
K4A_COLOR_CONTROL_CONTRAST = 3
K4A_COLOR_CONTROL_SATURATION = 4
K4A_COLOR_CONTROL_SHARPNESS = 5
K4A_COLOR_CONTROL_WHITEBALANCE = 6
K4A_COLOR_CONTROL_BACKLIGHT_COMPENSATION = 7
K4A_COLOR_CONTROL_GAIN = 8
K4A_COLOR_CONTROL_POWERLINE_FREQUENCY = 9
#class k4a_color_control_mode_t(CtypeIntEnum):
K4A_COLOR_CONTROL_MODE_AUTO = 0
K4A_COLOR_CONTROL_MODE_MANUAL = 1
#class k4a_wired_sync_mode_t(CtypeIntEnum):
K4A_WIRED_SYNC_MODE_STANDALONE = 0
K4A_WIRED_SYNC_MODE_MASTER = 1
K4A_WIRED_SYNC_MODE_SUBORDINATE = 2
#class k4a_calibration_type_t(CtypeIntEnum):
K4A_CALIBRATION_TYPE_UNKNOWN = -1
K4A_CALIBRATION_TYPE_DEPTH = 0
K4A_CALIBRATION_TYPE_COLOR = 1
K4A_CALIBRATION_TYPE_GYRO = 2
K4A_CALIBRATION_TYPE_ACCEL = 3
K4A_CALIBRATION_TYPE_NUM = 4
#class k4a_calibration_model_type_t(CtypeIntEnum):
K4A_CALIBRATION_LENS_DISTORTION_MODEL_UNKNOWN = 0
K4A_CALIBRATION_LENS_DISTORTION_MODEL_THETA = 1
K4A_CALIBRATION_LENS_DISTORTION_MODEL_POLYNOMIAL_3K = 2
K4A_CALIBRATION_LENS_DISTORTION_MODEL_RATIONAL_6KT = 3
K4A_CALIBRATION_LENS_DISTORTION_MODEL_BROWN_CONRADY = 4
#class k4a_firmware_build_t(CtypeIntEnum):
K4A_FIRMWARE_BUILD_RELEASE = 0
K4A_FIRMWARE_BUILD_DEBUG = 1
#class k4a_firmware_signature_t(CtypeIntEnum):
K4A_FIRMWARE_SIGNATURE_MSFT = 0
K4A_FIRMWARE_SIGNATURE_TEST = 1
K4A_FIRMWARE_SIGNATURE_UNSIGNED = 2
#define K4A_SUCCEEDED(_result_) (_result_ == K4A_RESULT_SUCCEEDED)
def K4A_SUCCEEDED(result):
    """Python port of the C macro: True when *result* equals K4A_RESULT_SUCCEEDED (0)."""
    return result == K4A_RESULT_SUCCEEDED
#define K4A_FAILED(_result_) (!K4A_SUCCEEDED(_result_))
def K4A_FAILED(result):
    """Python port of the C macro: logical negation of K4A_SUCCEEDED."""
    return not K4A_SUCCEEDED(result)
# TODO(Andoryuuta): Callbacks, are these needed?
"""
typedef void(k4a_logging_message_cb_t)(void *context,
k4a_log_level_t level,
const char *file,
const int line,
const char *message);
typedef void(k4a_memory_destroy_cb_t)(void *buffer, void *context);
typedef uint8_t *(k4a_memory_allocate_cb_t)(int size, void **context);
"""
class _k4a_device_configuration_t(ctypes.Structure):
    """ctypes mirror of the C k4a_device_configuration_t struct.

    Field order and types must match the SDK header exactly; enum-typed C
    fields are carried as plain c_int.
    """
    _fields_= [
        ("color_format", ctypes.c_int),
        ("color_resolution", ctypes.c_int),
        ("depth_mode", ctypes.c_int),
        ("camera_fps", ctypes.c_int),
        ("synchronized_images_only", ctypes.c_bool),
        ("depth_delay_off_color_usec", ctypes.c_int32),
        ("wired_sync_mode", ctypes.c_int),
        ("subordinate_delay_off_master_usec", ctypes.c_uint32),
        ("disable_streaming_indicator", ctypes.c_bool),
    ]
# Public alias matching the C type name.
k4a_device_configuration_t = _k4a_device_configuration_t
class _k4a_calibration_extrinsics_t(ctypes.Structure):
    """ctypes mirror of k4a_calibration_extrinsics_t: a 3x3 rotation matrix
    (row-major, flattened to 9 floats) plus a 3-float translation."""
    _fields_= [
        ("rotation", ctypes.c_float * 9),
        ("translation", ctypes.c_float * 3),
    ]
k4a_calibration_extrinsics_t = _k4a_calibration_extrinsics_t
class _param(ctypes.Structure):
    """Named view of the camera intrinsic parameters (one arm of the
    k4a_calibration_intrinsic_parameters_t union).

    NOTE(review): p2 is listed before p1 — this matches the SDK header's
    declaration order; confirm before "fixing" the ordering.
    """
    _fields_ = [
        ("cx", ctypes.c_float),
        ("cy", ctypes.c_float),
        ("fx", ctypes.c_float),
        ("fy", ctypes.c_float),
        ("k1", ctypes.c_float),
        ("k2", ctypes.c_float),
        ("k3", ctypes.c_float),
        ("k4", ctypes.c_float),
        ("k5", ctypes.c_float),
        ("k6", ctypes.c_float),
        ("codx", ctypes.c_float),
        ("cody", ctypes.c_float),
        ("p2", ctypes.c_float),
        ("p1", ctypes.c_float),
        ("metric_radius", ctypes.c_float),
    ]
class k4a_calibration_intrinsic_parameters_t(ctypes.Union):
    """Intrinsics accessible either by name (`param`) or as a raw
    15-float vector (`v`), exactly like the C union."""
    _fields_= [
        ("param", _param),
        ("v", ctypes.c_float * 15),
    ]
class _k4a_calibration_intrinsics_t(ctypes.Structure):
    """ctypes mirror of k4a_calibration_intrinsics_t.

    `type` carries the C k4a_calibration_model_type_t enum as a plain int;
    `parameter_count` says how many entries of `parameters.v` are valid.
    """
    _fields_= [
        ("type", ctypes.c_int),
        ("parameter_count", ctypes.c_uint),
        ("parameters", k4a_calibration_intrinsic_parameters_t),
    ]
k4a_calibration_intrinsics_t = _k4a_calibration_intrinsics_t
class _k4a_calibration_camera_t(ctypes.Structure):
    """ctypes mirror of k4a_calibration_camera_t: one camera's extrinsics,
    intrinsics, sensor resolution, and valid-lens metric radius."""
    _fields_= [
        ("extrinsics", k4a_calibration_extrinsics_t),
        ("intrinsics", k4a_calibration_intrinsics_t),
        ("resolution_width", ctypes.c_int),
        ("resolution_height", ctypes.c_int),
        ("metric_radius", ctypes.c_float),
    ]
k4a_calibration_camera_t = _k4a_calibration_camera_t
class _k4a_calibration_t(ctypes.Structure):
    """ctypes mirror of k4a_calibration_t for a given depth mode and color
    resolution.

    `extrinsics` is a K4A_CALIBRATION_TYPE_NUM x K4A_CALIBRATION_TYPE_NUM
    table of transforms between the calibration coordinate systems
    (presumably indexed [source][target] as in the SDK — confirm).
    """
    _fields_= [
        ("depth_camera_calibration", k4a_calibration_camera_t),
        ("color_camera_calibration", k4a_calibration_camera_t),
        ("extrinsics", (k4a_calibration_extrinsics_t * K4A_CALIBRATION_TYPE_NUM) * K4A_CALIBRATION_TYPE_NUM),
        ("depth_mode", ctypes.c_int),
        ("color_resolution", ctypes.c_int),
    ]
k4a_calibration_t = _k4a_calibration_t
class _k4a_version_t(ctypes.Structure):
    """ctypes mirror of k4a_version_t (major.minor.iteration)."""
    _fields_= [
        ("major", ctypes.c_uint32),
        ("minor", ctypes.c_uint32),
        ("iteration", ctypes.c_uint32),
    ]
k4a_version_t = _k4a_version_t
class _k4a_hardware_version_t(ctypes.Structure):
    """ctypes mirror of k4a_hardware_version_t: per-subsystem firmware
    versions plus build flavor and signature type (enums as c_int)."""
    _fields_= [
        ("rgb", k4a_version_t),
        ("depth", k4a_version_t),
        ("audio", k4a_version_t),
        ("depth_sensor", k4a_version_t),
        ("firmware_build", ctypes.c_int),
        ("firmware_signature", ctypes.c_int),
    ]
k4a_hardware_version_t = _k4a_hardware_version_t
class _xy(ctypes.Structure):
    """Named (x, y) view for the k4a_float2_t union."""
    _fields_= [
        ("x", ctypes.c_float),
        ("y", ctypes.c_float),
    ]
class k4a_float2_t(ctypes.Union):
    """2D float vector, accessible by name (`xy`) or as a 2-float array (`v`)."""
    _fields_= [
        ("xy", _xy),
        ("v", ctypes.c_float * 2)
    ]
class _xyz(ctypes.Structure):
    """Named (x, y, z) view for the k4a_float3_t union."""
    _fields_= [
        ("x", ctypes.c_float),
        ("y", ctypes.c_float),
        ("z", ctypes.c_float),
    ]
class k4a_float3_t(ctypes.Union):
    """3D float vector, accessible by name (`xyz`) or as a 3-float array (`v`)."""
    _fields_= [
        ("xyz", _xyz),
        ("v", ctypes.c_float * 3)
    ]
class _k4a_imu_sample_t(ctypes.Structure):
    """ctypes mirror of k4a_imu_sample_t: one accelerometer + gyroscope
    sample with per-sensor timestamps in microseconds."""
    _fields_= [
        ("temperature", ctypes.c_float),
        ("acc_sample", k4a_float3_t),
        ("acc_timestamp_usec", ctypes.c_uint64),
        ("gyro_sample", k4a_float3_t),
        ("gyro_timestamp_usec", ctypes.c_uint64),
    ]
k4a_imu_sample_t = _k4a_imu_sample_t
# Index of the default device for k4a_device_open.
K4A_DEVICE_DEFAULT = 0
# Timeout value meaning "wait forever" for capture/IMU reads.
K4A_WAIT_INFINITE = -1
# TODO(Andoryuuta): Not sure if a single instance of the default config like this will work, might need a creation function.
# Module-level singleton: mutating it affects every user of this module, so
# callers should copy it before customizing.
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL = k4a_device_configuration_t()
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.color_format = K4A_IMAGE_FORMAT_COLOR_MJPG
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.color_resolution = K4A_COLOR_RESOLUTION_OFF
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.depth_mode = K4A_DEPTH_MODE_OFF
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.camera_fps = K4A_FRAMES_PER_SECOND_30
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.synchronized_images_only = False
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.depth_delay_off_color_usec = 0
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.wired_sync_mode = K4A_WIRED_SYNC_MODE_STANDALONE
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.subordinate_delay_off_master_usec = 0
K4A_DEVICE_CONFIG_INIT_DISABLE_ALL.disable_streaming_indicator = False
# Functions
#K4A_EXPORT k4a_result_t k4a_device_open(uint32_t index, k4a_device_t *device_handle);
k4a_device_open = _k4a.k4a_device_open
k4a_device_open.restype=ctypes.c_int
k4a_device_open.argtypes=(ctypes.c_uint32, ctypes.POINTER(k4a_device_t))
#K4A_EXPORT k4a_result_t k4a_device_start_cameras(k4a_device_t device_handle, const k4a_device_configuration_t *config);
k4a_device_start_cameras = _k4a.k4a_device_start_cameras
k4a_device_start_cameras.restype=ctypes.c_int
k4a_device_start_cameras.argtypes=(k4a_device_t, ctypes.POINTER(k4a_device_configuration_t))
"""
K4A_EXPORT k4a_result_t k4a_device_get_calibration(k4a_device_t device_handle,
const k4a_depth_mode_t depth_mode,
const k4a_color_resolution_t color_resolution,
k4a_calibration_t *calibration);
"""
k4a_device_get_calibration = _k4a.k4a_device_get_calibration
k4a_device_get_calibration.restype=ctypes.c_int
k4a_device_get_calibration.argtypes=(k4a_device_t, ctypes.c_int, ctypes.c_int, ctypes.POINTER(k4a_calibration_t))
"""
K4A_EXPORT k4a_wait_result_t k4a_device_get_capture(k4a_device_t device_handle,
k4a_capture_t *capture_handle,
int32_t timeout_in_ms);
"""
k4a_device_get_capture = _k4a.k4a_device_get_capture
k4a_device_get_capture.restype=ctypes.c_int
k4a_device_get_capture.argtypes=(k4a_device_t, ctypes.POINTER(k4a_capture_t), ctypes.c_int32)
#K4A_EXPORT void k4a_capture_release(k4a_capture_t capture_handle);
k4a_capture_release = _k4a.k4a_capture_release
k4a_capture_release.argtypes=(k4a_capture_t,)
#K4A_EXPORT void k4a_image_release(k4a_image_t image_handle);
k4a_image_release = _k4a.k4a_image_release
k4a_image_release.argtypes=(k4a_image_t,)
#K4A_EXPORT void k4a_device_stop_cameras(k4a_device_t device_handle);
k4a_device_stop_cameras = _k4a.k4a_device_stop_cameras
k4a_device_stop_cameras.argtypes=(k4a_device_t,)
#K4A_EXPORT void k4a_device_close(k4a_device_t device_handle);
k4a_device_close = _k4a.k4a_device_close
k4a_device_close.argtypes=(k4a_device_t,)
|
hexops/Azure-Kinect-Python | k4a/pyk4abt.py | import ctypes
import enum
import sys
import os
from .pyk4a import k4a_float3_t, k4a_calibration_t, k4a_capture_t, k4a_image_t
# Locate and load the native body-tracking library, trying three locations
# in order; on total failure report all three errors and abort the import.
try:
    # 1) Vendored Windows copy shipped inside this repository.
    dirPath = os.path.dirname(os.path.abspath(__file__))+r'/../vendor/azure_kinect/windows/amd64/'
    _k4abt = ctypes.CDLL(dirPath+r'k4abt.dll')
    # Prepend to PATH so the DLL's own dependencies resolve from the same dir.
    os.environ['PATH'] = dirPath+';'+os.environ['PATH']
except Exception as e1:
    try:
        # 2) Default Body Tracking SDK install location on Windows.
        dirPath = r'C:/Program Files/Azure Kinect Body Tracking SDK/tools/'
        _k4abt = ctypes.CDLL(dirPath+r'k4abt.dll')
        os.environ['PATH'] = dirPath+';'+os.environ['PATH']
    except Exception as e2:
        try:
            # 3) Last resort: shared object on the system loader path (Linux).
            _k4abt = ctypes.CDLL('k4abt.so')
        except Exception as e3:
            print("Failed to load library", e1, e2, e3)
            sys.exit(1)
# K4A_DECLARE_HANDLE(k4abt_tracker_t);
class _handle_k4abt_tracker_t(ctypes.Structure):
    """Opaque handle struct backing k4abt_tracker_t; never dereferenced in Python."""
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]
k4abt_tracker_t = ctypes.POINTER(_handle_k4abt_tracker_t)
# K4A_DECLARE_HANDLE(k4abt_frame_t);
class _handle_k4abt_frame_t(ctypes.Structure):
    """Opaque handle struct backing k4abt_frame_t; never dereferenced in Python."""
    _fields_= [
        ("_rsvd", ctypes.c_size_t),
    ]
k4abt_frame_t = ctypes.POINTER(_handle_k4abt_frame_t)
#class k4abt_joint_id_t(CtypeIntEnum):
# Joint ids of the k4abt skeleton (k4abt_joint_id_t), 32 joints total.
K4ABT_JOINT_PELVIS = 0
K4ABT_JOINT_SPINE_NAVEL = 1
K4ABT_JOINT_SPINE_CHEST = 2
K4ABT_JOINT_NECK = 3
K4ABT_JOINT_CLAVICLE_LEFT = 4
K4ABT_JOINT_SHOULDER_LEFT = 5
K4ABT_JOINT_ELBOW_LEFT = 6
K4ABT_JOINT_WRIST_LEFT = 7
K4ABT_JOINT_HAND_LEFT = 8
K4ABT_JOINT_HANDTIP_LEFT = 9
K4ABT_JOINT_THUMB_LEFT = 10
K4ABT_JOINT_CLAVICLE_RIGHT = 11
K4ABT_JOINT_SHOULDER_RIGHT = 12
K4ABT_JOINT_ELBOW_RIGHT = 13
K4ABT_JOINT_WRIST_RIGHT = 14
K4ABT_JOINT_HAND_RIGHT = 15
K4ABT_JOINT_HANDTIP_RIGHT = 16
K4ABT_JOINT_THUMB_RIGHT = 17
K4ABT_JOINT_HIP_LEFT = 18
K4ABT_JOINT_KNEE_LEFT = 19
K4ABT_JOINT_ANKLE_LEFT = 20
K4ABT_JOINT_FOOT_LEFT = 21
K4ABT_JOINT_HIP_RIGHT = 22
K4ABT_JOINT_KNEE_RIGHT = 23
K4ABT_JOINT_ANKLE_RIGHT = 24
K4ABT_JOINT_FOOT_RIGHT = 25
K4ABT_JOINT_HEAD = 26
K4ABT_JOINT_NOSE = 27
K4ABT_JOINT_EYE_LEFT = 28
K4ABT_JOINT_EAR_LEFT = 29
K4ABT_JOINT_EYE_RIGHT = 30
K4ABT_JOINT_EAR_RIGHT = 31
# BUGFIX: was 33. The enum above ends at EAR_RIGHT = 31, so the joint count
# is 32 (matching the SDK's k4abt_joint_id_t). k4abt_skeleton_t sizes its
# joints array with this constant; an oversized Python mirror would not
# match the C struct layout used by k4abt_frame_get_body_skeleton.
K4ABT_JOINT_COUNT = 32
#class k4abt_sensor_orientation_t(CtypeIntEnum):
# Physical mounting orientation of the sensor, used by the tracker config.
K4ABT_SENSOR_ORIENTATION_DEFAULT = 0
K4ABT_SENSOR_ORIENTATION_CLOCKWISE90 = 1
K4ABT_SENSOR_ORIENTATION_COUNTERCLOCKWISE90 = 2
K4ABT_SENSOR_ORIENTATION_FLIP180 = 3
#class k4abt_tracker_processing_mode_t(CtypeIntEnum):
# Whether body-tracking inference runs on GPU or CPU.
K4ABT_TRACKER_PROCESSING_MODE_GPU = 0
K4ABT_TRACKER_PROCESSING_MODE_CPU = 1
class _k4abt_tracker_configuration_t(ctypes.Structure):
    """ctypes mirror of k4abt_tracker_configuration_t (enums as c_int)."""
    _fields_= [
        ("sensor_orientation", ctypes.c_int),
        ("processing_mode", ctypes.c_int),
        ("gpu_device_id", ctypes.c_int32),
    ]
k4abt_tracker_configuration_t = _k4abt_tracker_configuration_t
class _wxyz(ctypes.Structure):
    """Named (w, x, y, z) view for the k4a_quaternion_t union."""
    _fields_= [
        ("w", ctypes.c_float),
        ("x", ctypes.c_float),
        ("y", ctypes.c_float),
        ("z", ctypes.c_float),
    ]
class k4a_quaternion_t(ctypes.Union):
    """Quaternion, accessible by name (`wxyz`) or as a 4-float array (`v`)."""
    _fields_= [
        ("wxyz", _wxyz),
        ("v", ctypes.c_float * 4)
    ]
#class k4abt_joint_confidence_level_t(CtypeIntEnum):
# Per-joint tracking confidence reported in k4abt_joint_t.confidence_level.
K4ABT_JOINT_CONFIDENCE_NONE = 0
K4ABT_JOINT_CONFIDENCE_LOW = 1
K4ABT_JOINT_CONFIDENCE_MEDIUM = 2
K4ABT_JOINT_CONFIDENCE_HIGH = 3
K4ABT_JOINT_CONFIDENCE_LEVELS_COUNT = 4
class _k4abt_joint_t(ctypes.Structure):
    """ctypes mirror of k4abt_joint_t: one skeleton joint's position,
    orientation, and tracking confidence (enum as c_int)."""
    _fields_= [
        ("position", k4a_float3_t),
        ("orientation", k4a_quaternion_t),
        ("confidence_level", ctypes.c_int),
    ]
k4abt_joint_t = _k4abt_joint_t
class k4abt_skeleton_t(ctypes.Structure):
    """ctypes mirror of k4abt_skeleton_t: fixed-size array of all joints.

    The array length comes from K4ABT_JOINT_COUNT and must match the C
    struct layout expected by k4abt_frame_get_body_skeleton.
    """
    _fields_= [
        ("joints", _k4abt_joint_t * K4ABT_JOINT_COUNT),
    ]
class k4abt_body_t(ctypes.Structure):
    """ctypes mirror of k4abt_body_t: a body id plus its skeleton."""
    _fields_= [
        ("id", ctypes.c_uint32),
        ("skeleton", k4abt_skeleton_t),
    ]
# Pixel value in the body-index map that marks background (no body).
K4ABT_BODY_INDEX_MAP_BACKGROUND = 255
# Sentinel body id returned when no valid body exists.
K4ABT_INVALID_BODY_ID = 0xFFFFFFFF
# Default temporal smoothing factor (0.0 = no smoothing).
K4ABT_DEFAULT_TRACKER_SMOOTHING_FACTOR = 0.0
# TODO(Andoryuuta): Not sure if a single instance of the default config like this will work, might need a creation function.
# Module-level singleton: copy before customizing, since mutations are
# visible to every user of this module.
K4ABT_TRACKER_CONFIG_DEFAULT = k4abt_tracker_configuration_t()
K4ABT_TRACKER_CONFIG_DEFAULT.sensor_orientation = K4ABT_SENSOR_ORIENTATION_DEFAULT
K4ABT_TRACKER_CONFIG_DEFAULT.processing_mode = K4ABT_TRACKER_PROCESSING_MODE_GPU
K4ABT_TRACKER_CONFIG_DEFAULT.gpu_device_id = 0
# Functions
# restype/argtypes are declared for each native entry point; C enum results
# (k4a_result_t / k4a_wait_result_t) are surfaced as plain ints, and
# void-returning functions leave restype unset.
k4abt_tracker_create = _k4abt.k4abt_tracker_create
k4abt_tracker_create.restype=ctypes.c_int
# NOTE: the configuration struct is passed by value, per the C signature.
k4abt_tracker_create.argtypes=(ctypes.POINTER(k4a_calibration_t), k4abt_tracker_configuration_t, ctypes.POINTER(k4abt_tracker_t))
k4abt_tracker_destroy = _k4abt.k4abt_tracker_destroy
k4abt_tracker_destroy.argtypes=(k4abt_tracker_t,)
k4abt_tracker_set_temporal_smoothing = _k4abt.k4abt_tracker_set_temporal_smoothing
k4abt_tracker_set_temporal_smoothing.argtypes=(k4abt_tracker_t, ctypes.c_float)
k4abt_tracker_enqueue_capture = _k4abt.k4abt_tracker_enqueue_capture
k4abt_tracker_enqueue_capture.restype=ctypes.c_int
k4abt_tracker_enqueue_capture.argtypes=(k4abt_tracker_t, k4a_capture_t, ctypes.c_int32)
k4abt_tracker_pop_result = _k4abt.k4abt_tracker_pop_result
k4abt_tracker_pop_result.restype=ctypes.c_int
k4abt_tracker_pop_result.argtypes=(k4abt_tracker_t, ctypes.POINTER(k4abt_frame_t), ctypes.c_int32)
k4abt_tracker_shutdown = _k4abt.k4abt_tracker_shutdown
k4abt_tracker_shutdown.argtypes=(k4abt_tracker_t,)
k4abt_frame_release = _k4abt.k4abt_frame_release
k4abt_frame_release.argtypes=(k4abt_frame_t,)
k4abt_frame_reference = _k4abt.k4abt_frame_reference
k4abt_frame_reference.argtypes=(k4abt_frame_t,)
k4abt_frame_get_num_bodies = _k4abt.k4abt_frame_get_num_bodies
k4abt_frame_get_num_bodies.restype=ctypes.c_uint32
k4abt_frame_get_num_bodies.argtypes=(k4abt_frame_t,)
k4abt_frame_get_body_skeleton = _k4abt.k4abt_frame_get_body_skeleton
k4abt_frame_get_body_skeleton.restype=ctypes.c_int
k4abt_frame_get_body_skeleton.argtypes=(k4abt_frame_t, ctypes.c_uint32, ctypes.POINTER(k4abt_skeleton_t))
k4abt_frame_get_body_id = _k4abt.k4abt_frame_get_body_id
k4abt_frame_get_body_id.restype=ctypes.c_uint32
k4abt_frame_get_body_id.argtypes=(k4abt_frame_t, ctypes.c_uint32)
k4abt_frame_get_device_timestamp_usec = _k4abt.k4abt_frame_get_device_timestamp_usec
k4abt_frame_get_device_timestamp_usec.restype=ctypes.c_uint64
k4abt_frame_get_device_timestamp_usec.argtypes=(k4abt_frame_t,)
k4abt_frame_get_body_index_map = _k4abt.k4abt_frame_get_body_index_map
k4abt_frame_get_body_index_map.restype=k4a_image_t
k4abt_frame_get_body_index_map.argtypes=(k4abt_frame_t,)
k4abt_frame_get_capture = _k4abt.k4abt_frame_get_capture
k4abt_frame_get_capture.restype=k4a_capture_t
k4abt_frame_get_capture.argtypes=(k4abt_frame_t,)
# Smoke-test hook: running the module directly confirms the native library loaded.
if __name__ == "__main__":
    print("Main called okay.")
|
medtray/MultiEm-RGCN | utils.py | <reponame>medtray/MultiEm-RGCN<gh_stars>0
"""
Utility functions for link prediction
Most code is adapted from authors' implementation of RGCN link prediction:
https://github.com/MichSchli/RelationPrediction
"""
import numpy as np
import torch
import dgl
import datetime
import re
import random
import networkx as nx
from matplotlib import pyplot as plt
#######################################################################
#
# Utility function for building training and testing graphs
#
#######################################################################
def print_log(msg='', end='\n'):
    """Print *msg* prefixed with a `[Y/M/D HH:MM:SS]` timestamp.

    A string message is split on newlines and each line is printed with its
    own timestamp prefix; a non-string message is printed as a single line.
    Only the last line uses the caller-supplied `end`.
    """
    now = datetime.datetime.now()
    stamp = (str(now.year) + '/' + str(now.month) + '/' + str(now.day) + ' '
             + str(now.hour).zfill(2) + ':' + str(now.minute).zfill(2) + ':'
             + str(now.second).zfill(2))
    lines = msg.split('\n') if isinstance(msg, str) else [msg]
    for line in lines:
        # NOTE: comparison is by value, so any line *equal* to the final one
        # also receives the custom `end` (quirk preserved from the original).
        terminator = end if line == lines[-1] else '\n'
        print('[' + stamp + '] ' + str(line), end=terminator)
def loadWord2Vec(filename):
    """Read word vectors from a whitespace-separated text file.

    Each valid line has the form ``word v1 v2 ... vN`` with at least two
    vector components; shorter lines (e.g. a word2vec header) are skipped.

    :param filename: path to the vector file.
    :return: tuple (vocab, embd, word_vector_map) where `vocab` is the list
        of words, `embd` the parallel list of float vectors, and
        `word_vector_map` maps each word to its vector.
    """
    vocab = []
    embd = []
    word_vector_map = {}
    # BUGFIX: use a context manager so the file is closed even if a line
    # fails to parse (the original leaked the handle on error).
    with open(filename, 'r') as file:
        for line in file:
            row = line.strip().split(' ')
            if len(row) > 2:
                vocab.append(row[0])
                vector = [float(value) for value in row[1:]]
                embd.append(vector)
                word_vector_map[row[0]] = vector
    print_log('Loaded Word Vectors!')
    return vocab, embd, word_vector_map
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py

    Applies the Kim (2014) substitutions in order — strip disallowed
    characters, pad contractions and punctuation with spaces, collapse
    whitespace — then returns the stripped, lower-cased result.
    """
    # (pattern, replacement) pairs, applied strictly in this order.
    substitutions = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"\'s", " \'s"),
        (r"\'ve", " \'ve"),
        (r"n\'t", " n\'t"),
        (r"\'re", " \'re"),
        (r"\'d", " \'d"),
        (r"\'ll", " \'ll"),
        (r",", " , "),
        (r"!", " ! "),
        (r"\(", " \( "),
        (r"\)", " \) "),
        (r"\?", " \? "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip().lower()
def get_adj_and_degrees(num_nodes, triplets):
    """Build a per-node adjacency list and the out-degree of every node.

    For each triplet (src, rel, dst) at index i, the entry appended under
    `src` is [i, dst, rel] (edge id, neighbor, relation).

    :return: (adj_list, degrees) where adj_list is a list of numpy arrays
        (one per node) and degrees is the array of list lengths.
    """
    adjacency = [[] for _ in range(num_nodes)]
    for edge_id, (src, rel, dst) in enumerate(triplets):
        adjacency[src].append([edge_id, dst, rel])
    degrees = np.array([len(neighbors) for neighbors in adjacency])
    adjacency = [np.array(neighbors) for neighbors in adjacency]
    return adjacency, degrees
def sample_edge_neighborhood(adj_list, degrees, n_triplets, sample_size,tables_id):
    """Sample edges by neighborhool expansion.
    This guarantees that the sampled edges form a connected graph, which
    may help deeper GNNs that require information from more than one hop.

    Adjacency entries are [edge_id, neighbor, relation] (see
    get_adj_and_degrees). `tables_id` lists the node ids that are tables;
    the walk starts from a random table and occasionally (10% of steps)
    tries to jump to an edge that reaches another table.

    :return: int32 array of `sample_size` sampled edge ids.
    """
    edges = np.zeros((sample_size), dtype=np.int32)
    #initialize
    sample_counts = np.array([d for d in degrees])  # remaining budget per node
    picked = np.array([False for _ in range(n_triplets)])  # edges already sampled
    seen = np.array([False for _ in degrees])  # nodes reached so far
    tables_included=[]
    for i in range(0, sample_size):
        found = False
        # Weight nodes by remaining degree, restricted to already-seen nodes;
        # fall back to uniform over nodes with edges left when nothing is seen.
        weights = sample_counts * seen
        if np.sum(weights) == 0:
            weights = np.ones_like(weights)
            weights[np.where(sample_counts == 0)] = 0
        probabilities = (weights) / np.sum(weights)
        if i==0:
            # Seed the walk at a random table node.
            chosen_vertex=random.choice(tables_id)
        retry=0
        if i>0:
            # With 10% probability, try (up to 50 times) to pick an edge from
            # a seen node that connects to a table node.
            if random.random()>0.90:
                while not found and retry<50:
                    retry+=1
                    chosen_vertex=random.choice(np.where(seen==True)[0])
                    chosen_adj_list = adj_list[chosen_vertex]
                    np.random.shuffle(chosen_adj_list)
                    for chosen_edge in chosen_adj_list:
                        edge_number = chosen_edge[0]
                        other_vertex = chosen_edge[1]
                        if other_vertex in tables_id:
                            found=True
                            break
                    if found:
                        if not picked[edge_number]:
                            edges[i] = edge_number
                            other_vertex = chosen_edge[1]
                            picked[edge_number] = True
                            sample_counts[chosen_vertex] -= 1
                            #sample_counts[other_vertex] -= 1
                            seen[other_vertex] = True
                            tables_included.append(other_vertex)
                        else:
                            # Edge already sampled: abandon this jump attempt.
                            found=False
        if found:
            continue
        if i>0:
            # Standard neighborhood-expansion step: pick a node by weight.
            chosen_vertex = np.random.choice(np.arange(degrees.shape[0]),
                                             p=probabilities)
        chosen_adj_list = adj_list[chosen_vertex]
        chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))
        chosen_edge = chosen_adj_list[chosen_edge]
        edge_number = chosen_edge[0]
        other_vertex = chosen_edge[1]
        # Re-draw from seen nodes until an unpicked edge is found.
        while picked[edge_number]:
            # chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))
            # chosen_edge = chosen_adj_list[chosen_edge]
            # edge_number = chosen_edge[0]
            # other_vertex = chosen_edge[1]
            chosen_vertex = random.choice(np.where(seen == True)[0])
            chosen_adj_list = adj_list[chosen_vertex]
            chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))
            chosen_edge = chosen_adj_list[chosen_edge]
            edge_number = chosen_edge[0]
            other_vertex = chosen_edge[1]
        seen[chosen_vertex] = True
        edges[i] = edge_number
        picked[edge_number] = True
        sample_counts[chosen_vertex] -= 1
        #sample_counts[other_vertex] -= 1
        seen[other_vertex] = True
    print('nb tables included is {}'.format(len(set(tables_included))))
    return edges
def sample_edge_uniform(adj_list, degrees, n_triplets, sample_size):
    """Sample `sample_size` distinct edge ids uniformly at random.

    `adj_list` and `degrees` are unused; they are kept for signature parity
    with sample_edge_neighborhood so callers can swap samplers.
    """
    return np.random.choice(np.arange(n_triplets), sample_size, replace=False)
def generate_sampled_graph_and_labels(triplets, sample_size, split_size,
                                      num_rels, adj_list, degrees,
                                      negative_rate,tables_id, sampler="uniform"):
    """Get training graph and signals
    First perform edge neighborhood sampling on graph, then perform negative
    sampling to generate negative samples

    :param triplets: full (src, rel, dst) triplet array.
    :param sample_size: number of edges to sample.
    :param split_size: fraction of sampled edges used as graph structure;
        the rest act as unseen positive examples.
    :param sampler: "uniform" or "neighbor".
    :return: (g, uniq_v, rel, norm, samples, labels) — the DGL graph, the
        original ids of the sampled nodes, edge relation types, node norms,
        and the positive+negative triplets with their 0/1 labels.
    """
    # perform edge neighbor sampling
    if sampler == "uniform":
        edges = sample_edge_uniform(adj_list, degrees, len(triplets), sample_size)
    elif sampler == "neighbor":
        edges = sample_edge_neighborhood(adj_list, degrees, len(triplets), sample_size,tables_id)
    else:
        raise ValueError("Sampler type must be either 'uniform' or 'neighbor'.")
    # relabel nodes to have consecutive node ids
    edges = triplets[edges]
    src, rel, dst = edges.transpose()
    # my_graph = nx.Graph()
    # edges_to_draw = list(set(list(zip(dst, src, rel))))
    # edges_to_draw = sorted(edges_to_draw)
    # # my_graph.add_edges_from(edges_to_draw[:10])
    #
    # for item in edges_to_draw:
    #     my_graph.add_edge(item[1], item[0], weight=item[2]*10)
    # pos = nx.spring_layout(my_graph)
    # labels = nx.get_edge_attributes(my_graph, 'weight')
    # plt.figure()
    # nx.draw(my_graph, pos, edge_color='black', width=1, linewidths=1, arrows=True,
    #         node_size=100, node_color='red', alpha=0.9,
    #         labels={node: node for node in my_graph.nodes()})
    # nx.draw_networkx_edge_labels(my_graph, pos, edge_labels=labels, font_color='red')
    # plt.axis('off')
    # plt.show()
    # np.unique over the concatenated endpoints yields the sorted original
    # node ids plus an inverse map that relabels endpoints to 0..len-1.
    uniq_v, edges = np.unique((src, dst), return_inverse=True)
    src, dst = np.reshape(edges, (2, -1))
    relabeled_edges = np.stack((src, rel, dst)).transpose()
    # negative sampling
    samples, labels = negative_sampling(relabeled_edges, len(uniq_v),
                                        negative_rate)
    #samples, labels = negative_relations(relabeled_edges, len(uniq_v),
    #                                     negative_rate)
    # further split graph, only half of the edges will be used as graph
    # structure, while the rest half is used as unseen positive samples
    split_size = int(sample_size * split_size)
    graph_split_ids = np.random.choice(np.arange(sample_size),
                                       size=split_size, replace=False)
    src = src[graph_split_ids]
    dst = dst[graph_split_ids]
    rel = rel[graph_split_ids]
    # build DGL graph
    print("# sampled nodes: {}".format(len(uniq_v)))
    print("# sampled edges: {}".format(len(src) * 2))
    #g, rel, norm,_ = build_graph_from_triplets_modified(len(uniq_v), num_rels,
    #                                                    (src, rel, dst))
    g, rel, norm=build_graph_directly(len(uniq_v), (src, rel, dst))
    return g, uniq_v, rel, norm, samples, labels
def comp_deg_norm(g):
    """Reciprocal in-degree normalization for every node of a DGL graph.

    Nodes with in-degree 0 get a norm of 0 instead of inf.
    """
    graph = g.local_var()
    in_degrees = graph.in_degrees(range(graph.number_of_nodes())).float().numpy()
    norm = 1.0 / in_degrees
    norm[np.isinf(norm)] = 0
    return norm
def build_graph_from_triplets(num_nodes, num_rels, triplets):
    """ Create a DGL graph. The graph is bidirectional because RGCN authors
    use reversed relations.
    This function also generates edge type and normalization factor
    (reciprocal of node incoming degree)

    Inverse edges get relation ids shifted by num_rels. Edges are sorted by
    (dst, src, rel) before insertion so edge ids align with the returned
    `rel` array.
    """
    g = dgl.DGLGraph()
    g.add_nodes(num_nodes)
    src, rel, dst = triplets
    # Add the reverse of every edge with its "inverse" relation id.
    src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
    rel = np.concatenate((rel, rel + num_rels))
    edges = sorted(zip(dst, src, rel))
    dst, src, rel = np.array(edges).transpose()
    g.add_edges(src, dst)
    norm = comp_deg_norm(g)
    print("# nodes: {}, # edges: {}".format(num_nodes, len(src)))
    return g, rel, norm
def build_graph_directly(num_nodes, triplets):
    """ Create a DGL graph. The graph is bidirectional because RGCN authors
    use reversed relations.
    This function also generates edge type and normalization factor
    (reciprocal of node incoming degree)

    Unlike build_graph_from_triplets, inverse relations come from a
    hard-coded `inverse_mapping` table (dataset-specific: maps each of the
    22 relation ids to its inverse id), and duplicate (dst, src, rel)
    edges are removed via a set before insertion.
    """
    g = dgl.DGLGraph()
    g.add_nodes(num_nodes)
    src, rel, dst = triplets
    #edges = sorted(zip(dst, src, rel))
    #dst, src, rel = np.array(edges).transpose()
    # NOTE(review): this table encodes the dataset's relation inverses;
    # several relations (ids 3-8) map to themselves — confirm against the
    # relation vocabulary before changing.
    inverse_mapping=[11,12,13,3,4,5,6,7,8,20,21,0,1,2,3,4,5,6,7,8,9,10]
    rel2=np.array([inverse_mapping[i] for i in rel])
    rel = np.concatenate((rel, rel2))
    src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
    # Deduplicate and sort so edge insertion order (and thus edge ids) is
    # deterministic and matches the returned `rel` array.
    edges = list(set(list(zip(dst, src, rel))))
    edges = sorted(edges)
    #dst, src, rel = np.array(list(set(edges))).transpose()
    dst, src, rel = np.array(edges).transpose()
    # my_graph = nx.Graph()
    #
    # for item in edges:
    #     my_graph.add_edge(item[1], item[0], weight=str(item[2]))
    # pos = nx.spring_layout(my_graph)
    # labels = nx.get_edge_attributes(my_graph, 'weight')
    # plt.figure()
    # nx.draw(my_graph, pos, edge_color='black', width=1, linewidths=1,arrows=True,
    #         node_size=500, node_color='pink', alpha=0.9,
    #         labels={node: node for node in my_graph.nodes()})
    # nx.draw_networkx_edge_labels(my_graph, pos, edge_labels=labels, font_color='red')
    # plt.axis('off')
    # plt.show()
    g.add_edges(src, dst)
    norm = comp_deg_norm(g)
    print("# nodes: {}, # edges: {}".format(num_nodes, len(src)))
    return g, rel, norm
def build_graph_from_triplets_modified(num_nodes, num_rels, triplets):
    """ Create a DGL graph. The graph is bidirectional because RGCN authors
    use reversed relations.
    This function also generates edge type and normalization factor
    (reciprocal of node incoming degree)

    Variant of build_graph_from_triplets that uses the dataset-specific
    `inverse_mapping` table for inverse relation ids and additionally
    returns the final (src, rel, dst) training triplets.
    """
    g = dgl.DGLGraph()
    g.add_nodes(num_nodes)
    src, rel, dst = triplets
    src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
    #rel = np.concatenate((rel, rel + num_rels))
    #rel = np.array([i - num_rels if i in [12, 13, 14, 15, 16, 17] else i for i in rel])
    #rel = np.array([i - num_rels if i in [9,10,11] else i for i in rel])
    #rel= np.array([i - num_rels if i in [12,13,14,15,16,17] else i for i in rel])
    #rel = np.array([i - num_rels if i in [14, 15, 16, 17,18,19] else i for i in rel])
    # Same dataset-specific inverse-relation table as build_graph_directly.
    inverse_mapping = [11, 12, 13, 3, 4, 5, 6, 7, 8, 20, 21, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    rel2 = np.array([inverse_mapping[i] for i in rel])
    rel = np.concatenate((rel, rel2))
    edges = sorted(zip(dst, src, rel))
    dst, src, rel = np.array(edges).transpose()
    g.add_edges(src, dst)
    norm = comp_deg_norm(g)
    print("# nodes: {}, # edges: {}".format(num_nodes, len(src)))
    train_data=np.concatenate([src.reshape(len(src),-1),rel.reshape(len(rel),-1),dst.reshape(len(dst),-1)],axis=1)
    return g, rel, norm,train_data
def build_test_graph(num_nodes, num_rels, edges):
    """Build the evaluation graph from the full edge set.

    Thin wrapper: unpacks the (src, rel, dst) columns and delegates to
    build_graph_from_triplets_modified.
    """
    s, r, o = edges.transpose()
    print("Test graph:")
    return build_graph_from_triplets_modified(num_nodes, num_rels, (s, r, o))
def negative_sampling(pos_samples, num_entity, negative_rate):
    """Corrupt each positive triplet `negative_rate` times by replacing
    either its subject or its object with a uniformly random entity.

    :param pos_samples: (batch, 3) array of positive (s, r, o) triplets.
    :param num_entity: number of entities to draw corruptions from.
    :param negative_rate: negatives generated per positive.
    :return: (samples, labels) — positives followed by negatives, with
        float32 labels (1 for positives, 0 for negatives).
    """
    n_pos = len(pos_samples)
    n_neg = n_pos * negative_rate
    negatives = np.tile(pos_samples, (negative_rate, 1))
    labels = np.zeros(n_pos * (negative_rate + 1), dtype=np.float32)
    labels[:n_pos] = 1
    replacements = np.random.randint(num_entity, size=n_neg)
    coin = np.random.uniform(size=n_neg)
    corrupt_subj = coin > 0.5
    corrupt_obj = ~corrupt_subj
    negatives[corrupt_subj, 0] = replacements[corrupt_subj]
    negatives[corrupt_obj, 2] = replacements[corrupt_obj]
    return np.concatenate((pos_samples, negatives)), labels
def negative_relations(pos_samples, num_entity, negative_rate):
    """Entity-corruption negative sampling plus relation-corruption negatives.

    In addition to the standard subject/object corruption (same scheme as
    negative_sampling), every positive whose relation is one of the three
    relevance relations (ids 6/7/8 — presumably irrelevant / somewhat
    relevant / relevant; confirm against the relation vocabulary) yields two
    extra negatives with the other two relevance relations substituted.

    :return: (samples, labels) — positives, entity-corrupted negatives, then
        relation-corrupted negatives, with matching 1/0 float32 labels.
    """
    size_of_batch = len(pos_samples)
    negative_relation=[]
    # Relation ids of the three relevance levels.
    irrelevant, somehowrelevant, relevant=6,7,8
    for i in range(size_of_batch):
        curr=pos_samples[i]
        if curr[1]==irrelevant:
            negative_relation.append([curr[0],somehowrelevant,curr[2]])
            negative_relation.append([curr[0], relevant, curr[2]])
        elif curr[1]==somehowrelevant:
            negative_relation.append([curr[0], irrelevant, curr[2]])
            negative_relation.append([curr[0], relevant, curr[2]])
        elif curr[1] == relevant:
            negative_relation.append([curr[0], somehowrelevant, curr[2]])
            negative_relation.append([curr[0], irrelevant, curr[2]])
    negative_relation_labels = np.zeros(len(negative_relation), dtype=np.float32)
    negative_relation=np.array(negative_relation)
    # Standard entity-corruption negatives (same as negative_sampling).
    num_to_generate = size_of_batch * negative_rate
    neg_samples = np.tile(pos_samples, (negative_rate, 1))
    labels = np.zeros(size_of_batch * (negative_rate + 1), dtype=np.float32)
    labels[: size_of_batch] = 1
    values = np.random.randint(num_entity, size=num_to_generate)
    choices = np.random.uniform(size=num_to_generate)
    subj = choices > 0.5
    obj = choices <= 0.5
    neg_samples[subj, 0] = values[subj]
    neg_samples[obj, 2] = values[obj]
    labels=np.concatenate((labels,negative_relation_labels))
    return np.concatenate((pos_samples, neg_samples,negative_relation)), labels
#######################################################################
#
# Utility function for evaluations
#
#######################################################################
def sort_and_rank(score, target):
    """For every row of `score`, return the 0-based rank of the target
    column when that row is sorted in descending order.

    :param score: (batch, num_candidates) score tensor.
    :param target: (batch,) tensor of true candidate indices.
    :return: (batch,) tensor of ranks (0 = best score).
    """
    order = torch.sort(score, dim=1, descending=True)[1]
    match_positions = torch.nonzero(order == target.view(-1, 1))
    return match_positions[:, 1].view(-1)
def perturb_and_get_rank(embedding, w, a, r, b, test_size, batch_size=100):
    """ Perturb one element in the triplets

    Scores every entity as a replacement for the `b` side of each test
    triplet (a, r, b) and returns the rank of the true entity. The score is
    sum over embedding dims of emb[a] * w[r] * emb[candidate] — presumably
    DistMult-style; confirm against the model definition.

    :param embedding: (num_entities, D) entity embedding tensor.
    :param w: (num_relations, D) relation weight tensor.
    :param a, r, b: 1-D tensors of known side, relation, and target side.
    :return: 1-D tensor of 0-based ranks, one per test triplet.
    """
    n_batch = (test_size + batch_size - 1) // batch_size
    ranks = []
    for idx in range(n_batch):
        print("batch {} / {}".format(idx, n_batch))
        batch_start = idx * batch_size
        batch_end = min(test_size, (idx + 1) * batch_size)
        batch_a = a[batch_start: batch_end]
        batch_r = r[batch_start: batch_end]
        emb_ar = embedding[batch_a] * w[batch_r]
        emb_ar = emb_ar.transpose(0, 1).unsqueeze(2)  # size: D x E x 1
        emb_c = embedding.transpose(0, 1).unsqueeze(1)  # size: D x 1 x V
        # out-prod and reduce sum
        out_prod = torch.bmm(emb_ar, emb_c)  # size D x E x V
        score = torch.sum(out_prod, dim=0)  # size E x V
        # Sigmoid is monotonic, so it does not change the ranking.
        score = torch.sigmoid(score)
        target = b[batch_start: batch_end]
        ranks.append(sort_and_rank(score, target))
    return torch.cat(ranks)
# TODO (lingfan): implement filtered metrics
# return MRR (raw), and Hits @ (1, 3, 10)
def calc_mrr(embedding, w, test_triplets, hits=[], eval_bz=100):
    """Compute raw MRR (and print raw Hits@k) over the test triplets.

    Ranks are computed twice per triplet — once perturbing the subject and
    once perturbing the object — then pooled.

    NOTE(review): `hits=[]` is a mutable default argument; it is only read
    here, but callers should still pass their own list.

    :param embedding: entity embedding tensor.
    :param w: relation weight tensor.
    :param test_triplets: (N, 3) tensor of (s, r, o) triplets.
    :param hits: list of k values for Hits@k reporting.
    :param eval_bz: evaluation batch size.
    :return: raw MRR as a float.
    """
    with torch.no_grad():
        s = test_triplets[:, 0]
        r = test_triplets[:, 1]
        o = test_triplets[:, 2]
        test_size = test_triplets.shape[0]
        # perturb subject
        ranks_s = perturb_and_get_rank(embedding, w, o, r, s, test_size, eval_bz)
        # perturb object
        ranks_o = perturb_and_get_rank(embedding, w, s, r, o, test_size, eval_bz)
        ranks = torch.cat([ranks_s, ranks_o])
        ranks += 1  # change to 1-indexed
        mrr = torch.mean(1.0 / ranks.float())
        print("MRR (raw): {:.6f}".format(mrr.item()))
        for hit in hits:
            avg_count = torch.mean((ranks <= hit).float())
            print("Hits (raw) @ {}: {:.6f}".format(hit, avg_count.item()))
    return mrr.item()
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/retrieval/toy_indexer.py | """
Toy Indexer
===========
Toy indexing example for testing purposes.
:Authors: <NAME>, <NAME>
"""
from nordlys.core.retrieval.elastic import Elastic
from nordlys.core.retrieval.elastic_cache import ElasticCache
from nordlys.core.retrieval.scorer import *
import math
def main():
    """Build a small toy Elasticsearch index and exercise retrieval,
    scoring, and term-statistics calls against it (manual smoke test;
    requires a running Elasticsearch instance)."""
    index_name = "toy_index"
    # Both fields are analyzed so term statistics and scoring work on them.
    mappings = {
        "title": Elastic.analyzed_field(),
        "content": Elastic.analyzed_field(),
    }
    docs = {
        1: {"title": "Rap God pp m m m m m m m",
            "content": "gonna, gonna, Look, I was gonna go easy on you and not to hurt your feelings cc"
            },
        2: {"title": "Lose Yourself",
            "content": "Yo, if you cc could just, for one minute Or one split second in time, forget everything Everything that bothers you, or your problems Everything, and follow me"
            },
        3: {"title": "Love Way",
            "content": "Just gonna stand there and watch me burn But that's alright, because I like the way it hurts you you you you cc"
            },
        4: {"title": "Monster",
            "content": ["you gonna gonna I'm friends with the monster", "That's under my bed Get along with the voices inside of my head cc"]
            },
        5: {"title": "Beautiful",
            "content": "d"
            }
    }
    elastic = Elastic(index_name)
    elastic.create_index(mappings, model='BM25',force=True)
    elastic.add_docs_bulk(docs)
    print("index has been built")
    es = Elastic(index_name)
    feature_used='title'
    test_query='cc'
    result = es.search(test_query, feature_used)
    print(result)
    params = {"fields": feature_used}
    # Language-model score of the query against doc id 5.
    score = ScorerLM(es, test_query, params).score_doc(5)
    print(score)
    # Collection-level statistics for the query term in the chosen field.
    len_C_f = es.coll_length(feature_used)
    tf_t_C_f = es.coll_term_freq(test_query, feature_used)
    nb_docs=es.doc_count(feature_used)
    term_freq=es.doc_freq(test_query,feature_used)
    stat=es.avg_len(feature_used)
    ss=es.get_field_stats(feature_used)
    print(stat)
    # Add-one smoothed IDF, computed locally from the stats above.
    idf = math.log((nb_docs + 1.0) / (term_freq + 1.0)) + 1.0
    print(len_C_f)
    print(tf_t_C_f)
    print(nb_docs)
    print(term_freq)
    print(idf)
if __name__ == "__main__":
    main()
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/features/word2vec.py | <reponame>medtray/MultiEm-RGCN
"""
Word2vec
========
Implements functionalities over the 300-dim GoogleNews word2vec semantic representations of words.
:Author: <NAME>
"""
import argparse
import numpy as np
from nordlys.config import MONGO_HOST, MONGO_DB, MONGO_COLLECTION_WORD2VEC
from nordlys.core.storage.mongo import Mongo
class Word2Vec(object):
    """Lookup layer over the 300-dim GoogleNews word2vec vectors stored in
    a MongoDB collection."""

    __DIMENSION = 300  # Dimension of GoogleNews pre-trained corpus vectors

    def __init__(self, mongo):
        self.__mongo_collection = mongo

    def get_vector(self, word):
        """Gets the w2v vector corresponding to the word, or a zero-valued
        vector if not present.

        :param word: a word.
        :type word: str
        :return: numpy array of shape (300,).
        """
        record = self.__mongo_collection.find_by_id(word)
        if record is None:
            return np.zeros((self.__DIMENSION,))
        return np.array(record["vector"])

    def get_centroid_vector(self, s):
        """Returns the centroid (normalized sum) of the word2vec vectors of
        the terms in s.

        :param s: a phrase.
        :type s: str
        :return: numpy array of shape (300,); all zeros for an empty phrase.
        """
        terms = s.split()
        if not terms:
            return np.zeros((self.__DIMENSION,))
        return sum(self.get_vector(term) for term in terms) / len(terms)
def main(args):
    """CLI demo for the Word2Vec wrapper: prints the vector of a word
    (``-w``) and/or the centroid vector of a phrase (``-c``).

    :param args: parsed argparse namespace with `word` and `centroid`.
    """
    # word2vec main __instances
    w2v_mongo = Mongo(MONGO_HOST, MONGO_DB, MONGO_COLLECTION_WORD2VEC)
    w2v = Word2Vec(w2v_mongo)
    print("\t\t*** word2vec functionalities, with word vectors from GoogleNews 300-dim pre-trained corpus. ***\n")
    # Testing some functionalities
    if args.word:
        word = args.word.strip()
        vector = w2v.get_vector(word)
        print("word = {}\nvector = {}\nvector dimension = {}\n".format(word, vector, vector.shape[0]))
    if args.centroid:
        # BUGFIX: renamed from `str`, which shadowed the builtin.
        phrase = args.centroid.strip()
        centroid_v = w2v.get_centroid_vector(phrase)
        print("expression = {}\ncentroid vector = {}\n".format(phrase, centroid_v))
def arg_parser():
    """Parse the command-line options for the word2vec demo.

    :return: argparse namespace with `word` and `centroid` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-w", "--word", type=str,
                        help="a word, for showing its w2v vector")
    parser.add_argument("-c", "--centroid", type=str,
                        help="a string, for showing its centroid vector")
    return parser.parse_args()
# Script entry point: parse CLI options and run the demo.
if __name__ == '__main__':
    main(arg_parser())
|
medtray/MultiEm-RGCN | utils_.py | import numpy as np
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
#import tensorflow as tf
import os
import re
def isEnglish(s):
    """Return True iff *s* contains only ASCII characters.

    Implemented as a round-trip through utf-8/ascii codecs so that any
    non-ASCII character raises and is reported as non-English.
    """
    try:
        s.encode(encoding='utf-8').decode('ascii')
    except UnicodeDecodeError:
        return False
    return True
def camel_case_split(identifier):
    """Split *identifier* at camelCase boundaries.

    e.g. "fooBarBaz" -> ["foo", "Bar", "Baz"]; "ABCDef" -> ["ABC", "Def"].
    Returns [] for the empty string.
    """
    boundary = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
    return re.findall(boundary, identifier)
def _tokenize_clean(text, camel_split=True):
    """Shared first stage of preprocessing: replace separator punctuation with
    spaces, word-tokenize, optionally split camelCase tokens, lower-case, and
    re-break tokens on en/em dashes."""
    for sep in ('-', '_', '/', ',', '.', '|', ':'):
        text = text.replace(sep, ' ')
    tokens = word_tokenize(text)
    if camel_split:
        expanded = []
        for tok in tokens:
            expanded += camel_case_split(tok)
        tokens = expanded
    tokens = [tok.lower() for tok in tokens]
    for dash in ('\u2013', '\u2014'):  # en dash, em dash
        rebroken = []
        for tok in tokens:
            rebroken += re.sub(dash + '+', ' ', tok).split()
        tokens = rebroken
    return tokens


def _is_number(tok):
    """True when *tok* parses as a float."""
    try:
        float(tok)
        return True
    except ValueError:
        return False


def _split_off_numbers(tokens):
    """Separate numeric tokens from textual ones.

    Digit runs embedded in mixed tokens (e.g. 'ab12cd') are extracted via
    re.split and counted as numeric as well; the remaining fragments stay
    textual. Returns (numeric_tokens, textual_tokens)."""
    numeric, textual = [], []
    for tok in tokens:
        if _is_number(tok):
            numeric.append(tok)
        else:
            textual.append(tok)
    refined_textual = []
    for tok in textual:
        for piece in re.split(r'(\d+)', tok):
            if len(piece) > 0:
                if _is_number(piece):
                    numeric.append(piece)
                else:
                    refined_textual.append(piece)
    return numeric, refined_textual


def _mask_digits_keep_first(values):
    """Replace every digit with '#' except the leading one (for negative
    numbers the first two characters -- sign plus leading digit -- are kept)."""
    masked = []
    for val in values:
        keep = 2 if val[0] == '-' else 1
        masked.append(val[:keep] + re.sub(r'\d', '#', val[keep:]))
    return masked


# Symbols that survive filtering in the value branches (currency/percent/degree).
_VALUE_SYMBOLS = ('$', '@', '%', '£', '€', '°')


def _filter_words(tokens, keep_symbols, keep_units):
    """Keep alphabetic tokens (plus value symbols when requested), drop NLTK
    English stop words (optionally retaining the unit-like letters d/m/s),
    then drop non-ASCII tokens."""
    if keep_symbols:
        words = [t for t in tokens if t.isalpha() or t in _VALUE_SYMBOLS]
    else:
        words = [t for t in tokens if t.isalpha()]
    stop_words = set(stopwords.words('english'))
    if keep_units:
        # single letters like '5 d', '3 m', '2 s' are meaningful in cell values
        stop_words.remove('d')
        stop_words.remove('m')
        stop_words.remove('s')
    words = [w for w in words if w not in stop_words]
    if keep_symbols:
        return [t for t in words if isEnglish(t) or t in _VALUE_SYMBOLS]
    return [t for t in words if isEnglish(t)]


def preprocess(input, type):
    """Clean and tokenize *input* according to the field *type*.

    Branches:
      - 'attribute' / 'query': tokenize (with camelCase splitting), keep
        alphabetic ASCII non-stop-word tokens.
      - 'description': like 'attribute', then drop crawling/file-format
        boilerplate words.
      - 'value': like 'attribute' plus: numeric tokens are separated, their
        digits masked with '#' (first digit kept), and appended at the end;
        currency/percent/degree symbols are retained.
      - 'value2': like 'value' but without camelCase splitting and without
        digit masking. (The original also re-applied a lowercase->camelCase
        regex split after lower-casing, which is a provable no-op and was
        removed.)

    :param input: raw text. (Parameter names shadow builtins; kept for
        backward compatibility with positional callers.)
    :param type: one of 'attribute', 'value', 'value2', 'description', 'query'.
    :return: list of normalized tokens.
    :raises ValueError: for an unknown *type* (previously this crashed with a
        NameError at the return statement).
    """
    if type == 'attribute':
        final_words = _filter_words(_tokenize_clean(input),
                                    keep_symbols=False, keep_units=False)
    elif type == 'value':
        numeric, tokens = _split_off_numbers(_tokenize_clean(input))
        final_words = _filter_words(tokens, keep_symbols=True, keep_units=True)
        final_words = final_words + _mask_digits_keep_first(numeric)
    elif type == 'value2':
        numeric, tokens = _split_off_numbers(_tokenize_clean(input, camel_split=False))
        final_words = _filter_words(tokens, keep_symbols=True, keep_units=True)
        final_words = final_words + numeric
    elif type == 'description':
        final_words = _filter_words(_tokenize_clean(input),
                                    keep_symbols=False, keep_units=False)
        # drop crawling/file-format boilerplate that carries no topical signal
        not_to_use = ['com', 'u', 'comma', 'separated', 'values', 'csv', 'data',
                      'dataset', 'https', 'api', 'www', 'http', 'non', 'gov',
                      'rows', 'p', 'download', 'downloads', 'file', 'files', 'p']
        final_words = [tok for tok in final_words if tok not in not_to_use]
    elif type == 'query':
        final_words = _filter_words(_tokenize_clean(input),
                                    keep_symbols=False, keep_units=False)
    else:
        raise ValueError("unknown field type: {}".format(type))
    return final_words
def dcg_score(y_true, y_score, k=10, gains="exponential"):
    """Discounted cumulative gain (DCG) at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank cutoff.
    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    DCG @k : float
    """
    # relevance labels of the top-k items, ranked by descending predicted score
    ranking = np.argsort(y_score)[::-1]
    top_rels = np.take(y_true, ranking[:k])

    if gains == "exponential":
        gain_values = 2 ** top_rels - 1
    elif gains == "linear":
        gain_values = top_rels
    else:
        raise ValueError("Invalid gains option.")

    # highest rank is 1, hence log2(position + 2) as the discount
    positions = np.arange(len(top_rels))
    return np.sum(gain_values / np.log2(positions + 2))
def ndcg_score(y_true, y_score, k=10, gains="exponential"):
    """Normalized discounted cumulative gain (NDCG) at rank k.

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank cutoff.
    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    NDCG @k : float
        Returns 0.0 when the ideal DCG is zero (all labels are zero),
        instead of dividing by zero as the original did.
    """
    best = dcg_score(y_true, y_true, k, gains)
    if best == 0:
        # no relevant items at all: any ranking is equally (un)informative
        return 0.0
    actual = dcg_score(y_true, y_score, k, gains)
    return actual / best
|
medtray/MultiEm-RGCN | nordlys/nordlys/config.py | """
config
------
Global nordlys config.
:Author: <NAME>
:Author: <NAME>
"""
import logging
import os
from nordlys.core.utils.file_utils import FileUtils
from nordlys.core.utils.logging_utils import PrintHandler
def load_nordlys_config(file_name):
    """Loads nordlys config file. If local file is provided, global one is ignored."""
    config_dir = os.sep.join([BASE_DIR, "config"])
    local_path = os.sep.join([config_dir, "local", file_name])
    # a file under config/local/ overrides the global one of the same name
    target = local_path if os.path.exists(local_path) else os.sep.join([config_dir, file_name])
    return FileUtils.load_config(target)
# global variable for entity linking; populated elsewhere with a KB snapshot
KB_SNAPSHOT = None

# Nordlys DIRs (resolved relative to this file's location)
NORDLYS_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.sep.join([BASE_DIR, "data"])
LIB_DIR = os.sep.join([BASE_DIR, "lib"])

# config for MongoDB
# NOTE: importing this module has side effects -- it reads the JSON config
# files below and configures logging.
MONGO_CONFIG = load_nordlys_config("mongo.json")
MONGO_HOST = MONGO_CONFIG["host"]
MONGO_DB = MONGO_CONFIG["db"]
MONGO_COLLECTION_DBPEDIA = MONGO_CONFIG["collection_dbpedia"]
MONGO_COLLECTION_SF_FACC = MONGO_CONFIG["collection_sf_facc"]
MONGO_COLLECTION_SF_DBPEDIA = MONGO_CONFIG["collection_sf_dbpedia"]
MONGO_COLLECTION_WORD2VEC = MONGO_CONFIG["collection_word2vec"]
MONGO_COLLECTION_FREEBASE2DBPEDIA = MONGO_CONFIG["collection_freebase2dbpedia"]
MONGO_ENTITY_COLLECTIONS = MONGO_CONFIG["entity_collections"]

# config for Elasticsearch
ELASTIC_CONFIG = load_nordlys_config("elastic.json")
ELASTIC_HOSTS = ELASTIC_CONFIG["hosts"]
ELASTIC_SETTINGS = ELASTIC_CONFIG["settings"]
ELASTIC_INDICES = ELASTIC_CONFIG["indices"]
ELASTIC_TTI_INDICES = ELASTIC_CONFIG["tti_indices"]

# config for trec_eval (path to the compiled trec_eval binary)
TREC_EVAL = os.sep.join([LIB_DIR, "trec_eval", "trec_eval"])

# config for API
API_CONFIG = load_nordlys_config("api.json")
API_HOST = API_CONFIG["host"]
API_PORT = int(API_CONFIG["port"])

# config for Web interface
WWW_CONFIG = load_nordlys_config("www.json")
WWW_HOST = WWW_CONFIG["host"]
WWW_PORT = int(WWW_CONFIG["port"])
WWW_DOMAIN = WWW_CONFIG["domain"]
WWW_SETTINGS = WWW_CONFIG["settings"]

# config for RequestLogger
LOGGING_PATH = os.sep.join([BASE_DIR, "logs"])

# config for PrintLogger (PLOGGER): app-level logging at INFO, noisy
# third-party loggers capped at WARNING
LOGGING_LEVEL = logging.INFO
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("elasticsearch").setLevel(logging.WARNING)
PLOGGER = logging.getLogger("nordlys")
PLOGGER.addHandler(PrintHandler(LOGGING_LEVEL).ch)
PLOGGER.setLevel(LOGGING_LEVEL)
PLOGGER.propagate = 0 |
medtray/MultiEm-RGCN | nordlys/nordlys/core/__init__.py | """
Core packages
=============
These low-level packages (basic IR, ML, NLP, storage, etc.) are basic building blocks that do not have any interdependencies among each other.
""" |
medtray/MultiEm-RGCN | nordlys/nordlys/core/data/dbpedia/indexer_dbpedia_types.py | <filename>nordlys/nordlys/core/data/dbpedia/indexer_dbpedia_types.py
"""
DBpedia Types Indexer
=====================
Build an index of DBpedia types from entity abstracts.
:Authors: <NAME>, <NAME>
"""
import os
import argparse
from random import sample
from math import floor
from rdflib.plugins.parsers.ntriples import NTriplesParser
from rdflib.plugins.parsers.ntriples import ParseError
from rdflib.term import URIRef
from nordlys.core.storage.parser.nt_parser import Triple
from nordlys.core.storage.parser.uri_prefix import URIPrefix
from nordlys.core.utils.file_utils import FileUtils
from nordlys.core.retrieval.elastic import Elastic
from nordlys.config import DATA_DIR
from nordlys.config import PLOGGER
# -------
# Constants
# About index
ID_KEY = "id"  # not used
CONTENT_KEY = "content"  # field under which a type's abstract text is indexed
# Distinguished strings
ABSTRACTS_SEPARATOR = "\n"  # joins individual entity abstracts into one doc
DBO_PREFIX = "<dbo:"  # only DBpedia-Ontology native types are indexed
# Sizes
BULK_LEN = 1  # number of type docs per Elasticsearch bulk request
MAX_BULKING_DOC_SIZE = 20000000  # max doc len when bulking, in chars (i.e., 20MB)
AVG_SHORT_ABSTRACT_LEN = 216  # according to all entities appearing in DBpedia-2015-10 entity-to-type mapping
# -------
# Indexer class
class IndexerDBpediaTypes(object):
    """Indexes each DBpedia type as one Elasticsearch document whose content
    is the concatenation of the short abstracts of the entities assigned to
    that type."""

    __DOC_TYPE = "doc"  # we don't make use of types
    __MAPPINGS = {
        # ID_KEY: Elastic.notanalyzed_field(),
        CONTENT_KEY: Elastic.analyzed_field(),
    }

    def __init__(self, config):
        """:param config: dict with "index_name", "type2entity_file",
        "entity_abstracts_file" and optionally "model"."""
        self.__elastic = None
        self.__config = config
        self.__model = config.get("model", Elastic.BM25)
        self.__index_name = config["index_name"]
        self.__type2entity_file = config["type2entity_file"]
        self.__entity_abstracts = {}  # entity URI -> short abstract (utf-8 bytes)
        self.__load_entity_abstracts(config["entity_abstracts_file"])

    @property
    def name(self):
        """Name of the Elasticsearch index being built."""
        return self.__index_name

    def __load_entity_abstracts(self, filename):
        """Parses the NTriples abstracts file into self.__entity_abstracts."""
        prefix = URIPrefix()
        t = Triple()
        p = NTriplesParser(t)
        lines_counter = 0
        PLOGGER.info("Loading entity abstracts from {}".format(filename))
        for line in FileUtils.read_file_as_list(filename):
            # basic line parsing
            line = line.decode("utf-8") if isinstance(line, bytes) else line
            try:
                p.parsestring(line)
            except ParseError:  # skip lines that couldn't be parsed
                continue
            if t.subject() is None:  # only if parsed as a triple
                continue

            # Subject and object identification
            subj = prefix.get_prefixed(t.subject())
            obj = ""
            if type(t.object()) is URIRef:
                # URIRef objects are not abstracts; keep obj empty so the line is skipped
                # PLOGGER.error("Error: it is URIRef the parsed obj")
                pass
            else:
                obj = t.object().encode("utf-8")
            if len(obj) == 0:
                continue  # skip empty objects
            self.__entity_abstracts[subj] = obj

            lines_counter += 1
            if lines_counter % 10000 == 0:
                PLOGGER.info("\t{}K lines processed".format(lines_counter // 1000))
                pass
        PLOGGER.info("\n### Loading entity abstracts... Done.")

    def __make_type_doc(self, entities, last_type):
        """Gets the document representation of a type to be indexed, from its entity short abstracts."""
        content = ABSTRACTS_SEPARATOR.join([self.__entity_abstracts.get(e, b"").decode("utf-8")
                                            for e in entities])

        if len(content) > MAX_BULKING_DOC_SIZE:
            PLOGGER.info("Type {} has content larger than allowed: {}.".format(last_type, len(content)))

            # we randomly sample a subset of Y entity abstracts, s.t. Y * AVG_SHORT_ABSTRACT_LEN <= MAX_BULKING_DOC_SIZE
            amount_abstracts_to_sample = min(floor(MAX_BULKING_DOC_SIZE / AVG_SHORT_ABSTRACT_LEN), len(entities))
            entities_sample = [entities[i] for i in sample(range(len(entities)), amount_abstracts_to_sample)]
            content = ""  # reset content
            for entity in entities_sample:
                new_content_candidate = (content + ABSTRACTS_SEPARATOR +
                                         self.__entity_abstracts.get(entity, b"").decode("utf-8"))
                # we add an abstract only if by doing so it will not exceed MAX_BULKING_DOC_SIZE
                if len(new_content_candidate) <= MAX_BULKING_DOC_SIZE:
                    content = new_content_candidate
                else:
                    break
        return {CONTENT_KEY: content}

    def build_index(self, force=False):
        """Builds the index.

        :param force: True iff it is required to overwrite the index (i.e. by creating it by force); False by default.
        :type force: bool
        :return:
        """
        self.__elastic = Elastic(self.__index_name)
        self.__elastic.create_index(mappings=self.__MAPPINGS, force=force)
        prefix = URIPrefix()

        # For indexing types in bulk
        types_bulk = {}  # dict from type id to type(=doc)

        # process type2entity file; the flush-on-type-change logic below relies
        # on all lines for a given type being contiguous in the input file
        last_type = None
        entities = []
        lines_counter = 0
        types_counter = 0
        with FileUtils.open_file_by_type(self.__type2entity_file) as f:
            for line in f:
                line = line.decode()  # o.w. line is made of bytes
                if not line.startswith("<"):  # bad-formed lines in dataset
                    continue
                subj, obj = line.rstrip().split()

                type = prefix.get_prefixed(subj)  # subject prefixed
                entity = prefix.get_prefixed(obj)

                # use only DBpedia Ontology native types (no bibo, foaf, schema, etc.)
                if not type.startswith(DBO_PREFIX):
                    continue

                if last_type is not None and type != last_type:
                    # moving to new type, so:
                    # create a doc for this type, with all the abstracts for its entities, and store it in a bulk
                    types_counter += 1
                    # PLOGGER.info("\n\tFound {}-th type: {}\t\t with # of entities: {}".format(types_counter,
                    #                                                                          last_type,
                    #                                                                          len(entities)))
                    types_bulk[last_type] = self.__make_type_doc(entities, last_type)
                    entities = []  # important to reset it

                    if types_counter % BULK_LEN == 0:  # index the bulk of BULK_LEN docs
                        self.__elastic.add_docs_bulk(types_bulk)
                        types_bulk.clear()  # NOTE: important to reset it
                        PLOGGER.info("\tIndexing a bulk of {} docs (types)... OK. "
                                     "{} types already indexed.".format(BULK_LEN, types_counter))

                last_type = type
                entities.append(entity)

                lines_counter += 1
                if lines_counter % 10000 == 0:
                    # PLOGGER.info("\t{}K lines processed".format(lines_counter // 1000))
                    pass
                pass

        # index the last type
        types_counter += 1
        PLOGGER.info("\n\tFound {}-th (last) type: {}\t\t with # of entities: {}".format(types_counter, last_type,
                                                                                        len(entities)))
        types_bulk[last_type] = self.__make_type_doc(entities, last_type)
        self.__elastic.add_docs_bulk(types_bulk)  # a tiny bulk :)
        # no need to reset neither entities nor types_bulk :P
        # PLOGGER.info("Indexing a bulk of {} docs (types)... OK.".format(BULK_LEN))

        PLOGGER.info("\n### Indexing all {} found docs (types)... Done.".format(types_counter))
# -------
# Main script
def main(args):
    """Build the DBpedia types index described by the given config file.

    :param args: argparse.Namespace with a ``config`` file path.
    """
    config = FileUtils.load_config(args.config)
    type2entity_file = os.path.expanduser(os.path.join(config.get("type2entity_file", "")))
    entity_abstracts_file = os.path.expanduser(os.path.join(config.get("entity_abstracts_file", "")))
    if (not os.path.isfile(type2entity_file)) or (not os.path.isfile(entity_abstracts_file)):
        # fail loudly: the original exited silently with status 1
        PLOGGER.error("Missing input file(s): {!r} / {!r}".format(type2entity_file, entity_abstracts_file))
        raise SystemExit(1)

    indexer = IndexerDBpediaTypes(config)
    indexer.build_index(force=True)
    PLOGGER.info("Index build: <{}>".format(indexer.name))
def arg_parser(description=None):
    """Parse command-line arguments for the indexer.

    (The previous docstring claimed a 2-tuple of paths was returned, which was
    wrong.)

    :param description: optional custom description for the CLI help text.
    :return: argparse.Namespace with the parsed ``config`` file path.
    """
    default_description = "It indexes DBpedia types storing the abstracts of their respective assigning entities."
    description_str = description if description else default_description
    parser = argparse.ArgumentParser(description=description_str)
    parser.add_argument("config", help="config file", type=str)
    args = parser.parse_args()
    return args
# CLI entry point: parse the config path, then build the index.
if __name__ == "__main__":
    main(arg_parser())
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/storage/nt2mongo.py | """
NTriples to Mongo
=================
Loads NTriples RDF file into MongoDB.
Documents are identified by subject URIs.
Predicate-object values are stored as key-value pairs in a dictionary,
where object can be single-valued (string) or multi-valued (list of strings).
- Multiple ntriple files can be added to the same collection, where they would
be appended to the corresponding document.
- Empty object values are filtered out, even if they're present in the NTriples
file (that happens, for example, with DBpedia long abstracts.)
IMPORTANT:
- It is assumed that all triples with a given subject are grouped
together in the .nt file (this holds, e.g., for DBpedia)
- It is also assumed that a given predicate is present only in a single file;
when that is not the case, the contents in the last processed file will
overwrite the previously stored values in the given field, corresponding to
the predicate. If it can be a problem (e.g., DBpedia uses <rdf:type> for
both mapping-based types and YAGO types) then use predicate prefixing!
:Authors: <NAME>, <NAME>, <NAME>
"""
# import logging
from rdflib.plugins.parsers.ntriples import NTriplesParser
from rdflib.plugins.parsers.ntriples import ParseError
from rdflib.term import URIRef
from nordlys.core.storage.mongo import Mongo
from nordlys.core.storage.parser.nt_parser import Triple
from nordlys.core.storage.parser.uri_prefix import URIPrefix
from nordlys.core.utils.file_utils import FileUtils
from nordlys.config import PLOGGER
class NTriplesToMongoDB(object):
    """Streams an NTriples file into MongoDB, one document per subject URI
    (see the module docstring for the assumptions on triple ordering)."""

    def __init__(self, host, db, collection):
        self.__mongo = Mongo(host, db, collection)
        self.__prefix = URIPrefix()
        self.__m_id = None  # subject URI of the document currently being built
        self.__m_contents = None  # {predicate: [objects]} for the current subject
        # logging.basicConfig(level="ERROR")  # no warnings from the rdf parser

    def _next_triple(self, subj, pred, obj):
        """Processes a triple.

        - Appends to previous triple if it's the same subject.
        - Otherwise inserts last triple and creates a new one.
        """
        if (self.__m_id is not None) and (self.__m_id == subj):
            if pred not in self.__m_contents:
                self.__m_contents[pred] = []
            self.__m_contents[pred].append(obj)
        else:
            # new subject: flush the buffered document first
            self._write_to_mongo()
            self.__m_id = subj
            self.__m_contents = {pred: [obj]}

    def _write_to_mongo(self):
        """Writes triple (inserts or appends existing) to MongoDB collection."""
        if self.__m_id is not None:
            for field, value in self.__m_contents.items():
                self.__mongo.append_set(self.__m_id, field, value)
            # self.mongo.add(self.m_id, self.m_contents)
            self.__m_id = None
            self.__m_contents = None

    def drop(self):
        """Deletes the collection."""
        self.__mongo.drop()

    def add_file(self, filename, reverse_triple=False, predicate_prefix=None):
        """Adds contents from an NTriples file to MongoDB.

        :param filename: NTriples file.
        :param reverse_triple: if set True, the subject and object values are swapped.
        :param predicate_prefix: prefix to be added to predicates.
        """
        PLOGGER.info("Processing " + filename + "...")
        t = Triple()
        p = NTriplesParser(t)
        self.__m_id = None  # document id for MongoDB -- subj
        self.__m_contents = None  # document contents for MongoDB -- pred, obj
        i = 0
        with FileUtils.open_file_by_type(filename) as f:
            for line in f:
                try:
                    p.parsestring(line.decode("utf-8"))
                except ParseError:  # skip lines that couldn't be parsed
                    continue
                if t.subject() is None:  # only if parsed as a triple
                    continue

                # subject prefixing
                subj = self.__prefix.get_prefixed(t.subject())

                # predicate prefixing
                pred = self.__prefix.get_prefixed(t.predicate())
                if predicate_prefix is not None:
                    pred = predicate_prefix + pred

                # Object prefixing
                if type(t.object()) is URIRef:
                    obj = self.__prefix.get_prefixed(t.object())
                else:
                    obj = t.object()
                if len(obj) == 0:
                    continue  # skip empty objects

                # write or append
                if reverse_triple:  # reverse subj and obj
                    self._next_triple(obj, pred, subj)
                else:  # normal mode
                    self._next_triple(subj, pred, obj)

                i += 1
                if i % 100000 == 0:
                    PLOGGER.info(str(i // 1000) + "K lines processed from " + filename)

        # process last triple
        self._write_to_mongo()
# Library module: no CLI behaviour when executed directly.
if __name__ == "__main__":
    pass
|
medtray/MultiEm-RGCN | LTR/data_reader_jm.py | <filename>LTR/data_reader_jm.py<gh_stars>0
from collections import Counter
import sys
import os
cwd = os.getcwd()
from pathlib import Path
path = Path(cwd)
sys.path.append(os.path.join(path.parent.absolute(),'nordlys'))
from nordlys.core.retrieval.elastic import Elastic
from nordlys.core.retrieval.elastic_cache import ElasticCache
#from nordlys.config import PLOGGER
from nordlys.core.retrieval.scorer import *
from nltk.corpus import wordnet as wn
import numpy as np
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from random import randint
import json
import pandas as pd
from sklearn import preprocessing
from dgl.nn.pytorch import RelGraphConv
sys.path.append("/home/mohamedt/R-GCN-Graph")
from utils_ import *
from load_model_for_testing import *
es = ElasticCache("new_data_index")
def load_pretrained_wv(path):
    """Load word vectors from a text file with one "<word> <v1> <v2> ..." per line.

    :param path: path to the embedding file.
    :return: dict mapping word -> torch.DoubleTensor of its vector.
    """
    wv = {}
    with open(path, 'r') as f:
        # iterate the file lazily instead of f.readlines():
        # embedding files can be hundreds of MB
        for line in f:
            items = line.split(' ')
            wv[items[0]] = torch.DoubleTensor([float(a) for a in items[1:]])
    return wv
def pad_or_crop(field, max_tokens, to_add):
    """Crop *field* to max_tokens, or pad it by repeatedly appending *to_add*.

    Unlike the original, the input list is never mutated: a new list is
    always returned (`field += to_add` previously extended the caller's list
    in place).

    :param field: list of tokens.
    :param max_tokens: target length.
    :param to_add: list appended per padding step (normally a single element;
        with more elements the result can overshoot max_tokens, as before).
    :return: a new list of (normally) max_tokens tokens.
    """
    if len(field) > max_tokens:
        return field[:max_tokens]
    padded = list(field)
    for _ in range(max_tokens - len(padded)):
        padded += to_add
    return padded
def pad_or_crop_with_rep(field, max_tokens, dictt, field_type):
    """Map OOV tokens to 'unk', then crop or cyclically pad to max_tokens.

    :param field: list of tokens.
    :param max_tokens: exact target length.
    :param dictt: vocabulary mapping; tokens missing from it become 'unk'.
    :param field_type: controls the fallback pad token when *field* is empty
        (',' for 'description'/'attributes', '.' otherwise).
    :return: list of exactly max_tokens tokens.
    """
    known = [tok if tok in dictt else 'unk' for tok in field]
    if len(known) > max_tokens:
        return known[:max_tokens]
    # pad by cycling over the (pre-padding) token sequence
    cycle_src = known[:]
    if not cycle_src:
        cycle_src = [','] if field_type in ['description', 'attributes'] else ['.']
    idx = 0
    while len(known) < max_tokens:
        known.append(cycle_src[idx % len(cycle_src)])
        idx += 1
    return known
def encode_field(field, dictt, field_type):
    """Encode tokens into vocabulary ids.

    Unknown tokens fall back to the id of ',' (for 'description'/'attributes')
    or '.' (otherwise). The original used a bare ``except:`` which silently
    swallowed every error; only the expected KeyError is caught now.

    :param field: list of tokens.
    :param dictt: token -> id mapping.
    :param field_type: selects the fallback token for OOV entries.
    :return: list of ids, same length as *field*.
    """
    fallback_token = ',' if field_type in ['description', 'attributes'] else '.'
    encoded = []
    for tok in field:
        try:
            encoded.append(dictt[tok])
        except KeyError:
            encoded.append(dictt[fallback_token])
    return encoded
def get_www_all_features(feature_file):
    """Parse a comma-separated feature file into a DataFrame.

    Expected line format (after one header line):
    ``qid,query,table_id,f1,...,fn,rel``.

    The original opened the file without ever closing it; a ``with`` block
    fixes the handle leak.

    :param feature_file: path to the CSV-like feature file.
    :return: pandas.DataFrame with columns id_left (query id), id_right
        (table id), features (np.ndarray of floats) and label (int relevance).
    """
    ids_left = []
    ids_right = []
    features = []
    labels = []
    with open(feature_file, 'r') as handle:
        handle.readline()  # skip the header line
        for line in handle:
            seps = line.strip().split(',')
            ids_left.append(seps[0])
            ids_right.append(seps[2])
            labels.append(int(seps[-1]))
            # columns 3..n-2 are the per-pair feature values
            features.append(np.array([float(each) for each in seps[3:-1]]))
    df = pd.DataFrame({
        'id_left': ids_left,
        'id_right': ids_right,
        'features': features,
        'label': labels
    })
    return df
def synset_translation(word):
    """Expand *word* into WordNet synset names plus hypernym names.

    Performs a breadth-first walk over the hypernym graph, stopping after 20
    node expansions to bound the output size.

    :param word: a word (surrounding whitespace is stripped).
    :return: list of synset name strings.
    """
    synsets = wn.synsets(word.strip())
    names = [syn._name for syn in synsets]
    queue = synsets
    expansions = 0
    while queue and expansions < 20:
        current = queue.pop(0)
        hypers = current.hypernyms()
        queue += hypers
        names += [hyper._name for hyper in hypers]
        expansions += 1
    return names
class DataAndQueryJM(Dataset):
    """PyTorch Dataset pairing each (query, table) judgement with word-level,
    WordNet-level and query-term-expansion (QTE) encodings plus the
    precomputed semantic feature vector from features2.csv.

    NOTE(review): __init__ performs heavy I/O (Elasticsearch queries, csv/json
    reads, numpy archives) and also writes the kept ranking lines to
    *output_file*.
    """

    def __init__(self, file_name, wv, word_to_index, index_to_word, output_file, args):
        data = RGCNLinkDataset(args.dataset)
        data.dir = os.path.join(args.parent_path, args.dataset)
        dir_base = data.dir
        # reuse caller-provided vocab/embeddings when given, else load from disk
        if wv and word_to_index and index_to_word:
            self.wv = wv
            self.word_to_index = word_to_index
            self.index_to_word = index_to_word
        else:
            self.word_to_index = {}
            self.index_to_word = []
            self.wv = np.load(os.path.join(dir_base, 'wv.npy'), allow_pickle=True)
            self.wv = self.wv[()]  # np.load wraps the pickled dict in a 0-d array
            self.word_to_index = np.load(os.path.join(dir_base, 'word_to_index.npy'), allow_pickle=True)
            self.word_to_index = self.word_to_index[()]
            self.index_to_word = list(np.load(os.path.join(dir_base, 'index_to_word.npy')))
        labels = []
        all_vector_tables = []  # NOTE(review): never used below
        # token budgets and accumulators per representation
        max_tokens_query_QTE = 150
        all_desc_QTE = []
        all_query_QTE = []
        max_tokens_desc_w = 20
        max_tokens_att_w = 10
        max_tokens_query_w = 6
        all_desc_w = []
        all_att_w = []
        all_values_w = []
        all_query_w = []
        max_tokens_desc_wn = 50
        max_tokens_att_wn = 50
        max_tokens_query_wn = 30
        all_desc_wn = []
        all_att_wn = []
        all_values_wn = []
        all_query_wn = []
        path = os.path.join(dir_base, 'data_fields_with_values.json')
        with open(path) as f:
            dt = json.load(f)  # table id -> field texts
        data_csv = pd.read_csv(os.path.join(dir_base, 'features2.csv'))
        test_data = data_csv['table_id']
        query = data_csv['query']
        # ranking file: one "<query_id>\t...\t<table_id>..." line per candidate
        text_file = open(file_name, "r")
        lines = text_file.readlines()
        queries_id = []
        list_lines = []
        for line in lines:
            line = line[0:len(line) - 1]  # strip the trailing newline
            aa = line.split('\t')
            queries_id += [aa[0]]
            list_lines.append(aa)
        queries_id = [int(i) for i in queries_id]
        qq = np.sort(list(set(queries_id)))
        test_data = list(test_data)
        to_save = []
        all_query_labels = []
        all_semantic = []
        normalize = True
        all_df = get_www_all_features(os.path.join(dir_base, 'features2.csv'))
        for q in qq:
            indexes = [i for i, x in enumerate(queries_id) if x == q]
            indices = data_csv[data_csv['query_id'] == q].index.tolist()
            inter = np.array(list_lines)[indexes]
            test_query = list(query[indices])[0]
            # --- QTE representation: expand the query via an Elasticsearch search
            query_tokens = test_query
            result = es.search(query_tokens, 'desc_att', 10000)
            query_tokens = list(result.keys())[:max_tokens_query_QTE]
            query_ = pad_or_crop_with_rep(query_tokens, max_tokens_query_QTE, self.word_to_index, 'query')
            vector_query_QT = [encode_field(query_, self.word_to_index, 'query')]
            # --- word-level representation of the query
            query_tokens = preprocess(test_query, 'description')
            query_ = pad_or_crop_with_rep(query_tokens, max_tokens_query_w, self.word_to_index, 'query')
            vector_query_w = [encode_field(query_, self.word_to_index, 'query')]
            # --- WordNet representation: replace tokens by synset/hypernym names
            QT = []
            for token in query_tokens:
                trans = synset_translation(token)
                QT += trans
            query_ = pad_or_crop_with_rep(QT, max_tokens_query_wn, self.word_to_index, 'query')
            vector_query_wn = [encode_field(query_, self.word_to_index, 'query')]
            for item in inter:
                if item[2] in test_data:
                    all_query_labels.append(q)
                    rel = float(
                        data_csv[((data_csv['query_id'] == q) & (data_csv['table_id'] == item[2]))].iloc[0]['rel'])
                    el = all_df.loc[(all_df['id_left'] == str(q)) & (all_df['id_right'] == item[2])]
                    el = el['features']
                    all_semantic.append(list(el.values)[0])
                    all_desc_QTE.append([self.word_to_index[item[2]]])
                    all_query_QTE.append([vector_query_QT])
                    table = dt[item[2]]
                    # table description = page title + second title + caption
                    pgTitle_feat = table['pgTitle']
                    if len(pgTitle_feat) > 0:
                        pgTitle_feat = pgTitle_feat.split(' ')
                    else:
                        pgTitle_feat = []
                    secondTitle_feat = table['secondTitle']
                    if len(secondTitle_feat) > 0:
                        secondTitle_feat = secondTitle_feat.split(' ')
                    else:
                        secondTitle_feat = []
                    caption_feat = table['caption']
                    if len(caption_feat) > 0:
                        caption_feat = caption_feat.split(' ')
                    else:
                        caption_feat = []
                    description = pgTitle_feat + secondTitle_feat + caption_feat
                    original_attributes = table['attributes']
                    if len(original_attributes) > 0:
                        original_attributes = original_attributes.split(' ')
                    else:
                        original_attributes = []
                    values = table['data']
                    # NOTE(review): the condition tests original_attributes but
                    # splits values -- likely meant len(values) > 0; confirm
                    # before changing
                    if len(original_attributes) > 0:
                        values = values.split(' ')
                    else:
                        values = []
                    description = pad_or_crop_with_rep(description, max_tokens_desc_w, self.word_to_index, 'description')
                    original_attributes = pad_or_crop_with_rep(original_attributes, max_tokens_att_w, self.word_to_index,
                                                               'attributes')
                    # NOTE(review): values reuse the description budget (max_tokens_desc_w)
                    values = pad_or_crop_with_rep(values, max_tokens_desc_w, self.word_to_index, 'description')
                    # NOTE(review): field_type args look swapped ('attributes'
                    # for description and vice versa); only affects the OOV
                    # fallback token inside encode_field -- confirm intent
                    vector_desc = [encode_field(description, self.word_to_index, 'attributes')]
                    vector_att = [encode_field(original_attributes, self.word_to_index, 'description')]
                    vector_values = [encode_field(values, self.word_to_index, 'description')]
                    all_desc_w.append([vector_desc])
                    all_att_w.append([vector_att])
                    all_query_w.append([vector_query_w])
                    all_values_w.append([vector_values])
                    # WordNet expansion of the table fields
                    des_t = []
                    for token in description:
                        trans = synset_translation(token)
                        des_t += trans
                    description = pad_or_crop_with_rep(des_t, max_tokens_desc_wn, self.word_to_index, 'description')
                    att_t = []
                    for token in original_attributes:
                        trans = synset_translation(token)
                        att_t += trans
                    original_attributes = pad_or_crop_with_rep(att_t, max_tokens_att_wn, self.word_to_index,
                                                               'attributes')
                    val_t = []
                    for token in values:
                        trans = synset_translation(token)
                        val_t += trans
                    values = pad_or_crop_with_rep(val_t, max_tokens_desc_wn, self.word_to_index, 'description')
                    vector_desc = [encode_field(description, self.word_to_index, 'attributes')]
                    vector_att = [encode_field(original_attributes, self.word_to_index, 'description')]
                    vector_values = [encode_field(values, self.word_to_index, 'description')]
                    all_desc_wn.append([vector_desc])
                    all_att_wn.append([vector_att])
                    all_query_wn.append([vector_query_wn])
                    all_values_wn.append([vector_values])
                    labels.append(rel)
                    to_save.append(item)
        all_semantic = np.stack(all_semantic, axis=0)
        if normalize:
            # scaler = preprocessing.StandardScaler()
            # all_semantic = scaler.fit_transform(all_semantic)
            all_semantic = preprocessing.normalize(all_semantic)
        # materialize everything as tensors for the DataLoader
        self.all_desc_QTE = torch.tensor(all_desc_QTE)
        self.all_query_QTE = torch.tensor(all_query_QTE)
        self.all_desc_w = torch.tensor(all_desc_w)
        self.all_att_w = torch.tensor(all_att_w)
        self.all_query_w = torch.tensor(all_query_w)
        self.all_desc_wn = torch.tensor(all_desc_wn)
        self.all_att_wn = torch.tensor(all_att_wn)
        self.all_query_wn = torch.tensor(all_query_wn)
        self.all_query_labels = all_query_labels
        self.all_semantic = all_semantic
        self.all_semantic = torch.tensor(self.all_semantic)
        self.labels = labels
        # persist the kept ranking lines for later evaluation
        inter = np.array(to_save)
        np.savetxt(output_file, inter, fmt="%s", delimiter='\t')

    def __getitem__(self, t):
        """Return the t-th sample: word-level (desc, att, query), WordNet-level
        (desc, att, query) and QTE (desc, query) encodings, followed by the
        relevance label and the semantic feature vector.

        (The original docstring about (center, context) word pairs was a
        copy-paste leftover from word2vec code.)
        """
        return self.all_desc_w[t], self.all_att_w[t], self.all_query_w[t], self.all_desc_wn[t], self.all_att_wn[t], \
               self.all_query_wn[t], self.all_desc_QTE[t], self.all_query_QTE[t], \
               self.labels[t], self.all_semantic[t]

    def __len__(self):
        """Return the number of (query, table) samples in the dataset."""
        return len(self.all_desc_w)
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/features/ftr_entity_similarity.py | """
FTR Entity Similarity
=====================
Implements features capturing the similarity between entity and a query.
:Author: <NAME>
"""
from __future__ import division
import re
import math
from nordlys.core.retrieval.elastic import Elastic
from nordlys.core.retrieval.scorer import ScorerMLM
class FtrEntitySimilarity(object):
    """Similarity features between a query and an entity.

    :param query: query string
    :param en_id: entity ID scored against the query
    :param elastic: Elastic interface used for analysis and term statistics
    """
    DEBUG = 0

    def __init__(self, query, en_id, elastic):
        self.__query = query
        self.__en_id = en_id
        self.__elastic = elastic

    def lm_score(self, field=Elastic.FIELD_CATCHALL):
        """Query length normalized LM score between entity field and query.

        :param field: field name
        :return: LM score (0 when no query term is in the collection)
        """
        raw_score = self.nllr(self.__query, {field: 1})
        # nllr() returns None when none of the query terms occur in the collection
        score = math.exp(raw_score) if raw_score else 0
        return score

    def mlm_score(self, field_weights):
        """Query length normalized MLM similarity between the entity and query.

        :param field_weights: dictionary {field: weight, ...}
        :return: MLM score
        """
        raw_score = self.nllr(self.__query, field_weights)
        score = math.exp(raw_score) if raw_score else 0
        return score

    def context_sim(self, mention, field=Elastic.FIELD_CATCHALL):
        """LM score of entity to the context of query (context means query - mention).

        E.g. given the query "uss yorktown charleston" and mention "uss",
        the query context is " yorktown charleston".

        :param mention: string (treated as literal text, not a regex)
        :param field: field name
        :return: context similarity score
        :raises Exception: if the mention does not occur in the query
        """
        # Bug fix: the mention is literal text, so escape it before handing it
        # to re.search; mentions containing metacharacters (e.g. "AC/DC (band)")
        # previously mis-matched or raised re.error.
        match = re.search(re.escape(mention), self.__query)
        if match is None:
            raise Exception("NOTE: Mention \"" + mention + "\" is not found in the query \"" + self.__query + "\"")
        mention_scope = match.span()
        q_context = self.__query[:mention_scope[0]] + self.__query[mention_scope[1]:]
        # scoring
        raw_score = self.nllr(q_context.strip(), {field: 1})
        score = math.exp(raw_score) if raw_score else 0
        return score

    def nllr(self, query, field_weights):
        """
        Computes Normalized query likelihood (NLLR):
            NLLR(q,d) = \\sum_{t \\in q} P(t|q) log P(t|\\theta_d) - \\sum_{t \\in q} p(t|q) log P(t|C)
        where:
            P(t|q) = n(t,q)/|q|
            P(t|C) = \\sum_{f} \\mu_f * P(t|C_f)
            P(t|\\theta_d) = smoothed LM/MLM score

        :param query: query
        :param field_weights: dictionary {field: weight, ...}
        :return: NLLR score, or None if no query term occurs in the collection
        """
        query = self.__elastic.analyze_query(query)
        scorer_mlm = ScorerMLM(self.__elastic, query, {"fields": field_weights})
        term_probs = scorer_mlm.get_mlm_term_probs(self.__en_id)
        # none of the query terms are in the collection
        if sum(term_probs.values()) == 0:
            if self.DEBUG:
                print("\t\tP_mlm(q|theta_d) = None")
            return None
        # computes the NLLR score; split the query once instead of per term
        query_terms = query.split()
        query_len = len(query_terms)
        left_sum, right_sum = 0, 0
        for t, p_t_theta_d in term_probs.items():
            if p_t_theta_d == 0:  # Skips the term if it is not in the collection
                continue
            query_tf = query_terms.count(t)
            p_t_C = self.__term_collec_prob(t, field_weights)
            p_t_q = query_tf / query_len
            left_sum += p_t_q * math.log(p_t_theta_d)
            right_sum += p_t_q * math.log(p_t_C)
            if self.DEBUG:
                print("\tP(\"" + t + "\"|d) =", p_t_theta_d, "\tP(\"" + t + "\"|C) =", p_t_C, "\tp(\"" + t + "\"|q) =", p_t_q)
        nllr_q_d = left_sum - right_sum
        if self.DEBUG:
            print("\t\tNLLR(" + query + "|theta_d) = " + str(nllr_q_d))
        return nllr_q_d

    def __term_collec_prob(self, term, fields):
        """
        Computes term collection probability for NLLR: P(t|C) = \\sum_{f} \\mu_f * P(t|C_f)

        :param term: string
        :param fields: dictionary {field: weight, ...}
        :return: probability P(t|C)
        """
        p_t_C = 0
        for f, mu_f in fields.items():
            len_C_f = self.__elastic.coll_length(f)
            tf_t_C_f = self.__elastic.coll_term_freq(term, f)
            p_t_C += mu_f * (tf_t_C_f / len_C_f)
        return p_t_C
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/features/ftr_mention.py | <reponame>medtray/MultiEm-RGCN
"""
FTR Mention
===========
Implements mention feature.
:Author: <NAME>
"""
from nordlys.logic.query.mention import Mention
class FtrMention(object):
    """Features computed from a mention string (with optional entity / candidates)."""

    def __init__(self, mention, entity=None, cand_ens=None):
        self.__mention = mention.strip()
        self.__entity = entity
        self.__cand_ens = cand_ens

    def __load_cand_ens(self):
        """Lazily fetches candidate entities on first use."""
        if self.__cand_ens is None:
            self.__cand_ens = Mention(self.__mention, self.__entity).get_cand_ens()

    def len_ratio(self, q):
        """Ratio of mention length to query length (counted in terms)."""
        mention_terms = self.__mention.split()
        query_terms = q.split()
        return len(mention_terms) / len(query_terms)

    def mention_len(self):
        """Number of terms in the mention."""
        return len(self.__mention.split())

    def matches(self):
        """Number of entities whose surface form equals the mention.

        Uses both DBpedia and Freebase name variants.
        """
        self.__load_cand_ens()
        return len(self.__cand_ens)
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/eval/trec_run.py | <filename>nordlys/nordlys/core/eval/trec_run.py
"""
Trec run
========
Utility module for working with TREC runfiles.
Usage
-----
Get statistics about a runfile
``trec_run <run_file> -o stat``
Filter runfile to contain only documents from a given set
``trec_run <run_file> -o filter -d <doc_ids_file> -f <output_file> -n <num_results>``
:Authors: <NAME>, <NAME>
"""
import argparse
from math import exp
from nordlys.core.retrieval.retrieval_results import RetrievalResults
from nordlys.core.storage.parser.uri_prefix import URIPrefix
from nordlys.config import PLOGGER
class TrecRun(object):
    """Represents a TREC runfile.

    :param file_name: name of the run file
    :param normalize: whether retrieval scores are to be normalized for each query (default: False)
    :param remap_by_exp: whether scores are to be converted from the log-domain by taking their exp (default: False)
    :param run_id: run ID; if None, it is taken from the first parsed line of the file
    """
    def __init__(self, file_name=None, normalize=False, remap_by_exp=False, run_id=None):
        self.__results = {}  # key is a query_id, value is a RetrievalResults object
        self.__sum_scores = {}  # per-query score sums, maintained for normalize()
        self.run_id = run_id
        if file_name is not None:
            self.load_file(file_name, remap_by_exp)
            if normalize is True:
                self.normalize()

    def load_file(self, file_name, remap_by_exp=False):
        """Loads a TREC runfile.

        Lines without exactly 6 whitespace-separated fields are skipped.

        :param file_name: name of the run file
        :param remap_by_exp: whether scores are to be converted from the log-domain by taking their exp (default: False)
        """
        # load the file such that self.results[query_id] = res holds the results for a given query,
        # where res is a RetrievalResults object
        with open(file_name, "r") as f_baseline:
            for line in f_baseline:
                # Parse data
                fields = line.rstrip().split()
                if len(fields) != 6:
                    continue
                query_id, doc_id, score = fields[0], fields[2], float(fields[4])
                if self.run_id is None:
                    self.run_id = fields[5]
                # Add parsed data
                if query_id not in self.__results:
                    self.__results[query_id] = RetrievalResults()  # initialize
                # remap exponentially the scores in log-domain to (0, 1)
                if remap_by_exp:
                    score = exp(score)
                self.__results[query_id].append(doc_id, score)
                # an additional data structure to make the normalization easier
                self.__sum_scores[query_id] = self.__sum_scores.get(query_id, 0) + score

    def normalize(self):
        """Normalizes the retrieval scores such that they sum up to one for each query."""
        query_ids = self.get_results().keys()  # new var, since for-loop will modify this dict
        for query_id in query_ids:
            norm_result = RetrievalResults()
            for entity_id, score in self.get_results()[query_id].get_scores_sorted():
                norm_result.append(entity_id, score / self.__get_sum_scores(query_id))
            self.get_results()[query_id] = norm_result  # overwrite previous result

    def filter(self, doc_ids_file, output_file, num_results=100):
        """Filters runfile to include only selected docIDs and outputs the results to a file.

        :param doc_ids_file: file with one doc_id per line
        :param output_file: output file name
        :param num_results: number of results per query
        """
        # loading docids (with ignoring empty lines in the input file);
        # a set gives O(1) membership tests instead of scanning a list per doc
        with open(doc_ids_file, "r") as f:
            doc_ids = {l for l in (line.strip() for line in f) if l}
        # filtering qrels
        with open(output_file, "w") as f:
            for query_id, res in self.__results.items():
                filtered_res = RetrievalResults()
                for doc_id, score in res.get_scores_sorted():
                    if doc_id in doc_ids:
                        filtered_res.append(doc_id, score)
                    if filtered_res.num_docs() == num_results:
                        break
                filtered_res.write_trec_format(query_id, self.run_id, f, num_results)

    def get_query_results(self, query_id):
        """Returns the corresponding RetrievalResults object for a given query.

        :param query_id: queryID
        :rtype: :py:class:`nordlys.core.retrieval.retrieval_results.RetrievalResults`
        """
        return self.__results.get(query_id, None)

    def get_results(self):
        """Returns all results.

        :return: a dict with queryIDs as keys and RetrievalResults object as values
        """
        return self.__results

    def __get_sum_scores(self, query_id):
        """Returns the sum of all the retrieval scores for a given query.

        :param query_id: queryID
        :return: sum of scores (or None if the queryID cannot be found)
        """
        return self.__sum_scores.get(query_id, None)

    def print_stat(self):
        """Prints simple statistics."""
        PLOGGER.info("#queries: " + str(len(self.__results)))
        PLOGGER.info("#results: " + str(sum(v.num_docs() for k, v in self.__results.items())))
def arg_parser():
    """Define and parse the command-line arguments for this utility."""
    parser = argparse.ArgumentParser()
    parser.add_argument("run_file", help="run file")  # mandatory arg
    parser.add_argument("-o", "--operation", help="operation name", choices=["stat", "filter"])
    parser.add_argument("-d", "--doc_ids_file", help="file with the allowed doc_ids (for filtering)", type=str)
    parser.add_argument("-f", "--output_file", help="output file", type=str)
    parser.add_argument("-n", "--num_results", help="number of results", type=int)
    return parser.parse_args()
def main(args):
    """Dispatch to the requested operation (stat or filter).

    :param args: parsed argparse namespace (see arg_parser)
    """
    run = TrecRun(args.run_file)
    if args.operation == "stat":
        run.print_stat()
    elif args.operation == "filter":
        # Optional args default to None; the original called len(None) here,
        # which raised TypeError instead of printing the help message.
        if not args.doc_ids_file or not args.output_file:
            PLOGGER.info("doc_ids_file or output_file missing")
        elif args.num_results is not None:
            # honor -n/--num_results, which was parsed but previously ignored
            run.filter(args.doc_ids_file, args.output_file, args.num_results)
        else:
            run.filter(args.doc_ids_file, args.output_file)


if __name__ == "__main__":
    main(arg_parser())
medtray/MultiEm-RGCN | nordlys/nordlys/core/storage/parser/nt_parser.py | <filename>nordlys/nordlys/core/storage/parser/nt_parser.py
"""
NTriples Parser
===============
NTriples parser with URI prefixing
:Author: <NAME>
"""
import sys
import logging
from nordlys.core.storage.parser.uri_prefix import URIPrefix
from rdflib.plugins.parsers.ntriples import NTriplesParser
from rdflib.term import URIRef
from nordlys.config import PLOGGER
class Triple(object):
    """Holds one parsed triple; used as the sink object by rdflib's NTriplesParser."""

    def __init__(self, prefix=None):
        self.__s = None
        self.__p = None
        self.__o = None
        self.__prefix = prefix

    def triple(self, s, p, o):
        """Parser callback: store the current triple.

        :param s: subject
        :param p: predicate
        :param o: object
        """
        self.__s, self.__p, self.__o = s, p, o

    def __prefix_uri(self, uri):
        """Return the URI with its namespace prefixed, wrapped in angle brackets."""
        if self.__prefix is None:
            raise Exception("Prefix handler is not set!")
        return "<" + self.__prefix.get_prefixed(uri) + ">"

    def subject(self):
        return self.__s

    def subject_prefixed(self):
        return self.__prefix_uri(self.__s)

    def predicate(self):
        return self.__p

    def predicate_prefixed(self):
        return self.__prefix_uri(self.__p)

    def object(self):
        return self.__o

    def object_prefixed(self):
        # Only URI objects are prefixed; literals are returned verbatim.
        if type(self.__o) is URIRef:
            return self.__prefix_uri(self.__o)
        return self.__o
class TripleHandler(object):
    """Abstract callback interface for NTParser; subclasses override triple_parsed."""

    def triple_parsed(self, triple):
        """Called once for each parsed triple, with the triple as parameter.

        Default implementation is a no-op.
        """
        pass
class NTParser(object):
    """NTriples parser class.

    Wraps rdflib's NTriplesParser, feeding each parsed triple to a
    TripleHandler callback (see TripleHandler / Triple above).
    """
    def __init__(self):
        logging.basicConfig(level="ERROR")  # no warnings from the rdf parser

    def parse_file(self, filename, triplehandler):
        """Parses file and calls callback function with the parsed triple.

        :param filename: path of an NTriples file, parsed line by line
        :param triplehandler: TripleHandler whose triple_parsed() is invoked
            once per line that yields a triple
        """
        PLOGGER.info("Processing " + filename + "...")
        prefix = URIPrefix()
        t = Triple(prefix)
        p = NTriplesParser(t)
        i = 0
        with open(filename) as f:
            for line in f:
                p.parsestring(line)
                # NOTE(review): t is reused across lines and never reset, so a
                # line that fails to parse appears to leave the previous triple
                # in t and the handler would receive it again — confirm whether
                # this is intended before relying on per-line semantics.
                if t.subject() is None:  # only if parsed as a triple
                    continue
                # call the handler object with the parsed triple
                triplehandler.triple_parsed(t)
                i += 1
                if i % 10000 == 0:
                    # progress log every 10K parsed triples (i/1000 yields e.g. "10.0K")
                    PLOGGER.info(str(i / 1000) + "K lines processed")
class TripleHandlerPrinter(TripleHandler):
    """Example triple handler that logs each received triple, raw and prefixed."""

    def triple_parsed(self, triple):
        for label, raw_fn, prefixed_fn in (
            ("S: ", triple.subject, triple.subject_prefixed),
            (" P: ", triple.predicate, triple.predicate_prefixed),
            (" O: ", triple.object, triple.object_prefixed),
        ):
            PLOGGER.info(label + raw_fn() + " ==> " + prefixed_fn())
def main(argv):
    """Demo entry point: parse an NTriples file and print every triple.

    :param argv: command-line args; argv[0], when given, overrides the
        hard-coded default input path (the original ignored argv entirely).
    """
    input_file = argv[0] if argv else "/scratch/data/dbpedia-3.9/labels_en.nt"
    parser = NTParser()
    thp = TripleHandlerPrinter()
    parser.parse_file(input_file, thp)


if __name__ == "__main__":
    main(sys.argv[1:])
|
medtray/MultiEm-RGCN | nordlys/nordlys/services/er.py | """
Entity Retrieval
================
Command-line application for entity retrieval.
Usage
-----
::
python -m nordlys.services.er -c <config_file> -q <query>
If `-q <query>` is passed, it returns the results for the specified query and prints them in terminal.
Config parameters
------------------
- **index_name**: name of the index,
- **first_pass**:
- **num_docs**: number of documents in first-pass scoring (default: 100)
- **field**: field used in first pass retrieval (default: Elastic.FIELD_CATCHALL)
- **fields_return**: comma-separated list of fields to return for each hit (default: "")
- **num_docs**: number of documents to return (default: 100)
- **start**: starting offset for ranked documents (default:0)
- **model**: name of retrieval model; accepted values: [lm, mlm, prms] (default: lm)
- **field**: field name for LM (default: catchall)
- **fields**: list of fields for PRMS (default: [catchall])
- **field_weights**: dictionary with fields and corresponding weights for MLM (default: {catchall: 1})
- **smoothing_method**: accepted values: [jm, dirichlet] (default: dirichlet)
- **smoothing_param**: value of lambda or mu; accepted values: [float or "avg_len"], (jm default: 0.1, dirichlet default: 2000)
- **query_file**: name of query file (JSON),
- **output_file**: name of output file,
- **run_id**: run id for TREC output
Example config
---------------
.. code:: python
{"index_name": "dbpedia_2015_10",
"first_pass": {
"num_docs": 1000
},
"model": "prms",
"num_docs": 1000,
"smoothing_method": "dirichlet",
"smoothing_param": 2000,
"fields": ["names", "categories", "attributes", "similar_entity_names", "related_entity_names"],
"query_file": "path/to/queries.json",
"output_file": "path/to/output.txt",
"run_id": "test"
}
------------------------
:Author: <NAME>
"""
import argparse
from pprint import pprint
from nordlys.config import ELASTIC_INDICES
from nordlys.core.retrieval.elastic import Elastic
from nordlys.core.retrieval.elastic_cache import ElasticCache
from nordlys.core.retrieval.retrieval import Retrieval
from nordlys.core.retrieval.scorer import Scorer
from nordlys.core.utils.file_utils import FileUtils
# Constants
DBPEDIA_INDEX = ELASTIC_INDICES[0]
class ER(object):
    """Entity retrieval service: first-pass retrieval plus model-based scoring.

    :param config: dict of retrieval settings (see module docstring)
    :param elastic: Elastic/ElasticCache instance used for scoring
    """
    def __init__(self, config, elastic=None):
        self.__check_config(config)
        self.__config = config
        self.__num_docs = int(config["num_docs"])
        self.__start = int(config["start"])
        self.__er = Retrieval(config)
        self.__elastic = elastic

    @staticmethod
    def __check_config(config):
        """Checks config parameters and sets default values (mutates config in place)."""
        config["index_name"] = DBPEDIA_INDEX
        if config.get("first_pass", None) is None:
            config["first_pass"] = {}
        if config["first_pass"].get("1st_num_docs", None) is None:
            config["first_pass"]["1st_num_docs"] = 1000
        if config["first_pass"].get("fields_return", None) is None:
            config["first_pass"]["fields_return"] = ""
        if config.get("num_docs", None) is None:
            config["num_docs"] = config["first_pass"]["1st_num_docs"]
        if config.get("start", None) is None:
            config["start"] = 0
        if config.get("model", None) is None:
            config["model"] = "lm"
        # Todo: Check the ELR params
        return config

    def __get_scorer(self, query):
        """Factory method to get entity retrieval method."""
        return Scorer.get_scorer(self.__elastic, query, self.__config)

    def retrieve(self, query):
        """Retrieves entities for a query.

        :param query: query string
        :return: dict with "query", "total_hits" and "results" keys
        """
        scorer = self.__get_scorer(query)
        ens = self.__er.retrieve(query, scorer)
        # converts to output format
        res = {"query": query,
               "total_hits": len(ens),
               "results": {}}
        if len(ens) != 0:
            res["results"] = self.__get_top_k(ens)
        return res

    def __get_top_k(self, ens):
        """Returns the result window [start, start + num_docs) as a dict keyed by rank.

        Bug fix: the original computed range(start, start + min(num_docs, len(ens))),
        which indexed past the end of the list whenever start > 0.
        """
        sorted_ens = sorted(ens.items(), key=lambda item: item[1]["score"], reverse=True)
        results = {}
        # clamp the upper bound so a non-zero start offset cannot run off the list
        end = min(self.__start + self.__num_docs, len(sorted_ens))
        for i in range(self.__start, end):
            en_id, en = sorted_ens[i][0], sorted_ens[i][1]
            results[i] = {"entity": en_id, "score": en["score"]}
            if en.get("fields", {}) != {}:
                results[i]["fields"] = en["fields"]
        return results

    def batch_retrieval(self):
        """Performs batch retrieval for a set of queries."""
        # todo: integrate ELR approach
        self.__er.batch_retrieval()
def arg_parser():
    """Define and parse the command-line arguments for entity retrieval."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-q", "--query", help="query string", type=str, default=None)
    parser.add_argument("-c", "--config", help="config file", type=str, default={})
    return parser.parse_args()
def main(args):
    """Run a single query (when -q is given) or batch retrieval from the config."""
    config = FileUtils.load_config(args.config)
    er = ER(config, ElasticCache(DBPEDIA_INDEX))
    if args.query:
        pprint(er.retrieve(args.query))
    else:
        er.batch_retrieval()


if __name__ == '__main__':
    main(arg_parser())
medtray/MultiEm-RGCN | load_model_for_testing.py | <reponame>medtray/MultiEm-RGCN<gh_stars>0
import argparse
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from dgl.contrib.data import load_data
from dgl.nn.pytorch import RelGraphConv
import subprocess
from model import BaseRGCN
import os
import utils
class EmbeddingLayer(nn.Module):
    """Input layer of the RGCN: maps integer node ids to learnable embeddings."""

    def __init__(self, num_nodes, h_dim):
        super(EmbeddingLayer, self).__init__()
        self.embedding = torch.nn.Embedding(num_nodes, h_dim)

    def forward(self, g, h, r, norm):
        # g, r and norm are ignored here; h carries node ids (shape (N, 1)
        # in this file's usage — see test_node_id in main).
        node_ids = h.squeeze()
        return self.embedding(node_ids)
class RGCN(BaseRGCN):
    # Concrete RGCN: an embedding lookup as input layer followed by
    # block-diagonal-decomposition ("bdd") RelGraphConv hidden layers.
    def build_input_layer(self):
        """Input layer: trainable per-node embeddings (see EmbeddingLayer)."""
        return EmbeddingLayer(self.num_nodes, self.h_dim)

    def build_hidden_layer(self, idx):
        """Hidden layer number idx; ReLU on every layer except the last.

        NOTE(review): assumes BaseRGCN (model.py, not visible here) supplies
        num_nodes, h_dim, num_rels, num_bases, num_hidden_layers and dropout —
        confirm against model.py.
        """
        act = F.relu if idx < self.num_hidden_layers - 1 else None
        return RelGraphConv(self.h_dim, self.h_dim, self.num_rels, "bdd",
                self.num_bases, activation=act, self_loop=True,
                dropout=self.dropout)
class LinkPredict(nn.Module):
    """RGCN encoder with a DistMult decoder for link prediction."""

    def __init__(self, in_dim, h_dim, num_rels, num_bases=-1,
                 num_hidden_layers=1, dropout=0, use_cuda=False, reg_param=0):
        super(LinkPredict, self).__init__()
        # Encoder over the inverse-augmented edge set (hence num_rels * 2).
        self.rgcn = RGCN(in_dim, h_dim, h_dim, num_rels * 2, num_bases,
                         num_hidden_layers, dropout, use_cuda)
        self.reg_param = reg_param
        # Decoder: one diagonal relation vector per relation (DistMult).
        self.w_relation = nn.Parameter(torch.Tensor(1 * num_rels, h_dim))
        nn.init.xavier_uniform_(self.w_relation,
                                gain=nn.init.calculate_gain('relu'))

    def calc_score(self, embedding, triplets):
        """DistMult score <e_s, diag(w_r), e_o> for each (s, r, o) row of triplets."""
        subj = embedding[triplets[:, 0]]
        rel = self.w_relation[triplets[:, 1]]
        obj = embedding[triplets[:, 2]]
        return torch.sum(subj * rel * obj, dim=1)

    def forward(self, g, h, r, norm):
        return self.rgcn.forward(g, h, r, norm)

    def regularization_loss(self, embedding):
        """L2 penalty on node embeddings and relation vectors."""
        return torch.mean(embedding.pow(2)) + torch.mean(self.w_relation.pow(2))

    def get_loss(self, g, embed, triplets, labels):
        """BCE-with-logits over positive/negative triples plus weighted L2 regularisation.

        triplets is a list of data samples (positive and negative); each row
        is a 3-tuple of (source, relation, destination).
        """
        score = self.calc_score(embed, triplets)
        predict_loss = F.binary_cross_entropy_with_logits(score, labels)
        return predict_loss + self.reg_param * self.regularization_loss(embed)
def node_norm_to_edge_norm(g, node_norm):
    """Convert per-node normalisation factors into per-edge ones.

    Each edge inherits the norm of its destination node (the convention
    RelGraphConv expects for its ``norm`` argument).

    :param g: DGL graph
    :param node_norm: tensor of shape (num_nodes, 1) with per-node norms
    :return: tensor with one norm value per edge
    """
    g = g.local_var()  # temporary view, so the caller's graph data is untouched
    # convert to edge norm
    g.ndata['norm'] = node_norm
    g.apply_edges(lambda edges: {'norm': edges.dst['norm']})
    return g.edata['norm']
def _read_dictionary(filename):
d = {}
with open(filename, 'r+') as f:
for line in f:
line = line.strip().split('\t')
d[line[1]] = int(line[0])
return d
def _read_dictionary_test(filename):
d = {}
with open(filename, 'r+') as f:
for line in f:
line = line.strip().split('\t')
d[line[0]] = line[1]
return d
def _read_triplets(filename):
with open(filename, 'r+') as f:
for line in f:
processed_line = line.strip().split('\t')
yield processed_line
def _read_triplets_as_list(filename, entity_dict, relation_dict):
    """Read a TSV triple file and map every (s, r, o) string to its integer id.

    :param filename: path to the triple file
    :param entity_dict: {entity_name: id} mapping for subjects and objects
    :param relation_dict: {relation_name: id} mapping for predicates
    :return: list of [s_id, r_id, o_id] lists
    """
    return [
        [entity_dict[t[0]], relation_dict[t[1]], entity_dict[t[2]]]
        for t in _read_triplets(filename)
    ]
class RGCNLinkDataset(object):
    """Loader for a link-prediction dataset stored as TSV dictionaries and triple files."""

    def __init__(self, name):
        self.name = name
        self.dir = os.path.join('./', self.name)

    def load(self):
        """Read entity/relation dictionaries plus train/valid/test triples and
        populate num_nodes / num_rels."""
        entity_dict = _read_dictionary(os.path.join(self.dir, 'entities.dict'))
        relation_dict = _read_dictionary(os.path.join(self.dir, 'relations.dict'))
        for split in ('train', 'valid', 'test'):
            split_path = os.path.join(self.dir, split + '.txt')
            triples = np.array(_read_triplets_as_list(split_path, entity_dict, relation_dict))
            setattr(self, split, triples)
        self.num_nodes = len(entity_dict)
        print("# entities: {}".format(self.num_nodes))
        self.num_rels = len(relation_dict)
        print("# relations: {}".format(self.num_rels))
        print("# edges: {}".format(len(self.train)))
def sort_and_rank(score, target):
    """Rank (0-based, descending) of each row's target entity within its scores.

    :param score: (batch, num_entities) score matrix
    :param target: (batch,) index of the true entity per row
    :return: (batch,) tensor of rank positions
    """
    _, order = torch.sort(score, dim=1, descending=True)
    hits = torch.nonzero(order == target.view(-1, 1))
    return hits[:, 1].view(-1)
def get_relation_score(embedding, w, a, b, test_size, batch_size=100):
    """For every (a, b) pair, pick which of the relevance relations (ids 6, 7, 8)
    gives the highest DistMult score.

    :param embedding: (num_nodes, dim) node embeddings
    :param w: (num_rels, dim) DistMult relation vectors
    :param a: source node ids
    :param b: destination node ids
    :param test_size: number of pairs to score
    :param batch_size: pairs per batch
    :return: (test_size,) tensor of argmax indices into [6, 7, 8]
    """
    num_batches = (test_size + batch_size - 1) // batch_size
    all_labels = []
    for batch_idx in range(num_batches):
        print("batch {} / {}".format(batch_idx, num_batches))
        lo = batch_idx * batch_size
        hi = min(test_size, lo + batch_size)
        src, dst = a[lo:hi], b[lo:hi]
        relevance_relations = [6, 7, 8]
        per_relation = []
        for rel in relevance_relations:
            rel_ids = rel * torch.ones(dst.shape[0]).type(torch.int64)
            per_relation.append(
                torch.sum(embedding[src] * w[rel_ids] * embedding[dst], dim=1))
        stacked = torch.cat([col.view(-1, 1) for col in per_relation], dim=1)
        all_labels.append(torch.argmax(stacked, dim=1))
    return torch.cat(all_labels)
def get_relevance_relation_score(embedding, w, a, b, test_size, batch_size=100):
    """DistMult score of each (a, b) pair under the fixed relevance relation (id 9).

    :param embedding: (num_nodes, dim) node embeddings
    :param w: (num_rels, dim) DistMult relation vectors
    :param a: source node ids
    :param b: destination node ids
    :param test_size: number of pairs to score
    :param batch_size: pairs per batch
    :return: (test_size,) tensor of scores
    """
    num_batches = (test_size + batch_size - 1) // batch_size
    scores = []
    relevance_rel_id = 9
    for batch_idx in range(num_batches):
        print("batch {} / {}".format(batch_idx, num_batches))
        lo = batch_idx * batch_size
        hi = min(test_size, lo + batch_size)
        src, dst = a[lo:hi], b[lo:hi]
        rel_ids = relevance_rel_id * torch.ones(dst.shape[0]).type(torch.int64)
        scores.append(torch.sum(embedding[src] * w[rel_ids] * embedding[dst], dim=1))
    return torch.cat(scores)
def _trec_eval_metric(metric, qrels_file, run_file):
    """Run the external ./trec_eval binary for one metric and return its value.

    NOTE(review): the command is built by string concatenation and run with
    shell=True — file names containing shell metacharacters would be unsafe;
    kept as-is since the paths are produced internally, but worth hardening
    with a list argument and shell=False.
    """
    cmd = "./trec_eval -m " + metric + " " + qrels_file + " " + run_file
    result = subprocess.check_output(cmd, shell=True, encoding='cp437')
    return float(result.split('\t')[2])


def calculate_ndcg(output_file, ndcg_file):
    """Evaluate a run file against qrels with trec_eval.

    :param output_file: run file in TREC format
    :param ndcg_file: qrels file
    :return: (ndcg@5, map, mrr) tuple
    """
    # de-duplicated: the original repeated the subprocess/parse block three times
    map_score = _trec_eval_metric("map", ndcg_file, output_file)
    mrr = _trec_eval_metric("recip_rank", ndcg_file, output_file)
    ndcg = _trec_eval_metric("ndcg_cut.5", ndcg_file, output_file)
    return ndcg, map_score, mrr
def load_checkpoint_for_eval(model, filename, device):
    """Restore model weights from a checkpoint file for evaluation.

    Only the model state is loaded (no optimizer). If the file does not exist,
    the model is returned unchanged.

    :param model: pre-defined module whose state_dict is overwritten in place
    :param filename: path of the checkpoint file
    :param device: map_location forwarded to torch.load
    :return: the same model instance
    """
    if not os.path.isfile(filename):
        print("=> no checkpoint found at '{}'".format(filename))
        return model
    print("=> loading checkpoint '{}'".format(filename))
    checkpoint = torch.load(filename, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(filename, checkpoint['epoch']))
    return model
def main(args):
    """Evaluate a trained RGCN link predictor on the test split.

    Loads the dataset and a saved checkpoint, scores every test (table, query)
    pair under the relevance relation, writes a TREC-format run file and
    reports NDCG@5 / MAP / MRR via trec_eval.

    Bug fixed: load_checkpoint_for_eval() takes a mandatory ``device``
    argument that the original call omitted, so evaluation crashed with a
    TypeError before loading the checkpoint.
    """
    # load graph data
    data = RGCNLinkDataset(args.dataset)
    dir_base = data.dir
    data.load()
    num_nodes = data.num_nodes
    train_data = data.train
    test_data = data.test
    num_rels = data.num_rels

    # check cuda
    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(args.gpu)
    device = torch.device('cuda', args.gpu) if use_cuda else torch.device('cpu')

    # create model
    model = LinkPredict(num_nodes,
                        args.n_hidden,
                        num_rels,
                        num_bases=args.n_bases,
                        num_hidden_layers=args.n_layers,
                        dropout=args.dropout,
                        use_cuda=use_cuda,
                        reg_param=args.regularization)

    # testing triplets
    test_data = torch.LongTensor(test_data)

    # build test graph over the training edges
    test_graph, test_rel, test_norm = utils.build_test_graph(
        num_nodes, num_rels, train_data)
    model_state_file = './wikiTables/model_state.pth'
    model = load_checkpoint_for_eval(model, model_state_file, device)

    # full-graph forward pass to obtain node embeddings
    test_node_id = torch.arange(0, num_nodes, dtype=torch.long).view(-1, 1)
    test_rel = torch.from_numpy(test_rel)
    test_norm = node_norm_to_edge_norm(test_graph, torch.from_numpy(test_norm).view(-1, 1))
    embed = model(test_graph, test_node_id, test_rel, test_norm)

    s = test_data[:, 0]
    o = test_data[:, 2]
    test_size = test_data.shape[0]
    # DistMult score of each (table, query) pair under the relevance relation
    ranks = get_relevance_relation_score(embed, model.w_relation, s, o, test_size,
                                         batch_size=args.eval_batch_size)

    entity_path = os.path.join(dir_base, 'entities.dict')
    entities_dict = _read_dictionary_test(entity_path)
    test_score_file = os.path.join(dir_base, 'scores.txt')
    split_id = 1
    output_qrels_test = './wikiTables/qrels_test' + str(split_id) + '.txt'

    # write a TREC-format run file: query_id Q0 table_id 0 score tag
    with open(test_score_file, 'w') as f:
        rows = []
        for i, (subj, _, obj) in enumerate(test_data):
            # entity names look like "<query>_..." for queries; keep the query id part
            query_name = entities_dict[str(obj.tolist())].split('_')[0]
            table_name = entities_dict[str(subj.tolist())]
            rows.append(query_name + '\t' + 'Q0' + '\t' + table_name + '\t' + '0' + '\t'
                        + str(ranks[i].tolist()) + '\trow')
        f.write('\n'.join(rows))

    test_ndcg, test_map, test_mrr = calculate_ndcg(test_score_file, output_qrels_test)
    print('ndcg={}'.format(test_ndcg))
    print('map={}'.format(test_map))
    print('mrr={}'.format(test_mrr))
if __name__ == '__main__':
    # CLI for evaluating a saved checkpoint; the architecture flags
    # (n-hidden, n-bases, n-layers, dropout) must match the training run
    # that produced model_state.pth.
    parser = argparse.ArgumentParser(description='RGCN')
    parser.add_argument("--dropout", type=float, default=0.0,
            help="dropout probability")
    parser.add_argument("--n-hidden", type=int, default=50,
            help="number of hidden units")
    parser.add_argument("--gpu", type=int, default=-1,
            help="gpu")
    parser.add_argument("--lr", type=float, default=1e-2,
            help="learning rate")
    parser.add_argument("--n-bases", type=int, default=10,
            help="number of weight blocks for each relation")
    parser.add_argument("--n-layers", type=int, default=2,
            help="number of propagation rounds")
    parser.add_argument("--n-epochs", type=int, default=1,
            help="number of minimum training epochs")
    parser.add_argument("--dataset", type=str, default='wikiTables',
            help="dataset to use")
    parser.add_argument("--eval-batch-size", type=int, default=500,
            help="batch size when evaluating")
    parser.add_argument("--regularization", type=float, default=0.01,
            help="regularization weight")
    # The remaining flags are training-time options; they are accepted here
    # so the same command line works for training and evaluation, but this
    # evaluation script does not read them.
    parser.add_argument("--grad-norm", type=float, default=1.0,
            help="norm to clip gradient to")
    parser.add_argument("--graph-batch-size", type=int, default=5000,
            help="number of edges to sample in each iteration")
    parser.add_argument("--graph-split-size", type=float, default=0.5,
            help="portion of edges used as positive sample")
    parser.add_argument("--negative-sample", type=int, default=0,
            help="number of negative samples per positive sample")
    parser.add_argument("--evaluate-every", type=int, default=1,
            help="perform evaluation every n epochs")
    parser.add_argument("--edge-sampler", type=str, default="uniform",
            help="type of edge sampler: 'uniform' or 'neighbor'")
    args = parser.parse_args()
    print(args)
    main(args)
medtray/MultiEm-RGCN | nordlys/nordlys/logic/features/__init__.py | """
Features
========
This is the features package.
"""
|
medtray/MultiEm-RGCN | LTR/conv_knrm.py | <gh_stars>0
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as f
from torch.autograd import Variable
import torch.nn.functional as F
class CONVKNRM(nn.Module):
"""Class that classifies question pair as duplicate or not."""
    def __init__(self, args):
        """Build the Conv-KNRM layers.

        :param args: namespace providing wv (word -> vector mapping),
            index_to_word, emsize (embedding dim), device, nbins (number of
            Gaussian kernels), and mu / sigma (kernel centres and widths)
        """
        super(CONVKNRM, self).__init__()
        self.wv=args.wv
        self.index_to_word=args.index_to_word
        self.input_dim=args.emsize
        self.device=args.device
        self.nbins = args.nbins
        # Final scoring layer over all kernel features; 9 presumably =
        # 3 query n-gram sizes x 3 doc n-gram sizes (uni/bi/tri) — the visible
        # part of forward() builds these cross-match pooling sums. The third
        # positional arg of nn.Linear is bias (truthy 1 keeps the bias term).
        self.dense_f = nn.Linear(self.nbins * 9, 1, 1)
        self.tanh = nn.Tanh()
        # 1-D n-gram composers implemented as Conv2d over the embedding matrix
        # (128 filters each, window = n x emb_dim).
        self.conv_uni = nn.Sequential(
            nn.Conv2d(1, 128, (1, self.input_dim)),
            nn.ReLU()
        )
        self.conv_bi = nn.Sequential(
            nn.Conv2d(1, 128, (2, self.input_dim)),
            nn.ReLU()
        )
        self.conv_tri = nn.Sequential(
            nn.Conv2d(1, 128, (3, self.input_dim)),
            nn.ReLU()
        )
        # Kernel centres/widths, shaped for broadcasting against the
        # (batch, q_len, d_len, 1) similarity tensor in get_intersect_matrix.
        # NOTE(review): torch.autograd.Variable is deprecated; plain tensors
        # with requires_grad=False behave the same on modern torch.
        tensor_mu = torch.FloatTensor(args.mu).to(self.device)
        tensor_sigma = torch.FloatTensor(args.sigma).to(self.device)
        self.mu = Variable(tensor_mu, requires_grad=False).view(1, 1, 1, self.nbins)
        self.sigma = Variable(tensor_sigma, requires_grad=False).view(1, 1, 1, self.nbins)
def get_intersect_matrix(self, q_embed, d_embed):
sim = torch.bmm(q_embed, d_embed).view(q_embed.size()[0], q_embed.size()[1], d_embed.size()[2], 1)
pooling_value = torch.exp((- ((sim - self.mu) ** 2) / (self.sigma ** 2) / 2))
pooling_sum = torch.sum(pooling_value, 2)
log_pooling_sum = torch.log(torch.clamp(pooling_sum, min=1e-10)) * 0.01
log_pooling_sum = torch.sum(log_pooling_sum, 1)
return log_pooling_sum
    def to_embedding(self,input):
        """Look up pretrained vectors for a batch of token-index tensors.

        :param input: tensor of token indices, shape (batch, seq_len)
        :return: float tensor of shape (batch, seq_len, emsize) on self.device

        NOTE(review): self.wv[word] must be a flat list of floats (it is
        concatenated with +=), and index_to_word is indexed with a 0-dim
        tensor element here — confirm both against the embedding loader.
        """
        shape_input = list(input.shape)
        em = input.view(-1)
        list_of_embeddings = []
        for key in em:
            list_of_embeddings += self.wv[self.index_to_word[key]]
        list_of_embeddings = torch.Tensor(list_of_embeddings)
        embeds = list_of_embeddings.view(shape_input[0], shape_input[1],
                                         self.input_dim).to(self.device)
        # embeds = self.word_embeddings(input)
        ss = embeds.shape
        # embeds = embeds.view(ss[0],-1, ss[3], self.input_dim)
        #emb = torch.squeeze(embeds) # batch_size * 1 * seq_len * emb_dim
        return embeds
def forward(self, batch_queries, batch_docs,batch_semantic):
qlen = batch_queries.shape[1]
num_docs, dlen = batch_docs.shape[0], batch_docs.shape[1]
batch_size=1
emb_query = self.to_embedding(batch_queries)
emb_desc = self.to_embedding(batch_docs)
desc_att_shape = emb_desc.shape
query_shape = emb_query.shape
qwu_embed = torch.transpose(
torch.squeeze(self.conv_uni(emb_query.view(emb_query.size()[0], 1, -1, self.input_dim))), 1,
2) + 0.000000001
qwb_embed = torch.transpose(
torch.squeeze(self.conv_bi(emb_query.view(emb_query.size()[0], 1, -1, self.input_dim))), 1,
2) + 0.000000001
qwt_embed = torch.transpose(
torch.squeeze(self.conv_tri(emb_query.view(emb_query.size()[0], 1, -1, self.input_dim))), 1,
2) + 0.000000001
dwu_embed = torch.squeeze(
self.conv_uni(emb_desc.view(emb_desc.size()[0], 1, -1, self.input_dim))) + 0.000000001
dwb_embed = torch.squeeze(
self.conv_bi(emb_desc.view(emb_desc.size()[0], 1, -1, self.input_dim))) + 0.000000001
dwt_embed = torch.squeeze(
self.conv_tri(emb_desc.view(emb_desc.size()[0], 1, -1, self.input_dim))) + 0.000000001
qwu_embed_norm = F.normalize(qwu_embed, p=2, dim=2, eps=1e-10)
qwb_embed_norm = F.normalize(qwb_embed, p=2, dim=2, eps=1e-10)
qwt_embed_norm = F.normalize(qwt_embed, p=2, dim=2, eps=1e-10)
dwu_embed_norm = F.normalize(dwu_embed, p=2, dim=1, eps=1e-10)
dwb_embed_norm = F.normalize(dwb_embed, p=2, dim=1, eps=1e-10)
dwt_embed_norm = F.normalize(dwt_embed, p=2, dim=1, eps=1e-10)
log_pooling_sum_wwuu = self.get_intersect_matrix(qwu_embed_norm, dwu_embed_norm)
log_pooling_sum_wwut = self.get_intersect_matrix(qwu_embed_norm, dwt_embed_norm)
log_pooling_sum_wwub = self.get_intersect_matrix(qwu_embed_norm, dwb_embed_norm)
log_pooling_sum_wwbu = self.get_intersect_matrix(qwb_embed_norm, dwu_embed_norm)
log_pooling_sum_wwtu = self.get_intersect_matrix(qwt_embed_norm, dwu_embed_norm)
log_pooling_sum_wwbb = self.get_intersect_matrix(qwb_embed_norm, dwb_embed_norm)
log_pooling_sum_wwbt = self.get_intersect_matrix(qwb_embed_norm, dwt_embed_norm)
log_pooling_sum_wwtb = self.get_intersect_matrix(qwt_embed_norm, dwb_embed_norm)
log_pooling_sum_wwtt = self.get_intersect_matrix(qwt_embed_norm, dwt_embed_norm)
log_pooling_sum = torch.cat(
[log_pooling_sum_wwuu, log_pooling_sum_wwut, log_pooling_sum_wwub, log_pooling_sum_wwbu,
log_pooling_sum_wwtu,log_pooling_sum_wwbb, log_pooling_sum_wwbt, log_pooling_sum_wwtb, log_pooling_sum_wwtt], 1)
output = torch.squeeze(F.tanh(self.dense_f(log_pooling_sum)), 1)
return output
|
medtray/MultiEm-RGCN | nordlys/nordlys/services/el.py | """
Entity Linking
==============
The command-line application for entity linking
Usage
-----
::
python -m nordlys.services.el -c <config_file> -q <query>
If `-q <query>` is passed, it returns the results for the specified query and prints them in terminal.
Config parameters
-----------------
- **method**: name of the method
- **cmns** The baseline method that uses the overall popularity of entities as link targets
- **ltr** The learning-to-rank model
- **threshold**: Entity linking threshold; varies depending on the method *(default: 0.1)*
- **step**: The step of entity linking process: [linking|ranking|disambiguation], *(default: linking)*
- **kb_snapshot**: File containing the KB snapshot of proper named entities; required for LTR, and optional for CMNS
- **query_file**: name of query file (JSON)
- **output_file**: name of output file
*Parameters of LTR method:*
- **model_file**: The trained model file; *(default:"data/el/model.txt")*
- **ground_truth**: The ground truth file; *(optional)*
- **gen_training_set**: If True, generates the training set from the ground truth and query files; *(default: False)*
- **gen_model**: If True, trains the model from the training set; *(default: False)*
- The other parameters are similar to the nordlys.core.ml.ml settings
Example config
---------------
.. code:: python
{
"method": "cmns",
"threshold": 0.1,
"query_file": "path/to/queries.json"
"output_file": "path/to/output.json"
}
------------------------
:Author: <NAME>
"""
import argparse
import json
from pprint import pprint
import pickle
from nordlys.config import ELASTIC_INDICES, PLOGGER
from nordlys.core.ml.instances import Instances
from nordlys.core.retrieval.elastic_cache import ElasticCache
from nordlys.core.utils.file_utils import FileUtils
from nordlys.logic.el.cmns import Cmns
from nordlys.logic.el.el_utils import load_kb_snapshot, to_elq_eval
from nordlys.logic.el.ltr import LTR
from nordlys.logic.entity.entity import Entity
from nordlys.logic.features.feature_cache import FeatureCache
from nordlys.logic.query.query import Query
# Constants
DBPEDIA_INDEX = ELASTIC_INDICES[0]
class EL(object):
    """Entity linking service: links, ranks, or disambiguates entities in
    queries, depending on the configured ``step``."""

    def __init__(self, config, entity, elastic=None, fcache=None):
        """
        :param config: config dict (see the module docstring for parameters)
        :param entity: entity access object
        :param elastic: Elastic index cache (used by the LTR method)
        :param fcache: feature cache (used by the LTR method)
        """
        self.__check_config(config)
        self.__config = config
        self.__method = config["method"]
        self.__threshold = float(config["threshold"])
        self.__query_file = config.get("query_file", None)
        self.__output_file = config.get("output_file", None)
        self.__entity = entity
        self.__elastic = elastic
        self.__fcache = fcache
        self.__model = None  # LTR model, loaded lazily in __get_linker
        if "kb_snapshot" in self.__config:
            load_kb_snapshot(self.__config["kb_snapshot"])

    @staticmethod
    def __check_config(config):
        """Checks config parameters and sets default values.

        Mutates *config* in place and also returns it.
        """
        if config.get("method", None) is None:
            config["method"] = "ltr"
        if config.get("step", None) is None:
            config["step"] = "linking"
        if config.get("threshold", None) is None:
            config["threshold"] = 0.1
        # LTR-specific defaults.
        if config["method"] == "ltr":
            if config.get("model_file", None) is None:
                config["model_file"] = "data/el/model.txt"
            if config.get("kb_snapshot", None) is None:
                config["kb_snapshot"] = "data/el/snapshot_2015_10.txt"
        return config

    def __get_linker(self, query):
        """Returns the entity linker based on the given model and parameters.

        :param query: query object
        :return: entity linking object
        """
        if self.__method.lower() == "cmns":
            return Cmns(query, self.__entity, threshold=self.__threshold)
        if self.__method.lower() == "ltr":
            if self.__model is None:
                # Load the trained model once and cache it across queries.
                self.__model = pickle.load(open(self.__config["model_file"], "rb"))
            return LTR(query, self.__entity, self.__elastic, self.__fcache,
                       self.__model, threshold=self.__threshold)
        else:
            raise Exception("Unknown model " + self.__method)

    def link(self, query, qid=""):
        """Performs entity linking for the query.

        :param query: query string
        :param qid: query ID (used for logging and the Query object)
        :return: annotated query dict, or ranked entities when step == "ranking"
        """
        PLOGGER.info("Linking query " + qid + " [" + query + "] ")
        q = Query(query, qid)
        linker = self.__get_linker(q)
        if self.__config["step"] == "ranking":
            res = linker.rank_ens()
        else:
            linked_ens = linker.link()
            res = {"query": q.raw_query,
                   "processed_query": q.query,
                   "results": linked_ens}
        return res

    def batch_linking(self):
        """Scores queries in a batch and outputs results."""
        results = {}

        # Full linking pipeline.
        if self.__config["step"] == "linking":
            queries = json.load(open(self.__query_file))
            for qid in sorted(queries):
                results[qid] = self.link(queries[qid], qid)
            to_elq_eval(results, self.__output_file)
            # json.dump(results, open(self.__output_file, "w"), indent=4, sort_keys=True)

        # Only the ranking step.
        if self.__config["step"] == "ranking":
            queries = json.load(open(self.__query_file))
            for qid in sorted(queries):
                linker = self.__get_linker(Query(queries[qid], qid))
                results[qid] = linker.rank_ens()
            ranked_inss = Instances(sum([inss.get_all() for inss in results.values()], []))
            ranked_inss.to_treceval(self.__output_file)
            if self.__config.get("json_file", None):
                ranked_inss.to_json(self.__config["json_file"])

        # Only the disambiguation step.
        if self.__config["step"] == "disambiguation":
            inss = Instances.from_json(self.__config["test_set"])
            inss_by_query = inss.group_by_property("qid")
            for qid, q_inss in sorted(inss_by_query.items()):
                # NOTE(review): an empty string is passed instead of a Query
                # object; the linker constructors appear to expect a Query --
                # confirm against Cmns/LTR.
                linker = self.__get_linker("")
                results[qid] = {"results": linker.disambiguate(Instances(q_inss))}
            if self.__config.get("json_file", None):
                # BUG FIX: json.dump takes (obj, fp); the original had the
                # file handle and the object swapped, which raises at runtime.
                with open(self.__config["json_file"], "w") as out:
                    json.dump(results, out, indent=4, sort_keys=True)
            to_elq_eval(results, self.__output_file)
        PLOGGER.info("Output file: " + self.__output_file)
def arg_parser():
    """Build the command-line parser and parse sys.argv.

    :return: parsed arguments with ``query`` and ``config`` attributes
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-q", "--query", type=str, default=None,
                        help="query string")
    parser.add_argument("-c", "--config", type=str, default={},
                        help="config file")
    return parser.parse_args()
def main(args):
    """Run entity linking from a config file.

    :param args: parsed command-line arguments (query, config)
    """
    conf = FileUtils.load_config(args.config)
    el = EL(conf, Entity(), ElasticCache(DBPEDIA_INDEX), FeatureCache())
    # Optionally (re)train the LTR model before linking.
    if conf.get("gen_model", False):
        LTR.train(conf)
    if args.query:
        # Single ad-hoc query: link it and print the result.
        pprint(el.link(args.query))
    else:
        # No query given: process the batch from the config's query file.
        el.batch_linking()


if __name__ == '__main__':
    main(arg_parser())
|
medtray/MultiEm-RGCN | nordlys/nordlys/core/storage/parser/uri_prefix.py | """
URI Prefixing
=============
URI prefixing.
:Author: <NAME>
"""
import json
from nordlys import config
PREFIX_JSON_FILE = config.DATA_DIR + "/uri_prefix/prefixes.json"
class URIPrefix(object):
    """Rewrites URIs using a known-prefix table (e.g. rdfs:label)."""

    def __init__(self, prefix_file=PREFIX_JSON_FILE):
        # Mapping of full URI prefix -> short name.
        self.prefixes = json.load(open(prefix_file))

    def __get_prefixed(self, uri):
        """Return *uri* with its longest known prefix replaced by "short:"."""
        match = None
        # First candidate: everything up to and including a '#' separator.
        hash_pos = uri.find("#")
        if hash_pos > 0:
            candidate = uri[:hash_pos + 1]  # including trailing #
            if candidate in self.prefixes:
                match = candidate
        # Otherwise walk '/' boundaries right-to-left until a prefix matches.
        # Positions <= 10 would fall inside the scheme (e.g. "http://").
        slash_pos = uri.rfind("/")
        while match is None and slash_pos > 10:
            candidate = uri[:slash_pos + 1]  # including trailing /
            if candidate in self.prefixes:
                match = candidate
            slash_pos = candidate[:slash_pos].rfind("/")
        if match is None:
            return uri
        # NOTE: str.replace substitutes every occurrence of the prefix.
        return uri.replace(match, self.prefixes[match] + ":")

    def get_prefixed(self, uri, angle_brackets=True):
        """Prefix *uri*, stripping and (optionally) restoring angle brackets."""
        stripped = uri[1:-1] if (uri[0] == "<" and uri[-1] == ">") else uri
        pref = self.__get_prefixed(stripped)
        return "<" + pref + ">" if angle_brackets else pref
def convert_txt_to_json(txt_file, json_file=PREFIX_JSON_FILE):
    """Convert the prefixes txt file ("prefix<TAB>uri" lines) to a JSON
    mapping of {uri: prefix}.

    This has to be done only once, and only in case there is no .json file
    or changes were made to the .txt.

    :param txt_file: path of the input txt file
    :param json_file: path of the output json file
    """
    prefixes = {}
    # Context managers guarantee the handles are closed even on parse errors.
    with open(txt_file, "r") as ins:
        for line in ins:
            prefix, uri = line.strip().split("\t", 1)
            # There might be duplicates in the txt file; we only consider the
            # first appearance of each URI (the txt file shipped with nordlys
            # is ordered by URI frequency, so that is reasonable).
            if uri not in prefixes:
                prefixes[uri] = prefix
    # BUG FIX: open in text mode ("w"); json.dump writes str objects, which
    # fails with a binary-mode ("wb") handle under Python 3.
    with open(json_file, "w") as out:
        json.dump(prefixes, out)
if __name__ == '__main__':
    # One-off: convert the prefix txt file to json (only needed when the
    # .json file is missing or prefixes.txt has changed).
    # convert_txt_to_json("../../data/uri_prefix/prefixes.txt")
    # Demo: print the prefixed form of two example URIs.
    pre = URIPrefix()
    print(pre.get_prefixed("http://www.w3.org/2000/01/rdf-schema#label"))
    print(pre.get_prefixed("<http://dbpedia.org/resource/xxx/aaa/Audi_A4>"))
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/entity/__init__.py | """
Entity
======
This is the entity package.
"""
|
medtray/MultiEm-RGCN | nordlys/nordlys/logic/fusion/fusion_scorer.py | <gh_stars>10-100
"""
Fusion Scorer
=============
Abstract class for fusion-based scoring.
:Authors: <NAME>, <NAME>, <NAME>
"""
from nordlys.core.retrieval.retrieval import Retrieval
class FusionScorer(object):
    """Abstract class for any fusion-based method."""
    # NOTE: the docstring above was previously placed after the class
    # attributes, where it had no effect as __doc__.

    # Association weighting modes.
    ASSOC_MODE_BINARY = 1
    ASSOC_MODE_UNIFORM = 2

    def __init__(self, index_name, association_file=None, run_id="fusion"):
        """
        :param index_name: name of index
        :param association_file: association file
        :param run_id: run identifier written into the TREC output
        """
        self._index_name = index_name
        self.association_file = association_file
        self.assoc_obj = dict()  # object -> associated documents
        self.assoc_doc = dict()  # document -> associated objects
        self.run_id = run_id

    def load_associations(self):
        """Loads the document-object associations.

        Subclasses should parse ``self.association_file`` (one
        "documentId objectId" pair per line) into assoc_obj/assoc_doc.
        """
        pass

    def score_query(self, query, assoc_fun=None):
        """Scores a single query; to be implemented by subclasses."""
        pass

    def score_queries(self, queries, output_file):
        """Scores all queries and dumps results into an output file."""
        # Context manager closes the output even if scoring raises.
        with open(output_file, "w") as out:
            for query_id in sorted(queries):
                pqo = self.score_query(queries[query_id])
                pqo.write_trec_format(query_id, self.run_id, out)

    def load_queries(self, query_file):
        """Loads the query file.

        :return: query dictionary {queryID: [term1, term2, ...]}
        """
        queries = {}
        with open(query_file, "r") as f:
            for line in f:
                tmp = line.split()
                # First token is the query ID; the rest are query terms.
                queries[tmp[0]] = tmp[1:]
        return queries
# def parse(self, text):
# stopwords = [
# "a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in",
# "into", "is", "it", "no", "not", "of", "on", "or", "such", "that", "the",
# "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]
# terms = []
# # Replace specific characters with space
# chars = ["'", ".", ":", ",", "/", "(", ")", "-", "+"]
# for ch in chars:
# if ch in text:
# text = text.replace(ch, " ")
# # Tokenization
# for term in text.split(): # default behavior of the split is to split on one or more whitespaces
# # Lowercasing
# term = term.lower()
# # Stopword removal
# if term in stopwords:
# continue
# terms.append(term)
# return terms
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.