repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
libero/eLife-style-content-adapter | dags/libero/context_facades.py | <gh_stars>0
import logging
logger = logging.getLogger(__name__)
def get_previous_task_name(context: dict):
    """Return the task_id of the first upstream task, or None when there is none."""
    upstream = context['task'].upstream_list
    if not upstream:
        return None
    return upstream[0].task_id
def get_return_value_from_previous_task(context: dict, task_id: str = None):
    """Pull the XCom return value of *task_id*, defaulting to the previous task."""
    source_task = task_id or get_previous_task_name(context)
    task_instance = context['task_instance']
    return task_instance.xcom_pull(task_ids=source_task)
def get_file_name_passed_to_dag_run_conf_file(context: dict):
    """
    Returns the value of the 'file' key in conf if it was supplied.
    :param context: key-values generated by a DAG run
    :return str: value of the conf 'file' key in conf
    """
    dag_run = context['dag_run']
    conf = dag_run.conf if dag_run.conf else {}
    file_name = conf.get('file')
    logger.info('FILE NAME PASSED FROM TRIGGER= %s', file_name)
    # fail loudly when the trigger did not supply a usable 'file' value
    message = 'conf={\'file\': <file_name>} not passed to %s' % dag_run.dag_id
    assert file_name and isinstance(file_name, str), message
    return file_name
|
libero/eLife-style-content-adapter | dags/trigger_dag.py | """
DAG identify zip files to process from s3 bucket and trigger dag for each zip file
"""
import json
import logging
import re
from datetime import timedelta
from uuid import uuid4
from airflow import DAG, configuration
from airflow.api.common.experimental.trigger_dag import trigger_dag
from airflow.models import DagRun
from airflow.operators import python_operator
from airflow.settings import Session
from airflow.utils import timezone
import process_zip_dag
from libero.aws import list_bucket_keys_iter
from libero.context_facades import get_return_value_from_previous_task
from sqlalchemy import and_
SCHEDULE_INTERVAL = timedelta(minutes=1)
# formula to start this DAG at server start up.
# More info at https://gtoonstra.github.io/etl-with-airflow/gotchas.html
START_DATE = timezone.utcnow().replace(second=0, microsecond=0) - SCHEDULE_INTERVAL
SOURCE_BUCKET = configuration.conf.get('libero', 'source_bucket_name')
DESTINATION_BUCKET = configuration.conf.get('libero', 'destination_bucket_name')
SUPPORTED_ARCHIVE_FORMATS = {'.zip', '.meca'}
logger = logging.getLogger(__name__)
default_args = {
'owner': 'libero',
'depends_on_past': False,
'start_date': START_DATE,
'email': ['<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 3,
'retry_delay': timedelta(seconds=5)
}
def get_zip_files_to_process() -> set:
    """
    Compares the source and destination buckets and returns a set of file names
    to process.
    """
    # archive name (extensions stripped) -> original key, for supported archives
    incoming = {}
    for key in list_bucket_keys_iter(Bucket=SOURCE_BUCKET):
        if any(key.endswith(ext) for ext in SUPPORTED_ARCHIVE_FORMATS):
            incoming[re.sub(r'\.\w+', '', key)] = key
    # top-level "folder" prefixes already present in the destination bucket
    expanded = {re.sub(r'/$', '', key)
                for key in list_bucket_keys_iter(Bucket=DESTINATION_BUCKET, Delimiter='/')}
    pending = incoming.keys() - expanded
    return {incoming[name] for name in pending}
def run_dag_for_each_file(dag_to_trigger, **context) -> None:
    """
    Trigger one run of *dag_to_trigger* for each file name returned by the
    previous task, skipping files that already have a running DAG run.

    :param dag_to_trigger: dag_id of the DAG to trigger per file
    :param context: Airflow task context (requires 'task_instance')
    :raises AssertionError: if the previous task returned None
    """
    file_names = get_return_value_from_previous_task(context)
    message = 'None type passed from previous task. Accepted types are set, list or tuple.'
    assert file_names is not None, message
    session = Session()
    files_triggered = []
    try:
        for file_name in file_names:
            # check if a file has already been triggered for processing;
            # run_ids are prefixed with the file name so they can be matched here
            if session.query(DagRun).filter(and_(DagRun.run_id.startswith(file_name + '_'),
                                                 DagRun.state == 'running')).first():
                continue
            trigger_dag(dag_id=dag_to_trigger,
                        run_id='{}_{}'.format(file_name, uuid4()),
                        conf=json.dumps({'file': file_name}),
                        execution_date=None,
                        replace_microseconds=False)
            files_triggered.append(file_name)
    finally:
        # fix: release the DB session even if triggering fails part way through
        session.close()
    # fix: lazy %-args instead of eager string interpolation in the log call
    logger.info('triggered %s for %s files: %s',
                dag_to_trigger, len(files_triggered), files_triggered)
# DAG that periodically scans the source bucket and triggers one run of the
# processing DAG for each new archive file found.
dag = DAG('trigger_process_zip_dag',
          default_args=default_args,
          schedule_interval=SCHEDULE_INTERVAL)
# task 1: list archive files present in source but not yet expanded in destination
task_1 = python_operator.PythonOperator(
    task_id='get_zip_files_to_process',
    python_callable=get_zip_files_to_process,
    dag=dag
)
# task 2: trigger process_zip_dag once per file returned by task 1
task_2 = python_operator.PythonOperator(
    task_id='run_dag_for_each_file',
    provide_context=True,
    python_callable=run_dag_for_each_file,
    op_args=[process_zip_dag.dag.dag_id],
    dag=dag
)
task_1.set_downstream(task_2)
|
libero/eLife-style-content-adapter | tests/python/mocks.py | from io import BytesIO
from pathlib import Path
from zipfile import ZipFile
from tests.assets import get_asset, find_asset
class s3ClientMock:
    """Stand-in for a boto3 S3 client that serves file bytes from test assets."""

    def __init__(self, *args, **kwargs):
        # record of keys requested/stored so tests can assert on traffic
        self.downloaded_files = []
        self.uploaded_files = []
        self.last_uploaded_file_bytes = None

    def __call__(self, *args, **kwargs):
        # lets the instance stand in where a client factory is expected
        return self

    def _read_bytes(self, file_name):
        """
        Trys to find the file in the project and read it. If the file is not
        located, try to locate the file in a zip file if `file_name` is a path.
        :param str file_name: name of file or file path
        :return bytes: bytes of read file
        :raises FileNotFoundError: if neither the asset nor a containing
            archive can be located
        """
        try:
            return get_asset(file_name).read_bytes()
        except FileNotFoundError:
            file_name = Path(file_name)
            path_in_zip = '/'.join(file_name.parts[1:])
            for asset in find_asset('*%s*' % file_name.parts[0]):
                if asset.suffix in ['.zip', '.meca']:
                    key = asset.name
                    break
            else:
                # fix: previously fell through to a NameError on `key` when no
                # matching archive existed; fail with a clear error instead
                raise FileNotFoundError(str(file_name))
            return ZipFile(get_asset(key)).read(path_in_zip)

    def download_fileobj(self, *args, **kwargs):
        self.downloaded_files.append(kwargs['Key'])
        kwargs['Fileobj'].write(self._read_bytes(kwargs['Key']))

    def upload_fileobj(self, *args, **kwargs):
        self.uploaded_files.append(kwargs['Key'])
        self.last_uploaded_file_bytes = kwargs['Fileobj'].read()

    def get_object(self, *args, **kwargs):
        self.downloaded_files.append(kwargs['Key'])
        return {'Body': BytesIO(self._read_bytes(kwargs['Key']))}

    def put_object(self, *args, **kwargs):
        self.uploaded_files.append(kwargs['Key'])
        self.last_uploaded_file_bytes = kwargs['Body']

    def get_paginator(self, *args):
        # paginator interface is faked by returning self; see paginate()
        return self

    def paginate(self, *args, response=None, **kwargs):
        # tests inject the page list through the `response` keyword
        return response
|
libero/eLife-style-content-adapter | dags/libero/operators.py | <reponame>libero/eLife-style-content-adapter<gh_stars>0
import os
from airflow import configuration, DAG
from airflow.operators.bash_operator import BashOperator
ARTICLE_ASSETS_URL = configuration.conf.get('libero', 'article_assets_url')
COMPLETED_TASKS_BUCKET = configuration.conf.get('libero', 'completed_tasks_bucket_name')
DESTINATION_BUCKET = configuration.conf.get('libero', 'destination_bucket_name')
SEARCH_URL = configuration.conf.get('libero', 'search_url')
SERVICE_NAME = configuration.conf.get('libero', 'service_name')
SERVICE_URL = configuration.conf.get('libero', 'service_url')
SOURCE_BUCKET = configuration.conf.get('libero', 'source_bucket_name')
def create_node_task(name: str,
                     js_task_script_path: str,
                     dag: DAG,
                     env: dict = None,
                     get_return_from: str = None) -> BashOperator:
    """
    A facade for the BashOperator intended for non python developers.
    :param name: name of task
    :param js_task_script_path: full path of script to run as string
    :param dag: reference to the DAG object this task belongs to
    :param env: values to pass to nodejs accessed using process.env
    :param get_return_from: gets the return value of a specified task
    :return: instantiated BashOperator configured to run a nodejs script
    """
    # start from the current process environment, then layer on the libero
    # settings and finally any caller-supplied overrides
    env_vars = os.environ.copy()
    env_vars.update({
        'ARCHIVE_FILE_NAME': '{{ dag_run.conf.get("file") }}',
        'ARTICLE_ASSETS_URL': ARTICLE_ASSETS_URL,
        'COMPLETED_TASKS_BUCKET': COMPLETED_TASKS_BUCKET,
        'DESTINATION_BUCKET': DESTINATION_BUCKET,
        'SEARCH_URL': SEARCH_URL,
        'SERVICE_NAME': SERVICE_NAME,
        'SERVICE_URL': SERVICE_URL,
        'SOURCE_BUCKET': SOURCE_BUCKET,
    })
    if env:
        env_vars.update(env)
    bash_command_template = 'nodejs {{ params.js_function_caller }} {{ params.js_task_script }}'
    if get_return_from:
        # append the named task's XCom value as a command line argument
        bash_command_template += ' {{ ti.xcom_pull(task_ids="%s") }}' % get_return_from
    return BashOperator(
        task_id=name,
        bash_command=bash_command_template,
        params={
            'js_function_caller': '${AIRFLOW_HOME}/dags/js/function-caller.js',
            'js_task_script': js_task_script_path
        },
        env=env_vars,
        xcom_push=True,
        dag=dag
    )
|
libero/eLife-style-content-adapter | dags/libero/aws.py | from airflow import configuration
from airflow.hooks.S3_hook import S3Hook
REMOTE_LOGS_CONNECTION_ID = configuration.conf.get('core', 'remote_log_conn_id') or None
def get_s3_client():
    """Return a boto3 S3 client obtained through the Airflow S3 hook."""
    hook = S3Hook(aws_conn_id=REMOTE_LOGS_CONNECTION_ID)
    return hook.get_conn()
def list_bucket_keys_iter(**list_objects_v2_params):
    """
    returns a generator that lists keys/prefixes in an AWS S3 bucket.
    """
    paginator = get_s3_client().get_paginator('list_objects_v2')
    for page in paginator.paginate(**list_objects_v2_params):
        # NOTE(review): when a page carries both 'Contents' and
        # 'CommonPrefixes', only the keys are yielded — confirm pages never
        # mix the two in practice.
        if 'Contents' in page:
            for entry in page['Contents']:
                yield entry['Key']
        elif 'CommonPrefixes' in page:
            for entry in page['CommonPrefixes']:
                yield entry['Prefix']
|
libero/eLife-style-content-adapter | tests/python/test_aws.py | <reponame>libero/eLife-style-content-adapter<filename>tests/python/test_aws.py
import pytest
from dags.libero.aws import get_s3_client, list_bucket_keys_iter
from dags.trigger_dag import SOURCE_BUCKET, DESTINATION_BUCKET
def test_get_s3_client():
    # without a remote-logs connection configured, the hook should target the
    # default public AWS S3 endpoint
    conn = get_s3_client()
    assert conn._endpoint._endpoint_prefix == 's3'
    assert conn._endpoint.host == "https://s3.amazonaws.com"
def test_get_s3_client_using_AIRFLOW_CONN_env_variable(set_remote_logs_env_var):
    # the set_remote_logs_env_var fixture points the connection at a test host
    conn = get_s3_client()
    assert conn._endpoint._endpoint_prefix == 's3'
    assert conn._endpoint.host == "http://test-host:1234"
# each case: list_objects_v2 params, one fake page returned by the mocked
# client, and the keys/prefixes the iterator is expected to yield
@pytest.mark.parametrize('params, response, expected', [
    (
        {'Bucket': SOURCE_BUCKET, 'Delimiter': '.zip'},
        {'CommonPrefixes':[{'Prefix': 'elife-00666-vor-r1.zip'}]},
        ['elife-00666-vor-r1.zip']
    ),
    (
        {'Bucket': SOURCE_BUCKET, 'Delimiter': '.zip'},
        {},
        []
    ),
    (
        {'Bucket': DESTINATION_BUCKET, 'Delimiter': '/'},
        {'CommonPrefixes':[{'Prefix': 'elife-666-vor-r1/'}]},
        ['elife-666-vor-r1/']
    ),
    (
        {'Bucket': DESTINATION_BUCKET, 'Delimiter': '/'},
        {},
        []
    ),
    (
        {'Bucket': SOURCE_BUCKET},
        {'Contents':[{'Key': 'elife-00666-vor-r1.zip'}]},
        ['elife-00666-vor-r1.zip']
    ),
    (
        {'Bucket': SOURCE_BUCKET},
        {},
        []
    )
])
def test_list_bucket_keys_iter(params, response, expected, s3_client):
    # `response` is consumed by the mocked client's paginate() method
    keys = list_bucket_keys_iter(response=[response], **params)
    assert list(keys) == expected
|
libero/eLife-style-content-adapter | tests/python/test_assets.py | <gh_stars>0
import pytest
from tests.assets import get_asset
@pytest.mark.parametrize('asset_name', ['elife-00666.xml', 'elife-00666-vor-r1.zip'])
def test_get_assets(asset_name):
    # known bundled assets should resolve to a truthy path
    assert get_asset(asset_name)
def test_get_assets_raises_exception():
    # unknown asset names must raise rather than return a bogus path
    with pytest.raises(FileNotFoundError):
        get_asset('does-not-exist.txt')
|
CAOR-MINES-ParisTech/colibri-vr-unity-package | Runtime/ExternalConnectors/Blender_CheckOBJMeshInfo.py | ### Copyright 2019-2020 MINES ParisTech (PSL University)
### This work is licensed under the terms of the MIT license, see the LICENSE file.
###
### Author: <NAME>, <EMAIL>
import sys, os
sys.path.append(sys.argv[sys.argv.index("--") + 1:][0])
import bpy
from Blender_Core import print_out, print_err, delete_all_on_start, print_face_count
def main() :
    """Load the OBJ file given on the command line and report its face count."""
    # everything after Blender's '--' separator belongs to this script
    argv = sys.argv[sys.argv.index("--") + 1:]
    input_file_path = str(argv[1])
    delete_all_on_start()
    bpy.ops.import_scene.obj(filepath=input_file_path)
    # the freshly imported OBJ is the only object in the (emptied) scene
    face_count = len(bpy.data.objects[0].data.polygons)
    print_face_count(face_count)
    print_out("Mesh currently has " + str(face_count) + " faces.")
    sys.exit(0)
main()
CAOR-MINES-ParisTech/colibri-vr-unity-package | Runtime/ExternalConnectors/Blender_Core.py | ### Copyright 2019-2020 MINES ParisTech (PSL University)
### This work is licensed under the terms of the MIT license, see the LICENSE file.
###
### Author: <NAME>, <EMAIL>
import sys
import bpy
def print_out(lineToWrite) :
    """Write one line to stdout and flush so the host process sees it immediately."""
    stream = sys.stdout
    stream.write(lineToWrite + "\n")
    stream.flush()
def print_err(errorLine) :
    """Write one line to stderr and flush so the host process sees it immediately."""
    stream = sys.stderr
    stream.write(errorLine + "\n")
    stream.flush()
def delete_all_on_start() :
    """Delete every object in the scene so imports start from a clean slate."""
    if len(bpy.data.objects) == 0 :
        return
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete(use_global=False)
def print_face_count(face_count) :
    """Emit the machine-readable 'FACE_COUNT_OUTPUT:' marker line on stdout."""
    marker = "FACE_COUNT_OUTPUT:" + str(face_count)
    print_out(marker)
CAOR-MINES-ParisTech/colibri-vr-unity-package | Runtime/ExternalConnectors/Blender_ConvertPLYtoOBJ.py | ### Copyright 2019-2020 MINES ParisTech (PSL University)
### This work is licensed under the terms of the MIT license, see the LICENSE file.
###
### Author: <NAME>, <EMAIL>
import sys, os
sys.path.append(sys.argv[sys.argv.index("--") + 1:][0])
import bpy
from math import radians
from Blender_Core import print_out, print_err, delete_all_on_start, print_face_count
def main() :
    """Convert a PLY mesh to OBJ, rotating it before export."""
    argv = sys.argv[sys.argv.index("--") + 1:]
    input_file_path = str(argv[1])
    output_file_path = str(argv[2])
    delete_all_on_start()
    print_out("Importing mesh from " + input_file_path + ".")
    bpy.ops.import_mesh.ply(filepath=input_file_path)
    global_obj = bpy.data.objects[0]
    face_count = len(global_obj.data.polygons)
    print_face_count(face_count)
    print_out("Rotating mesh.")
    # rotate -90 deg about X and 180 deg about Z — presumably to match the
    # target coordinate convention; TODO confirm against the consuming app
    global_obj.rotation_euler[0] = radians(-90)
    global_obj.rotation_euler[2] = radians(180)
    print_out("Exporting mesh to " + output_file_path + ".")
    bpy.ops.export_scene.obj(filepath=output_file_path)
    print_out("Finished operation. Mesh can be found at " + output_file_path + ".")
    sys.exit(0)
main()
CAOR-MINES-ParisTech/colibri-vr-unity-package | Runtime/ExternalConnectors/Blender_SimplifyOBJ.py | ### Copyright 2019-2020 MINES ParisTech (PSL University)
### This work is licensed under the terms of the MIT license, see the LICENSE file.
###
### Author: <NAME>, <EMAIL>
import sys, os
sys.path.append(sys.argv[sys.argv.index("--") + 1:][0])
import bpy
from Blender_Core import print_out, print_err, delete_all_on_start, print_face_count
def main() :
    """Decimate an OBJ mesh (planar dissolve), re-triangulate, and export it."""
    argv = sys.argv[sys.argv.index("--") + 1:]
    input_file_path = str(argv[1])
    output_file_path = str(argv[2])
    delete_all_on_start()
    bpy.ops.import_scene.obj(filepath=input_file_path)
    global_obj = bpy.data.objects[0]
    # modifier operators act on the active object
    bpy.context.view_layer.objects.active = global_obj
    original_face_count = len(global_obj.data.polygons)
    bpy.ops.object.modifier_add(type='DECIMATE')
    decimate_modifier = global_obj.modifiers[0]
    # planar dissolve with a 0.0872665 rad (~5 degree) angle limit
    global_obj.modifiers[decimate_modifier.name].decimate_type = 'DISSOLVE'
    global_obj.modifiers[decimate_modifier.name].angle_limit = 0.0872665
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier=decimate_modifier.name)
    # dissolve can leave n-gons; triangulate restores an all-triangle mesh
    bpy.ops.object.modifier_add(type='TRIANGULATE')
    triangulate_modifier = global_obj.modifiers[0]
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier=triangulate_modifier.name)
    new_face_count = len(global_obj.data.polygons)
    print_out("Decimated mesh from " + str(original_face_count) + " to " + str(new_face_count) + " faces.")
    print_face_count(new_face_count)
    bpy.ops.export_scene.obj(filepath=output_file_path)
    print_out("Finished operation. Mesh can be found at " + output_file_path + ".")
    sys.exit(0)
main()
CAOR-MINES-ParisTech/colibri-vr-unity-package | Runtime/ExternalConnectors/Blender_SmartUVProjectOBJ.py | <reponame>CAOR-MINES-ParisTech/colibri-vr-unity-package
### Copyright 2019-2020 MINES ParisTech (PSL University)
### This work is licensed under the terms of the MIT license, see the LICENSE file.
###
### Author: <NAME>, <EMAIL>
import sys, os
sys.path.append(sys.argv[sys.argv.index("--") + 1:][0])
import bpy
from Blender_Core import print_out, print_err, delete_all_on_start
def main() :
    """Import an OBJ, UV-unwrap it with Smart UV Project, and export it."""
    argv = sys.argv[sys.argv.index("--") + 1:]
    input_file_path = str(argv[1])
    output_file_path = str(argv[2])
    delete_all_on_start()
    print_out("Importing mesh from " + input_file_path + ".")
    bpy.ops.import_scene.obj(filepath=input_file_path)
    obj = bpy.data.objects[0]
    bpy.context.view_layer.objects.active = obj
    # Smart UV Project runs on the selected faces in edit mode
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    print_out("Applying the Smart UV Project algorithm.")
    bpy.ops.uv.smart_project()
    bpy.ops.object.mode_set(mode='OBJECT')
    print_out("Exporting mesh to " + output_file_path + ".")
    bpy.ops.export_scene.obj(filepath=output_file_path)
    sys.exit(0)
main()
akariv/kvfile | kvfile/cached.py | <reponame>akariv/kvfile
from itertools import chain
import cachetools
from .kvfile import KVFile
class CachedKVFile(cachetools.LRUCache):
    """
    KVFile wrapped in an LRU cache: hot keys live in memory, and entries
    evicted from the cache are spilled to the backing KVFile on disk.
    """

    def __init__(self, size=1024):
        super().__init__(size)
        self.db = KVFile()

    def get(self, key):
        """Return the value for *key*, promoting it from disk on a cache miss."""
        return self[key]

    def set(self, key, value):
        self[key] = value

    def insert(self, key_value_iterator, batch_size=1000):
        # batch_size is accepted for interface compatibility with KVFile;
        # pairs pass through the cache one at a time
        for key, value in key_value_iterator:
            self.set(key, value)

    def keys(self, reverse=False):
        # in-memory keys first, then keys already spilled to disk.
        # fix: `reverse` was previously ignored — now forwarded to the on-disk
        # portion, matching items(); the cache portion keeps iteration order.
        return chain(iter(self), self.db.keys(reverse=reverse))

    def items(self, reverse=False):
        return chain(((k, self[k]) for k in iter(self)), self.db.items(reverse))

    def popitem(self):
        # called by LRUCache on eviction: persist the evicted entry to disk
        key, value = super().popitem()
        self.db.set(key, value)
        return key, value

    def __missing__(self, key):
        # cache miss: move the value from disk into the cache
        value = self.db.get(key)
        self.db.delete(key)
        self[key] = value
        return value
|
akariv/kvfile | tests/test_main.py | import datetime
import decimal
import pytest
def test_sanity():
    # round-trip a spread of serializable types through a fresh KVFile
    from kvfile import KVFile
    kv = KVFile()
    data = dict(
        s='value',
        i=123,
        d=datetime.datetime.fromtimestamp(12325),
        n=decimal.Decimal('1234.56'),
        ss=set(range(10)),
        o=dict(d=decimal.Decimal('1234.58'), n=datetime.datetime.fromtimestamp(12325))
    )
    for k, v in data.items():
        kv.set(k, v)
    for k, v in data.items():
        assert kv.get(k) == v
    # iteration must come back in key order, and reversed on request
    assert list(kv.keys()) == sorted(data.keys())
    assert list(kv.items()) == sorted(data.items())
    assert list(kv.keys(reverse=True)) == sorted(data.keys(), reverse=True)
    assert list(kv.items(reverse=True)) == sorted(data.items(), reverse=True)
def test_insert():
    # bulk-insert 50k rows, then grow the store using various batch sizes
    from kvfile import KVFile
    kv = KVFile()
    kv.insert(((str(i), ':{}'.format(i)) for i in range(50000)))
    assert len(list(kv.keys())) == 50000
    assert len(list(kv.items())) == 50000
    assert kv.get('49999') == ':49999'
    kv.insert(((str(i), ':{}'.format(i)) for i in range(50000, 100000)), batch_size=40000)
    assert len(list(kv.items())) == 100000
    # edge cases: batch_size 1 (per-row set) and 0 (single flush at the end)
    kv.insert(((str(i), ':{}'.format(i)) for i in range(100000, 100002)), batch_size=1)
    kv.insert(((str(i), ':{}'.format(i)) for i in range(100002, 100005)), batch_size=0)
    assert len(list(kv.items())) == 100005
def test_insert_generator():
    # insert_generator must yield back exactly the pairs it stores, in order
    from kvfile import KVFile
    kv = KVFile()
    data = [(str(i), ':{}'.format(i)) for i in range(50)]
    expected_data = []
    for key, value in kv.insert_generator(data):
        expected_data.append((key, value))
    assert data == expected_data
    assert len(list(kv.keys())) == 50
    assert len(list(kv.items())) == 50
    assert kv.get('49') == ':49'
def test_cached():
    # push the LRU-cached store past its cache size so evictions spill to disk
    from kvfile import CachedKVFile
    from random import shuffle
    kv = CachedKVFile()
    s = 5000
    data = [
        ('%06d' % i, {'a': i}) for i in range(s)
    ]
    for k, v in data:
        kv.set(k, v)
    # re-set and re-read random halves to shuttle entries between cache and disk
    for i in range(3):
        shuffle(data)
        for k, v in data[:(s//2)]:
            kv.set(k, v)
        shuffle(data)
        for k, v in data[:(s//2)]:
            assert kv.get(k) == v
    # regardless of where entries currently live, full sorted contents match
    items = list(kv.items())
    items = sorted(items)
    data = sorted(data)
    assert items == data
    keys = sorted(list(kv.keys()))
    assert keys == [x[0] for x in data]
def test_filename():
    # data written under an explicit filename must survive reopening the file
    from kvfile import KVFile, db_kind
    filename = 'bla.filename.' + db_kind + '.db'
    kv1 = KVFile(filename=filename)
    kv1.insert(((str(i), ':{}'.format(i)) for i in range(50000)))
    # drop the first handle so its backing store is closed before reopening
    del kv1
    kv = KVFile(filename=filename)
    assert len(list(kv.keys())) == 50000
    assert len(list(kv.items())) == 50000
    assert kv.get('49999') == ':49999'
def test_default():
    """get() falls back to the `default` keyword and raises KeyError without it."""
    from kvfile import KVFile
    store = KVFile()
    store.set('aaaa', 5)
    assert store.get('aaaa') == 5
    assert store.get('bbbb', default=6) == 6
    with pytest.raises(KeyError):
        store.get('bbbb')
|
akariv/kvfile | kvfile/__init__.py | from .kvfile import KVFile, db_kind
from .cached import CachedKVFile
|
akariv/kvfile | kvfile/kvfile.py | <reponame>akariv/kvfile<gh_stars>1-10
import os
import tempfile
import warnings
from collections import deque
try:
import plyvel as DB_ENGINE
db_kind = 'LevelDB'
except ImportError:
import sqlite3 as DB_ENGINE
db_kind = 'sqlite'
from .serializer import JsonSerializer
class SqliteDB(object):
    """
    Key-value store backed by a sqlite table ``d(key text, value text)``
    with a unique index on ``key``; values go through the supplied
    serializer (JSON by default).

    NOTE(review): ``DB_ENGINE`` is whichever module imported successfully at
    load time (plyvel or sqlite3); this class only works when it is sqlite3.
    """
    def __init__(self, serializer=JsonSerializer, filename=None):
        # without a filename, keep the DB in a temp dir removed on __del__
        self.tmpdir = None
        if filename is None:
            self.tmpdir = tempfile.TemporaryDirectory()
            filename = os.path.join(self.tmpdir.name, 'kvfile.db')
        self.serializer = serializer()
        self.db = DB_ENGINE.connect(filename)
        self.cursor = self.db.cursor()
        try:
            self.cursor.execute('''CREATE TABLE d (key text, value text)''')
            self.cursor.execute('''CREATE UNIQUE INDEX i ON d (key)''')
        except DB_ENGINE.OperationalError:
            # table/index already exist: re-opening an existing database file
            pass
    def __del__(self):
        # drop cursor/connection before removing the backing directory
        if hasattr(self, 'cursor'):
            del self.cursor
        if hasattr(self, 'db'):
            del self.db
        try:
            if self.tmpdir is not None:
                self.tmpdir.cleanup()
        except Exception as e:
            warnings.warn('Failed to cleanup sqlite DB: {}'.format(e), Warning)
    def get(self, key, **kw):
        """Return the deserialized value for *key*; raise KeyError, or return
        the ``default`` keyword argument when one was given."""
        ret = self.cursor.execute('''SELECT value FROM d WHERE key=?''',
                                  (key,)).fetchone()
        if ret is None:
            if 'default' in kw:
                return kw['default']
            raise KeyError()
        else:
            return self.serializer.deserialize(ret[0])
    def set(self, key, value):
        """Insert or update *key* with the serialized *value*, then commit."""
        value = self.serializer.serialize(value)
        try:
            # EAFP upsert: UPDATE when the key exists, INSERT otherwise
            self.get(key)
            self.cursor.execute('''UPDATE d SET value=? WHERE key=?''',
                                (value, key))
        except KeyError:
            self.cursor.execute('''INSERT INTO d VALUES (?, ?)''',
                                (key, value))
        self.db.commit()
    def delete(self, key):
        """Remove *key*; silently does nothing when the key is absent."""
        self.cursor.execute('''DELETE FROM d WHERE key=?''',
                            (key,)).fetchone()
        self.db.commit()
    def insert(self, key_value_iterator, batch_size=1000):
        """Bulk-insert by draining insert_generator without keeping results."""
        deque(self.insert_generator(key_value_iterator, batch_size), maxlen=0)
    def insert_generator(self, key_value_iterator, batch_size=1000):
        """Insert pairs while yielding each (key, value) back to the caller.

        With batch_size == 1 every pair goes through set(); otherwise pairs
        are buffered and written via executemany per batch.
        """
        if batch_size == 1:
            for key, value in key_value_iterator:
                yield key, value
                self.set(key, value)
        else:
            batch = []
            def flush(force=False):
                # write when forced, or once the batch reaches batch_size
                if len(batch) > 0 and (force or (batch_size and len(batch) >= batch_size)):
                    self.cursor.executemany('''INSERT INTO d VALUES (?, ?)''', batch)
                    self.db.commit()
                    batch.clear()
            for key, value in key_value_iterator:
                yield key, value
                # NOTE(review): batched values are stored as bytes (.encode())
                # while set() stores str — deserialization appears to accept
                # both; confirm this is intended.
                value = self.serializer.serialize(value).encode()
                batch.append((key, value))
                flush()
            flush(force=True)
    def keys(self, reverse=False):
        """Yield all keys in ascending (or, with reverse=True, descending) order."""
        cursor = self.db.cursor()
        direction = 'DESC' if reverse else 'ASC'
        keys = cursor.execute('''SELECT key FROM d ORDER BY key ''' + direction)
        for key, in keys:
            yield key
    def items(self, reverse=False):
        """Yield (key, deserialized value) pairs ordered by key."""
        cursor = self.db.cursor()
        direction = 'DESC' if reverse else 'ASC'
        items = cursor.execute('''SELECT key, value FROM d ORDER BY key ''' + direction)
        for key, value in items:
            yield key, self.serializer.deserialize(value)
class LevelDB(object):
    """
    Key-value store backed by LevelDB (plyvel). Keys and serialized values
    are stored UTF-8 encoded.
    """
    def __init__(self, serializer=JsonSerializer, filename=None):
        # without a filename, the DB lives in a temp directory
        if filename is None:
            self.tmpdir = tempfile.TemporaryDirectory()
            filename = self.tmpdir.name
        self.serializer = serializer()
        self.db = DB_ENGINE.DB(filename, create_if_missing=True)
    def __del__(self):
        if hasattr(self, 'db'):
            self.db.close()
    def get(self, key, **kw):
        """Return the deserialized value for *key*; raise KeyError, or return
        the ``default`` keyword argument when one was given."""
        ret = self.db.get(key.encode('utf8'))
        if ret is None:
            if 'default' in kw:
                return kw['default']
            raise KeyError()
        else:
            return self.serializer.deserialize(ret.decode('utf8'))
    def set(self, key, value):
        """Store the serialized *value* under *key*."""
        value = self.serializer.serialize(value).encode('utf8')
        key = key.encode('utf8')
        self.db.put(key, value)
    def delete(self, key):
        """Remove *key* from the store."""
        key = key.encode('utf8')
        self.db.delete(key)
    def insert(self, key_value_iterator, batch_size=1000):
        """Bulk-insert by draining insert_generator without keeping results."""
        deque(self.insert_generator(key_value_iterator, batch_size), maxlen=0)
    def insert_generator(self, key_value_iterator, batch_size=1000):
        """Insert pairs while yielding each (key, value) back to the caller;
        writes go through a LevelDB write batch unless batch_size == 1."""
        if batch_size == 1:
            for key, value in key_value_iterator:
                yield key, value
                self.set(key, value)
        else:
            batch = []
            def flush(force=False):
                # write when forced, or once the batch reaches batch_size
                if len(batch) > 0 and (force or (batch_size and len(batch) >= batch_size)):
                    write_batch = self.db.write_batch()
                    for key, value in batch:
                        write_batch.put(key, value)
                    write_batch.write()
                    write_batch.clear()
                    del write_batch
                    batch.clear()
            for key, value in key_value_iterator:
                yield key, value
                value = self.serializer.serialize(value).encode('utf8')
                key = key.encode('utf8')
                batch.append((key, value))
                flush()
            flush(True)
    def keys(self, reverse=False):
        """Yield all keys, decoded, in forward or reverse iteration order."""
        for key, value in self.db.iterator(reverse=reverse):
            yield key.decode('utf8')
    def items(self, reverse=False):
        """Yield (key, deserialized value) pairs in forward or reverse order."""
        for key, value in self.db.iterator(reverse=reverse):
            yield (key.decode('utf8'),
                   self.serializer.deserialize(value.decode('utf8')))
# public alias: use the LevelDB backend when plyvel imported successfully,
# otherwise fall back to the sqlite3 implementation
KVFile = LevelDB if db_kind == 'LevelDB' else SqliteDB
|
andycranston/mbtester | mbtester.py | #
# @(!--#) @(#) mbtester.py, version 011, 08-february-2019
#
# drive the "Generic Modbus/Jbus Tester" windows program
# from Schneider Electric
#
# tester.exe available from:
#
# https://www.schneider-electric.com/en/faqs/FA180037/
#
# version 4.7 (c) 1998-2008
#
# Links:
#
# https://pyautogui.readthedocs.io/en/latest/
#
#
#########################################################################
import sys
import os
import time
import datetime
import argparse
import pyautogui
#########################################################################
#
# constants
#
# size of the tester window in pixels
WINDOW_SIZE_X = 590
WINDOW_SIZE_Y = 417
# bitmap of the window icon, used to locate the window on screen
MB_ICON_BITMAP_FILE_NAME = 'mbicon.bmp'
# offset from the located icon back to the true window origin
MB_ICON_PADDING_X = 4
MB_ICON_PADDING_Y = 5
# only search the top-left corner of the screen for the window icon
TOP_LEFT_SCREEN_REGION = (0, 0, 200, 200)
UNDER_WINDOW_ICON_X = 4
UNDER_WINDOW_ICON_Y = 24
# window-relative coordinates (and widths) of the UI controls
PORT_DROP_DOWN_X = 78
PORT_DROP_DOWN_Y = 68
TCPIP_FIELD_X = 20
TCPIP_FIELD_Y = 151
TCPIP_FIELD_WIDE = 284
TIMEOUT_FIELD_X = 102
TIMEOUT_FIELD_Y = 215
TIMEOUT_FIELD_WIDE = 35
SAMPLE_RATE_FIELD_X = 269
SAMPLE_RATE_FIELD_Y = 215
SAMPLE_RATE_FIELD_WIDE = 35
SLAVE_ID_FIELD_X = 18
SLAVE_ID_FIELD_Y = 318
SLAVE_ID_FIELD_WIDE = 56
STARTING_REG_FIELD_X = 128
STARTING_REG_FIELD_Y = 318
STARTING_REG_FIELD_WIDE = 56
REG_COUNT_FIELD_X = 237
REG_COUNT_FIELD_Y = 318
REG_COUNT_FIELD_WIDE = 56
SAMPLE_MODE_DROP_DOWN_X = 295
SAMPLE_MODE_DROP_DOWN_Y = 180
DATA_TYPE_DROP_DOWN_X = 295
DATA_TYPE_DROP_DOWN_Y = 267
# vertical pixel spacing between entries in an opened drop-down menu
DROP_DOWN_MENU_MULTIPLER = 13
DISPLAY_MODE_DECIMAL_X = 342
DISPLAY_MODE_DECIMAL_Y = 69
DISPLAY_MODE_HEX_X = 430
DISPLAY_MODE_HEX_Y = 69
PROTOCOL_MODBUS_X = 508
PROTOCOL_MODBUS_Y = 186
PROTOCOL_JBUS_X = 508
PROTOCOL_JBUS_Y = 214
PROTOCOL_MODBUS_ASCII_X = 508
PROTOCOL_MODBUS_ASCII_Y = 244
BUTTON_STOP_X = 534
BUTTON_STOP_Y = 296
BUTTON_READ_X = 534
BUTTON_READ_Y = 328
BUTTON_WRITE_X = 534
BUTTON_WRITE_Y = 360
BUTTON_EXIT_X = 534
BUTTON_EXIT_Y = 394
# geometry of the ten result display fields (read via screen scraping)
RESULT_FIELD_0_X = 416
RESULT_FIELD_0_Y = 91
RESULT_FIELDS_Y = [ 91, 124, 156, 189, 221, 254, 286, 319, 351, 384 ]
RESULT_FIELD_WIDE = 56
RESULT_FIELD_TALL = 19
# command file used when --cmd is not given
DEFAULT_CMD_FILE_NAME = 'mbcommands.txt'
#########################################################################
#
# globals
#
#########################################################################
def clickinwindow(x, y):
    """Click at coordinates (x, y) relative to the tester window origin."""
    global origin_x, origin_y
    screen_x = origin_x + x
    screen_y = origin_y + y
    pyautogui.moveTo(screen_x, screen_y)
    pyautogui.click()
    return
#########################################################################
def selectdropdown(dropdownx, dropdowny, index):
    """Open the drop-down at window-relative (dropdownx, dropdowny) and pick entry *index*."""
    global origin_x, origin_y
    base_x = origin_x + dropdownx
    base_y = origin_y + dropdowny
    pyautogui.moveTo(base_x, base_y)
    pyautogui.click()
    # menu entries are DROP_DOWN_MENU_MULTIPLER pixels apart vertically
    pyautogui.moveTo(base_x, base_y + (DROP_DOWN_MENU_MULTIPLER * index))
    pyautogui.click()
    pyautogui.moveTo(base_x, base_y)
    return
#########################################################################
def fieldovertype(fieldx, fieldy, fieldwide, text):
    """Double-click at the right edge of a text field (selecting its content) and type over it."""
    global origin_x, origin_y
    right_edge_x = origin_x + fieldx + fieldwide - 1
    pyautogui.moveTo(right_edge_x, origin_y + fieldy)
    pyautogui.click(clicks=2)
    pyautogui.typewrite(text)
    return
#########################################################################
def getresults():
    """
    Screen-scrape the ten result fields from the tester window.

    Each field is screenshotted and decoded column by column against the
    module-level `cmap` bitmap-to-character table (a tiny fixed-font OCR).
    Returns a list of ten decoded strings.
    """
    global origin_x, origin_y
    global cmap
    results = []
    for fieldoffsety in RESULT_FIELDS_Y:
        fieldbitmap = pyautogui.screenshot(region=(origin_x + RESULT_FIELD_0_X, origin_y + fieldoffsety, RESULT_FIELD_WIDE, RESULT_FIELD_TALL))
        if False:
            # debug aid: dump the captured field bitmap as ASCII art
            for y in range(0, RESULT_FIELD_TALL):
                for x in range(0, RESULT_FIELD_WIDE):
                    rgb = fieldbitmap.getpixel( (x, y) )
                    if rgb[0] == 0:
                        c = '#'
                    else:
                        c = '.'
                    print(c, end='')
                print('')
        chars = ''
        bitmapsofar = ''
        x = 0
        while x < RESULT_FIELD_WIDE:
            # read one pixel column bottom-up, skipping the lowest rows;
            # black pixels (red channel 0) become '1', others '0'
            row = ''
            for y in range(RESULT_FIELD_TALL - 7, 1, -1):
                rgb = fieldbitmap.getpixel( (x, y) )
                if rgb[0] == 0:
                    c = '1'
                else:
                    c = '0'
                row += c
            ### print(row)
            # an all-blank or all-black column separates characters
            if (row == '00000000000') or (row == '11111111111'):
                bitmapsofar = ''
            else:
                bitmapsofar += row
            ### print('    ', bitmapsofar)
            if bitmapsofar in cmap:
                ### print('Found character', cmap[bitmapsofar])
                chars += cmap[bitmapsofar]
                bitmapsofar = ''
            x += 1
        results.append(chars)
        ### print('Field-{} is: "{}"'.format(fieldoffsety, chars))
    ### print(results)
    return results
#########################################################################
def printresults(results):
    """Print one row of results, each value right-justified in a 7-character cell."""
    line = ''.join(' {:>6s}'.format(value) for value in results)
    print(line)
#########################################################################
def writeresults(resultfilename, results, datetimenow):
    """Append one CSV row (quoted timestamp, then each quoted result) to the file."""
    try:
        resultfile = open(resultfilename, 'a')
    except IOError:
        # best effort: silently skip logging when the file cannot be opened
        return
    cells = ['"{}"'.format(datetimenow)]
    cells.extend('"{}"'.format(result) for result in results)
    resultfile.write(','.join(cells) + '\n')
    resultfile.flush()
    resultfile.close()
    return
#########################################################################
def resultsloop(resultsfilename, duration, interval):
    """
    Repeatedly press Read and record the results every `interval` seconds
    for `duration` seconds, appending each sample to `resultsfilename`.
    """
    starttime = time.time()
    ### print('Start time... (float):', starttime)
    intstarttime = int(starttime)
    ### print('Start time..... (int):', intstarttime)
    # sleep up to the next whole second so samples align on second boundaries
    difftime = 1 - (starttime - intstarttime)
    ### print('Diff:', difftime)
    time.sleep(difftime)
    starttime = time.time()
    while duration > 0.0:
        datetimenow = datetime.datetime.now()
        print(datetimenow)
        clickinwindow(BUTTON_READ_X, BUTTON_READ_Y)
        # give the tester a moment to refresh the result fields
        time.sleep(0.2)
        results = getresults()
        printresults(results)
        writeresults(resultsfilename, results, datetimenow)
        # schedule the next sample relative to the previous target (no drift)
        starttime += interval
        timenow = time.time()
        if timenow < starttime:
            ### print('A sleep is needed')
            time.sleep(starttime - timenow)
        duration -= interval
    return
#########################################################################
def mbtester(cmdfile, cmdfilename):
    """
    Replay the command file against the tester window, one command per line.

    Each non-blank line is split on whitespace into a keyword plus optional
    arguments. Blank lines and lines beginning with ';;' are skipped.
    Processing stops at 'exit'/'quit' or end of file.
    """
    global origin_x, origin_y
    linenum = 0
    for line in cmdfile:
        linenum += 1
        line = line.strip()
        if len(line) == 0:
            continue
        if line[0:2] == ';;':
            # ';;' introduces a comment line in the command file
            continue
        print('line {} - {}'.format(linenum, line))
        fields = line.split()
        if (fields[0] == 'exit') or (fields[0] == 'quit'):
            return
        elif fields[0] == 'sleep':
            time.sleep(float(fields[1]))
        elif fields[0] == 'screenshot':
            # capture the tester window into the file named by the argument
            pyautogui.screenshot(fields[1], region=(origin_x, origin_y, WINDOW_SIZE_X, WINDOW_SIZE_Y))
        elif fields[0] == 'port-tcpip':
            selectdropdown(PORT_DROP_DOWN_X, PORT_DROP_DOWN_Y, 1)
        elif fields[0] == 'port-com1':
            selectdropdown(PORT_DROP_DOWN_X, PORT_DROP_DOWN_Y, 2)
        elif fields[0] == 'port-com2':
            selectdropdown(PORT_DROP_DOWN_X, PORT_DROP_DOWN_Y, 3)
        elif fields[0] == 'port-com3':
            selectdropdown(PORT_DROP_DOWN_X, PORT_DROP_DOWN_Y, 4)
        elif fields[0] == 'sample-mode-manual':
            selectdropdown(SAMPLE_MODE_DROP_DOWN_X, SAMPLE_MODE_DROP_DOWN_Y, 1)
        elif fields[0] == 'sample-mode-scheduled':
            selectdropdown(SAMPLE_MODE_DROP_DOWN_X, SAMPLE_MODE_DROP_DOWN_Y, 2)
        elif fields[0] == 'sample-mode-scheduled-logging':
            selectdropdown(SAMPLE_MODE_DROP_DOWN_X, SAMPLE_MODE_DROP_DOWN_Y, 3)
        elif fields[0] == 'data-type-hold-reg':
            selectdropdown(DATA_TYPE_DROP_DOWN_X, DATA_TYPE_DROP_DOWN_Y, 1)
        elif fields[0] == 'data-type-input-reg':
            selectdropdown(DATA_TYPE_DROP_DOWN_X, DATA_TYPE_DROP_DOWN_Y, 2)
        elif fields[0] == 'data-type-single-hold-reg':
            selectdropdown(DATA_TYPE_DROP_DOWN_X, DATA_TYPE_DROP_DOWN_Y, 3)
        elif fields[0] == 'data-type-scattered-reg-read':
            selectdropdown(DATA_TYPE_DROP_DOWN_X, DATA_TYPE_DROP_DOWN_Y, 4)
        elif fields[0] == 'data-type-read-dev-id':
            selectdropdown(DATA_TYPE_DROP_DOWN_X, DATA_TYPE_DROP_DOWN_Y, 5)
        elif fields[0] == 'display-mode-decimal':
            clickinwindow(DISPLAY_MODE_DECIMAL_X, DISPLAY_MODE_DECIMAL_Y)
        elif fields[0] == 'display-mode-hex':
            clickinwindow(DISPLAY_MODE_HEX_X, DISPLAY_MODE_HEX_Y)
        elif fields[0] == 'protocol-modbus':
            clickinwindow(PROTOCOL_MODBUS_X, PROTOCOL_MODBUS_Y)
        elif fields[0] == 'protocol-jbus':
            clickinwindow(PROTOCOL_JBUS_X, PROTOCOL_JBUS_Y)
        elif fields[0] == 'protocol-modbus-ascii':
            clickinwindow(PROTOCOL_MODBUS_ASCII_X, PROTOCOL_MODBUS_ASCII_Y)
        elif fields[0] == 'button-stop':
            clickinwindow(BUTTON_STOP_X, BUTTON_STOP_Y)
        elif fields[0] == 'button-read':
            clickinwindow(BUTTON_READ_X, BUTTON_READ_Y)
        elif fields[0] == 'button-write':
            clickinwindow(BUTTON_WRITE_X, BUTTON_WRITE_Y)
        elif fields[0] == 'button-exit':
            clickinwindow(BUTTON_EXIT_X, BUTTON_EXIT_Y)
        elif fields[0] == 'tcpip-address':
            fieldovertype(TCPIP_FIELD_X, TCPIP_FIELD_Y, TCPIP_FIELD_WIDE, fields[1])
        elif fields[0] == 'timeout':
            fieldovertype(TIMEOUT_FIELD_X, TIMEOUT_FIELD_Y, TIMEOUT_FIELD_WIDE, fields[1])
        elif fields[0] == 'sample-rate':
            fieldovertype(SAMPLE_RATE_FIELD_X, SAMPLE_RATE_FIELD_Y, SAMPLE_RATE_FIELD_WIDE, fields[1])
        elif fields[0] == 'slave-id':
            fieldovertype(SLAVE_ID_FIELD_X, SLAVE_ID_FIELD_Y, SLAVE_ID_FIELD_WIDE, fields[1])
        elif fields[0] == 'starting-reg':
            fieldovertype(STARTING_REG_FIELD_X, STARTING_REG_FIELD_Y, STARTING_REG_FIELD_WIDE, fields[1])
        elif fields[0] == 'reg-count':
            fieldovertype(REG_COUNT_FIELD_X, REG_COUNT_FIELD_Y, REG_COUNT_FIELD_WIDE, fields[1])
        elif fields[0] == 'results':
            results = getresults()
            printresults(results)
            if len(fields) >= 2:
                # optional second field: CSV file to append this sample to
                writeresults(fields[1], results, datetime.datetime.now())
        elif fields[0] == 'results-loop':
            # arguments: <results-file> <duration-seconds> <interval-seconds>
            if len(fields) >= 4:
                resultsloop(fields[1], float(fields[2]), float(fields[3]))
        else:
            print('{}: unrecognised keyword "{}" at line {} in command file "{}"'.format(progname, fields[0], linenum, cmdfilename), file=sys.stderr)
    return
#########################################################################
def main():
    """Entry point: parse args, locate the tester window, replay the command file.

    Returns 0 on success; exits with status 1 if the command file cannot be
    opened or the tester window cannot be located on screen.
    """
    global progname
    global origin_x, origin_y
    parser = argparse.ArgumentParser()
    parser.add_argument('--cmd', help='command file name', default=DEFAULT_CMD_FILE_NAME)
    parser.add_argument('--ip', help='IP address to preprogram for TCP/IP', nargs=1)
    args = parser.parse_args()
    cmdfilename = args.cmd
    try:
        cmdfile = open(cmdfilename, 'r')
    except IOError:
        print('{}: cannot open command file name "{}" for reading'.format(progname, cmdfilename), file=sys.stderr)
        sys.exit(1)
    # Ensure the command file is closed on every exit path.
    with cmdfile:
        # Anchor all window-relative coordinates on the application icon.
        mbicon = pyautogui.locateOnScreen(MB_ICON_BITMAP_FILE_NAME, region=TOP_LEFT_SCREEN_REGION)
        if mbicon is None:  # idiom fix: compare to None with `is`
            print('{}: cannot find the icon in the Generic Modbus/Jbus tester window - is the program running?'.format(progname), file=sys.stderr)
            sys.exit(1)
        origin_x = mbicon[0]
        origin_y = mbicon[1]
        # The window origin is derived by backing off a fixed padding from the
        # icon position, so the window must not hug the screen edges.
        if origin_x < MB_ICON_PADDING_X:
            print('{}: the Generic Modbus/Jbus tester window is too close to the left hand edge of the screen'.format(progname), file=sys.stderr)
            sys.exit(1)
        if origin_y < MB_ICON_PADDING_Y:
            print('{}: the Generic Modbus/Jbus tester window is too close to the top edge of the screen'.format(progname), file=sys.stderr)
            sys.exit(1)
        origin_x -= MB_ICON_PADDING_X
        origin_y -= MB_ICON_PADDING_Y
        ### print(origin_x, origin_y)
        clickinwindow(UNDER_WINDOW_ICON_X, UNDER_WINDOW_ICON_Y)
        if args.ip:
            if len(args.ip) == 1:
                # Preselect TCP/IP transport and type in the supplied address.
                selectdropdown(PORT_DROP_DOWN_X, PORT_DROP_DOWN_Y, 1)
                fieldovertype(TCPIP_FIELD_X, TCPIP_FIELD_Y, TCPIP_FIELD_WIDE, args.ip[0])
        mbtester(cmdfile, cmdfilename)
        clickinwindow(UNDER_WINDOW_ICON_X, UNDER_WINDOW_ICON_Y)
    return 0
#########################################################################
# Module-level state used by the screen-scraping routines above.
progname = os.path.basename(sys.argv[0])
# cmap maps a flattened pixel bitmap string (one char per pixel, '1' = lit)
# to the glyph it represents when reading the tester's results area.
cmap = {}
cmap['0011111110001000000010010000000100100000001000111111100'] = '0'
cmap['000000001000000000010001111111110'] = '1'
cmap['0110000010001010000010010010000100100010001001000011100'] = '2'
cmap['0010000010001000000010010001000100100010001000111011100'] = '3'
cmap['0001100000000010110000000100011000111111111000010000000'] = '4'
cmap['0010011111001000010010010000100100100001001000111100010'] = '5'
cmap['0011111110001000100010010001000100100010001000111000100'] = '6'
cmap['0000000001001110000010000011000100000001101000000000110'] = '7'
cmap['0011101110001000100010010001000100100010001000111011100'] = '8'
cmap['0010001110001000100010010001000100100010001000111111100'] = '9'
cmap['0011000000001001010000010010100000100101000001111100000'] = 'a'
cmap['0111111111001000010000010000100000100001000000111100000'] = 'b'
cmap['0011110000001000010000010000100000100001000000100100000'] = 'c'
cmap['0011110000001000010000010000100000100001000001111111110'] = 'd'
cmap['0011110000001001010000010010100000100101000000101100000'] = 'e'
cmap['0111111110000000010010'] = 'f'
# Screen coordinates of the tester window origin, set by main() once the
# window icon has been located.
origin_x = None
origin_y = None
sys.exit(main())
# end of file
|
i3detroit/dixie-narco | code/helper/display.py | #!/usr/bin/env python3
from luma.core.interface.serial import spi
from luma.core.render import canvas
from luma.oled.device import ssd1322
from PIL import Image,ImageFont
import signal
class Display:
    """Front-panel driver for the 256x64 SSD1322 OLED, rendered via luma.oled."""

    def __init__(self):
        # Hardware SPI, bus 0 / device 0, clocked at 32 MHz.
        self._serial = spi(device=0, port=0, bus_speed_hz=32000000)
        self._device = ssd1322(self._serial,width=256,height=64)
        # NOTE(review): absolute font path is host-specific — confirm it exists
        # on the deployment machine.
        self._fnt = ImageFont.truetype('/home/agmlego/src/b612/fonts/ttf/B612Mono-Regular.ttf',16)
        # Blank the screen when the SIGALRM timer armed in splash() fires.
        signal.signal(signal.SIGALRM,self.handler)

    def handler(self,signum, frame):
        """SIGALRM handler: blank the display after the splash timeout."""
        print('Time!')
        with canvas(self._device) as draw:
            draw.rectangle(self._device.bounding_box, outline='black', fill='black')

    def splash(self,slot=''):
        """Show the welcome screen, optionally with the selection so far.

        Re-arms a 60 s alarm that blanks the display via handler().
        """
        signal.alarm(60)
        with canvas(self._device) as draw:
            draw.rectangle(self._device.bounding_box, outline='white', fill='black')
            draw.text((2,2),'Welcome to i3Detroit!\nPlease make a selection: \n%s'%slot,font=self._fnt,fill='white')

    def draw_row(self,row):
        """Show the splash with the chosen row letter."""
        self.splash(row)

    def draw_slot(self,row,slot):
        """Show the splash with the complete row+slot selection."""
        self.splash(row+slot)
if __name__ == '__main__':
    # Bench-test loop: prompt for a row and slot and render each stage.
    from time import sleep
    disp = Display()
    while True:
        disp.splash()
        row = input('Row? ')
        disp.draw_row(row)
        slot = input('Slot? ')
        disp.draw_slot(row,slot)
        sleep(1)
|
i3detroit/dixie-narco | code/vend.py | <reponame>i3detroit/dixie-narco<filename>code/vend.py
#!/usr/bin/env python3
from helper.row_driver import Row
from helper.keypad import Keypad
from helper.display import Display
class Dixie_Narco:
    """Vending machine controller tying row drivers, keypad and display together."""

    def __init__(self):
        # One MCP23017 row driver per shelf row A-F, each at its own I2C address.
        self.rows = {
            'A':Row(0x25,'A'),
            'B':Row(0x24,'B'),
            'C':Row(0x23,'C'),
            'D':Row(0x22,'D'),
            'E':Row(0x21,'E'),
            'F':Row(0x20,'F')
        }
        # All row status lamps off at boot.
        for row in self.rows:
            self.rows[row].status(False)
        self.keypad = Keypad()
        self.display = Display()

    def get_selection(self):
        """Block until the user keys a row letter then a slot digit.

        Returns the selection as a two-character string, e.g. 'A3'.
        CLR as the second key clears the selection and starts over.
        """
        while True:
            char = ''
            selection = []
            # First key must be a row letter; light that row's lamp.
            while True:
                char = self.keypad.scan()
                if char in 'ABCDEF':
                    selection.append(char)
                    self.display.draw_row(char)
                    self.rows[char].status(True)
                    break
            # Second key: a digit completes the selection.
            char = self.keypad.scan()
            if char in '123456789':
                selection.append(char)
                self.display.draw_slot(selection[0],selection[1])
                print('Selected %s'%selection)
                return ''.join(selection)
            elif char == 'CLR':
                for row in self.rows:
                    self.rows[row].status(False)
                self.display.splash()
                continue
            # NOTE(review): any other second key restarts the outer loop but
            # leaves the first row's lamp lit — confirm that is intended.

    def vend(self,slot):
        """Actuate the motor for a selection like 'A3', then clear the lamp."""
        row,col = tuple(slot)
        col = int(col)
        print('Vending from %s,%d'%(row,col))
        self.rows[row].vend(col)
        self.rows[row].status(False)
if __name__ == '__main__':
    # Main service loop: splash screen, take a selection, vend it, repeat.
    vending = Dixie_Narco()
    while True:
        vending.display.splash()
        slot = vending.get_selection()
        vending.vend(slot)
|
i3detroit/dixie-narco | code/helper/keypad.py | #!/usr/bin/env python3
from time import sleep
import pigpio
class Keypad:
    """3x6 matrix keypad scanned via pigpio.

    Columns are driven high one at a time while rows (pulled down) are read;
    a pressed key pulls its row high while its column is energized.
    """

    # BCM GPIO numbers for the column drive lines and row sense lines.
    _cols = [21, 20, 26]
    # _keys[row_index][col_index] is the legend printed on the key cap.
    _rows = [5, 6, 12, 13, 19, 16]
    _keys = [
        ['F','*','CLR'],
        ['E','9','0'],
        ['D','7','8'],
        ['C','5','6'],
        ['B','3','4'],
        ['A','1','2']
    ]

    def __init__(self):
        self._pi = pigpio.pi()
        for col in self._cols:
            self._pi.set_mode(col,pigpio.OUTPUT)
            self._pi.write(col,0)
        for row in self._rows:
            self._pi.set_mode(row,pigpio.INPUT)
            self._pi.set_pull_up_down(row,pigpio.PUD_DOWN)

    def scan(self,blocking=True):
        """Scan the matrix and return the pressed key legend.

        Blocks until a key is confirmed unless blocking=False, in which case
        one pass is made and None may be returned.
        """
        char = None
        while char is None:
            for col in self._cols:
                self._pi.write(col,1)
                for row in self._rows:
                    state = self._pi.read(row)
                    if state:
                        # Debounce: wait 50 ms and require a second high read.
                        sleep(0.05)
                        state = self._pi.read(row)
                        if state:
                            char = self._keys[self._rows.index(row)][self._cols.index(col)]
                            #print('(%d,%d): %s'%(row,col,char))
                self._pi.write(col,0)
                sleep(0.05)
            # NOTE(review): original indentation was ambiguous here; placed so
            # a non-blocking call exits after one full matrix pass — confirm.
            if not blocking:
                break
        return char

    def __del__(self):
        # Release the pigpio connection when the keypad is garbage-collected.
        self._pi.stop()
if __name__ == '__main__':
    # Bench test: echo scanned keys forever.
    k = Keypad()
    while True:
        print(k.scan())
|
i3detroit/dixie-narco | code/helper/row_driver.py | <gh_stars>0
#!/usr/bin/env python3
import board
import busio
import adafruit_mcp230xx
import digitalio
from time import sleep
class I2C:
    """Borg-style shared-state wrapper around the board's I2C bus.

    Every instance shares one ``__dict__`` (``_shared_state``), so all Row
    drivers see the same underlying ``busio.I2C`` object.
    """

    _shared_state = {}

    def __init__(self):
        self.__dict__ = self._shared_state
        # NOTE(review): this re-creates the bus object on every construction
        # even though state is shared — confirm busio tolerates that.
        self.i2c = busio.I2C(board.SCL,board.SDA)

    def __hash__(self):
        # All instances share state, so they all hash alike.
        return 1

    def __eq__(self, other):
        """Instances compare equal iff they share the same state dict."""
        try:
            return self.__dict__ is other.__dict__
        except AttributeError:
            # `other` has no __dict__ (e.g. an int); previously a bare
            # `except:` here swallowed every exception, hiding real bugs.
            return False
class Row:
    """One shelf row of vend motors driven through an MCP23017 I2C expander."""

    # Maps slot number (1-9) to expander pin; index 0 is unused padding.
    _pinmap = (None,8,7,6,5,4,3,2,1,0)
    # Expander pin wired to the row's status lamp.
    _status = 15

    def __init__(self,address=0x20,label='F'):
        self._i2c = I2C()
        self.address = address
        self.label = label
        try:
            self._driver = adafruit_mcp230xx.MCP23017(self._i2c.i2c,address=self.address)
            # All motor pins plus the status pin start as driven-low outputs.
            for pin in self._pinmap[1:]+(self._status,):
                self._driver.get_pin(pin).direction = digitalio.Direction.OUTPUT
                self._driver.get_pin(pin).value = False
            self.status(True)
        except (OSError,ValueError):
            # Expander absent or bad address; report and propagate.
            print("No such device %x"%self.address)
            raise

    def vend(self,slot):
        """Pulse the motor for `slot` (1-9) for half a second."""
        pin = self._pinmap[slot]
        self._driver.get_pin(pin).value = True
        sleep(0.5)
        self._driver.get_pin(pin).value = False

    def status(self,state):
        """Switch the row's status lamp on (True) or off (False)."""
        self._driver.get_pin(self._status).value = state
if __name__ == '__main__':
    # Interactive smoke test: type e.g. "A3" to pulse row A, slot 3.
    rows = {
        'A':Row(0x25,'A'),
        'B':Row(0x24,'B'),
        'C':Row(0x23,'C'),
        'D':Row(0x22,'D'),
        'E':Row(0x21,'E'),
        'F':Row(0x20,'F')
    }
    while True:
        c = input()
        # NOTE(review): rows[c[0]] raises KeyError for unknown letters, so the
        # falsy check below never fires — confirm desired handling.
        row = rows[c[0]]
        if not row:
            print('No such device %s'%c)
            continue
        try:
            row.vend(int(c[1]))
        except (IndexError,AttributeError):
            print('bad pin "%s"'%c)
            continue
|
flavours/fam-flavour | bin/check.py | <filename>bin/check.py
#!/usr/bin/env python3
import subprocess
import sys
import click
import libflavour
def log(string):
    """Echo a message prefixed with the flavour namespace."""
    click.echo("fam-flavour: {}".format(string))
def check_structure(yaml):
    """Validate the YAML parses as a libflavour Addon; raises on failure."""
    libflavour.Addon(yaml)
def check_policies(yaml):
    """Run conftest policy checks on the YAML, exiting non-zero on failure.

    The YAML is piped to conftest on stdin. ``communicate(input=...)`` writes,
    closes stdin and drains stdout in one call; the previous manual
    ``stdin.write()`` before ``communicate()`` could deadlock once the pipe
    buffer filled, and the trailing ``stdin.close()`` was redundant.
    """
    process = subprocess.Popen(
        ["conftest", "test", "-p=/flavour/fam-flavour/policy", "-"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
    outs, errs = process.communicate(str.encode(yaml))
    print(outs.decode("utf-8").strip())
    if process.returncode:
        # stderr is not piped, so errs is None unless Popen is changed;
        # keep the guard for forward compatibility.
        if errs:
            print(errs.decode("utf-8").strip())
        sys.exit(process.returncode)
@click.command()
def check():
    """Read an addon YAML from stdin and run structure then policy checks."""
    yaml = click.get_text_stream("stdin").read()
    log("Check structure")
    check_structure(yaml)
    log("Check policies")
    check_policies(yaml)


if __name__ == "__main__":
    check()
|
flavours/fam-flavour | bin/add.py | #!/usr/bin/env python3
import hashlib
import os
from collections import OrderedDict
from pathlib import Path
import click
import libflavour
from strictyaml import as_document
def log(string):
    """Echo a message prefixed with the flavour namespace."""
    click.echo("fam-flavour: {}".format(string))
def save_yaml(yaml_as_string):
    """Cache the addon YAML under .flavour/addons/<sha256-of-content>.

    Returns the hex digest, which doubles as the cache file name.
    """
    addons_dir = Path(".flavour") / "addons"
    addons_dir.mkdir(parents=True, exist_ok=True)
    the_hash = hashlib.sha256(yaml_as_string.encode("utf-8")).hexdigest()
    with (addons_dir / the_hash).open("w") as cache_file:
        cache_file.write(yaml_as_string)
    return the_hash
def add_requirement(flavour_package_name, platform_fam, version, yaml_hash):
    """Record an addon entry (name:version -> manager/hash) in app.flavour.

    :param flavour_package_name: addon name from its meta section
    :param platform_fam: FAM manager identifier
    :param version: addon version string
    :param yaml_hash: sha256 content hash of the cached addon YAML
    """
    flavour_file = "app.flavour"
    with Path(flavour_file).open("r") as f:
        yml = f.read()
    yaml_data = libflavour.Application(yml)._data
    if (
        "addons" in yaml_data
        and flavour_package_name in yaml_data["addons"]
    ):
        # Duplicate entry: leave the configuration untouched.
        log("can not add to configuration, addon entry already exists")
    else:
        log(f"adding new {flavour_package_name}")
        yaml_data["addons"][
            f"{flavour_package_name}:{version}"
        ] = as_document(
            OrderedDict([("manager", platform_fam), ("hash", yaml_hash)])
        )
    # NOTE(review): original indentation was ambiguous in the dump; the file
    # is rewritten unconditionally here (a no-op for duplicates) — confirm.
    with Path(flavour_file).open("w") as f:
        f.write(yaml_data.as_yaml())
@click.command()
def add():
    """Read an addon YAML from stdin, cache it, and register it in app.flavour."""
    yaml = click.get_text_stream("stdin").read()
    yaml_data = libflavour.Addon(yaml).data
    yaml_hash = save_yaml(yaml)
    add_requirement(
        flavour_package_name=yaml_data["meta"]["name"],
        platform_fam=os.environ["FAM_IDENTIFIER"],
        version=str(yaml_data["meta"]["version"]),
        yaml_hash=yaml_hash,
    )


if __name__ == "__main__":
    add()
|
flavours/fam-flavour | bin/remove.py | <filename>bin/remove.py
#!/usr/bin/env python3
from pathlib import Path
import click
import libflavour
from strictyaml import load
def log(string):
    """Echo a message prefixed with the flavour namespace."""
    click.echo("fam-flavour: {}".format(string))
def remove_requirement(flavour_package_name, version):
    """Remove an addon entry (and its cached YAML file) from app.flavour.

    :param flavour_package_name: addon name as recorded in app.flavour
    :param version: addon version string
    """
    with Path("app.flavour").open("r") as f:
        yml = f.read()
    yaml_data = load(yml, libflavour.schema.schema_project)
    name_and_version = f"{flavour_package_name}:{version}"
    if name_and_version in yaml_data["addons"]:
        log("deleting")
        # Remove the cached addon YAML, located by its content hash.
        p = (
            Path(".flavour")
            / "addons"
            / str(yaml_data["addons"][name_and_version]["hash"])
        )
        p.unlink()
        del yaml_data["addons"][name_and_version]
        # (removed a stray copy-pasted log line from add.py that claimed
        # "can not add ... entry already exists" on every successful delete)
    else:
        log("could not find entry")
    # Rewrite app.flavour; a no-op when the entry was not found.
    with Path("app.flavour").open("w") as f:
        f.write(yaml_data.as_yaml())
@click.command()
def remove():
    """Read an addon YAML from stdin and remove it from app.flavour.

    Renamed from `add` — the command in remove.py was a copy-paste of add.py
    and registered under the wrong name.
    """
    yaml = click.get_text_stream("stdin").read()
    yaml_data = load(yaml, libflavour.schema.schema_addon)
    remove_requirement(yaml_data["meta"]["name"], yaml_data["meta"]["version"])


if __name__ == "__main__":
    remove()
|
oaksharks/HyperTS | hyperts/search_space.py | # -*- coding:utf-8 -*-
from hypernets.core.ops import HyperInput
from hypernets.core.search_space import HyperSpace, Choice
from hyperts.estimators import TSEstimatorMS, ProphetWrapper, VARWrapper, SKTimeWrapper
def search_space_univariate_forecast():
    """Build a search space for univariate forecasting backed by Prophet.

    Searches over Prophet's interval_width and seasonality_mode.
    """
    space = HyperSpace()
    with space.as_default():
        # Renamed from `input`, which shadowed the builtin.
        input_node = HyperInput(name='input1')
        TSEstimatorMS(ProphetWrapper,
                      interval_width=Choice([0.5, 0.6]),
                      seasonality_mode=Choice(['additive', 'multiplicative']))(input_node)
        space.set_inputs(input_node)
    return space
def search_space_multivariate_forecast():
    """Build a search space for multivariate forecasting backed by VAR.

    Searches over the VAR information-criterion used for lag selection.
    """
    space = HyperSpace()
    with space.as_default():
        # Renamed from `input`, which shadowed the builtin.
        input_node = HyperInput(name='input1')
        TSEstimatorMS(VARWrapper, ic=Choice(['aic', 'fpe', 'hqic', 'bic']))(input_node)
        space.set_inputs(input_node)
    return space
def space_classification_classification():
    """Build a search space for TS classification backed by sktime's forest.

    Searches over the ensemble size (n_estimators).
    """
    space = HyperSpace()
    with space.as_default():
        # Renamed from `input`, which shadowed the builtin.
        input_node = HyperInput(name='input1')
        TSEstimatorMS(SKTimeWrapper, n_estimators=Choice([50, 100, 150]))(input_node)
        space.set_inputs(input_node)
    return space
# TODO: define others search space
|
oaksharks/HyperTS | hyperts/experiment.py | <reponame>oaksharks/HyperTS
import copy
import numpy as np
import pandas as pd
from hypernets.core import set_random_state
from hypernets.experiment import StepNames
from hypernets.experiment.compete import SteppedExperiment, ExperimentStep, EnsembleStep, FinalTrainStep
from hypernets.tabular import get_tool_box
from hypernets.tabular.data_cleaner import DataCleaner
from hypernets.utils import logging
from hyperts.hyper_ts import HyperTS
logger = logging.get_logger(__name__)
DEFAULT_EVAL_SIZE = 0.3
class TSDataPreprocessStep(ExperimentStep):
    """Experiment step that cleans covariate columns before search."""

    def __init__(self, experiment, name, covariate_cols=None, covariate_data_clean_args=None):
        super().__init__(experiment, name)
        if covariate_data_clean_args is None:
            covariate_data_clean_args = {}
        self.covariate_cols = covariate_cols
        self.covariate_data_clean_args = covariate_data_clean_args
        # fitted
        self.covariate_data_cleaner = DataCleaner(**self.covariate_data_clean_args)

    def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
        super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
        # 1. process covariate features
        if self.covariate_cols is not None and len(self.covariate_cols) > 0:
            excluded_cols = list(set(X_train.columns.tolist()) -set(self.covariate_cols))
            df_exclude = X_train[excluded_cols]
            df_covariate = self.covariate_data_cleaner.fit_transform(X_train[self.covariate_cols])
            # TODO: check shape
            # NOTE(review): pd.concat defaults to axis=0 (row stacking);
            # rejoining columns probably needs axis=1 — confirm with callers.
            X_train_cleaned_covariate = pd.concat([df_exclude, df_covariate])
            X_train = X_train_cleaned_covariate
        return hyper_model, X_train, y_train, X_test, X_eval, y_eval

    def get_params(self, deep=True):
        return {}

    def transform(self, X, y=None, **kwargs):
        # transform covariate features
        # NOTE(review): this re-fits the cleaner at transform time
        # (fit_transform rather than transform) — confirm intended.
        X_transform = self.covariate_data_cleaner.fit_transform(X)
        return X_transform[0]  # selected X

    def get_fitted_params(self):
        return {}  # TODO:
class TSSpaceSearchStep(ExperimentStep):
    """Experiment step that runs hyper-parameter search on a copy of the hyper model."""

    def __init__(self, experiment, name):
        super().__init__(experiment, name)
        # fitted
        self.dataset_id = None
        self.model = None
        self.history_ = None
        self.best_reward_ = None

    def fit_transform(self, hyper_model, X_train, y_train, X_test=None, X_eval=None, y_eval=None, **kwargs):
        super().fit_transform(hyper_model, X_train, y_train, X_test=X_test, X_eval=X_eval, y_eval=y_eval)
        if X_eval is not None:
            kwargs['eval_set'] = (X_eval, y_eval)
        # Search on a deep copy so the experiment's original model is untouched.
        model = copy.deepcopy(self.experiment.hyper_model)  # copy from original hyper_model instance
        model.search(X_train, y_train, X_eval, y_eval, **kwargs)
        if model.get_best_trial() is None or model.get_best_trial().reward == 0:
            raise RuntimeError('Not found available trial, change experiment settings and try again pls.')
        self.dataset_id = 'abc'  # fixme
        self.model = model
        self.history_ = model.history
        self.best_reward_ = model.get_best_trial().reward
        logger.info(f'{self.name} best_reward: {self.best_reward_}')
        return self.model, X_train, y_train, X_test, X_eval, y_eval

    def transform(self, X, y=None, **kwargs):
        # Searching does not modify the data.
        return X

    def is_transform_skipped(self):
        return True

    def get_fitted_params(self):
        return {**super().get_fitted_params(),
                'best_reward': self.best_reward_,
                'history': self.history_,
                }
class TSEnsembleStep(EnsembleStep):
    """Ensemble step that treats forecasting tasks as regression for ensembling."""

    def get_ensemble(self, estimators, X_train, y_train):
        # return GreedyEnsemble(self.task, estimators, scoring=self.scorer, ensemble_size=self.ensemble_size)
        tb = get_tool_box(X_train, y_train)
        # Forecast outputs are continuous, so the greedy ensemble runs in
        # regression mode for the forecasting tasks.
        if self.task in ['forecast', "multivariate-forecast"]:
            ensemble_task = 'regression'
        else:
            ensemble_task = self.task
        return tb.greedy_ensemble(ensemble_task, estimators, scoring=self.scorer, ensemble_size=self.ensemble_size)
class TSExperiment(SteppedExperiment):
    """Stepped experiment for time-series tasks: clean -> search -> final train."""

    def __init__(self, hyper_model, X_train, y_train, time_series_col=None, covariate_cols=None,
                 covariate_data_clean_args=None, X_eval=None, y_eval=None, log_level=None,
                 random_state=None, ensemble_size=3, **kwargs):
        if random_state is None:
            random_state = np.random.randint(0, 65535)
        set_random_state(random_state)
        task = hyper_model.task
        # todo: check task
        # todo: check scorer
        steps = []
        # data clean
        # Fix: `df.nunique(dropna=True)` in _get_df_uniques cause
        # `TypeError: unhashable type: 'Series'` in case of nest pd.Series
        if task not in [HyperTS.TASK_BINARY_CLASSIFICATION]:
            steps.append(TSDataPreprocessStep(self, StepNames.DATA_CLEAN,
                                              covariate_data_clean_args=covariate_data_clean_args))
        # search step
        steps.append(TSSpaceSearchStep(self, StepNames.SPACE_SEARCHING))
        # ensemble step,
        # steps.append(TSEnsembleStep(self, StepNames.FINAL_ENSEMBLE, scorer=scorer, ensemble_size=ensemble_size))
        steps.append(FinalTrainStep(self, StepNames.FINAL_TRAINING, retrain_on_wholedata=False))
        # ignore warnings
        import warnings
        warnings.filterwarnings('ignore')
        # if log_level is not None:
        #     _set_log_level(log_level)
        self.run_kwargs = kwargs
        # NOTE(review): `id=id` passes the *builtin* id function as the
        # experiment id — confirm this is intended.
        super(TSExperiment, self).__init__(steps, hyper_model, X_train, y_train, X_eval=X_eval, y_eval=y_eval,
                                           eval_size=0.3, task=task, id=id, random_state=random_state)

    def run(self, **kwargs):
        """Run all steps; kwargs given here override those from construction."""
        run_kwargs = {**self.run_kwargs, **kwargs}
        return super().run(**run_kwargs)

    def _repr_html_(self):
        # Rich notebook rendering when hn_widget is installed; plain repr otherwise.
        try:
            from hn_widget.widget import ExperimentSummary
            from IPython.display import display
            display(ExperimentSummary(self))
        except:
            return self.__repr__()
|
oaksharks/HyperTS | hyperts/estimators.py | import numpy as np
from prophet import Prophet
from sktime.classification.interval_based import TimeSeriesForestClassifier
from statsmodels.tsa.vector_ar.var_model import VAR
from hypernets.core.search_space import ModuleSpace
from hypernets.utils import logging
logger = logging.get_logger(__name__)
class EstimatorWrapper:
    """Minimal interface all time-series estimator adapters implement."""

    def fit(self, X, y):
        """Fit the underlying model on features X and target y."""
        pass

    def predict(self, periods):
        # NOTE(review): concrete wrappers all take a feature frame `X` here,
        # not a period count — the parameter name is misleading.
        pass
class ProphetWrapper(EstimatorWrapper):
    """Adapter exposing Prophet through the EstimatorWrapper interface."""

    def __init__(self, **kwargs):
        self.model = Prophet(**kwargs)

    def fit(self, X, y):
        """Fit Prophet on the 'ds' timestamp column of X and target y."""
        # Copy: `X[['ds']]` is (potentially) a view, and assigning the 'y'
        # column onto it triggers SettingWithCopy and can mutate caller data.
        df_train = X[['ds']].copy()
        df_train['y'] = y
        self.model.fit(df_train)

    def predict(self, X):
        """Return the point forecast ('yhat') for the timestamps in X."""
        df_predict = self.model.predict(X)
        return df_predict['yhat'].values
class VARWrapper(EstimatorWrapper):
    """Adapter exposing statsmodels VAR through the EstimatorWrapper interface."""

    def __init__(self, **kwargs):
        if kwargs is None:
            kwargs = {}
        self.init_kwargs = kwargs
        self.model = None
        # fitted
        self._start_date = None
        self._end_date = None
        self._freq = None  # sampling period in seconds, inferred at fit time
        self._targets = []

    def fit(self, X, y):
        """Fit VAR on multivariate target y indexed by the 'ds' column of X."""
        # Infer the sampling period from the first two timestamps; assumes a
        # regular, sorted series.
        date_series_top2 = X['ds'][:2].tolist()
        self._freq = (date_series_top2[1] - date_series_top2[0]).total_seconds()
        self._start_date = X['ds'].head(1).to_list()[0].to_pydatetime()
        self._end_date = X['ds'].tail(1).to_list()[0].to_pydatetime()
        model = VAR(endog=y, dates=X['ds'])
        self.model = model.fit(**self.init_kwargs)

    def predict(self, X):
        """Forecast far enough past the training window to cover X, then
        pick the forecast row matching each requested timestamp."""
        last_date = X['ds'].tail(1).to_list()[0].to_pydatetime()
        steps = int((last_date - self._end_date).total_seconds()/self._freq)
        predict_result = self.model.forecast(self.model.y, steps=steps)

        def calc_index(date):
            # Map a timestamp to its 0-based offset into the forecast array.
            r_i = int((date - self._end_date).total_seconds()/self._freq) - 1
            return predict_result[r_i].tolist()

        return np.array(X['ds'].map(calc_index).to_list())
class SKTimeWrapper(EstimatorWrapper):
    """Adapter exposing sktime's TimeSeriesForestClassifier through the
    EstimatorWrapper interface."""

    def __init__(self, **kwargs):
        if kwargs is None:
            kwargs = {}
        self.init_kwargs = kwargs
        self.model = TimeSeriesForestClassifier(**kwargs)

    def fit(self, X, y):
        """Fit the forest classifier on series features X and labels y."""
        self.model.fit(X, y)

    def predict(self, X):
        """Return predicted class labels for X."""
        predict_result = self.model.predict(X)
        return predict_result
class TSEstimatorMS(ModuleSpace):
    """Search-space module wrapping an estimator adapter class; its sampled
    hyperparams become the adapter's constructor kwargs."""

    def __init__(self, wrapper_cls, fit_kwargs={}, space=None, name=None, **hyperparams):
        # NOTE(review): mutable default `fit_kwargs={}` is shared across
        # instances — safe only while it is never mutated; confirm.
        ModuleSpace.__init__(self, space, name, **hyperparams)
        self.fit_kwargs = fit_kwargs
        self.wrapper_cls = wrapper_cls
        self.estimator = None

    def _build_estimator(self, task, kwargs):
        raise NotImplementedError

    def build_estimator(self, task=None):
        """Instantiate the wrapper with the sampled hyper-parameter values."""
        pv = self.param_values
        self.estimator = self.wrapper_cls(**pv)
        return self.estimator

    def _on_params_ready(self):
        pass

    def _compile(self):
        pass

    def _forward(self, inputs):
        # The module's forward pass just hands back the built estimator.
        return self.estimator
|
oaksharks/HyperTS | hyperts/tests/task_test.py | <reponame>oaksharks/HyperTS<filename>hyperts/tests/task_test.py<gh_stars>0
# -*- coding:utf-8 -*-
from sklearn.model_selection import train_test_split
from sktime.datasets import load_arrow_head
from hypernets.core.callbacks import *
from hypernets.core.searcher import OptimizeDirection
from hypernets.searchers.random_searcher import RandomSearcher
from hyperts.experiment import TSExperiment
from hyperts.hyper_ts import HyperTS
from hyperts.search_space import search_space_univariate_forecast, search_space_multivariate_forecast, \
space_classification_classification
from .datasets import *
class Test_Task():
    """End-to-end smoke tests: one experiment run per supported task type."""

    def test_univariate_forecast(self):
        """Prophet-backed univariate forecast produces one row per test row."""
        X, y = get_random_univariate_forecast_dataset()
        # shuffle=False: keep chronological order for a forecasting split.
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=False)
        rs = RandomSearcher(search_space_univariate_forecast, optimize_direction=OptimizeDirection.Minimize)
        hyper_model = HyperTS(rs, task='univariate-forecast', reward_metric='neg_mean_squared_error', callbacks=[SummaryCallback()])
        exp = TSExperiment(hyper_model, X_train, y_train, time_series_col='ds', X_eval=X_test, y_eval=y_test)
        pipeline_model = exp.run(max_trials=3)
        y_pred = pipeline_model.predict(X_test)
        assert y_pred.shape[0] == X_test.shape[0]

    def test_multivariate_forecast(self):
        """VAR-backed multivariate forecast returns two target columns."""
        X, y = get_random_multivariate_forecast_dataset()
        X_train, X_test, y_train, y_test, = train_test_split(X, y, test_size=0.2, shuffle=False)
        rs = RandomSearcher(search_space_multivariate_forecast, optimize_direction=OptimizeDirection.Minimize)
        hyper_model = HyperTS(rs, task='multivariate-forecast', reward_metric='neg_mean_squared_error')
        exp = TSExperiment(hyper_model, X_train, y_train, time_series_col='ds', X_eval=X_test, y_eval=y_test)
        pipeline_model = exp.run(max_trials=3)
        y_pred = pipeline_model.predict(X_test)
        assert y_pred.shape[1] == 2
        assert y_pred.shape[0] == X_test.shape[0]

    def test_univariate_classification(self):
        """sktime-backed classification predicts one label per test row."""
        X, y = load_arrow_head(return_X_y=True)
        X_train, X_test, y_train, y_test = train_test_split(X, y)
        rs = RandomSearcher(space_classification_classification, optimize_direction=OptimizeDirection.Maximize)
        hyper_model = HyperTS(rs, task='binary-classification', reward_metric='accuracy')
        exp = TSExperiment(hyper_model, X_train, y_train, X_eval=X_test, y_eval=y_test)
        pipeline_model = exp.run(max_trials=3)
        y_pred = pipeline_model.predict(X_test)
        assert y_pred.shape[0] == X_test.shape[0]
|
oaksharks/HyperTS | hyperts/hyper_ts.py | <gh_stars>0
# -*- coding:utf-8 -*-
"""
"""
import pickle
import numpy as np
from sklearn import metrics as sk_metrics
from hypernets.dispatchers.in_process_dispatcher import InProcessDispatcher
from hypernets.model.estimator import Estimator
from hypernets.model.hyper_model import HyperModel
from hypernets.utils import logging, fs
logger = logging.get_logger(__name__)
class HyperTSEstimator(Estimator):
    """Estimator built from a sampled search-space: compiles the space into a
    single wrapped time-series model and scores it per task type."""

    def __init__(self, task, space_sample, data_cleaner_params=None):
        super(HyperTSEstimator, self).__init__(space_sample=space_sample, task=task)
        self.data_pipeline = None
        self.data_cleaner_params = data_cleaner_params
        self.model = None  # Time-Series model
        self.cv_gbm_models_ = None
        self.data_cleaner = None
        self.pipeline_signature = None
        self.fit_kwargs = None
        self.class_balancing = None
        self.classes_ = None
        self.pos_label = None
        self.transients_ = {}
        self._build_model(space_sample)

    def _build_model(self, space_sample):
        """Compile the sampled space and build its single output estimator."""
        space, _ = space_sample.compile_and_forward()
        outputs = space.get_outputs()
        assert len(outputs) == 1, 'The space can only contains 1 output.'
        self.model = outputs[0].build_estimator()
        # logger.debug(f'data_pipeline:{self.data_pipeline}')
        # todo self.pipeline_signature = self.get_pipeline_signature(self.data_pipeline)
        # self.model = ProphetWrapper(**sampled_estimator_params)

    def summary(self):
        return "HyperTSEstimator"

    def fit_cross_validation(self, X, y, verbose=0, stratified=True, num_folds=3, pos_label=None,
                             shuffle=False, random_state=9527, metrics=None, **kwargs):
        # Cross-validation is not supported for these wrappers.
        return None, None, None

    def get_iteration_scores(self):
        return None

    def fit(self, X, y, pos_label=None, verbose=0, **kwargs):
        self.model.fit(X, y)

    def predict(self, X, verbose=0, **kwargs):
        return self.model.predict(X)

    def predict_proba(self, X, verbose=0, **kwargs):
        # Probability output not supported by the wrapped estimators.
        return None

    def evaluate(self, X, y, metrics=None, verbose=0, **kwargs):
        """Score predictions with a task-appropriate metric.

        Forecast tasks report mean squared error (per-target averaged for the
        multivariate case); binary classification reports accuracy.
        """
        if not isinstance(y, np.ndarray):
            y = np.array(y)
        y_pred = self.model.predict(X)
        if self.task == HyperTS.TASK_MULTIVARIATE_FORECAST:
            # Average the MSE over each target column.
            scores = []
            for i in range(y.shape[1]):
                y_true_part = y[:, i]
                y_pred_part = y_pred[:, i]
                score_part = sk_metrics.mean_squared_error(y_true_part, y_pred_part)  # todo calc mse
                scores.append(score_part)
            score = np.mean(scores)
            return {'neg_mean_squared_error': score}
        elif self.task == HyperTS.TASK_BINARY_CLASSIFICATION:
            score = sk_metrics.accuracy_score(y, y_pred)
            return {'accuracy': score}
        # TODO: others task types and metrics
        else:
            score = sk_metrics.mean_squared_error(y, y_pred)  # todo calc mse
            return {'neg_mean_squared_error': score}

    def save(self, model_file):
        """Pickle the whole estimator to `model_file` via the fs abstraction."""
        with fs.open(f'{model_file}', 'wb') as output:
            pickle.dump(self, output, protocol=pickle.HIGHEST_PROTOCOL)

    @staticmethod
    def load(model_file):
        """Unpickle an estimator previously written by save()."""
        with fs.open(f'{model_file}', 'rb') as input:
            model = pickle.load(input)
            return model
class HyperTS(HyperModel):
    """HyperModel specialization for time-series tasks."""

    # Supported task identifiers.
    TASK_BINARY_CLASSIFICATION = 'binary-classification'
    TASK_MULTIVARIATE_FORECAST = 'multivariate-forecast'
    TASK_UNIVARIATE_FORECAST = 'univariate-forecast'

    def __init__(self, searcher, dispatcher=None, callbacks=None, reward_metric='accuracy', task=None,
                 discriminator=None, data_cleaner_params=None, cache_dir=None, clear_cache=None):
        self.data_cleaner_params = data_cleaner_params
        HyperModel.__init__(self, searcher, dispatcher=dispatcher, callbacks=callbacks, reward_metric=reward_metric,
                            task=task, discriminator=discriminator)

    def _get_estimator(self, space_sample):
        # Each trial builds a fresh HyperTSEstimator from the sampled space.
        estimator = HyperTSEstimator(task=self.task, space_sample=space_sample, data_cleaner_params=self.data_cleaner_params)
        return estimator

    def load_estimator(self, model_file):
        return HyperTSEstimator.load(model_file)

    def export_trial_configuration(self, trial):
        return '`export_trial_configuration` does not implemented'

    def search(self, X, y, X_eval, y_eval, max_trials=3, dataset_id=None, trial_store=None, **kwargs):
        """Run trials via an in-process dispatcher (no CV, fixed temp dir)."""
        if dataset_id is None:
            dataset_id = self.generate_dataset_id(X, y)
        for callback in self.callbacks:
            callback.on_search_start(self, X, y, X_eval, y_eval, None, None, max_trials, dataset_id, trial_store=trial_store)
        dispatcher = InProcessDispatcher('/tmp/tmp_data')  # TODO:
        dispatcher.dispatch(self, X, y, X_eval, y_eval, cv=False, num_folds=None, max_trials=max_trials, dataset_id=dataset_id, trial_store=trial_store)
|
oaksharks/HyperTS | hyperts/utils/data_ops.py | import numpy as np
import pandas as pd
import datetime
import chinese_calendar
from sklearn.preprocessing import OrdinalEncoder
class offsets_pool:
    """Candidate row offsets used to impute a NaN from its seasonal
    neighbours, grouped by the sampling frequency of the series."""

    # Immediate neighbours only (fallback for unknown frequencies).
    neighbor = [-1, 1]
    # Neighbours plus +/- 1..4 periods of the next-larger cycle.
    second = [-1, 1, -60 * 4, -60 * 3, -60 * 2, -60 * 1, 60 * 1, 60 * 2, 60 * 3, 60 * 4]
    minute = [-1, 1, -60 * 4, -60 * 3, -60 * 2, -60 * 1, 60 * 1, 60 * 2, 60 * 3, 60 * 4]
    # Hourly data also gets weekly (168 h) offsets.
    hour = [-1, 1, -24 * 4, -24 * 3, -24 * 2, -24 * 1, 24 * 1, 24 * 2, 24 * 3, 24 * 4,
            -168 * 4, -168 * 3, -168 * 2, -168 * 1, 168 * 1, 168 * 2, 168 * 3, 168 * 4]
    day = [-1, 1, -30 * 4, -30 * 3, -30 * 2, -30 * 1, 30 * 1, 30 * 2, 30 * 3, 30 * 4]
    month = [-1, 1, -12 * 4, -12 * 3, -12 * 2, -12 * 1, 12 * 1, 12 * 2, 12 * 3, 12 * 4]
    year = [-1, 1]
def reduce_memory_usage(df: pd.DataFrame, verbose=True):
    '''Reduce RAM usage by downcasting numeric columns in place.

    Each int/float column is recast to the narrowest dtype whose range covers
    the column's observed min/max. NOTE(review): downcasting to float16 can
    lose precision — confirm acceptable for the data at hand.

    :param df: frame to downcast; modified in place and also returned
    :param verbose: print the memory saved when True
    '''
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024 ** 2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                # Pick the smallest signed int type covering [c_min, c_max].
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                # Pick the smallest float type covering the range.
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024 ** 2
    if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (
            start_mem - end_mem) / start_mem))
    return df
def infer_ts_freq(df: pd.DataFrame, ts_name: str = 'TimeStamp'):
    """Infer the sampling frequency of the `ts_name` timestamp column.

    Slides a 3-point window down the series and returns the first frequency
    pandas can infer, so a few irregular points at the head do not defeat
    detection.

    :param df: frame containing the timestamp column
    :param ts_name: name of the timestamp column
    :return: pandas frequency alias (e.g. 'D'), or None if none can be inferred
    """
    dateindex = pd.DatetimeIndex(pd.to_datetime(df[ts_name]))
    for i in range(len(df)):
        freq = pd.infer_freq(dateindex[i:i + 3])
        if freq is not None:  # idiom fix: `is not None` instead of `!= None`
            return freq
    return None  # explicit: no regular frequency found
def _inpute(values, offsets):
    """One imputation pass: fill each NaN with the mean of its offset donors.

    :param values: 2-D ndarray, modified in place
    :param offsets: row offsets of candidate donor cells (same column)
    :return: (values, fraction of cells still NaN after the pass)
    """
    indices0, indices1 = np.where(np.isnan(values))
    padding = []
    for offset in offsets:
        offset_indices0 = indices0 + offset
        # Donors falling outside the array are clamped back to the NaN's own
        # row, so they contribute NaN and are ignored by nanmean.
        start_bound_limit = np.where(indices0 + offset < 0)
        end_bound_limit = np.where(indices0 + offset > len(values) - 1)
        offset_indices0[start_bound_limit] = indices0[start_bound_limit]
        offset_indices0[end_bound_limit] = indices0[end_bound_limit]
        padding.append(values[(offset_indices0, indices1)])
    # Mean over all donors per NaN; cells with no valid donor stay NaN.
    values[(indices0, indices1)] = np.nanmean(padding, axis=0)
    missing_rate = np.sum(np.isnan(values)) / values.size
    return values, missing_rate
def multi_period_loop_imputer(df: pd.DataFrame, offsets: list, freq: str, max_loops: int = 10):
    """Multiple Period Loop Impute NAN.

    Repeatedly fills NaNs from seasonal neighbour cells until no NaN remains
    or `max_loops` passes have run; any stragglers get the global mean.

    Args:
        df: numeric frame to impute (a filled copy is returned)
        offsets: explicit offset list, or None to pick a pool from `freq`
        freq: str
            'S' - second
            'T' - minute
            'H' - hour
            'D' - day
            'M' - month
            'Y','A', A-DEC' - year
        max_loops: maximum number of imputation passes
    """
    if offsets is None:  # idiom fix: `is None` instead of the `== None` chain
        # Dispatch table replaces the elif ladder. Fix: 'S' now selects the
        # dedicated second pool (it previously fell through to minute).
        freq_pools = {
            'S': offsets_pool.second,
            'T': offsets_pool.minute,
            'H': offsets_pool.hour,
            'D': offsets_pool.day,
            'M': offsets_pool.month,
            'Y': offsets_pool.year,
        }
        offsets = freq_pools.get(freq, offsets_pool.neighbor)
    values = df.values.copy()
    loop, missing_rate = 0, 1
    while loop < max_loops and missing_rate > 0:
        values, missing_rate = _inpute(values, offsets)
        loop += 1
    # Fall back to the global mean for anything still missing.
    values[np.where(np.isnan(values))] = np.nanmean(values)
    fill_df = pd.DataFrame(values, columns=df.columns)
    return fill_df
def forward_period_imputer(df: pd.DataFrame, offset: int):
    """Fill each NaN with the first value of its trailing window of size `offset`."""
    window_heads = df.rolling(window=offset, min_periods=1).agg(lambda w: w.iloc[0])
    return df.fillna(window_heads)
def simple_numerical_imputer(df: pd.DataFrame, mode='mean'):
    """Fill NaN with column mean, column mode, or 0.

    :param df: numeric DataFrame
    :param mode: 'mean' (default), 'mode', anything else fills with 0
    """
    if mode == 'mean':
        df = df.fillna(df.mean().fillna(0).to_dict())
    elif mode == 'mode':
        # df.mode() returns a DataFrame (one row per tied mode); take the
        # first row so fillna receives a flat {column: scalar} mapping.
        # The previous df.mode().fillna(0).to_dict() produced a nested dict,
        # which DataFrame.fillna cannot consume.
        df = df.fillna(df.mode().iloc[0].fillna(0).to_dict())
    else:
        df = df.fillna(0)
    return df
def columns_ordinal_encoder(df: pd.DataFrame):
    """Ordinal-encode every column of df; returns an integer ndarray."""
    # `np.int` was a deprecated alias of the builtin int and was removed in
    # NumPy 1.24, so passing it crashes on current NumPy; use int directly.
    enc = OrdinalEncoder(dtype=int)
    encoder_df = enc.fit_transform(df)
    return encoder_df
def drop_duplicated_ts_rows(df: pd.DataFrame, ts_name: str = 'TimeStamp', keep_data: str = 'last'):
    """Drop rows that share the same timestamp, keeping one per timestamp.

    The last occurrence wins by default, e.g. two rows stamped 2021-03-03
    collapse to the later one.

    :param df: input frame
    :param ts_name: name of the timestamp column used for de-duplication
    :param keep_data: which duplicate to keep ('first' / 'last' / False)
    """
    assert isinstance(df, pd.DataFrame)
    return df.drop_duplicates(subset=[ts_name], keep=keep_data)
def smooth_missed_ts_rows(df: pd.DataFrame, freq: str = None, ts_name: str = 'TimeStamp'):
    """Reindex onto a complete date range so missing timestamps become NaN rows.

    Example: daily data covering 2021-03-01..05 but missing 03-03 gains a row
    for 03-03 whose value columns are NaN.

    :param df: frame containing the timestamp column `ts_name`
    :param freq: pandas frequency alias; inferred from the data when None
    :param ts_name: name of the timestamp column
    """
    assert isinstance(df, pd.DataFrame)
    df = df.copy()  # fix: previously mutated the caller's frame in place
    if freq is None:  # idiom fix: `is None` instead of `== None`
        freq = infer_ts_freq(df, ts_name)
    if df[ts_name].dtypes == object:
        df[ts_name] = pd.to_datetime(df[ts_name])
    df = df.sort_values(by=ts_name)
    start, end = df[ts_name].iloc[0], df[ts_name].iloc[-1]
    # Left-join the data onto the full calendar; absent stamps yield NaN rows.
    full_ts = pd.DataFrame(pd.date_range(start=start, end=end, freq=freq), columns=[ts_name])
    smooth_df = full_ts.join(df.set_index(ts_name), on=ts_name)
    return smooth_df
def clip_to_outliers(df: pd.DataFrame, std_threshold: int = 3):
    """Clip values lying beyond *std_threshold* standard deviations of the mean.

    Per column, values above mean + k*std are set to that bound, and values
    below mean - k*std to the lower bound.

    Args:
        df (pandas.DataFrame): DataFrame containing numeric data
        std_threshold (float): number of standard deviations from the mean
            beyond which a value counts as an outlier.
    """
    assert isinstance(df, pd.DataFrame)
    center = df.mean(axis=0, skipna=True)
    spread = df.std(axis=0, skipna=True) * std_threshold
    return df.clip(lower=center - spread, upper=center + spread, axis=1)
def nan_to_outliers(df: pd.DataFrame, std_threshold: int = 3):
    """Replace values beyond *std_threshold* standard deviations with NaN.

    Args:
        df (pandas.DataFrame): DataFrame containing numeric data
        std_threshold (float): number of standard deviations from the mean
            beyond which a value counts as an outlier.
    """
    assert isinstance(df, pd.DataFrame)
    center = df.mean(axis=0, skipna=True)
    limit = df.std(axis=0, skipna=True) * std_threshold
    is_outlier = (df - center).abs() > limit
    return df.mask(is_outlier, other=np.nan)
def get_holidays(year=None, include_weekends=True):
    """Return Chinese public holidays for *year* as a one-column DataFrame.

    :param year: target year; defaults to the current year (dead
        ``else: year = year`` branch removed).
    :param include_weekends: False to exclude Saturdays and Sundays.
    :return: DataFrame with a single 'Date' column of 'YYYY-MM-DD' strings.
    """
    if not year:
        year = datetime.datetime.now().year
    start = datetime.date(year, 1, 1)
    end = datetime.date(year, 12, 31)
    holidays = chinese_calendar.get_holidays(start, end, include_weekends)
    holidays = pd.DataFrame(holidays, columns=['Date'])
    # Normalise to ISO 'YYYY-MM-DD' strings for membership tests downstream.
    holidays['Date'] = holidays['Date'].apply(lambda x: x.strftime('%Y-%m-%d'))
    return holidays
def generate_ts_covariables(start_date, periods, freq='H'):
    """Build a frame of calendar covariates for a (by default hourly) series.

    :param start_date: first timestamp, e.g. '2021-01-01' (the year prefix is
        used to look up public holidays).
    :param periods: number of timestamps to generate.
    :param freq: pandas frequency string.
    :return: DataFrame with 'TimeStamp' plus hour/weekday/day-segment columns,
        month/quarter boundary flags, a weekend flag and a Chinese
        public-holiday flag.
    """
    dstime = pd.date_range(start_date, periods=periods, freq=freq)
    # Pass a list, not a set, for deterministic column naming.
    fds = pd.DataFrame(dstime, columns=['TimeStamp'])
    fds['Hour'] = fds['TimeStamp'].dt.hour
    fds['WeekDay'] = fds['TimeStamp'].dt.weekday
    # Buckets of the day: late night, early morning, morning, forenoon,
    # midday, afternoon, dusk, evening.
    period_dict = {
        23: 0, 0: 0, 1: 0,
        2: 1, 3: 1, 4: 1,
        5: 2, 6: 2, 7: 2,
        8: 3, 9: 3, 10: 3, 11: 3,
        12: 4, 13: 4,
        14: 5, 15: 5, 16: 5, 17: 5,
        18: 6,
        19: 7, 20: 7, 21: 7, 22: 7,
    }
    # Column name keeps the original spelling ('TimeSegmnet') for backward
    # compatibility with existing consumers.
    fds['TimeSegmnet'] = fds['Hour'].map(period_dict)
    fds['MonthStart'] = fds['TimeStamp'].apply(lambda x: x.is_month_start * 1)
    fds['MonthEnd'] = fds['TimeStamp'].apply(lambda x: x.is_month_end * 1)
    fds['SeasonStart'] = fds['TimeStamp'].apply(lambda x: x.is_quarter_start * 1)
    fds['SeasonEnd'] = fds['TimeStamp'].apply(lambda x: x.is_quarter_end * 1)
    fds['Weekend'] = fds['TimeStamp'].apply(lambda x: 1 if x.dayofweek in [5, 6] else 0)
    public_holiday_list = get_holidays(year=int(start_date[:4]))
    public_holiday_list = public_holiday_list['Date'].to_list()
    # BUG FIX: get_holidays() returns 'YYYY-MM-DD' strings, but this column was
    # formatted '%Y%m%d', so the membership test below never matched and
    # 'Holiday' was always 0. Use the same ISO format on both sides.
    fds['Date'] = fds['TimeStamp'].apply(lambda x: x.strftime('%Y-%m-%d'))
    fds['Holiday'] = fds['Date'].apply(lambda x: 1 if x in public_holiday_list else 0)
    fds.drop(['Date'], axis=1, inplace=True)
    return fds
def infer_forecast_interval(train, forecast, n: int = 5, prediction_interval: float = 0.9):
    """Estimate upper/lower forecast bounds with a rough Bayesian update.

    A corruption of Bayes theorem; sensitive to transformations of the data.
    The training series supplies a Gaussian prior (mean, std); each forecast
    row is treated as *n* pseudo-observations and the posterior mean is padded
    by the z-score of *prediction_interval*.

    :return: (upper_forecast, lower_forecast), both indexed like *forecast*.
    """
    from scipy.stats import norm

    prior_mu = train.mean()
    prior_sigma = train.std()
    tail_prob = 1 - ((1 - prediction_interval) / 2)
    z_score = norm.ppf(tail_prob)
    lower_rows, upper_rows = [], []
    for _, data_mu in forecast.iterrows():
        post_mu = ((prior_mu / prior_sigma ** 2) + ((n * data_mu) / prior_sigma ** 2)
                   ) / ((1 / prior_sigma ** 2) + (n / prior_sigma ** 2))
        low = pd.DataFrame(post_mu - z_score * prior_sigma).transpose()
        low = low.where(low <= data_mu, data_mu, axis=1)
        high = pd.DataFrame(post_mu + z_score * prior_sigma).transpose()
        high = high.where(high >= data_mu, data_mu, axis=1)
        lower_rows.append(low)
        upper_rows.append(high)
    lower_forecast = pd.concat(lower_rows, axis=0) if lower_rows else pd.DataFrame()
    upper_forecast = pd.concat(upper_rows, axis=0) if upper_rows else pd.DataFrame()
    lower_forecast.index = forecast.index
    upper_forecast.index = forecast.index
    return upper_forecast, lower_forecast
def from_3d_array_to_nested_df(data: np.ndarray,
                               columns_names: str = None,
                               cells_as_array: bool = True):
    """Convert a (nb_samples, series_length, nb_variables) ndarray into a
    nested pandas DataFrame whose cells hold one series per sample.

    Parameters
    ----------
    data : np.ndarray
        3-dimensional array to convert.
    columns_names : list-like, default = None
        Optional names for the nested DataFrame's columns; auto-generated
        as 'Var_0', 'Var_1', ... when omitted.
    cells_as_array : bool, default = True
        If True, cells contain numpy arrays; otherwise pandas Series.

    Returns
    ----------
    df : pd.DataFrame

    References
    ----------
    sktime_data_processing: https://github.com/Riyabelle25/sktime/blob/main/sktime/utils/data_processing.py
    """
    nb_samples, _, nb_variables = data.shape
    wrap = np.array if cells_as_array else pd.Series
    if columns_names is None:
        columns_names = [f'Var_{i}' for i in range(nb_variables)]
    else:
        if len(columns_names) != nb_variables:
            raise ValueError(f'The number of column names supplied [{len(columns_names)}] \
does not match the number of data variables [{nb_variables}].')
    df = pd.DataFrame()
    for var_idx, column_name in enumerate(columns_names):
        df[column_name] = [wrap(data[sample_idx, :, var_idx])
                           for sample_idx in range(nb_samples)]
    return df
def from_nested_df_to_3d_array(data: pd.DataFrame):
    """Convert a nested pandas DataFrame (time series as numpy arrays or
    pandas Series in cells) into an ndarray of shape
    (nb_samples, series_length, nb_variables).

    Parameters
    ----------
    data : pd.DataFrame
        Nested pandas DataFrame.

    Returns
    -------
    np.ndarray
        3-dimensional NumPy array.

    Raises
    ------
    ValueError
        If any column does not contain nested series.

    References
    ----------
    sktime_data_processing: https://github.com/Riyabelle25/sktime/blob/main/sktime/utils/data_processing.py
    """
    nested_flags = data.applymap(
        lambda cell: isinstance(cell, (np.ndarray, pd.Series))).any().values
    if not all(nested_flags):
        raise ValueError
    as_arrays = data.applymap(
        lambda cell: cell.to_numpy() if isinstance(cell, pd.Series) else cell)
    stacked = np.stack(as_arrays.apply(lambda row: np.stack(row), axis=1).to_numpy())
    # stacked is (samples, variables, length); move length before variables.
    return stacked.transpose(0, 2, 1)
|
oaksharks/HyperTS | hyperts/mk_experiment.py | # -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
"""
def make_experiment(train_data,
                    target=None,
                    eval_data=None,
                    test_data=None,
                    task=None,
                    id=None,
                    callbacks=None,
                    searcher=None,
                    search_space=None,
                    search_callbacks=None,
                    early_stopping_rounds=10,
                    early_stopping_time_limit=3600,
                    early_stopping_reward=None,
                    reward_metric=None,
                    optimize_direction=None,
                    estimator_early_stopping_rounds=None,
                    use_cache=None,
                    clear_cache=None,
                    discriminator=None,
                    log_level=None,
                    **kwargs):
    """Create a time-series experiment from data and search settings.

    NOTE(review): placeholder — the body is not yet implemented (see TODO).
    The parameters mirror the usual Hypernets experiment options (search
    space/searcher, callbacks, early stopping, caching, logging); presumably
    they will be forwarded to an experiment object once implemented — confirm
    when filling in the body.
    """
    pass
    # TODO:
bernardotorres/eventsourcing | eventsourcing/tests/test_postgres.py | <gh_stars>0
import os
from time import sleep
from unittest import TestCase
from unittest.mock import Mock
from uuid import uuid4
import psycopg2
from psycopg2.extensions import connection
from eventsourcing.persistence import (
DatabaseError,
DataError,
InfrastructureFactory,
IntegrityError,
InterfaceError,
InternalError,
NotSupportedError,
OperationalError,
PersistenceError,
ProgrammingError,
StoredEvent,
Tracking,
)
from eventsourcing.postgres import (
Connection,
Factory,
PostgresAggregateRecorder,
PostgresApplicationRecorder,
PostgresDatastore,
PostgresProcessRecorder,
Transaction,
)
from eventsourcing.tests.aggregaterecorder_testcase import (
AggregateRecorderTestCase,
)
from eventsourcing.tests.applicationrecorder_testcase import (
ApplicationRecorderTestCase,
)
from eventsourcing.tests.infrastructure_testcases import (
InfrastructureFactoryTestCase,
)
from eventsourcing.tests.processrecorder_testcase import (
ProcessRecorderTestCase,
)
from eventsourcing.utils import get_topic
def pg_close_all_connections(
    name="eventsourcing",
    host="127.0.0.1",
    port="5432",
    user="postgres",
    password="<PASSWORD>",
):
    """Terminate every other session on the given Postgres server.

    Test helper used to simulate dropped/stale connections. First tries to
    connect without credentials (local development), then falls back to
    user/password (CI). Issues ``pg_terminate_backend()`` for every backend
    except its own.

    Returns the SQL string and the cursor that executed it.

    NOTE(review): the helper connection is neither committed nor closed here;
    it is left to garbage collection. Acceptable for a test helper, but
    confirm before reusing elsewhere.
    """
    try:
        # For local development... probably.
        pg_conn = psycopg2.connect(
            dbname=name,
            host=host,
            port=port,
        )
    except psycopg2.Error:
        # For GitHub actions.
        """CREATE ROLE postgres LOGIN SUPERUSER PASSWORD '<PASSWORD>';"""
        pg_conn = psycopg2.connect(
            dbname=name,
            host=host,
            port=port,
            user=user,
            password=password,
        )
    close_all_connections = """
    SELECT
        pg_terminate_backend(pid)
    FROM
        pg_stat_activity
    WHERE
        -- don't kill my own connection!
        pid <> pg_backend_pid();
    """
    pg_conn_cursor = pg_conn.cursor()
    pg_conn_cursor.execute(close_all_connections)
    return close_all_connections, pg_conn_cursor
class TestTransaction(TestCase):
    """Tests for the Transaction context manager.

    Verifies commit on success, rollback on error, close on interface errors
    (broken connections), and the mapping of psycopg2 exceptions onto the
    library's persistence exception hierarchy. The wrapped Connection is a
    Mock, so each test asserts which of commit()/rollback()/close() were
    invoked.
    """

    def setUp(self) -> None:
        # Transaction under test, wrapping a mocked Connection.
        self.mock = Mock(Connection(Mock(connection), max_age=None))
        self.t = Transaction(self.mock, commit=True)

    def test_calls_commit_if_error_not_raised_during_transaction(self):
        with self.t:
            pass
        self.mock.commit.assert_called()
        self.mock.rollback.assert_not_called()
        self.mock.close.assert_not_called()

    def test_calls_rollback_if_error_is_raised_during_transaction(self):
        with self.assertRaises(TypeError):
            with self.t:
                raise TypeError
        self.mock.commit.assert_not_called()
        self.mock.rollback.assert_called()
        self.mock.close.assert_not_called()

    def test_calls_close_if_interface_error_is_raised_during_transaction(self):
        # InterfaceError indicates a broken connection, so it must be closed.
        with self.assertRaises(InterfaceError):
            with self.t:
                self.raise_interface_error()
        self.mock.commit.assert_not_called()
        self.mock.rollback.assert_called()
        self.mock.close.assert_called()

    def test_calls_close_if_interface_error_is_raised_during_commit(self):
        self.mock.commit = Mock(
            side_effect=self.raise_interface_error, name="mock commit method"
        )
        with self.assertRaises(InterfaceError):
            with self.t:
                pass
        self.mock.commit.assert_called()
        self.mock.rollback.assert_not_called()
        self.mock.close.assert_called()

    def test_does_not_call_close_if_data_error_is_raised_during_commit(self):
        # DataError does not indicate a broken connection: keep it open.
        self.mock.commit = Mock(
            side_effect=self.raise_data_error, name="mock commit method"
        )
        with self.assertRaises(DataError):
            with self.t:
                pass
        self.mock.commit.assert_called()
        self.mock.rollback.assert_not_called()
        self.mock.close.assert_not_called()

    def test_calls_close_if_interface_error_is_raised_during_rollback(self):
        self.mock.rollback = Mock(
            side_effect=self.raise_interface_error, name="mock rollback method"
        )
        with self.assertRaises(InterfaceError):
            with self.t:
                raise psycopg2.Error
        self.mock.commit.assert_not_called()
        self.mock.rollback.assert_called()
        self.mock.close.assert_called()

    def test_does_not_call_close_if_data_error_is_raised_during_rollback(self):
        self.mock.rollback = Mock(
            side_effect=self.raise_data_error, name="mock rollback method"
        )
        with self.assertRaises(DataError):
            with self.t:
                raise psycopg2.Error
        self.mock.commit.assert_not_called()
        self.mock.rollback.assert_called()
        self.mock.close.assert_not_called()

    def raise_interface_error(self):
        # Helper used as a Mock side_effect.
        raise psycopg2.InterfaceError()

    def raise_data_error(self):
        # Helper used as a Mock side_effect.
        raise psycopg2.DataError()

    def test_converts_errors_raised_in_transactions(self):
        # Each psycopg2 error raised inside the block must surface as the
        # corresponding eventsourcing persistence error.
        errors = [
            (InterfaceError, psycopg2.InterfaceError),
            (DataError, psycopg2.DataError),
            (OperationalError, psycopg2.OperationalError),
            (IntegrityError, psycopg2.IntegrityError),
            (InternalError, psycopg2.InternalError),
            (ProgrammingError, psycopg2.ProgrammingError),
            (NotSupportedError, psycopg2.NotSupportedError),
            (DatabaseError, psycopg2.DatabaseError),
            (PersistenceError, psycopg2.Error),
        ]
        for es_err, psy_err in errors:
            with self.assertRaises(es_err):
                with self.t:
                    raise psy_err
        self.mock.commit.assert_not_called()
        self.mock.rollback.assert_called()
        self.mock.close.assert_called()
class TestPostgresDatastore(TestCase):
    """Integration tests for PostgresDatastore connection management.

    Covers connect failure, transaction lifecycle and idling, explicit and
    timer-driven connection closing, and the pre-ping (stale-connection
    detection) option. Requires a local Postgres server with the
    'eventsourcing' database and user.
    """

    def test_connect_failure_raises_interface_error(self):
        """A bad port makes the first transaction raise InterfaceError."""
        datastore = PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="9876543210",  # bad port
            user="eventsourcing",
            password="eventsourcing",
        )
        with self.assertRaises(InterfaceError):
            datastore.transaction(commit=True)

    def test_transaction(self):
        """Transaction yields a usable connection and idles it afterwards."""
        datastore = PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="<PASSWORD>",
        )
        # Get a transaction.
        transaction = datastore.transaction(commit=False)
        # Check connection is not idle.
        self.assertFalse(transaction.c.is_idle.is_set())
        # Check transaction gives database cursor when used as context manager.
        with transaction as conn:
            with conn.cursor() as c:
                c.execute("SELECT 1")
                self.assertEqual(c.fetchall(), [[1]])
        # Check connection is idle after context manager has exited.
        self.assertTrue(transaction.c.is_idle.wait(timeout=0.1))

    def test_connection_of_transaction_not_used_as_context_manager_also_goes_idle(self):
        """Dropping an unused transaction still returns its connection to idle."""
        datastore = PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="<PASSWORD>",
        )
        # Get a transaction.
        transaction = datastore.transaction(commit=False)
        # Check connection is not idle.
        conn = transaction.c
        self.assertFalse(conn.is_idle.is_set())
        # Delete the transaction context manager before entering.
        print("Testing transaction not used as context manager, expecting exception...")
        del transaction
        # Check connection is idle after garbage collection.
        self.assertTrue(conn.is_idle.wait(timeout=0.1))

    def test_close_connection(self):
        """close_connection() is safe both before and after a connection exists."""
        datastore = PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="<PASSWORD>",
        )
        # Try closing without first creating connection.
        datastore.close_connection()
        # Create a connection.
        with datastore.transaction(commit=False) as conn:
            with conn.cursor() as c:
                c.execute("SELECT 1")
                self.assertEqual(c.fetchall(), [[1]])
        # Try closing after creating connection.
        datastore.close_connection()

    def test_timer_closes_connection(self):
        """With conn_max_age=0 the connection is closed right after use,
        and a new connection can still be created afterwards."""
        datastore = PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="<PASSWORD>",
            conn_max_age=0,
        )
        # Check connection is closed after using transaction.
        transaction = datastore.transaction(commit=False)
        with transaction as conn:
            with conn.cursor() as c:
                c.execute("SELECT 1")
                self.assertEqual(c.fetchall(), [[1]])
        self.assertTrue(transaction.c.is_closing.wait(timeout=0.5))
        # Busy-wait (max ~0.1s) for the close to actually complete.
        for _ in range(1000):
            if transaction.c.is_closed:
                break
            else:
                sleep(0.0001)
        else:
            self.fail("Connection is not closed")
        with self.assertRaises(psycopg2.InterfaceError) as cm:
            transaction.c.cursor()
        self.assertEqual(cm.exception.args[0], "connection already closed")
        # Check closed connection can be recreated and also closed.
        transaction = datastore.transaction(commit=False)
        with transaction as conn:
            with conn.cursor() as c:
                c.execute("SELECT 1")
                self.assertEqual(c.fetchall(), [[1]])
        self.assertTrue(transaction.c.is_closing.wait(timeout=0.5))
        for _ in range(1000):
            if transaction.c.is_closed:
                break
            else:
                sleep(0.0001)
        else:
            self.fail("Connection is not closed")

    def test_pre_ping(self):
        """pre_ping=True replaces server-dropped connections transparently;
        pre_ping=False hands back the stale connection, which then fails."""
        datastore = PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="<PASSWORD>",
            pre_ping=True,
        )
        # Create a connection.
        transaction = datastore.transaction(commit=False)
        pg_conn = transaction.c.c
        self.assertEqual(pg_conn, transaction.c.c)
        # Check the connection works.
        with transaction as conn:
            with conn.cursor() as c:
                c.execute("SELECT 1")
                self.assertEqual(c.fetchall(), [[1]])
        # Close all connections via separate connection.
        pg_close_all_connections()
        # Check the connection doesn't think it's closed.
        self.assertFalse(transaction.c.is_closed)
        # Check we can get a new connection that works.
        transaction = datastore.transaction(commit=False)
        with transaction as conn:
            with conn.cursor() as c:
                c.execute("SELECT 1")
                self.assertEqual(c.fetchall(), [[1]])
        # Check it's actually a different connection.
        self.assertNotEqual(pg_conn, transaction.c.c)
        # Check this doesn't work if we don't use pre_ping.
        datastore = PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="<PASSWORD>",
            pre_ping=False,
        )
        # Create a connection.
        transaction = datastore.transaction(commit=False)
        pg_conn = transaction.c.c
        self.assertEqual(pg_conn, transaction.c.c)
        # Check the connection works.
        with transaction as conn:
            with conn.cursor() as c:
                c.execute("SELECT 1")
                self.assertEqual(c.fetchall(), [[1]])
        # Close all connections via separate connection.
        pg_close_all_connections()
        # Check the connection doesn't think it's closed.
        self.assertFalse(transaction.c.is_closed)
        # Get a stale connection and check it doesn't work.
        transaction = datastore.transaction(commit=False)
        # Check it's the same connection.
        self.assertEqual(pg_conn, transaction.c.c)
        with self.assertRaises(InterfaceError):
            with transaction as conn:
                with conn.cursor() as c:
                    c.execute("SELECT 1")
class TestPostgresAggregateRecorder(AggregateRecorderTestCase):
    """Runs the shared aggregate-recorder suite against Postgres, plus
    retry-after-disconnect scenarios. Requires a local Postgres server.
    """

    def setUp(self) -> None:
        self.datastore = PostgresDatastore(
            "eventsourcing",
            "127.0.0.1",
            "5432",
            "eventsourcing",
            "eventsourcing",
        )
        # Start every test from a clean events table.
        drop_postgres_table(self.datastore, "stored_events")

    def create_recorder(self):
        """Create a recorder with its table, as required by the base suite."""
        recorder = PostgresAggregateRecorder(
            datastore=self.datastore, events_table_name="stored_events"
        )
        recorder.create_table()
        return recorder

    def test_performance(self):
        super().test_performance()

    def test_insert_and_select(self):
        super().test_insert_and_select()

    def test_retry_insert_events_after_closing_connection(self):
        """insert_events() succeeds after the server drops all connections."""
        # Construct the recorder.
        recorder = self.create_recorder()
        # Check we have a connection (from create_table).
        self.assertTrue(self.datastore._connections)
        # Close connections.
        pg_close_all_connections()
        # Write a stored event.
        stored_event1 = StoredEvent(
            originator_id=uuid4(),
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1])

    def test_retry_select_events_after_closing_connection(self):
        """select_events() succeeds after the server drops all connections."""
        # Construct the recorder.
        recorder = self.create_recorder()
        # Check we have a connection (from create_table).
        self.assertTrue(self.datastore._connections)
        # Write a stored event.
        originator_id = uuid4()
        stored_event1 = StoredEvent(
            originator_id=originator_id,
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1])
        # Close connections.
        pg_close_all_connections()
        # Select events.
        recorder.select_events(originator_id)
class TestPostgresAggregateRecorderErrors(TestCase):
    """Error-path tests for PostgresAggregateRecorder: missing tables,
    broken SQL statements, and prepared-statement handling.
    """

    def setUp(self) -> None:
        self.datastore = PostgresDatastore(
            "eventsourcing",
            "127.0.0.1",
            "5432",
            "eventsourcing",
            "eventsourcing",
        )
        self.drop_tables()

    def tearDown(self) -> None:
        self.drop_tables()

    def drop_tables(self):
        # Ensure a clean slate before and after each test.
        drop_postgres_table(self.datastore, "stored_events")

    def create_recorder(self):
        # Note: does NOT create the table — tests do that (or not) themselves.
        return PostgresAggregateRecorder(
            datastore=self.datastore, events_table_name="stored_events"
        )

    def test_create_table_raises_programming_error_when_sql_is_broken(self):
        recorder = self.create_recorder()
        # Mess up the statement.
        recorder.create_table_statements = ["BLAH"]
        with self.assertRaises(ProgrammingError):
            recorder.create_table()

    def test_insert_events_raises_programming_error_when_table_not_created(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Write a stored event without creating the table.
        stored_event1 = StoredEvent(
            originator_id=uuid4(),
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        with self.assertRaises(ProgrammingError):
            recorder.insert_events([stored_event1])

    def test_insert_events_raises_programming_error_when_sql_is_broken(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Create the table.
        recorder.create_table()
        # Write a stored event with broken statement.
        recorder.insert_events_statement = "BLAH"
        stored_event1 = StoredEvent(
            originator_id=uuid4(),
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        with self.assertRaises(ProgrammingError):
            recorder.insert_events([stored_event1])

    def test_select_events_raises_programming_error_when_table_not_created(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Select events without creating the table.
        originator_id = uuid4()
        with self.assertRaises(ProgrammingError):
            recorder.select_events(originator_id=originator_id)

    def test_select_events_raises_programming_error_when_sql_is_broken(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Create the table.
        recorder.create_table()
        # Select events with broken statement.
        recorder.select_events_statement = "BLAH"
        originator_id = uuid4()
        with self.assertRaises(ProgrammingError):
            recorder.select_events(originator_id=originator_id)

    def test_duplicate_prepared_statement_error_is_ignored(self):
        """Re-preparing an already-prepared statement must not raise."""
        # Construct the recorder.
        recorder = self.create_recorder()
        # Create the table.
        recorder.create_table()
        # Check the statement is not prepared.
        statement_name = "select_stored_events"
        conn = self.datastore.get_connection()
        self.assertFalse(conn.is_prepared.get(statement_name))
        # Cause the statement to be prepared.
        recorder.select_events(originator_id=uuid4())
        # Check the statement was prepared.
        conn = self.datastore.get_connection()
        self.assertTrue(conn.is_prepared.get(statement_name))
        # Forget the statement is prepared.
        del conn.is_prepared[statement_name]
        # Should ignore "duplicate prepared statement" error.
        recorder.select_events(originator_id=uuid4())
        # Check the statement was prepared.
        conn = self.datastore.get_connection()
        self.assertTrue(conn.is_prepared.get(statement_name))
class TestPostgresApplicationRecorder(ApplicationRecorderTestCase):
    """Runs the shared application-recorder suite against Postgres, plus
    retry-after-disconnect scenarios for notification queries.
    """

    def setUp(self) -> None:
        self.datastore = PostgresDatastore(
            "eventsourcing",
            "127.0.0.1",
            "5432",
            "eventsourcing",
            "eventsourcing",
        )
        self.drop_tables()

    def tearDown(self) -> None:
        self.drop_tables()

    def drop_tables(self):
        # Ensure a clean slate before and after each test.
        drop_postgres_table(self.datastore, "stored_events")

    def create_recorder(self):
        recorder = PostgresApplicationRecorder(
            self.datastore, events_table_name="stored_events"
        )
        recorder.create_table()
        return recorder

    def close_db_connection(self, *args):
        # Hook used by the base suite's concurrency tests.
        self.datastore.close_connection()

    def test_concurrent_no_conflicts(self):
        super().test_concurrent_no_conflicts()

    def test_concurrent_throughput(self):
        super().test_concurrent_throughput()

    def test_retry_select_notifications_after_closing_connection(self):
        """select_notifications() succeeds after server-side disconnects."""
        # Construct the recorder.
        recorder = self.create_recorder()
        # Check we have a connection (from create_table).
        self.assertTrue(self.datastore._connections)
        # Write a stored event.
        originator_id = uuid4()
        stored_event1 = StoredEvent(
            originator_id=originator_id,
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1])
        # Close connections.
        pg_close_all_connections()
        # Select events.
        recorder.select_notifications(start=1, limit=1)

    def test_retry_max_notification_id_after_closing_connection(self):
        """max_notification_id() succeeds after server-side disconnects."""
        # Construct the recorder.
        recorder = self.create_recorder()
        # Check we have a connection (from create_table).
        self.assertTrue(self.datastore._connections)
        # Write a stored event.
        originator_id = uuid4()
        stored_event1 = StoredEvent(
            originator_id=originator_id,
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1])
        # Close connections.
        pg_close_all_connections()
        # Select events.
        recorder.max_notification_id()
class TestPostgresApplicationRecorderErrors(TestCase):
    """Error-path tests for PostgresApplicationRecorder: notification queries
    against missing tables and broken SQL statements.
    """

    def setUp(self) -> None:
        self.datastore = PostgresDatastore(
            "eventsourcing",
            "127.0.0.1",
            "5432",
            "eventsourcing",
            "eventsourcing",
        )
        self.drop_tables()

    def tearDown(self) -> None:
        self.drop_tables()

    def drop_tables(self):
        # Ensure a clean slate before and after each test.
        drop_postgres_table(self.datastore, "stored_events")

    def create_recorder(self):
        # Note: does NOT create the table — tests do that (or not) themselves.
        return PostgresApplicationRecorder(
            self.datastore, events_table_name="stored_events"
        )

    def test_select_notification_raises_programming_error_when_table_not_created(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Select notifications without creating table.
        with self.assertRaises(ProgrammingError):
            recorder.select_notifications(start=1, limit=1)

    def test_select_notification_raises_programming_error_when_sql_is_broken(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Create table.
        recorder.create_table()
        # Select notifications with broken statement.
        recorder.select_notifications_statement = "BLAH"
        with self.assertRaises(ProgrammingError):
            recorder.select_notifications(start=1, limit=1)

    def test_max_notification_id_raises_programming_error_when_table_not_created(self):
        # Construct the recorder.
        recorder = PostgresApplicationRecorder(
            datastore=self.datastore, events_table_name="stored_events"
        )
        # Select notifications without creating table.
        with self.assertRaises(ProgrammingError):
            recorder.max_notification_id()

    def test_max_notification_id_raises_programming_error_when_sql_is_broken(self):
        # Construct the recorder.
        recorder = PostgresApplicationRecorder(
            datastore=self.datastore, events_table_name="stored_events"
        )
        # Create table.
        recorder.create_table()
        # Select notifications with broken statement.
        recorder.max_notification_id_statement = "BLAH"
        with self.assertRaises(ProgrammingError):
            recorder.max_notification_id()
class TestPostgresProcessRecorder(ProcessRecorderTestCase):
    """Runs the shared process-recorder suite against Postgres, plus a
    retry-after-disconnect scenario for tracking queries.
    """

    def setUp(self) -> None:
        self.datastore = PostgresDatastore(
            "eventsourcing",
            "127.0.0.1",
            "5432",
            "eventsourcing",
            "eventsourcing",
        )
        self.drop_tables()

    def tearDown(self) -> None:
        self.drop_tables()

    def drop_tables(self):
        # Process recorders use an events table AND a tracking table.
        drop_postgres_table(self.datastore, "stored_events")
        drop_postgres_table(self.datastore, "notification_tracking")

    def create_recorder(self):
        recorder = PostgresProcessRecorder(
            datastore=self.datastore,
            events_table_name="stored_events",
            tracking_table_name="notification_tracking",
        )
        recorder.create_table()
        return recorder

    def test_performance(self):
        super().test_performance()

    def test_retry_max_tracking_id_after_closing_connection(self):
        """max_tracking_id() succeeds after server-side disconnects."""
        # Construct the recorder.
        recorder = self.create_recorder()
        # Check we have a connection (from create_table).
        self.assertTrue(self.datastore._connections)
        # Write a stored event.
        originator_id = uuid4()
        stored_event1 = StoredEvent(
            originator_id=originator_id,
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1], tracking=Tracking("upstream", 1))
        # Close connections.
        pg_close_all_connections()
        # Select events.
        notification_id = recorder.max_tracking_id("upstream")
        self.assertEqual(notification_id, 1)
class TestPostgresProcessRecorderErrors(TestCase):
    """Error-path tests for PostgresProcessRecorder: tracking queries against
    missing tables and broken SQL statements.
    """

    def setUp(self) -> None:
        self.datastore = PostgresDatastore(
            "eventsourcing",
            "127.0.0.1",
            "5432",
            "eventsourcing",
            "eventsourcing",
        )
        self.drop_tables()

    def tearDown(self) -> None:
        self.drop_tables()

    def drop_tables(self):
        # Process recorders use an events table AND a tracking table.
        drop_postgres_table(self.datastore, "stored_events")
        drop_postgres_table(self.datastore, "notification_tracking")

    def create_recorder(self):
        # Note: does NOT create the tables — tests do that (or not) themselves.
        return PostgresProcessRecorder(
            datastore=self.datastore,
            events_table_name="stored_events",
            tracking_table_name="notification_tracking",
        )

    def test_max_tracking_id_raises_programming_error_when_table_not_created(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Get max tracking ID without creating table.
        with self.assertRaises(ProgrammingError):
            recorder.max_tracking_id("upstream")

    def test_max_tracking_id_raises_programming_error_when_sql_is_broken(self):
        # Construct the recorder.
        recorder = self.create_recorder()
        # Create table.
        recorder.create_table()
        # Mess up the SQL statement.
        recorder.max_tracking_id_statement = "BLAH"
        # Get max tracking ID with broken statement.
        with self.assertRaises(ProgrammingError):
            recorder.max_tracking_id("upstream")
class TestPostgresInfrastructureFactory(InfrastructureFactoryTestCase):
def test_create_application_recorder(self):
super().test_create_application_recorder()
def expected_factory_class(self):
return Factory
def expected_aggregate_recorder_class(self):
return PostgresAggregateRecorder
def expected_application_recorder_class(self):
return PostgresApplicationRecorder
def expected_process_recorder_class(self):
return PostgresProcessRecorder
def setUp(self) -> None:
os.environ[InfrastructureFactory.TOPIC] = get_topic(Factory)
os.environ[Factory.POSTGRES_DBNAME] = "eventsourcing"
os.environ[Factory.POSTGRES_HOST] = "127.0.0.1"
os.environ[Factory.POSTGRES_PORT] = "5432"
os.environ[Factory.POSTGRES_USER] = "eventsourcing"
os.environ[Factory.POSTGRES_PASSWORD] = "<PASSWORD>"
if Factory.POSTGRES_CONN_MAX_AGE in os.environ:
del os.environ[Factory.POSTGRES_CONN_MAX_AGE]
if Factory.POSTGRES_PRE_PING in os.environ:
del os.environ[Factory.POSTGRES_PRE_PING]
if Factory.POSTGRES_LOCK_TIMEOUT in os.environ:
del os.environ[Factory.POSTGRES_LOCK_TIMEOUT]
if Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT in os.environ:
del os.environ[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT]
self.drop_tables()
super().setUp()
def tearDown(self) -> None:
self.drop_tables()
if Factory.POSTGRES_DBNAME in os.environ:
del os.environ[Factory.POSTGRES_DBNAME]
if Factory.POSTGRES_HOST in os.environ:
del os.environ[Factory.POSTGRES_HOST]
if Factory.POSTGRES_PORT in os.environ:
del os.environ[Factory.POSTGRES_PORT]
if Factory.POSTGRES_USER in os.environ:
del os.environ[Factory.POSTGRES_USER]
if Factory.POSTGRES_PASSWORD in os.environ:
del os.environ[Factory.POSTGRES_PASSWORD]
if Factory.POSTGRES_CONN_MAX_AGE in os.environ:
del os.environ[Factory.POSTGRES_CONN_MAX_AGE]
if Factory.POSTGRES_PRE_PING in os.environ:
del os.environ[Factory.POSTGRES_PRE_PING]
if Factory.POSTGRES_LOCK_TIMEOUT in os.environ:
del os.environ[Factory.POSTGRES_LOCK_TIMEOUT]
if Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT in os.environ:
del os.environ[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT]
super().tearDown()
def drop_tables(self):
datastore = PostgresDatastore(
"eventsourcing",
"127.0.0.1",
"5432",
"eventsourcing",
"eventsourcing",
)
drop_postgres_table(datastore, "testcase_events")
drop_postgres_table(datastore, "testcase_tracking")
def test_conn_max_age_is_set_to_empty_string(self):
os.environ[Factory.POSTGRES_CONN_MAX_AGE] = ""
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.conn_max_age, None)
def test_conn_max_age_is_set_to_number(self):
os.environ[Factory.POSTGRES_CONN_MAX_AGE] = "0"
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.conn_max_age, 0)
def test_lock_timeout_is_zero_by_default(self):
self.assertTrue(Factory.POSTGRES_LOCK_TIMEOUT not in os.environ)
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.lock_timeout, 0)
os.environ[Factory.POSTGRES_LOCK_TIMEOUT] = ""
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.lock_timeout, 0)
def test_lock_timeout_is_nonzero(self):
os.environ[Factory.POSTGRES_LOCK_TIMEOUT] = "1"
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.lock_timeout, 1)
def test_idle_in_transaction_session_timeout_is_zero_by_default(self):
self.assertTrue(
Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT not in os.environ
)
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.idle_in_transaction_session_timeout, 0)
os.environ[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT] = ""
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.idle_in_transaction_session_timeout, 0)
def test_idle_in_transaction_session_timeout_is_nonzero(self):
os.environ[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT] = "1"
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.idle_in_transaction_session_timeout, 1)
def test_pre_ping_off_by_default(self):
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.pre_ping, False)
def test_pre_ping_off(self):
os.environ[Factory.POSTGRES_PRE_PING] = "off"
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.pre_ping, False)
def test_pre_ping_on(self):
os.environ[Factory.POSTGRES_PRE_PING] = "on"
self.factory = Factory("TestCase", os.environ)
self.assertEqual(self.factory.datastore.pre_ping, True)
def test_environment_error_raised_when_conn_max_age_not_a_float(self):
os.environ[Factory.POSTGRES_CONN_MAX_AGE] = "abc"
with self.assertRaises(EnvironmentError) as cm:
self.factory = Factory("TestCase", os.environ)
self.assertEqual(
cm.exception.args[0],
"Postgres environment value for key 'POSTGRES_CONN_MAX_AGE' "
"is invalid. If set, a float or empty string is expected: 'abc'",
)
def test_environment_error_raised_when_lock_timeout_not_an_integer(self):
    """A non-integer POSTGRES_LOCK_TIMEOUT must raise EnvironmentError."""
    os.environ[Factory.POSTGRES_LOCK_TIMEOUT] = "abc"
    with self.assertRaises(EnvironmentError) as context:
        self.factory = Factory("TestCase", os.environ)
    expected = (
        "Postgres environment value for key 'POSTGRES_LOCK_TIMEOUT' "
        "is invalid. If set, an integer or empty string is expected: 'abc'"
    )
    self.assertEqual(context.exception.args[0], expected)
def test_environment_error_raised_when_idle_in_transaction_session_timeout_not_an_integer(
    self,
):
    """A non-integer idle-in-transaction timeout must raise EnvironmentError."""
    os.environ[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT] = "abc"
    with self.assertRaises(EnvironmentError) as context:
        self.factory = Factory("TestCase", os.environ)
    expected = (
        "Postgres environment value for key "
        "'POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT' "
        "is invalid. If set, an integer or empty string is expected: 'abc'"
    )
    self.assertEqual(context.exception.args[0], expected)
def test_environment_error_raised_when_dbname_missing(self):
    """Construction must fail clearly when POSTGRES_DBNAME is absent."""
    del os.environ[Factory.POSTGRES_DBNAME]
    with self.assertRaises(EnvironmentError) as context:
        self.factory = InfrastructureFactory.construct("TestCase")
    expected = (
        "Postgres database name not found in environment "
        "with key 'POSTGRES_DBNAME'"
    )
    self.assertEqual(context.exception.args[0], expected)
def test_environment_error_raised_when_dbhost_missing(self):
    """Construction must fail clearly when POSTGRES_HOST is absent."""
    del os.environ[Factory.POSTGRES_HOST]
    with self.assertRaises(EnvironmentError) as context:
        self.factory = InfrastructureFactory.construct("TestCase")
    self.assertEqual(
        context.exception.args[0],
        "Postgres host not found in environment with key 'POSTGRES_HOST'",
    )
def test_environment_error_raised_when_user_missing(self):
    """Construction must fail clearly when POSTGRES_USER is absent."""
    del os.environ[Factory.POSTGRES_USER]
    with self.assertRaises(EnvironmentError) as context:
        self.factory = InfrastructureFactory.construct("TestCase")
    self.assertEqual(
        context.exception.args[0],
        "Postgres user not found in environment with key 'POSTGRES_USER'",
    )
def test_environment_error_raised_when_password_missing(self):
    """Construction must fail clearly when POSTGRES_PASSWORD is absent."""
    del os.environ[Factory.POSTGRES_PASSWORD]
    with self.assertRaises(EnvironmentError) as context:
        self.factory = InfrastructureFactory.construct("TestCase")
    self.assertEqual(
        context.exception.args[0],
        "Postgres password not found in environment with key 'POSTGRES_PASSWORD'",
    )
# Remove the imported base test cases from this module's namespace
# (standard pattern so that test discovery does not also collect and
# run the abstract base cases themselves).
del AggregateRecorderTestCase
del ApplicationRecorderTestCase
del ProcessRecorderTestCase
del InfrastructureFactoryTestCase
def drop_postgres_table(datastore: PostgresDatastore, table_name):
    """Best-effort drop of *table_name*; errors (e.g. no such table) are ignored."""
    try:
        with datastore.transaction(commit=True) as transaction:
            with transaction.c.cursor() as cursor:
                cursor.execute(f"DROP TABLE {table_name};")
    except PersistenceError:
        pass
|
wdomitrz/input_device_handler | input_device_handler.py | <filename>input_device_handler.py
#!/usr/bin/env python3
import json
from os import path
from evdev import InputDevice, InputEvent, categorize, ecodes
from subprocess import Popen
CONFIG_FILE = path.expanduser('~/.config/input_device_handler/config.json')
class DeviceHandler:
    """Dispatch key events from an evdev input device to configured actions."""

    def __init__(self, device_options: dict, bindings: dict):
        self.device = InputDevice(device_options['path'])
        self.bindings = bindings
        # Grab the device exclusively if requested, so that no other
        # client receives its events.
        if device_options.get('exclusive'):
            self.device.grab()

    def run(self):
        """Block forever, dispatching events as they arrive."""
        for event in self.device.read_loop():
            self.process_event(event)

    def process_event(self, event: InputEvent):
        """Handle one event if it is a key event with a configured binding."""
        if event.type != ecodes.EV_KEY:
            return
        key_event = categorize(event)
        if key_event.keycode not in self.bindings:
            return
        action = self.bindings[key_event.keycode]
        # Fire on both the initial press and auto-repeat holds.
        if key_event.keystate in [key_event.key_down, key_event.key_hold]:
            self.perform_action(action)

    def perform_action(self, action: dict):
        """Run the configured command, or synthesize a key press via xdotool."""
        command = None
        if 'cmd' in action:
            command = action['cmd']
        elif 'key' in action:
            command = ['xdotool', 'key', action['key']]
        if command is not None:
            Popen(command)
if __name__ == '__main__':
    # Load the JSON config (device options + key bindings) and start
    # the blocking event loop.
    with open(CONFIG_FILE, 'r') as handle:
        settings = json.load(handle)
    DeviceHandler(**settings).run()
|
ppanero/invenio-rdm-extension-demo | invenio_rdm_extension_demo/views.py | <filename>invenio_rdm_extension_demo/views.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Invenio RDM Extension Demo is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Invenio module to showcase how to add an extension to InvenioRDM."""
from __future__ import absolute_import, print_function
from flask import Blueprint, render_template
from flask_babelex import gettext as _
# Blueprint under which the demo view below is exposed.
blueprint = Blueprint(
    'invenio_rdm_extension_demo',
    __name__
)
@blueprint.route("/rdm-ext-demo")
def index():
    """Serve the demo landing page as plain text (no template needed)."""
    return 'RDM Extension Demo!'
|
AlgebraicWolf/CAMNS | camns.py | import numpy as np
import cvxpy as cp
def is_extreme_point(C, alpha, d, tol):
    """
    Check whether alpha is an extreme point of the polyhedron
    {alpha : C @ alpha + d >= 0}.

    Arguments:
    ----------
    C : np.ndarray of shape (L, D)
    d : np.ndarray of shape (L, 1)
        Pair characterizing the affine set.
    alpha : np.ndarray of shape (D, 1) or None
        Point to be tested.  ``None`` (e.g. the value of a failed LP
        solve) is never an extreme point; previously this case crashed.
    tol : float
        Numerical tolerance for the activity and rank tests.

    Returns:
    --------
    bool
        Whether the point is an extreme point.
    """
    # A failed solver can hand us None instead of a point.
    if alpha is None:
        return False
    D = C.shape[1]
    # Rows of C whose constraints are active (tight) at alpha.
    active = np.all(np.abs(C @ alpha + d) < tol, axis=1)
    T = C[active, :]
    if T.shape[0] == 0:
        return False
    # alpha is extreme iff the active constraints pin down all D dimensions.
    return np.linalg.matrix_rank(T, tol=tol) == D
def CAMNS_LP(xs, N, lptol=1e-8, exttol=1e-8, verbose=True):
    """
    Solve CAMNS problem via reduction to Linear Programming

    Arguments:
    ----------
    xs : np.ndarray of shape (M, L)
        Observation matrix consisting of M observations
    N : int
        Number of sources to recover
    lptol : float
        Tolerance for Linear Programming problem
    exttol : float
        Tolerance for extreme point check
    verbose : bool
        Whether to print information about progress

    Returns:
    --------
    np.ndarray of shape (N, L)
        Estimated source matrix
    """
    M, L = xs.shape  # Extract dimensions
    xs = xs.T  # from here on, observations are columns
    # Affine-set parametrization {C @ alpha + d}: d is the mean observation,
    # C an orthonormal basis (left singular vectors) of the centered data.
    d = np.mean(xs, axis=1, keepdims=True)
    C, _, _ = np.linalg.svd(xs - d, full_matrices=False)
    C = C[:, :(N - 1)]  # Truncate the redundant one
    # Step 1. Preparing variables
    B = np.diag(np.ones(L))  # projector; updated as sources are found (Step 6)
    l = 0  # Number of extracted sources
    S = np.zeros((0, L))  # Source matrix
    epoch = 1
    while l < N:
        if verbose:
            print("Epoch {}:".format(epoch))
            print("=" * 58)
        epoch += 1
        # Step 2. Choosing random vector and generating direction r
        w = np.random.randn(L)
        r = B @ w
        # Step 3. Solving linear programming problems using CVXPY:
        # minimize / maximize r^T s over the feasible set s = C@alpha + d >= 0
        alpha1_star = cp.Variable(C.shape[1])
        alpha2_star = cp.Variable(C.shape[1])
        problem1 = cp.Problem(cp.Minimize(
            r.T @ (C @ alpha1_star)), [C @ alpha1_star + d.flatten() >= 0])
        problem2 = cp.Problem(cp.Maximize(
            r.T @ (C @ alpha2_star)), [C @ alpha2_star + d.flatten() >= 0])
        if verbose:
            print("\tLaunching LP solver 1")
        p_star = problem1.solve()
        if verbose:
            print("\tLaunching LP solver 2")
        q_star = problem2.solve()
        if verbose:
            print("\tLP solvers have finished, checking results")
        alpha1_star = np.expand_dims(alpha1_star.value, axis=1)
        alpha2_star = np.expand_dims(alpha2_star.value, axis=1)
        s1 = C @ alpha1_star + d
        s2 = C @ alpha2_star + d
        # Step 4. Checking results (with augmentations from MATLAB implementation)
        if l == 0:
            # First epoch: both optima are candidate extreme points.
            if is_extreme_point(C, alpha1_star, d, exttol):
                S = np.append(S, [s1.squeeze()], axis=0)
            if is_extreme_point(C, alpha2_star, d, exttol):
                S = np.append(S, [s2.squeeze()], axis=0)
        else:
            # Later epochs: keep an optimum only if its normalized objective
            # is non-negligible (per the MATLAB implementation).
            if np.abs(p_star) / (np.linalg.norm(r) * np.linalg.norm(s1)) >= lptol:
                if is_extreme_point(C, alpha1_star, d, exttol):
                    S = np.append(S, [s1.squeeze()], axis=0)
            if np.abs(q_star) / (np.linalg.norm(r) * np.linalg.norm(s2)) >= lptol:
                if is_extreme_point(C, alpha2_star, d, exttol):
                    S = np.append(S, [s2.squeeze()], axis=0)
        # Step 5. Updating l
        l = S.shape[0]
        if verbose:
            print("\tRetrieved {}/{} sources\n".format(l, N))
        # Step 6. Updating B: project out the subspace spanned by found sources
        Q1, R1 = np.linalg.qr(S.T)
        B = np.diag(np.ones(L)) - Q1 @ Q1.T
        # Step 7 is kinda implicit, as it is hidden in the loop condition
    # Yay, we're done!
    return S
|
LinuxIsCool/dotfiles | sdk/cpp/ycm_extra_conf.py | import subprocess as sp
from sys import version_info
# Detect the installed GCC version so the matching libstdc++ include
# directory can be added to FLAGS below.  subprocess.run only exists on
# Python 3, hence the version branch.
if version_info > (3, 0):
    gcc_version = sp.run(['gcc', '-dumpversion'], stdout=sp.PIPE).stdout.decode('utf-8').strip()
else:
    gcc_version = sp.check_output(['gcc', '-dumpversion']).strip()

# Compiler flags handed to YouCompleteMe for every file of this project.
FLAGS = [
    '-Wall',
    '-Wextra',
    '-Wno-long-long',
    '-Wno-variadic-macros',
    '-fexceptions',
    '-DNDEBUG',
    '-std=c++17',
    '-xc++',
    # For linux libc++
    '-isystem', '/usr/include/c++/v1',
    # For linux stdlibc++
    '-isystem', '/usr/include/c++/{}'.format(gcc_version),
    # For windows
    # '-isystem', 'c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\include',
    # Project include folder
    '-I', 'include',
    # C headers
    '-isystem', '/usr/include',
    '-isystem', '/usr/local/include',
    # Use libc++/libstdc++
    # '-stdlib=libc++',
    # '-stdlib=libstdc++',
]
def FlagsForFile(filename, **kwargs):
    """YouCompleteMe entry point: the compile flags for *filename* (same for all files)."""
    return {'flags': FLAGS}
|
MohawkTSDB/mohawk | src/alerts/examples/alert-buffer.py | #!/usr/bin/env python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class AlertHandler(BaseHTTPRequestHandler):
    """Accept alert webhooks via POST and dump their payload to stdout."""

    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        # Print the raw alert body so changes can be inspected in the log.
        print(self.rfile.read(content_length))
        self.send_response(200)
        self.end_headers()
def run():
    """Serve the alert webhook endpoint on all interfaces, port 9099."""
    server = HTTPServer(('0.0.0.0', 9099), AlertHandler)
    print('Starting httpd...')
    server.serve_forever()
if __name__ == "__main__":
    # Start the server only when executed directly as a script.
    run()
|
gaybro8777/ApplicationInsights-LocalForwarder | examples/opencensus/python-app/app/views.py | <reponame>gaybro8777/ApplicationInsights-LocalForwarder
from django.http import HttpResponse
from django.shortcuts import render
from opencensus.trace import config_integration
from opencensus.trace.exporters.ocagent import trace_exporter
from opencensus.trace import tracer as tracer_module
from opencensus.trace.propagation.trace_context_http_header_format import TraceContextPropagator
from opencensus.trace.exporters.transports.background_thread \
import BackgroundThreadTransport
import time
import os
import requests
# OpenCensus integrations to enable (outgoing httplib calls are traced).
INTEGRATIONS = ['httplib']

# Name under which this service's spans are reported.
service_name = os.getenv('SERVICE_NAME', 'python-service')

# Register the integrations with a tracer that exports spans via the
# OC agent trace exporter on a background thread, propagating context
# in the W3C Trace-Context header format.
config_integration.trace_integrations(INTEGRATIONS, tracer=tracer_module.Tracer(
    exporter=trace_exporter.TraceExporter(
        service_name=service_name,
        endpoint=os.getenv('OCAGENT_TRACE_EXPORTER_ENDPOINT'),
        transport=BackgroundThreadTransport),
    propagator=TraceContextPropagator()))
def call(request):
    """Hit the downstream Go service, then answer with a greeting."""
    # NOTE: the httplib trace integration configured above is expected to
    # propagate the trace context on this outgoing call.
    requests.get("http://go-app:50030/call")
    greeting = "hello world from " + service_name
    return HttpResponse(greeting)
|
MatheusJCastro/astropy_displayFITSfile | open_fits.py | <reponame>MatheusJCastro/astropy_displayFITSfile<gh_stars>0
###########################################
# Display .FITS file #
# <NAME> #
# Version 2.1 #
# Last Modification: 05/04/2020 #
###########################################
# learn more at: http://learn.astropy.org/FITS-images.html
# example for ZScale: https://qiita.com/phyblas/items/87667c250b29e195f7fb
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from matplotlib.colors import LogNorm
from astropy.visualization import ZScaleInterval, ImageNormalize, MinMaxInterval, PowerStretch
image = 'm31_proc.fits'  # name of the FITS file to display

image_file = fits.open(image)
image_data = image_file[0].data  # extension 0: normally the image itself

# Extension 1, when present, holds the pixel mask from data reduction;
# `mask` doubles as the number of subplots drawn below.
if len(image_file) > 1:
    image_mask = image_file[1].data
    mask = 3
else:
    mask = 2

head = image_file[0].header["DATE"]  # example: read a single header card
print(head)

head = np.array(repr(image_file[0].header))  # example: dump the whole header
print(head)
print(type(head))
np.savetxt("Header_{}.txt".format(image[:-5]), [head], fmt="%s")  # save header to a .txt file

# BUG FIX: HDUList.info() prints its summary itself and returns None, so
# the original print(image_file.info()) emitted a spurious "None" line.
image_file.info()
image_file.close()
# NOTE(review): astropy opens FITS files memory-mapped by default; relying
# on image_data after close() works here but is fragile — consider moving
# close() after the statistics if this ever fails.

print('Min:', np.min(image_data))    # min of image
print('Max:', np.max(image_data))    # max of image
print('Mean:', np.mean(image_data))  # mean of image
print('Stdev:', np.std(image_data))  # standard deviation of image

plt.figure(figsize=(12, 4))

plt.subplot(1, mask, 1)
plt.imshow(image_data, cmap='gray', origin='lower')  # default MinMax interval
plt.title("Primary")
# plt.colorbar()  # uncomment to add a color bar to this subplot

# OPTIONS FOR DISPLAY IMAGES
# norm=LogNorm() --> display in a logarithmic color scale
# norm=ImageNormalize(image_data, interval=ZScaleInterval()) --> display in zscale
plt.subplot(1, mask, 2)
plt.imshow(image_data, cmap='gray', origin='lower', norm=ImageNormalize(image_data, interval=ZScaleInterval()))
# , stretch=PowerStretch(5)))  # other stretches go inside ImageNormalize()
plt.title("Zscale")

if mask == 3:
    plt.subplot(1, mask, 3)  # was subplot(133); same cell, consistent form
    plt.imshow(image_mask, cmap='gray', origin='lower')  # the pixel mask
    plt.title("Masked Pixels")

plt.show()
|
nevalenny/WyliePy | Wylie.py | # -*- coding: utf-8 -*-
# pylint: disable=too-many-function-args
from __future__ import print_function
import re
# This Python package implements the conversion between Unicode Tibetan text, and
# Wylie (EWTS) transliteration.
# It is based on the equivalent Java module, found at
# http://www.thlib.org/reference/transliteration/wyconverter.php
# and includes numerous bugfixes for cases for which the original tool was failing
#
# The Extended Wylie Transliteration System is documented at:
# http://www.thlib.org/reference/transliteration/#essay=/thl/ewts/
class Wylie(object):
    # various options for Wylie conversion
    check = bool()            # do basic validity checking of the input
    check_strict = bool()     # stricter checking; requires check
    print_warnings = bool()   # print warnings while converting
    fix_spacing = bool()      # normalize spacing in the output

    # constant hashes and sets to help with the conversion
    # (class-level; filled in by initHashes())
    m_consonant = {}          # Wylie consonant => Unicode letter
    m_subjoined = {}          # Wylie consonant => subjoined Unicode letter
    m_vowel = {}              # Wylie vowel => Unicode vowel sign
    m_final_uni = {}          # Wylie final symbol => Unicode
    m_final_class = {}        # Wylie final symbol => its class
    m_other = {}              # other stand-alone symbols => Unicode
    m_ambiguous_wylie = {}    # ambiguous syllable => disambiguated Wylie
    m_tib_vowel_long = {}     # short vowel => long (Skt) vowel
    m_tib_caret = {}          # letters modified by a caret (^)
    m_tib_top = {}            # Unicode top letter => Wylie
    m_tib_subjoined = {}      # Unicode subjoined letter => Wylie
    m_tib_vowel = {}          # Unicode vowel sign => Wylie
    m_tib_final_wylie = {}    # Unicode final symbol => Wylie
    m_tib_final_class = {}    # Unicode final symbol => its class
    m_tib_other = {}          # other Unicode symbols => Wylie
    m_ambiguous_key = {}      # ambiguous syllable => root letter index
    m_tokens_start = {}       # first letter => max token length starting with it
    m_special = []            # special characters flagged out of context
    m_suffixes = []           # valid suffix letters
    m_tib_stacks = []         # consonant stacks needing no '+' in Wylie
    m_tokens = []             # multi-letter tokens for the tokenizer
    m_superscripts = {}       # superscript => letters/stacks allowed below
    m_subscripts = {}         # subscript => letters/stacks allowed above
    m_prefixes = {}           # prefix => consonants/stacks allowed after
    m_suff2 = {}              # second suffix => letters allowed before

    # initialize all the hashes with the correspondences between Wylie and Unicode.
    # In this Python port the 'static section' of the Java original is replaced
    # by an explicit call from initWylie().
@classmethod
def initHashes(self):
tmpSet = None
# *** Wylie to Unicode mappings ***
# list of wylie consonant => unicode
self.m_consonant = {
"k": u"\u0f40",
"kh": u"\u0f41",
"g": u"\u0f42",
"gh": u"\u0f42\u0fb7",
"g+h": u"\u0f42\u0fb7",
"ng": u"\u0f44",
"c": u"\u0f45",
"ch": u"\u0f46",
"j": u"\u0f47",
"ny": u"\u0f49",
"T": u"\u0f4a",
"-t": u"\u0f4a",
"Th": u"\u0f4b",
"-th": u"\u0f4b",
"D": u"\u0f4c",
"-d": u"\u0f4c",
"Dh": u"\u0f4c\u0fb7",
"D+h": u"\u0f4c\u0fb7",
"-dh": u"\u0f4c\u0fb7",
"-d+h": u"\u0f4c\u0fb7",
"N": u"\u0f4e",
"-n": u"\u0f4e",
"t": u"\u0f4f",
"th": u"\u0f50",
"d": u"\u0f51",
"dh": u"\u0f51\u0fb7",
"d+h": u"\u0f51\u0fb7",
"n": u"\u0f53",
"p": u"\u0f54",
"ph": u"\u0f55",
"b": u"\u0f56",
"bh": u"\u0f56\u0fb7",
"b+h": u"\u0f56\u0fb7",
"m": u"\u0f58",
"ts": u"\u0f59",
"tsh": u"\u0f5a",
"dz": u"\u0f5b",
"dzh": u"\u0f5b\u0fb7",
"dz+h": u"\u0f5b\u0fb7",
"w": u"\u0f5d",
"zh": u"\u0f5e",
"z": u"\u0f5f",
"'": u"\u0f60",
u"\u2018": u"\u0f60",
# typographic quotes
u"\u2019": u"\u0f60",
"y": u"\u0f61",
"r": u"\u0f62",
"l": u"\u0f63",
"sh": u"\u0f64",
"Sh": u"\u0f65",
"-sh": u"\u0f65",
"s": u"\u0f66",
"h": u"\u0f67",
"W": u"\u0f5d",
"Y": u"\u0f61",
"R": u"\u0f6a",
"f": u"\u0f55\u0f39",
"v": u"\u0f56\u0f39"
}
# subjoined letters
self.m_subjoined = {
"k": u"\u0f90",
"kh": u"\u0f91",
"g": u"\u0f92",
"gh": u"\u0f92\u0fb7",
"g+h": u"\u0f92\u0fb7",
"ng": u"\u0f94",
"c": u"\u0f95",
"ch": u"\u0f96",
"j": u"\u0f97",
"ny": u"\u0f99",
"T": u"\u0f9a",
"-t": u"\u0f9a",
"Th": u"\u0f9b",
"-th": u"\u0f9b",
"D": u"\u0f9c",
"-d": u"\u0f9c",
"Dh": u"\u0f9c\u0fb7",
"D+h": u"\u0f9c\u0fb7",
"-dh": u"\u0f9c\u0fb7",
"-d+h": u"\u0f9c\u0fb7",
"N": u"\u0f9e",
"-n": u"\u0f9e",
"t": u"\u0f9f",
"th": u"\u0fa0",
"d": u"\u0fa1",
"dh": u"\u0fa1\u0fb7",
"d+h": u"\u0fa1\u0fb7",
"n": u"\u0fa3",
"p": u"\u0fa4",
"ph": u"\u0fa5",
"b": u"\u0fa6",
"bh": u"\u0fa6\u0fb7",
"b+h": u"\u0fa6\u0fb7",
"m": u"\u0fa8",
"ts": u"\u0fa9",
"tsh": u"\u0faa",
"dz": u"\u0fab",
"dzh": u"\u0fab\u0fb7",
"dz+h": u"\u0fab\u0fb7",
"w": u"\u0fad",
"zh": u"\u0fae",
"z": u"\u0faf",
"'": u"\u0fb0",
u"\u2018": u"\u0fb0",
# typographic quotes
u"\u2019": u"\u0fb0",
"y": u"\u0fb1",
"r": u"\u0fb2",
"l": u"\u0fb3",
"sh": u"\u0fb4",
"Sh": u"\u0fb5",
"-sh": u"\u0fb5",
"s": u"\u0fb6",
"h": u"\u0fb7",
"a": u"\u0fb8",
"W": u"\u0fba",
"Y": u"\u0fbb",
"R": u"\u0fbc"
}
# vowels
self.m_vowel = {
"a": u"\u0f68",
"A": u"\u0f71",
"i": u"\u0f72",
"I": u"\u0f71\u0f72",
"u": u"\u0f74",
"U": u"\u0f71\u0f74",
"e": u"\u0f7a",
"ai": u"\u0f7b",
"o": u"\u0f7c",
"au": u"\u0f7d",
"-i": u"\u0f80",
"-I": u"\u0f71\u0f80"
}
# final symbols to unicode
self.m_final_uni = {
"M": u"\u0f7e",
"~M`": u"\u0f82",
"~M": u"\u0f83",
"X": u"\u0f37",
"~X": u"\u0f35",
"H": u"\u0f7f",
"?": u"\u0f84",
"^": u"\u0f39"
}
# final symbols organized by class
self.m_final_class = {
"M": "M",
"~M`": "M",
"~M": "M",
"X": "X",
"~X": "X",
"H": "H",
"?": "?",
"^": "^"
}
# other stand-alone symbols
self.m_other = {
"0": u"\u0f20",
"1": u"\u0f21",
"2": u"\u0f22",
"3": u"\u0f23",
"4": u"\u0f24",
"5": u"\u0f25",
"6": u"\u0f26",
"7": u"\u0f27",
"8": u"\u0f28",
"9": u"\u0f29",
" ": u"\u0f0b",
"*": u"\u0f0c",
"/": u"\u0f0d",
"//": u"\u0f0e",
";": u"\u0f0f",
"|": u"\u0f11",
"!": u"\u0f08",
":": u"\u0f14",
"_": " ",
"=": u"\u0f34",
"<": u"\u0f3a",
">": u"\u0f3b",
"(": u"\u0f3c",
")": u"\u0f3d",
"@": u"\u0f04",
"#": u"\u0f05",
"$": u"\u0f06",
"%": u"\u0f07"
}
# special characters: flag those if they occur out of context
self.m_special = []
self.m_special.append(".")
self.m_special.append("+")
self.m_special.append("-")
self.m_special.append("~")
self.m_special.append("^")
self.m_special.append("?")
self.m_special.append("`")
self.m_special.append("]")
# superscripts: hashmap of superscript => set of letters or stacks
# below
self.m_superscripts = {"r": [], "l": [], "s": []}
tmpSet = []
self.m_superscripts["r"].append("k")
self.m_superscripts["r"].append("g")
self.m_superscripts["r"].append("ng")
self.m_superscripts["r"].append("j")
self.m_superscripts["r"].append("ny")
self.m_superscripts["r"].append("t")
self.m_superscripts["r"].append("d")
self.m_superscripts["r"].append("n")
self.m_superscripts["r"].append("b")
self.m_superscripts["r"].append("m")
self.m_superscripts["r"].append("ts")
self.m_superscripts["r"].append("dz")
self.m_superscripts["r"].append("k+y")
self.m_superscripts["r"].append("g+y")
self.m_superscripts["r"].append("m+y")
self.m_superscripts["r"].append("b+w")
self.m_superscripts["r"].append("ts+w")
self.m_superscripts["r"].append("g+w")
tmpSet = []
self.m_superscripts["l"].append("k")
self.m_superscripts["l"].append("g")
self.m_superscripts["l"].append("ng")
self.m_superscripts["l"].append("c")
self.m_superscripts["l"].append("j")
self.m_superscripts["l"].append("t")
self.m_superscripts["l"].append("d")
self.m_superscripts["l"].append("p")
self.m_superscripts["l"].append("b")
self.m_superscripts["l"].append("h")
self.m_superscripts["s"].append("k")
self.m_superscripts["s"].append("g")
self.m_superscripts["s"].append("ng")
self.m_superscripts["s"].append("ny")
self.m_superscripts["s"].append("t")
self.m_superscripts["s"].append("d")
self.m_superscripts["s"].append("n")
self.m_superscripts["s"].append("p")
self.m_superscripts["s"].append("b")
self.m_superscripts["s"].append("m")
self.m_superscripts["s"].append("ts")
self.m_superscripts["s"].append("k+y")
self.m_superscripts["s"].append("g+y")
self.m_superscripts["s"].append("p+y")
self.m_superscripts["s"].append("b+y")
self.m_superscripts["s"].append("m+y")
self.m_superscripts["s"].append("k+r")
self.m_superscripts["s"].append("g+r")
self.m_superscripts["s"].append("p+r")
self.m_superscripts["s"].append("b+r")
self.m_superscripts["s"].append("m+r")
self.m_superscripts["s"].append("n+r")
# subscripts => set of letters above
self.m_subscripts = {"y": [], "r": [], "l": [], "w": []}
self.m_subscripts["y"].append("k")
self.m_subscripts["y"].append("kh")
self.m_subscripts["y"].append("g")
self.m_subscripts["y"].append("p")
self.m_subscripts["y"].append("ph")
self.m_subscripts["y"].append("b")
self.m_subscripts["y"].append("m")
self.m_subscripts["y"].append("r+k")
self.m_subscripts["y"].append("r+g")
self.m_subscripts["y"].append("r+m")
self.m_subscripts["y"].append("s+k")
self.m_subscripts["y"].append("s+g")
self.m_subscripts["y"].append("s+p")
self.m_subscripts["y"].append("s+b")
self.m_subscripts["y"].append("s+m")
self.m_subscripts["r"].append("k")
self.m_subscripts["r"].append("kh")
self.m_subscripts["r"].append("g")
self.m_subscripts["r"].append("t")
self.m_subscripts["r"].append("th")
self.m_subscripts["r"].append("d")
self.m_subscripts["r"].append("n")
self.m_subscripts["r"].append("p")
self.m_subscripts["r"].append("ph")
self.m_subscripts["r"].append("b")
self.m_subscripts["r"].append("m")
self.m_subscripts["r"].append("sh")
self.m_subscripts["r"].append("s")
self.m_subscripts["r"].append("h")
self.m_subscripts["r"].append("dz")
self.m_subscripts["r"].append("s+k")
self.m_subscripts["r"].append("s+g")
self.m_subscripts["r"].append("s+p")
self.m_subscripts["r"].append("s+b")
self.m_subscripts["r"].append("s+m")
self.m_subscripts["r"].append("s+n")
self.m_subscripts["l"].append("k")
self.m_subscripts["l"].append("g")
self.m_subscripts["l"].append("b")
self.m_subscripts["l"].append("r")
self.m_subscripts["l"].append("s")
self.m_subscripts["l"].append("z")
self.m_subscripts["w"].append("k")
self.m_subscripts["w"].append("kh")
self.m_subscripts["w"].append("g")
self.m_subscripts["w"].append("c")
self.m_subscripts["w"].append("ny")
self.m_subscripts["w"].append("t")
self.m_subscripts["w"].append("d")
self.m_subscripts["w"].append("ts")
self.m_subscripts["w"].append("tsh")
self.m_subscripts["w"].append("zh")
self.m_subscripts["w"].append("z")
self.m_subscripts["w"].append("r")
self.m_subscripts["w"].append("l")
self.m_subscripts["w"].append("sh")
self.m_subscripts["w"].append("s")
self.m_subscripts["w"].append("h")
self.m_subscripts["w"].append("g+r")
self.m_subscripts["w"].append("d+r")
self.m_subscripts["w"].append("ph+y")
self.m_subscripts["w"].append("r+g")
self.m_subscripts["w"].append("r+ts")
# prefixes => set of consonants or stacks after
self.m_prefixes = {"g": [], "d": [], "b": [],
"m": [], "'": [], u"\u2018": [], u"\u2019": []}
tmpSet = []
tmpSet.append("c")
tmpSet.append("ny")
tmpSet.append("t")
tmpSet.append("d")
tmpSet.append("n")
tmpSet.append("ts")
tmpSet.append("zh")
tmpSet.append("z")
tmpSet.append("y")
tmpSet.append("sh")
tmpSet.append("s")
self.m_prefixes["g"] = tmpSet
tmpSet = []
tmpSet.append("k")
tmpSet.append("g")
tmpSet.append("ng")
tmpSet.append("p")
tmpSet.append("b")
tmpSet.append("m")
tmpSet.append("k+y")
tmpSet.append("g+y")
tmpSet.append("p+y")
tmpSet.append("b+y")
tmpSet.append("m+y")
tmpSet.append("k+r")
tmpSet.append("g+r")
tmpSet.append("p+r")
tmpSet.append("b+r")
self.m_prefixes["d"] = tmpSet
tmpSet = []
tmpSet.append("k")
tmpSet.append("g")
tmpSet.append("c")
tmpSet.append("t")
tmpSet.append("d")
tmpSet.append("ts")
tmpSet.append("zh")
tmpSet.append("z")
tmpSet.append("sh")
tmpSet.append("s")
tmpSet.append("r")
tmpSet.append("l")
tmpSet.append("k+y")
tmpSet.append("g+y")
tmpSet.append("k+r")
tmpSet.append("g+r")
tmpSet.append("r+l")
tmpSet.append("s+l")
tmpSet.append("r+k")
tmpSet.append("r+g")
tmpSet.append("r+ng")
tmpSet.append("r+j")
tmpSet.append("r+ny")
tmpSet.append("r+t")
tmpSet.append("r+d")
tmpSet.append("r+n")
tmpSet.append("r+ts")
tmpSet.append("r+dz")
tmpSet.append("s+k")
tmpSet.append("s+g")
tmpSet.append("s+ng")
tmpSet.append("s+ny")
tmpSet.append("s+t")
tmpSet.append("s+d")
tmpSet.append("s+n")
tmpSet.append("s+ts")
tmpSet.append("r+k+y")
tmpSet.append("r+g+y")
tmpSet.append("s+k+y")
tmpSet.append("s+g+y")
tmpSet.append("s+k+r")
tmpSet.append("s+g+r")
tmpSet.append("l+d")
tmpSet.append("l+t")
tmpSet.append("k+l")
tmpSet.append("s+r")
tmpSet.append("z+l")
tmpSet.append("s+w")
self.m_prefixes["b"] = tmpSet
tmpSet = []
tmpSet.append("kh")
tmpSet.append("g")
tmpSet.append("ng")
tmpSet.append("ch")
tmpSet.append("j")
tmpSet.append("ny")
tmpSet.append("th")
tmpSet.append("d")
tmpSet.append("n")
tmpSet.append("tsh")
tmpSet.append("dz")
tmpSet.append("kh+y")
tmpSet.append("g+y")
tmpSet.append("kh+r")
tmpSet.append("g+r")
self.m_prefixes["m"] = tmpSet
tmpSet = []
tmpSet.append("kh")
tmpSet.append("g")
tmpSet.append("ch")
tmpSet.append("j")
tmpSet.append("th")
tmpSet.append("d")
tmpSet.append("ph")
tmpSet.append("b")
tmpSet.append("tsh")
tmpSet.append("dz")
tmpSet.append("kh+y")
tmpSet.append("g+y")
tmpSet.append("ph+y")
tmpSet.append("b+y")
tmpSet.append("kh+r")
tmpSet.append("g+r")
tmpSet.append("d+r")
tmpSet.append("ph+r")
tmpSet.append("b+r")
self.m_prefixes["'"] = tmpSet
self.m_prefixes[u"\u2018"] = tmpSet
self.m_prefixes[u"\u2019"] = tmpSet
# set of suffix letters
# also included are some Skt letters b/c they occur often in suffix
# position in Skt words
self.m_suffixes = []
self.m_suffixes.append("'")
self.m_suffixes.append(u"\u2018")
self.m_suffixes.append(u"\u2019")
self.m_suffixes.append("g")
self.m_suffixes.append("ng")
self.m_suffixes.append("d")
self.m_suffixes.append("n")
self.m_suffixes.append("b")
self.m_suffixes.append("m")
self.m_suffixes.append("r")
self.m_suffixes.append("l")
self.m_suffixes.append("s")
self.m_suffixes.append("N")
self.m_suffixes.append("T")
self.m_suffixes.append("-n")
self.m_suffixes.append("-t")
# suffix2 => set of letters before
self.m_suff2 = {"s": [], "d": []}
tmpSet = []
tmpSet.append("g")
tmpSet.append("ng")
tmpSet.append("b")
tmpSet.append("m")
self.m_suff2["s"] = tmpSet
tmpSet = []
tmpSet.append("n")
tmpSet.append("r")
tmpSet.append("l")
self.m_suff2["d"] = tmpSet
# root letter index for very ambiguous three-stack syllables
self.m_ambiguous_key = {
"dgs": 1,
"dms": 1,
"'gs": 1,
"mngs": 0,
"bgs": 0,
"dbs": 1
}
self.m_ambiguous_wylie = {
"dgs": "dgas",
"dms": "dmas",
"'gs": "'gas",
"mngs": "mangs",
"bgs": "bags",
"dbs": "dbas"
}
# *** Unicode to Wylie mappings ***
# top letters
self.m_tib_top = {
u'\u0f40': "k",
u'\u0f41': "kh",
u'\u0f42': "g",
u'\u0f43': "g+h",
u'\u0f44': "ng",
u'\u0f45': "c",
u'\u0f46': "ch",
u'\u0f47': "j",
u'\u0f49': "ny",
u'\u0f4a': "T",
u'\u0f4b': "Th",
u'\u0f4c': "D",
u'\u0f4d': "D+h",
u'\u0f4e': "N",
u'\u0f4f': "t",
u'\u0f50': "th",
u'\u0f51': "d",
u'\u0f52': "d+h",
u'\u0f53': "n",
u'\u0f54': "p",
u'\u0f55': "ph",
u'\u0f56': "b",
u'\u0f57': "b+h",
u'\u0f58': "m",
u'\u0f59': "ts",
u'\u0f5a': "tsh",
u'\u0f5b': "dz",
u'\u0f5c': "dz+h",
u'\u0f5d': "w",
u'\u0f5e': "zh",
u'\u0f5f': "z",
u'\u0f60': "'",
u'\u0f61': "y",
u'\u0f62': "r",
u'\u0f63': "l",
u'\u0f64': "sh",
u'\u0f65': "Sh",
u'\u0f66': "s",
u'\u0f67': "h",
u'\u0f68': "a",
u'\u0f69': "k+Sh",
u'\u0f6a': "R"
}
# subjoined letters
self.m_tib_subjoined = {
u'\u0f90': "k",
u'\u0f91': "kh",
u'\u0f92': "g",
u'\u0f93': "g+h",
u'\u0f94': "ng",
u'\u0f95': "c",
u'\u0f96': "ch",
u'\u0f97': "j",
u'\u0f99': "ny",
u'\u0f9a': "T",
u'\u0f9b': "Th",
u'\u0f9c': "D",
u'\u0f9d': "D+h",
u'\u0f9e': "N",
u'\u0f9f': "t",
u'\u0fa0': "th",
u'\u0fa1': "d",
u'\u0fa2': "d+h",
u'\u0fa3': "n",
u'\u0fa4': "p",
u'\u0fa5': "ph",
u'\u0fa6': "b",
u'\u0fa7': "b+h",
u'\u0fa8': "m",
u'\u0fa9': "ts",
u'\u0faa': "tsh",
u'\u0fab': "dz",
u'\u0fac': "dz+h",
u'\u0fad': "w",
u'\u0fae': "zh",
u'\u0faf': "z",
u'\u0fb0': "'",
u'\u0fb1': "y",
u'\u0fb2': "r",
u'\u0fb3': "l",
u'\u0fb4': "sh",
u'\u0fb5': "Sh",
u'\u0fb6': "s",
u'\u0fb7': "h",
u'\u0fb8': "a",
u'\u0fb9': "k+Sh",
u'\u0fba': "W",
u'\u0fbb': "Y",
u'\u0fbc': "R"
}
# vowel signs:
# a-chen is not here because that's a top character, not a vowel sign.
# pre-composed "I" and "U" are dealt here; other pre-composed Skt vowels are more
# easily handled by a global replace in toWylie(), b/c they turn into
# subjoined "r"/"l".
self.m_tib_vowel = {
u'\u0f71': "A",
u'\u0f72': "i",
u'\u0f73': "I",
u'\u0f74': "u",
u'\u0f75': "U",
u'\u0f7a': "e",
u'\u0f7b': "ai",
u'\u0f7c': "o",
u'\u0f7d': "au",
u'\u0f80': "-i"
}
# long (Skt) vowels
self.m_tib_vowel_long = {
"i": "I",
"u": "U",
"-i": "-I"
}
# final symbols => wylie
self.m_tib_final_wylie = {
u'\u0f7e': "M",
u'\u0f82': "~M`",
u'\u0f83': "~M",
u'\u0f37': "X",
u'\u0f35': "~X",
u'\u0f39': "^",
u'\u0f7f': "H",
u'\u0f84': "?"
}
# final symbols by class
self.m_tib_final_class = {
u'\u0f7e': "M",
u'\u0f82': "M",
u'\u0f83': "M",
u'\u0f37': "X",
u'\u0f35': "X",
u'\u0f39': "^",
u'\u0f7f': "H",
u'\u0f84': "?"
}
# special characters introduced by ^
self.m_tib_caret = {
"ph": "f",
"b": "v",
}
# other stand-alone characters
self.m_tib_other = {
' ': "_",
u'\u0f04': "@",
u'\u0f05': "#",
u'\u0f06': "$",
u'\u0f07': "%",
u'\u0f08': "!",
u'\u0f0b': " ",
u'\u0f0c': "*",
u'\u0f0d': "/",
u'\u0f0e': "//",
u'\u0f0f': ";",
u'\u0f11': "|",
u'\u0f14': ":",
u'\u0f20': "0",
u'\u0f21': "1",
u'\u0f22': "2",
u'\u0f23': "3",
u'\u0f24': "4",
u'\u0f25': "5",
u'\u0f26': "6",
u'\u0f27': "7",
u'\u0f28': "8",
u'\u0f29': "9",
u'\u0f34': "=",
u'\u0f3a': "<",
u'\u0f3b': ">",
u'\u0f3c': "(",
u'\u0f3d': ")",
}
# all these stacked consonant combinations don't need "+"s in them
self.m_tib_stacks = []
self.m_tib_stacks.append("b+l")
self.m_tib_stacks.append("b+r")
self.m_tib_stacks.append("b+y")
self.m_tib_stacks.append("c+w")
self.m_tib_stacks.append("d+r")
self.m_tib_stacks.append("d+r+w")
self.m_tib_stacks.append("d+w")
self.m_tib_stacks.append("dz+r")
self.m_tib_stacks.append("g+l")
self.m_tib_stacks.append("g+r")
self.m_tib_stacks.append("g+r+w")
self.m_tib_stacks.append("g+w")
self.m_tib_stacks.append("g+y")
self.m_tib_stacks.append("h+r")
self.m_tib_stacks.append("h+w")
self.m_tib_stacks.append("k+l")
self.m_tib_stacks.append("k+r")
self.m_tib_stacks.append("k+w")
self.m_tib_stacks.append("k+y")
self.m_tib_stacks.append("kh+r")
self.m_tib_stacks.append("kh+w")
self.m_tib_stacks.append("kh+y")
self.m_tib_stacks.append("l+b")
self.m_tib_stacks.append("l+c")
self.m_tib_stacks.append("l+d")
self.m_tib_stacks.append("l+g")
self.m_tib_stacks.append("l+h")
self.m_tib_stacks.append("l+j")
self.m_tib_stacks.append("l+k")
self.m_tib_stacks.append("l+ng")
self.m_tib_stacks.append("l+p")
self.m_tib_stacks.append("l+t")
self.m_tib_stacks.append("l+w")
self.m_tib_stacks.append("m+r")
self.m_tib_stacks.append("m+y")
self.m_tib_stacks.append("n+r")
self.m_tib_stacks.append("ny+w")
self.m_tib_stacks.append("p+r")
self.m_tib_stacks.append("p+y")
self.m_tib_stacks.append("ph+r")
self.m_tib_stacks.append("ph+y")
self.m_tib_stacks.append("ph+y+w")
self.m_tib_stacks.append("r+b")
self.m_tib_stacks.append("r+d")
self.m_tib_stacks.append("r+dz")
self.m_tib_stacks.append("r+g")
self.m_tib_stacks.append("r+g+w")
self.m_tib_stacks.append("r+g+y")
self.m_tib_stacks.append("r+j")
self.m_tib_stacks.append("r+k")
self.m_tib_stacks.append("r+k+y")
self.m_tib_stacks.append("r+l")
self.m_tib_stacks.append("r+m")
self.m_tib_stacks.append("r+m+y")
self.m_tib_stacks.append("r+n")
self.m_tib_stacks.append("r+ng")
self.m_tib_stacks.append("r+ny")
self.m_tib_stacks.append("r+t")
self.m_tib_stacks.append("r+ts")
self.m_tib_stacks.append("r+ts+w")
self.m_tib_stacks.append("r+w")
self.m_tib_stacks.append("s+b")
self.m_tib_stacks.append("s+b+r")
self.m_tib_stacks.append("s+b+y")
self.m_tib_stacks.append("s+d")
self.m_tib_stacks.append("s+g")
self.m_tib_stacks.append("s+g+r")
self.m_tib_stacks.append("s+g+y")
self.m_tib_stacks.append("s+k")
self.m_tib_stacks.append("s+k+r")
self.m_tib_stacks.append("s+k+y")
self.m_tib_stacks.append("s+l")
self.m_tib_stacks.append("s+m")
self.m_tib_stacks.append("s+m+r")
self.m_tib_stacks.append("s+m+y")
self.m_tib_stacks.append("s+n")
self.m_tib_stacks.append("s+n+r")
self.m_tib_stacks.append("s+ng")
self.m_tib_stacks.append("s+ny")
self.m_tib_stacks.append("s+p")
self.m_tib_stacks.append("s+p+r")
self.m_tib_stacks.append("s+p+y")
self.m_tib_stacks.append("s+r")
self.m_tib_stacks.append("s+t")
self.m_tib_stacks.append("s+ts")
self.m_tib_stacks.append("s+w")
self.m_tib_stacks.append("sh+r")
self.m_tib_stacks.append("sh+w")
self.m_tib_stacks.append("t+r")
self.m_tib_stacks.append("t+w")
self.m_tib_stacks.append("th+r")
self.m_tib_stacks.append("ts+w")
self.m_tib_stacks.append("tsh+w")
self.m_tib_stacks.append("z+l")
self.m_tib_stacks.append("z+w")
self.m_tib_stacks.append("zh+w")
# a map used to split the input string into tokens for fromWylie().
# all letters which start tokens longer than one letter are mapped to the max length of
# tokens starting with that letter.
self.m_tokens_start = {
'S': 2,
'/': 2,
'd': 4,
'g': 3,
'b': 3,
'D': 3,
'z': 2,
'~': 3,
'-': 4,
'T': 2,
'a': 2,
'k': 2,
't': 3,
's': 2,
'c': 2,
'n': 2,
'p': 2,
'\r': 2,
}
# also for tokenization - a set of tokens longer than one letter
self.m_tokens = []
self.m_tokens.append("-d+h")
self.m_tokens.append("dz+h")
self.m_tokens.append("-dh")
self.m_tokens.append("-sh")
self.m_tokens.append("-th")
self.m_tokens.append("D+h")
self.m_tokens.append("b+h")
self.m_tokens.append("d+h")
self.m_tokens.append("dzh")
self.m_tokens.append("g+h")
self.m_tokens.append("tsh")
self.m_tokens.append("~M`")
self.m_tokens.append("-I")
self.m_tokens.append("-d")
self.m_tokens.append("-i")
self.m_tokens.append("-n")
self.m_tokens.append("-t")
self.m_tokens.append("//")
self.m_tokens.append("Dh")
self.m_tokens.append("Sh")
self.m_tokens.append("Th")
self.m_tokens.append("ai")
self.m_tokens.append("au")
self.m_tokens.append("bh")
self.m_tokens.append("ch")
self.m_tokens.append("dh")
self.m_tokens.append("dz")
self.m_tokens.append("gh")
self.m_tokens.append("kh")
self.m_tokens.append("ng")
self.m_tokens.append("ny")
self.m_tokens.append("ph")
self.m_tokens.append("sh")
self.m_tokens.append("th")
self.m_tokens.append("ts")
self.m_tokens.append("zh")
self.m_tokens.append("~M")
self.m_tokens.append("~X")
self.m_tokens.append("\r\n")
# setup a wylie object
def initWylie(self, check, check_strict, print_warnings, fix_spacing):
# check_strict requires check
if check_strict and not check:
raise "check_strict requires check."
self.check = check
self.check_strict = check_strict
self.print_warnings = print_warnings
self.fix_spacing = fix_spacing
self.initHashes()
    # constructor passing all options
    # see the comments at the beginning of this file for more details.
    # @overloaded
    def __init__0(self, check, check_strict, print_warnings, fix_spacing):
        # Java-style overloaded "constructor" kept from the port: initialize
        # with explicit option values (not invoked by Python automatically).
        self.initWylie(check, check_strict, print_warnings, fix_spacing)
    # constructor with default options
    # @__init__.register(object)
    def __init__(self):
        # Defaults: check=True, check_strict=True, print_warnings=False,
        # fix_spacing=True.
        self.initWylie(True, True, False, True)
    # helper functions to access the various hash tables
    # (each returns None / False for an unknown key)
    def consonant(self, s):
        # Wylie consonant -> Unicode
        return self.m_consonant.get(s)
    def subjoined(self, s):
        # Wylie consonant -> subjoined (stacked) Unicode form
        return self.m_subjoined.get(s)
    def vowel(self, s):
        # Wylie vowel -> Unicode vowel sign
        return self.m_vowel.get(s)
    def final_uni(self, s):
        # Wylie final sign -> Unicode
        return self.m_final_uni.get(s)
    def final_class(self, s):
        # Wylie final sign -> its class (finals of one class don't combine)
        return self.m_final_class.get(s)
    def other(self, s):
        # punctuation / digits / other tokens -> Unicode
        return self.m_other.get(s)
    def isSpecial(self, s):
        # is s a token with special meaning?
        return s in self.m_special
    def isSuperscript(self, s):
        # can s ever act as a superscript letter?
        return s in self.m_superscripts
    def superscript(self, sup, below):
        # may 'sup' be written above 'below'?
        tmpSet = self.m_superscripts.get(sup)
        if tmpSet is None:
            return False
        return below in tmpSet
    def isSubscript(self, s):
        # can s ever act as a subjoined letter?
        return s in self.m_subscripts
    def subscript(self, sub, above):
        # may 'sub' be written below 'above'?
        tmpSet = self.m_subscripts.get(sub)
        if tmpSet is None:
            return False
        return above in tmpSet
    def isPrefix(self, s):
        # can s be a prefix consonant?
        return s in self.m_prefixes
    def prefix(self, pref, after):
        # may prefix 'pref' occur before stack 'after'?
        tmpSet = self.m_prefixes.get(pref)
        if tmpSet is None:
            return False
        return after in tmpSet
    def isSuffix(self, s):
        # can s be a 1st suffix?
        return s in self.m_suffixes
    def isSuff2(self, s):
        # can s be a 2nd suffix?
        return s in self.m_suff2
    def suff2(self, suff, before):
        # may 2nd suffix 'suff' occur after 1st suffix 'before'?
        tmpSet = self.m_suff2.get(suff)
        if tmpSet is None:
            return False
        return before in tmpSet
    def ambiguous_key(self, syll):
        # ambiguous syllable -> index of its root consonant
        return self.m_ambiguous_key.get(syll)
    def ambiguous_wylie(self, syll):
        # ambiguous syllable -> its correct Wylie spelling
        return self.m_ambiguous_wylie.get(syll)
    def tib_top(self, c):
        # Unicode top (root) letter -> Wylie
        return self.m_tib_top.get(c)
    def tib_subjoined(self, c):
        # Unicode subjoined letter -> Wylie
        return self.m_tib_subjoined.get(c)
    def tib_vowel(self, c):
        # Unicode vowel sign -> Wylie
        return self.m_tib_vowel.get(c)
    def tib_vowel_long(self, s):
        # short vowel -> its long ("A+") form (used by toWylieOneStack)
        return self.m_tib_vowel_long.get(s)
    def tib_final_wylie(self, c):
        # Unicode final sign -> Wylie
        return self.m_tib_final_wylie.get(c)
    def tib_final_class(self, c):
        # Unicode final sign -> its class
        return self.m_tib_final_class.get(c)
    def tib_caret(self, s):
        # top letter -> its tsa-phru (caret) transformed form
        return self.m_tib_caret.get(s)
    def tib_other(self, c):
        # Unicode punctuation etc. -> Wylie
        return self.m_tib_other.get(c)
    def tib_stack(self, s):
        # is s (letters joined by '+') a standard Tibetan stack?
        return s in self.m_tib_stacks
# split a string into Wylie tokens;
# make sure there is room for at least one null element at the end of the
# array
def splitIntoTokens(self, str_): # noqa: C901
i = 0
o = 0
maxlen = len(str_)
tokens = [''] * (maxlen + 2)
while i < maxlen:
try:
c = str_[i]
mlo = self.m_tokens_start.get(c, None)
# if there are multi-char tokens starting with this char, try
# them
if mlo is not None:
length = int(mlo)
while length > 1:
if i <= maxlen - length:
tr = str_[i: i + length]
if tr in self.m_tokens:
tokens[o] = tr
o += 1
i += length
length -= 1
raise Exception("Continue")
length -= 1
# things starting with backslash are special
if c == '\\' and i <= maxlen - 2:
if str_[i + 1] == 'u' and i <= maxlen - 6:
tokens[o] = str_.substring(i, i + 6)
o += 1
# \\uxxxx
i += 6
elif str_[i + 1] == 'U' and i <= maxlen - 10:
tokens[o] = str_.substring(i, i + 10)
o += 1
# \\Uxxxxxxxx
i += 10
else:
tokens[o] = str_.substring(i, i + 2)
o += 1
# \\x
i += 2
raise Exception("Continue")
# otherwise just take one char
tokens[o] = c
o += 1
i += 1
except Exception:
continue
return tokens
    # Converts successive stacks of Wylie into unicode, starting at the given index
    # within the array of tokens.
    #
    # Assumes that the first available token is valid, and is either a vowel or a consonant.
    # Returns a WylieTsekbar object
    def fromWylieOneTsekbar(self, tokens, i):  # noqa: C901
        """Convert one tsek-bar (syllable) of Wylie tokens to Unicode,
        validating prefix/root/suffix structure with a small state machine."""
        orig_i = i
        t = tokens[i]
        # variables for tracking the state within the syllable as we parse it
        stack = None
        prev_cons = None
        visarga = False
        # variables for checking the root letter, after parsing a whole tsekbar made of only single
        # consonants and one consonant with "a" vowel
        check_root = True
        consonants = []
        root_idx = -1
        out = ""
        warns = []
        # the type of token that we are expecting next in the input stream
        # - PREFIX : expect a prefix consonant, or a main stack
        # - MAIN : expect only a main stack
        # - SUFF1 : expect a 1st suffix
        # - SUFF2 : expect a 2nd suffix
        # - NONE : expect nothing (after a 2nd suffix)
        #
        # the state machine is actually more lenient than this, in that a "main stack" is allowed
        # to come at any moment, even after suffixes. this is because such syllables are sometimes
        # found in abbreviations or other places. basically what we check is that prefixes and
        # suffixes go with what they are attached to.
        #
        # valid tsek-bars end in one of these states: SUFF1, SUFF2, NONE
        state = self.State.PREFIX
        # iterate over the stacks of a tsek-bar
        while t is not None and (self.vowel(t) is not None or self.consonant(t) is not None) and not visarga:  # STACK
            # translate a stack
            if stack is not None:
                prev_cons = stack.single_consonant
            stack = self.fromWylieOneStack(tokens, i)
            i += stack.tokens_used
            t = tokens[i]
            out += stack.uni_string
            warns.extend(stack.warns)
            visarga = stack.visarga
            # structural checking below is skipped entirely when disabled
            if not self.check:
                continue
            # check for syllable structure consistency by iterating a simple state machine
            # - prefix consonant
            if state == self.State.PREFIX and stack.single_consonant is not None:
                consonants.append(stack.single_consonant)
                if self.isPrefix(stack.single_consonant):
                    next = t
                    if self.check_strict:
                        next = self.consonantString(tokens, i)
                    if next is not None and not self.prefix(stack.single_consonant, next):
                        next = next.replace("+", "")
                        warns.append("Prefix \"" + stack.single_consonant +
                                     "\" does not occur before \"" + next + "\".")
                else:
                    warns.append("Invalid prefix consonant: \"" +
                                 stack.single_consonant + "\".")
                state = self.State.MAIN
            # - main stack with vowel or multiple consonants
            elif stack.single_consonant is None:
                state = self.State.SUFF1
                # keep track of the root consonant if it was a single cons with
                # an "a" vowel
                if root_idx >= 0:
                    # a second "root" candidate: placement check is off
                    check_root = False
                elif stack.single_cons_a is not None:
                    consonants.append(stack.single_cons_a)
                    root_idx = len(consonants) - 1
            # - unexpected single consonant after prefix
            elif state == self.State.MAIN:
                warns.append("Expected vowel after \"" +
                             stack.single_consonant + "\".")
            # - 1st suffix
            elif state == self.State.SUFF1:
                consonants.append(stack.single_consonant)
                # check this one only in strict mode b/c it trips on lots of
                # Skt stuff
                if self.check_strict:
                    if not self.isSuffix(stack.single_consonant):
                        warns.append("Invalid suffix consonant: \"" +
                                     stack.single_consonant + "\".")
                state = self.State.SUFF2
            # - 2nd suffix
            elif state == self.State.SUFF2:
                consonants.append(stack.single_consonant)
                if self.isSuff2(stack.single_consonant):
                    if not self.suff2(stack.single_consonant, prev_cons):
                        warns.append("Second suffix \"" + stack.single_consonant +
                                     "\" does not occur after \"" + prev_cons + "\".")
                else:
                    warns.append("Invalid 2nd suffix consonant: \"" +
                                 stack.single_consonant + "\".")
                state = self.State.NONE
            # - more crap after a 2nd suffix
            elif state == self.State.NONE:
                warns.append("Cannot have another consonant \"" +
                             stack.single_consonant + "\" after 2nd suffix.")
        # a lone prefix consonant never got its main stack
        if state == self.State.MAIN and stack.single_consonant is not None and self.isPrefix(stack.single_consonant):
            warns.append("Vowel expected after \"" +
                         stack.single_consonant + "\".")
        # check root consonant placement only if there were no warnings so far, and the syllable
        # looks ambiguous. not many checks are needed here because the previous state machine
        # already takes care of most illegal combinations.
        if self.check and len(warns) == 0 and check_root and root_idx >= 0:
            # 2 letters where each could be prefix/suffix: root is 1st
            if len(consonants) == 2 and \
                    root_idx != 0 and \
                    self.prefix(consonants[0], consonants[1]) and \
                    self.isSuffix(consonants[1]):
                warns.append("Syllable should probably be \"" +
                             consonants[0] + "a" + consonants[1] + "\".")
            # 3 letters where 1st can be prefix, 2nd can be postfix before "s" and last is "s":
            # use a lookup table as this is completely ambiguous.
            elif len(consonants) == 3 and \
                    self.isPrefix(consonants[0]) and \
                    self.suff2("s", consonants[1]) and \
                    consonants[2] == "s":
                cc = self.joinStrings(consonants, "")
                cc = cc.replace(u'\u2018', '\'')
                cc = cc.replace(u'\u2019', '\'')  # typographical quotes
                expect_key = self.ambiguous_key(cc)
                if expect_key is not None and int(expect_key) != root_idx:
                    warns.append("Syllable should probably be \"" +
                                 self.ambiguous_wylie(cc) + "\".")
        # return the stuff as a WylieTsekbar struct
        ret = Wylie.WylieTsekbar()
        ret.uni_string = out
        ret.tokens_used = i - orig_i
        ret.warns = warns
        return ret
def unicodeEscape(self, warns, line, t):
hex = t.substring(2)
if hex.isEmpty():
return None
if not self.validHex(hex):
self.warnl(warns, line, "\"" + t + "\": invalid hex code.")
return ""
return str(int(hex, base=16))
# Character.valueOf(str(int(hex, base=16))).__str__()
# Converts a Wylie (EWTS) string to unicode. If 'warns' is not the null List, puts warnings into it.
# @fromWylie.register(object, str, List)
def fromWylie(self, str_, warns=None): # noqa: C901
out = []
line = 1
units = 0
# remove initial spaces if required
if self.fix_spacing:
str_ = re.sub("^\\s+", "", str_, 1)
# split into tokens
tokens = self.splitIntoTokens(str_)
i = 0
# iterate over the tokens
# __i_5 = i
while tokens[i] != '': # ITER
try:
t = tokens[i]
o = None
# [non-tibetan text] : pass through, nesting brackets
if t == "[":
nesting = 1
i += 1
while tokens[i] is not None: # ESC
t = tokens[i]
i += 1
if t == "[":
nesting += 1
if t == "]":
nesting -= 1
if nesting == 0:
raise Exception("Continue") # ITER
# handle unicode escapes and \1-char escapes within
# [comments]...
if t.startsWith("\\u") or t.startsWith("\\U"):
o = self.unicodeEscape(warns, line, t)
if o is not None:
out.append(o)
continue # ESC
if t.startsWith("\\"):
o = t.substring(1)
else:
o = t
out.append(o)
self.warnl(warns, line, "Unfinished [non-Wylie stuff].")
break # ITER
# punctuation, numbers, etc
o = self.other(t)
if o is not None:
out.append(o)
i += 1
units += 1
# collapse multiple spaces?
if t == " " and self.fix_spacing:
while tokens[i] is not None and tokens[i] == " ":
pass
continue
if self.vowel(t) is not None or self.consonant(t) is not None:
tb = self.fromWylieOneTsekbar(tokens, i)
word = ""
j = 0
while j < tb.tokens_used:
word += tokens[i + j]
j += 1
out.append(tb.uni_string)
i += tb.tokens_used
units += 1
for w in tb.warns:
self.warnl(warns, line, "\"" + word + "\": " + w)
continue
if t == u"\ufeff" or t == u"\u200b":
i += 1
continue
if t.startswith("\\u") or t.startswith("\\U"):
o = self.unicodeEscape(warns, line, t)
if o is not None:
i += 1
out.append(o)
continue
if t.startswith("\\"):
out.append(t.substring(1))
i += 1
continue
if t == "\r\n" or t == "\n" or t == "\r":
line += 1
out.append(t)
i += 1
if self.fix_spacing:
while tokens[i] is not None and tokens[i] == " ":
pass
continue
if t == '':
i += 1
continue
c = t[0]
if self.isSpecial(t) or (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z'):
self.warnl(warns, line, "Unexpected character \"" + t + "\".")
out.append(t)
i += 1
except Exception:
continue
if units == 0:
self.warn(warns, "No Tibetan characters found!")
return ''.join(out)
def validHex(self, t):
i = 0
while i < len(t):
c = t[i]
if not ((c >= 'a' and c <= 'f') or (c >= '0' and c <= '9')):
return False
i += 1
return True
    def warn(self, warns, str_):
        # Record a warning: append to the caller-supplied list (if any) and
        # echo to stdout when print_warnings is enabled.
        if warns is not None:
            warns.append(str_)
        if self.print_warnings:
            print(str_)
def warnl(self, warns, line, str_):
self.warn(warns, "line " + str(line) + ": " + str_)
    def debug(self, str_):
        # Debug helper: print a message unconditionally.
        print(str_)
def debugvar(self, o, name):
print(">>" + name + "<< : (" + ("NULL" if o is None else o.__str__()) + ")")
def joinStrings(self, a, sep):
return sep.join([x for x in a if x is not None])
    # Converts one stack's worth of Wylie into unicode, starting at the given index
    # within the array of tokens.
    # Assumes that the first available token is valid, and is either a vowel or a consonant.
    # Returns a WylieStack object.
    def fromWylieOneStack(self, tokens, i):  # noqa: C901
        """Convert one Wylie letter-stack (superscript + main letter +
        subjoined letters + vowels + final signs) starting at token *i*."""
        orig_i = i
        t = None
        t2 = None
        # o = None
        out = ""
        warns = []
        consonants = 0  # how many consonants found
        vowel_found = None  # any vowels (including a-chen)
        # any vowel signs (that go under or above the main stack)
        vowel_sign = None
        single_consonant = None  # did we find just a single consonant?
        plus = False  # any explicit subjoining via '+'?
        caret = 0  # find any '^'?
        final_found = {}  # keep track of finals (H, M, etc) by class
        # do we have a superscript?
        t = tokens[i]
        t2 = tokens[i + 1]
        if t2 is not None and self.isSuperscript(t) and self.superscript(t, t2):
            if self.check_strict:
                next = self.consonantString(tokens, i + 1)
                if not self.superscript(t, next):
                    next = next.replace("+", "")
                    warns.append(
                        "Superscript \"" + t + "\" does not occur above combination \"" + next + "\".")
            out += self.consonant(t)
            consonants += 1
            i += 1
            while tokens[i] is not None and tokens[i] == "^":
                caret += 1
                i += 1
        # main consonant + stuff underneath.
        # this is usually executed just once, but the "+" subjoining
        # operator makes it come back here
        while True:  # MAIN
            # main consonant (or a "a" after a "+")
            t = tokens[i]
            if self.consonant(t) is not None or (len(out) > 0 and self.subjoined(t) is not None):
                if len(out) > 0:
                    out += self.subjoined(t)
                else:
                    out += self.consonant(t)
                i += 1
                if t == "a":
                    vowel_found = "a"
                else:
                    consonants += 1
                    single_consonant = t
                while tokens[i] is not None and tokens[i] == "^":
                    caret += 1
                    i += 1
                # subjoined: rata, yata, lata, wazur. there can be up two
                # subjoined letters in a stack.
                z = 0
                while z < 2:
                    t2 = tokens[i]
                    if t2 is not None and self.isSubscript(t2):
                        # lata does not occur below multiple consonants
                        # (otherwise we mess up "brla" = "b.r+la")
                        if t2 == "l" and consonants > 1:
                            break
                        # full stack checking (disabled by "+")
                        if self.check_strict and not plus:
                            prev = self.consonantStringBackwards(
                                tokens, i - 1, orig_i)
                            if not self.subscript(t2, prev):
                                prev = prev.replace("+", "")
                                warns.append(
                                    "Subjoined \"" + t2 + "\" not expected after \"" + prev + "\".")
                        # simple check only
                        elif self.check:
                            if not self.subscript(t2, t) and not (z == 1 and t2 == "w" and t == "y"):
                                warns.append(
                                    "Subjoined \"" + t2 + "\"not expected after \"" + t + "\".")
                        out += self.subjoined(t2)
                        i += 1
                        consonants += 1
                        while tokens[i] is not None and tokens[i] == "^":
                            caret += 1
                            i += 1
                        t = t2
                    else:
                        break
                    z += 1
            # caret (^) can come anywhere in Wylie but in Unicode we generate it at the end of
            # the stack but before vowels if it came there (seems to be what OpenOffice expects),
            # or at the very end of the stack if that's how it was in
            # the Wylie.
            if caret > 0:
                if caret > 1:
                    warns.append(
                        "Cannot have more than one \"^\" applied to the same stack.")
                final_found[self.final_class("^")] = "^"
                out += self.final_uni("^")
                caret = 0
            # vowel(s)
            t = tokens[i]
            if t is not None and self.vowel(t) is not None:
                if 0 == len(out):
                    out += self.vowel("a")
                if not t == "a":
                    out += self.vowel(t)
                i += 1
                vowel_found = t
                if not t == "a":
                    vowel_sign = t
            # plus sign: forces more subjoining
            t = tokens[i]
            if t is not None and t == "+":
                i += 1
                plus = True
                # sanity check: next token must be vowel or subjoinable
                # consonant.
                t = tokens[i]
                if t is None or (self.vowel(t) is None and self.subjoined(t) is None):
                    if self.check:
                        warns.append(
                            "Expected vowel or consonant after \"+\".")
                    break  # MAIN
                # consonants after vowels doesn't make much sense but process
                # it anyway
                if self.check:
                    if self.vowel(t) is None and vowel_sign is not None:
                        warns.append("Cannot subjoin consonant (" + t +
                                     ") after vowel (" + vowel_sign + ") in same stack.")
                    elif t == "a" and vowel_sign is not None:
                        warns.append(
                            "Cannot subjoin a-chen (a) after vowel (" + vowel_sign + ") in same stack.")
                continue  # MAIN
            break  # MAIN
        # final tokens
        t = tokens[i]
        while t is not None and self.final_class(t) is not None:
            uni = self.final_uni(t)
            klass = self.final_class(t)
            # check for duplicates
            if klass in final_found:
                if final_found.get(klass) == t:
                    warns.append("Cannot have two \"" + t +
                                 "\" applied to the same stack.")
                else:
                    warns.append("Cannot have \"" + t + "\" and \"" +
                                 final_found.get(klass) + "\" applied to the same stack.")
            else:
                final_found[klass] = t
                out += uni
            i += 1
            single_consonant = None
            t = tokens[i]
        # if next is a dot "." (stack separator), skip it.
        if tokens[i] is not None and tokens[i] == ".":
            i += 1
        # if we had more than a consonant and no vowel, and no explicit "+" joining, backtrack and
        # return the 1st consonant alone
        if consonants > 1 and vowel_found is None:
            if plus:
                if self.check:
                    warns.append(
                        "Stack with multiple consonants should end with vowel.")
            else:
                i = orig_i + 1
                consonants = 1
                single_consonant = tokens[orig_i]
                out = ""
                out += self.consonant(single_consonant)
        # calculate "single consonant"
        if consonants != 1 or plus:
            single_consonant = None
        # return the stuff as a WylieStack struct
        ret = Wylie.WylieStack()
        ret.uni_string = out
        ret.tokens_used = i - orig_i
        if vowel_found is not None:
            ret.single_consonant = None
        else:
            ret.single_consonant = single_consonant
        if vowel_found is not None and vowel_found == "a":
            ret.single_cons_a = single_consonant
        else:
            ret.single_cons_a = None
        ret.warns = warns
        ret.visarga = "H" in final_found
        return ret
def consonantString(self, tokens, i):
out = []
while tokens[i] is not None:
t = tokens[i]
i += 1
if t == "+" or t == "^":
continue
if self.consonant(t) is None:
break
out.append(t)
return self.joinStrings(out, "+")
def consonantStringBackwards(self, tokens, i, orig_i):
out = []
while i >= orig_i and tokens[i] is not None:
t = tokens[i]
i -= 1
if t == "+" or t == "^":
continue
if self.consonant(t) is None:
break
out.insert(0, t)
return self.joinStrings(out, "+")
def handleSpaces(self, str_, i, out):
found = 0
# orig_i = i
while i < len(str_) and str_[i] == ' ':
i += 1
found += 1
if found == 0 or i == len(str_):
return 0
t = str_[i]
if self.tib_top(t) is None and self.tib_other(t) is None:
return 0
while i < found:
out.append('_')
i += 1
return found
# Converts from Unicode strings to Wylie (EWTS) transliteration, without warnings,
# including escaping of non-tibetan into [comments].
def toWylie(self, str_):
return self.toWylieOptions(str_, None, True)
    # Converts from Unicode strings to Wylie (EWTS) transliteration.
    #
    # Arguments are:
    # str : the unicode string to be converted
    # escape: whether to escape non-tibetan characters according to Wylie encoding.
    # if escape == false, anything that is not tibetan will be just passed through.
    #
    # Returns: the transliterated string.
    #
    # To get the warnings, call getWarnings() afterwards.
    # @toWylie.register(object, str, List, bool)
    def toWylieOptions(self, str_, warns, escape):  # noqa: C901
        """Unicode -> Wylie worker; see the comment block above for the
        argument semantics. Warnings are appended to *warns* (may be None)."""
        out = ""
        line = 1
        units = 0
        # globally search and replace some deprecated pre-composed Sanskrit
        # vowels
        str_ = str_.replace(u"\u0f76", u"\u0fb2\u0f80")
        str_ = str_.replace(u"\u0f77", u"\u0fb2\u0f71\u0f80")
        str_ = str_.replace(u"\u0f78", u"\u0fb3\u0f80")
        str_ = str_.replace(u"\u0f79", u"\u0fb3\u0f71\u0f80")
        str_ = str_.replace(u"\u0f81", u"\u0f71\u0f80")
        str_ = str_.replace(u"\u0f00", u"\u0F68\u0F7C\u0F7E")
        i = 0
        length = len(str_)
        # iterate over the string, codepoint by codepoint
        while i < length:  # ITER
            t = str_[i]
            # found tibetan script - handle one tsekbar
            if self.tib_top(t) is not None:
                tb = self.toWylieOneTsekbar(str_, length, i)
                out += tb.wylie
                i += tb.tokens_used
                units += 1
                for w in tb.warns:
                    self.warnl(warns, line, w)
                # NOTE(review): handleSpaces appends to its third argument,
                # but out is a plain str here (str has no .append) -- this
                # non-escaping path looks broken; confirm before relying on
                # escape=False
                if not escape:
                    i += self.handleSpaces(str_, i, out)
                continue  # ITER
            # punctuation and special stuff. spaces are tricky:
            # - in non-escaping mode: spaces are not turned to '_' here (handled by handleSpaces)
            # - in escaping mode: don't do spaces if there is non-tibetan coming, so they become part
            o = self.tib_other(t)
            if o is not None and (t != ' ' or (escape and not self.followedByNonTibetan(str_, i))):
                out += o
                i += 1
                units += 1
                if not escape:
                    i += self.handleSpaces(str_, i, out)
                continue  # ITER
            # newlines, count lines. "\r\n" together count as one newline.
            if t == '\r' or t == '\n':
                line += 1
                i += 1
                out += t
                if t == '\r' and i < length and str_[i] == '\n':
                    i += 1
                    out += '\n'
                continue  # ITER
            # ignore BOM and zero-width space
            if t == u'\ufeff' or t == u'\u200b':
                i += 1
                continue  # ITER
            # anything else - pass along?
            if not escape:
                out += t
                i += 1
                continue  # ITER
            # other characters in the tibetan plane, escape with \\u0fxx
            if t > u'\u0f00' and t <= u'\u0fff':
                # c = t.encode("utf8")
                out += t
                i += 1
                # warn for tibetan codepoints that should appear only after a
                # tib_top
                if self.tib_subjoined(t) is not None or self.tib_vowel(t) is not None or self.tib_final_wylie(t) is not None:
                    self.warnl(warns, line, "Tibetan sign " + t +
                               " needs a top symbol to attach to.")
                continue  # ITER
            # ... or escape according to Wylie:
            # put it in [comments], escaping[] sequences and closing at
            # line ends
            out += "["
            while self.tib_top(t) is None and (self.tib_other(t) is None or t == ' ') and t != '\r' and t != '\n':
                # \escape [opening and closing] brackets
                if t == '[' or t == ']':
                    out += "\\"
                    out += t
                # unicode-escape anything in the tibetan plane (i.e characters
                # not handled by Wylie)
                elif t > u'\u0f00' and t <= u'\u0fff':
                    out += self.formatHex(t)
                # and just pass through anything else!
                else:
                    out += t
                i += 1
                if i >= length:
                    break
                t = str_[i]
            out += "]"
        return out
def formatHex(self, t):
return u''.join(char if 32 <= ord(char) <= 126 else u'\\u%04x' % ord(char) for char in t)
def followedByNonTibetan(self, str_, i):
length = len(str_)
while i < length and str_[i] == ' ':
i += 1
if i == length:
return False
t = str_[i]
return self.tib_top(t) is None and self.tib_other(t) is None and t != '\r' and t != '\n'
    # C onvert Unicode to Wylie: one tsekbar
    def toWylieOneTsekbar(self, str_, length, i):  # noqa: C901
        """Convert one Tibetan tsek-bar starting at *i* to Wylie, deciding
        which stacks act as prefixes/suffixes (so they drop their "a")."""
        orig_i = i
        warns = []
        stacks = []
        # consume stacks until a visarga, end of input, or non-Tibetan
        while True:  # ITER
            st = self.toWylieOneStack(str_, length, i)
            stacks.append(st)
            if st.warns:
                warns.extend(st.warns)
            i += st.tokens_used
            if st.visarga:
                break  # ITER
            if i >= length or self.tib_top(str_[i]) is None:
                break  # ITER
        # figure out if some of these stacks can be prefixes or suffixes (in which case
        # they don't need their "a" vowels)
        if len(stacks) > 1 and stacks[0].single_cons is not None:
            # ignore wazur ("+w") in the second stack for the prefix rule
            cs = stacks[1].cons_str.replace("+w", "")
            if self.prefix(stacks[0].single_cons, cs):
                stacks[0].prefix = True
        if len(stacks) > 1 and stacks[-1].single_cons is not None and self.isSuffix(stacks[-1].single_cons):
            stacks[-1].suffix = True
        if len(stacks) > 2 and \
                stacks[-1].single_cons is not None and \
                stacks[-2].single_cons is not None and \
                self.isSuffix(stacks[-2].single_cons) and \
                self.suff2(stacks[-1].single_cons, stacks[-2].single_cons):
            stacks[-1].suff2 = True
            stacks[-2].suffix = True
        # two stacks: can't be both prefix and suffix; prefer the suffix
        if len(stacks) == 2 and stacks[0].prefix and stacks[1].suffix:
            stacks[0].prefix = False
        # three fully-ambiguous stacks: look up the root consonant
        if len(stacks) == 3 and stacks[0].prefix and stacks[1].suffix and stacks[2].suff2:
            strb = ""
            for st in stacks:
                strb += st.single_cons
            ztr = strb
            root = self.ambiguous_key(ztr)
            if root is None:
                warns.append(
                    "Ambiguous syllable found: root consonant not known for \"" + ztr + "\".")
                # fall back to treating the middle stack as the root
                root = 1
            stacks[root].prefix = stacks[root].suffix = False
            stacks[root + 1].suff2 = False
        # a prefix that would visually merge with the next stack needs "."
        if stacks[0].prefix and self.tib_stack(stacks[0].single_cons + "+" + stacks[1].cons_str):
            stacks[0].dot = True
        out = ""
        for st in stacks:
            out += self.putStackTogether(st)
        ret = self.ToWylieTsekbar()
        ret.wylie = out
        ret.tokens_used = i - orig_i
        ret.warns = warns
        return ret
# Unicode to Wylie: one stack at a time
def toWylieOneStack(self, str_, length, i): # noqa: C901
orig_i = i
ffinal = None
vowel = None
klass = None
# split the stack into a ToWylieStack object:
# - top symbol
# - stacked signs (first is the top symbol again, then subscribed main characters...)
# - caret (did we find a stray tsa-phru or not?)
# - vowel signs (including small subscribed a-chung, "-i" Skt signs, etc)
# - final stuff (including anusvara, visarga, halanta...)
# - and some more variables to keep track of what has been found
st = self.ToWylieStack()
# assume: tib_top(t) exists
t = str_[i]
i += 1
st.top = self.tib_top(t)
st.stack.append(self.tib_top(t))
# grab everything else below the top sign and classify in various
# categories
while i < length:
t = str_[i]
o = self.tib_subjoined(t)
o1 = self.tib_vowel(t)
o2 = self.tib_final_wylie(t)
if o is not None:
i += 1
st.stack.append(o)
# check for bad ordering
if len(st.finals) > 0:
st.warns.append(
"Subjoined sign \"" + o + "\" found after final sign \"" + str(ffinal) + "\".")
elif len(st.vowels) > 0:
st.warns.append(
"Subjoined sign \"" + o + "\" found after vowel sign \"" + str(vowel) + "\".")
elif (o1) is not None:
i += 1
st.vowels.append(o1)
if vowel is None:
vowel = o1
if len(st.finals) > 0:
st.warns.append(
"Vowel sign \"" + o1 + "\" found after final sign \"" + str(ffinal) + "\".")
elif (o2) is not None:
i += 1
klass = self.tib_final_class(t)
if o2 == "^":
st.caret = True
else:
if o2 == "H":
st.visarga = True
st.finals.append(o2)
if ffinal is None:
ffinal = o2
if klass in st.finals_found:
st.warns.append(
"Final sign \"" + o2 + "\" should not combine with\
found after final sign \"" + str(ffinal) + "\".")
else:
st.finals_found[klass] = o2
else:
break
# now analyze the stack according to various rules
# a - chen with vowel signs: remove the "a" and keep the vowel
# signs
if st.top == "a" and len(st.stack) == 1 and len(st.vowels) > 0:
st.stack.pop(0)
# handle long vowels: A+i becomes I, etc.
if len(st.vowels) > 1 and st.vowels[0] == "A" and self.tib_vowel_long(st.vowels[1]) is not None:
vowel_long = self.tib_vowel_long(st.vowels[1])
st.vowels.pop(0)
st.vowels.pop(0)
st.vowels.insert(0, vowel_long)
if st.caret and len(st.stack) == 1 and self.tib_caret(st.top) is not None:
caret = self.tib_caret(st.top)
st.top = vowel_long
st.stack.pop(0)
st.stack.insert(0, caret)
st.caret = False
st.cons_str = self.joinStrings(st.stack, "+")
if len(st.stack) == 1 and not st.stack[0] == "a" and not st.caret and len(st.vowels) == 0 and len(st.finals) == 0:
st.single_cons = st.cons_str
st.tokens_used = i - orig_i
return st
def putStackTogether(self, st):
out = ""
if self.tib_stack(st.cons_str):
out += self.joinStrings(st.stack, "")
else:
out += st.cons_str
if st.caret:
out += "^"
if len(st.vowels) > 0:
out += self.joinStrings(st.vowels, "+")
elif not st.prefix and not st.suffix and not st.suff2 and (len(st.cons_str) == 0 or st.cons_str[-1] != 'a'):
out += "a"
out += self.joinStrings(st.finals, "")
if st.dot:
out += "."
return out
    class State:
        # Parser states for fromWylieOneTsekbar's syllable state machine:
        # which syllable part is expected next.
        PREFIX = 'PREFIX'  # expect a prefix consonant or a main stack
        MAIN = 'MAIN'      # expect only a main stack
        SUFF1 = 'SUFF1'    # expect a 1st suffix
        SUFF2 = 'SUFF2'    # expect a 2nd suffix
        NONE = 'NONE'      # expect nothing more (after a 2nd suffix)
    class WylieStack(object):
        # Result record for fromWylieOneStack (fields assigned per call).
        uni_string = None        # generated Unicode for the stack
        tokens_used = int()      # number of input tokens consumed
        single_consonant = None  # bare consonant without vowel, else None
        single_cons_a = None     # consonant carrying an "a" vowel, else None
        warns = None             # list of warnings produced while parsing
        visarga = bool()         # did the stack end with visarga ("H")?
    class WylieTsekbar(object):
        # Result record for fromWylieOneTsekbar (fields assigned per call).
        uni_string = None    # generated Unicode for the tsek-bar
        tokens_used = int()  # number of input tokens consumed
        warns = None         # list of warnings produced while parsing
class ToWylieStack(object):
top = None
stack = []
caret = bool()
vowels = []
finals = []
finals_found = None
visarga = bool()
cons_str = None
single_cons = None
prefix = bool()
suffix = bool()
suff2 = bool()
dot = bool()
tokens_used = int()
warns = None
def __init__(self):
self.stack = []
self.vowels = []
self.finals = []
self.finals_found = {}
self.warns = []
    class ToWylieTsekbar(object):
        # Result record for toWylieOneTsekbar (fields assigned per call).
        wylie = None         # the Wylie transliteration produced
        tokens_used = int()  # number of input characters consumed
        warns = None         # list of warnings (set by the producer)
# --- demo / smoke-test code ---
# warn = []
# print (Wylie().fromWylie("sems can thams cad", warn))
# print ('\n'.join(warn))
# warn = []
# print (Wylie().toWylieOptions(u"ཨོཾ་ཨཿཧཱུྂ་བཛྲ་གུ་རུ་པདྨ་སིདྡྷི་ཧཱུྂ༔", warn, True))
# print ('\n'.join(warn))
# print (Wylie().fromWylie("phyugs zog gi stod nad} ", []))
warn = []
# bug fix: toWylie() takes only the string argument; calling it with three
# arguments raised TypeError. toWylieOptions(string, warning_list, escape)
# is the three-argument entry point.
print(Wylie().toWylieOptions(u"ཨོཾ་ཨཿཧཱུྂ་བཛྲ་གུ་རུ་པདྨ་སིདྡྷི་ཧཱུྂ༔", warn, True))
print('\n'.join(warn))
warn = []
print(Wylie().toWylieOptions(
    u"༄༅། །ཞེས་བྱ་བྱ་རྣམས་ཡོད། སྤྱོད་ལྡན་གནས་སུ་སྐྱེ་བའི་རྒྱུ། །འཐབ་འཁྲོལ་གནོད་པ་ཐར་བྱེད་པའོ། །གཟུགས་ཀྱིསལྷག་མྱོས་མེ་ལོང་ཚལ། །རྒྱལ་ཆེན་མིག་མི་བཟང་གནས་སོ། །ཡུལ་འཁོར་\n"+
    u"སྐྱོང་ནི་ཤར་ཕྱོགས་ཏེ། །ལུས་ངན་བྱང་ཕྱོགས་ལྕང་ལོ་ཅན། །གཞན་ཡང་ཡུལ་པ་འཕགས་པར་སྐྱེས། །སྣ་ཚོགས་གཟུགས་དང་ལྕང་ལོ་ཅན། །ངོས་ལ་ཉིས་བརྒྱ་ལྔ་ཅུ་པ། །གཉའ་ཤིང་འཛིན་སྟེང་གནས་པ་\n"+
    u"སྟེ༑ ༑དཔག་མེད་བཀོད་པ་ལོངས་སྤྱོད་ལྡན། །དེ་འཁོར་ཀུན་ཏུརྒྱུ་བའི་ལྷ། །བྱེ་བ་ཕྲག་ནི་སུམ་ཅུ་དྲུག །དེ་རྣམས་གནས་པའི་གཞལ་ཡསཁང་། །གཟའ་སྐར་ཞེས་ཀྱང་འཇིག་རྟེན་གྲགས། །ཉི་ཟླ་གཉིས་ནི་\n"+
    u"ལྷ་གནས་ཏེ། །དཔག་ཚད་ལྔ་ཅུ་གཅིག་དང་བཅས། །གྱེན་འཐུར་འཕང་བའི་མདའ་ཡབ་དང་། །ནང་ན་སྐྱེད་ཚལ་གྲོང་ཁྱེར་དང་། །ལྟེང་ཁས་བརྒྱན་ཅིང་ལོངས་སྤྱོད་ལྡན། །འཁོར་ལོའི་རླུང་གིས་འདྲེན་པ་\n"+
    u"ཡིན༑ ༑ནམ་ཕྱེད་ཉི་མ་ནུབ་པ་དང་། །ཉི་མ་ཕྱེད་དང་འཆར་དུས་ཅིག །ཉི་མའི་འོད་དང་རང་གྲིབ་ལ། །བརྟེན་ནས་ཟླ་བ་འཕེལ་འགྲིབ་བྱེད། །འདིར་ནི་ཚུལ་ལྡན་བརྩེ་བ་ཅན། །ཁྱད་པར་སྒྲོན་མེ་བྱིན་པའི་\n"+
    u"མཐུ༑ ༑འཇིག་རྟེན་སྐྱོང་བ་བཞི་པོ་ནི། །ཚུལ་ཁྲིམས་བསོད་ནམས་གཞན་པས་ལྷག །སྣ་ཚོགས་ལོངས་སྤྱོད་བསམ་མི་ཁྱབ། །སེམས་ཀྱི་ཀུན་རྟོག་དགེ་བ་ཡིན། །གྲངས་བཞིན་ཡུལ་ཀྱང་དེ་འདྲར་སྣང་། །འདི་\n", warn, True))
print('\n'.join(warn))
|
LasTOmocha/SadigliBot | bot.py | #!/usr/bin/env python
# -*BSD 3-Clause License*-
#__müəllif__ = "<NAME>"
#__müəllif huquqları__ = "Müə<NAME> 2021, SadigliBot"
#__lizensiya__ = "BSD 3-Clause License"
import os
import json
import discord
import random
import asyncio
from random import *
from os.path import join, dirname
from dotenv import load_dotenv
from discord.utils import get
from discord.ext import commands
# load .env file
dir_path = os.path.dirname(os.path.realpath(__file__))
dotenv_path = join(dir_path, '.env')
load_dotenv(dotenv_path)
DISCORD_TOKEN = os.environ.get('DISCORD_TOKEN')
# NOTE(review): the two lines below load .env a second time and read the
# same variable into TOKEN, which is not used elsewhere in this file --
# confirm whether this duplication is intentional
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
# path of the bot's JSON data file, next to this script
JSON_FILE = str(os.path.dirname(os.path.realpath(__file__))) + '/data.json'
def get_prefix(client, message):
    """Return the command prefix configured for the message's guild.

    Reads prefixes.json on every message (so edits take effect live).
    Falls back to "." for direct messages (no guild) and for guilds that
    are missing from the file, instead of raising AttributeError/KeyError.
    """
    with open("prefixes.json", "r") as f:
        prefixes = json.load(f)
    if message.guild is None:
        return "."
    return prefixes.get(str(message.guild.id), ".")
# create the bot; the prefix is looked up per-guild on every message
bot = commands.Bot(command_prefix = get_prefix)
@bot.event
async def on_ready():
    """Log the bot's name and every guild it belongs to once connected."""
    print(f'{bot.user.name} sunucuya bağlandı')
    # check if bot has connected to guilds
    if len(bot.guilds) > 0:
        print('kodlar yazıldı:')
        for guild in bot.guilds:
            print(f'* {guild.name}#{guild.id}')
@bot.event
async def on_guild_join(guild):
    # A new guild added the bot: register the default "." prefix for it
    # and persist the mapping back to disk.
    with open("prefixes.json", "r") as fp:
        prefix_map = json.load(fp)
    prefix_map[str(guild.id)] = "."
    with open("prefixes.json", "w") as fp:
        json.dump(prefix_map, fp, indent=4)
@bot.event
async def on_guild_remove(guild):
    """Drop the guild's prefix entry when the bot leaves a guild."""
    with open("prefixes.json", "r") as f:
        prefixes = json.load(f)
    # pop with a default so a guild that was never registered (e.g. joined
    # before prefixes.json existed) does not raise KeyError
    prefixes.pop(str(guild.id), None)
    with open("prefixes.json", "w") as f:
        json.dump(prefixes, f, indent=4)
@bot.command()
async def prefixdəyiş(ctx, prefix):
    # Change this guild's command prefix, persist it, and confirm.
    with open("prefixes.json", "r") as fp:
        prefix_map = json.load(fp)
    prefix_map[str(ctx.guild.id)] = prefix
    with open("prefixes.json", "w") as fp:
        json.dump(prefix_map, fp, indent=4)
    await ctx.send(f"Prefix {prefix} olaraq dəyişdirildi.")
@bot.command(name="salam")
async def Salam(ctx):
guild = ctx.guild
channel = ctx.channel
author = ctx.author
command = ctx.message.content
await channel.send("Əleyküm")
@bot.command(name="necəsiz")
async def HalXetir(ctx):
guild = ctx.guild
channel = ctx.channel
author = ctx.author
command = ctx.message.content
await channel.send("Şükür Allaha. Botuq da. Yola veririk. Sən necəsən?")
@bot.command(name="sikayet")
async def sikayet(ctx, *, member):
guild = ctx.guild
channel = ctx.channel
author = ctx.author
command = ctx.message.content
await channel.send("Şikayət başarıyla göndərildi.")
@bot.command(name="şikayət")
async def shikayet(ctx):
guild = ctx.guild
channel = ctx.channel
author = ctx.author
command = ctx.message.content
await channel.send("Ala yenə noldu?\n\n Şikayət üçün **.sikayet @istifadəçiadı** yaz.")
@bot.command(name="adminlər")
async def adminler(ctx):
guild = ctx.guild
channel = ctx.channel
author = ctx.author
command = ctx.message.content
await channel.send(" ")
# Discord user ID of the bot owner -- fill in your own ID here.
# (The original line read "ctx.author.id == Sizin ID-niz", the Azerbaijani
# placeholder for "your ID", which is a syntax error and broke the module.)
OWNER_ID = 0
def yoxlama(ctx):
    """Command check: only the configured owner may run the command."""
    return ctx.author.id == OWNER_ID
@bot.command()
@commands.check(yoxlama)
async def sil(ctx, amount=5):
    # Bulk-delete the most recent *amount* messages (default 5) in this
    # channel; restricted to the owner by the yoxlama check.
    await ctx.channel.purge(limit=amount)
@bot.command()
@commands.check(yoxlama)
async def kick(ctx, member : discord.Member, *, reason=None):
    # Kick *member* from the guild (owner-only), then confirm in-channel.
    await member.kick(reason=reason)
    await ctx.send(f'{member.mention} kickləndi.')
@bot.command()
@commands.check(yoxlama)
async def ban(ctx, member : discord.Member, *, reason=None):
    """Ban *member* from the guild, optionally recording a reason."""
    await member.ban(reason=reason)
    await ctx.send(f'{member.mention} banlandı.')
@bot.command()
@commands.check(yoxlama)
async def unban(ctx, *, member):
    """Lift a ban given 'Name#1234'; silently does nothing on no match."""
    banned_users = await ctx.guild.bans()
    target_name, target_disc = member.split("#")
    for entry in banned_users:
        banned = entry.user
        if banned.name == target_name and banned.discriminator == target_disc:
            await ctx.guild.unban(banned)
            await ctx.send(f'{banned.mention} istifadəçisinin banı qaldırıldı.')
            return
@bot.command(description="Istifadecini muteleyir")
@commands.check(yoxlama)
@commands.has_permissions(manage_messages=True)
async def mute(ctx, member: discord.Member, *, reason=None):
guild = ctx.guild
muteliRole = discord.utils.get(guild.roles, name="Mutelendi")
if not muteliRole:
muteliRole = await guild.create_role(name="Mutelendi")
for channel in guild.channels:
await channel.set_permissions(muteliRole, speak=False, send_messages=False, read_messages_history=True, read_messages=False)
await member.add_roles(muteliRole, reason=reason)
await ctx.send(f"{member.mention} Muteləndi.")
await member.send(f"Siz TORO serverindən {reason} səbəbinə görə muteləndiniz {guild.name}")
if __name__ == "__main__":
# botu başlat
bot.run(DISCORD_TOKEN)
|
carlingkirk/ircportland | plugins/comic.py | # WeedBotRefresh's comic.py - based on nekosune's comic.py
from util import hook
import os
from random import shuffle
from PIL import Image, ImageDraw, ImageFont
import base64
import requests
import json
from datetime import datetime
from io import BytesIO
# Module-level state; config values are actually read lazily from the bot
# config inside comic()/make_comic(), so most of these stay None.
api_key = None          # imgur client id (re-read in comic())
background_file = None  # panel background image path (re-read in make_comic())
font_file = None        # TrueType font path (re-read in make_comic())
font_size = None        # font size (re-read in make_comic())
buffer_size = None      # NOTE(review): shadowed by a local in track(); unused here
initialized = False     # NOTE(review): never set anywhere visible in this file
mcache = dict()         # per-channel history: {channel: [(timestamp, nick, text)]}
@hook.event("*")
def track(paraml, inp=None, bot=None, nick=None, channel=None):
user_ignore_list = bot.config.get("user_ignore_list")
if inp and len(inp) > 1 and inp[1] and str(inp[1]) != ".comic" and nick not in user_ignore_list and inp[1][0] != ".":
key = (channel)
if key not in mcache:
mcache[key] = []
value = (datetime.now(), nick, str(inp[1]))
mcache[key].append(value)
buffer_size = 30
mcache[key] = mcache[key][-1*buffer_size:]
@hook.command
def comic(inp, bot=None, nick=None, channel=None):
    """Render the channel's recent chatter as a comic strip, upload the
    JPEG to imgur, and return the image link (or imgur's error text).
    """
    text = channel  # NOTE(review): unused
    try:
        msgs = mcache[channel]
    except KeyError:
        return "Not Enough Messages."
    # Walk backwards through history, stopping after 10 messages, a
    # >2 minute gap, or more than 3 distinct speakers.
    sp = 0
    chars = set()
    for i in range(len(msgs)-1, 0, -1):
        sp += 1
        diff = msgs[i][0] - msgs[i-1][0]
        chars.add(msgs[i][1])
        if sp > 10 or diff.total_seconds() > 120 or len(chars) > 3:
            break
    msgs = msgs[-1*sp:]
    # Group messages into panels of at most two bubbles, starting a new
    # panel when the same nick speaks twice in a row.
    panels = []
    panel = []
    for (d, char, msg) in msgs:
        if len(panel) == 2 or len(panel) == 1 and panel[0][0] == char:
            panels.append(panel)
            panel = []
        # Render CTCP ACTION ("/me ...") messages as *action* text.
        if msg.count('\x01') >= 2:
            ctcp = msg.split('\x01', 2)[1].split(' ', 1)
            if len(ctcp) == 1:
                ctcp += ['']
            if ctcp[0] == 'ACTION':
                msg = '*'+ctcp[1]+'*'
        panel.append((char, msg))
    panels.append(panel)
    print(repr(chars))
    print(repr(panels))
    # Initialize a variable to store our image
    image_comic = BytesIO()
    # Save the completed composition to a JPEG in memory
    make_comic(chars, panels, bot).convert("RGB").save(image_comic, format="JPEG", quality=85)
    api_key = bot.config.get("api_keys", {}).get("imgur_client_id")
    # Get API Key, upload the comic to imgur
    headers = {'Authorization': 'Client-ID ' + api_key}
    base64img = base64.b64encode(image_comic.getvalue())
    url = "https://api.imgur.com/3/upload.json"
    # NOTE(review): verify=False disables TLS certificate checking for the
    # imgur upload -- confirm this is intentional.
    r = requests.post(url, data={'key': api_key, 'image': base64img, 'title': 'Weedbot Comic'}, headers=headers, verify=False)
    val = json.loads(r.text)
    try:
        return val['data']['link']
    except KeyError:
        return val['data']['error']
def wrap(st, font, draw, width):
    """Greedily wrap *st* into lines at most *width* pixels wide.

    Widths are measured with draw.textsize using *font*. Returns
    (lines, (max_line_width, total_height)). A single word wider than
    the line is emitted on its own (and may exceed *width*); likewise the
    final line is taken whole once the remaining words run out.
    """
    words = st.split()
    lines = []
    max_w = 0
    total_h = 0
    while words:
        take = 1
        while take < len(words):
            w, h = draw.textsize(" ".join(words[:take]), font=font)
            if w > width:
                take -= 1
                break
            take += 1
        if take == 0 and len(words) > 0:
            # Current word alone is wider than the line: take it anyway.
            take = 1
        w, h = draw.textsize(" ".join(words[:take]), font=font)
        max_w = max(max_w, w)
        total_h += h
        lines.append(" ".join(words[:take]))
        words = words[take:]
    return lines, (max_w, total_h)
def rendertext(st, font, draw, pos):
    """Draw each line of *st* top-to-bottom starting at *pos* in pale blue."""
    x = pos[0]
    y = pos[1]
    for line in st:
        _, line_h = draw.textsize(line, font=font)
        draw.text((x, y), line, font=font, fill=(0x5b, 0xf6, 0xff, 0xff))
        y += line_h
def fitimg(img, width, height):
    """Scale *img* to fit inside width x height, preserving aspect ratio."""
    by_width = float(width) / img.size[0]
    by_height = float(height) / img.size[1]
    candidate = (img.size[0] * by_width, img.size[1] * by_width)
    if candidate[0] > width or candidate[1] > height:
        # Width-based scaling overflows; fall back to height-based.
        candidate = (img.size[0] * by_height, img.size[1] * by_height)
    return img.resize((int(candidate[0]), int(candidate[1])), Image.ANTIALIAS)
def make_comic(chars, panels, bot):
    """Compose the comic image: one 450x300 panel per entry in *panels*,
    each with up to two speech bubbles and character sprites chosen at
    random from the chars/ directory.
    """
    panelheight = 300
    panelwidth = 450
    # Assign a random sprite file to each distinct nick.
    filenames = os.listdir('chars/')
    shuffle(filenames)
    filenames = map(lambda x: os.path.join('chars', x), filenames[:len(chars)])
    chars = list(chars)
    chars = zip(chars, filenames)
    charmap = dict()
    for ch, f in chars:
        charmap[ch] = Image.open(f)
    imgwidth = panelwidth
    imgheight = panelheight * len(panels)
    background_file = bot.config.get("resources", {}).get("background")
    bg = Image.open(background_file).convert("RGB")
    im = Image.new("RGBA", (imgwidth, imgheight), (0xff, 0xff, 0xff, 0xff))
    font_file = bot.config.get("resources", {}).get("font")
    font_size = bot.config.get("resources", {}).get("font_size")
    font = ImageFont.truetype(font_file, font_size)
    for i in range(len(panels)):
        pim = Image.new("RGBA", (panelwidth, panelheight), (0xff, 0xff, 0xff, 0xff))
        pim.paste(bg, (0, 0))
        draw = ImageDraw.Draw(pim)
        st1w = 0; st1h = 0; st2w = 0; st2h = 0
        # First bubble top-left; optional second bubble right-aligned below it.
        (st1, (st1w, st1h)) = wrap(panels[i][0][1], font, draw, 2*panelwidth/3.0)
        rendertext(st1, font, draw, (10, 10))
        if len(panels[i]) == 2:
            (st2, (st2w, st2h)) = wrap(panels[i][1][1], font, draw, 2*panelwidth/3.0)
            rendertext(st2, font, draw, (panelwidth-10-st2w, st1h + 10))
        texth = st1h + 10
        if st2h > 0:
            texth += st2h + 10 + 5
        # Sprites fill the remaining vertical space below the text.
        maxch = panelheight - texth
        im1 = fitimg(charmap[panels[i][0][0]], 2*panelwidth/5.0-10, maxch)
        pim.paste(im1, (10, panelheight-im1.size[1]), im1)
        if len(panels[i]) == 2:
            # Second speaker mirrored on the right edge.
            im2 = fitimg(charmap[panels[i][1][0]], 2*panelwidth/5.0-10, maxch)
            im2 = im2.transpose(Image.FLIP_LEFT_RIGHT)
            pim.paste(im2, (panelwidth-im2.size[0]-10, panelheight-im2.size[1]), im2)
        # Panel border.
        draw.line([(0, 0), (0, panelheight-1), (panelwidth-1, panelheight-1), (panelwidth-1, 0), (0, 0)], (0, 0, 0, 0xff))
        del draw
        im.paste(pim, (0, panelheight * i))
    return im
|
tkcroat/SC | UnderDev/SC_under_development.py | <reponame>tkcroat/SC
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 3 15:13:53 2016
@author: tkcplayers['DOB'] = pd.to_datetime(players['DOB'], format='%Y-%m-%d')
"""
# Standard library
import csv
import datetime
import glob
import glob
import math
import os
import random
import re
import smtplib
import string
import sys
import tkinter as tk
from datetime import date
from datetime import datetime
from datetime import timedelta
from email.mime.text import MIMEText
# Third party
import flask
import numpy as np
import numpy as np
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw, ImageFont
# Local
from pkg.SC_signup_functions import findcards
# NOTE(review): duplicate import lines retained from the original; safe to
# dedupe. random/smtplib/timedelta were used below but never imported.
#%%
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 08:39:33 2020
@author: Kevin
"""
# One-off conversion of the parish/school lookup tables from xlsx to csv.
# NOTE(review): `cnf` is not defined in this file -- presumably a config
# module; confirm before running.
parish=pd.read_excel(cnf._INPUT_DIR +'\\CYC_parish_table.xlsx')
parish.to_csv(cnf._INPUT_DIR +'\\CYC_parish_table.csv', index=False)
parish=pd.read_csv(cnf._INPUT_DIR +'\\CYC_parish_table.csv')
schools=pd.read_excel(cnf._INPUT_DIR +'\\CYC_school_table.xlsx')
# BUGFIX(review): the next two lines were incomplete scratch (a read_excel
# call with no sheet_name value, then a bare prose line) and made the whole
# module a SyntaxError; commented out to restore parseability.
# designation = pd.read_excel(cnf._INPUT_DIR +'\\CYC_school_table.xlsx', sheet_name=)
# Saint Michael School of Clayton
# Get schools list for recent signups
# do a global find/replace
#%% School standarization to match Pat Moore table
def getCurrSchools():
    """Return this season's school names normalized toward Pat Moore's table.

    NOTE(review): reconstructed from unparseable scratch (the original was
    missing the def colon, had an unterminated dict, and called
    str.replace with one argument). Relies on the module-level `players`
    frame; verify the rename map before relying on it.
    """
    renameDict = {'Soulard': 'The Soulard School/City Charter',
                  'Cabrini': 'St. Frances Cabrini Academy',
                  'Cecilia': 'St. Cecilia (St. Louis)',
                  'McKinley': 'McKinley CLA Middel/City Magnet',
                  'Lafayette': 'Lafayette'}
    raw_schools = players.School.unique().tolist()
    cleaned = []
    for rs in raw_schools:
        # strip punctuation before lookup so 'St. Cecilia' matches 'St Cecilia'
        rs = rs.replace(',', '').replace('.', '')
        cleaned.append(renameDict.get(rs, rs))
    return cleaned
def getCityPublic(schools, currSchool, myZip):
    """Pick an approved city public/magnet school matching a player's zip.

    Args:
        schools: school table with 'Name', 'School Designation',
            'Parish per Parish Finder' and 'Zip' columns.
        currSchool: fallback value when no zip match exists.
        myZip: player's home zip code (int), e.g. 63116.
    Returns:
        A randomly chosen matching 'City Public' school name, else a
        'City Magnet' match, else currSchool unchanged.
    NOTE(review): requires `import random` at module top (added) -- the
    original called random.choice without the import.
    """
    schools = schools.rename(columns={'School Designation': 'Designation',
                                      'Parish per Parish Finder': 'Parish'})
    slps = schools[schools['Designation'] == 'City Public']
    magnet = schools[schools['Designation'] == 'City Magnet']
    if myZip in slps.Zip.unique():
        return random.choice(slps[slps['Zip'] == myZip]['Name'].tolist())
    elif myZip in magnet.Zip.unique():
        return random.choice(magnet[magnet['Zip'] == myZip]['Name'].tolist())
    else:
        print('No matching school in zip code {}'.format(myZip))
        return currSchool
def findSLPS(myZip):
    """Randomly assign a generic 'SLPS' entry to a specific St. Louis
    public school based on home zip code.

    TODO(review): unimplemented stub -- currently returns None. (The stray
    trailing comma in the original parameter list was dropped; behavior is
    unchanged.)
    """
    return None
#%% New pygsheets method of finding/processing new signups
def assignGsignupKey(numkeys):
    '''
    Allocate *numkeys* new integer Gkey values, reusing gaps in the
    used-key sequence before extending past the current maximum.
    NOTE(review): incomplete scratch -- `headers`, `usedkeys` and `allnums`
    are free variables never defined in this file, and the computed
    `availkeys` list is never returned.
    '''
    if 'Gkey' not in headers:
        # first time assignment/addition of gkey column
        usedkeys=np.ndarray.tolist(usedkeys)
        availkeys=[i for i in allnums if i not in usedkeys]
        if len(availkeys)<numkeys: # get more keys starting at max+1
            needed=numkeys-len(availkeys)
            for i in range(0,needed):
                nextval=int(max(usedkeys)+1) # if no interior vals are available find next one
                availkeys.append(nextval+i)
def processNewGsignups(myPygSheet, newrownums):
    '''
    Process newly added signup rows from the google sheet.
    NOTE(review): stub -- both parameters are ignored, the sheet is
    re-downloaded from an undefined global `sheetID`, and nothing is
    returned.
    '''
    myPygSheet=downloadSignups(sheetID)
#%%
def downloadSignups(sheetID):
    ''' Download all from current season's signups and adds Gkey to remote sheet
    Using pygsheets version w/ assigned Gkey.. original forms only gets addition
    of Gkey and cols for plakey/ famkey
    processed signups w/ Plakey/Famkey in separate sheet
    args:
        sheetID - signup google sheet ID
    returns:
        the pygsheets worksheet (reopened after the Gkey update)
    pygsheets short tutorial
    https://medium.com/game-of-data/play-with-google-spreadsheets-with-python-301dd4ee36eb
    NOTE(review): getGoogleCreds / pyg / changeColNames are defined
    elsewhere in the project.
    '''
    creds = getGoogleCreds()  # google.oauth2.credentials
    gc = pyg.authorize(custom_credentials=creds)  # pygsheets client
    sh = gc.open_by_key(sheetID)
    myPygSheet = sh[0]
    mycols = myPygSheet.get_row(1)

    def findMax(vals):
        # Largest int-convertible value in vals (non-ints ignored).
        maxKey = 0
        for val in vals:
            try:
                if int(val) > maxKey:
                    maxKey = int(val)
            except:
                pass
        return maxKey

    # Can't necessarily count on rows not getting renumbered or resorted
    if 'Gkey' not in mycols:  # Initialize spreadsheet key (Gkey)
        myPygSheet.add_cols(2)  # auto adds to gsheet
        # initialize this new row for each occupied column
        # BUGFIX(review): was len(mycols+1) -- list + int raises TypeError;
        # the intent is clearly the next column index, len(mycols)+1.
        myPygSheet.update_col(len(mycols)+1, ['Gkey', 1, 2, 3])
        # Find numbers of occupied rows .. timestamp always occupied for entries
        # pygsheets is zero-indexed, but worksheet addresses start w/ 1
        occRows = [i+1 for i, val in enumerate(myPygSheet.get_col(1)) if val != '']
        gkeyvals = ['Gkey']
        gkeyvals.extend([str(i) for i in range(2, len(occRows)+1)])
        # add Gkeys to this newly-added column
        myPygSheet.update_col(len(mycols)+1, gkeyvals)
        # myPygSheet.update_col(len(mycols)+2,['Processed') # col to track processing status
    else:
        # Find rows w/ occupied timestamp entry
        tstampRows = [i+1 for i, val in enumerate(myPygSheet.get_col(1)) if val != '']
        # Get column w/ Gkeys (and terminate at length of timestamps)
        gkeys = myPygSheet.get_col(mycols.index('Gkey')+1)[:len(tstampRows)]
        blankInds = [i for i, val in enumerate(gkeys) if val == '']
        startKey = findMax(gkeys)+1
        for i, bl in enumerate(blankInds):
            gkeys[bl] = str(startKey+i)
        # Write back to column
        myPygSheet.update_col(mycols.index('Gkey')+1, gkeys)
    # Gkey and Processed cols already present
    headers = changeColNames(mycols)
    # NOTE(review): gsignups is built but unused -- retained from scratch.
    gsignups = pd.DataFrame(myPygSheet.get_all_records()[1:], columns=headers)
    # Reopen so the returned worksheet reflects the Gkey changes.
    sh = gc.open_by_key(sheetID)
    myPygSheet = sh[0]
    return myPygSheet
#%%
# NOTE(review): scratch cell, not importable as-is -- it contains
# module-level `return` statements (SyntaxError) and references undefined
# names (myPygSheet, getGoogleCreds, build, gc, sheetID, rangeName,
# changeColNames). Wrap in a function or delete before shipping.
gsignups = myPygSheet.get_as_df()
mycols=myPygSheet.get_row(1)
creds = getGoogleCreds() # google.oauth2.credentials
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
result = sheet.values().get(spreadsheetId=sheetID,
                            range=rangeName).execute()
sh = gc.open_by_key(sheetID)
myPygSheet=sh[0]
mycols=myPygSheet.get_row(1) # gets column names
myPygSheet=sh[0]
mycols=myPygSheet.get_row(1) # gets column names
values = result.get('values', []) # list of lists
if len(values)==0:
    print('Signup data not found')
    return pd.DataFrame()
headers = changeColNames(values[0])
# Google API retrieved rows each become lists truncated at last value
newValList=[]
for vallist in values[1:]:
    while len(vallist)<len(headers):
        vallist.append('') # add blanks for missing/optional answer
    newEntry={}
    for i, val in enumerate(vallist):
        newEntry[headers[i]]= val
    newValList.append(newEntry)
signups=pd.DataFrame(newValList, columns=headers)
return signups
#%%
def writeGsheetChanges(df, pygSheet):
    """Overwrite the remote worksheet with *df*, anchored at cell A1."""
    pygSheet.set_dataframe(df, 'A1')
# NOTE(review): interactive scratch exploring pygsheets round-trips;
# references undefined names (wks1, SCapi, test, teams). Not runnable.
wks1.get_row(2)
wks1.update_value('C12','testval')
# Get pygsheet as pandas (screws up col order but is actively linked)
# write pandas as pygsheet
wks1.set_dataframe(test)
df1 = SCapi.readPaylog() # direct download version
df2= pd.DataFrame(wks1.get_all_records()) # cols out of order
wks1.set_dataframe(teams, 'A1')
# Strategy for alteration of online gsheets
# pygsheets is actively linked ... use update_value method?
# Google sheets undo button only works for browser made changes (not programmatic)
# Check for differences between 2 dataframes
# pygsheets tutorial
# https://medium.com/game-of-data/play-with-google-spreadsheets-with-python-301dd4ee36eb
# Revert to prior by clicking "last edit..."
def diffDf(df1, df2):
    """Return rows that appear in exactly one of *df1*/*df2*.

    Rows present in both frames are dropped entirely; the remainder is
    sorted by the 'First' then 'Last' columns (both frames are assumed to
    have them).

    BUGFIX(review): the original computed the difference but never
    returned it.
    """
    combined = pd.concat([df1, df2])
    altrows = combined.drop_duplicates(keep=False)
    altrows = altrows.sort_values(['First', 'Last'])
    return altrows
def modGsheet(df):
    '''
    Normalize column values pulled from the google sheet.
    NOTE(review): incomplete -- the converters below are defined but never
    applied to *df*, and nothing is returned.
    '''
    # perform datetime and int/nan conversions on assorted columns
    def convInt(val):
        # int where possible, otherwise NaN
        try:
            return int(val)
        except:
            return np.nan
    def convDate(val):
        # try m/d/YYYY, then m/d/yy, then ISO date with the time stripped;
        # passes the raw value through (with a warning) if all fail
        try:
            return datetime.strptime(val, '%m/%d/%Y')
        except:
            try:
                return datetime.strptime(val, '%m/%d/%y')
            except:
                try:
                    return datetime.strptime(val.split(' ')[0], '%Y-%m-%d')
                except:
                    print('Error converting', val)
                    return val
def write2Sheet(sheetID, rangeName):
    '''
    NOTE(review): placeholder -- the body is only an example Sheets API
    request payload (a bare dict expression with no effect; the function
    returns None). This def is shadowed by the second write2Sheet defined
    later in this file.
    '''
    {
        "range": "Sheet1!A1:D5",
        "majorDimension": "ROWS",
        "values": [
            ["Item", "Cost", "Stocked", "Ship Date"],
            ["Wheel", "$20.50", "4", "3/1/2016"],
            ["Door", "$15", "2", "3/15/2016"],
            ["Engine", "$100", "1", "3/20/2016"],
            ["Totals", "=SUM(B2:B4)", "=SUM(C2:C4)", "=MAX(D2:D4)"]
        ],
    }
#%%
def write2Sheet(sheetID, rangeName):
    ''' Write to google sheet from current season's signups
    NOTE(review): despite the name/summary, this body only READS the given
    range and returns it as a DataFrame; it duplicates downloadSignups'
    read path and shadows the write2Sheet defined just above. `build`,
    `getGoogleCreds` and `changeColNames` come from elsewhere in the
    project.
    '''
    creds = getGoogleCreds() # google.oauth2.credentials
    service = build('sheets', 'v4', credentials=creds)
    # Call the Sheets API
    sheet = service.spreadsheets()
    result = sheet.values().get(spreadsheetId=sheetID,
                                range=rangeName).execute()
    values = result.get('values', []) # list of lists
    if len(values)==0:
        print('Signup data not found')
        return pd.DataFrame()
    headers = changeColNames(values[0])
    # Google API retrieved rows each become lists truncated at last value
    newValList=[]
    for vallist in values[1:]:
        while len(vallist)<len(headers):
            vallist.append('') # add blanks for missing/optional answer
        newEntry={}
        for i, val in enumerate(vallist):
            newEntry[headers[i]]= val
        newValList.append(newEntry)
    signups=pd.DataFrame(newValList, columns=headers)
    return signups
#%%
# custom mod after removal of unis from mastersignups
# NOTE(review): scratch; `Mastersignups` is not defined in this file.
mastersign=Mastersignups.copy()
mycols=mastersign.columns
mycols=[i for i in mycols if i not in ['Issue date','Uniform#','UniReturnDate']]
def writeuniformlog(mastersign, teams, unilist, players, season, year, paylog):
    ''' From mastersignups and teams, output contact lists for all teams/all sports
    separately into separate tabs of xls file
    autosaves to "Fall2016"_uniform_log.xls
    args:
        mastersign - list of signups and team assignments
        teams - tab w/ uniform setname for this team
        unilist - check if number/size already checked out for player
        players - roster info (only the School column is used here)
        season, year - label/filter for this season's file
        paylog - payment log used to interpolate existing deposits
    NOTE(review): `finddeposits` and `cnf` are defined elsewhere in the
    project.
    '''
    mastersign=mastersign[mastersign['Year']==year] # remove prior years in case of duplicate name
    mastersign=mastersign.reset_index(drop=True)
    # keep track of needed cols
    mycols=list(mastersign.columns)
    # Just need school from players.csv
    mastersign=pd.merge(mastersign, players, how='left', on=['Plakey'], suffixes=('','_r'))
    mycols.append('School')
    mastersign=mastersign[mycols]
    # rename uniforms col from teams to setname (matches other uni tracking sheets)
    teams=teams.rename(columns={'Uniforms':'Setname'})
    # Only need teams w/ issued uniforms
    teams=teams[ (pd.notnull(teams['Setname'])) & (teams['Setname']!='') & (teams['Setname']!='N') ]
    uniformlist= list(teams.Team.unique())
    # Can just eliminate any entries not in uniform deposit list
    mastersign=mastersign[mastersign['Team'].isin(uniformlist)] # only players on teams needing uniforms
    # Need uniform set name (from team tab)
    mastersign=pd.merge(mastersign, teams, how='left', on=['Year','Sport','Team'], suffixes=('','_r'))
    mycols.append('Setname')
    mastersign=mastersign[mycols]
    # Now see if any team players already have checked out uniform of correct type
    outset=unilist[(unilist['Location']=='out') & (unilist['Plakey']!=0)]
    outset=outset.rename(columns={'Number':'Uniform#'})
    # BUGFIX(review): list.append takes a single argument; the original
    # mycols.append('Number','Size') raised TypeError at runtime.
    mycols.extend(['Number', 'Size'])
    # Now find existing uniform deposits from paylog
    # Handle deposits by family (and need single entry per family)
    # returns mastersign w/ deposit info interpolated
    mastersign=processDeposits(paylog, mastersign)
    # single uniform log per season
    contactfile='\\'+str(season)+'_'+str(year)+'_uniform_log.xlsx'
    writer=pd.ExcelWriter(cnf._OUTPUT_DIR+contactfile, engine='openpyxl',date_format='mm/dd/yy')
    # Columns needed for log output
    outcols=['First', 'Last', 'School', 'Issue date', 'Uniform#', 'Size', 'Deposit date',
        'Amount', 'Deptype', 'DepComment', 'UniReturnDate', '$ returned',
        'Comments', 'Plakey', 'Famkey']
    mycols=['First', 'Last', 'School', 'Issue date', 'Uniform#', 'Size', 'Amount',
        'Deposit type', 'Deposit date', 'UniReturnDate', '$ returned',
        'Comments', 'Plakey', 'Famkey']
    tabnamelist=[]
    # TODO Find size from this year's sport signup
    for team in uniformlist:
        thismask = mastersign['Team'].str.contains(team, case=False, na=False)
        thisteam=mastersign.loc[thismask] # this team's signups
        sport=thisteam.iloc[0]['Sport'].lower()
        thisteam=finddeposits(thisteam, paylog) # thisteam is this team's slice of info from master_signups
        missing=[i for i in mycols if i not in thisteam.columns]
        for miss in missing:
            thisteam[miss]=''
        thisteam=thisteam[mycols] # organize in correct format for xls file
        tabname=sport[0:3]+team[0:3] # name tab with team's name..
        if tabname in tabnamelist:
            tabname+='2' # handles two teams per grade
        tabnamelist.append(tabname)
        thisteam.to_excel(writer, sheet_name=tabname,index=False) # this overwrites existing file
    writer.save()
    return
def processDeposits(paylog, mastersign):
    ''' For uniform issue, need to handle multiple players from family and
    multiple historical deposits together.
    args:
        paylog - payment logbook; rows without a 'Deposit' value are ignored
        mastersign - signups for this season, uni issue only subset
    returns:
        mastersign -- TODO(review): deposit interpolation is unfinished;
        the frame is currently returned unchanged so the caller
        (writeuniformlog) keeps working.
    '''
    fams = list(mastersign.Famkey.unique())
    paylog = paylog.dropna(subset=['Deposit'])  # only need deposit info
    # Need to handle the deposits by family
    paylog = paylog[paylog['Famkey'].isin(fams)]
    # Handle multiple entries from same family
    dups = paylog[paylog.duplicated('Famkey')]
    # BUGFIX(review): two incomplete statements here ("master" and
    # "dupPlayers=") were SyntaxErrors that made the module unparseable;
    # commented out pending a real implementation.
    # master
    # dupPlayers =
    test = paylog[paylog.duplicated('Famkey', keep=False)]
    famgroup = mastersign.groupby(['Famkey'])
    famDepGroup = paylog.groupby(['Famkey'])
    for fk, gr in famgroup:
        # see if famkey is present in famDep
        if fk in list(famDepGroup.groups.keys()):
            deps = famDepGroup.get_group(fk)
    return mastersign
#%%
def findOpponents(teamName, badDay, sched, **kwargs):
    ''' List the opponent(s) of *teamName* on *badDay*.
    args:
        teamName - name as string (as in schedule)
        badDay - datetime of game in question (usually the problematic one)
        sched - full schedule with Date/Home/Visitor/Start columns
    kwargs:
        badTime: start-time string selecting one game when the team plays
            more than once that day
    returns:
        opponents as a list (or None when a badTime filter fails to
        isolate exactly one game)
    '''
    day_games = sched[(sched['Date'] == badDay) &
                      ((sched['Home'] == teamName) | (sched['Visitor'] == teamName))]
    if 'badTime' in kwargs:
        # TODO make more robust using time conversion from string?
        day_games = day_games[day_games['Start'] == kwargs.get('badTime')]
        if len(day_games) != 1:
            print('Time matched game not found')
            return
    participants = day_games.Home.to_list() + day_games.Visitor.to_list()
    return [team for team in participants if team != teamName]
def findTeamSwap(teamName, badDay, sched, gamerank=0, **kwargs):
    ''' Swap team1 (with conflict) into another existing game (find opponent switching existing
    away team)...
    args:
        teamName - name as listed on schedule (e.g. 'Cabrini-Clavin-6GD'),
            not the traditional league name
        badDay - datetime of the conflicting game day
        sched - full game schedule
        gamerank - index in list of ranked candidate swaps to choose
            (defaults 0 for best match); bump if the first run is
            unsatisfactory for some reason
    kwargs:
        badTime - string matching single game to change (not double game), e.g. '7:30 PM'
    returns:
        swapOld - original offending set of games
        swapNew - replacement w/ opponents rearranged
        swapTeam - name of the team swapped in
    NOTE(review): findByeWeek / rankSwaps / pickBestSwap are defined
    elsewhere; `timedelta` import added at module top.
    '''
    # first check for proper team name (as printed in schedule)
    allTeams = list(set(list(sched.Home.unique())+list(sched.Visitor.unique())))
    if teamName not in allTeams:
        print('Faulty team name not found in full schedule')
        return
    league = teamName.split('-')[2]  # full league 7BD1 (grade/gender/letter and sublevel)
    leagueTeams = [i for i in allTeams if league in str(i)]
    # Find teams w/ bye that weekend
    weekend = sched[(sched['Date'] > badDay-timedelta(days=3)) & (sched['Date'] < badDay+timedelta(days=3))]
    teamInAction = list(set(list(weekend.Home.unique())+list(weekend.Visitor.unique())))
    bestAvailTeams = [i for i in leagueTeams if i not in teamInAction]
    # Preference for teams w/o full/half schedule
    bestAvailTeams = [i for i in bestAvailTeams if not i.endswith('-H')]
    bestAvailTeams = [i for i in bestAvailTeams if not i.endswith('-F')]
    rankedSwaps = {}  # dict to hold possible games and assigned score
    # Find timespan of bye weekend for team1 (best swap)
    byeWeeks = findByeWeek(teamName, sched)
    for byeWeek in byeWeeks:
        # Find possible swap/replacement games in this bye week
        swapGames = sched[(sched['Date'] >= byeWeek[0]) & (sched['Date'] <= byeWeek[1])]
        # Find games for best swap team candidates while ensuring original team doesn't play itself
        swapGames = swapGames[(swapGames['Visitor'].isin(bestAvailTeams)) & (swapGames['Home'] != teamName)]
        theseSwaps = rankSwaps(teamName, badDay, bestAvailTeams, sched, swapGames, **kwargs)
        rankedSwaps.update(theseSwaps)
    # BUGFIX(review): the original passed `gameRank` (undefined name);
    # the parameter is spelled `gamerank`.
    swapOld, swapNew, swapTeam = pickBestSwap(rankedSwaps, gamerank)
    return swapOld, swapNew, swapTeam
def findAllOpponents(teamName, sched):
    """Return every opponent *teamName* faces in *sched*.

    Duplicates are kept -- a team played twice appears twice.
    """
    games = sched[(sched['Home'] == teamName) | (sched['Visitor'] == teamName)]
    everyone = games.Home.to_list() + games.Visitor.to_list()
    return [team for team in everyone if team != teamName]
def findBadGames(teamName, badDay, sched, **kwargs):
    ''' Return the game(s) *teamName* plays on *badDay* as a DataFrame.
    args:
        teamName - name as string (as in schedule)
        badDay - datetime of game in question (usually the problematic one)
        sched - full schedule
    kwargs:
        badTime - start-time string narrowing the result to a single game;
            a warning is printed if the filter does not leave exactly one row
    returns:
        badGames - DataFrame with the offending game or games
    '''
    games = sched[(sched['Date'] == badDay) &
                  ((sched['Home'] == teamName) | (sched['Visitor'] == teamName))]
    if 'badTime' in kwargs:
        # TODO make more robust using time conversion from string?
        games = games[games['Start'] == kwargs.get('badTime')]
        if len(games) != 1:
            print('Time matched game not found')
    return games
def rankSwaps(teamName, badDay, bestAvailTeams, sched, swapGames, **kwargs):
    ''' if multiple possible swap games available, choose best swap by maximizing
    schedule diversity of opponents (simultaneous for both swapped teams)
    adds large penalty (+3) for setting up 3rd game against same team
    adds small penalty (+1) for swapping team out of one of its home games
    args:
        teamName - original team name w/ conflict (string)
        badDay - date of conflicting game or games
        bestAvailTeams - list of possible swap teams
        sched - full CYC schedule
        swapGames -- dataframe w/ possible swap/replacement games
    kwargs:
        'badTime' -- offensive time as string (for picking single game)
    returns:
        overlapScore -- dict keyed by (swapGameIndex, badGameIndex,
        swapTeamName) with the summed repeat-opponent penalty; lower is
        better. (NOTE(review): the original docstring claimed swapOld/
        swapNew were returned -- that happens later, in pickBestSwap.)
    '''
    # Find list of team 1 opponents (includes duplicates)
    opponents1 = findAllOpponents(teamName, sched)
    # opponents in original swapped game (len 2 if solving game conflict)
    badGames = findBadGames(teamName, badDay, sched, **kwargs)
    # Team overlap counts for each possibility
    overlapScore={}
    for ind, row in swapGames.iterrows():
        for ind2, row2 in badGames.iterrows():
            # Possible swapping team already chosen as visitor
            ct1=opponents1.count(row.Home) # existing games against this oppo
            # new opponent for original swapped team will be home in swap candidate
            # Now find min in # of games of swap team against potential new opponents
            oppos=findAllOpponents(row.Visitor, sched) # other opponents
            # Drop opponent (once) from this swap game
            oppos.remove(row.Home)
            # Now calculate minimum overlap w/ new opponent (from chosen badGame(s))
            newOpp=[i for i in [row2.Home, row2.Visitor] if i !=teamName][0]
            ct2=oppos.count(newOpp)
            # Add large triple play penalties (already playing team twice) but allow single repeat
            if ct1>=2:
                ct1=5
            if ct2>=2:
                ct2=5
            # add small penalty for switching original team out of its home game
            if row2.Home==teamName:
                ct2+=1
            # key is indices of games to swap and name of team to swap from other game
            overlapScore[ind, ind2, row.Visitor]= ct1+ct2
    return overlapScore
#%%
def pickBestSwap(rankedSwaps,gameRank=0):
    ''' After evaluating all possible swaps across all available bye
    weeks for original team, sort by rank and pick first (or pick
    one based on gameRank index if original doesn't work for some
    other reason)
    args:
        rankedSwaps -- dict w/ game score (val) and key (tuple of 2 game
            indices and the affected swap team)
        gameRank -- which ranked candidate to choose (0 = best)
    NOTE(review): this function reads `sched` and `teamName` as globals
    rather than parameters -- it only works when those happen to be in
    scope at module level. Consider adding them as arguments.
    '''
    # Flatten the score dict into a best-to-worst ordered list.
    rankSwapList=[]
    for i in range( min(rankedSwaps.values()), max(rankedSwaps.values())+1):
        theseSwaps = [key for key, val in rankedSwaps.items() if val ==i]
        rankSwapList.extend(theseSwaps)
    bestSwap=rankSwapList[gameRank] # choice from ranked list (index 0 unless overridden
    swapTeam=bestSwap[-1] # pull out swap team name
    bestSwap=bestSwap[0:2] # indices of swapped games
    swapOld = sched[sched.index.isin(bestSwap)]
    # Also return new arrangement of games
    swapNew = swapOld.copy()
    # Find/replace teamName w/ swapteam
    if teamName in [swapOld.iloc[0]['Home'],swapOld.iloc[0]['Visitor'] ]:
        if teamName==swapOld.iloc[0]['Home']:
            swapNew.at[swapOld.index[0],'Home']=swapTeam # avoids chained indexing & assignment problems
            swapNew.at[swapOld.index[1], 'Visitor']=teamName
        else:
            swapNew.at[swapOld.index[0], 'Visitor']=swapTeam
            swapNew.at[swapOld.index[1],'Visitor']=teamName
    else:
        if teamName==swapOld.iloc[1]['Home']:
            swapNew.at[swapOld.index[1], 'Home']=swapTeam
            swapNew.at[swapOld.index[0], 'Visitor']=teamName
        else:
            swapNew.at[swapOld.index[1], 'Visitor']=swapTeam
            swapNew.at[swapOld.index[0], 'Visitor']=teamName
    return swapOld, swapNew, swapTeam
#%%
# turn rankedSwaps into list
#TODO wrap in the final picker/ game alterer
# Produce sorted list from dict in best to worst order
# Best swapping games combo will have minimum overlap score
def findResched(team1, team2, gymsched):
    ''' Find a date and venue for a game ... for full reschedule not swapping
    method
    TESTING team1='6-F-6GD-Clavin' team2='<NAME>-Long-6GD-F'
    NOTE(review): incomplete -- candidate slots are computed but nothing
    is returned, and `team2` is unused.
    '''
    league=team1.split('-')[2][0:3]
    # Best match (open date both teams and venue)
    bestAvails=gymsched[ gymsched['Assignments'].str.contains(league) ]
    # Could also swap games/ opponents (but don't swap home team)
def plotAvailSlots(gymsched):
    ''' Create histogram with # of available slots by date
    NOTE(review): incomplete -- the two slices below are computed but no
    plot is drawn and nothing is returned.
    '''
    avail7 = gymsched[ gymsched['Assignments'].str.contains('7') & pd.isnull(gymsched['Home'])]
    allAvail=gymsched[ pd.isnull(gymsched['Home'])]
def findGymSlot(gymsched, thisDate):
    """Return open gym slots (rows with no Home team) on *thisDate*.

    'Date' strings in m/d/Y form are converted to datetimes in place;
    values that don't parse are passed through unchanged.
    """
    def _to_date(raw):
        try:
            return datetime.strptime(raw, '%m/%d/%Y')
        except:
            return raw
    gymsched['Date'] = gymsched['Date'].apply(_to_date)
    return gymsched[pd.isnull(gymsched['Home']) & (gymsched['Date'] == thisDate)]
def getSchedule(sched, teamname):
    ''' Returns full schedule for given team name
    args:
        sched - full CYC schedule
        teamname - team string, matched as a substring of either the Home
            or Visitor column
    returns:
        teamsched - rows of sched involving the team
    '''
    home_hits = sched['Home'].str.contains(teamname)
    away_hits = sched['Visitor'].str.contains(teamname)
    return sched[home_hits | away_hits]
#%% Vectorized version of find players
#%%
# TODO interface directly w/ google form
# TODO smarter file rename tool
# NOTE(review): scratch; `players` is not defined in this file. Writes
# players.csv to the current working directory.
Temp=players['DOB']
players.to_csv('players.csv',index=False)
def loadSchedule():
    """Prompt for a schedule file (csv/xls/xlsx) and load it as a DataFrame.

    Returns the loaded schedule, or None when the chosen file has an
    unsupported extension (a message is printed in that case).

    BUGFIX(review): the original had a stray double comma in read_csv and
    unterminated string literals in the extension test (SyntaxError), and
    '.xlsx' files never matched a plain '.xls' endswith check.
    """
    def get_file_path():
        """Popup dialog box to find db path if non-standard."""
        root = tk.Tk()  # creates pop-up window
        root.update()  # necessary to close tk dialog after askopenfilename is finished
        # tk dialog asks for a single station file
        full_path = tk.filedialog.askopenfilename(
            title='Choose schedule name',
            filetypes=[('XLS', '*.xls*'), ('csv', '*.csv')])
        root.destroy()  # closes pop up window
        return full_path

    myPath = get_file_path()
    sched = None
    if myPath.endswith('.csv'):
        sched = pd.read_csv(myPath, encoding='cp437')
    elif myPath.endswith(('.xls', '.xlsx')):
        sched = pd.read_excel(myPath)
    else:
        print('Schedule file must be CSV or Excel')
    return sched
def openSmtpObj():
    '''
    Open and return an SMTP connection for gmail send.

    Prompts interactively for the account password; returns np.nan when
    the connection or login fails.
    NOTE(review): requires `import smtplib` at module top (added) -- the
    original called smtplib without importing it.
    TODO rearrange messaging scripts to make single call
    '''
    try:
        mailserver = smtplib.SMTP('smtp.gmail.com', 587) # port 587
        mailserver.set_debuglevel(True)
        mailserver.starttls() # enable encryption for send
        print('Enter password for sponsors club gmail ')
        passwd=input()
        mailserver.login('<EMAIL>', passwd)
        mailserver.ehlo() # say hello
    # BUGFIX(review): was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.
    except Exception:
        print("Mail Server Error:", sys.exc_info()[0])
        mailserver=np.nan
    return mailserver
def removeOldKids(players):
    '''
    Remove if >16
    NOTE(review): unimplemented stub; takes the players frame and returns
    None.
    '''
def prepPMSchedule(allteams):
    '''
    Prepare a schedule for Pat Moore: drop rows with no visitor team.

    BUGFIX(review): the original filtered the frame but discarded the
    result (no return); the filtered frame is now returned.
    '''
    return allteams[pd.notnull(allteams['Visitor Name'])]
def finddiv(sched):
    '''
    Find the division for each scheduled game.
    NOTE(review): unimplemented stub.
    '''
    # TODO modify for possibility of multiple teams per grade
# NOTE(review): interactive scratch below -- references undefined names
# (findmissinginfo, df, players, famcontact, maketracksummary,
# Mastersignups) and the "index=2 row=..." line is a SyntaxError;
# '.xlxs' also looks like a typo for '.xlsx'.
df=findmissinginfo(df, players, famcontact)
index=2 row=df.loc[index]
kwargs={}
kwargs.update({'Comment':'Pay direct to OLS league'})
test=maketracksummary(Mastersignups, 2017, players)
test.to_csv('track_summ.csv', index=False)
unilogfile=pd.read_excel('Fall_2017_uniform_log.xlxs')
def sync_unilogs(unilist, Mastersignups, teams, oldteams):
    '''
    Reconcile the master uniform list against Mastersignups.
    Both are primary sources so discrepancies should be resolved (i.e. after
    inventory)
    Check-out or Check-in options if conflicting
    NOTE(review): unimplemented stub.
    '''
    # need
def update_uniinfo(unilogfile, unilist, Mastersignups):
    '''
    After uniform night or other event, use unilogfile with possible new info
    and update both unilogfile and mastersignups in self-consistent manner
    Need to synchronize master unilist, mastersignups and uniform log (often
    read back with unique information )
    Assumes unique size/number/uniset
    NOTE(review): unimplemented stub.
    '''
def display_conflict(group):
    ''' In cases of conflicting uniform info send to gui for display/resolution
    NOTE(review): unimplemented stub.
    '''
# NOTE(review): module-level scratch string and commented test calls below;
# they reference names (SCbill, alluniplayers, grouped, unis) not defined
# in this file.
'''TESTING
teams=SCbill.loadoldteams(['Winter'], [2017])
unilogfile='Winter_2017_uniform_log.xlsx'
row=alluniplayers.iloc[1]
index=row.name
year=2017
'''
# TESTING test=grouped.get_group(('Postup','13'))
# unis.groupby(['Uniforms','Year','Gender']).size()
# unis[unis['Year']==2016].groupby(['Uniforms','Year','Gender']).size()
# unis[(unis['Year']==2016) & (unis['Uniforms']=="Y")].groupby(['Uniforms','Year','Gender','Team','Sport']).size()
def mark_return_tk():
    '''
    Interactive stamping of return date on suspected returned unis in mastersignups
    maybe not necessary eventually if master unilist takes off
    NOTE(review): unimplemented; matchunisets() below calls it as
    mark_return_tk(gro), which would raise TypeError against this
    zero-argument signature.
    '''
    pass
def matchunisets(Mastersignups, teams, oldteams):
'''
Get uniset for all uniforms issued in mastersignups... needed before compare
w/ master unilist
'''
unis=Mastersignups.dropna(subset=['Issue date']) # only signups with uniform issued
# mycols=teams.columns
teams=pd.concat([teams,oldteams])
unis=pd.merge(unis, teams, how='left', on=['Year','Sport', 'Team'], suffixes=('','_2'))
# Y or nan for uniform set not useful for uniform tracking
unis=unis[pd.notnull(unis['Uniforms'])] # probably old ones we don't care about
# without knowing the set, also not useful
unis=unis[unis['Uniforms']!='Y']
# remove ?? in uniform number
unis=unis[unis['Uniform#']!='??']
# Drop returned uniforms?
unis=unis[pd.notnull(unis['UniReturnDate'])]
grouped=unis.groupby(['Uniforms','Uniform#'])
for (uniset, num), group in grouped:
if len(group)>1:
gr=group.groupby('Plakey')
for key, gro in gr:
if len(gro)>1:
mark_return_tk(gro)
print(uniset, num, len(group))
else: # multiple checkouts to same player (mark older returned)
print(uniset, num, len(group))
# multiple reports on same ..
# keep most recent issue date, ensure older ones marked returned
test=test.sort_values(['Issue date'], ascending=False)
older=test.iloc[1:]
older=older[pd.isnull(older['Return date'])]
test.iloc[0]['Issue date']
test.iloc[1]['Issue date']
unisets=np.ndarray.tolist(unis.Uniforms.unique())
unisets=[i for i in unisets if str(i)!='nan']
unisets=[i for i in unisets if i!='Y']
grouped=unis.groupby(['Uniforms','Uniform#'])
for (uniset, num), group in grouped:
print(uniset, num, len(group))
# Keeps only unreturned uniforms
outunis=outunis.loc[pd.isnull(outunis['Uni return date'])]
# Finish DOB timestamp to formatted string conversion
def maketrackroster(df, players, year):
    ''' Pat moore format for track rosters autosaved
    I;Smith;Mary;;F;04/01/2008;SGM;St. <NAME>

    Args:
        df: master signups dataframe
        players: player info dataframe (source of DOB, merged on Plakey)
        year: signup year; roster file is named for the following year (year+1)
    Returns: None (writes Cabrini_trackroster_<year+1>.csv)
    '''
    temp=df[(df['Year']==year) & (df['Sport']=='Track')]
    temp=temp[temp['Team']!='drop'] # drop the drops
    # Get DOB from player database
    temp=pd.merge(temp, players, on=['Plakey'], how='left', suffixes=('','_2'))
    temp['Type']='I'
    temp['Teamcode']='SFC'
    temp['Blank']=''
    temp['Teamname']='<NAME>'
    mycols=['Type','Last','First','Blank','Gender','DOB','Teamcode','Teamname']
    temp=temp[mycols]
    # normalize DOB to a mm/dd/yyyy string; unparseable dates become NaT/blank.
    # fixed: original called .date() on the whole Series (AttributeError) and
    # referenced an undefined `index`
    temp['DOB']=pd.to_datetime(temp['DOB'], format="%m/%d/%Y", errors='coerce')
    temp['DOB']=temp['DOB'].dt.strftime("%m/%d/%Y")
    # fixed: filename previously ended with a stray trailing dot ('.csv.')
    fname='Cabrini_trackroster_'+str(year+1)+'.csv'
    temp.to_csv(fname, index=False)
    return
def creditOLS(df, season, year, paylog, **kwargs):
''' Enter a credit/waiver into paylog for various types of signups
i.e. OLS league direct pay
df is mastersignups'''
# Convert Timestamp to datetime (default value on import)
paylog.Date=paylog.Date.apply(lambda x:x.date())
# Remove players than have dropped (drop as team assignment)
thismask=df['Team'].str.contains('drop',na=False,case=False)
df=df.loc[~thismask]
df=df.dropna(subset=['Team']) # also drop those not yet assigned to a team
CurrentSU, PriorSU =getthisperiod(df, season, year, 0) # returns subset of signups in specified period
for index, row in CurrentSU.iterrows():
if 'OLS' in row.Team:
payrow=makecredit(paylog, season, row, 30, **kwargs)
print('Credit added for', row.First, row.Last)
paylog=paylog.append(payrow, ignore_index=True)
paylog.Date
datetime.datetime.today()
datetime.datetime.now()
datetime.date(row.Date)
def makecredit(paylog, season, row, amount, **kwargs):
''' Make credit row for addition to paylog for given selected row (of specified amount) '''
row=row.set_value('Amount', 30)
row=row.set_value('Season', season)
row=row.set_value('Delivered', 'n/a')
row=row.set_value('Paytype', 'credit')
row=row.set_value('Comment', kwargs.get('Comment',''))
thisdate=datetime.datetime.strftime(datetime.datetime.now(),format="%m/%d/%Y")
row=row.set_value('Date', thisdate)
row.Date=row.Date.apply()
row.Date=row.Date.apply(lambda x:datetime.datetime.strptime(x, "%m/%d/%Y"))
pd.to_datetime.strptime(row.Date, "%m/%d/%Y")
row=row.set_value('Paykey', int(paylog.Paykey.max()+1))
return row[paylog.columns]
unilog=pd.read_excel('Master_uniform_logbook.xlsx', sheetname='Uniforms')
unilist=pd.read_excel('Master_uniform_logbook.xlsx', sheetname='Unilist')
def update_unilist():
''' Update uniform in/out and plakey based on current season's
uniform log '''
# Change size in master signups if issued size different than requested
# interactive conflicts handling
def getsizedists(Mastersignups, season, year, teams, unilog,):
''' Get size distributions desired by team's players
After team is assigned a uniform set, change unavailable sizes
move up or down based on availability '''
sportsdict={'Fall':['VB','Soccer'],
'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
sportlist=sportsdict.get(season,[])
SUs=Mastersignups[(Mastersignups['Year']==year) & (Mastersignups['Sport'].isin(sportlist))]
uniteams=teams[teams['Uniforms']!='N']
# Get size distribution of players needing uniforms
for i, team in enumerate(uniteams.Team.tolist()):
thisteam=SUs[SUs['Team']==team]
print(len(thisteam))
sizedist
# TESTING index=0 row=unilog.loc[index]
sizes=['YM','YL','YXL','S','M','L','XL','2XL']
for index, row in unilog.iterrows():
thisset=unilist[unilist['Setname']==row.Setname]
grouped=thisset.groupby(['Location','Size'])
for []
thisset.groupby(['Location','Size']).count()
for
print(len(thisset))
thisset.Number.tolist()
def updateunilog():
''' Using unilist update in, out, missing totals in unilog first page
in is instock (closet) after inventory
out includes missing & assigned (make assigned plakey list and missing
'''
def assigntotwoteams(df, Twoteams):
    ''' Randomly pick a team if two are available for the same gender/grade.

    Stub -- body not yet implemented (returns None). Called from
    assigntoteams; Twoteams is the multiple-teams-per-grade table produced
    by makemultiteam. '''
def assigntoteams(df, season, year, teams, overwrite=False):
    '''From mastersignups finds CYC team name based on year, grade, gender and sport from teams tab
    (which only contains names from this season/year to avoid screwing up old custom team assignments)

    Args:
        df: master signups dataframe (result is also autosaved to master_signups.csv)
        season: 'Fall', 'Winter' or 'Spring' (selects eligible sports)
        year: sports year to assign
        teams: this season's teams table
        overwrite: if False (default), only fill signups that have no team yet
    Returns: df with new team assignments written into the Team column
    '''
    # teamsmult has multi grade range teams with duplicates for merge matching
    # twoteams is multiple teams for same grade
    Teamsmult, Twoteams=makemultiteam(teams) # makes duplicates team entries to match both grades
    # Compare grades as ints with K=0
    df.Grade=df.Grade.replace('K','0', regex=True) # convert Ks to zeros
    df=df[pd.notnull(df['Grade'])] # shouldn't happen
    df['Grade']=df['Grade'].astype('int')
    Teamsmult['Grade']=Teamsmult['Grade'].astype('int') # ensure these are ints
    # First deal with gender, grade, sport w/ multiple team options (twoteams)
    # NOTE(review): assigntotwoteams is defined as (df, Twoteams) but called with a
    # single argument here -- likely should be assigntotwoteams(df, Twoteams); confirm
    df=assigntotwoteams(df, )
    # left merge keeps all master_signups oentries
    df=pd.merge(df, Teamsmult, how='left', on=['Year','Grade','Gender','Sport'], suffixes=('','_r'))
    # need to drop SUkey duplicates (keeping first)... occurs if >1 team per grade
    df=df.drop_duplicates(subset=['SUkey']) # drops any duplicates by unique SUkey
    # Consider all sports except Track (team assignment done separately by DOB)
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Softball','Baseball','T-ball']}
    sportlist=sportsdict.get(season)
    # this is post-merge so no chance of getting indices screwed up
    # select current sports & year and subset with new team assignment
    CurrentSU=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year) & (pd.notnull(df['Team_r']))]
    if overwrite==False: # if no overwrite, keep only those with nan for team
        CurrentSU=CurrentSU.loc[pd.isnull(CurrentSU['Team'])]
    # Never overwrite team assignment for known drops
    CurrentSU=CurrentSU[CurrentSU['Team']!='drop']
    counter=0
    for index, row in CurrentSU.iterrows():
        # all remaining can be overwritted (those w/ existing team dropped above)
        match=df[df['SUkey']==CurrentSU.loc[index]['SUkey']]
        if len(match)==1:
            thisind=match.index[0]
            # add new team assignment to correct index in original master signups
            # NOTE(review): DataFrame.set_value was removed in pandas 1.0;
            # df.at[thisind, 'Team'] = ... is the modern equivalent -- verify pinned pandas
            df=df.set_value(thisind, 'Team', CurrentSU.loc[index]['Team_r'])
            counter+=1
    print(str(counter),' player(s) newly assigned to teams')
    # now drop extra columns and sort
    mycols=['SUkey','First', 'Last', 'Grade', 'Gender', 'Sport', 'Year', 'Team', 'Plakey','Famkey', 'Family',
        'SUdate', 'Issue date', 'Uniform#','Uni return date']
    df.Grade=df.Grade.replace('K',0)
    df=df.sort_values(['Year','Sport', 'Gender', 'Grade'], ascending=True)
    df.Grade=df.Grade.replace('0','K', regex=True) # make sure any 0 grades are again replaced with K
    df=df[mycols]
    autocsvbackup(df,'master_signups', newback=True) # autobackup of master signups
    df.to_csv('master_signups.csv', index=False) # save/overwrite existing csv
    return df
# Team splitter/ rearranger
'''TESTING
sport, team, school, graderange, gender, coachinfo, playerlist=teamlist[1]
'''
duration=1.5
kwargs={}
kwargs.update({'sport':'soccer'})
kwargs.update({'subject':'Ethan soccer game'})
tempstr='10:30 AM'
pd.to_datetime(tempstr).strftime('%H:%M:%S')
famcontact=update_contact(ser, famcontact)
# from twilio.rest import TwilioRestClient # text messaging ($10/mo)
#%%
thissched=getgameschedule(sched, sport, team, school, graderange, coachinfo)
kwargs={}
kwargs.update({'teams':teams}) # all teams with Cabrini kids assigned (Cabrini + transfers)
teams=SCbill.loadoldteams('Fall', 2016) # load prior season's teams
teamlist=findschteams(df, teams)
teamnamedict=findschteams(sched, teams, coaches)
teaminfo=getteaminfo(teams)
thissched=getthissched(schname, sched)
getgameschedule(schname, sched)
for key, [div,name] in teamnamedict.items():
thissched=getgameschedule(div, name, sched)
print(div, name)
print(len(thissched))
schname=teamnamedict.get(team,'')
thissched=getthissched(schname, sched)
# TODO need a dictionary of real name and scheduled name
def getCYCschedules(df, **kwargs):
    '''Get Cabrini team schedules from Pat Moore jumbo spreadsheet
    kwargs div ...
    probably legacy as output format has changed
    '''
    # normalize raw spreadsheet column names
    colmap = {'Game Time': 'Time', 'Field Name': 'Location',
              'AwayTeam': 'Away', 'Home Team': 'Home'}
    sched = df.rename(columns=colmap)
    # optional division filter (can be combined with the other options)
    if 'div' in kwargs:
        division = kwargs.get('div', '')
        sched = sched[pd.notnull(sched['Division Name'])]
        sched = sched[sched['Division Name'].str.contains(division)]
    if 'school' in kwargs:
        # keep games where the given school appears on either side
        school = kwargs.get('school', '')
        inschool = sched['Home'].str.contains(school) | sched['Away'].str.contains(school)
        sched = sched[inschool]
    elif 'teams' in kwargs:
        # all games involving any team from the Cabrini teams list (incl. transfers)
        teams = kwargs.get('teams', pd.DataFrame())
        teamlist = findschteams(sched, teams)
        sched = sched[sched['Home'].isin(teamlist) | sched['Away'].isin(teamlist)]
    sched = sched[pd.notnull(sched['Date'])]  # removes unscheduled games
    # split combined "Day Date" field into separate columns
    tokens = sched['Date'].str.split(' ')
    sched['Day'] = tokens.str[0].str.strip()
    sched['Date'] = tokens.str[1].str.strip()
    sched = sched[['Date', 'Day', 'Time', 'Location', 'Home', 'Away']]
    # shorten team names on both sides
    for col in ('Home', 'Away'):
        sched[col] = sched[col].str.replace('St Frances Cabrini', 'Cabrini')
        sched[col] = sched[col].str.replace('St ', '')
    return sched
test=df[df['Home'].isin(teamlist)]
def getschoolnames(df):
    ''' Return sorted list of unique school names parsed from "Home Team"
    entries (format "School/team").

    Args:
        df: schedule dataframe with a 'Home Team' column
    Returns: sorted list of school name strings
    (fixed: original built the list but never returned it; sorted for
    deterministic output)
    '''
    df=df[pd.notnull(df['Home Team'])]
    teams=np.ndarray.tolist(df['Home Team'].unique())
    # school is everything before the '/' separator
    schools=sorted(set(s.split('/')[0] for s in teams))
    return schools
test=famcontact[famcontact['Family']=='Lehrer']
# TODO summary by gender/grade also needed (preliminary)
mytab=zipgroups.to_html()
import tkinter as tk
rows=Recs.get_group(('Vance','Delong'))
test=players[players['Last']=='Norwood']
messfile='messages\\CYCcard_needed.txt'
def findmissingcards(Mastersignups, season, year):
''' Find all players on CYC level teams, search for card and return list
without scan on file... send prompt/ reminder incl. team assignment
grouped by famkey '''
def updateteam():
''' Send confirmation about signup + current team summary; ask about other players
include ideal #; mention missing players '''
# TODO .. how about a more sophisticated imperfect matcher via tk... currently only used for very likely new
# player
def checkalias_gui(first, last, DOB, match):
    ''' Confirm ID of some player and possibly add alias to players.csv

    Args:
        first, last, DOB: candidate player's name and birthdate (shown in prompt)
        match: existing player row(s) being matched against (display only)
    Returns: [yesvar, aliasvar, novar] -- exactly one flag set to 1 by the
        button the user clicked.
    Fixes vs original: malformed mystr expression (syntax error), undefined
    END constant, .pack() chained onto Button() (returns None, so bind failed),
    and callbacks that shadowed the outer flags instead of setting them.
    '''
    root = tk.Tk()
    # outcome flags, set by the button callbacks below
    yesvar = 0
    novar = 0
    aliasvar = 0
    def choosea():
        # ID player but do not record alias
        nonlocal yesvar
        yesvar = 1
        root.destroy()
    def chooseb():
        # ID player and record this name as an alias
        nonlocal aliasvar
        aliasvar = 1
        root.destroy()
    def choosec():
        # reject the proposed identification
        nonlocal novar
        novar = 1
        root.destroy()
    T = tk.Text(root, height=4, width=50)
    mystr = 'Associate player %s %s %s with existing player %s' % (first, last, DOB, first)
    T.insert(tk.END, mystr)
    T.pack()
    # keep widget references before packing (pack() returns None)
    a = tk.Button(root, text='ID player but skip alias', command=choosea)
    a.pack()
    b = tk.Button(root, text='ID and add name as alias', command=chooseb)
    b.pack()
    c = tk.Button(root, text='Do not ID player', command=choosec)
    c.pack()
    root.mainloop()
    vallist = [yesvar, aliasvar, novar]
    return vallist
def moveplayers(df, team):
    ''' In case of multiple teams for same gender grade, use listbox to shift/reassign
    players from one team to another; initial assignment can be random.

    Args:
        df: signups dataframe (not yet wired into the listboxes -- placeholder items shown)
        team: team name (currently unused)
    Fixes vs original: the docstring was fused with a stray
    "from Tkinter import *" (Python 2, syntax error); widget names are now
    qualified with the module-level "import tkinter as tk".
    '''
    master = tk.Tk()  # window object
    master.title('Team tinkering')
    listbox = tk.Listbox(master)
    listbox.pack()  # embeds this on page
    listbox2 = tk.Listbox(master)
    def moveDown():
        # move the selected entry from the first listbox to the second
        move_text = listbox.selection_get()
        curindex = int(listbox.curselection()[0])
        listbox.delete(curindex)
        listbox2.insert(tk.END, move_text)
    moveBtn = tk.Button(master, text="Move Down", command=moveDown)
    moveBtn.pack()
    listbox2.pack()
    # TODO populate from df/team instead of placeholder items
    for item in ["one", "two", "three", "four"]:
        listbox.insert(tk.END, item)
    master.mainloop()  # continuous run
# This option would also send paper bill summary subsection for each family who still owes to coach
# current e-bill to coach only has brief summary table for each player
# maybe worth adding this later
def messagecoachbill(teams, coaches, billlist, players, emailtitle, messageheader):
    '''Aggregate info about text/call only players and send to coaches in single file

    Args:
        teams, coaches: team and coach contact tables
        billlist: billing summary; rows with null Email1 are the text/call-only families
        players: player DB (intended for makecoachmessage lookups)
        emailtitle, messageheader: message template pieces; the
            $RECIPIENTSTR / $EMAILTITLE placeholders are filled per coach
    '''
    noemaillist=billlist.loc[billlist['Email1'].isnull()] # subset with no working e-mail address
    teamlist=np.ndarray.tolist(teams.Team.unique())
    for i, team in enumerate(teamlist):
        # get head coach e-mail address
        coachemail=getcoachemail(team, teams, coaches)
        if coachemail=='':
            continue
        thisteam=noemaillist[noemaillist['Teams'].str.contains(team)]
        if len(thisteam)==0: # Skip team if all players have working e-mail addresses
            # could send a different message
            continue
        # NOTE(review): makecoachmessage is defined as (thisteam, players) but is
        # called here with a single argument -- confirm intended signature
        papermessage=makecoachmessage(thisteam)
        fullmessage=messageheader+papermessage
        # insert coach e-mail address and e-mail title
        fullmessage=fullmessage.replace('$RECIPIENTSTR', coachemail)
        fullmessage=fullmessage.replace('$EMAILTITLE', emailtitle)
        # NOTE(review): fullmessage is built but never sent or logged --
        # function appears unfinished (see comment below)
    # re-structure to allow testing to log (similar to ebill structure)
def makecoachmessage(thisteam, players):
    '''Passed subset of text-only players for this team (df with rows from main bill); make summary for e-mail to
    coach

    Args:
        thisteam: billing rows for this team's no-email families (needs Family,
            Plakeys, Charges, CurrPayment, Feepaydetail, Balance, Unidetail,
            Textmessage columns)
        players: player DB, used by getplayers to expand Plakeys into names
    Returns: multi-line summary string (one section per family)
    '''
    thisstring=''
    for index, row in thisteam.iterrows():
        thisstring+='Family:'+thisteam.loc[index]['Family']+'\n'
        # Plakeys is stored as a comma-separated string of int keys
        plakeys=thisteam.loc[index]['Plakeys']
        plakeys=[int(s) for s in plakeys.split(',')]
        tempstr=getplayers(plakeys, players)
        thisstring+='Players:'+tempstr+'\n'
        thisstring+='Fees for this season: '+str(thisteam.loc[index]['Charges'])+'\n'
        thisstring+='Payments for this season: '+str(thisteam.loc[index]['CurrPayment'])+'\n'
        thisstring+='Fees/Payments from prior season(s): ' + thisteam.loc[index]['Feepaydetail']
        # Balance is stored negative when money is owed, hence the sign flip
        thisstring+='Total fees due: '+str(-thisteam.loc[index]['Balance'])
        thisstring+=thisteam.loc[index]['Unidetail'] # family's uniform situation
        thisstring+="Text message for player's family (optional):\n"
        thisstring+=thisteam.loc[index]['Textmessage']+'\n'
    return thisstring
# formatting of xls files using xlsxwriter
format2 = workbook.add_format({'num_format': 'mm/dd/yy'})
worksheet.write('A2', number, format2)
def writeuniformlog(df, teams, players, season, year, paylog):
    ''' From mastersignups and teams, output contact lists for all teams/all sports separately
    into separate tabs of xls file; autosaves to e.g. Fall_2016_uniform_log.xlsx

    Args:
        df: master signups; teams: teams table with Uniforms column;
        players: player DB (school source); season/year: current period;
        paylog: payment log searched by finddeposits for uniform deposits
    Fixes vs original: engine name typo ('xlxswriter'), writer.sheets called
    instead of indexed, and a malformed set_header([header=...],) call.
    '''
    # Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
    df=df[df['Year']==year] # remove prior years in case of duplicate name
    df=df.reset_index(drop=True)
    # get school from players.csv
    df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r'))
    # find Cabrini teams from this season needing uniforms
    thismask = teams['Uniforms'].str.contains('y', case=False, na=False)
    uniformteams=teams.loc[thismask]
    uniformlist=np.ndarray.tolist(uniformteams.Team.unique())
    # single uniform log per season
    contactfile=str(season)+'_'+str(year)+'_uniform_log.xlsx'
    writer=pd.ExcelWriter(contactfile, engine='xlsxwriter', date_format='mm/dd/yy')
    # Can just eliminate any entries not in uniform deposit list
    df=df[df['Team'].isin(uniformlist)] # only players on teams needing uniforms
    # columns needed for log output
    mycols=['First', 'Last', 'School', 'Issue date', 'Uniform#', 'Amount', 'Deposit type', 'Deposit date', 'Uni return date', '$ returned', 'Comments', 'Plakey', 'Famkey']
    tabnamelist=[]
    for i, team in enumerate(uniformlist):
        thismask = df['Team'].str.contains(team, case=False, na=False)
        thisteam=df.loc[thismask] # this team's signups
        sport=thisteam.iloc[0]['Sport'].lower()
        thisteam=finddeposits(thisteam, paylog) # adds deposit info from paylog
        thisteam=thisteam[mycols] # organize in correct format for xls file
        tabname=sport[0:3]+team[0:3] # name tab with team's name..
        if tabname in tabnamelist:
            tabname+='2' # handles two teams per grade
        tabnamelist.append(tabname)
        thisteam.to_excel(writer,sheet_name=tabname,index=False)
        # set this tab's print header (xlsxwriter worksheet exists after to_excel)
        worksheet=writer.sheets[tabname]
        worksheet.set_header('&C'+tabname+' '+team)
    writer.save()
    return
billlist=pd.read_csv('Billlist_18Jan17.csv')
def getplayers(plakeys, players):
    ''' Returns player first last name list for entire family (from passed list of player keys)'''
    # players belonging to this family, by key
    fam = players[players['Plakey'].isin(plakeys)]
    names = ''
    for _, player in fam.iterrows():
        # original format: FirstLast pairs separated by '; '
        names += player['First'] + player['Last'] + '; '
    return names
def transferunistr(df, season, year, famkey):
    '''Reminder to return uniforms from transfer teams from just prior season
    current setup for old uniform e-mail and SMS deals with Cabrini only'''
    # signups where a uniform was issued but never returned
    issued = df.dropna(subset=['Issue date'])
    outstanding = issued.loc[pd.isnull(issued['Uni return date'])]
    # restrict to this family's outstanding uniforms
    outstanding = outstanding[outstanding['Famkey'] == famkey]
    # signup keys with outstanding uniforms
    unikeys = np.ndarray.tolist(outstanding.SUkey.unique())
    # tab-separated table for inclusion in the e-bill
    unistr = ''
    if len(outstanding) > 0:
        unistr += 'Old uniforms to return\n'
        unistr += 'Player\tSport\tUni #\tTeam\n'
        for _, entry in outstanding.iterrows():
            unistr += (entry['First'] + '\t' + entry['Sport'] + '\t' +
                       str(entry['Uniform#']) + '\t' + entry['Team'] + '\n')
    return unistr, unikeys
messagename='messages\\ebill_uninight.txt'
# various modules to attempt direct read-write from google drive
import gspread
from oauth2client.service_account import ServiceAccountCredentials
def readGDsignups():
    '''Read file from google drive and compare with Excel version ... find new entries

    Downloads every worksheet of the signups spreadsheet to
    <docid>-worksheet<i>.csv in the working directory.
    Fixes vs original: csv files were opened in 'wb' (binary) mode, which
    raises TypeError with csv.writer under Python 3 -- text mode with
    newline='' is required; csv module is now imported.
    '''
    import csv  # local import; module only needed here
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = ServiceAccountCredentials.from_json_keyfile_name('GoogleAPI_credential.json', scope)
    docid = "182QFOXdz0cjQCTlxl2Gb9b_oEqInH93Peo6EKkKod-g" # winter signups file
    client = gspread.authorize(credentials)
    spreadsheet = client.open_by_key(docid)
    for i, worksheet in enumerate(spreadsheet.worksheets()):
        filename = docid + '-worksheet' + str(i) + '.csv'
        with open(filename, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerows(worksheet.get_all_values())
year=2016
season='Winter'
df=Mastersignups
thissum= summarizesignups(df, season, year)
def autocsvbackup(df, filename, newback=True):
    ''' Keep dated rolling csv backups of the passed dataframe.

    Backups are named <filename>_<ddMonyy>.csv. After (optionally) making a
    new backup, only the copies closest in age to the targets (~4 months and
    ~1 week) plus the most recent one are kept; the rest are deleted.

    Args:
        df: dataframe to back up
        filename: base name, e.g. 'master_signups'
        newback: if True, write a new backup when none exists or the newest
            backup is older than 2 days
    Fixes vs original: fileage.append(now) put a datetime into a list of
    timedeltas, breaking the min() age comparison below; bare except narrowed.
    '''
    # targetdates gives ideal ages of list of backup files
    targetdates=[datetime.timedelta(120,0,0),datetime.timedelta(7,0,0)]
    now=datetime.datetime.now()
    mystr='*'+ filename +'*.csv'
    filelist=glob.glob(mystr)
    dates=[] # list of [backup date, age, filename]
    fileage=[] # age of each backup (timedelta)
    for i,name in enumerate(filelist):
        if '_' not in name:
            continue
        try:
            thisdate=name.split(filename+'_')[1] # date suffix after basename_
            thisdate=thisdate.split('.csv')[0]
            thisdate=datetime.datetime.strptime(thisdate, "%d%b%y")
            age=now-thisdate
            fileage.append(age)
            dates.append([thisdate, age, name])
        except (ValueError, IndexError): # non-backup file matched glob; skip it
            print('Problem getting date from file', name)
    # dates ascending by date and fileage descending by age stay index-aligned
    dates.sort()
    fileage.sort(reverse=True)
    if newback==True:
        if len(dates)==0: # no existing backups so make one
            fname=filename+'_'+datetime.date.strftime(now, "%d%b%y")+'.csv'
            df.to_csv(fname,index=False)
            print(fname + ' saved to file')
            dates.append([now, now-now,fname])
            # fixed: was fileage.append(now) -- datetime in a timedelta list
            fileage.append(now-now)
        if dates[-1][1]>datetime.timedelta(2,0,0): # no backup within last 2 days
            fname=filename+'_'+datetime.date.strftime(now, "%d%b%y")+'.csv'
            df.to_csv(fname,index=False)
            print(fname + ' saved to file')
            dates.append([now, datetime.timedelta(0,0,100),fname])
            # enter 100ms as timedelta for new backup
            fileage.append(datetime.timedelta(0,0,100)) # fileage needs to be a timedelta
    # keep files closest to ~4 mo and ~1 week old, plus the most recent
    keepindices=[] # index of backup closest to each target age (dups possible)
    for i,thisage in enumerate(targetdates):
        # find closest entry to each target
        ind, age = min(enumerate(fileage), key=lambda x: abs(x[1]-thisage))
        keepindices.append(ind)
    keepindices.append(len(dates)-1) # always keep most recent backup
    for i, datelist in enumerate(dates):
        if i not in keepindices: # delete entries not closest to target dates
            os.remove(datelist[2])
    return
def standardizeparish(ser):
    ''' Clean up and standardize all the acronyms ... just run on entire series

    Args:
        ser: pandas Series of raw parish name strings
    Returns: Series with surrounding whitespace stripped
    (fixed: original body was unfinished scratch -- referenced an undefined
    df and had a malformed for statement)
    '''
    # strip stray whitespace; full acronym standardization still pending
    ser = ser.astype(str).str.strip()
    # TODO standardize parish names as done in standardizeschool
    return ser
# TODO standardize parish names as done in standardizeschool
# Saving of various files
famcontact.to_csv('family_contact.csv', index=False)
SCsignup.to_csv('current_signups.csv', index=False)
Mastersignups.to_csv('master_signups.csv', index=False)
players.to_csv('players.csv', index=False, date_format='%m/%d/%Y') # use consistent datetime format
# need to specify date format... autoconverts datetime to string
def getCYCname(plakey, players):
    ''' Returns exact first and last names of player as found on CYC card from player key

    Args:
        plakey: unique player key
        players: player database dataframe
    Returns: (first, last) tuple, or None if zero or multiple matches
        (a diagnostic message is printed in those cases)
    (fixed: a stray '|' token after the final return -- an extraction
    artifact -- made the original a syntax error)
    '''
    match = players[(players['Plakey']==plakey)]
    if len(match)==0:
        print('Player key # ', plakey, 'not found in database.')
        return
    elif len(match)>1:
        print('Multiple matches for player key # ', plakey, 'in database.')
        return
    else:
        first=match.iloc[0]['First']
        last=match.iloc[0]['Last']
        return first, last
tkcroat/SC | UnderDev/SC_data_classes.py | import os
import pandas as pd
import filedialog
class playerinfo():
    ''' Loads all dataframes with player info, teams, signups, etc. from
    csv/xlsx files; missing files fall back to empty DataFrames. '''
    def __init__(self, *args, **kwargs):
        # user picks the data directory interactively
        # NOTE(review): the chosen path is stored but the loads below use
        # relative paths -- presumably the cwd must already be this directory; confirm
        self.path = filedialog.askdirectory()
        # open files
        self.players=None # player DB
        self.famcontact=None # family contact info
        # NOTE(review): open_main_files() assigns self.mastersignups, not
        # self.masterSUs -- attribute name mismatch; confirm which is intended
        self.masterSUs=None
        self.teams=None
        self.unilist=None # never assigned by open_main_files()
        self.open_main_files() # loads above
    def open_main_files(self):
        ''' Auto loads player & family contact info, teams/coaches, master signups
        unilog info '''
        if os.path.exists('players.csv'):
            self.players=pd.read_csv('players.csv', encoding='cp437')
        else:
            print('players.csv not found.')
            self.players=pd.DataFrame()
        if os.path.exists('family_contact.csv'):
            self.famcontact=pd.read_csv('family_contact.csv', encoding='cp437')
        else:
            self.famcontact=pd.DataFrame()
        if os.path.exists('Teams_coaches.xlsx'):
            # NOTE(review): read_excel's 'sheetname' kwarg was renamed to
            # 'sheet_name' in pandas 0.21 -- verify pinned pandas version
            self.teams=pd.read_excel('Teams_coaches.xlsx', sheetname='Teams')
            self.coaches=pd.read_excel('Teams_coaches.xlsx', sheetname='Coaches') # load coach info
        else:
            self.teams=pd.DataFrame()
            self.coaches=pd.DataFrame()
        if os.path.exists('master_signups.csv'):
            self.mastersignups=pd.read_csv('master_signups.csv', encoding='cp437')
        else:
            self.mastersignups=pd.DataFrame()
        # NOTE(review): Integquantlog / AESquantparams below look copied from an
        # Auger spectroscopy project -- likely unrelated to sports-club data; confirm
        if os.path.exists('Integquantlog.csv'):
            self.Integquantlog=pd.read_csv('Integquantlog.csv', encoding='cp437')
        else:
            self.Integquantlog=pd.DataFrame()
        # Print TEM or SEM to console based on beam kV
        try:
            self.AESquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv', encoding='utf-8')
        except: # bare except kept from original; any read failure -> empty frame
            self.AESquantparams=pd.DataFrame()
    def get_peakinfo(self):
        ''' takes element strings and energies of background regs and returns tuple for each elem symbol containing all params necessary to find each Auger peak from given spe file
        also returns 2-tuple with energy val and index of chosen background regions
        '''
        # NOTE(review): relies on self.Smdifpeakslog, which is never set in
        # __init__/open_main_files -- must be assigned externally before calling
        # elemental lines (incl Fe2, Fe1, etc.)
        self.peaks=self.Smdifpeakslog.PeakID.unique()
        self.peakdata=[]
        for peak in self.peaks:
            try:
                # find row in AESquantparams for this element
                thispeakdata=self.AESquantparams[(self.AESquantparams['element']==peak)]
                thispeakdata=thispeakdata.squeeze() # series with this elements params
                # return list of length numelements with 5-tuple for each containing
                # 1) peak symbol, 2) ideal negpeak (eV) 3) ideal pospeak (in eV)
                # 4)sensitivity kfactor.. and 5) error in kfactor
                peaktuple=(peak, thispeakdata.negpeak, thispeakdata.pospeak,
                    thispeakdata.kfactor, thispeakdata.errkf1) # add tuple with info for this element
                self.peakdata.append(peaktuple)
            except: # bare except kept from original; missing params just logged
                print('AESquantparams not found for ', peak)
        print('Found', len(self.peakdata), 'quant peaks in smdifpeakslog' )
|
tkcroat/SC | SC_maintenance_main.py | <reponame>tkcroat/SC
# -*- coding: utf-8 -*-
"""
SC maintenance main/workflow ... assorted functions that are run annually or thereabouts
Created on Wed Aug 23 09:14:51 2017
@author: tkc
"""
# Standardize schools to match Pat Moore table
players, famcontact = SC.loadProcessPlayerInfo() # version w/o signup processing
# Load Pat Moore necessary school matching table
Schoo
# Run these occasionally to possibly clean up underlying data
# Update grades based on school year and gradeadj (run at beginning of school year)
players=SC.graduate_players(players, year)
players.to_csv('private\\players.csv',index=False)
# Updates grade adjustment based on current grade (which must be correct) and DOB (constant)
players=updategradeadjust(players, year)
# Remove high school kids
players=SC.removeHSkids(players)
players.to_csv('private\\players.csv',index=False)
# remove families w/ no associated players (e.g. graduated)
famcontact=SC.removeEmptyFams(players, famcontact)
test=Mastersignups[pd.isnull(Mastersignups['SUkey'])]
players=SC.formatnamesnumbers(players) # format phone numbers, names to title case, standardize schools, etc.
comparefamkeys(players,famcontact) # checks for consistency in family names/keys between main lists
# TODO removedeadfamilies function
# Update family contact w/ associated players |
tkcroat/SC | pkg/schedule_scraping.py | <reponame>tkcroat/SC<filename>pkg/schedule_scraping.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 18 11:06:34 2016
@author: tkc
"""
from lxml import html
import requests
import urllib.request
#%%
# xml.etree.ElementTree -- flexible container
page = requests.get('http://www.judgedowdsoccer.com/content/schedules/Grade_kg.htm')
etree = html.fromstring(page.content) # pulls entire page as html element
# find stop and start of tables ... return list of tables (some nested)
mytables=etree.xpath('//table') # gets 5 tables
for table in mytables:
text=table.xpath('//tr/text()')
tab3=mytables[2]
teamrows=tab3.xpath('//tr') # now extract 23 team entries
thisteam=teamrows[0]
for team in thisteam:
text=team.xpath('//td/text()')
print(text)
for team in teamrows:
for child in text:
print(child.attrib)
rowlist=[]
for row in teamrows:
# rowlist=row.xpath('//td//text()')
# print(etree.tostring(row, pretty_print=True))
str=''.join(row) # list to string conversion
print(str)
thisteam=teamrows[3]
for child in teamrows:
print(child.tag, child.attrib)
namelist=[]
for name in names:
namelist.append([c.text for c in tabl.getchildren()])
namelist=[]
for name in names:
namelist.append([c.tag for c in tabl.getchildren()])
for tabl in mytables:
rowlist.append([c.tag for c in tabl.getchildren()])
rows = mytables.xpath(xpath1)[0].findall("tr")
data=list()
tables=tree.xpath('//table//tr')
print(htmlobjects)
rows=tree.xpath('tbody/tr')
# could find #. to identify all the teams
# table_03 has teams/coachs/numbers and tables_04 and _05 have schedules (parse with id=Table_03)
Teams=[] # list of number, team names
Dates=[]
for tbl in tree.xpath('//table'): # finds tables
|
tkcroat/SC | pkg/SC_signup_functions.py | # -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:30:01 2016
SC process signups functions
@author: tkc
"""
#%%
import pandas as pd
import numpy as np
from datetime import datetime, date
import re, glob, math
from openpyxl import load_workbook # writing to Excel
from PIL import Image, ImageDraw, ImageFont
import tkinter as tk
import pkg.SC_config as cnf # _OUTPUT_DIR and _INPUT_DIR
#%%
def combinephrases(mylist):
    ''' Combine list of phrases using commas & and '''
    # stringify everything up front so mixed types behave uniformly
    items = [str(item) for item in mylist]
    if len(items) == 1:
        return items[0]
    if len(items) == 2:
        return items[0] + ' and ' + items[1]
    # three or more: comma-join all but the last, then append "and <last>"
    return ', '.join(items[:-1]) + ' and ' + items[-1]
#%%
def writetoxls(df, sheetname, xlsfile):
    ''' Generic write of given df to specified tab of given xls file.

    Args:
        df: dataframe to write
        sheetname: target tab name (existing tab of same name is replaced)
        xlsfile: path to an existing .xlsx workbook (other tabs preserved)
    '''
    book=load_workbook(xlsfile)
    # NOTE(review): assigning writer.book / writer.sheets relies on older
    # pandas ExcelWriter internals (read-only properties in pandas >= 1.5);
    # verify against the pinned pandas version
    writer=pd.ExcelWriter(xlsfile, engine='openpyxl', datetime_format='mm/dd/yy', date_format='mm/dd/yy')
    writer.book=book
    writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
    df.to_excel(writer,sheet_name=sheetname,index=False) # this overwrites existing file
    writer.save() # saves xls file with all modified data
    return
def loadtransfers(df, signups):
    ''' Load transferred players and add to signups (then run player ID);
    transfers added as normal players but need fake billing entries

    Args:
        df: raw transfer roster (Fname/Lname/Street/... column naming)
        signups: current season signups dataframe
    Returns: signups with the transfer rows appended (Timestamp column marks
        them with "<date> transfer")
    '''
    df=df.rename(columns={'Fname':'First','Lname':'Last','Street':'Address','Parish':'Parish of Registration'})
    df=df.rename(columns={'Phone':'Phone1','Birthdate':'DOB','Sex':'Gender','Open/Closed':'Ocstatus'})
    # Expand single-letter gender codes to the Girl/Boy labels used elsewhere
    df.loc[:,'Gender']=df.Gender.replace('F','Girl')
    df.loc[:,'Gender']=df.Gender.replace('M','Boy')
    # Manually enter sport (interactive console prompt)
    print('Enter sport for transferred players')
    sport=input()
    df.loc[:,'Sport']=sport
    df=df.dropna(subset=['First']) # remove blank rows if present
    # keep only columns that also exist in the signups frame
    mycols=[col for col in df if col in signups]
    df=df[mycols]
    df=formatnamesnumbers(df)
    # place date/transfer in timestamp
    mystamp=datetime.strftime(datetime.now(),'%m/%d/%y')+' transfer'
    df.loc[:,'Timestamp']=mystamp
    mycols=signups.columns
    # NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat is
    # the modern equivalent -- verify pinned pandas version
    signups=signups.append(df, ignore_index=True)
    signups=signups[mycols]
    return signups
def packagetransfers(teams, Mastersignups, famcontact, players, season, year, acronyms, messfile):
    ''' Package roster and contact info by sport- school and save as separate xls files
    also generate customized e-mails in single log file (for cut and paste send to appropriate persons)
    args:
        teams - loaded team list
        mastersignups - signups w/ team assignment
        players -player DB
        famcontact - family contact db
        season - Fall, Winter or Spring
        year - starting sports year (i.e. 2019 for 2019-20 school year)
        acronyms - school/parish specific abbreviations
        messfile - e-mail message template w/ blanks ($SCHOOL, $SPORT, $PLALIST)
    returns:
        None (writes one xlsx per school/sport plus a text log of e-mails)
    '''
    teams=teams[pd.notnull(teams['Team'])]
    # transfer teams are named with '#' (e.g. SMOS#3G)
    transferteams=np.ndarray.tolist(teams[teams['Team'].str.contains('#')].Team.unique())
    transSU=Mastersignups[Mastersignups['Team'].isin(transferteams)]
    # ensure that these are from correct season/year
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    sportlist=sportsdict.get(season)
    transSU=transSU.loc[(transSU['Sport'].isin(sportlist)) & (transSU['Year']==year)] # season is not in mastersignups... only individual sports
    # get family contact info from famcontacts
    transSU=pd.merge(transSU, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
    # get school from players.csv
    transSU=pd.merge(transSU, players, how='left', on=['Plakey'], suffixes=('','_r2'))
    # get division from Teams xls (for roster)
    transSU=pd.merge(transSU, teams, how='left', on=['Team'], suffixes=('','_r3')) # effectively adds other team info for roster toall players
    transSU.loc[:,'Role']='Player' # add column for role
#    transSU['Open/Closed']='Closed'
    # Sort by grade pre-split (K mapped to 0 so the sort is numeric)
    transSU.loc[:,'Grade']=transSU.Grade.replace('K',0)
    transSU.loc[:,'Grade']=transSU.Grade.apply(int)
    transSU=transSU.sort_values(['Grade'], ascending=True)
    transSU.loc[:,'Grade']=transSU.Grade.replace(0,'K') # restore K after numeric sort
    # Column for sorting by transferred to school (prefix before '#')
    transSU.loc[:,'Transchool']=transSU['Team'].str.split('#').str[0]
    grouped=transSU.groupby(['Sport','Transchool'])
    for [sport, school], group in grouped:
        # prepare roster tab
        xlsname=cnf._OUTPUT_DIR+'\\Cabrini_to_'+school+'_'+sport+'_'+str(year)+'.xlsx'
        writer=pd.ExcelWriter(xlsname, engine='openpyxl')
        Transferroster=organizeroster(group)
        Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
        Transferroster=replaceacro(Transferroster,acronyms)
        Transferroster.to_excel(writer,sheet_name='roster',index=False)
        # prep contacts tab
        mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
     'Email2', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Team']
        Transfercontacts=group[mycols]
        Transfercontacts.to_excel(writer, sheet_name='contacts', index=False)
        writer.save()
    # Now generate list of e-mails for all schools/directors
    logfile='transfers_director_emails_log.txt'
    with open(logfile,'w+') as emaillog:
        # Read generic file to sport director
        with open(messfile, 'r') as file:
            blankmessage=file.read()
        for [sport, school], group in grouped:
            plagroup=group.groupby(['Grade', 'Gender'])
            platypes=[] # list of # of players by grade, gender
            gradedict={'K':'K', 1:'1st', 2:'2nd',3:'3rd',4:'4th',5:'5th',6:'6th', 7:'7th',8:'8th'}
            genderdict={'f':'girls', 'F':'girls','m':'boys','M':'boys'}
            for [grade, gender], group in plagroup:
                numplays=str(int(group['Grade'].count()))
                grname=gradedict.get(grade)
                genname=genderdict.get(gender)
                platypes.append(numplays+' '+grname+' '+genname)
            plalist=combinephrases(platypes)
            # fill the template blanks for this school/sport group
            thismess=blankmessage.replace('$SCHOOL', school)
            thismess=thismess.replace('$SPORT', sport)
            thismess=thismess.replace('$PLALIST', plalist)
            emaillog.write(thismess)
            emaillog.write('\n\n')
    return
def findcards():
    '''Search ID cards folder and return dict mapping card number to file link.
    Cards were resized to 450x290 pix jpg in photoshop (scripts-image processor).
    Keys are either player number as string or coach CYC ID; values are paths.'''
    paths=glob.glob('%s\\IDcards\\*.jpg' %cnf._OUTPUT_DIR, recursive=True)
    cards={} # dict for card numbers/filenames
    for path in paths:
        base=path.split('\\')[-1]
        # card number precedes "_" (players) or "--" (coaches) in the file name
        key=base.split('_')[0] if '_' in base else base.split('--')[0]
        cards[key]=path
    return cards
def makethiscard(IDlist, team):
    ''' Build a single sheet image of ID cards for a team.

    Each entry in IDlist is either a path to a card image or a
    "First Last" name string (card missing); missing entries are drawn as
    red text placeholders. Cards fill top-to-bottom down each column,
    then move right to the next column.

    args:
        IDlist - list of image file paths or name strings, alphabetical order
        team - team name (currently unused; kept for caller compatibility)
    returns:
        PIL Image containing the assembled grid
    '''
    margin=10 # white border in pixels on all sides
    # choose grid shape from number of cards
    if len(IDlist)<11: # use 2 x 5 array (horiz)
        ncols=2
        nrows=5
    elif len(IDlist)<13: # 4w x 3 h (vert)
        ncols=4
        nrows=3
    else: # 3 across by as many down as needed (max intended ~21)
        ncols=3
        nrows=math.ceil(len(IDlist)/3)
    # blank white canvas sized for the full grid (cards are 450x300)
    cardimage = Image.new('RGB', (450*ncols+2*margin, 300*nrows+2*margin), "white")
    draw=ImageDraw.Draw(cardimage) # single draw obj for adding missing card names
    ttfont=ImageFont.truetype('arial.ttf', size=36)
    for i,fname in enumerate(IDlist):
        col=i//nrows # fill down a column first
        rowpos=i%nrows
        xpos=margin+col*450
        ypos=margin+rowpos*300
        try:
            thiscard=Image.open(fname)
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
            thiscard=thiscard.resize((450, 300), Image.LANCZOS)
            cardimage.paste(im=thiscard, box=(xpos, ypos)) # paste w/ xpos,ypos as upper left
        except (OSError, ValueError): # fname is "first last" (no card) or unreadable file
            draw.text((xpos+50,ypos+100),fname,font=ttfont, fill="red")
    return cardimage
''' TESTING
i=0 team=teamlist[i]
'''
def makeCYCcards(df, players, teams, coaches, season, year, **kwargs):
    ''' From mastersignups and teams, build one ID-card sheet jpg per team
    (players plus coaches); team assignments must be finished.
    args:
        df -- mastersignups dataframe
        players - player info dataframe
        teams - this year's teams csv
        coaches - full coach CYC info list
        season - Fall, Winter or Spring
        year - sports year (int)
    kwargs:
        showmissing - True (shows missing player's name); False- skip missing player
        otherSchools - default False (also make card sheets for transferred teams/players)
    returns:
        missingcards - subset of players lacking a card image (sorted by grade/last)
    usage:
    kwargs={'showmissing':False}
    missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':True} )
    missing = makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':False} )
    '''
    # Slice by sport: Basketball (null for winter?), Soccer, Volleyball, Baseball, T-ball, Softball, Track)
    cards=findcards() # dictionary with number: filename combo for existing CYC cards
    df=df[(df['Year']==year)]
    df=df.reset_index(drop=True)
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    sportlist=sportsdict.get(season)
    df=df[df['Sport'].isin(sportlist)] # season is not in mastersignups... only individual sports
    # Make list of teams that need cards (all track and others >1st grade)
    def processGrade(val):
        # map 'K' to 0 so Grade can be compared numerically below
        if val=='K':
            return 0
        else:
            return int(val)
    teams.loc[:,'Grade'] = teams['Grade'].apply(lambda x:processGrade(x))
    if not kwargs.get('otherSchools', False):
        # all transfer teams with contain # (e.g. SMOS#3G) so remove these
        # dropped by default
        teams = teams[~teams['Team'].str.contains('#')]
    # need track teams or any team from grades 2+
    cardTeamList= teams[ (teams['Grade']>1) | (teams['Sport']=='Track') ]['Team'].unique()
    df=df[ df['Team'].isin(cardTeamList) ]
    df=df.sort_values(['Last'])
    # plakeys as string will be easiest for below matching (card dict keys are strings)
    df.loc[:,'Plakey']=df['Plakey'].astype(int)
    df.loc[:,'Plakey']=df['Plakey'].astype(str)
    def getName(gr, pk):
        # get player's "First Last" from plakey string (used when card image missing)
        match=gr[gr['Plakey']==pk]
        name=match.iloc[0]['First'] + ' ' + match.iloc[0]['Last']
        return name
    teamgrouped = df.groupby(['Team'])
    missinglist=[] # list of plakeys with missing card
    for team, gr in teamgrouped:
        # keys in card dict are strings
        IDlist = [str(int(i)) for i in gr.Plakey.unique()]
        missinglist.extend([i for i in gr.Plakey.unique() if i not in cards.keys() ])
        if not kwargs.get('showmissing', False):
            # Shows only valid cards, drops missing names
            IDlist = [ cards.get(i) for i in IDlist if i in cards.keys() ]
            filename='Cards_'+ team +'.jpg'
        else: # show cards and missing name when card image not in IDcards folder
            IDlist = [cards.get(i) if i in cards.keys() else getName(gr, i) for i in IDlist ]
            filename='Cards_'+ team +'_all.jpg'
        # get team's coaches
        IDlist.extend(getcoachIDs(team, teams, coaches, cards)) # add coach ID image file or first/last if missing
        cardimage =makethiscard(IDlist, team) # assemble single sheet image
        # save the card file
        cardimage.save(cnf._OUTPUT_DIR+'\\'+filename)
    missingcards=players[players['Plakey'].isin(missinglist)]
    missingcards=missingcards.sort_values(['Grade','Last'])
    return missingcards
def getcoachIDs(team, teams, coaches, cards):
    ''' Returns CYC IDs for all team's coaches.

    For the head coach and each assistant, append either the path to that
    coach's ID-card image (when present in cards) or "First Last" looked up
    from the coaches table as a fallback. Returns an empty list when the
    team or its Coach ID cannot be found.
    '''
    idimages=[]
    matchrows=teams[teams['Team']==team]
    if len(matchrows)!=1:
        print(team, 'not found in current teams list')
        return idimages # blank list
    matchrows=matchrows.dropna(subset=['Coach ID'])
    if len(matchrows)!=1:
        print('Coach ID not found for', team)
        return idimages # blank list
    headID=matchrows.iloc[0]['Coach ID']
    if headID!='': # possibly blank
        headID=headID.strip()
        if headID in cards:
            idimages.append(cards.get(headID,'')) # file path to this coach's ID
        else: # fall back to first/last name from coach DB
            namematch=coaches[coaches['Coach ID']==headID]
            if len(namematch)==1:
                idimages.append(namematch.iloc[0]['Fname']+' '+namematch.iloc[0]['Lname'])
            else:
                print("Couldn't find coach ", headID)
    matchrows=matchrows.dropna(subset=['AssistantIDs'])
    if len(matchrows)==1: # assistant IDs exist for this team
        asstIDs=[str(s).strip() for s in matchrows.iloc[0]['AssistantIDs'].split(",")]
        for asstID in asstIDs:
            if asstID in cards:
                idimages.append(cards.get(asstID,'')) # found assistant coaches ID card image
            else: # fall back to assistant's first/last name
                asstmatch=coaches[coaches['Coach ID']==asstID]
                if len(asstmatch)==1:
                    idimages.append(asstmatch.iloc[0]['Fname']+' '+asstmatch.iloc[0]['Lname'])
                else:
                    print("Couldn't find coach ", asstID)
    return idimages
def autocsvbackup(df, filename, newback=True):
    ''' Placeholder for dated CSV backups of key dataframes (currently a no-op).

    Intended behavior (not yet implemented): save df (e.g. players) to a dated
    backup file named from the base name (e.g. "family_contact"), then find
    existing backups and keep only those matching a target-age list.

    args:
        df - dataframe to back up
        filename - base name for the backup file
        newback - if True make a new backup (legacy flag; purpose of the
            False path unclear -- see TODO)
    returns: None
    '''
    # TODO fix this!
    pass
    return
def parseDate(val):
    '''
    Convert assorted date strings to datetime.date (time substrings stripped).
    Recognized formats:
        0) 03/01/2018, 3/1/2018, 3/1/18 or 03/01/18 (month first)
        1) 2019-01-01 or 2019-1-1 (year first)
        2) 01-01-2019 (year last)
    Two-digit years are assumed to be 20xx.  If the month field is >12 but the
    day field is <13 (e.g. 28/10/2019), the two are assumed swapped.

    args:
        val - date string (non-string values are returned unchanged)
    returns:
        datetime.date on success; None on any parse failure.  (Previously
        failure behavior was inconsistent: the slash format returned None
        while the dash formats returned the raw string.)
    '''
    if not isinstance(val, str):
        return val
    if ' ' in val: # Remove time substring (but will fail for 3 Oct 2019)
        val=val.split(' ')[0] # strip time substring if present
    patterns=['\d{1,2}/\d{1,2}/\d{2,4}', '\d{4}-\d{1,2}-\d{1,2}', '\d{1,2}-\d{1,2}-\d{4}']
    for i, patt in enumerate(patterns):
        if not re.search(r'%s' %patt, val):
            continue
        sep='/' if i==0 else '-'
        try:
            parts=[int(p) for p in val.split(sep)]
            if i==1: # year-first style
                (yr, mo, dy)=parts
            else: # month-first styles
                (mo, dy, yr)=parts
            if yr<100 and len(str(yr))==2: # handle 2 digit year
                yr=int('20'+str(yr))
            if mo < 13: # normal US version (month first)
                return datetime(yr, mo, dy).date()
            elif dy < 13: # possible month day reverse
                print('Month and day reverse for %s' %val)
                return datetime(yr, dy, mo).date() # Assume month/day switch
        except (ValueError, TypeError): # bad int conversion, wrong part count, or invalid date
            pass
        print('Problem extracting date from ', val)
        return None
    return None # no recognized date pattern found
def loadProcessPlayerInfo():
    '''Loads and processes players & family contacts (but not signup file).

    Reads players.csv and family_contact.csv from cnf._INPUT_DIR, normalizes
    grades (K/pK -> 0, then int) and DOB strings, and formats family contact
    names/phone numbers.

    returns:
        players - player dataframe (Grade as int, DOB parsed where possible)
        famcontact - formatted family contact dataframe
    '''
    players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
    players.loc[:,'Grade']=players.Grade.replace('K',0)
    players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
    players.loc[:,'Grade']=players.Grade.astype(int)
    # TODO use fixdates if players imports as string (various formats possible)
    # players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
    if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
        players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
    famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
    famcontact=formatnamesnumbers(famcontact)
    return players, famcontact
def loadProcessGfiles(gsignups, season, year):
    '''Loads and processes players, family contacts and signup file, gets active
    season and year.
    args:
        gsignups -- google signups dataframe
        season - 'Fall', 'Winter', 'Spring'
        year - 4 digit int (uses fall value all school year.. ie. 2018-19 year is always
            2018)
    returns:
        players, famcontact, gsignups (cleaned/normalized dataframes)
    '''
    players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
    players.loc[:,'Grade']=players.Grade.replace('K',0)
    players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
    players.loc[:,'Grade']=players.Grade.astype(int)
    # TODO use fixdates if players imports as string (various formats possible)
    # players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
    if not isinstance(players.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
        players.loc[:,'DOB']=players.DOB.apply(lambda x: parseDate(x))
    famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
    # winter signup sheet has no sport column (basketball only)
    if season=='Winter':
        gsignups['Sport']='Basketball'
    # TODO determine where multiple sports converted to separate lines
    duplicated=gsignups[gsignups.duplicated(subset=['First', 'Last','Grade','Sport'])]
    if len(duplicated)>0:
        print('Remove duplicate signups for %s' %", ".join(duplicated.Last.unique().tolist()))
        gsignups=gsignups.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
    gsignups.loc[:,'Sport']=gsignups['Sport'].str.replace('Volleyball','VB')
    #gsignups.loc[:,'Sport']=gsignups.loc[:,'Sport'].str.replace('Volleyball','VB').copy()
    #gsignups.loc[:,'Sport']=gsignups['Sport'].replace({'Volleyball':'VB'}, regex=True).copy()
    # add Famkey/Plakey columns if the google sheet lacks them
    missing=[i for i in ['Famkey','Plakey'] if i not in gsignups.columns]
    for col in missing: # add blank vals
        gsignups.loc[gsignups.index, col]=np.nan
    # convert assorted DOB strings to datetime.date
    if not isinstance(gsignups.DOB[0], pd.Timestamp):# sometimes direct import to pd timestamp works, other times not
        gsignups.loc[:,'DOB']=gsignups.DOB.apply(lambda x: parseDate(x))
    # Get year from signup file name
    outputduplicates(gsignups) # quick check of duplicates output in console window (already removed from signups)
    gsignups=formatnamesnumbers(gsignups) # format phone numbers, names to title case, standardize schools, etc.
    famcontact=formatnamesnumbers(famcontact)
    def processGkey(val):
        ''' Normalize plakey/famkey values copied to google drive: converts
        nan(float), whitespace, or number-as-string to either nan or int.
        '''
        if isinstance(val, str):
            val=''.join(val.split(' '))
            if val=='':
                return np.nan
            else:
                try:
                    return int(val)
                except:
                    return np.nan
        else:
            return np.nan
    # ensure gsignups has only int or nan (no whitespace)
    gsignups.loc[:,'Plakey']=gsignups['Plakey'].apply(lambda x: processGkey(x))
    gsignups.loc[:,'Famkey']=gsignups['Famkey'].apply(lambda x: processGkey(x))
    return players, famcontact, gsignups
def loadprocessfiles(signupfile):
    '''Loads and processes players, family contacts and signup file, gets active
    season and year.

    args:
        signupfile - path to csv or xls(x) signup file; season and year are
            parsed from the file name (e.g. "Fall2019_signups.xlsx")
    returns:
        players, famcontact, signups, season, year
    '''
    players=pd.read_csv(cnf._INPUT_DIR + '\\players.csv', encoding='cp437') # load existing player data (need to find correct DOB format)
    players.loc[:,'Grade']=players.Grade.replace('K',0)
    players.loc[:,'Grade']=players.Grade.replace('pK',0) # just make them 0s for now
    players.loc[:,'Grade']=players.Grade.astype(int)
    # TODO use fixdates if players imports as string (various formats possible)
    # players['DOB']=players['DOB'].apply(lambda x: fixDates(x))
    if type(players.DOB[0])!=pd.Timestamp: # sometimes direct import to pd timestamp works, other times not
        try:
            # NOTE(review): loc[:'DOB'] looks like it should be loc[:,'DOB'], and
            # parseDate expects a scalar string not a Series -- this branch likely
            # no-ops; confirm intended behavior
            players.loc[:'DOB']=parseDate(players.DOB) # return properly converted date columns series
        except:
            print('Failure converting player DOB to datetime/timestamp')
    famcontact=pd.read_csv(cnf._INPUT_DIR + '\\family_contact.csv', encoding='cp437') # load family contact info
    # read this season's sports signup file and rename columns
    if signupfile.endswith('.csv'):
        SUraw=pd.read_csv(signupfile)
    elif 'xls' in signupfile:
        try:
            # NOTE(review): 'sheetname' kwarg is pre-0.21 pandas; newer versions
            # use sheet_name, so this raises and falls through to the except branch
            SUraw=pd.read_excel(signupfile, sheetname='Raw') # may or may not have plakey/famkey
        except:
            SUraw=pd.read_excel(signupfile)
    # map the four known raw column layouts onto the standard column names
    if SUraw.shape[1]==30 and 'Plakey' in SUraw.columns:
        SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
       'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
       'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
       'Phone2','Text2','Email2','Coach2','Unisize','Unineed','Plakey','Famkey']
    elif SUraw.shape[1]==28 and 'Plakey' in SUraw.columns:
        SUraw.columns=['Timestamp','First','Last','DOB','Gender','School','Grade',
       'Address','Zip','Parish','Sport','AltPlacement','Ocstatus','Pfirst1',
       'Plast1','Phone1','Text1','Email','Othercontact','Coach','Pfirst2','Plast2',
       'Phone2','Text2','Email2','Coach2','Plakey','Famkey']
    elif SUraw.shape[1]==26 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
        SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
       'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
       'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
       'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2']
    elif SUraw.shape[1]==28 and 'Plakey' not in SUraw.columns: # Raw value without plakey and famkey
        SUraw.columns=['Timestamp','First','Last','DOB','Gender','School',
       'Grade','Address','Zip','Parish','Sport','AltPlacement','Ocstatus',
       'Pfirst1','Plast1','Phone1','Text1','Email','Othercontact','Coach',
       'Pfirst2','Plast2','Phone2','Text2','Email2','Coach2','Unisize','Unineed']
    SUraw.loc[SUraw.index,'Plakey']=np.nan # add if absent
    SUraw.loc[SUraw.index,'Famkey']=np.nan
    signups=SUraw.drop_duplicates(subset=['First', 'Last','Grade','Sport'])
    signups['Sport'].replace({'Volleyball':'VB'},inplace=True, regex=True)
    # Get year from signup file name
    season=re.match(r'(\D+)', signupfile).group(0) # season at string beginning followed by year (non-digit)
    if '\\' in season: # remove file path problem
        season=season.split('\\')[-1]
    year=int(re.search(r'(\d{4})', signupfile).group(0)) # full year should be only number string in signups file
    outputduplicates(SUraw) # quick check of duplicates output in console window (already removed from signups)
    signups=formatnamesnumbers(signups) # format phone numbers, names to title case, standardize schools, etc.
    famcontact=formatnamesnumbers(famcontact)
    return players, famcontact, signups, season, year
def findavailablekeys(df, colname, numkeys):
    '''Return a list of numkeys unused integer keys for the given key column
    (used for players, families, signups, etc.).

    Reuses gaps in the existing 1..len(df)-1 numbering first, then continues
    from max(used)+1.

    args:
        df - dataframe containing the key column
        colname - key column name (e.g. 'Plakey' or 'Famkey')
        numkeys - number of available keys to return
    returns:
        list of numkeys available integer keys
    '''
    # NaN keys (unassigned rows) would break max()/int(); drop them first
    usedkeys=[k for k in df[colname].unique().tolist() if pd.notnull(k)]
    usedset=set(usedkeys)
    # reuse gaps within the existing numbering range first
    availkeys=[i for i in range(1,len(df)) if i not in usedset]
    if len(availkeys)<numkeys: # get more keys starting past current max
        needed=numkeys-len(availkeys)
        # guard: empty usedkeys previously crashed max(); continue from len(df)
        nextval=int(max(usedkeys))+1 if usedkeys else max(len(df),1)
        availkeys.extend(range(nextval, nextval+needed))
    return availkeys[:numkeys] # only the requested number of keys
def organizeroster(df):
    ''' Rename, reorder, and trim columns into the CYC roster layout;
    input df is already split by sport and year. '''
    renames={'First':'Fname', 'Last':'Lname', 'Address':'Street',
        'Parish_registration':'Parish of Registration',
        'Parish_residence':'Parish of Residence', 'Phone1':'Phone',
        'DOB':'Birthdate', 'Gender':'Sex', 'Email1':'Email'}
    df=df.rename(columns=renames)
    # roster template wants F/M rather than Girl/Boy
    df.loc[:,'Sex']=df.Sex.replace('Girl','F').replace('Boy','M')
    df.loc[:,'Sex']=df.Sex.str.upper() # ensure uppercase
    rostercols=['Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone',
        'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team',
        'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
    df=df[rostercols] # roster template column ordering
    df=df.sort_values(['Team'])
    return df
'''TESTING row=tempplay.iloc[7]
signups=signups[signups['Last']=='Elston']
'''
def processdatachanges(signups, players, famcontact, year):
    '''Pass SC signups subset from google drive, update address for more up-to-date
    contact information, new address, etc.
    must start here if troubleshooting
    args:
        signups -- online signups file (normally google drive)
        players - player DOB, grade, etc
        famcontact- family contact info
        year - sports year (int); e.g. 2019 for 2019-20 school year
    returns:
        players, famcontact (both updated and autosaved to csv)
    '''
    # Using all entries from signups (manual and gdrive)
    # Updates from paper signups should be done directly to famcontact and players csv files (skip entirely)
    '''
    signups.Timestamp=pd.to_datetime(signups.Timestamp, errors='coerce') # converts to naT or timestamp
    gdsignups=signups.dropna(subset=['Timestamp']) # drops manual entries (no google drive timestamp)
    '''
    # merge w/ players and update grade, recalc grade adjustment, and school
    # must use left merge to keep correct indices from players df (inner causes reindexing)
    players=players.reset_index(drop=True)
    tempplay=pd.merge(players, signups, how='inner', on=['Plakey'], suffixes=('','_n'))
    tempplay=tempplay.dropna(subset=['Gender_n']) # this drops all without a google drive entry
    for index, row in tempplay.iterrows():
        upkwargs={} # flags for changes needing interactive approval
        # Skip approval for grade updates
        if row.Grade!=row.Grade_n: # grade discrepancy between players.csv and current signup
            match=players[players['Plakey']==row.Plakey]
            if len(match)==1:
                thisind=match.index[0]
                # update player grade (no approval)
                players.loc[thisind,'Grade']=row.Grade_n # set to new value from current signup file
                print (row.First," ",row.Last," grade changed to ", row.Grade_n)
        if row.School!=row.School_n and str(row.School_n)!='nan':
            upkwargs.update({'school':True})
        # Check for DOB inconsistency between google drive and players.csv
        if row.DOB!=row.DOB_n: # don't change grade adjustment if DOB discrepancy
            if row.DOB_n.year!=year: # skip birthday instead of DOB error
                upkwargs.update({'DOB':True})
        else: # recalculate grade adjustment
            # Direct adjustment to gradeadj in players (if indicated)
            players=updategradeadjust(row, players, year)
        if 'school' in upkwargs or 'DOB' in upkwargs:
            # Interactively approve school or DOB changes
            players=updateplayer_tk(row, players, **upkwargs)
    autocsvbackup(players,'players', newback=True) # run autobackup script
    outname=cnf._OUTPUT_DIR+'\\players.csv'
    players.to_csv(outname,index=False) # direct save of changes from google drive info
    # now update new info into family contacts
    # faminfo=gdsignups.drop_duplicates(subset=['Famkey']) # only process first kid from family
    faminfo=signups.drop_duplicates(subset=['Famkey'])
    famcontact=prepcontacts(famcontact)
    faminfo=prepcontacts(faminfo)
    tempfam=pd.merge(famcontact, faminfo, how='inner', on=['Famkey'], suffixes=('','_n')) # same indices as famcontact
    tempfam=tempfam.dropna(subset=['Zip_n']) # drops those without timestamped google drive entry
    for index,row in tempfam.iterrows():
        # Update/reshuffle phone, email, parent list, parish of registration (direct to famcontact)
        famcontact=update_contact(row, famcontact) # update/reshuffle phone,text (list of lists)
    autocsvbackup(famcontact,'family_contact', newback=True) # run autobackup script
    outname=cnf._INPUT_DIR+'\\family_contact.csv'
    famcontact.to_csv(outname, index=False)
    return players, famcontact
def updatefamcon_tk(row, famcontact, **upkwargs):
    ''' Interactive approval of family contact changes
    changes directly made to famcontacts (but not yet autosaved)

    A tkinter dialog lists each detected change (new/removed parents, emails,
    phones, address) with checkboxes defaulting to "apply"; clicking Change
    writes the approved values directly into famcontact, Skip leaves it alone.

    args:
        row - merged series with existing famcontact columns plus new
            google-drive values suffixed _n
        famcontact - family contacts dataframe (modified in place on approval)
    upkwargs: parlist/newpar1/newpar2, emails/email1/email2,
        phones/phone1/phone2, address -- flags/values describing the changes
    returns:
        famcontact (modified only if user clicked Change)
    '''
    root = tk.Tk()
    root.title('Update family contact info')
    choice=tk.StringVar() # must be define outside of event called functions
    rownum=0
    mytxt='Family: '+row.Family+' # '+str(row.Plakey)
    tk.Label(root, text=mytxt).grid(row=rownum, column=0)
    tk.Label(root, text='Deselect to remove').grid(row=rownum, column=1)
    rownum+=1
    # Use listbox of common schools?
    if 'parlist' in upkwargs: # indicates new parent found
        colnum=0
        parlist=upkwargs.get('parlist',[])
        # Checkboxes to add new parent
        if 'newpar1' in upkwargs:
            addpar1=tk.BooleanVar()
            addpar1.set(True)
            try:
                mytext='Add parent: '+ (' '.join(upkwargs.get('newpar1',[]))+'?')
            except:
                print('Error adding parent 1', )
                mytext=''
            tk.Checkbutton(root, variable=addpar1, text=mytext).grid(row=rownum, column=colnum)
            colnum+=1
        if 'newpar2' in upkwargs:
            addpar2=tk.BooleanVar()
            addpar2.set(True)
            try:
                mytext='Add parent: '+ (' '.join(upkwargs.get('newpar2',[]))+'?')
            except:
                mytext=''
            tk.Checkbutton(root, variable=addpar2, text=mytext).grid(row=rownum, column=colnum)
            colnum+=1
        # Checkbutton and boolvar for each existing parent (default true)
        pbools=[] # List of bools for parent inclusion
        for i in range(0,len(parlist)):
            pbools.append(tk.BooleanVar())
            pbools[i].set(True)
            tempstr=parlist[i]
            tk.Checkbutton(root, variable=pbools[i], text=tempstr).grid(row=rownum, column=colnum)
            rownum+=1
        rownum+=1
    if 'emails' in upkwargs: # indicates new email(s) found
        emaillist=upkwargs.get('emails',[])
        colnum=0
        # Checkboxes/entries for new emails (editable before accepting)
        if 'email1' in upkwargs:
            addemail1=tk.BooleanVar()
            addemail1.set(True)
            email1=tk.StringVar()
            email1.set(upkwargs.get('email1',''))
            tk.Checkbutton(root, variable=addemail1, text='Add new email1').grid(row=rownum, column=colnum)
            rownum+=1
            tk.Entry(root, textvariable=email1).grid(row=rownum, column=colnum)
            rownum+=1
        if 'email2' in upkwargs:
            addemail2=tk.BooleanVar()
            addemail2.set(True)
            email2=tk.StringVar()
            email2.set(upkwargs.get('email2',''))
            tk.Checkbutton(root, variable=addemail2, text='Add new email2').grid(row=rownum, column=colnum)
            rownum+=1
            tk.Entry(root, textvariable=email2).grid(row=rownum, column=colnum)
            colnum+=1
        # Checkbutton and boolvar for each existing email (default true)
        ebools=[] # List of bools for email inclusion
        for i in range(0,len(emaillist)):
            ebools.append(tk.BooleanVar())
            tempstr=emaillist[i]
            ebools[i].set(True)
            tk.Checkbutton(root, variable=ebools[i], text=tempstr).grid(row=rownum, column=colnum)
            rownum+=1
        rownum+=1
    if 'phones' in upkwargs: # indicates new phone(s) found
        phlist=upkwargs.get('phones',[])
        colnum=0
        # Checkboxes to add new phone/text pairs
        if 'phone1' in upkwargs:
            addphone1=tk.BooleanVar()
            addphone1.set(True)
            try:
                mytext='Add phone/text: '+ upkwargs.get('phone1','')
            except:
                mytext=''
            tk.Checkbutton(root, variable=addphone1, text=mytext).grid(row=rownum, column=colnum)
            colnum+=1
        if 'phone2' in upkwargs:
            addphone2=tk.BooleanVar()
            addphone2.set(True)
            try:
                mytext='Add phone/text: '+ ', '.join(upkwargs.get('phone2',[]))
            except:
                mytext=''
            tk.Checkbutton(root, variable=addphone2, text=mytext).grid(row=rownum, column=colnum)
            colnum+=1
        # Checkbutton and boolvar for each existing phone (default true)
        phbools=[] # List of bools for phone inclusion
        for i in range(0,len(phlist)):
            phbools.append(tk.BooleanVar())
            tempstr=phlist[i]
            phbools[i].set(True)
            tk.Checkbutton(root, variable=phbools[i], text=tempstr).grid(row=rownum, column=colnum)
            rownum+=1
    if 'address' in upkwargs:
        colnum=0
        tk.Label(root, text='Possible change of address').grid(row=rownum, column=colnum)
        rownum+=1
        newaddrbool=tk.BooleanVar()
        newaddr=tk.StringVar()
        newaddrbool.set(False) # address change must be explicitly opted into
        newaddr.set(row.Address_n)
        newzip=tk.StringVar()
        try:
            newzip.set(int(row.Zip_n))
        except:
            print('Non-standard zip value',str(row.Zip_n))
        tk.Checkbutton(root, variable=newaddrbool, text='Change address?').grid(row=rownum, column=colnum)
        colnum+=1
        tk.Label(root, text='Current address').grid(row=rownum, column=colnum)
        colnum=0
        rownum+=1
        tk.Entry(root, textvariable=newaddr).grid(row=rownum, column=colnum)
        rownum+=1
        tk.Entry(root, textvariable=newzip).grid(row=rownum, column=colnum)
        colnum+=1
        tempstr=str(row.Address)+' '+str(row.Zip)
        tk.Label(root, text=tempstr).grid(row=rownum, column=colnum)
        rownum+=1
    # Now set up select/close buttons
    def skip(event):
        choice.set('skip')
        root.destroy()
    def change(event):
        choice.set('change')
        root.destroy()
    f=tk.Button(root, text='Skip')
    f.bind('<Button-1>', skip)
    f.grid(row=rownum, column=0)
    g=tk.Button(root, text='Change')
    g.bind('<Button-1>', change)
    g.grid(row=rownum, column=1)
    root.mainloop()
    mychoice=choice.get()
    if mychoice=='change':
        # Find matching row for family (needed for all changes below)
        famkey=row.Famkey
        match=famcontact[famcontact['Famkey']==famkey]
        if len(match)==1:
            thisind=match.index[0]
        else:
            print('Problem finding unique entry for famkey', str(famkey))
            return famcontact # return unaltered
        # Reconstruct parent list
        if 'parlist' in upkwargs:
            newparlist=[] # constructing entirely new parent list from checkbox choices
            if 'newpar1' in upkwargs:
                if addpar1.get():
                    newparlist.append(upkwargs.get('newpar1',[np.nan,np.nan]))
                    #TODO fix nan error
                    print('Added parent',' '.join(upkwargs.get('newpar1')),' to ',str(row.Family))
            for i, val in enumerate(pbools):
                if pbools[i].get():
                    newparlist.append(parlist[i]) # [first, last] format
            if 'newpar2' in upkwargs:
                if addpar2.get():
                    newparlist.append(upkwargs.get('newpar2',[np.nan,np.nan]))
                    print('Added parent 2',' '.join(upkwargs.get('newpar2')),' to ',str(row.Family))
            # Now direct update of parents in this family's famcontact entry
            newparlist=newparlist[0:3] # limit to 3 entries
            while len(newparlist)<3:
                newparlist.append([np.nan,np.nan]) # pad with nan entries if necessary
            # now reset parent name entries
            for i in range(1,4): # reset 3 existing parents entries
                fname='Pfirst'+str(i)
                lname='Plast'+str(i)
                famcontact.loc[thisind, fname] = newparlist[i-1][0]
                famcontact.loc[thisind, lname] = newparlist[i-1][1]
        # Reconstruct email list
        if 'emails' in upkwargs:
            newemaillist=[]
            if 'email1' in upkwargs:
                if addemail1.get():
                    newemaillist.append(email1.get())
                    print('Added email1', email1.get(), ' to ', str(row.Family))
            for i, val in enumerate(ebools):
                if ebools[i].get():
                    newemaillist.append(emaillist[i])
            if 'email2' in upkwargs:
                if addemail2.get():
                    # insert in 2nd position
                    newemaillist.insert(1, email2.get())
                    print('Added email2', email2.get(), ' to ', str(row.Family))
            # Now update emails in famcontact entry
            newemaillist=newemaillist[0:3] # limit to 3 entries
            while len(newemaillist)<3:
                newemaillist.append(np.nan) # pad with nan entries if necessary
            # now reset email entries
            for i in range(1,4): # reset 3 existing email entries
                colname='Email'+str(i)
                famcontact.loc[thisind, colname]= newemaillist[i-1]
        # Reconstruct phone list
        if 'phones' in upkwargs:
            newphlist=[]
            if 'phone1' in upkwargs:
                if addphone1.get():
                    newphlist.append(upkwargs.get('phone1', [np.nan,np.nan]))
                    print('Added phone1', ','.join(upkwargs.get('phone1',[])), ' to ', str(row.Family))
            for i, val in enumerate(phbools):
                if phbools[i].get():
                    newphlist.append(phlist[i])
            # added at end... probably should go
            if 'phone2' in upkwargs:
                if addphone2.get():
                    # insert in 2nd position
                    newphlist.insert(1, upkwargs.get('phone2',[np.nan,np.nan]))
                    print('Added phone2', ','.join(upkwargs.get('phone2',[])), ' to ', str(row.Family))
            # Now update phone, text in famcontact entry
            newphlist=newphlist[0:4] # limit to 4 entries
            while len(newphlist)<4:
                newphlist.append([np.nan, np.nan]) # pad with nan entries if necessary
            # now reset phone/text entries
            for i in range(1,5): # reset max 4 phone entries
                phname='Phone'+str(i)
                textname='Text'+str(i)
                famcontact.loc[thisind, phname] = newphlist[i-1][0]
                famcontact.loc[thisind, textname] = newphlist[i-1][1]
        # Handle change of address (direct change if approved)
        # Also change associated zip code and reset parish of residence
        if 'address' in upkwargs:
            # FIX: newaddrbool is a tk.BooleanVar, which is always truthy as an
            # object; must call .get() to read the checkbox state (previously the
            # address was overwritten even when the box was left unchecked)
            if newaddrbool.get():
                print('Address changed for ', str(row.Family))
                famcontact.loc[thisind, 'Address'] = newaddr.get()
                # Reset parish of residence to nan (manually find and replace)
                famcontact.loc[thisind, 'Parish_residence'] = np.nan
                try:
                    famcontact.loc[thisind,'Zip']=int(newzip.get())
                except:
                    print('Problem converting zip code ', newzip.get())
        # TODO ... handle parish of registration
    return famcontact
def update_contact(row, famcontact):
    '''Update phone/text, email and parent lists for one family from google
    drive signup entries.

    row is a merge of existing famcontact info and new signup info; existing
    entries from fam_contact are listed first and new google-drive values carry
    an _n suffix.  Changes made automatically (no interactive approval):
    1) changing order of email or phone numbers (e.g. swap phone1 and phone2)
    2) add phone2 (or email2) if current phone2(email2) is nan
    3) change order of parents (new parent1)
    All other changes are routed through interactive approval via
    updatefamcon_tk.

    args:
        row -- merged Series for a single family
        famcontact -- family contacts dataframe (modified in place)
    returns:
        famcontact -- same dataframe with this family's row updated
    '''
    # each phone entry is a [number, textable Y/N] pair
    thisfam=row.Family
    match=famcontact[famcontact['Famkey']==row.Famkey]
    if len(match)==1:
        thisind=match.index[0] # correct index for updating this family in famcontacts
    else:
        print(str(row.Family), " not found in famcontacts.. shouldn't happen")
        return famcontact
    upkwargs={} # empty dict for monitoring all changes
    # check for possible change in address (house number as trigger)
    match1=re.search(r'\d+', row.Address)
    match2=re.search(r'\d+', row.Address_n)
    if match1 and match2:
        num1=match1.group(0)
        num2=match2.group(0)
        if num1!=num2: # change in address number strongly suggestive of actual change
            upkwargs.update({'address':True})
    else:
        print('No address # found for', str(thisfam))
    phonelist=[] # list of lists with number and textable Y/N
    for i in range(1,5): # get 4 existing phone entries (phone1, phone2, etc.)
        phname='Phone'+str(i)
        txtname='Text'+str(i)
        if str(row[phname])!='nan':
            phonelist.append([row[phname],row[txtname]]) # as phone and text y/N
    # New google drive entries will be Phone1_n.. look for phone/text pair in existing list
    if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] in phonelist: # new phone is required entry
        # default move of phone1, text1 to top of list - no confirmation
        if [row.Phone1_n,row.Text1_n]!=phonelist[0]: # move if not in first position
            phonelist.insert(0,phonelist.pop(phonelist.index([row.Phone1_n,row.Text1_n])))
            print('Phone 1 changed for ', str(thisfam))
            upkwargs.update({'phchange':True})
    if str(row.Phone1_n)!='nan' and [row.Phone1_n,row.Text1_n] not in phonelist: # new phone is required entry
        if [row.Phone1_n, np.nan] in phonelist: # remove if # present but w/o text indication (no confirm)
            phonelist.remove([row.Phone1_n,np.nan])
            phonelist.insert(0,[row.Phone1_n,row.Text1_n]) # insert in first position
            print('Updated phone 1 to', row.Phone1_n,' for ',str(thisfam))
            upkwargs.update({'phchange':True})
        else:
            # phone1 change to be confirmed interactively
            upkwargs.update({'phone1':[row.Phone1_n,row.Text1_n]})
            upkwargs.update({'phones': phonelist})
    if str(row.Phone2_n)!='nan': # check for phone2 entry (with _n suffix)
        if [row.Phone2_n,row.Text2_n] not in phonelist: # add second phone to 2nd position if not present
            if [row.Phone2_n,np.nan] in phonelist: # remove if # present but w/o text indication
                phonelist.remove([row.Phone2_n,np.nan])
                phonelist.insert(1,[row.Phone2_n,row.Text2_n])
                print ('Updated phone 2 to ', str(row.Phone2_n), 'for ', str(thisfam))
                upkwargs.update({'phchange':True})
            else: # get approval for phone 2 addition
                upkwargs.update({'phone2':[row.Phone2_n,row.Text2_n]})
                upkwargs.update({'phones': phonelist})
    # Construct existing list of known email addresses
    emaillist=[]
    for i in range(1,4): # get 3 existing email entries
        emailname='Email'+str(i)
        if str(row[emailname])!='nan':
            emaillist.append(row[emailname].lower())
    # Find new email1 entry in google drive data
    if str(row.Email)!='nan' and '@' in row.Email: # real primary gd named email
        if row.Email.lower() in emaillist: # already known: move to first position (no confirmation)
            if row.Email.lower()!=emaillist[0]: # check if in first position already
                # fix: search with the lowercased value (list holds lowercased entries)
                emaillist.insert(0,emaillist.pop(emaillist.index(row.Email.lower())))
                upkwargs.update({'emchange':True})
                print ('Updated email 1 ', str(row.Email.lower()), 'for family', str(thisfam))
        else: # confirm email1 if not present
            upkwargs.update({'email1':row.Email})
            upkwargs.update({'emails':emaillist})
    # look for new email in email2 position and add
    if str(row.Email2_n)!='nan' and '@' in row.Email2_n:
        if row.Email2_n.lower() not in emaillist: # add second email to 2nd position if not present
            upkwargs.update({'email2':row.Email2_n})
            upkwargs.update({'emails':emaillist})
    # Update list of parent names (max 3 entries)
    parlist=[] # construct existing list from family contacts
    # skip if all nan for entered parents (non-gd entry)
    for i in range(1,4): # construct existing parents list
        fname='Pfirst'+str(i)
        lname='Plast'+str(i)
        if str(row[fname])!='nan':
            parlist.append([row[fname],row[lname]]) # list of lists [first, last]
    if str(row.Pfirst1_n)!='nan': # skip if parent name is nan
        if [row.Pfirst1_n,row.Plast1_n] in parlist: # reorder in list
            if [row.Pfirst1_n,row.Plast1_n]!=parlist[0]: # check if already in first
                # move to first position (everything else requires approval);
                # fix: the pop/insert already moves the entry -- the original's
                # second insert created a duplicate parent entry
                parlist.insert(0,parlist.pop(parlist.index([row.Pfirst1_n,row.Plast1_n])))
                upkwargs.update({'parchange':True})
        else: # parent not in list (confirm)
            upkwargs.update({'newpar1':[row.Pfirst1_n,row.Plast1_n]})
            upkwargs.update({'parlist':parlist})
    if str(row.Pfirst2_n)!='nan': # Check for parent 2 entry
        if [row.Pfirst2_n,row.Plast2_n] not in parlist: # confirm addition of 2nd parent
            upkwargs.update({'newpar2':[row.Pfirst2_n,row.Plast2_n]})
            upkwargs.update({'parlist':parlist})
    # Save auto-changes in phone to family contacts
    if 'phchange' in upkwargs: # Record altered phonelist in famcontacts
        if 'phones' in upkwargs: # if present in upkwargs, update list
            upkwargs.update({'phones': phonelist}) # ensure most current copy
        phonelist=phonelist[0:4] # keep at most 4 entries (was 0:3 which silently dropped phone 4)
        while len(phonelist)<4:
            phonelist.append([np.nan,np.nan]) # pad with nan entries if necessary
        for i in range(1,5): # reset 4 existing phone entries
            phname='Phone'+str(i)
            txtname='Text'+str(i)
            famcontact.loc[thisind, phname] = phonelist[i-1][0] # first of pair is phone
            famcontact.loc[thisind, txtname] = phonelist[i-1][1] # 2nd of pair is text y/n
        del upkwargs['phchange']
        print('automatic phone changes for', thisfam)
    # Save auto-changes in emails to family contacts
    if 'emchange' in upkwargs: # Record altered email list in famcontacts
        if 'emails' in upkwargs: # if present in upkwargs, update list
            upkwargs.update({'emails': emaillist}) # ensure most current copy
        emaillist=emaillist[0:3] # keep at most 3 entries (was 0:2 which silently dropped email 3)
        while len(emaillist)<3:
            emaillist.append(np.nan) # pad with nan entries if necessary
        for i in range(1,4): # reset 3 existing email entries
            emname='Email'+str(i)
            famcontact.loc[thisind, emname] =emaillist[i-1]
        del upkwargs['emchange']
        print('automatic email changes for', thisfam)
    if 'parchange' in upkwargs: # Record altered parents list in famcontacts
        if 'parlist' in upkwargs: # if present in upkwargs, update list
            upkwargs.update({'parlist': parlist}) # ensure most current copy
        parlist=parlist[0:3] # keep at most 3 parents (was 0:2 which silently dropped parent 3)
        while len(parlist)<3:
            parlist.append([np.nan,np.nan]) # pad with [first,last] pairs so indexing below works
        for i in range(1,4): # reset 3 existing parent entries
            fname='Pfirst'+str(i)
            lname='Plast'+str(i)
            try:
                famcontact.loc[thisind, fname] =parlist[i-1][0]
                famcontact.loc[thisind, lname] =parlist[i-1][1]
            except (IndexError, TypeError):
                print('Error updating parents for', thisfam)
        del upkwargs['parchange']
        print('automatic parent changes for', thisfam)
    # now check for any changes needing interactive approval
    if len(upkwargs)>0: # something needs interactive approval
        famcontact=updatefamcon_tk(row, famcontact, **upkwargs)
    return famcontact
def updateplayer_tk(row, players, **upkwargs):
    ''' Interactive approval of player info updates (except signup date).

    Builds a tkinter dialog showing existing vs newly entered values for each
    field flagged in upkwargs ('DOB' and/or 'school'); on "Change" the chosen
    values are written directly into players (but not yet autosaved).
    Called by processdatachanges.

    args:
        row -- merged Series with existing player info plus _n-suffixed new
               google-drive values (DOB_n, School_n) and the Plakey id
        players -- players dataframe, modified in place on approval
        upkwargs -- presence of 'DOB'/'school' keys indicates a discrepancy
                    that needs user resolution
    returns:
        players -- same dataframe, possibly updated
    '''
    commonschools=['Cabrini','Soulard','SLPS','Charter','Private']
    root = tk.Tk()
    root.title('Update player info')
    # StringVar must be defined outside of the event-callback functions so the
    # closures below can reference it
    choice=tk.StringVar()
    rownum=0
    mytxt='Player:'+row.First+' '+row.Last+' # '+str(row.Plakey)
    tk.Label(root, text=mytxt).grid(row=rownum, column=0)
    rownum+=1
    # Use listbox of common schools?
    if 'DOB' in upkwargs: # indicates discrepancy
        # NOTE(review): date() here takes a single argument, so it appears to be
        # a project-level timestamp->date helper, not datetime.date -- confirm
        DOB1=date(row.DOB)
        DOB2=date(row.DOB_n)
        # create and display DOB variables; buttons copy either value into the
        # editable entry field bound to DOB
        def ChooseDOB1(event):
            DOB.set(datetime.strftime(DOB1,'%m/%d/%y'))
        def ChooseDOB2(event):
            DOB.set(datetime.strftime(DOB2,'%m/%d/%y'))
        DOB=tk.StringVar()
        DOB.set(datetime.strftime(DOB1,'%m/%d/%y')) # defaults to original
        tk.Label(root, text='Update date of birth?').grid(row=rownum, column=0)
        mytxt='current DOB:'+datetime.strftime(DOB1,'%m/%d/%y')
        b=tk.Button(master=root, text=mytxt)
        b.bind('<Button-1>', ChooseDOB1)
        b.grid(row=rownum, column=1)
        mytxt='New DOB:'+datetime.strftime(DOB2,'%m/%d/%y')
        b=tk.Button(master=root, text=mytxt)
        b.bind('<Button-1>', ChooseDOB2)
        b.grid(row=rownum, column=2)
        tk.Entry(master=root, textvariable=DOB).grid(row=rownum, column=3)
        rownum+=1
    if 'school' in upkwargs:
        school=tk.StringVar()
        school.set(row.School) # default to existing value
        tk.Label(root, text='Update school?').grid(row=rownum, column=0)
        rownum+=1
        # button callbacks copy existing/new value into the school entry field
        def newschool(event):
            school.set(row.School_n)
        def oldschool(event):
            school.set(row.School)
        def pickschool(event):
            # double-click to pick standard school choice
            items=lb.curselection()[0] # gets selected position in list
            school.set(commonschools[items])
        tk.Entry(root, textvariable=school).grid(row=rownum, column=2)
        mytxt='new school:'+str(row.School_n)
        b=tk.Button(master=root, text=mytxt)
        b.bind('<Button-1>', newschool)
        b.grid(row=rownum, column=1)
        mytxt='existing school:'+str(row.School)
        b=tk.Button(master=root, text=mytxt)
        b.bind('<Button-1>', oldschool)
        b.grid(row=rownum, column=0)
        # also include selectable listbox of common school choices
        lb=tk.Listbox(master=root, selectmode=tk.SINGLE)
        lb.bind("<Double-Button-1>", pickschool)
        lb.grid(row=rownum, column=3)
        for i,sch in enumerate(commonschools):
            lb.insert(tk.END, sch)
        rownum+=1
    # Now set up select/close buttons; both destroy the window, only 'change'
    # triggers the dataframe update below
    def skip(event):
        choice.set('skip')
        root.destroy()
    def change(event):
        choice.set('change')
        root.destroy()
    f=tk.Button(root, text='Skip')
    f.bind('<Button-1>', skip)
    f.grid(row=rownum, column=0)
    g=tk.Button(root, text='Change')
    g.bind('<Button-1>', change)
    g.grid(row=rownum, column=1)
    root.mainloop()
    mychoice=choice.get()
    if mychoice=='change':
        try:
            # make changes directly to players after finding correct index using plakey
            plakey=row.Plakey
            match=players[players['Plakey']==plakey]
            thisind=match.index[0]
            if 'school' in upkwargs:
                players.loc[thisind,'School']= school.get()
            if 'DOB' in upkwargs:
                newDOB=datetime.strptime(DOB.get(),'%m/%d/%y')
                players.loc[thisind,'DOB']= newDOB
        except:
            # broad catch: a bad manual DOB entry or missing plakey should not
            # kill the whole interactive session
            print('Error updating info for', row.Plakey, row.First, row.Last)
    return players
def prepcontacts(df):
    ''' Prepare a dataframe for update-contacts / matching with google drive
    info.

    Strips stray whitespace from manually entered name/phone/email columns and
    normalizes Yes/No text-permission columns to single-letter Y/N.  Columns
    that are absent (e.g. in signups) or hold no string data are skipped.

    args:
        df -- players/famcontact/signups dataframe (modified in place)
    returns:
        df -- same dataframe
    '''
    mycols=['Pfirst1', 'Plast1','Pfirst2', 'Plast2', 'Pfirst3', 'Plast3',
        'Phone1', 'Text1','Phone2', 'Text2', 'Phone3', 'Text3', 'Phone4',
        'Text4', 'Email1','Email2', 'Email3']
    for col in mycols:
        try:
            df.loc[:,col]=df[col].str.strip()
        except (KeyError, AttributeError): # column missing or only nan values
            pass
    for col in ['Text1','Text2','Text3']:
        try:
            # fix: regex=True is required to use case=False in pandas >= 2.0;
            # without it the replace raised ValueError and was silently skipped
            df.loc[:,col]=df[col].str.replace('No','N', case=False, regex=True)
            df.loc[:,col]=df[col].str.replace('Yes','Y', case=False, regex=True)
        except (KeyError, AttributeError):
            pass
    return df
def findyearseason(df):
    ''' Pass raw signups and determine year and sports season.

    Year comes from the first valid signup timestamp; season is inferred from
    sport names present (soccer -> Fall, track -> Spring, basketball ->
    Winter, with later checks overriding earlier ones as in the original
    precedence).

    args:
        df -- raw signups dataframe with Timestamp and Sport columns
    returns:
        (season, year) -- season string or None if no recognized sport found,
        and 4-digit int year
    raises:
        ValueError -- if no entry in Timestamp is an actual datetime
        (the original code looped forever in that case)
    '''
    now=datetime.now()
    # find first entry that is a real timestamp (manual edits can be strings);
    # original compared val != datetime (value vs type) which is always True
    val=None
    for ts in df['Timestamp']:
        if isinstance(ts, datetime):
            val=ts
            break
    if val is None:
        raise ValueError('No valid timestamp found in signups')
    year=val.year # use year value from signup timestamps
    if now.year!=val.year:
        print ('Possible year discrepancy: Signups are from ',str(val.year))
    # now find sports season; None (instead of NameError) when nothing matches
    season=None
    if df['Sport'].str.contains("occer", na=False).any():
        season='Fall'
    if df['Sport'].str.contains("rack", na=False).any():
        season='Spring'
    if df['Sport'].str.contains("asket", na=False).any():
        season='Winter'
    return season, year
def outputduplicates(df):
    '''Print the names of players with duplicated signup entries (same
    first/last/sport) so they can then be deleted by hand from the google
    drive signups.'''
    # duplicated() marks the 2nd and later occurrences as True
    dups=df[df.duplicated(['First','Last','Sport'])]
    for first, last in zip(dups.First.tolist(), dups.Last.tolist()):
        print('Duplicated signup for player: {} {}'.format(first, last))
    return
def formatphone(df):
    ''' Convert all entered phone numbers in the dataframe's Phone* columns to
    314-xxx-xxxx strings and standardize the Text* (textable) fields to Y.

    args:
        df -- any dataframe with Phone*/Text* columns (modified in place)
    returns:
        df -- same dataframe
    '''
    def phoneFormat(val):
        # reformat a single phone entry; non-strings (nan) pass through
        if not isinstance(val, str):
            return val
        val="".join(val.split(' ')) # remove any embedded whitespace
        if val=='': # blank phone causes problems
            return np.nan
        if re.search(r'(\d+-\d+-\d+)', val):
            return val # already in dashed form
        val=re.sub("[^0-9]", "", val) # strip all non-digit characters
        if len(val)==7: # local number: assume 314 area code
            val='314'+val
        elif len(val)==11 and val.startswith('1'): # remove leading country code
            val=val[1:]
        if len(val)==10:
            # fix: 7- and 11-digit inputs are now dash-formatted too (the
            # original returned them as bare digit strings)
            return val[0:3]+'-'+val[3:6]+'-'+val[6:10]
        return val # unparseable ... return digit string unchanged
    # find phone columns (named Phone1, Phone2, etc.)
    for col in [str(s) for s in df.columns if 'Phone' in s]:
        df.loc[:,col]=df[col].apply(phoneFormat)
    # now change yes in any text field to Y
    for col in [str(s) for s in df.columns if 'Text' in s]:
        df.loc[:,col]=df[col].replace('yes','Y')
        df.loc[:,col]=df[col].replace('Yes','Y')
    return df
def standardizeschool(df):
    ''' Pass any dataframe with a School column and standardize the various
    spellings onto canonical names (Cabrini, Soulard, Public, City Garden).'''
    # ordered pattern -> canonical-name mapping; patterns are case-insensitive
    # regex alternations matched as substrings
    canonical=[('frances|cabrini|sfca','Cabrini'),
               ('soulard','Soulard'),
               ('public','Public'),
               ('city garden|citygarden','City Garden')]
    for pattern, name in canonical:
        mask=df['School'].str.contains(pattern, na=False, case=False)
        df.loc[mask,'School']=name
    return df
def formatnamesnumbers(df):
    '''Switch names to title case, standardize gender and grade values, then
    run phone/text reformatting and school-name standardization.'''
    def tidy(val):
        # title-case and strip strings; non-strings (nan) pass through untouched
        try:
            return val.title().strip()
        except AttributeError:
            return val
    namecols=['First','Last','Family','Pfirst1','Plast1','Pfirst2','Plast2','Email','Email2']
    for col in (c for c in namecols if c in df.columns):
        df.loc[:, col]=df[col].apply(tidy)
    if 'Gender' in df:
        df.loc[:,'Gender']=df.Gender.replace('Girl','f')
        df.loc[:,'Gender']=df.Gender.replace('Boy','m')
    if 'Grade' in df:
        df.loc[:,'Grade']=df.Grade.replace('K',0)
        df.loc[:,'Grade']=df.Grade.replace('pK',0)
        try:
            df.loc[:,'Grade']=df.Grade.astype(int)
        except (ValueError, TypeError):
            print('Player grade likely missing from raw signup file... enter manually')
    df=formatphone(df) # phone/text normalization
    if 'School' in df:
        df=standardizeschool(df) # use "Cabrini" and "Soulard" as school names
    return df
def graduate_players(players, year):
    ''' Recalculate grade based on grade adjustment, school year and age
    (run once per year in the fall).

    Some player grades will already have been updated (generally google drive
    entries); the recalculation should not change those.  Rows with a missing
    grade adjustment or DOB are skipped.

    args:
        players -- players dataframe (modified in place)
        year -- 4-digit fall school year; ages computed as of Aug 1
    returns:
        players -- same dataframe with Grade recalculated
    '''
    players.loc[:,'Grade']=players.Grade.replace('K',0) # K stored as 0 during the math
    for index,row in players.iterrows():
        # fix: read values from the iterrows row itself -- the original used
        # players.iloc[index] which is wrong for any non-default index
        grade=int(row['Grade']) # currently listed grade
        gradeadj=row['Gradeadj']
        dob=row['DOB']
        if str(gradeadj)=='nan' or str(dob)=='NaT': # skip grade update if info is missing
            continue
        # NOTE(review): date() takes a single argument here, so it appears to be
        # a project-level timestamp->date helper -- confirm
        dob=date(dob)
        # calculate current age at beginning of this school year on 8/1
        age=date(year,8,1)-dob
        age = (age.days + age.seconds/86400)/365.2425
        # assign grade based on age and grade adjustment (kindergarten at age 5)
        newgrade=int(age)+int(gradeadj)-5
        if grade!=newgrade:
            print('Grade changed from',grade,'to',newgrade,'for', row['First'], row['Last'])
            players.loc[index, 'Grade'] = newgrade
    players.loc[:,'Grade']=players.Grade.replace(0,'K')
    return players
def removeEmptyFams(players, famcontact):
    '''
    Remove empty families (those with no remaining active players).
    Lists the candidates, asks for interactive confirmation, then drops them
    and rewrites family_contact.csv.
    '''
    activekeys=[int(i) for i in list(players.Famkey.unique())]
    # famkeys present in contacts but with no active player rows
    emptykey=[int(i) for i in list(famcontact.Famkey.unique()) if int(i) not in activekeys]
    empty=famcontact[famcontact['Famkey'].isin(emptykey)]
    print('Remove empty families:')
    for ind, row in empty.iterrows():
        print(row.Family, ':',row.Pfirst1, row.Plast1)
    choice=input("Remove empty families (Y,N)?\n")
    if choice.upper()=='Y':
        famcontact=famcontact[~famcontact['Famkey'].isin(emptykey)]
        outname=cnf._INPUT_DIR+'\\family_contact.csv'
        famcontact.to_csv(outname, index=False)
    return famcontact
def removeHSkids(players):
    ''' Drop graduated players (grade 9+) from the list after interactive
    confirmation; grades K through 8 are retained.'''
    keepgrades=list(range(0,9))+['K']
    keepmask=players.Grade.isin(keepgrades)
    for ind, row in players.loc[~keepmask].iterrows():
        print(row.First, row.Last)
    choice=input('Remove above HS players (Y/N)?\n')
    if choice.upper()=='Y':
        players=players.loc[keepmask]
        print('HS Players removed but not autosaved')
    return players
def estimategrade(df, year):
    '''Estimate grade for this sports season based on DOB (not commonly used).

    Only fills rows whose Grade is missing; age is computed as of Aug 1 of the
    given year with kindergarten at age 5.

    args:
        df -- signups/players dataframe (modified in place)
        year -- 4-digit year of this school/sports season
    returns:
        df -- same dataframe with estimated Grade values filled in
    '''
    for index, row in df.iterrows():
        grade=row['Grade']
        if str(grade)!='nan': # skip players who already have an assigned grade
            continue
        dob=row['DOB']
        first=row['First']
        last=row['Last']
        # fix: check for a missing DOB *before* converting it (the original
        # converted first, so a missing value crashed before being skipped);
        # also catch pandas NaT, which str()s to 'NaT' rather than 'nan'
        if str(dob) in ('nan','NaT'):
            print ('DOB missing for ', first,' ', last)
            continue # skip to next if dob entry is missing
        # NOTE(review): date() with one arg appears to be a project-level
        # timestamp->date helper -- confirm
        dob=date(dob)
        currage=date(year,8,1) - dob
        currage = (currage.days + currage.seconds/86400)/365.2425 # age on first day of school/season
        gradeest=int(currage-5)
        if gradeest==0:
            gradeest='K'
        print(first, last, 'probably in grade', gradeest)
        df.loc[index,'Grade']=gradeest
    return df
def updateoldteams(teams, year):
    ''' Load old teams (after copy to teams tab in teams_coaches) and
    auto-update grade, division, team name and grade range for the new school
    year.  Must be manually saved with saveteams; remaining adjustments are
    then made by hand in Excel.

    args:
        teams -- teams dataframe from the prior year
        year -- new 4-digit school year
    returns:
        teams -- updated dataframe (returned unaltered if already at year)
    '''
    # check to ensure teams are not already updated
    if teams.iloc[0]['Year']==year:
        print('Teams already updated for ', year,' school year')
        return teams # pass back unaltered
    # temporarily make the K to 0 replacements
    teams.Grade=teams.Grade.replace('K',0)
    # fix: original wrote teams.loc[:'Graderange'] (a row-label slice, missing
    # the comma) instead of selecting the Graderange column
    teams.loc[:,'Graderange']=teams['Graderange'].astype(str) # convert all to string
    teams.loc[:,'Year']=year
    teams.loc[:,'Grade']+=1
    for index, row in teams.iterrows():
        grade=teams.loc[index]['Grade']
        div=teams.loc[index]['Division'] # division must match grade
        div=div.replace('K','0') # replace any Ks in string
        # find/replace every digit in the division string with the new grade
        newdiv=''.join([s if not s.isdigit() else str(grade) for s in div])
        teams.loc[index,'Division'] = newdiv
        cycname=teams.loc[index]['Team'] # update grade portion of team name
        if cycname.startswith('K'):
            teams.loc[index,'Team'] = '1'+ cycname[1:]
        elif cycname[0].isdigit(): # now update teams beginning w/ numbers
            teams.loc[index,'Team'] = str(grade)+ cycname[1:]
        # update grade ranges (stored as a digit string, e.g. '12' -> '23')
        grrange=teams.loc[index]['Graderange'].replace('K','0')
        teams.loc[index,'Graderange'] = ''.join([str(int(i)+1) for i in grrange])
    # no auto-save... save with saveteams after checking for proper changes
    return teams
def splitcoaches(df):
    ''' Pass CYC teams list; split comma-separated assistant ids and duplicate
    each team row so every assistant coach gets its own roster row.

    args:
        df -- CYC teams dataframe with 'Coach ID' and 'AssistantIDs' columns
    returns:
        df -- original rows (Role='Coach') plus one 'Assistant Coach' row per
              assistant id, sorted by team name
    '''
    df.loc[:,'Role']='Coach' # default role (first listed entry is head coach)
    assistants=df.dropna(subset=['AssistantIDs']) # drop teams w/ no asst coaches
    newrows=[]
    for index, rows in assistants.iterrows():
        val=assistants.loc[index,'AssistantIDs']
        for asst in [str(s).strip() for s in val.split(',')]: # ids for this team
            newrow=assistants.loc[index].copy() # duplicate team entry as series
            newrow.loc['Coach ID'] = asst # set this asst coach's ID
            newrow.loc['Role'] = 'Assistant Coach'
            newrows.append(newrow)
    if newrows:
        # fix: DataFrame.append was removed in pandas 2.0; concat is the replacement
        df=pd.concat([df, pd.DataFrame(newrows)])
    df=df.sort_values(['Team'],ascending=True)
    return df
def addcoachestoroster(teams, coaches):
    '''Creates roster entries for coaches for each CYC team.
    Pass teams and coaches (with coach roster info); needed roster cols are
    all below (except sport, which is used in output parsing).
    args: teams -- team table w/ head and asst coach CYC ids
          coaches -- coaches table with CYC Id (key) and associated info
    returns: coachroster -- separate df to be appended to main player roster
    '''
    # this season's CYC-level teams contain a dash in the team name
    cycmask = teams['Team'].str.contains('-', case=False, na=False)
    CYCcoach=splitcoaches(teams.loc[cycmask]) # one row per head/asst coach
    # match coach info by CYC-ID
    CYCcoach=pd.merge(CYCcoach, coaches, how='left', on=['Coach ID'], suffixes=('','_r'))
    mycols=['Sport','Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone', 'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade', 'Team', 'School', 'Parish of Registration', 'Parish of Residence', 'Coach ID']
    missing=[col for col in mycols if col not in CYCcoach.columns]
    for col in missing:
        CYCcoach[col]='' # birthdate generally missing
    CYCcoach=CYCcoach[mycols] # put back in desired order
    # drop duplicates on CYC ID, team (sometimes occurs during merge)
    return CYCcoach.drop_duplicates(['Coach ID','Team'])
def countteamplayers(df, teams, season, year):
    ''' For each team, summarize number of players (with counts of those
    younger or older than team grade) and a name list; pass mastersignups.
    Autosaves the result to the Teams tab of Teams_coaches.xlsx.'''
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    df=df[df['Year']==year] # removes possible naming ambiguity
    df=df[df['Sport'].isin(sportsdict.get(season,[]))] # only this sports season
    df.Grade=df.Grade.replace('K',0)
    df.Grade=df.Grade.astype('int')
    teams.loc[:,'Grade']=teams.Grade.replace('K',0)
    teams.loc[:,'Grade']=teams.Grade.astype('int')
    teams.loc[:,'Playerlist']=teams.Playerlist.astype('str')
    for index, row in teams.iterrows():
        roster=df[df['Team']==teams.loc[index]['Team']] # this team's signups
        teams.loc[index,'Number'] = len(roster) # total number of players
        # compose player list ("First L") and add to teams
        names=[ro['First']+' '+ro['Last'][0] for _, ro in roster.iterrows()]
        teams.loc[index,'Playerlist'] = ", ".join(names)
        # count players above or below the team's grade level
        thisgrade=int(teams.loc[index]['Grade'])
        teams.loc[index,'Upper'] = (roster.Grade > thisgrade).sum()
        teams.loc[index,'Lower'] = (roster.Grade < thisgrade).sum()
    writetoxls(teams, 'Teams', 'Teams_coaches.xlsx')
    return teams
def writecontacts(df, famcontact, players, season, year):
    ''' From mastersignups and teams, output contact lists for all teams/all
    sports separately.

    Fall/Winter: one xlsx per sport, one sheet per team.  Spring: all
    bat-and-ball sports in a single workbook plus the entire track team in its
    own file.  Files are written to cnf._OUTPUT_DIR.

    args:
        df -- mastersignups dataframe
        famcontact -- family contact info (merged on Famkey)
        players -- player info incl. school (merged on Plakey)
        season -- 'Fall', 'Winter' or 'Spring'
        year -- 4-digit int year
    returns: None (direct save to output directory)
    '''
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    sportlist=sportsdict.get(season,[]) # [] (not crash) for unknown season
    df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
    # get family contact info from famcontacts
    df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
    # get school from players.csv
    df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
    # Sort by grade pre-split (K temporarily becomes 0 so the sort is numeric)
    df.loc[:,'Grade']=df.Grade.replace('K',0)
    df.loc[:,'Grade']=df.Grade.apply(int)
    df=df.sort_values(['Grade'], ascending=True)
    df.loc[:,'Grade']=df.Grade.replace(0,'K')
    df.loc[:,'Team']=df.Team.replace(np.nan,'None') # still give contacts if team not yet assigned
    df.loc[:,'Team']=df.Team.replace('','None')
    mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone1', 'Text1','Email1', 'Phone2', 'Text2',
            'Email2', 'Team', 'Pfirst1', 'Plast1', 'Pfirst2', 'Plast2', 'Plakey', 'Famkey', 'Family']
    # Standard sport contacts output for soccer, VB, basketball
    if season!='Spring':
        for sport in sportlist:
            fname=cnf._OUTPUT_DIR+'\\'+sport+'_'+str(year)+'_contacts.xlsx'
            writer=pd.ExcelWriter(fname, engine='openpyxl')
            Thissport=df[df['Sport']==sport]
            teamlist= Thissport.Team.unique()
            teamlist=np.ndarray.tolist(teamlist)
            # Combine transfers to same school (Ambrose#2B and Ambrose#3G share a tab)
            transchools=[s.split('#')[0] for s in teamlist if '#' in s]
            teamlist=[s for s in teamlist if '#' not in s]
            teamlist.extend(transchools) # all to same school as single "team"
            Thissport=Thissport[mycols] # drop columns and rearrange
            for team in teamlist:
                thisteam=Thissport[Thissport['Team'].str.contains(team)]
                thisteam.to_excel(writer,sheet_name=team,index=False) # this overwrites existing file
            writer.close() # fix: ExcelWriter.save() was removed in pandas 2.0
    else: # handle spring special case
        Balls=df[df['Sport']!='Track'] # all ball-bat sports together
        Balls=Balls[mycols]
        teamlist= Balls.Team.unique()
        teamlist=np.ndarray.tolist(teamlist)
        # Combine transfers
        transchools=[s.split('#')[0] for s in teamlist if '#' in s]
        teamlist=[s for s in teamlist if '#' not in s]
        teamlist.extend(transchools) # all to same school as single "team"
        fname=cnf._OUTPUT_DIR+'\\'+'Batball'+'_'+str(year)+'_contacts.xlsx'
        writer=pd.ExcelWriter(fname, engine='openpyxl')
        # create a separate tab for each team and write the contacts
        for team in teamlist:
            thisteam=Balls[Balls['Team'].str.contains(team)]
            thisteam.to_excel(writer,sheet_name=team,index=False)
        writer.close() # overwrites existing
        # Entire track team as single file
        Track=df[df['Sport']=='Track']
        Track=Track[mycols] # drop columns and rearrange
        fname=cnf._OUTPUT_DIR+'\\'+'Track'+'_'+str(year)+'_contacts.xlsx'
        writer=pd.ExcelWriter(fname, engine='openpyxl')
        Track.to_excel(writer,sheet_name='Track',index=False)
        writer.close()
    return
def makegoogcont(df, famcontact, players, season, year):
    '''Create and save a google-contacts import file (csv) for each Cabrini
    team of this season.

    Mastersignups are filtered to this season/year, merged with family contact
    and player info, and one google-format csv per team is written to
    cnf._OUTPUT_DIR (transferred non-Cabrini teams, unassigned and dropped
    players are excluded).

    args:
        df -- mastersignups dataframe
        famcontact -- family contact info (merged on Famkey)
        players -- player info (merged on Plakey)
        season -- 'Fall', 'Winter' or 'Spring'
        year -- 4-digit int year
    returns: None (direct csv save per team)
    '''
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    sportlist=sportsdict.get(season)
    df=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year)] # season is not in mastersignups... only individual sports
    '''# for transfers to same school (but different grades), combine all into single list for given school
    for index,row in df.iterrows():
        if str(df.loc[index]['Team'])!='nan': # avoids nan team screwups
            if '#' in df.loc[index]['Team']: # this combines Ambrose#2B, Ambrose#3G to single tab
                df=df.set_value(index,'Team',df.loc[index]['Team'].split('#')[0])
    '''
    # get family contact info from famcontacts
    df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
    # get school from players.csv
    df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
    # Drop any players not yet assigned
    df=df.dropna(subset=['Team'])
    # Full contacts list format for android/google
    for i, sport in enumerate(sportlist):
        Thissport=df[df['Sport']==sport]
        teamlist= Thissport.Team.unique()
        teamlist=np.ndarray.tolist(teamlist)
        # drop if team is not yet assigned
        teamlist=[s for s in teamlist if str(s) != 'nan']
        # drop if team is 'drop'
        teamlist=[s for s in teamlist if str(s) != 'drop']
        # Drop all non-Cabrini transferred teams (which must contain #)
        teamlist=[s for s in teamlist if '#' not in s]
        # Combine track subteams to single team (first 5 chars, e.g. 'Track')
        teamlist=[s[0:5] if 'Track' in s else s for s in teamlist]
        teamlist=set(teamlist) # de-duplicate after the track collapse
        teamlist=list(teamlist)
        # now create google contacts list for each Cabrini team and save
        for j, team in enumerate(teamlist):
            thisteam=Thissport[Thissport['Team'].str.contains(team)]
            # Drop duplicate from same family (one contact per phone number)
            thisteam=thisteam.drop_duplicates('Phone1')
            thisteam.loc[:,'Name']=thisteam['First']+' '+thisteam['Last']
            thisteam.loc[:,'Group']=sport+str(year) # google contact group label
            # rename columns to the google-contacts csv import schema
            mycols=['Name','Pfirst1','Last','Phone1','Phone2','Email1','Email2','Group']
            newcols=['Name','Additional Name','Family Name','Phone 1 - Value','Phone 2 - Value',
                     'E-mail 1 - Value','E-mail 2 - Value','Group Membership']
            thisteam=thisteam[mycols]
            thisteam.columns=newcols
            thisteam=thisteam.replace(np.nan,'') # google csv wants blanks, not nan
            fname=cnf._OUTPUT_DIR+'\\google'+team+'.csv'
            thisteam.to_csv(fname, index=False)
    return
def createsignups(df, Mastersignups, season, year):
    ''' Pass signups and add them to the master signup list, assigning unique
    SUkeys to new entries; typically use writesignupstoExcel instead.

    args:
        df - signup (dataframe)
        Mastersignups - existing all-signups db-like file
        season - ['Fall','Winter','Spring']
        year - 4 digit year as int
    returns:
        Mastersignups - same with new unique entries (also autosaved to csv)
    '''
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    sportlist=sportsdict.get(season,[])
    # TODO use comma sep on multiple sport entries??
    now=datetime.now()
    thisdate=date.strftime(now,'%m/%d/%Y') # for signup date
    df.loc[:,'SUdate']=thisdate # can do this globally although might also add to signups
    startlen=len(Mastersignups) # starting number of signups
    for col in ['SUkey','Year']:
        if col not in df:
            df.loc[df.index, col]=np.nan
    mycols=Mastersignups.columns.tolist() # desired column order
    for col in mycols:
        if col not in df:
            df.loc[df.index,col]=np.nan
    # TODO one option here would be to clone comma-separated sport entries (i.e. track and softball)
    for sport in sportlist:
        # Use caution here due to Tball in Softball string problem (currently set to T-ball)
        # .copy() so the per-row assignments below do not hit a chained-indexing view
        thissport=df.loc[df['Sport'].str.contains(sport, na=False, case=False)].copy() # also handles multi-sports
        # Prepare necessary columns
        for index, row in thissport.iterrows():
            thissport.loc[index,'Sport'] = sport # set individually to formal sport name
            thissport.loc[index,'Year'] = int(year)
            thissport.loc[index,'SUkey'] = 0 # assigned actual key below
        # Now organize signups and add year
        Mastersignups=pd.concat([thissport,Mastersignups], ignore_index=True)
    Mastersignups=Mastersignups[mycols] # put back in original order
    # drop duplicates and save master signups file (keep older signup if present... already assigned SUkey)
    Mastersignups=Mastersignups.sort_values(['Plakey', 'Sport','Year','SUkey'], ascending=False) # keeps oldest signup
    Mastersignups=Mastersignups.drop_duplicates(subset=['Plakey', 'Sport','Year']) # drop duplicates (for rerun with updated signups)
    newsignups=len(Mastersignups)-startlen # number of new signups added this pass
    print('Added ', str(newsignups),' new ', season, ' signups to master list.')
    # add unique SUkey (if not already assigned)
    neededkeys = Mastersignups[(Mastersignups['SUkey']==0)] # entries still lacking a key
    availSUkeys=findavailablekeys(Mastersignups, 'SUkey', len(neededkeys)) # get necessary # of unique SU keys
    keycounter=0
    for index, row in neededkeys.iterrows():
        Mastersignups.loc[index,'SUkey'] = availSUkeys[keycounter] # reassign SU key in source master list
        keycounter+=1 # move to next available key
    Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace('K',0)
    Mastersignups=Mastersignups.sort_values(['Year', 'Sport', 'Gender','Grade'], ascending=False)
    Mastersignups.loc[:,'Grade']=Mastersignups.Grade.replace(0,'K')
    # autocsvbackup(Mastersignups,'master_signups', newback=True)
    # fix: date_format must be a strftime pattern -- 'mm/dd/yy' wrote that
    # literal text into every date cell instead of the date itself
    Mastersignups.to_csv(cnf._INPUT_DIR + '\\master_signups.csv', index=False, date_format='%m/%d/%y') # automatically saved
    return Mastersignups
def replaceacro(df, acronyms):
    '''Expand parish/school acronyms to their full translations in place.

    args:
        df -- roster dataframe with 'Parish of Registration',
              'Parish of Residence' and 'School' columns
        acronyms -- dataframe with 'acronym' and 'translation' columns
    returns: df with exact-match acronym values replaced
    (currently used only for CYC rosters)
    '''
    # TODO only for parish columns
    target_cols = ('Parish of Registration', 'Parish of Residence', 'School')
    for _, entry in acronyms.iterrows():
        short = entry['acronym']
        full = entry['translation']
        for col in target_cols:
            # Series.replace matches the whole cell value, not substrings
            df.loc[:, col] = df[col].replace(short, full)
    return df
def createrosters(df, season, year, players, teams, coaches, famcontact, acronyms):
    ''' From Mastersignups of this season creates Cabrini CYC roster and transfers (for separate sports)
    and all junior sports (calculates ages for Judge Dowd); pulls info merged from famcontact, players, teams, and coaches
    teams should already be assigned using teams xls and assigntoteams function

    args:
        df -- master signups dataframe (one row per player per sport per year)
        season -- 'Fall', 'Winter' or 'Spring' (selects sports via sportsdict)
        year -- signup year to roster
        players, teams, coaches, famcontact, acronyms -- supporting tables merged in below
    returns: None ... direct save to OUTPUT_DIR
    '''
    # sports offered per season; 'specials' (Chess/Track) are rostered separately at bottom
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    specials=['Chess','Track']
    sports=sportsdict.get(season)
    sportlist=[sport for sport in sports if sport not in specials]
    speciallist=[sport for sport in sports if sport in specials] # for track, chess, other oddballs
    Specials=df[(df['Year']==year) & (df['Sport'].isin(speciallist))] # deal with these at bottom
    # Proceed with all normal South Central sports
    df = df[(df['Year']==year) & (df['Sport'].isin(sportlist))] # filter by year
    # make duplicate entry row for double-rostered players (multiple team assignments)
    thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
    doubles=df.loc[thismask]
    for index, rows in doubles.iterrows():
        team=doubles.loc[index,'Team']
        team=team.split(',')[1] # grab 2nd of duplicate teams
        doubles.loc[index, 'Team'] = team
    df=pd.concat([df,doubles], ignore_index=True) # adds duplicate entry for double-rostered players with 2nd team
    thismask = df['Team'].str.contains(',', na=False) # multiple teams are comma separated
    # NOTE(review): Series.iteritems was removed in pandas 2.0 -- switch to .items()
    for index, val in thismask.iteritems():
        if val:
            team=df.loc[index]['Team']
            team=team.split(',')[0] # grab 1st of duplicate teams
            df.loc[index, 'Team'] = team # removes 2nd team from first entry
    # now grab all extra info needed for CYC rosters
    # Street, City, State, Zip, Phone, email, Parishreg, parishres from fam-contact
    df=pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('','_r'))
    # Get division from Teams xls
    df=pd.merge(df, teams, how='left', on=['Team'], suffixes=('','_r2')) # effectively adds other team info for roster toall players
    # DOB, School from players.csv
    df=pd.merge(df, players, how='left', on=['Plakey'], suffixes=('','_r3'))
    df.loc[:,'Role']='Player' # add column for role
    # df['Open/Closed']=np.nan
    df.loc[:,'Coach ID']=''
    def formatDOB(val):
        # Pat moore date format is 4/4/19.. reformat as string for csv output
        # returns '' for anything that is not a datetime-like value
        try:
            return datetime.strftime(val, "%m/%d/%y")
        except:
            # print('Problem converting %s of type %s to date string format' %(val, type(val)) )
            return ''
    # Find Cabrini CYC names (containing hyphen)
    thismask = df['Team'].str.contains('-', case=False, na=False)
    CabriniCYC=df.loc[thismask] # all players on Cabrini CYC teams all sports this season
    # Finds info for CYC coaches (all sports) and generate roster entries
    coachroster=addcoachestoroster(teams, coaches) # coaches roster already in correct format + sport column
    if len(CabriniCYC)>1: # skip if all transfers or junior (i.e. in spring)
        # Split by sport
        for i, sport in enumerate(sportlist):
            Sportroster=CabriniCYC[CabriniCYC['Sport']==sport]
            # reformat this mess as single CYC roster
            Sportroster=organizeroster(Sportroster)
            # Add coaches from this sport to roster
            Rostercoaches=coachroster[coachroster['Sport']==sport]
            Rostercoaches=organizeroster(Rostercoaches)
            Sportroster=pd.concat([Sportroster,Rostercoaches], ignore_index=True) # adds coaches and players together
            Sportroster=Sportroster.sort_values(['Team','Role','Grade','Lname'])
            fname=cnf._OUTPUT_DIR+'\\Cabrini_'+sport+'roster'+str(year)+'.csv'
            Sportroster=replaceacro(Sportroster, acronyms) # replace abbreviations
            Sportroster.loc[:,'Birthdate']=Sportroster['Birthdate'].apply(lambda x: formatDOB(x))
            Sportroster.to_csv(fname, index=False)
    # done with Cabrini CYC rosters
    # Break out all other types of teams (transfers, junior teams, Chess/Track)
    thismask = df['Team'].str.contains('-', case=False, na=False)
    Others=df.loc[~thismask] # no hyphen for all non Cabrini CYC level (Cabrini junior and transfers)
    # Cabrini transferred players to CYC teams with # (i.e. Ambrose#8B, OLS#3G)
    # Non-CYC cabrini junior teams start with number
    thismask = Others['Team'].str.contains('#', na=True) # flag nans and set to true (usually jr teams w/o assignment)
    # Transferred teams contain # such as OLS#3G
    Transfers=Others.loc[thismask] # transferred teams have # but no hyphen
    for i, sport in enumerate(sportlist): # output roster for all transfers (all grades in case of CYC)
        Transferroster=Transfers[Transfers['Sport']==sport]
        Transferroster=organizeroster(Transferroster)
        Transferroster=Transferroster.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
        fname=cnf._OUTPUT_DIR+'\\CYC'+sport+'transfers.csv'
        Transferroster=replaceacro(Transferroster,acronyms)
        Transferroster.loc[:,'Birthdate']=Transferroster['Birthdate'].apply(lambda x: formatDOB(x))
        Transferroster.to_csv(fname, index=False)
    # Now deal with junior cabrini (should be only thing left after Cabrini CYC,
    # transfers, special sports)
    Juniorteams=Others.loc[~thismask] # remove transfers
    Juniorteams=Juniorteams[Juniorteams['Team']!='drop'] # remove dropped players
    # now output all junior teams in same format (sometimes needed by <NAME>)
    # also calculate current age
    if len(Juniorteams)>0:
        Juniorteams=organizeroster(Juniorteams) # put in standard South Central roster format
        # Calculate current age from DOBs (renamed to Birthdate for roster only)
        Juniorteams.loc[:,'Age']=calcage(Juniorteams['Birthdate'])
        fname=cnf._OUTPUT_DIR+'\\Cabrini_junior_teams_'+str(year)+'.csv'
        Juniorteams=replaceacro(Juniorteams, acronyms)
        Juniorteams.loc[:,'Birthdate']=Juniorteams['Birthdate'].apply(lambda x: formatDOB(x))
        Juniorteams.to_csv(fname, index=False)
    # Deal with special cases -Track and Chess
    # Get DOB/school from players.. anything else needed by <NAME>?
    Specials=pd.merge(Specials, players, how='left', on='Plakey', suffixes=('','_r'))
    # needs address
    Specials=pd.merge(Specials, famcontact, how='left', on='Famkey', suffixes=('','_r2'))
    # NOTE(review): Specials is re-filtered in place each pass, so for a second
    # special sport the frame has already been narrowed to the first sport
    # (and re-renamed/re-subset) -- verify with a season having both Chess and Track
    for i, sport in enumerate(speciallist): # output roster for all transfers (all grades in case of CYC)
        Specials=Specials[Specials['Sport']==sport]
        Specials=Specials.rename(columns={'DOB':'Birthdate'})
        mycols=['First', 'Last','Gender','Team','Grade','Birthdate','School','Address','Zip']
        Specials=Specials[mycols]
        Specials=Specials.sort_values(['Gender', 'Birthdate', 'Grade'], ascending=True)
        Specials.loc[:,'Birthdate']=Specials['Birthdate'].apply(lambda x: formatDOB(x))
        fname= cnf._OUTPUT_DIR+'\\'+ sport+'_'+str(year)+'_rosters.csv'
        Specials.to_csv(fname, index=False)
    return
def makemultiteam(df):
    '''Expand the teams table so that multi-grade teams get one row per grade.

    Small utility called by assigntoteams so team assignment becomes a simple
    merge on Year/Grade/Gender/Sport.

    args:
        df -- teams dataframe with 'Graderange' column ('K1', '34', '45', ...)
    returns:
        (df, twoteams) -- df with cloned per-grade rows appended;
        twoteams holds sport/gender/grade combos served by more than one team
    '''
    # TODO annoying problem with combining teams due to K1 (string but not int)
    mycols = df.dtypes.index  # remember original column order for output
    # Combo teams that include kindergarten, e.g. 'K1', 'K2'
    kranges = [str(s) for s in df.Graderange.unique().tolist() if 'K' in str(s)]
    kranges = [s for s in kranges if len(s) > 1]  # combo teams only (skip plain 'K')
    kteams = df[df['Graderange'].isin(kranges)]
    xtrateams = pd.DataFrame(columns=mycols)  # accumulates per-grade clones
    # clone rows to cover the lower grades in each K-combo range
    for index, row in kteams.iterrows():
        gradestr = kteams.loc[index]['Graderange']
        for gr in range(0, int(gradestr[1])):  # grade K is treated as 0
            newrow = kteams.loc[index].copy()  # grab row as series
            newrow.loc['Grade'] = gr  # set to correct grade
            # DataFrame.append was removed in pandas 2.0; concat a 1-row frame
            xtrateams = pd.concat([xtrateams, newrow.to_frame().T])
    df.loc[:, 'Grade'] = df.Grade.replace('K', '0', regex=True)
    # get rid of K string problem so Graderange can become an int
    df.loc[:, 'Graderange'] = df.Graderange.replace('K', '0', regex=True)
    df.loc[:, 'Graderange'] = df.Graderange.astype('int')
    # now handle numbered multiteams (e.g. 45 78 two digit ints)
    multiteams = df.loc[df['Graderange'] > 9]  # subset of teams comprised of multiple grades
    for index, row in multiteams.iterrows():  # check for 3 or more grades
        # TODO make sure it's not 3 grades (i.e. K-2)
        gradestr = str(multiteams.loc[index]['Graderange'])
        for gr in range(int(gradestr[0]), int(gradestr[1])):
            newrow = multiteams.loc[index].copy()  # grab row as series
            newrow.loc['Grade'] = gr  # set to correct grade
            xtrateams = pd.concat([xtrateams, newrow.to_frame().T])
    # Detect gender-grade-sport w/ two teams
    # now combine with original df
    df = pd.concat([df, xtrateams], ignore_index=True)  # complete team set
    df = df[mycols]  # back in original order
    df = df.sort_values(['Gender', 'Grade'], ascending=True)
    # After cloning by grade, look for two teams per grade options
    twoteams = df[df.duplicated(['Sport', 'Gender', 'Grade'])]
    return df, twoteams
def detectrosterchange(PMroster, myroster):
    '''Compare submitted and returned rosters to look for unique rows (altered by Pat Moore).

    First copy of a changed row is the PM version (presumably correct to match
    CYC database) and second is the submitted version... make any corrections
    to appropriate source data files.  Datetime format conversions can be
    problematic, so birthdates are normalized to m/dd/YYYY strings first.

    args:
        PMroster, myroster -- roster dataframes with Fname/Lname/Birthdate columns
    returns: dataframe of rows present in only one of the two rosters
    '''
    # all columns by default; keep=False drops both copies of duplicated rows,
    # leaving only rows that differ between the two versions
    bothrosters = pd.concat([PMroster, myroster])
    mycols = bothrosters.columns
    # rows w/o birthdate can't go through date normalization; handle separately
    nanrows = bothrosters[pd.isnull(bothrosters['Birthdate'])]
    nanrows = nanrows.drop_duplicates(keep=False)
    # ensure player rows are both in correct format (copies avoid chained assignment)
    myroster = myroster[pd.notnull(myroster['Birthdate'])].copy()
    PMroster = PMroster[pd.notnull(PMroster['Birthdate'])].copy()

    def removeLeadZero(val):
        # '04/05/2010' -> '4/05/2010' to match Pat Moore's date style
        if val.startswith('0'):
            return val[1:]
        else:
            return val

    # Fix date string differences before comparing
    for roster in (myroster, PMroster):
        roster.loc[:, 'Birthdate'] = roster['Birthdate'].apply(
            lambda x: pd.to_datetime(x).strftime('%m/%d/%Y'))
        roster.loc[:, 'Birthdate'] = roster['Birthdate'].apply(removeLeadZero)
    bothrosters = pd.concat([PMroster, myroster])
    bothrosters = bothrosters.sort_values(['Fname', 'Lname'])
    alteredrows = bothrosters.drop_duplicates(keep=False)
    # DataFrame.append was removed in pandas 2.0; use concat instead
    if len(nanrows) > 0:
        alteredrows = pd.concat([alteredrows, nanrows])
    alteredrows = alteredrows[mycols]
    alteredrows = alteredrows.sort_values(['Lname', 'Fname'])
    return alteredrows
def saveteams(teams):
    '''Save teams tab into Teams_coaches.xlsx after changes have been made.

    Overwrites only the 'Teams' sheet; the workbook's other sheets are kept.
    args:
        teams -- teams dataframe to write
    returns: None (direct save to Teams_coaches.xlsx in the working dir)
    '''
    # mode='a' + if_sheet_exists='replace' preserves the other sheets.
    # The old approach (assigning writer.book / writer.sheets from
    # load_workbook) is no longer supported by pandas >= 1.5.
    with pd.ExcelWriter('Teams_coaches.xlsx', engine='openpyxl', mode='a',
                        if_sheet_exists='replace') as writer:
        teams.to_excel(writer, sheet_name='Teams', index=False)
def assigntoteams(df, season, year, teams, overwrite=False):
    '''From mastersignups finds CYC team name based on year, grade, gender and sport from teams tab
    (which only contains names from this season/year to avoid screwing up old custom team assignments)

    args:
        df -- master signups dataframe (Team column is updated in place by SUkey)
        season -- 'Fall', 'Winter' or 'Spring'
        year -- signup year being assigned
        teams -- teams tab from teams_coaches xls
        overwrite -- if True, replace existing team assignments (except 'drop')
    returns: df with new Team values; also auto-backs-up and saves master_signups.csv
    '''
    # teamsmult has multi grade range teams with duplicates for merge matching
    # twoteams is multiple teams for same grade
    Teamsmult, Twoteams =makemultiteam(teams) # makes duplicates team entries to match both grades
    # compare grades as ints with K=0
    df.loc[:,'Grade']=df.Grade.replace('K','0', regex=True) # convert Ks to zeros
    df.loc[:,'Grade']=df['Grade'].astype('int')
    Teamsmult.loc[:,'Grade']=Teamsmult['Grade'].astype('int') # ensure these are ints
    # left merge keeps all master_signups oentries
    df=pd.merge(df, Teamsmult, how='left', on=['Year','Grade','Gender','Sport'], suffixes=('','_r'))
    # need to drop SUkey duplicates (keeping first)... occurs if >1 team per grade
    df=df.drop_duplicates(subset=['SUkey']) # drops any duplicates by unique SUkey
    # Consider all sports except Track (team assignment done separately by DOB)
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Softball','Baseball','T-ball']}
    sportlist=sportsdict.get(season)
    # this is post-merge so no chance of getting indices screwed up
    # select current sports & year and subset with new team assignment
    CurrentSU=df.loc[(df['Sport'].isin(sportlist)) & (df['Year']==year) & (pd.notnull(df['Team_r']))]
    if overwrite==False: # if no overwrite, keep only those with nan for team
        CurrentSU=CurrentSU.loc[pd.isnull(CurrentSU['Team'])]
    # Never overwrite team assignment for known drops
    CurrentSU=CurrentSU[CurrentSU['Team']!='drop']
    counter=0
    for index, row in CurrentSU.iterrows():
        # all remaining can be overwritted (those w/ existing team dropped above)
        # write back via unique SUkey since merge may have reordered rows
        match=df[df['SUkey']==CurrentSU.loc[index]['SUkey']]
        if len(match)==1:
            thisind=match.index[0]
            # add new team assignment to correct index in original master signups
            df.loc[thisind, 'Team'] = CurrentSU.loc[index]['Team_r']
            counter+=1
    print(str(counter),' player(s) newly assigned to teams')
    # now drop extra columns and sort
    mycols=['SUkey','First', 'Last', 'Grade', 'Gender', 'Sport', 'Year', 'Team', 'Plakey','Famkey', 'Family',
        'SUdate', 'Issue date', 'Uniform#','UniReturnDate']
    # NOTE(review): Grade was converted to int above, so this replace('K',0) and
    # the regex replace('0','K') below look like no-ops on an int column --
    # confirm grades actually display as 'K' in the saved csv
    df.loc[:,'Grade']=df.Grade.replace('K',0)
    df=df.sort_values(['Year','Sport', 'Gender', 'Grade'], ascending=True)
    df.loc[:,'Grade']=df.Grade.replace('0','K', regex=True) # make sure any 0 grades are again replaced with K
    df=df[mycols]
    autocsvbackup(df,'master_signups', newback=True) # autobackup of master signups
    df.to_csv(cnf._INPUT_DIR + '\\master_signups.csv', index=False) # save/overwrite existing csv
    return df
def assigntrackgroup(df, year, players):
    '''Assign each track signup to an age-group team based on age on May 31
    of this year (school year+1).

    args:
        df -- master signups dataframe (Team values updated in place via SUkey)
        year -- signup year
        players -- master players list (source of DOB, merged on Plakey)
    returns: df with Team set to Track7/Track89/Track1011/Track1213/Track1415
    '''
    Track = df[(df['Sport'] == 'Track') & (df['Year'] == year)]
    Track = pd.merge(Track, players, how='left', on=['Plakey'], suffixes=('', '2'))
    numunassigned = len(Track[pd.isnull(Track['Team'])])
    for index, row in Track.iterrows():
        DOB = Track.loc[index]['DOB']  # merged from players.csv
        if isinstance(DOB, str):
            DOB = datetime.strptime(DOB, "%m/%d/%Y").date()  # convert string to date
        elif isinstance(DOB, pd.Timestamp):  # pd.tslib.Timestamp was removed from pandas
            DOB = DOB.date()  # convert timestamp to date
        # age on prior year's May 31st (same as school year in current convention)
        trackage = date(year + 1, 5, 31) - DOB
        trackage = (trackage.days + trackage.seconds / 86400) / 365.2425  # as decimal years
        trackage = math.floor(trackage)
        if trackage <= 7:
            team = 'Track7'
        elif 8 <= trackage <= 9:
            team = 'Track89'
        elif 10 <= trackage <= 11:
            team = 'Track1011'
        elif 12 <= trackage <= 13:
            team = 'Track1213'
        elif 14 <= trackage <= 15:
            team = 'Track1415'
        else:  # probably some entry error
            mystr = Track.loc[index]['First'] + ' ' + Track.loc[index]['Last'] + ' Grade:' + Track.loc[index]['Grade']
            print('Suspected DOB error for', mystr, 'DOB:', datetime.strftime(DOB, "%m/%d/%y"))
            team = ''
        # Now write back altered subset to mastersignups (index is lost so use SUkey)
        SUkey = int(Track.loc[index]['SUkey'])
        match = df[df['SUkey'] == SUkey]  # This gives correct index
        df.loc[match.index[0], 'Team'] = team  # alter/assign team for this signup
    # NOTE(review): Track is a merged copy, so its Team column never changes and
    # this difference always prints 0 even though df itself was updated -- confirm
    newlyassigned = numunassigned - len(Track[pd.isnull(Track['Team'])])
    print(newlyassigned, ' players assigned to track age group.')
    return df
def readbackevents(trackevents):
    '''
    Reads back choices of track events from summary sheet and preps for
    copy to Pat Moore registration spreadsheet.
    In 4x100, 4x200, 4x400 col enter start order 1,2,3,4,1A,2A

    args:
        trackevents -- dataframe with one column per event plus 'Relay'
                       (non-blank cell means the player entered that event)
    returns: registration dataframe with Event#1..Event#4 columns filled
    '''
    regcols = ['Last', 'First', 'Middle', 'Gender',
               'DOB', 'Team Code', 'Event#1', 'Event#2', 'Event#3', 'Event#4']
    # Manually enter order of runners and alternates for relays
    events = ['50M', '100M', '200M', '400M', '800M', '1600M', 'SoftThrow',
              'ShotPut', 'StdLongJump', 'RunLongJump']
    # accumulate entries in a list: DataFrame.append was removed in pandas 2.0
    entries = []
    for index, row in trackevents.iterrows():
        # get events for which player is signed up (non-blank cell)
        playerevents = []
        for event in events:
            if str(row[event]) != 'nan':
                playerevents.append(event)
                print(event, ' for ', row.First, row.Last)
        # Check for relay type separately
        if row['Relay'] in ['4x100', '4x200', '4x400']:
            playerevents.append(row['Relay'])
            print(row['Relay'], ' for ', row.First, row.Last)
        if len(playerevents) > 4:
            print('Too many events for ', row.First, row.Last)
        # Now construct player's entry in regfile
        thisentry = row
        thisentry['Middle'] = ''
        # fixed team code (original `regfile.loc[:,'Team Code']=='SFC'` was a no-op typo)
        thisentry['Team Code'] = 'SFC'
        # Gender is upper case M or F
        thisentry['Gender'] = thisentry['Gender'].upper()
        for i, event in enumerate(playerevents):
            thisentry['Event#' + str(i + 1)] = event
        entries.append(thisentry)
    regfile = pd.DataFrame(entries)
    # reindex (not subset) so unused Event#N columns still exist as NaN
    regfile = regfile.reindex(columns=regcols)
    return regfile
def maketracksummary(df, year, players):
    '''Build a summary of this year's track signups with computed age group.

    Same age-on-May-31 logic as assigntrackgroup, but returns a summary
    dataframe instead of altering master signups.

    args:
        df -- master signups dataframe
        year -- signup year
        players -- master players list (source of DOB, merged on Plakey)
    returns: dataframe [First, Last, Grade, Gender, DOB, Team, Trackage]
             sorted by age
    '''
    Track = df[(df['Sport'] == 'Track') & (df['Year'] == year)]
    Track = pd.merge(Track, players, how='left', on=['Plakey'], suffixes=('', '2'))
    Track.loc[Track.index, 'Trackage'] = np.nan
    for index, row in Track.iterrows():
        DOB = Track.loc[index]['DOB']  # merged from players.csv
        if isinstance(DOB, str):
            DOB = datetime.strptime(DOB, "%m/%d/%Y").date()  # convert string to date
        elif isinstance(DOB, pd.Timestamp):  # pd.tslib.Timestamp was removed from pandas
            DOB = DOB.date()  # convert timestamp to date
        # age on prior year's May 31st (same as school year in current convention)
        trackage = date(year + 1, 5, 31) - DOB
        trackage = (trackage.days + trackage.seconds / 86400) / 365.2425  # as decimal years
        Track.loc[index, 'Trackage'] = trackage
        trackage = math.floor(trackage)
        if trackage <= 7:
            team = 'Track7'
        elif 8 <= trackage <= 9:
            team = 'Track89'
        elif 10 <= trackage <= 11:
            team = 'Track1011'
        elif 12 <= trackage <= 13:
            team = 'Track1213'
        elif 14 <= trackage <= 15:
            team = 'Track1415'
        else:  # probably some entry error
            mystr = Track.loc[index]['First'] + ' ' + Track.loc[index]['Last'] + ' Grade:' + Track.loc[index]['Grade']
            # original called datetime.date.strftime (AttributeError); DOB is a
            # date by this point so call its own strftime
            print('Suspected DOB error for', mystr, 'DOB:', DOB.strftime("%m/%d/%y"))
            team = ''
        Track.loc[index, 'Team'] = team
    Track = Track.sort_values(['Trackage'])
    mycols = ['First', 'Last', 'Grade', 'Gender', 'DOB', 'Team', 'Trackage']
    Track = Track[mycols]
    return Track
def findrecruits(df, players, famcontact, season, year, signupfile):
    '''Read list of signed-up player keys from xls file; compare with last year's set of
    players from master Signups log
    7/2018 mod... grab DOB to allow easier manual additions to signups

    Recruits = players who signed up for a sport in year-1 but not in year;
    their grade is advanced by one and contact info is merged in.

    args:
        df -- master signups dataframe
        players, famcontact -- master tables for school/contact lookups
        season -- 'Fall', 'Winter' or 'Spring'
        year -- current signup year
        signupfile -- csv or xls target; recruits saved as csv or 'Recruits' sheet
    returns: None (direct save)
    '''
    mycols=df.columns.tolist() # Same columns as mastersignups
    sportsdict={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    sportlist=sportsdict.get(season)
    Recruits=pd.DataFrame(columns=mycols) # empty frame for recruits
    for i, sport in enumerate(sportlist):
        thissport=df[df['Sport']==sport]
        thissport=thissport.sort_values(['Year'], ascending=False) # most current signups at top
        plakeylist=thissport.Plakey.unique() # ndarray with list of unique soccer players
        keylist=plakeylist.tolist()
        for i, key in enumerate(keylist):
            match=thissport[thissport['Plakey']==key]
            # recruits ... played in year -1 but not in year
            if year-1 in match.Year.unique() and year not in match.Year.unique():
                match=match[0:1] # take only last season's signup
                Recruits=pd.concat([Recruits,match], ignore_index=True)
    # plakey, famkey, first, last, grade, gender,
    Recruits.loc[:,'Grade']=Recruits.Grade.replace('K',0) # replace K with zero to allow sorting
    Recruits.loc[:,'Grade']=Recruits.Grade.astype(int)
    Recruits.loc[:,'Grade']=Recruits.Grade+1 # adjust to correct grade for this year
    # Drop if graduated
    Recruits=Recruits[Recruits['Grade']<=8]
    # adjust grade such that players current grade is in list
    # join with famcontact on famkey to get contact info (emails, phones, etc.)
    # Inner join on famkey adds the necessary info
    Recruits=pd.merge(Recruits, famcontact,how='inner', on='Famkey', suffixes=('','_r'))
    # Now need to look up school from players.csv
    Recruits=pd.merge(Recruits, players, how='inner', on='Plakey', suffixes=('','_r'))
    mycols=['First', 'Last', 'DOB', 'Gender', 'School', 'Grade', 'Address', 'Zip',
       'Parish_registration', 'Sport', 'Phone1', 'Text1','Email1', 'Phone2',
       'Text2', 'Email2', 'Plakey', 'Famkey', 'Family']
    Recruits=Recruits[mycols]
    Recruits.loc[:,'Grade']=Recruits.Grade.replace('K',0)
    Recruits=Recruits.sort_values(['Grade'], ascending=True)
    Recruits.loc[:,'Grade']=Recruits.Grade.replace(0,'K') # replace K with zero to allow sorting
    Recruits=Recruits.sort_values(['Sport', 'Gender', 'Grade'], ascending=True)
    # now write recruits to tab in master signups file
    if signupfile.endswith('.csv'):
        fname=cnf._OUTPUT_DIR + '\\%s%s_recruits.csv' %(season, year)
        Recruits.to_csv(fname, index=False)
        print("Info on possible recruits saved to", fname)
    else: # should be excel file
        book=load_workbook(signupfile)
        # NOTE(review): assigning writer.book/writer.sheets is unsupported in
        # pandas >= 1.5 -- consider ExcelWriter(mode='a', if_sheet_exists='replace')
        writer=pd.ExcelWriter(signupfile, engine='openpyxl')
        writer.book=book
        writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
        Recruits.to_excel(writer,sheet_name='Recruits',index=False) # this overwrites existing file
        writer.save() # saves xls file with all modified data
        print("Info on possible recruits saved in", signupfile)
    return
def summarizesignups(df, season, year, **kwargs):
    '''Write out summary to date of players by sport, gender, grade with
    abbreviated playerlist.

    args:
        df -- either Mastersignups (split by discrete sport) or gsignups
              (raw single-season signup file with possible multi-sport entries)
        season -- 'Fall', 'Winter' or 'Spring'
        year -- signup year to summarize
    kwargs:
        'XLSpath': path to xls signup file (save as separate sheet there)
        'toDf': don't save ... return as dataframe
    '''
    sportsdict = {'Fall': ['VB', 'Soccer'], 'Winter': ['Basketball'],
                  'Spring': ['Track', 'Softball', 'Baseball', 'T-ball']}
    sportlist = sportsdict.get(season)
    mycols = ['Sport', 'Gender', 'Grade', 'Number', 'Playerlist', 'Plakeys']
    sportsum = pd.DataFrame(columns=mycols)
    # Determine if this is mastersignups (or single season raw signup file)
    if 'Basketball' in df.Sport.unique() and 'Track' in df.Sport.unique():
        CurrentSU = df[(df['Year'] == year)].copy()  # this year only
        CurrentSU = CurrentSU.reset_index(drop=True)
        CurrentSU = CurrentSU[CurrentSU.Sport.isin(sportlist)]
        if 'Team' in CurrentSU:  # remove drops if using mastersignups
            CurrentSU = CurrentSU[CurrentSU['Team'] != 'drop']
    else:  # this season's raw signups: split possible multi-sport entries
        CurrentSU = pd.DataFrame(columns=df.columns)
        for i, sport in enumerate(sportlist):
            # Use caution here due to Tball in Softball string problem (currently set to T-ball)
            thissport = df.loc[df['Sport'].str.contains(sport, na=False, case=False)].copy()  # also handles multi-sports
            # Prepare necessary columns
            thissport.loc[:, 'Sport'] = sport
            thissport.loc[:, 'Year'] = int(year)
            CurrentSU = pd.concat([thissport, CurrentSU], ignore_index=True)
    # Replace K with 0 so grades sort numerically
    CurrentSU.loc[:, 'Grade'] = CurrentSU.Grade.replace('K', 0)
    CurrentSU.loc[:, 'Grade'] = CurrentSU.Grade.astype(int)  # convert all to int for sorting
    grouped = CurrentSU.groupby(['Sport', 'Gender', 'Grade'])
    sportsum = []
    for (sp, gen, grade), gr in grouped:
        thisEnt = {'Sport': sp, 'Gender': gen, 'Grade': grade, 'Number': len(gr)}
        # BUG FIX: original referenced undefined name gsignups here; the keys
        # must come from this group (gr), parallel to Playerlist below
        thisEnt['Plakeys'] = ", ".join([str(i) for i in gr.Plakey.unique().tolist() if str(i) != 'nan'])
        thisEnt['Playerlist'] = ", ".join([i + ' ' + j for i, j in zip(gr.First.tolist(), gr.Last.tolist())])
        sportsum.append(thisEnt)
    sportsum = pd.DataFrame(sportsum)
    sportsum = sportsum.sort_values(['Sport', 'Gender', 'Grade'])
    sportsum.Grade = sportsum.Grade.replace(0, 'K')
    sportsum = sportsum[['Sport', 'Gender', 'Grade', 'Number', 'Playerlist', 'Plakeys']]
    if kwargs.get('toDf'):  # don't save ... return as dataframe
        return sportsum
    # now write summary to tab in signup xls file (or csv in output dir)
    if 'XLSpath' in kwargs:  # optional excel write with provided path
        signupfile = kwargs.get('XLSpath')
        book = load_workbook(signupfile)
        # xls signups file should be in SC_files (input dir)
        # NOTE(review): load_workbook opens signupfile as-is but the writer
        # prepends cnf._INPUT_DIR -- verify both resolve to the same file
        writer = pd.ExcelWriter(cnf._INPUT_DIR + '\\' + signupfile, engine='openpyxl')
        writer.book = book
        writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
        sportsum.to_excel(writer, sheet_name='Summary', index=False)  # this overwrites existing sheet
        writer.save()  # saves xls file with all modified data
    else:  # default is write to csv file in std output dir
        fname = "%s\\%s_%i_signup_summary.csv" % (cnf._OUTPUT_DIR, season, year)
        sportsum.to_csv(fname, index=False)
    return
def findCoaches(gsignups, **kwargs):
    ''' Find possible coach volunteers for selected gender/grades.

    args:
        gsignups -- google drive downloaded signups (Coach/Coach2 columns
                    hold yes/maybe/no style volunteer answers)
    kwargs:
        gradeGenders -- list of (grade, gender) tuples to filter down to
    returns: dataframe of rows where at least one parent volunteered
    '''
    # Normalize blanks to explicit 'No'
    gsignups.Coach = gsignups.Coach.replace('', 'No')
    gsignups.Coach2 = gsignups.Coach2.replace('', 'No')
    # BUG FIX: original tested Coach twice, so families where only the second
    # parent volunteered (Coach2) were incorrectly dropped
    nocoach = gsignups[(gsignups.Coach == 'No') & (gsignups.Coach2 == 'No')]
    coaches = gsignups[~gsignups.index.isin(nocoach.index)].copy()
    # Filter down to requested genders-grades
    if 'gradeGenders' in kwargs:
        keep = []
        for gr, gen in kwargs.get('gradeGenders'):
            keep.extend(coaches[(coaches['Gender'] == gen) & (coaches['Grade'] == gr)].index.tolist())
        coaches = coaches[coaches.index.isin(keep)]
    mycols = ['Gender', 'Grade', 'Sport', 'Coach', 'Pfirst1', 'Plast1', 'Phone1', 'Text1',
              'Email1', 'First', 'Last', 'School', 'Pfirst2', 'Plast2', 'Coach2', 'Phone2',
              'Text2', 'Email2']
    coaches = coaches[mycols]
    return coaches
def findmissinginfo(df, players, famcontact):
    ''' Using player and family keys, update nan values in SC signups (mainly
    for paper/word-of-mouth entries) needed for writesignupstoExcel.

    args:
        df -- signups dataframe (Plakey/Famkey already resolved)
        players -- master players list (school, gender)
        famcontact -- master family contacts (address, phones, emails)
    returns: df with missing contact/school fields filled in
    '''
    for index, row in df.iterrows():
        # manual entries won't have a google drive timestamp
        # (pd._libs.tslib.Timestamp was removed from pandas; use pd.Timestamp)
        if type(row.Timestamp) != pd.Timestamp:
            thisplakey = row.Plakey
            thisfamkey = row.Famkey
            # get school, gender from master players list
            match = players[(players['Plakey'] == thisplakey)]
            if len(match) == 1:
                df.loc[index, 'School'] = match.iloc[0]['School']
                df.loc[index, 'Gender'] = match.iloc[0]['Gender']
            # address, zip, parish, phones/texts, emails from famcontact
            match = famcontact[(famcontact['Famkey'] == thisfamkey)]
            if len(match) == 1:
                df.loc[index, 'Address'] = match.iloc[0]['Address']
                try:
                    df.loc[index, 'Zip'] = int(match.iloc[0]['Zip'])
                except (TypeError, ValueError):  # missing or malformed zip
                    print('Problem w/ zip code for', row.Last)
                df.loc[index, 'Parish'] = match.iloc[0]['Parish_registration']
                # copy both parents' phone/text/email columns
                for target, source in (('Phone', 'Phone1'), ('Text1', 'Text1'),
                                       ('Email1', 'Email1'), ('Phone2', 'Phone2'),
                                       ('Text2', 'Text2'), ('Email2', 'Email2')):
                    df.loc[index, target] = match.iloc[0][source]
    return df
def makephonedict(famcontact):
    '''Map every known phone number to its family key.

    Called by findplayers for phone-based family matching.
    args:
        famcontact -- family contacts with Phone1..Phone4 and Famkey columns
    returns: dict of {phone number: famkey}
    '''
    lookup = {}
    for col in ('Phone1', 'Phone2', 'Phone3', 'Phone4'):
        known = famcontact.loc[pd.notnull(famcontact[col])]
        for idx in known.index:
            lookup[known.loc[idx][col]] = known.loc[idx]['Famkey']
    return lookup
def makelastlist(df, df2):
    '''Build a list of (last name, famkey) tuples for matching operations.

    A list of tuples (not a dict) because different families can share a
    last name (duplicate keys).
    args:
        df -- players table (Last, Famkey)
        df2 -- family contacts (Plast1..Plast3 parent last names, Famkey)
    returns: list of unique-per-family (last, famkey) tuples
    '''
    pairs = []
    # kid last names from players.csv, one entry per family
    perfamily = df.drop_duplicates(subset=['Famkey'])
    for idx in perfamily.index:
        pairs.append((perfamily.loc[idx]['Last'], perfamily.loc[idx]['Famkey']))
    # parent last names from family contacts (skip blanks, avoid duplicates)
    for col in ('Plast1', 'Plast2', 'Plast3'):
        known = df2.loc[pd.notnull(df2[col])]
        for idx in known.index:
            combo = (known.loc[idx][col], known.loc[idx]['Famkey'])
            if combo not in pairs:
                pairs.append(combo)
    return pairs
''' Testing
row=unmatched.iloc[0]
index=37
'''
def findplayers(signups, players, famcontact, year):
    '''Find player key from players df using multiple levels of matching (Plakey already initialized)
    if not a perfect match on all characters, create some data output structure to resolve possible problems
    plakey and famkey cols added in loadprocess

    Matching levels: (1) exact First/Last/DOB, (2) Alias/Last/DOB,
    (3) phone-number -> family match with interactive tk resolution,
    (4) interactive confirmation of brand-new player/family.

    args:
        signups -- current signups (Plakey/Famkey columns present, possibly nan)
        players, famcontact -- master tables (modified and saved if new entries added)
        year -- signup year (used for DOB sanity check)
    returns: (signups, players, famcontact) with keys resolved where possible
    '''
    savepla=False # flags to save modified files
    savefam=False
    phonedict=makephonedict(famcontact) # dict for known phone #s to famkey
    # left merge keeping index to do first/last/dob match (works since no duplicates in players)
    matches=signups.reset_index().merge(players, how='left', on=['First','Last','DOB'], suffixes=('','_2')).set_index('index')
    if len(matches)!=len(signups):
        print('Multiple match problem in players csv!')
        return signups, players, famcontact
    # after first/last/DOB match, copies over assigned plakey as found in players
    matches=matches[(pd.isnull(matches['Plakey'])) & (pd.notnull(matches['Plakey_2']))]
    signups.loc[matches.index,'Plakey'] = matches['Plakey_2']
    signups.loc[matches.index,'Famkey'] = matches['Famkey_2']
    # same matching process for (Alias, Last, DOB)
    alias=signups.copy().rename(columns={'First':'Alias'})
    matches=alias.reset_index().merge(players, how='left', on=['Alias','Last','DOB'], suffixes=('','_2')).set_index('index')
    if len(matches)!=len(alias):
        print('Multiple match problem in players csv!')
        return signups, players, famcontact
    # Find newly matched plakeys (using player alias)
    matches=matches[(pd.isnull(matches['Plakey'])) & (pd.notnull(matches['Plakey_2']))]
    signups.loc[matches.index,'Plakey'] = matches['Plakey_2']
    signups.loc[matches.index,'Famkey'] = matches['Famkey_2']
    # Continue w/ attempted id via phone or add new
    unmatched=signups.loc[pd.isnull(signups['Plakey'])]
    nophone = unmatched[pd.isnull(unmatched['Phone1'])]
    # NOTE(review): >1 skips the warning when exactly one row lacks a phone --
    # possibly should be len(nophone)>0
    if len(nophone)>1:
        print('Add missing phone for %s' %",".join(nophone.Last.unique().tolist()))
    unmatched=unmatched.loc[pd.notnull(unmatched['Phone1'])]
    # blank DOBs will be nonetype (after datetime conversion)
    nobd=unmatched[pd.isnull(unmatched['DOB'])]
    if len(nobd)>0:
        print('Enter DOB for %s' %",".join(nobd.Last.unique().tolist()))
    unmatched= unmatched[pd.notnull(unmatched.DOB)] # Remove no DOB
    # Birthday instead of DOB problem (a DOB after Jan 1 of year-1 can't be a real player)
    bd=unmatched[ unmatched['DOB'] > date(year-1,1,1)]
    if len(bd)>0:
        print('Fix birthday instead of DOB for %s' %",".join(bd.Last.unique().tolist()))
    unmatched= unmatched[~unmatched.index.isin(bd.index)] # Remove birthdays
    # lastnames=makelastlist(players, famcontact) # set of tuples with last name and assoc famkey
    for index, row in unmatched.iterrows(): # row=unmatched.iloc[7]
        # no exact match and no phone1... skip and correct manual entry
        if str(unmatched.loc[index]['Phone1'])=='nan': # skip if no phone1 entry
            print('No exact match for', row.First, row.Last, row.DOB, 'add phone #')
            continue
        # no exact match ... find matching phone/ family match
        phonelist=[]
        for i, col in enumerate(['Phone1','Phone2']):
            phonelist.append(unmatched.loc[index][col])
        phonelist=[s.strip() for s in phonelist if str(s)!='nan']
        famkey=[phonedict.get(s,'') for s in phonelist if s in phonedict]
        if len(famkey)>0: # found one or mre matching phone numbers
            famkey=int(famkey[0])
            signups.loc[index, 'Famkey'] = famkey
            kids=players[players['Famkey']==famkey]
            # now find player (alias/ inexact) or add new kid (interactive tk dialog)
            choicedict=newplayertk(unmatched.loc[index], phonelist, kids)
            if choicedict.get('choice','')=='alias' or choicedict.get('choice','')=='ID':
                signups.loc[index, 'Plakey'] = choicedict.get('ID',0)
                if choicedict.get('choice','')=='alias':
                    players=addalias(players, signups.loc[index]) # add alias to this entry and directly save
                    savepla=True
            elif choicedict.get('choice','')=='addkid':
                # add kid to existing family
                players, plakey=addplayer(signups.loc[index], players)
                signups.loc[index, 'Plakey'] = plakey
                savepla=True
            elif choicedict.get('choice','')=='skip':
                # process add alias function
                pass
        else: # possible new family (could be manually added or new player/family or unique/changed phone)
            Ser=signups.loc[index] # convert current SU info to series
            choice=newplafamtk(Ser, phonelist) # confirm new player/family (interactive)
            if choice=='adddb': # new family (and new player)
                players, famcontact, plakey, famkey=addnewplafam(Ser, players, famcontact) # pass as df row
                savepla=True
                savefam=True
                # Add new #(s) and famkey to phonedict
                for i, num in enumerate(phonelist):
                    phonedict.update({num:famkey})
                # Update signups
                signups.loc[index, 'Famkey'] = famkey
                signups.loc[index, 'Plakey'] = plakey
            else:
                print('Skipped unidentified player', row.First, row.Last)
    if savepla: # save players data csv if modified
        players.to_csv(cnf._INPUT_DIR + '\\players.csv',index=False)
    if savefam:
        famcontact.to_csv(cnf._INPUT_DIR + '\\family_contact.csv',index=False)
    return signups, players, famcontact
def addnewplafam(Ser, players, famcontact):
    ''' Add a single new player & family after tk confirmation.
    Builds a Family name from the player/parent last names, then appends the
    player (addplayer) and the family (addfamily) to the master tables and
    links the new player row to the new family key.
    args:
        Ser - signup row (Series) for the confirmed new player; modified in place
        players - master players DataFrame
        famcontact - master family contact DataFrame
    returns:
        (players, famcontact, plakey, famkey) - updated tables plus new keys
    '''
    player_last = str(Ser.Last).title()
    parent_last = str(Ser.Plast1).title()  # parent 1 last name
    # Family name is the player's last name when the parent name is the same,
    # missing ('nan'), or contained in a hyphenated player surname; otherwise
    # combine the two as Last_Plast.
    if parent_last == 'nan' or parent_last in player_last:
        Ser.loc['Family'] = player_last
    else:
        Ser.loc['Family'] = player_last + '_' + parent_last
    # Append to master players list (assigns and returns the new player key)
    players, plakey = addplayer(Ser, players)
    Ser.loc['Plakey'] = plakey
    # Append to master family contacts (assigns and returns the new family key)
    famcontact, famkey = addfamily(Ser, famcontact)
    # Link the freshly added player row to its new family key
    matches = players[players['Plakey'] == plakey]
    if len(matches) == 1:
        players.loc[matches.index[0], 'Famkey'] = famkey
    return players, famcontact, plakey, famkey  # pass modified versions back to main
def addfamily(Ser, famcontact):
    ''' Append a new family (from a signup Series) to the master family contact table.
    args:
        Ser - signup Series for the new family; modified in place (Famkey,
            default City/State, blank contact-3/4 fields)
        famcontact - master family contact DataFrame
    returns:
        (famcontact, famkey) - updated table and the newly assigned family key
    '''
    # TODO run autobackup for famcontact?
    # find and assign a new unique family key
    famkey = findavailablekeys(famcontact, 'Famkey', 1)[0]
    Ser.loc['Famkey'] = famkey
    # Add all the default missing columns
    Ser.loc['City'] = 'St. Louis'
    Ser.loc['State'] = 'MO'
    for col in ['Parish_residence','Pfirst3','Plast3','Phone3','Text3','Phone4','Text4','Email3']:
        Ser[col] = ''
    # Map signup field names onto famcontact column names.
    # NOTE: Series.rename has no 'columns' kwarg (the original passed one,
    # which raises TypeError); a dict mapper applies to the index labels.
    Ser = Ser.rename({'Plakey': 'Players', 'Parish': 'Parish_registration',
                      'Phone': 'Phone1', 'Text': 'Text1', 'Email': 'Email1'})
    # one-row frame from the Series (DataFrame.append was removed in pandas 2.x)
    df = pd.DataFrame([Ser]).reset_index(drop=True)
    mycols = famcontact.columns.tolist()
    # backfill any famcontact columns the signup row lacks (shouldn't happen but doublecheck)
    for col in [c for c in mycols if c not in df.columns.tolist()]:
        df[col] = ''
    df = df[mycols]  # put back in original column order
    # concat the two frames (with matching names, column order doesn't matter)
    famcontact = pd.concat([famcontact, df], ignore_index=True)
    famcontact = famcontact.reset_index(drop=True)
    famcontact = famcontact.sort_values(['Famkey'], ascending=True)
    # autocsvbackup(famcontact, 'family_contact', newback=True)
    return famcontact, famkey
def addplayer(Ser, players):
    ''' Reformat a confirmed new player (signup Series) and append to the master players list.
    args:
        Ser - signup Series for the new player; modified in place (Plakey,
            Gradeadj, Alias, Uni# defaults added)
        players - master players DataFrame
    returns:
        (players, plakey) - updated master list and the newly assigned player key
    '''
    # assign a new unique player key (index already reset by findfamily)
    plakey = findavailablekeys(players, 'Plakey', 1)[0]
    Ser.loc['Plakey'] = plakey
    mycols = players.columns.tolist()  # desired column order
    Ser.loc['Gradeadj'] = 0    # grade adjustment defaults to zero
    Ser.loc['Alias'] = ''      # no alternate first names yet
    Ser.loc['Uni#'] = np.nan   # default uniform number (junior teams)
    # one-row frame from the Series (DataFrame.append was removed in pandas 2.x)
    df = pd.DataFrame([Ser]).reset_index(drop=True)
    # backfill columns present in players but missing from signup (shouldn't happen but check)
    for col in [c for c in mycols if c not in df.columns.tolist()]:
        df[col] = ''  # set missing col to empty string
    df = df[mycols]  # same column set/order as players
    # concat the two frames (with matching names, column order doesn't matter)
    players = pd.concat([players, df], ignore_index=True)
    players = players.reset_index(drop=True)
    players = players.sort_values(['Plakey'], ascending=True)  # sort by player number
    # autocsvbackup(players,'players', newback=True) # backup of players file
    return players, plakey  # full master list with new entry
#%%
def newplayertk(Ser, phonelist, kids):
    ''' Tk dialog: family matched (via phone) but player didn't ... resolve as
    existing player (possibly w/ data entry error), new alias, or new player.
    args:
        Ser - this unmatched signup row (Series)
        phonelist - phone numbers entered in signup
        kids - dataframe w/ info on potential matching kids (from phone match)
    returns:
        dict with 'choice' in {addkid, skip, ID, alias, abort}; for ID/alias it
        also carries 'ID' -> chosen existing player key (int)
    '''
    # first print out existing info in various label rows
    root = tk.Tk()
    choice=tk.StringVar() # must be defined outside of event-called functions
    thisplanum=tk.StringVar() # entered player num (if existing); converted to int below
    rownum=0
    try:
        family=str(kids.iloc[0]['Family']) # get fam name from passed matches
    except:
        print('No kids found for family of', Ser.First, Ser.Last)
        family=''
    mytext='No match for player '+ Ser.First+' '+Ser.Last+', Family '+ family + ' Parent: '+str(Ser.Pfirst1) + ' '+ str(Ser.Plast1)
    a=tk.Label(root, text=mytext)
    a.grid(row=rownum)
    rownum+=1
    # Print out possible kid matches
    try:
        for index, row in kids.iterrows():
            plakey=int(kids.loc[index]['Plakey'])
            first=kids.loc[index]['First']
            last=kids.loc[index]['Last']
            mytext='Possible match: '+str(plakey)+' '+ first+' '+ last
            a=tk.Label(root, text=mytext)
            a.grid(row=rownum)
            rownum+=1
    except:
        pass
    # button callbacks: record the choice then close the window
    def addkid(event):
        choice.set('addkid')
        root.destroy()
    def skip(event):
        choice.set('skip')
        root.destroy()
    def IDplayer(event):
        choice.set('ID')
        root.destroy()
    def alias(event):
        choice.set('alias')
        root.destroy()
    def abort(event):
        choice.set('abort')
        root.destroy()
    tk.Label(root, text='Enter existing player ID number').grid(row=rownum, column=0)
    # Entry box for player num
    tk.Entry(root, textvariable=thisplanum).grid(row=rownum, column=1)
    rownum+=1
    a=tk.Button(root, text='Add as new player')
    a.bind('<Button-1>', addkid)
    a.grid(row=rownum, column=0)
    a=tk.Button(root, text='Skip player')
    a.bind('<Button-1>', skip)
    a.grid(row=rownum, column=1)
    a=tk.Button(root, text='ID existing player')
    a.bind('<Button-1>', IDplayer)
    a.grid(row=rownum, column=2)
    a=tk.Button(root, text='ID and add alias')
    a.bind('<Button-1>', alias)
    a.grid(row=rownum, column=3)
    a=tk.Button(root, text='abort')
    a.bind('<Button-1>', abort)
    a.grid(row=rownum, column=4)
    root.mainloop()
    mychoice=choice.get()
    choices={} # dict for choice return (and possibly existing player ID number)
    if mychoice=='abort':
        print('Execution aborted')
    elif mychoice=='ID' or mychoice=='alias':
        # validate typed player number; a blank/non-numeric entry used to raise
        # ValueError from int() before the membership check could run
        try:
            entered=int(thisplanum.get())
        except ValueError:
            entered=None
        if entered is None or entered not in kids.Plakey.unique():
            print('Invalid player number entered.')
            mychoice='skip'
        else:
            # store the chosen key as int for BOTH branches (the alias branch
            # previously kept the raw string, breaking the Plakey assignment
            # done by the caller)
            choices.update({'ID':entered})
    choices.update({'choice':mychoice})
    return choices
#%%
def newplafamtk(Ser, phonelist):
    ''' Tk dialog: confirm that this signup is a brand-new player (and possibly
    a brand-new family) before adding to players/famcontact lists.
    args:
        Ser - unmatched signup row (Series); Famkey is nan/0 when no family matched
        phonelist - phone numbers entered on the signup (for display)
    returns:
        'adddb' to add the player (and family) to the database, 'skip' otherwise
    '''
    root = tk.Tk()
    answer = tk.StringVar()  # must live outside the button callbacks

    def on_add(event):
        answer.set('adddb')
        root.destroy()

    def on_skip(event):
        answer.set('skip')
        root.destroy()

    famkey = Ser.Famkey  # nan or zero if no family match
    rownum = 0
    if famkey > 0:
        # family matched but not player (aliases/other errors already checked)
        header = 'No match for player '+Ser.First+' '+Ser.Last+' in Famkey '+str(int(famkey))
    else:
        header = 'New family... no match for '+','.join(phonelist)
    tk.Label(root, text=header).grid(row=rownum)
    rownum += 1
    summary = 'New player '+ Ser.First + ' '+Ser.Last + ' Parent: '+str(Ser.Pfirst1) + ' '+ str(Ser.Plast1)
    tk.Label(root, text=summary).grid(row=rownum)
    rownum += 1
    addbtn = tk.Button(root, text='Add new player (and family) to database')
    addbtn.bind('<Button-1>', on_add)
    addbtn.grid(row=rownum)
    rownum += 1
    skipbtn = tk.Button(root, text='Skip player')
    skipbtn.bind('<Button-1>', on_skip)
    skipbtn.grid(row=rownum)
    root.mainloop()
    return answer.get()
def addalias(players, Ser):
    ''' Add an alternate first name (alias) to an existing player chosen in the
    tk player GUI, then save players.csv directly.
    args:
        players - master players DataFrame (must contain Plakey/Alias columns)
        Ser - signup row (Series) whose First name becomes the new alias and
            whose Plakey selects the existing player
    returns:
        players - updated DataFrame (unchanged if the Plakey match fails)
    '''
    plakey = Ser.Plakey
    first = Ser.First  # new alias name to add
    match = players[players['Plakey'] == plakey]
    if len(match) != 1:
        last = Ser.Last
        print('Problem adding alias for', first, last)
        return players
    thisind = match.index[0]
    alias = players.loc[thisind]['Alias']
    # treat NaN AND empty string as "no alias yet" -- addplayer initializes
    # Alias to '', which previously produced a bogus ", name" comma-join
    if str(alias) == 'nan' or str(alias).strip() == '':
        players.loc[thisind, 'Alias'] = first
    else:
        players.loc[thisind, 'Alias'] = alias + ', ' + first  # comma separated string
    # direct save of modified players file
    players.to_csv(cnf._INPUT_DIR + '\\players.csv', index=False)
    return players
def comparefamkeys(players, famcontact):
    '''Utility: cross-check family keys and family names between the players
    and famcontact master lists, printing any discrepancies found.
    args:
        players - master players DataFrame (Famkey, Family columns)
        famcontact - master family contact DataFrame (Famkey, Family columns)
    '''
    player_keys = players.Famkey.unique().tolist()
    contact_keys = famcontact.Famkey.unique().tolist()
    # families with a contact entry but no associated players
    for key in [k for k in contact_keys if k not in player_keys]:
        print("Famkey ", key, " in family contacts but not found among players.")
    # Check for family name discrepancies between players and famcontact
    for _, contact in famcontact.iterrows():
        famkey = contact['Famkey']
        # normalize to title case, no surrounding whitespace, before comparing
        family = contact['Family'].title().strip()
        match = players[players['Famkey'] == famkey]
        if len(match) == 1:  # should be a match assuming the check above is clean
            family2 = match.iloc[0]['Family'].title().strip()
            if family != family2:  # different family names with same key
                print("Family key ", str(famkey), ": ", family, " or ", family2)
    return
def calcage(Ser):
'''pass Series with DOB as timestamp and return Age column in years as floats
return column containing age in years (e.g. 6.1 yrs)'''
mytime=datetime.now()
mytime=datetime.date(mytime) # convert time to datetime.date
Ser=pd.to_datetime(Ser)
# Get age in years
Age=mytime-Ser # age in days (timedelta)
Age= Age.dt.total_seconds() / (24 * 60 * 60)/365.25
return Age
def parseOls(fname):
    ''' Custom parser for goofy Ols spreadsheet containing junior basketball schedule.
    Returns a long-format schedule DataFrame with columns Date, Time, Home,
    Visitor, Location (one row per game per court).
    NOTE(review): relies on parseDate (defined elsewhere in this module) and on
    the legacy pandas 'encoding' kwarg of read_excel -- confirm before reuse.
    '''
    Ols=pd.read_excel(fname, encoding='cp437')
    Ols=Ols.iloc[:, 0:7]  # only first 7 columns carry schedule info
    mycols=['Date','Time','Junk','Team1','Team2', 'Team3','Team4']
    Ols.columns=mycols
    mycols=['Date','Time','Team1','Team2', 'Team3','Team4']
    Ols=Ols[mycols]  # drop the junk column
    # keep only real game rows: non-null Time containing an hh:mm colon
    Ols=Ols.loc[pd.notnull(Ols['Time'])]
    Ols=Ols[Ols['Time'].str.contains(':')]
    Ols=Ols.reset_index(drop=True)
    gooddate=Ols['Date'].apply(parseDate).dropna()
    datelist=gooddate.tolist() # correct list of dates
    # 'WEEK' rows in the Date column mark where each date section begins
    starts=Ols.loc[Ols['Date'].str.contains('WEEK', na=False)] # correct parsing of dates
    starts=np.ndarray.tolist(starts.index.unique())
    starts.append(len(Ols))
    for i in range(0,len(Ols)): # correct dates column
        # find positions of first val larger than i in ordered starts list
        pos=[v for v in starts if v > i] # first value larger than i
        position=starts.index(min(pos))-1 # corresponds to index of date value from list to assign
        Ols.loc[i,'Date'] = datelist[position]
    # duplicate rows: Team3/Team4 are the simultaneous games on the second court
    Ols2=Ols.copy()
    mycols=['Date','Time','Team3','Team4']
    Ols2=Ols2[['Date','Time','Team3','Team4']]
    Ols2.columns=['Date','Time','Team1','Team2']
    Ols['Location']='Court 1'
    Ols2['Location']='Court 2'
    Ols=Ols[['Date','Time','Team1','Team2','Location']]
    Ols=pd.concat([Ols,Ols2], ignore_index=True)
    Ols.columns=['Date','Time','Home','Visitor','Location']
    return Ols
def updategradeadjust(row, players, year):
    ''' From row in signup file (with correct current grade) check whether the
    player's grade adjustment (Gradeadj) needs to change, and update players.
    called by processdatachanges after merge of signups and players
    args:
        row - single entry from gsignups file (w/ parent entered grade)
        players - DB-like file w/ player DOB and other info
        year - Cabrini sports year (i.e. 2019 for 2019-20 school year)
    returns:
        players - possibly modified master players DataFrame
    '''
    now=datetime.now()
    gradeadj=row.Gradeadj
    if str(row.DOB)=='NaT' or row.Grade_n=='nan': # skip players with no DOB/grade on file
        return players
    # DOB discrepancy between SC signup and players.csv already checked upstream
    try:
        dob=datetime.date(row.DOB)  # row.DOB is a datetime/Timestamp
    except (TypeError, ValueError):
        # already converted to datetime.date (the original 'pass' here left
        # dob unbound and crashed with NameError below)
        dob=row.DOB
    if row.Grade_n=='K': # use newly entered grade from signups (not existing from players)
        grade=0
    else:
        grade=row.Grade_n
    tempyear=now.year-int(grade)
    # school entry cutoff: Aug 1 of the back-calculated kindergarten year.
    # (datetime(y,8,1).date() -- the original called datetime.date(y,8,1),
    # which fails when datetime is the class from 'from datetime import datetime',
    # as the datetime.now() call above requires)
    entryage=datetime(tempyear,8,1).date()-dob
    entryage = (entryage.days + entryage.seconds/86400)/365.2425
    # Consider all the separate cases for entry age
    if 5 < entryage <6: # normal K age
        newadj=0
    elif 4 < entryage <5: # ahead of schedule
        newadj=1
    elif 6 < entryage <7: # 1 year back
        newadj=-1
    elif 7 < entryage <8: # working on grade school mustache
        newadj=-2
    else: # probably some entry error
        print('Suspected DOB or grade error for ', row.First, ' ', row.Last,' Grade ', row.Grade_n, 'DOB', dob.strftime("%m/%d/%y") )
        return players
    if gradeadj!=newadj:
        match=players[players['Plakey']==row.Plakey]
        if len(match)==1:
            thisind=match.index[0]
            # update player grade adjustment (no approval step)
            players.loc[thisind,'Gradeadj'] = newadj # set to new value from current signup file
            print('Grade adjustment changed to', str(newadj),' for ',row.First, ' ', row.Last)
    return players
#%% Legacy or one-time use functions
# LEGACY FUNCTIONS
# assignteams probably not needed (using assigntoteams and different flow of information)
def assignteams(df, Teams):
    '''LEGACY: Pass contacts summary and assign team name (similar to assigntoteams called by mastersignups).
    Teams tab must have those for current year; merge based on grade, gender and sport from teams tab
    (which only contains names from this season/year to avoid screwing up old custom team assignments).
    NOTE(review): depends on makemultiteam (defined elsewhere) for duplicated
    grade rows; probably superseded by assigntoteams (see comment above).
    '''
    Teamsmult, Twoteams =makemultiteam(Teams) # makes duplicates team entries to match both grades
    Teamsmult['Grade']=Teamsmult['Grade'].astype('str') # convert grade back to string
    Teamsmult.Grade=Teamsmult.Grade.replace('K','0', regex=True) # convert Ks to grade 0
    df.loc[:,'Grade']=df['Grade'].astype('str') # these summaries already have K as grade 0
    # left merge keeps all master_signups entries
    df=pd.merge(df, Teamsmult, how='left', on=['Grade','Gender','Sport'], suffixes=('','_r'))
    # now copy over CYC team name from Teams_coaches to this df ... skip copying if null
    # NOTE(review): this loop assigns df['Team'] to itself -- a no-op. Given the
    # ('','_r') merge suffixes it presumably should read df.iloc[i]['Team_r'];
    # confirm against makemultiteam's output before relying on this function.
    for i in range(0, len(df)):
        if df.iloc[i]['Team']!='nan':
            df.loc[i, 'Team'] = df.iloc[i]['Team']
    # now drop extra columns
    mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone', 'Text', 'Email', 'Phone2', 'Text2', 'Email2', 'Team', 'Plakey','Famkey', 'Family']
    dropcollist=[s for s in df.dtypes.index if s not in mycols]
    df=df.drop(dropcollist, axis=1) # drops extraneous columns
    return df
|
tkcroat/SC | pkg/SC_signup_google_API_functions.py | from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from collections import Counter
import pandas as pd
from datetime import datetime
import numpy as np
import pygsheets as pyg
import pkg.SC_config as cnf
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets','https://www.googleapis.com/auth/spreadsheets.readonly']
# SCOPES=[]
#%%
def readPaylog():
    ''' Read google sheets online paylog and return as dataframe
    and as pygsheet.
    returns:
        myPygSheet - pygsheets Worksheet handle (first tab) for later writeback
        paylog - DataFrame of all records with key/date columns coerced
    '''
    creds = getGoogleCreds() # google.oauth2.credentials
    gc = pyg.authorize(custom_credentials=creds) # pygsheets client
    sheetID ='<KEY>' # paylog sheet (NOTE(review): redacted/placeholder id -- fill in real key)
    sh = gc.open_by_key(sheetID)
    myPygSheet=sh[0]
    mycols=myPygSheet.get_row(1) # gets column names
    paylog=pd.DataFrame(myPygSheet.get_all_records())
    paylog=paylog[mycols] # reorder cols
    # coerce to int where possible; blanks/junk become NaN
    def convInt(val):
        try:
            return int(val)
        except:
            return np.nan
    # parse dates in any of the three formats seen in the sheet; on total
    # failure, print a warning and keep the raw value
    def convDate(val):
        try:
            return datetime.strptime(val, '%m/%d/%Y')
        except:
            try:
                return datetime.strptime(val, '%m/%d/%y')
            except:
                try:
                    return datetime.strptime(val.split(' ')[0], '%Y-%m-%d')
                except:
                    print('Error converting', val)
                    return val
    paylog['Date']=paylog['Date'].apply(lambda x: convDate(x))
    paylog['Year']=paylog['Year'].apply(lambda x: convInt(x))
    paylog['Amount']=paylog['Amount'].apply(lambda x: convInt(x))
    paylog['Deposit']=paylog['Deposit'].apply(lambda x: convInt(x))
    paylog['Paykey']=paylog['Paykey'].apply(lambda x: convInt(x))
    paylog['Famkey']=paylog['Famkey'].apply(lambda x: convInt(x))
    paylog['Plakey']=paylog['Plakey'].apply(lambda x: convInt(x))
    return myPygSheet, paylog
def readUniList():
    ''' Read the current version of unilist (master list with each unique uniform and
    the person to whom it is assigned) from google sheets.
    returns:
        unilist - DataFrame with Date parsed and key/number columns coerced to int
    '''
    sheetID ='<KEY>' # NOTE(review): redacted/placeholder sheet id -- fill in real key
    rangeName = 'Unilist!A:M'
    # Read of current version of unilist from google sheets
    unilist = downloadSheet(sheetID, rangeName)
    # coerce to int where possible; blanks/junk become NaN
    def convInt(val):
        try:
            return int(val)
        except:
            return np.nan
    # parse dates in either slash format; on failure warn and keep raw value
    def convDate(val):
        try:
            return datetime.strptime(val, '%m/%d/%Y')
        except:
            try:
                return datetime.strptime(val, '%m/%d/%y')
            except:
                print('Error converting', val)
                return val
    unilist['Date']=unilist['Date'].apply(lambda x: convDate(x))
    unilist['Plakey']=unilist['Plakey'].apply(lambda x: convInt(x))
    unilist['PriorKey']=unilist['PriorKey'].apply(lambda x: convInt(x))
    unilist['Number']=unilist['Number'].apply(lambda x: convInt(x))
    return unilist
def readInventory():
    ''' Read results of recent uniform inventories from google sheets.
    Expands each (Setname, Size) group's comma-separated Numberlist into one
    row per physical uniform.
    returns:
        unis - DataFrame w/ columns Setname, Size, Number, Date, Location ('in')
    '''
    sheetID ='<KEY>' # NOTE(review): redacted/placeholder sheet id -- fill in real key
    rangeName = 'Inventory!A:E'
    inventory = downloadSheet(sheetID, rangeName)
    # Transform inventory into Setname, Size, Number, Date, Location =in
    grouped=inventory.groupby(['Setname','Size'])
    unis=[]
    for (sn, size), gr in grouped:
        # TODO keep only most recent version of inventory (by date)
        thisDate=gr.iloc[0]['Date']
        try:
            thisDate=datetime.strptime(thisDate,'%m/%d/%y')
        except:
            pass # leave raw value if not in the expected date format
        nums=gr.iloc[0]['Numberlist']
        # Numberlist is a comma-separated string of uniform numbers; strip
        # blanks first so a trailing comma no longer aborts the whole int
        # conversion (the old code left strings behind in that case)
        if ',' in nums:
            tokens=[t.strip() for t in nums.split(',') if t.strip()!='']
        else:
            tokens=[nums]
        numlist=[]
        for tok in tokens:
            try:
                numlist.append(int(tok))
            except ValueError:
                print('error for', tok) # skip unparseable entry
        for num in numlist:
            thisUni={'Setname':sn, 'Size':size, 'Number':num,'Date': thisDate,'Location':'in'}
            unis.append(thisUni)
    unis=pd.DataFrame(unis)
    return unis
def getGoogleCreds():
    ''' Load and process credentials.json (generated by Google API console).
    Enables creation of a google Service object to access online google sheets.
    returns:
        creds - google.oauth2 credentials, refreshed or freshly authorized
    '''
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    tokenFile=cnf._INPUT_DIR+'\\token.pickle'
    if os.path.exists(tokenFile):
        with open(tokenFile, 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())  # silent refresh via stored refresh token
        else:
            # interactive OAuth browser flow (first run, or revoked token)
            flow = InstalledAppFlow.from_client_secrets_file(cnf._INPUT_DIR +
                '\\credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open(tokenFile, 'wb') as token:
            pickle.dump(creds, token)
    return creds
def changeColNames(headers):
    ''' Transform column names (google form questions) to standard abbreviated
    versions after Google Sheets API file download.
    args:
        headers - list of column-name strings (first row of the sheet)
    returns:
        list of renamed column names; unknown names pass through unchanged
        (with a warning printed for any not in the known-valid set)
    '''
    # Find header entries (google form questions) that are duplicates
    dups = [k for k,v in Counter(headers).items() if v>1]
    for dup in dups:
        matchinds=[i for i, val in enumerate(headers) if val==dup]
        # Replace 2nd instance in header list with val_2
        # NOTE(review): only the second occurrence is disambiguated; a third
        # duplicate would pass through unrenamed
        headers=[val if i !=matchinds[1] else val+'_2' for i, val in enumerate(headers) ]
    # exact-match renames (includes the _2 duplicates produced above)
    renameDict={'Player First Name':'First','Player Last Name':'Last', 'Player Date of Birth':'DOB',
        'School Player Attends':'School', 'Grade Level':'Grade','Street Address':'Address',
        'Zip Code':'Zip','Parish of Registration':'Parish','Alternate Placement':'AltPlacement',
        'Other Roster Status':'Ocstatus', 'Other Contact':'Othercontact',
        'Parent/Guardian First Name':'Pfirst1', 'Parent/Guardian First Name_2':'Pfirst2',
        'Parent/Guardian Last Name':'Plast1','Parent/Guardian Last Name_2':'Plast2',
        'Primary Phone':'Phone1','Primary Phone_2':'Phone2','Textable':'Text1','Textable_2':'Text2',
        'Primary Email':'Email1','Primary Email_2':'Email2',
        'Primary Email (enter "None" if you do not use e-mail)':'Email1',
        'Would you be willing to act as a coach or assistant':'Coach',
        'Would you be willing to act as a coach or assistant_2':'Coach2',
        "Player's Uniform Size":'Unisize',
        "Does your child already have an":'Unineed'}
    # substring matching (for slightly reworded form questions)
    rename_close={'grade level':'Grade', 'uniform size':'Unisize',
                  'child already have':'Unineed'}
    newNames=[]
    for val in headers:
        if val in renameDict:
            newNames.append(renameDict.get(val))
        elif len([i for i in list(rename_close.keys()) if i in val.lower()])>0:
            if len([i for i in list(rename_close.keys()) if i in val.lower()])>1:
                # ambiguous: matches more than one close-name pattern; keep as-is
                # (fixed: the original "'...{}'.val" raised AttributeError here)
                print('Multiple close colname matches for {}'.format(val))
                newNames.append(val)
                continue
            else:
                matchkey=[i for i in list(rename_close.keys()) if i in val.lower()][0]
                newNames.append(rename_close.get(matchkey))
        else: # not found in any rename dicts so just keep
            newNames.append(val)
    unchanged=['Timestamp','Gender','Sport','Gkey','Plakey','Famkey']
    # warn about any resulting names outside the known-valid set
    validNames=list(renameDict.values()) + unchanged
    badNames=[i for i in newNames if i not in validNames]
    if len(badNames)>0:
        print('Invalid column names:',', '.join(badNames))
    return newNames
def downloadSignups(sheetID, rangeName):
    ''' Download all rows from the current season's signups google sheet.
    Header row (form questions) is normalized via changeColNames.
    args:
        sheetID - google sheet id string
        rangeName - A1-style range (e.g. 'Signups!A:Z')
    returns:
        DataFrame of signups (empty DataFrame if the sheet returned no data)
    '''
    creds = getGoogleCreds() # google.oauth2.credentials
    service = build('sheets', 'v4', credentials=creds)
    # Call the Sheets API
    sheet = service.spreadsheets()
    result = sheet.values().get(spreadsheetId=sheetID,
        range=rangeName).execute()
    values = result.get('values', []) # list of lists
    if len(values)==0:
        print('Signup data not found')
        return pd.DataFrame()
    headers = changeColNames(values[0])
    # Google API retrieved rows each become lists truncated at last value
    newValList=[]
    for vallist in values[1:]:
        while len(vallist)<len(headers):
            vallist.append('') # add blanks for missing/optional answer
        newEntry={}
        for i, val in enumerate(vallist):
            newEntry[headers[i]]= val
        newValList.append(newEntry)
    signups=pd.DataFrame(newValList, columns=headers)
    return signups
def downloadSheet(sheetID, rangeName):
    ''' Generic google sheets download: fetch a range and return it as a
    DataFrame using the first returned row as the column headers.
    args:
        sheetID - google sheet id string
        rangeName - A1-style range (e.g. 'Unilist!A:M')
    returns:
        DataFrame of the sheet contents (empty DataFrame if no data found)
    '''
    creds = getGoogleCreds()  # google.oauth2.credentials
    service = build('sheets', 'v4', credentials=creds)
    # Call the Sheets API for the requested range
    response = service.spreadsheets().values().get(
        spreadsheetId=sheetID, range=rangeName).execute()
    rows = response.get('values', [])  # list of row lists
    if not rows:
        print('No data found for google sheet')
        return pd.DataFrame()
    headers = rows[0]
    # rows come back truncated at the last non-empty cell; pad to full width
    records = []
    for row in rows[1:]:
        padded = row + [''] * (len(headers) - len(row))
        entry = {}
        for i, cell in enumerate(padded):
            entry[headers[i]] = cell
        records.append(entry)
    return pd.DataFrame(records, columns=headers)
tkcroat/SC | pkg/track_stats.py | <filename>pkg/track_stats.py
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 17 11:42:21 2017
@author: tkc
"""
import pandas as pd
import os
# One-off analysis: merge meet results with rosters and compare each runner's
# time against the (converted) qualifying time for their age group.
# NOTE(review): machine-specific working directory
os.chdir('C:\\Users\\tkc\\Documents\\Python_Scripts\\SC')
roster=pd.read_csv('Track_2016_rosters.csv', encoding='cp437')
# NOTE(review): 'sheetname' is the pre-0.21 pandas kwarg (now 'sheet_name')
results=pd.read_excel('Cabrini_track_results.xlsx', sheetname='2017')
qualtimes=pd.read_excel('Cabrini_track_results.xlsx', sheetname='Qual')
# Gets team, gender after first name match
fullresult=pd.merge(results, roster, on=['First'], how='left', suffixes=('','_2'))
# Compute estimated qualifying times based on new 2017 age groups
# NOTE(review): converttimes/gettime are defined *below* in this file; this
# top-to-bottom script only works when run interactively cell-by-cell
newqual=converttimes(qualtimes)
fullresult=pd.merge(fullresult, newqual, on=['Gender','Distance','Team'], how='left', suffixes=('','_2'))
mycols=['First','Distance','Time','Team','Qualtime']
fullresult=fullresult[mycols]
# negative Diff (Difffract < 1) means the runner beat the qualifying time
fullresult['Diff']=fullresult['Time']-fullresult['Qualtime']
fullresult['Difffract']=fullresult['Time']/fullresult['Qualtime']
fullresult=fullresult.sort_values(['Difffract'])
fullresult.to_csv('track_results.csv', index=False)
# return qualifying time for gender, distance and team/group
gettime(qualtimes, 'M', 2300, 'Team7')
def normaltimes(results):
    ''' TODO: normalize race results (unimplemented stub).
    The original source had a bare "def normaltimes(results)" with no colon or
    body -- a syntax error. Kept as an explicit stub so the module imports;
    fill in the normalization logic before use.
    '''
    raise NotImplementedError('normaltimes is not yet implemented')
def converttimes(qualtimes):
    ''' Get qualifying times for the new track age-group teams by averaging the
    old per-age qualifying times whose Group string matches each team's ages.
    args:
        qualtimes - DataFrame w/ columns Distance, Gender, Group, Qualtime
    returns:
        DataFrame w/ one row per (Team, Gender, Distance): Qualtime is the mean
        over matching rows (NaN when nothing matches -- the old implementation
        reused a single Series and silently carried the previous Qualtime over)
    '''
    teams = ['Track7', 'Track89', 'Track1011', 'Track1213', 'Track1415']
    matchstr = ['6|7', '8|9', '10|11', '12|13', '14|15']  # regex per team's ages
    genders = ['M', 'F']
    distances = [50, 100, 200, 400, 800, 1600]
    rows = []
    for i, team in enumerate(teams):
        for sex in genders:
            for dist in distances:
                thistime = qualtimes[(qualtimes['Distance'] == dist) & (qualtimes['Gender'] == sex)]
                thistime = thistime[thistime['Group'].str.contains(matchstr[i])]
                # fresh dict each iteration (Series.set_value was removed in
                # pandas 1.0, and reusing one Series leaked stale Qualtimes)
                entry = {'Gender': sex, 'Distance': dist, 'Team': team,
                         'Matches': len(thistime)}
                if len(thistime) > 0:
                    entry['Qualtime'] = thistime.Qualtime.mean()
                else:
                    entry['Qualtime'] = np.nan  # no source data for this combo
                rows.append(entry)
    return pd.DataFrame(rows)
def gettime(qualtime, sex, dist, team):
    ''' Return the qualifying time for a given gender, distance, and age/team
    (mean over matching rows; NaN when nothing matches). '''
    mask = (qualtime['Gender'] == sex) & (qualtime['Distance'] == dist) & (qualtime['Team'] == team)
    return qualtime[mask].Qualtime.mean()
|
tkcroat/SC | pkg/SC_messaging_functions.py | # -*- coding: utf-8 -*-
"""
Created on Wed May 24 16:15:24 2017
Sponsors Club messaging functions
@author: tkc
"""
import pandas as pd
import smtplib
import numpy as np
import datetime
import tkinter as tk
import glob
import re
import math
import textwrap
from tkinter import filedialog
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pkg.SC_signup_functions import findcards
from openpyxl import load_workbook
import pkg.SC_config as cnf
def emailparent_tk(teams, season, year):
''' Inteface for non-billing email messages to parents (non-generic)
Message types include:
recruit - specific inquiry about player from last year not yet signed up; needs signupfile w/ recruits tab
assign - notify of team assignment, optional recruit for short team, CYC card notify; teams/cards/mastersignups
missinguni - ask about missing uniforms; missingunifile
unireturn - generic instructions for uniform return; mastersignups w/ unis issued
askforcards - check for CYC card on file and ask
other -- Generic single all team+coaches message (can have $SCHOOL, $GRADERANGE,$COACHINFO, $SPORT, $PLAYERLIST)
8/9/17 works for team assignments
TODO test recruit, missing unis, unireturn
args:
teams - df w/ active teams
season -'Winter', 'Fall' or 'Spring'
year - starting sport year i.e. 2019 for 2019-20 school year
'''
#%%
# first print out existing info in various lines
root = tk.Tk()
root.title('Send e-mail to parents')
messageframe=tk.LabelFrame(root, text='Message options')
unifilename=tk.StringVar()
try:
unifiles=glob.glob('missingunilist*') # find most recent uniform file name
if len(unifiles)>1:
unifile=findrecentfile(unifiles) # return single most recent file
else:
unifile=unifiles[0]
# find most recent missing uni file name
unifilename.set(unifile)
except: # handle path error
unifilename.set('missingunilist.csv')
recruitbool=tk.BooleanVar() # optional recruiting for short teams
emailtitle=tk.StringVar() # e-mail title
mtype=tk.StringVar() # coach message type
messfile=tk.StringVar() # text of e-mail message
transmessfile=tk.StringVar() # text of e-mail message for transfers
extravar=tk.StringVar() # use depends on message type... normally filename
extraname=tk.StringVar() # name for additional text entry box (various uses mostly filenames)
extraname.set('Extra_file_name.txt') # default starting choice
choice=tk.StringVar() # test or send -mail
def chooseFile(txtmess, ftypes):
''' tkinter file chooser (passes message string for window and expected
file types as tuple e.g. ('TXT','*.txt')
'''
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = txtmess, filetypes=[ ftypes] )
root.destroy() # closes pop up window
return full_path
def choose_message():
# choose existing message (.txt file)
root=tk.Tk() # creates pop-up window
root.update() # necessary to close tk dialog after askopenfilename is finished
# tk dialog asks for a single station file
full_path = tk.filedialog.askopenfilename(title = 'Choose message file', filetypes=[ ('TXT','*.txt')] )
root.destroy() # closes pop up window
return full_path
# Functions to enable/disable relevant checkboxes depending on radiobutton choice
def Assignopts():
''' Display relevant choices for team assignment notification/cyc card/ short team recruiting '''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
extraname.set('n/a')
messfile.set('parent_team_assignment.txt')
transmessfile.set('parent_team_transfer.txt')
emailtitle.set('Fall $SPORT for $FIRST')
def Recruitopts():
''' Display relevant choices for specific player recruiting'''
recruitcheck.config(state=tk.NORMAL)
extraentry.config(state=tk.DISABLED)
messfile.set('player_recruiting.txt')
transmessfile.set('n/a')
extraname.set('n/a')
emailtitle.set('Cabrini-Soulard sports for $FIRST this fall?')
def Missingopts():
''' Display relevant choices for ask parent for missing uniforms '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('finish_me.txt')
transmessfile.set('n/a')
extraname.set('Missing uni file name')
extravar.set('missing_uni.csv')
# TODO look up most recent uni file?
emailtitle.set("Please return $FIRST's $SPORT uniform!")
def Schedopts():
''' Display relevant choices for sending schedules (game and practice) to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('parent_game_schedule.txt')
transmessfile.set('n/a')
extraname.set('Game schedule file')
extravar.set('Cabrini_2017_schedule.csv')
emailtitle.set("Game schedule for Cabrini $GRADERANGE $GENDER $SPORT")
def Cardopts():
''' Display relevant choices for asking parent for missing CYC cards '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.DISABLED)
messfile.set('CYCcard_needed.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("CYC card needed for $FIRST")
def Otheropts():
''' Display relevant choices for other generic message to parents '''
recruitcheck.config(state=tk.DISABLED)
# Used here for name of master file schedule
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
def Allopts():
''' Display relevant choices for generic message to all sports parents '''
recruitcheck.config(state=tk.DISABLED)
extraentry.config(state=tk.NORMAL)
messfile.set('temp_message.txt')
transmessfile.set('n/a')
extraname.set('')
extravar.set('')
emailtitle.set("Message from Cabrini Sponsors Club")
# E-mail title and message file name
rownum=0
tk.Label(messageframe, text='Title for e-mail').grid(row=rownum, column=0)
titleentry=tk.Entry(messageframe, textvariable=emailtitle)
titleentry.config(width=50)
titleentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='messagefile').grid(row=rownum, column=0)
messentry=tk.Entry(messageframe, textvariable=messfile)
messentry.config(width=50)
messentry.grid(row=rownum, column=1)
rownum+=1
tk.Label(messageframe, text='Transfer messagefile').grid(row=rownum, column=0)
transmessentry=tk.Entry(messageframe, textvariable=transmessfile)
transmessentry.config(width=50)
transmessentry.grid(row=rownum, column=1)
rownum+=1
# Choose counts, deriv, both or peaks plot
tk.Radiobutton(messageframe, text='Team assignment', value='Assign', variable = mtype, command=Assignopts).grid(row=rownum, column=0)
tk.Radiobutton(messageframe, text='Recruit missing', value='Recruit', variable = mtype, command=Recruitopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Missing uni', value='Missing', variable = mtype, command=Missingopts).grid(row=rownum, column=2)
tk.Radiobutton(messageframe, text='Send schedule', value='Schedule', variable = mtype, command=Schedopts).grid(row=rownum, column=3)
rownum+=1
tk.Radiobutton(messageframe, text='Ask for cards', value='Cards', variable = mtype, command=Cardopts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='Other team message', value='Other', variable = mtype, command=Otheropts).grid(row=rownum, column=1)
tk.Radiobutton(messageframe, text='All sport parents', value='All', variable = mtype, command=Allopts).grid(row=rownum, column=2)
rownum+=1
tk.Label(messageframe, text=extraname.get()).grid(row=rownum, column=0)
extraentry=tk.Entry(messageframe, textvariable=extravar)
extraentry.grid(row=rownum, column=1)
# Extra file chooser button
# button arg includes file type extension .. get from messfile
try:
ft = extraname.get().split('.')[-1]
ftypes =("%s" %ft.upper(), "*.%s" %ft)
except:
ftypes =("CSV" , "*.*") # default to all files
# TODO fix extra file chooser
d=tk.Button(messageframe, text='Choose file', command=chooseFile('Choose extra file', ftypes) )
d.grid(row=rownum, column=2)
recruitcheck=tk.Checkbutton(messageframe, variable=recruitbool, text='Recruit more players for short teams?')
recruitcheck.grid(row=rownum, column=3) # can't do immediate grid or nonetype is returned
    rownum+=1
    messageframe.grid(row=0, column=0)
    # Specific team selector section using checkboxes
    teamframe=tk.LabelFrame(root, text='Team selector')
    teamdict=shortnamedict(teams)
    teamlist=[] # list of tk bools for each team
    # Make set of bool/int variables for each team
    # ('#' in the key marks a transfer team -- unchecked by default)
    for i, val in enumerate(teamdict):
        teamlist.append(tk.IntVar())
        if '#' not in val:
            teamlist[i].set(1) # Cabrini teams checked by default
        else:
            teamlist[i].set(0) # transfer team
    # make checkbuttons for each team, five per column
    for i, val in enumerate(teamdict):
        thisrow=i%5+1+rownum # three column setup
        thiscol=i//5
        thisname=teamdict.get(val,'')
        tk.Checkbutton(teamframe, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
    # advance rownum past the checkbox grid
    rownum+=math.ceil(len(teamlist)/5)+2
    # Decision buttons bottom row
    def chooseall(event):
        ''' Select all teams '''
        for i, val in enumerate(teamdict):
            teamlist[i].set(1)
    def clearall(event):
        ''' deselect all teams '''
        for i, val in enumerate(teamdict):
            teamlist[i].set(0)
    # The four decision callbacks record the user's decision in the closure
    # StringVar 'choice' and close the dialog (resuming after root.mainloop())
    def abort(event):
        choice.set('abort')
        root.destroy()
    def test(event):
        choice.set('test')
        root.destroy()
    def KCtest(event):
        choice.set('KCtest')
        root.destroy()
    def send(event):
        choice.set('send')
        root.destroy()
rownum+=1
d=tk.Button(teamframe, text='All teams')
d.bind('<Button-1>', chooseall)
d.grid(row=rownum, column=0)
d=tk.Button(teamframe, text='Clear teams')
d.bind('<Button-1>', clearall)
d.grid(row=rownum, column=1)
teamframe.grid(row=1, column=0)
choiceframe=tk.LabelFrame(root)
d=tk.Button(choiceframe, text='Abort')
d.bind('<Button-1>', abort)
d.grid(row=rownum, column=2)
d=tk.Button(choiceframe, text='Test')
d.bind('<Button-1>', test)
d.grid(row=rownum, column=3)
d=tk.Button(choiceframe, text='KCtest')
d.bind('<Button-1>', KCtest)
d.grid(row=rownum, column=4)
d=tk.Button(choiceframe, text='Send')
d.bind('<Button-1>', send)
d.grid(row=rownum, column=5)
choiceframe.grid(row=2, column=0)
root.mainloop()
    #%%
    # --- Post-dialog dispatch: load the data each message type needs and call
    # the matching sender. 'KCtest' behaves like 'send' with an internal-only
    # recipient list (flag passed through kwargs).
    mychoice=choice.get()
    if mychoice!='abort':
        kwargs={}
        if mychoice=='KCtest':
            # this is a true send test but only to me
            kwargs.update({'KCtest':True})
            mychoice='send'
        kwargs.update({'choice':mychoice}) # test or send
        emailtitle=emailtitle.get()
        messagefile='messages\\'+messfile.get()
        # Handle selection of team subsets
        selteams=[]
        for i, val in enumerate(teamdict):
            if teamlist[i].get()==1:
                selteams.append(val)
        # Filter teams based on checkbox input
        teams=teams[teams['Team'].isin(selteams)]
        # drop duplicates in case of co-ed team (m and f entries)
        teams=teams.drop_duplicates(['Team','Sport'])
        # Now deal with the different types of messages
        #%%
        if mtype.get()=='Schedule':
            # Send practice and game schedules
            try:
                sched=pd.read_csv(extravar.get())
            except:
                print('Problem opening schedule and other required files for sending game schedules')
                fname=filedialog.askopenfilename(title='Select schedule file.')
                sched=pd.read_csv(fname)
            # fields=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Fields')
            fields=pd.read_csv(cnf._INPUT_DIR+'\\fields.csv')
            Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
            #coaches=pd.read_excel('Teams_coaches.xlsx', sheetname='Coaches')
            coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv')
            # INTERNAL TESTING
            # Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
            famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
            with open(messagefile, 'r') as file:
                blankmess=file.read()
            # open and send master CYC schedule
            sendschedule(teams, sched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, **kwargs)
        if mtype.get()=='Recruit':
            try:
                famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
            except:
                print('Problem loading family contacts')
            try: # Recruits stored in CSV
                Recruits=pd.read_csv(cnf._OUTPUT_DIR+'\\%s%s_recruits.csv' %(season, year))
                print('Loaded possible recruits from csv file')
            except:
                fname=filedialog.askopenfilename(title='Select recruits file.')
                if fname.endswith('.csv'): # final move is query for file
                    Recruits=pd.read_csv(fname)
                else:
                    print('Recruits file needed in csv format.')
                    return
            emailrecruits(Recruits, famcontact, emailtitle, messagefile, **kwargs)
        if mtype.get()=='Assign':
            # Notify parents needs teams, mastersignups, famcontacts
            if recruitbool.get():
                kwargs.update({'recruit':True})
            try:
                Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
                #coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
                coaches=pd.read_csv(cnf._INPUT_DIR+'\\coaches.csv', encoding='cp437')
                # INTERNAL TESTING
                # Mastersignups=Mastersignups[Mastersignups['Last']=='Croat']
                famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
                with open(messagefile, 'r') as file:
                    blankmess=file.read()
                tranmessagefile='messages\\'+transmessfile.get()
                with open(tranmessagefile, 'r') as file:
                    blanktransmess=file.read()
            except:
                print('Problem loading mastersignups, famcontacts')
                return
            notifyfamilies(teams, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, blanktransmess, **kwargs)
        if mtype.get()=='Unis':
            # NOTE(review): this branch appears unreachable -- the 'Missing uni'
            # radiobutton sets mtype to 'Missing', not 'Unis'. If it ever ran:
            # Mastersignups is not loaded here (NameError at the call below);
            # the module-level askforunis() stub takes no arguments; and
            # messfile.get() is the message template, not the missing-uni csv
            # (extravar.get() holds that). Needs a real fix before enabling.
            try:
                missing=pd.read_csv(messfile.get(), encoding='cp437')
                oldteams=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Oldteams') # loads all old teams in list
                kwargs.update({'oldteams':oldteams,'missing':missing})
            except:
                print('Problem loading missingunis, oldteams')
                return
            # TODO Finish ask for missing uniforms script
            askforunis(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
        if mtype.get()=='Cards':
            try:
                famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
                Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
                with open(messagefile, 'r') as file:
                    blankmess=file.read()
            except:
                print('Problem loading famcontacts, mastersignups, or blank message')
                return
            # TODO Finish ask for missing uniforms script
            askforcards(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
        if mtype.get()=='Other':
            try:
                famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
                Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
                coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
                with open(messagefile, 'r') as file:
                    blankmess=file.read()
            except:
                print('Problem loading mastersignups, coaches, ')
                return
            # TODO Finish ask for missing uniforms script
            sendteammessage(teams, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs)
        if mtype.get()=='All':
            try:
                famcontact= pd.read_csv(cnf._INPUT_DIR+'\\family_contact.csv', encoding='cp437')
                Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
                #coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches')
                # NOTE(review): read_excel on a .csv file -- probably meant pd.read_csv
                coaches=pd.read_excel(cnf._INPUT_DIR+'\\coaches.csv')
                with open(messagefile, 'r') as file:
                    blankmess=file.read()
            except:
                print('Problem loading mastersignups, coaches, ')
                return
            # TODO Finish ask for missing uniforms script
            sendallmessage(season, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs)
    return
''' TESTING of notifyfamilies
[sport, team, graderange, coachinfo, playerlist] =cabteamlist[6] i=6
index=thisteam.index[0]
row=thisteam.loc[index]
'''
def readMessage():
    ''' Choose text file from messages as template for email or log message (w/ find/replace
    of team and individual info)
    args: none
    returns: string with contents of chosen TXT file
    '''
    dialog_root = tk.Tk()  # pop-up window for the file chooser
    dialog_root.update()  # necessary to close tk dialog after askopenfilename is finished
    template_path = tk.filedialog.askopenfilename(
        initialdir = cnf._INPUT_DIR+'\\messages\\', title = 'Choose blank email template',
        filetypes=[ ('txt','*.txt')] )
    dialog_root.destroy()  # closes pop up window
    with open(template_path, 'r') as fh:
        return fh.read()
def askforunis():
    ''' Placeholder for the missing-uniform reminder e-mail.

    NOTE(review): the GUI dispatch calls askforunis(teams, Mastersignups, year,
    famcontact, emailtitle, blankmess, **kwargs); this zero-argument stub would
    raise TypeError if that (currently unreachable) branch ever ran.
    '''
    # TODO finish me
    pass
def askforcards(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs):
    ''' Notify each CYC-level player without a card image on file, asking the
    family for the CYC ID card, via one custom e-mail per player.
    $FIRST/$LAST placeholders in emailtitle/blankmess are replaced per player.
    kwargs:
        choice - 'send' or 'test' (default 'test': write messages to a log
                 file instead of sending)
    '''
    choice=kwargs.get('choice','test')
    if choice=='send':
        smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
        smtpObj.ehlo() # say hello
        smtpObj.starttls() # enable encryption for send
        print('Enter password for sponsors club gmail ')
        passwd=input()
        smtpObj.login('<EMAIL>', passwd)
    else: # testing only... open log file
        # NOTE: logfile only exists in test mode; all writes below are guarded
        logfile=open('parent_email_log.txt','w', encoding='utf-8')
    # this years signups only (later match for sport and team)
    Mastersignups=Mastersignups[Mastersignups['Year']==year]
    # drop non-CYC K and 1 level teams
    teams=teams[teams['Grade']>=2]
    # Make list of sport/team/school/graderange
    teamlist=[]
    for index, row in teams.iterrows():
        # get school ('#' in team name marks a transfer team, school#...)
        if '#' not in teams.loc[index]['Team']:
            school='Cabrini'
        else:
            school=teams.loc[index]['Team'].split('#')[0]
        # Get sport, team, graderange, coach info (first/last/e-mail), playerlist
        teamlist.append([teams.loc[index]['Sport'], teams.loc[index]['Team'], school,
            teams.loc[index]['Graderange']])
    # dict. with each team and its players
    cards=findcards() # find all player cards
    if not cards: # terminate if no cards are found (path error?)
        print("Error opening CYC card image database")
        return
    # Drop all player nums found in cards (keys without '-' are plain player keys)
    cardslist=list(cards.keys())
    cardslist=[i for i in cardslist if '-' not in i]
    cardslist=[int(i) for i in cardslist]
    # Only keep signups without cards
    Mastersignups=Mastersignups[~Mastersignups['Plakey'].isin(cardslist)]
    CYCSUs=pd.DataFrame()
    for i, [sport, team, school, graderange] in enumerate(teamlist):
        CYCSUs=CYCSUs.append(Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)])
    # only one notice needed per player
    CYCSUs=CYCSUs.drop_duplicates('Plakey')
    CYCSUs=pd.merge(CYCSUs, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
    for index, row in CYCSUs.iterrows():
        # Replace first name in e-mail title (default e-mail title is fall $SPORT for $FIRST)
        thistitle=emailtitle.replace('$FIRST', row.First)
        thistitle=thistitle.replace('$LAST', row.Last)
        # custom message for individual player on this team
        thismess=blankmess.replace('$FIRST', row.First)
        thismess=thismess.replace('$LAST', row.Last)
        recipients=getemailadds(row)
        # Create custom email message (can have multiple sports in df)
        if choice=='send':
            # add From/To/Subject to actual e-mail (raw RFC-822 text, utf-8)
            thisemail='From: Cabrini Sponsors Club <<EMAIL>>\nTo: '
            thisemail+=', '.join(recipients)+'\nSubject: '+thistitle+'\n'
            thisemail+=thismess
            thisemail=thisemail.encode('utf-8')
            for i,addr in enumerate(recipients): # Send message to each valid recipient in list
                try:
                    smtpObj.sendmail('<EMAIL>', addr, thisemail)
                    print ('Message sent to ', addr)
                except:
                    print('Message to ', addr, ' failed.')
            if not recipients:
                print('No email address for ', row.First, row.Last)
        else: # Testing mode ... just write to log w/o e-mail header and such
            logfile.write(thistitle+'\n')
            logfile.write(thismess+'\n')
    # close log file (if testing mode)
    if choice!='send':
        logfile.close()
    else:
        pass
        # NOTE(review): SMTP connection is never closed in send mode
        # TODO fix this attempted close
        # smtpObj.quit() # close SMTP connection
    return
def sendallmessage(season, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs):
    ''' Send one broadcast e-mail (Bcc) to every family active in recent
    sport-seasons plus all coaches, or write it to a log file in test mode.

    Args:
        season, year: current season/year (defines which families are "active")
        Mastersignups, famcontact, coaches: loaded roster/contact DataFrames
        emailtitle: subject line
        blankmess: message body, sent as-is (no per-player substitution)
    kwargs:
        choice - 'send' or 'test' (default 'test': log only)
        KCtest - if present, a real send but only to internal test addresses
    '''
    choice=kwargs.get('choice','test')
    if choice=='send':
        smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
        smtpObj.ehlo() # say hello
        smtpObj.starttls() # enable encryption for send
        print('Enter password for sponsors club gmail ')
        passwd=input()
        smtpObj.login('<EMAIL>', passwd)
    else: # testing only... open log file
        logfile=open('allparent_email_log.txt','w', encoding='utf-8')
    # Get all email addresses from recent parents (default last 3 seasons)
    recipients=makeemaillist(Mastersignups, famcontact, season, year, SMS=False)
    # add all coach emails
    coachemails=np.ndarray.tolist(coaches.Email.unique())
    # BUG FIX: coaches.Email may contain NaN (float) for missing addresses;
    # "'@' in i" raised TypeError on those, so filter to strings first
    coachemails=[i for i in coachemails if isinstance(i, str) and '@' in i]
    recipients.extend(coachemails)
    recipients=list(set(recipients)) # unique only
    # Create custom email message (can have multiple sports in df)
    if choice=='send':
        if 'KCtest' in kwargs: # internal only send test
            recipients=['<EMAIL>','<EMAIL>']
        msg=MIMEText(blankmess,'plain')
        msg['Subject'] = emailtitle
        msg['From'] = '<NAME> <<EMAIL>>'
        msg['To'] = '<NAME> <<EMAIL>>'
        msg['Bcc']=','.join(recipients) # single e-mail or list
        # Simultaneous send to all in recipient list
        smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
        print ('Message sent to ', ','.join(recipients))
    else: # Testing mode
        tempstr='Test message to: '+', '.join(recipients)
        logfile.write(tempstr+'\n')
        logfile.write(blankmess)
        logfile.close() # close log file (testing mode only)
    # NOTE(review): SMTP connection is never closed after a send; consider
    # smtpObj.quit() once the original close problem is understood
    return
def sendteammessage(teams, year, Mastersignups, famcontact, coaches, emailtitle, blankmess, **kwargs):
    ''' Send one e-mail per team (coach plus all team family addresses) with
    team-specific substitutions applied to both subject and body.

    $SPORT/$TEAMNAME/$SCHOOL/$GRADERANGE/$COACH/$PLAYERLIST placeholders in
    emailtitle/blankmess are replaced per team.

    Args:
        teams, Mastersignups, famcontact, coaches: loaded roster/contact DataFrames
        year: signup year to match against Mastersignups['Year']
        emailtitle, blankmess: subject/body templates
    kwargs:
        choice - 'send' or 'test' (default 'test': write messages to log file)
    '''
    choice=kwargs.get('choice','test')
    if choice=='send':
        smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
        smtpObj.ehlo() # say hello
        smtpObj.starttls() # enable encryption for send
        print('Enter password for sponsors club gmail ')
        passwd=input()
        smtpObj.login('<EMAIL>', passwd)
    else: # testing only... open log file
        logfile=open('team_email_log.txt','w', encoding='utf-8')
    # this years signups only (later match for sport and team)
    Mastersignups=Mastersignups[Mastersignups['Year']==year]
    # drop extra co-ed K or other entries
    teams=teams.drop_duplicates(['Team'])
    myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
    # Build list of [sport, team, school, graderange, coachinfo, playerlist]
    teamlist=[]
    for index, row in myteams.iterrows():
        if '#' not in row['Team']: # Cabrini-hosted team
            school='Cabrini'
            try:
                coachinfo=row['Fname']+' '+row['Lname']+' ('+row['Email']+')'
            except Exception: # missing coach fields (NaN) -> no coach info
                coachinfo=''
        else: # transfer team: school name precedes the '#'
            school=row['Team'].split('#')[0]
            coachinfo=''
        teamlist.append([row.Sport, row.Team, school,
            gradetostring(row.Graderange), coachinfo, row.Playerlist])
    # Separate notification for each team
    for i, [sport, team, school, graderange, coach, playerlist] in enumerate(teamlist):
        thisteam=Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)]
        thisteam=pd.merge(thisteam, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
        thisteammess=blankmess
        thistitle=emailtitle
        # Make team-specific replacements in message text and e-mail title
        for j, col in enumerate(['$SPORT', '$TEAMNAME', '$SCHOOL', '$GRADERANGE', '$COACH', '$PLAYERLIST']):
            thisteammess=thisteammess.replace(col, textwrap.fill(teamlist[i][j], width=100))
            thistitle=thistitle.replace(col, teamlist[i][j])
        # get coach emails (including assistants)
        recipients=getcoachemails(team, teams, coaches, **{'asst':True})
        # Now get all unique team email addresses (single message to coach and team)
        recipients=getallteamemails(thisteam, recipients)
        if choice=='send':
            msg=MIMEText(thisteammess,'plain')
            # BUG FIX: subject previously used the raw emailtitle template;
            # use thistitle, which carries the team substitutions
            msg['Subject'] = thistitle
            msg['From'] = '<NAME> <<EMAIL>>'
            msg['To']=','.join(recipients) # single e-mail or list
            # Simultaneous send to all in recipient list
            smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
            print ('Message sent to ', ','.join(recipients))
            if not recipients:
                print('No email addresses for team', team)
        else: # Testing mode ... just write to log w/o e-mail header and such
            logfile.write(thistitle+'\n')
            logfile.write(thisteammess+'\n')
    # close log file (if testing mode)
    if choice!='send':
        logfile.close()
    # NOTE(review): SMTP connection is never closed after a send; consider smtpObj.quit()
    return
def makeemaillist(Mastersignups, famcontact, thisseason, thisyear, SMS=False):
    '''Build the e-mail contact list of "active" families: those with a player
    signed up this year, or last year in a sport whose season is thisseason or
    later (i.e. roughly the last three sport-seasons), excluding graduated
    8th graders.

    Args:
        Mastersignups: signups DataFrame (Year, Grade, Sport, Famkey columns)
        famcontact: family contacts DataFrame (Famkey, Email1..Email3 columns)
        thisseason: 'Fall', 'Winter' or 'Spring'
        thisyear: current signup year
        SMS: when False, drop SMS-gateway numbers (314/1314 prefixes)
    Returns:
        list of unique e-mail addresses ('nan'/'none' placeholders removed)
    '''
    # TODO generalize to n prior sports seasons
    season_order=['Fall', 'Winter', 'Spring']
    season_sports={'Fall':['VB','Soccer'], 'Winter':['Basketball'],'Spring':['Track','Softball','Baseball','T-ball']}
    # sports whose season is this one or later in the school year
    active_sports=[]
    for season in season_order[season_order.index(thisseason):]:
        active_sports.extend(season_sports.get(season))
    current=Mastersignups[Mastersignups['Year']==thisyear] # all of this year
    # last year's signups: drop graduated 8th graders and out-of-window sports
    prior=Mastersignups[Mastersignups['Year']==(thisyear-1)]
    prior=prior[prior['Grade']!=8]
    prior=prior[prior['Sport'].isin(active_sports)]
    active_fams=pd.concat([current, prior], ignore_index=True).Famkey.unique()
    matched=famcontact[famcontact['Famkey'].isin(active_fams)]
    # gather every e-mail column, then de-duplicate
    addresses=[]
    for col in ['Email1', 'Email2', 'Email3']:
        addresses.extend(matched[col].unique().tolist())
    addresses=list(set(addresses))
    addresses=[a for a in addresses if str(a) != 'nan'] # remove nan
    addresses=[a for a in addresses if str(a) != 'none'] # remove none placeholder
    if not SMS: # Drop SMS gateway numbers
        addresses=[a for a in addresses if not str(a).startswith('314')]
        addresses=[a for a in addresses if not str(a).startswith('1314')]
    return addresses
def getcabsch(sched, teams, coaches, fields, **kwargs):
    ''' Return the Cabrini-containing subset of the master CYC schedule
    (manual save... the resulting csv can then be fed to sendschedule).
    kwargs:
        sport -- Soccer, VB or whatever (filters teams by Sport)
        div --- division string such as '5G' (grade digit + G/B gender code)
        school - 'Cabrini' drops transfer teams (names containing '#')
    Returns:
        DataFrame with columns Date/Time/Day/Location/Division/Home/Away/Team
    #TESTING sched=fullsched.copy()
    '''
    if 'school' in kwargs:
        if kwargs.get('school','')=='Cabrini':
            # drop transfer teams w/ #
            teams=teams[~teams['Team'].str.contains('#')]
    if 'sport' in kwargs:
        sport=kwargs.get('sport','')
        teams=teams[teams['Sport']==sport]
    if 'div' in kwargs:
        div=kwargs.get('div','')
        grade=int(div[0])
        # NOTE(review): gender is only bound for G/B codes; any other letter
        # in div[1] raises NameError on the filter line below
        if div[1].upper()=='G':
            gender='f'
        elif div[1].upper()=='B':
            gender='m'
        teams=teams[(teams['Grade']==grade) & (teams['Gender']==gender)]
    # normalize master-schedule column names to the local convention
    sched=sched.rename(columns={'Start':'Time','Venue':'Location','Sched Name':'Division',
        'Visitor':'Away'})
    # map each local team key to its (division, schedule name) in the master
    teamdict=findschteams(sched, teams, coaches)
    cabsched=pd.DataFrame()
    for key, [div, schname] in teamdict.items():
        match=sched[(sched['Division'].str.startswith(div)) & ((sched['Home'].str.contains(schname)) | (sched['Away'].str.contains(schname)))]
        if 'Cabrini' not in schname:
            # shared/partner team: append '-Cabrini' to the schedule name
            newname=schname.split('/')[0]+'-Cabrini'
            match['Home']=match['Home'].str.replace(schname,newname)
            match['Away']=match['Away'].str.replace(schname,newname)
        # add team column via assign
        match=match.assign(Team=key)
        # Why isn't team col being copied?
        cabsched=cabsched.append(match, ignore_index=True)
        print(len(match),' games for team', str(schname))
    cabsched['Home']=cabsched['Home'].str.replace('St Frances','')
    cabsched['Away']=cabsched['Away'].str.replace('St Frances','')
    cabsched=cabsched.sort_values(['Division','Date','Time'])
    # now sort
    myCols=['Date','Time','Day','Location','Division','Home','Away','Team']
    # add col if missing from CYC schedule
    for miss in [i for i in myCols if i not in cabsched.columns]:
        print(miss,'column missing from full CYC schedule')
        cabsched[miss]=''
    cabsched=cabsched[myCols] # set in above preferred order
    # warn about venues with no address in the fields table
    flist=np.ndarray.tolist(cabsched.Location.unique())
    missing=[s for s in flist if s not in fields['Location'].tolist()]
    if len(missing)>0:
        print('Address missing from fields table:',','.join(missing))
    # convert to desired string format here (write-read cycle makes it a string anyway)
    # cabsched.Time=cabsched.Time.apply(lambda x:datetime.time.strftime(x, format='%I:%M %p'))
    #cabsched['Date']=cabsched['Date'].dt.strftime(date_format='%d-%b-%y')
    return cabsched
def detectschchange(sched1, sched2):
    '''Compare two schedule versions and return the rows that differ (changed games).

    Both frames are normalized first: string 'Date' columns are parsed with
    %m/%d/%Y, and a string 'Time' column in sched2 is parsed as %H:%M:%S into
    datetime.time objects. Rows present in both frames are then dropped
    (drop_duplicates keep=False), leaving only added/removed/altered games.

    Args:
        sched1, sched2: schedule DataFrames with at least Date/Time/Division
            columns (mutated in place by the normalization above)
    Returns:
        DataFrame of unique (changed) rows sorted by Date, Time, Division
    '''
    # Convert both to datetime/timestamps if in string format (probably %m/%d/%Y)
    if type(sched1.iloc[0]['Date'])==str:
        try:
            sched1['Date']=sched1['Date'].apply(lambda x:datetime.datetime.strptime(x, "%m/%d/%Y"))
        except (ValueError, TypeError):
            print('Problem with string to datetime conversion for', sched1.iloc[0]['Date'])
    if type(sched2.iloc[0]['Date'])==str:
        try:
            sched2['Date']=sched2['Date'].apply(lambda x:datetime.datetime.strptime(x, "%m/%d/%Y"))
        except (ValueError, TypeError):
            print('Problem with string to datetime conversion for', sched2.iloc[0]['Date'])
    # NOTE(review): sched1['Time'] is left unconverted -- callers appear to
    # pass sched1 already normalized; confirm before relying on mixed input
    if type(sched2.iloc[0]['Time'])==str:
        try:
            # strptime(...).time() already yields datetime.time objects
            # (BUG FIX: a second pass through datetime.time(x) raised TypeError,
            # which the old bare except swallowed while printing a bogus error)
            sched2['Time']=sched2['Time'].apply(lambda x:datetime.datetime.strptime(x, "%H:%M:%S").time())
        except (ValueError, TypeError):
            print('Problem with string to datetime conversion for', sched2.iloc[0]['Time'])
    # all columns by default, keep=False drops both copies leaving unique rows
    bothsch=pd.concat([sched1,sched2])
    alteredrows=bothsch.drop_duplicates(keep=False)
    alteredrows=alteredrows.sort_values(['Date','Time','Division'])
    return alteredrows
def makefieldtable(df, fields):
    ''' Build a small table of field addresses for every venue appearing in a
    team's schedule (called by sendschedule).

    Args:
        df: schedule DataFrame with a Location column
        fields: master fields DataFrame with Location and Address columns
    Returns:
        DataFrame with one Location/Address row per unique venue (Address is
        NaN when the venue is missing from the fields table)
    '''
    venue_names=[name.strip() for name in df.Location.unique().tolist()]
    fieldtable=pd.DataFrame({'Location': venue_names})
    fieldtable=pd.merge(fieldtable, fields, how='left', on=['Location'])
    return fieldtable[['Location','Address']]
def notifyfamilies(teams, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, blanktransmess, **kwargs):
    ''' Top level messaging function for notifying families of team assignment/ CYC card
    + optional short-team-player-recruiting
    via custom e-mail; one per player
    currently not including SMS

    Substitutes team info ($SPORT/$TEAMNAME/$SCHOOL/$GRADERANGE/$COACH/$PLAYERLIST),
    player info ($FIRST/$LAST) and conditional blocks ($RECRUIT, $CYCCARD) into
    blankmess (Cabrini teams) or blanktransmess (transfer teams).

    kwargs:
        choice - 'send' or 'test'
        recruit - T or F -- add recruiting statement for short teams
    '''
    choice=kwargs.get('choice','test')
    if choice=='send':
        smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
        smtpObj.ehlo() # say hello
        smtpObj.starttls() # enable encryption for send
        print('Enter password for sponsors club gmail ')
        passwd=input()
        smtpObj.login('<EMAIL>', passwd)
    else: # testing only... open log file
        logfile=open('parent_email_log.txt','w', encoding='utf-8')
    # this years signups only (later match for sport and team)
    Mastersignups=Mastersignups[Mastersignups['Year']==year]
    myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
    # Make list of sport/team/school/graderange/coachinfo/playerlist
    teamlist=[]
    for index, row in myteams.iterrows():
        # get school ('#' in team name marks a transfer team, school#...)
        if '#' not in myteams.loc[index]['Team']:
            school='Cabrini'
            coachinfo=myteams.loc[index]['Fname']+' '+ myteams.loc[index]['Lname']+' ('+myteams.loc[index]['Email']+')'
        else:
            school=myteams.loc[index]['Team'].split('#')[0]
            coachinfo=''
        # Get sport, team, graderange, coach info (first/last/e-mail), playerlist
        # NOTE(review): sport is lower-cased here but compared verbatim against
        # Mastersignups['Sport'] below -- if signups store e.g. 'VB' the match
        # silently fails; confirm the stored case convention
        teamlist.append([row.Sport.lower(), row.Team, school, gradetostring(row.Graderange),
            coachinfo, row.Playerlist])
    # dict. with each team and its players
    cards=findcards() # find all player cards
    if not cards: # terminate if no cards are found (path error?)
        print("Error opening CYC card image database")
        return
    # Separate notification for each signup is OK
    for i, [sport, team, school, graderange, coachinfo, playerlist] in enumerate(teamlist):
        thisteam=Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)]
        thisteam=pd.merge(thisteam, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
        if '#' not in team:
            # Cabrini team base message
            thisteammess=blankmess
        else: # base message for transferred players
            thisteammess=blanktransmess
        thisteamtitle=emailtitle
        # Make team-specific replacements
        for j, col in enumerate(['$SPORT', '$TEAMNAME', '$SCHOOL', '$GRADERANGE', '$COACH', '$PLAYERLIST']):
            thisteammess=thisteammess.replace(col, textwrap.fill(teamlist[i][j], width=100))
            thisteamtitle=thisteamtitle.replace(col, teamlist[i][j])
        # Check if Cabrini team is short of players (max grade, sport, numplayers)
        try:
            recmess=makerecmess(team, thisteam['Grade'].max(), sport, len(thisteam))
        except:
            recmess='' # handles empty teams during testing
        # Either blank inserted or generic needs more players request (same for whole team)
        thisteammess=thisteammess.replace('$RECRUIT','\n'+recmess)
        for index, row in thisteam.iterrows():
            # Replace first name in e-mail title (default e-mail title is fall $SPORT for $FIRST)
            thistitle=thisteamtitle.replace('$FIRST', row.First)
            thistitle=thistitle.replace('$SPORT', row.Sport)
            # Check for each players CYC card if necessary (also for older transfer teams)
            thiscardmess=makecardmess(row, cards)
            # custom message for individual player on this team
            thismess=thisteammess.replace('$FIRST', row.First)
            thismess=thismess.replace('$LAST', row.Last)
            # message is blank if on file or not required and
            thismess=thismess.replace('$CYCCARD', '\n'+thiscardmess)
            recipients=getemailadds(row)
            # Create custom email message (can have multiple sports in df)
            if choice=='send':
                # add From/To/Subject to actual e-mail
                # NOTE(review): this MIME message sends the raw blankmess
                # template (not the per-player thismess) to the Bcc list, and
                # the loop below then re-sends thismess to each address -- every
                # family gets two e-mails, one unsubstituted. Looks like
                # leftover code; confirm which send path is intended.
                msg=MIMEText(blankmess,'plain')
                # msg = MIMEMultipart('alternative') # message container
                msg['Subject'] = thistitle
                msg['From'] = 'Cabrini Sponsors Club <<EMAIL>>'
                msg['To'] = 'Cabrini Sports Parents <<EMAIL>>'
                msg['Bcc']=','.join(recipients) # single e-mail or list
                # Simultaneous send to all in recipient list
                smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
                print ('Message sent to ', ','.join(recipients))
                thisemail='From: Cabrini Sponsors Club <<EMAIL>>\nTo: '
                thisemail+=', '.join(recipients)+'\nSubject: '+thistitle+'\n'
                thisemail+=thismess
                thisemail=thisemail.encode('utf-8')
                for i,addr in enumerate(recipients): # Send message to each valid recipient in list
                    try:
                        smtpObj.sendmail('<EMAIL>', addr, thisemail)
                        print ('Message sent to ', addr)
                    except:
                        print('Message to ', addr, ' failed.')
                if not recipients:
                    print('No email address for ', row.First, row.Last)
            else: # Testing mode ... just write to log w/o e-mail header and such
                logfile.write(thistitle+'\n')
                logfile.write(thismess+'\n')
    # close log file (if testing mode)
    if choice!='send':
        logfile.close()
    else:
        pass
    # TODO fix this attempted close
    # smtpObj.quit() # close SMTP connection
    return
def makecardmess(row, cards):
    ''' Return a request-for-CYC-card paragraph for one player, or '' when no
    card is needed (card image already on file, or team level doesn't require
    one). Called by emailparent_tk / notifyparent.

    Args:
        row: player signup record (Series) with Plakey, Team, First, Last
        cards: dict of card images keyed by player-key string (from findcards)
    '''
    needcard=("$FIRST $LAST needs a CYC ID card to play on this team and we do not have one in our files."
    "If your child already has this ID card, please take a picture of it and e-mail to <EMAIL>."
    "If you don't have one, you can get one online at: https://idcards.cycstl.net/ or at uniform night. "
    "For this you need: 1) picture of the child 2) child's birth certificate (or birth document) and 3) $5 fee")
    if str(row.Plakey) in cards: # card already on file
        return ''
    # Teams without '-' are non-CYC-level (junior) or transfer teams
    if '-' not in row.Team:
        if '#' not in row.Team:
            return '' # junior Cabrini team doesn't require card
        # transfer team: grade is the first character after '#' ('K' -> 0)
        gradechar=row.Team.split('#')[1][0:1].replace('K','0')
        try:
            if int(gradechar)<2: # judge dowd or junior transfer team
                return ''
        except ValueError:
            print("couldn't determine grade for transfer team")
            return ''
    # all remaining players need a card
    filled=needcard.replace('$FIRST', row.First).replace('$LAST', row.Last)
    return textwrap.fill(filled, width=100)
'''TESTING
makerecmess('teamname', 2, 'T-ball', 14)
textwrap.fill(recmess, width=80)
'''
def makerecmess(team, grade, sport, numplayers):
    ''' Return the standard "could use more players" blurb when a Cabrini
    team's roster is below the rough minimum for its sport/grade, else ''.

    Args:
        team: team name ('#' marks a transfer team -- never recruits)
        grade: numeric grade or 'K'
        sport: sport name ('VB', 'Soccer', 'Basketball', 'T-ball', ...)
        numplayers: current roster size
    '''
    recmess=('This team could use more players. If you know anyone who is interested,'
    'please inform us at <EMAIL>.')
    recmess=textwrap.fill(recmess, width=100)
    if '#' in team: # no recruiting for transfer teams
        return ''
    grade=0 if grade=='K' else int(grade)
    if sport=='Soccer':
        # roster needs vary with format: 11v11 (gr 5+), 8v8 (gr 2-4),
        # 7v7 (gr 1), 6v6 (K)
        if grade>=5:
            needed=16
        elif grade>=2:
            needed=13
        elif grade==1:
            needed=12
        else:
            needed=11
    else:
        # flat thresholds for the other sports; unknown sports never recruit
        needed={'VB':8, 'Basketball':11, 'T-ball':14, 'Baseball':14, 'Softball':14}.get(sport, 0)
    return recmess if numplayers<needed else ''
def emailcoach_tk(teams, coaches, gdrivedict):
    ''' tk interface for e-mails to team coaches
    some required datasets (players, famcontact, mastersignups) are directly loaded depending on choice
    message types (mtypes) are:
    unis - send summary of missing uniforms to team coaches
    contacts - send contacts and current google drive link
    bills - send summary of outstanding bills

    Args:
        teams (pd.DataFrame): current team list
        coaches (pd.DataFrame): coach roster keyed by 'Coach ID'
        gdrivedict (dict): find/replace map of google drive links for message templates
    '''
    root = tk.Tk()
    root.title('Send e-mail to coaches')
    unifilename=tk.StringVar()
    try:
        unifiles=glob.glob('missingunilist*') # find most recent uniform file name
        if len(unifiles)>1:
            unifile=findrecentfile(unifiles) # return single most recent file
        else:
            unifile=unifiles[0]
        # find most recent missing uni file name
        unifilename.set(unifile)
    except: # handle path error
        unifilename.set('missingunilist.csv')
    billname=tk.StringVar() # file
    try:
        billfiles=glob.glob('billlist*')
        if len(billfiles)>1:
            billfile=findrecentfile(billfiles) # return single most recent file
        else:
            billfile=billfiles[0]
        # find most recent billlist file name
        billname.set(billfile)
    except:
        # BUG FIX: default was misspelled 'billist.csv'; match the glob pattern above
        billname.set('billlist.csv')
    asstbool=tk.BooleanVar() # optional labelling of elements
    emailtitle=tk.StringVar() # e-mail title
    mtype=tk.StringVar() # coach message type
    messfile=tk.StringVar() # text of e-mail message
    choice=tk.StringVar() # test or send -mail
    # Functions to enable/disable relevant checkboxes depending on radiobutton choice
    def Uniopts():
        ''' Disable irrelevant checkboxes '''
        billentry.config(state=tk.DISABLED)
        unientry.config(state=tk.NORMAL)
        messfile.set('coach_email_outstanding_unis.txt')
        # clear current team selector... this autoloads oldteams
        for i, val in enumerate(teamdict):
            teamlist[i].set(0)
        emailtitle.set('Return of uniforms for your Cabrini team')
    def Contactopts():
        ''' Disable irrelevant checkboxes '''
        billentry.config(state=tk.DISABLED)
        unientry.config(state=tk.DISABLED)
        messfile.set('coach_email_contacts.txt')
        emailtitle.set('Contact list for your Cabrini team')
    def Billopts():
        ''' Disable irrelevant checkboxes '''
        billentry.config(state=tk.NORMAL)
        unientry.config(state=tk.DISABLED)
        messfile.set('coach_email_outstanding_bills.txt')
        emailtitle.set('Fees still owed by your Cabrini team')
    def Otheropts():
        ''' Display relevant choices for other generic message to parents '''
        billentry.config(state=tk.DISABLED)
        unientry.config(state=tk.DISABLED)
        messfile.set('temp_message.txt')
        emailtitle.set('Message from Sponsors Club')
    # e-mail title and message file name
    rownum=0
    tk.Label(root, text='Title for e-mail').grid(row=rownum, column=0)
    titleentry=tk.Entry(root, textvariable=emailtitle)
    titleentry.config(width=30)
    titleentry.grid(row=rownum, column=1)
    rownum+=1
    tk.Label(root, text='messagefile').grid(row=rownum, column=0)
    messentry=tk.Entry(root, textvariable=messfile)
    messentry.config(width=30)
    messentry.grid(row=rownum, column=1)
    rownum+=1
    # Radio buttons select the message type (and its template/title defaults)
    tk.Radiobutton(root, text='Missing uniforms', value='Unis', variable = mtype, command=Uniopts).grid(row=rownum, column=0)
    tk.Radiobutton(root, text='Send contact info', value='Contacts', variable = mtype, command=Contactopts).grid(row=rownum, column=1)
    tk.Radiobutton(root, text='Send bill info', value='Bills', variable = mtype, command=Billopts).grid(row=rownum, column=2)
    tk.Radiobutton(root, text='Other message', value='Other', variable = mtype, command=Otheropts).grid(row=rownum, column=3)
    rownum+=1
    asstcheck=tk.Checkbutton(root, variable=asstbool, text='Email asst coaches?')
    asstcheck.grid(row=rownum, column=0) # can't do immediate grid or nonetype is returned
    rownum+=1
    tk.Label(root, text='Bill_list file name').grid(row=rownum, column=0)
    billentry=tk.Entry(root, textvariable=billname)
    billentry.grid(row=rownum, column=1)
    rownum+=1
    tk.Label(root, text='Missing uni file name').grid(row=rownum, column=0)
    unientry=tk.Entry(root, textvariable=unifilename)
    unientry.grid(row=rownum, column=1)
    rownum+=1
    # insert team selector
    # Specific team selector section using checkboxes
    teamdict=shortnamedict(teams)
    teamlist=[] # list of tk bools for each team
    # Make set of bool/int variables for each team
    for i, val in enumerate(teamdict):
        teamlist.append(tk.IntVar())
        if '#' not in val:
            teamlist[i].set(1) # Cabrini teams checked by default
        else:
            teamlist[i].set(0) # transfer team
    # make checkbuttons for each team
    for i, val in enumerate(teamdict):
        thisrow=i%5+1+rownum # five-row column setup
        thiscol=i//5
        thisname=teamdict.get(val,'')
        tk.Checkbutton(root, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
    rownum+=math.ceil(len(teamlist)/5)+2
    # Decision buttons bottom row
    def chooseall(event):
        ''' Select all teams '''
        for i, val in enumerate(teamdict):
            teamlist[i].set(1)
    def clearall(event):
        ''' deselect all teams '''
        for i, val in enumerate(teamdict):
            teamlist[i].set(0)
    def abort(event):
        choice.set('abort')
        root.destroy()
    def test(event):
        choice.set('test')
        root.destroy()
    def KCtest(event):
        choice.set('KCtest')
        root.destroy()
    def send(event):
        choice.set('send')
        root.destroy()
    rownum+=1
    d=tk.Button(root, text='All teams')
    d.bind('<Button-1>', chooseall)
    d.grid(row=rownum, column=0)
    d=tk.Button(root, text='Clear teams')
    d.bind('<Button-1>', clearall)
    d.grid(row=rownum, column=1)
    d=tk.Button(root, text='Abort')
    d.bind('<Button-1>', abort)
    d.grid(row=rownum, column=2)
    d=tk.Button(root, text='Test')
    d.bind('<Button-1>', test)
    d.grid(row=rownum, column=3)
    d=tk.Button(root, text='KCtest')
    d.bind('<Button-1>', KCtest)
    d.grid(row=rownum, column=4)
    d=tk.Button(root, text='Send')
    d.bind('<Button-1>', send)
    d.grid(row=rownum, column=5)
    root.mainloop()
    if choice.get()!='abort':
        kwargs={}
        if choice.get()=='KCtest':
            kwargs.update({'KCtest':True})
            kwargs.update({'choice':'send'})
        else:
            kwargs.update({'choice':choice.get()}) # send, KCtest (internal) or test (to log file)
        if asstbool.get()==True:
            kwargs.update({'asst':True}) # Optional send to asst. coaches if set to True
        emailtitle=emailtitle.get()
        messagefile='messages\\'+messfile.get()
        # Handle selection of team subsets
        selteams=[]
        for i, val in enumerate(teamdict):
            if teamlist[i].get()==1:
                selteams.append(val)
        # Filter teams based on checkbox input
        teams=teams[teams['Team'].isin(selteams)]
        teams=teams.drop_duplicates(['Team','Sport'])
        if mtype.get()=='Contacts':
            mtype='contacts'
            try:
                Mastersignups = pd.read_csv('master_signups.csv', encoding='cp437')
                players= pd.read_csv('players.csv', encoding='cp437')
                famcontact= pd.read_csv('family_contact.csv', encoding='cp437')
                kwargs.update({'SUs':Mastersignups,'players':players,'famcontact':famcontact})
            except:
                print('Problem loading mastersignups, players, famcontact')
                return
        elif mtype.get()=='Bills':
            mtype='bills'
            try:
                Mastersignups = pd.read_csv('master_signups.csv', encoding='cp437')
                # BUG FIX: read the file name from the tk entry (billname), not the
                # local billfile str (which has no .get() and may be undefined if the
                # glob above failed) -- the old code always raised inside this try
                billlist=pd.read_csv(billname.get(), encoding='cp437')
                kwargs.update({'bills':billlist, 'SUs':Mastersignups})
                # BUG FIX: removed second kwargs.update that referenced undefined
                # players/famcontact locals (NameError silently caught below)
            except:
                print('Problem loading billlist, mastersignups')
                return
        elif mtype.get()=='Unis':
            mtype='unis'
            try:
                missing=pd.read_csv(unifilename.get(), encoding='cp437')
                # NOTE(review): 'sheetname' was renamed 'sheet_name' in pandas>=0.21
                # and removed in 1.0 -- confirm installed pandas version
                oldteams=pd.read_excel('Teams_coaches.xlsx', sheetname='Oldteams') # loads all old teams in list
                kwargs.update({'oldteams':oldteams,'missing':missing})
            except:
                print('Problem loading missingunis, oldteams')
                return
        elif mtype.get()=='Other':
            # BUG FIX: pass a plain string (not the tk StringVar) like other branches
            mtype='other'
        emailcoaches(teams, coaches, mtype, emailtitle, messagefile, gdrivedict, **kwargs)
    return
def maketextsched(sched,teams, coaches, fields, messagefile, logfile, **kwargs):
    ''' Concise textable game schedule for cell only people from extracted Cabrini schedule

    Args:
        sched (pd.DataFrame): extracted game schedule (Date/Time/Day/Location, opt. Team)
        teams, coaches (pd.DataFrame): rosters, merged on 'Coach ID'
        fields (pd.DataFrame): field name/address lookup for makefieldtable
        messagefile (str): template under messages\\ with $SCHEDULE/$FIELDTABLE etc.
        logfile (str): output text file receiving one message per team
    kwargs:
        div -- restrict to a single division, e.g. '3G'
    '''
    # Convert dates/ times from timestamp to desired string formats for proper output
    if type(sched.iloc[0]['Time'])==datetime.time:
        sched.Time=sched.Time.apply(lambda x:datetime.time.strftime(x, format='%I:%M %p'))
    else:
        print('Time format is', type(sched.iloc[0]['Time']))
    # BUG FIX: this previously tested Date against datetime.time (copy-paste from the
    # Time branch above) so timestamps were never converted and the string
    # concatenation of row.Date below raised TypeError
    if not isinstance(sched.iloc[0]['Date'], str):
        sched['Date']=pd.to_datetime(sched['Date']).dt.strftime(date_format='%d-%b-%y')
    else:
        print('Date format is', type(sched.iloc[0]['Date']))
    if 'div' in kwargs:
        div=kwargs.get('div','')
        grade=int(div[0])
        if div[1].upper()=='G':
            gender='f'
        elif div[1].upper()=='B':
            gender='m'
        else:
            gender='' # unknown division letter -> empty filter instead of NameError
        teams=teams[(teams['Grade']==grade) & (teams['Gender']==gender)]
    log=open(logfile,'w', encoding='utf-8')
    myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
    # Make list of sport/team/school/graderange/coachinfo/playerlist
    teamlist=[]
    # Open generic message header
    with open('messages\\'+messagefile, 'r') as file:
        blankmess=file.read()
    for index, row in myteams.iterrows():
        # get school
        if '#' not in myteams.loc[index]['Team']:
            school='Cabrini'
            try:
                coachinfo=myteams.loc[index]['Fname']+' '+ myteams.loc[index]['Lname']+' ('+myteams.loc[index]['Email']+')'
            except:
                coachinfo=''
        else:
            school=myteams.loc[index]['Team'].split('#')[0]
            coachinfo=''
        # Get gender
        if row.Gender.lower()=='f':
            gender='girls'
        elif row.Gender.lower()=='m':
            gender='boys'
        else:
            gender='' # BUG FIX: avoid NameError on unexpected gender value
            print('Problem finding team gender')
        grrang=str(myteams.loc[index]['Graderange'])
        if len(grrang)==2:
            grrang=grrang[0]+'-'+grrang[1]
        # NOTE(review): ranges ending in 1 get 'th' (e.g. 'K-1th') -- confirm intended
        if grrang.endswith('2'):
            grrang+='nd'
        elif grrang.endswith('3'):
            grrang+='rd'
        else:
            grrang+='th'
        grrang=grrang.replace('0','K')
        # Get sport, team, graderange, coach info (first/last/e-mail), playerlist
        teamlist.append([myteams.loc[index]['Sport'], myteams.loc[index]['Team'], school,
                         grrang, gender, coachinfo, myteams.loc[index]['Playerlist']])
    # get dictionary of teams found/matched in CYC schedule
    for i, [sport, team, school, graderange, gender, coachinfo, playerlist] in enumerate(teamlist):
        # Either have cabrini only schedule or full CYC schedule
        if 'Team' in sched:
            thissched=sched[sched['Team']==team].copy()
            thissched=thissched[['Date','Time','Day', 'Location']]
        else:
            print("Couldn't find schedule for", school, str(graderange), sport, team)
            continue
        if len(thissched)==0:
            print('Games not found for ', team)
            continue
        # construct textable message in log: one line per game
        games=''
        for index, row in thissched.iterrows():
            # output date, day, time, location
            games+=row.Date+' '+row.Day+' '+row.Time+' '+row.Location+'\n'
        thismess=blankmess.replace('$SCHEDULE', games)
        thismess=thismess.replace('$GRADERANGE', graderange)
        thismess=thismess.replace('$GENDER', gender)
        thismess=thismess.replace('$SPORT', sport)
        # now create/ insert location and address table
        thisft=makefieldtable(thissched, fields)
        myfields=''
        for index, row in thisft.iterrows():
            # output location and street address
            myfields+=row.Location+' '+row.Address+'\n'
        thismess=thismess.replace('$FIELDTABLE', myfields)
        log.write(thismess+'\n')
    log.close()
    return
''' TESTING
teamnamedict=findschteams(sched, teams, coaches)
'''
''' TESTING
sched=pd.read_csv('Cabrini_Bball2018_schedule.csv')
sport, team, school, graderange, gender, coachinfo, playerlist=teamlist[0] i=0
recipients=['<EMAIL>','<EMAIL>']
'''
def sendschedule(teams, sched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess, **kwargs):
    ''' Top level messaging function for notifying families of team assignment/ CYC card
    + optional short-team-player-recruiting
    via custom e-mail; one per player
    currently not including SMS
    kwargs:
        choice - 'send', 'KCtest' (internal live send) or 'test' (defaults to test -> log file)
        recruit - T or F -- add recruiting statement for short teams
        mformat - not really yet used ... just sending as text not html
    '''
    # convert date- time from extracted schedule to desired str format
    # type will generally be string (if reloaded) or timestamp (if direct from prior script)
    if type(sched.iloc[0]['Time'])!=str:
        # Then convert timestamp to datetime to desired string format
        sched.Time=sched.Time.apply(lambda x:pd.to_datetime(x).strftime(format='%I:%M %p'))
    if type(sched.iloc[0]['Date'])==str:
        try:
            sched.Date=pd.to_datetime(sched.Date, format='%m/%d/%Y')
        except:
            try:
                sched.Date=pd.to_datetime(sched.Date, format='%Y-%m-%d')
            except:
                print('Difficulty converting date with format', type(sched.iloc[0]['Date']))
    # convert to desired date string format
    sched['Date']=sched['Date'].dt.strftime(date_format='%d-%b-%y')
    choice=kwargs.get('choice','test')
    if choice=='send' or choice=='KCtest':
        smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
        smtpObj.ehlo() # say hello
        smtpObj.starttls() # enable encryption for send
        print('Enter password for sponsors club gmail ')
        passwd=input()
        smtpObj.login('<EMAIL>', passwd)
    else: # testing only... open log file
        logfile=open(cnf._OUTPUT_DIR+'\\parent_email_log.txt','w', encoding='utf-8')
    # this years signups only (later match for sport and team)
    Mastersignups=Mastersignups[Mastersignups['Year']==year]
    # Should be only one entry per coach
    myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
    # Make list of sport/team/school/graderange/coachinfo/playerlist
    teamlist=[]
    for index, row in myteams.iterrows():
        # get school
        if '#' not in myteams.loc[index]['Team']:
            school='Cabrini'
            try:
                coachinfo=row.Fname+' '+ row.Lname+' ('+row.Email+')'
            except:
                coachinfo=''
        else:
            school=row.Team.split('#')[0]
            coachinfo=''
        # Get gender
        if row.Gender.lower()=='f':
            gender='girls' # BUG FIX: was 'girl', inconsistent with maketextsched/emailcoaches
        elif row.Gender.lower()=='m':
            gender='boys'
        else:
            gender='' # BUG FIX: avoid NameError on unexpected gender value
            print('Problem finding team gender')
        # Get sport, team, graderange, coach info (first/last/e-mail), playerlist
        teamlist.append([row.Sport, row.Team, school, gradetostring(row.Graderange),
                         gender, coachinfo, row.Playerlist])
    # get dictionary of teams found/matched in CYC schedule
    teamnamedict=findschteams(sched, teams, coaches)
    for i, [sport, team, school, graderange, gender, coachinfo, playerlist] in enumerate(teamlist):
        # Either have cabrini only schedule or full CYC schedule
        if 'Team' in sched:
            thissched=sched[sched['Team']==team].copy()
            # shorten team name
            thissched['Home']=thissched['Home'].str.split('/').str[0]
            thissched['Away']=thissched['Away'].str.split('/').str[0]
            thissched['Home']=thissched['Home'].str.strip()
            thissched['Away']=thissched['Away'].str.strip()
            # Times/dates already reformatted
            thissched=thissched[['Date','Time','Day','Home','Away','Location']]
        else: # handle if an unsorted CYC schedule (not Cab only)
            if team in teamnamedict:
                [div,schname]=teamnamedict.get(team,'')
                thissched=getgameschedule(div,schname, sched)
                thissched=thissched[['Date','Time','Day','Division','Home','Away','Location']]
            else:
                print("Couldn't find schedule for", school, str(graderange), sport, team)
                continue
        if len(thissched)==0:
            print('Games not found for ', team)
            continue
        thisteam=Mastersignups[(Mastersignups['Sport']==sport) & (Mastersignups['Team']==team)]
        thisteam=pd.merge(thisteam, famcontact, on='Famkey' , how='left', suffixes =('','_2'))
        # Make all team-specific replacements in message body and email title
        thisteammess=blankmess
        thistitle=emailtitle
        # have to use caution due to $TEAMTABLE (common) and $TEAMNAME (rarely used)
        for j, col in enumerate(['$SPORT', '$TEAMNAME', '$SCHOOL', '$GRADERANGE', '$GENDER', '$COACH', '$PLAYERLIST']):
            # BUG FIX: was "if j!='$SPORT'" (int vs str, always True) so the sport
            # name was never lower-cased for replacement
            if col!='$SPORT':
                val=teamlist[i][j]
            else: # lower-case sport name for replace
                val=teamlist[i][j].lower()
            try:
                thisteammess=thisteammess.replace(col, textwrap.fill(val, width=100))
                thistitle=thistitle.replace(col, val)
            except:
                print("Problem with teamname", val)
                continue
        # Convert thissched to string table and insert into message
        thisteammess=thisteammess.replace('$SCHEDULE', thissched.to_string(index=False, justify='left'))
        #Make and insert field table
        thisft=makefieldtable(thissched, fields)
        thisteammess=thisteammess.replace('$FIELDTABLE', thisft.to_string(index=False, justify='left'))
        # Get coach emails
        recipients=getcoachemails(team, teams, coaches, **{'asst':True})
        # Now get all unique team email addresses (single message to coach and team)...drops nan
        recipients=getallteamemails(thisteam, recipients)
        if choice=='KCtest': # internal send test
            recipients=['<EMAIL>','<EMAIL>']
            choice='send'
        # Create custom email message (can have multiple sports in df)
        if choice=='send':
            try: # single simultaneous e-mail to all recipients
                msg=MIMEText(thisteammess,'plain')
                msg['Subject'] = thistitle
                msg['From'] = 'Cabrini Sponsors Club <<EMAIL>>'
                msg['To']=','.join(recipients)
                smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
                print ('Message sent to ', ','.join(recipients))
            except:
                print('Message to ', team, 'failed.')
            if not recipients:
                print('No email addresses for team ', team)
        else: # Testing mode ... just write to log w/o e-mail header and such
            logfile.write(thistitle+'\n')
            logfile.write(thisteammess+'\n')
    # close log file (testing mode) or SMTP connection (send mode)
    if choice!='send':
        logfile.close()
    else:
        smtpObj.quit() # BUG FIX: previously a TODO; cleanly close SMTP connection
    return
# TESTING
#%%
def makegcals(sched, teams, coaches, fields, season, year, duration=1, **kwargs):
    ''' Turn standard CYC calendar into google calendar
    description: 1-2 girls soccer vs opponent

    Args:
        sched, teams, coaches, fields (pd.DataFrame): schedule/rosters/field addresses
        season (str), year (int): used in the combined-calendar file name
        duration (int): game length in hours (sets 'End Time')
    kwargs:
        div - get only calendar for given division
        school - Cabrini ... drop transfer teams w/ #
        splitcal - separate calendar for each team (default True)
    '''
    #TODO ... Test after alteration of address field
    if 'school' in kwargs:
        if kwargs.get('school','')=='Cabrini':
            # drop transfer teams w/ #
            teams=teams[~teams['Team'].str.contains('#')]
    if 'div' in kwargs:
        div=kwargs.get('div','')
        grade=int(div[0])
        if div[1].upper()=='G':
            gender='f'
        elif div[1].upper()=='B':
            gender='m'
        else:
            gender='' # unknown division letter -> empty filter instead of NameError
        teams=teams[(teams['Grade']==grade) & (teams['Gender']==gender)]
    # ensure correct formats for separate date and time columns
    if type(sched.iloc[0]['Date'])==str:
        try: # format could be 10/18/2018 0:00
            sched.Date=sched.Date.str.split(' ').str[0]
            sched.Date=pd.to_datetime(sched.Date, format='%m/%d/%Y')
        except:
            pass
        try:
            sched.Date=pd.to_datetime(sched.Date, format='%m/%d/%Y')
        except:
            pass
        try:
            sched.Date=pd.to_datetime(sched.Date, format='%Y-%m-%d')
        except:
            print('Problem converting date format of ', sched.iloc[0]['Date'])
    # gcal needs %m/%d/%Y
    sched['Date']=sched['Date'].dt.strftime(date_format='%m/%d/%Y')
    # Common reformatting of all gcals
    sched=sched.rename(columns={'Date':'Start Date','Time':'Start Time'})
    # Calculate end time while still a timestamp
    # BUG FIX: honor the duration parameter (was hard-coded to 1 hour)
    sched['End Time']=pd.to_datetime(sched['Start Time']) + datetime.timedelta(hours=duration)
    sched['End Time']=pd.to_datetime(sched['End Time'])
    sched['End Time']=sched['End Time'].apply(lambda x:pd.to_datetime(x).strftime('%I:%M %p'))
    # Then convert timestamp to datetime to desired string format
    sched['Start Time']=sched['Start Time'].apply(lambda x:pd.to_datetime(x).strftime(format='%I:%M %p'))
    # Standard google calendar column names
    gcalcols=['Subject','Start Date', 'Start Time', 'End Date','End Time', 'All Day Event', 'Description', 'Location','Private']
    sched['All Day Event']='FALSE'
    sched['Private']='FALSE'
    sched['End Date']=sched['Start Date']
    # append short address to location field
    sched=pd.merge(sched, fields, on='Location', how='left', suffixes=('','_2'))
    # replace blank address (in case not found but shouldn't happen)
    sched['Address']=sched['Address'].replace(np.nan,'')
    sched['Location']=sched['Location']+' '+sched['Address']
    # Cabrini extracted schedule has team name column
    teamlist=np.ndarray.tolist(sched.Team.unique())
    shortnames=shortnamedict2(teams)
    # Get head coach email for team from coaches list
    teams=pd.merge(teams, coaches, how='left', on=['Coach ID'], suffixes=('','_2'))
    # Optional single calendar format
    if not kwargs.get('splitcal', True):
        combocal=pd.DataFrame(columns=gcalcols)
    for i, team in enumerate(teamlist):
        # .copy() avoids SettingWithCopyWarning on the column assignments below
        thissch=sched[sched['Team']==team].copy()
        # Need to get associated sport from teams
        match=teams[teams['Team']==team]
        if len(match)==1:
            sport=match.iloc[0]['Sport']
            email=match.iloc[0]['Email']
        else:
            sport=''
            email=''
            print('Sport not found for team', team)
            # skip these teams (usually non-Cabrini team w/ Cab players)
            continue
        # Make unique description column
        descr=shortnames.get(team,'')+' '+ sport.lower()
        # Use 1-2nd girl soccer as calendar event title/subject
        thissch['Subject']=descr
        # Prepend grade/gender sport string to team opponents
        thissch['Description']=thissch['Home'].str.split('/').str[0] +' vs '+thissch['Away'].str.split('/').str[0]
        # prepend string 1-2 girls soccer to each event
        thissch['Description']=thissch['Description'].apply(lambda x:descr+': '+x)
        cancel='Contact '+str(email)+' for cancellation/reschedule info'
        # Add line w/ coach email for cancellation
        thissch['Description']=thissch['Description'].apply(lambda x:x+'\r\n'
            + cancel)
        thissch=thissch[gcalcols]
        if kwargs.get('splitcal', True): # separate save of gcal for each team
            fname=cnf._OUTPUT_DIR+'\\gcal_'+descr+'.csv'
            thissch.to_csv(fname, index=False)
        else: # add to single jumbo cal
            combocal=pd.concat([combocal, thissch], ignore_index=True)
    if not kwargs.get('splitcal', True):
        fname=cnf._OUTPUT_DIR+'\\Cabrini_gcal_'+season.lower()+str(year)+'.csv'
        combocal.to_csv(fname, index=False)
    return
#%%
def getgameschedule(div, schname, sched):
    ''' Find and extract game schedule for team with matching name '''
    # Normalize CYC schedule headers to the internal column names
    colmap = {'Game Time': 'Time', 'Division Name': 'Division',
              'Field Name': 'Location', 'Visitor': 'Away',
              'AwayTeam': 'Away', 'Home Team': 'Home'}
    games = sched.rename(columns=colmap)
    # Restrict to this division, then to games this team plays (home or away)
    games = games[games['Division'].str.startswith(div)]
    plays = games['Home'].str.contains(schname) | games['Away'].str.contains(schname)
    # already sorted and date-time strings already converted to preferred format in getcabsch
    return games[plays]
def findschteams(sched, teams, coaches):
    ''' Find team names as listed in schedule or elsewhere based on division (e.g. 1B)
    plus coach and/or school
    return dictionary with all internal names and associated CYC schedule div & name '''
    sched=sched.rename(columns={'Game Time':'Time','Field Name':'Location','AwayTeam':'Away','Home Team':'Home'})
    sched=sched[pd.notnull(sched['Home'])] # drop unscheduled games
    # Get identifying info for all teams
    myteams=pd.merge(teams, coaches, on='Coach ID', how='left', suffixes=('','_2'))
    # Make list of sport/team/school/graderange/coachinfo/playerlist
    # Need double entry for double-rostered teams (marked with '!')
    double=myteams.copy()
    double=double[double['Team'].str.contains('!')]
    for index, row in double.iterrows():
        doublename=row.Team
        tok=doublename.split('-')
        name1=tok[0]+'-'+tok[1]+'-'+tok[2].split('!')[0]+'-'+tok[3]
        name2=tok[0]+'-'+tok[1]+'-'+tok[2].split('!')[1]+'-'+tok[3]
        # BUG FIX: DataFrame.set_value was removed in pandas 1.0; use .at
        double.at[index, 'Team'] = name2
        # regex=False: doublename is a literal team string, not a pattern
        myteams['Team']=myteams['Team'].str.replace(doublename, name1, regex=False)
    # BUG FIX: DataFrame.append was removed in pandas 2.0; use pd.concat
    myteams=pd.concat([myteams, double])
    # First find all Cabrini teams
    teamnamedict={}
    for index, row in myteams.iterrows():
        # get division from the team name for Cabrini teams
        if '-' in row.Team:
            school='Cabrini'
            coach=str(row.Lname)
            try:
                tok=row.Team.split('-')
                div=tok[2]
            except:
                print("Couldn't find division for", row.Team)
                continue
        # non-cabrini team w/ transfers
        elif '#' in row.Team:
            school=row.Team.split('#')[0]
            coach=str(row.Coach)
            if '??' in coach:
                coach='nan'
            if row.Gender=='m':
                div=str(row.Grade)+'B'
            else:
                div=str(row.Grade)+'G'
        else: # typically junior teams
            print("no schedule for ", row.Team)
            continue
        # Get sport, school, division, coach last nameteam, graderange, coach info (first/last/e-mail), playerlist
        thisdiv=sched[sched['Division'].str.startswith(div)]
        # On rare occasions teams can only have away games
        divteams=np.ndarray.tolist(thisdiv['Home'].unique())
        divteams.extend(np.ndarray.tolist(thisdiv['Away'].unique()))
        divteams=set(divteams)
        divteams=list(divteams)
        # find this schools teams
        thisteam=[team for team in divteams if school.lower() in team.lower()]
        # handle multiple teams per grade
        if len(thisteam)>1:
            thisteam=[team for team in thisteam if coach.lower() in team.lower()]
            if len(thisteam)>1: # Same last name? use exact coach match
                coach=str(myteams.loc[index]['Coach'])
                # BUG FIX: coach was compared un-lowered against team.lower()
                thisteam=[team for team in thisteam if coach.lower() in team.lower()]
        if len(thisteam)==1: # found unique name match
            # Need division and name due to duplicates problem
            try:
                teamnamedict.update({row.Team: [div, thisteam[0].strip()]})
            except:
                print("Couldn't hash", row.Team)
        else:
            print("Couldn't find unique schedule team name for", row.Team, div)
    return teamnamedict
def shortnamedict(teams):
    ''' From teams list, make shortened name dictionary for tk display (i.e. 1G-Croat or 3G-Ambrose)'''
    shortnames = {}
    for _, row in teams.iterrows():
        longname = row['Team']
        # transfer teams ('School#grade') display the school, others the coach name
        if '#' in longname:
            label = longname.split('#')[0]
        else:
            label = str(row['Coach'])
        gend = 'B' if row['Gender'] == 'm' else 'G'
        # kindergarten is stored as 0 but displayed as K
        grades = str(row['Graderange']).replace('0', 'K')
        shortnames[longname] = grades + gend + '-' + label
    return shortnames
def shortnamedict2(teams):
    ''' From teams list, make shortened name dictionary for gcal (i.e. 1-2 girls)

    Returns:
        dict: team name -> short grade/gender label, e.g. '1-2nd girls'
    '''
    teamdict={}
    for index, row in teams.iterrows():
        if teams.loc[index]['Gender']=='m':
            gend=' boys'
        else:
            gend=' girls'
        grrange=str(teams.loc[index]['Graderange'])
        grrange=grrange.replace('0','K') # kindergarten stored as 0
        if len(grrange)>1:
            grrange=grrange[0]+'-'+grrange[1]
        # ordinal suffix on the (last) grade digit
        if grrange.endswith('1'):
            # BUG FIX: '1' previously got no suffix ('1 girls' instead of '1st girls')
            grrange+='st'
        elif grrange.endswith('2'):
            grrange+='nd'
        elif grrange.endswith('3'):
            grrange+='rd'
        try:
            if int(grrange[-1]) in range(4,9):
                grrange+='th'
        except:
            pass # non-numeric last char (e.g. 'K' or an added suffix)
        thisname=grrange+gend
        teamdict.update({teams.loc[index]['Team']:thisname})
    return teamdict
def findrecentfile(filelist):
    ''' Return most recently dated file from list of autonamed files .. date format is always 27Jan17 '''
    # pull the date token out of names like 'missingunilist_27Jan17.csv'
    stamps = [name.split('_')[1].split('.')[0] for name in filelist]
    try:
        parsed = [datetime.datetime.strptime(stamp, "%d%b%y") for stamp in stamps]
        # first file carrying the newest date
        return filelist[parsed.index(max(parsed))]
    except:
        print('File date comparison failed... using first one')
        return filelist[0]
'''TESTING
missing=pd.read_csv('missingunilist_29Dec17.csv')
'''
def emailcoaches(teams, coaches, mtype, emailtitle, messagefile, gdrivedict, **kwargs):
    ''' Send e-mails to all coach: types are contacts, bills, unis (missing uniforms info)
    various dfs are passed via kwargs when necessary
    4/1/17 works for contacts and unpaid bill summary
    HTML message w/ plain text alternate
    kwargs: choice -- send, test or KCtest (real internal send )
            SUs, bills, players, famcontact, missing, oldteams -- dataframes per mtype
    '''
    choice=kwargs.get('choice','test') # send or test (KCtest kwarg true set separately)
    print(choice)
    if choice=='send': # true send or internal live send to tkc@wustl
        smtpObj = smtplib.SMTP('smtp.gmail.com', 587) # port 587
        smtpObj.ehlo() # say hello
        smtpObj.starttls() # enable encryption for send
        print('Enter password for sponsors club gmail ')
        passwd=input()
        smtpObj.login('<EMAIL>', passwd)
    else: # testing only... open log file
        logfile=open('coach_email_log.txt','w', encoding='utf-8')
    # Iterate over teams
    teaminfo=[] # list w/ most team info
    if mtype=='unis' and 'missing' in kwargs:
        # Iterate only over old teams with missing uniforms
        missing=kwargs.get('missing',pd.DataFrame())
        for index, row in missing.iterrows():
            if row.Gender.lower()=='f':
                gend='girls'
            else:
                gend='boys'
            # coach and graderange are nan... not needed for missing unis
            teaminfo.append([row.Year, row.Sport, gradetostring(row.Grade), gend, row.Team, 'coach',
                             'graderange', row.Number])
        # Replace teams with oldteams
        teams=kwargs.get('oldteams',pd.DataFrame())
    else: # iterate through current teams for contacts, bills, cards
        for index, row in teams.iterrows():
            if row.Gender.lower()=='f':
                gend='girls'
            else:
                gend='boys'
            teaminfo.append([row.Year, row.Sport, gradetostring(row.Grade), gend, row.Team, row.Coach,
                             gradetostring(row.Graderange), row.Number])
    with open(messagefile,'r') as mess:
        message=mess.read() # generic e-mail message body with limited find/replace
    # insert up to date google drive link for this season (various options)
    if 'GDRIVE' in message: # global replacements (for all teams)
        for key, val in gdrivedict.items():
            message=message.replace(key, val)
    for i, [year, sport, grade, gender, team, coach, graderange, numplayers] in enumerate(teaminfo):
        # Make all team-specific replacements in message body and email title
        thisteammess=message
        thistitle=emailtitle
        for j, col in enumerate(['$YEAR', '$SPORT', '$GRADE', '$GENDER', '$TEAMNAME', '$COACH', '$GRADERANGE', '$NUMBER']):
            thisteammess=thisteammess.replace(col, str(teaminfo[i][j]))
            thistitle=thistitle.replace(col, str(teaminfo[i][j]))
        # Replace missing CYC cards list if requested by message
        if '$MISSINGCARDS' in message:
            if 'SUs' not in kwargs:
                print('Signups needed to find missing cards')
                return
            carddict=findcards()
            SUs=kwargs.get('SUs','')
            missingstr=findmissingcards(team, SUs, carddict)
            thisteammess=thisteammess.replace('$MISSINGCARDS', missingstr)
        # Get head coach e-mail address (list optionally incl. assistants in kwargs)
        if 'KCtest' not in kwargs:
            coachemail=getcoachemails(team, teams, coaches, **kwargs)
        else:
            if i==0: # send first message only as live test
                coachemail=['<EMAIL>','<EMAIL>']
            else:
                coachemail=[]
        if coachemail==[]: # list of head and asst coaches
            print('No valid coach e-mail for '+ team +'\n')
            continue
        # handle the special message cases
        if mtype=='bills': # replacement of teamtable for bills
            # BUG FIX: "'SUs' and 'bills' not in kwargs" only tested 'bills'
            if 'SUs' not in kwargs or 'bills' not in kwargs:
                print('Signups and billing list needed for e-mail send bill to coaches option.')
                return
            SUs=kwargs.get('SUs','')
            bills=kwargs.get('bills','')
            teambilltable=makebilltable(team, bills, SUs)
            if teambilltable=='': # team's all paid up, skip e-mail send
                print('All players paid for team'+team+'\n')
                continue # no e-mail message sent
            thismess_html=thisteammess.replace('$TEAMTABLE', teambilltable.to_html(index=False))
            thismess_plain=thisteammess.replace('$TEAMTABLE', teambilltable.to_string(index=False))
        elif mtype=='contacts': # replacement of teamtable for contact
            # BUG FIX: chained "and ... not in kwargs" only tested 'famcontact'
            if 'SUs' not in kwargs or 'players' not in kwargs or 'famcontact' not in kwargs:
                print('Signups, player and family contact info needed for contact lists to coaches option.')
                return
            SUs=kwargs.get('SUs','')
            # Collapse track sub-teams to single track team
            SUs['Team']=SUs['Team'].str.replace(r'track\d+', 'Track', case=False, regex=True)
            SUs=SUs[SUs['Year']==year] # in case of duplicated team name, filter by year
            players=kwargs.get('players','')
            famcontact=kwargs.get('famcontact','')
            contacttable=makecontacttable(team, SUs, players, famcontact)
            # Convert df to html and plain-text variants
            thismess_html=thisteammess.replace('$TEAMTABLE', contacttable.to_html(index=False))
            thismess_plain=thisteammess.replace('$TEAMTABLE', contacttable.to_string(index=False))
        elif mtype=='unis': # replacement of teamtable for uniform returns
            # BUG FIX: only 'missing' was effectively tested before; this branch needs
            # oldteams (consumed above) and missing -- SUs is NOT passed by the caller
            if 'oldteams' not in kwargs or 'missing' not in kwargs:
                print('Signups, old teams and missing uniform list needed for e-mail unis to coaches option.')
                return
            # in this case teams iterator with have old not current teams
            unitable=makeunitable(team, missing)
            thismess_html=thisteammess.replace('$TEAMTABLE', unitable.to_html(index=False))
            thismess_plain=thisteammess.replace('$TEAMTABLE', unitable.to_string(index=False))
        else: # generic message w/o $TEAMTABLE
            thismess_html=thisteammess
            thismess_plain=thisteammess
        if choice=='send':
            try:
                # Create message container - the correct MIME type is multipart/alternative.
                msg = MIMEMultipart('alternative') # message container
                # BUG FIX: use the team-customized title (thistitle was computed but unused)
                msg['Subject'] = thistitle
                msg['From'] = 'Cabrini Sponsors Club <<EMAIL>>'
                part1=MIMEText(thismess_plain,'plain')
                # BUG FIX: MIME subtype is 'html' ('alternate' is not a valid text subtype)
                part2=MIMEText(thismess_html,'html')
                msg['To']=','.join(coachemail) # single e-mail or list
                msg.attach(part1) # plain text
                msg.attach(part2) # html (last part is preferred)
                # Simultaneous send to all in recipient list
                smtpObj.sendmail('<EMAIL>', coachemail, msg.as_string())
                print ('Message sent to ', ','.join(coachemail))
            except:
                print('Message to ', ','.join(coachemail), ' failed.')
        else: # testing only... write to log file
            logfile.write(thistitle+'\n')
            logfile.write(thismess_plain+'\n')
    return
def gradetostring(val):
    ''' Turn a grade or grade range into a display string with ordinal ending;
    e.g. 23 becomes '2-3rd', 2 becomes '2nd', 4 becomes '4th' '''
    text = str(val)
    # two characters encode a grade range: 23 -> 2-3
    if len(text) == 2:
        text = text[0] + '-' + text[1]
    # ordinal suffix keyed on the final digit; default is 'th'
    suffix = {'1': 'st', '2': 'nd', '3': 'rd'}.get(text[-1:], 'th')
    return text + suffix
def getallteamemails(df, emails):
    ''' Get all unique e-mails associated with team.
    args:
        df -- family contact rows for the team; Email1, Email2 and Email3 columns present
        emails -- coach e-mails already extracted; included in the returned list
    returns: de-duplicated list of addresses with NaN entries dropped
    '''
    # bug fix: the passed coach emails were previously overwritten and ignored,
    # contradicting the documented contract -- seed the result from them instead
    allemails = list(emails)
    allemails.extend(df.Email1.unique().tolist())
    allemails.extend(df.Email2.unique().tolist())
    allemails.extend(df.Email3.unique().tolist())
    allemails = list(set(allemails))  # de-duplicate
    return [i for i in allemails if str(i) != 'nan']
def getcoachemails(team, teams, coaches, **kwargs):
    ''' Return head coach e-mail for the given team; optionally include assistant
    coaches via kwarg asst=True. Returns an empty list if the team or its coach
    ID cannot be found. '''
    # coed teams appear twice in the teams list; keep one row per team name
    roster = teams.drop_duplicates('Team')
    roster = roster[roster['Team'] == team]
    addresses = []
    coach_ids = []
    if len(roster) != 1:
        print(team, 'not found in current teams list')
        return addresses  # blank list
    roster = roster.dropna(subset=['Coach ID'])
    if len(roster) != 1:
        print('Coach ID not found for', team)
        return addresses  # blank list
    head_id = roster.iloc[0]['Coach ID']
    if head_id != '':  # possibly blank
        coach_ids.append(head_id)
    roster = roster.dropna(subset=['AssistantIDs'])
    # optional send to asst coaches; only when an AssistantIDs entry exists
    if kwargs.get('asst', False) and len(roster) == 1:
        raw = roster.iloc[0]['AssistantIDs']
        coach_ids.extend(str(tok).strip() for tok in raw.split(','))
    # resolve the collected CYC IDs to e-mail addresses
    matches = coaches[coaches['Coach ID'].isin(coach_ids)]
    matches = matches.dropna(subset=['Email'])
    addresses = matches.Email.unique().tolist()
    return addresses
def makeunitable(team, missing):
    ''' Build the missing-uniform table for auto-emailing to a head coach;
    rows come from unis identified as not yet returned from prior seasons.
    NOTE: a non-unique team name could pull in rows from another season. '''
    wanted = ['First', 'Last', 'Issue date', 'Sport', 'Year', 'Uniform#', 'Team']
    subset = missing.loc[missing['Team'] == team, wanted]
    # blank out NaNs for display
    return subset.fillna('')
def makecontacttable(team, SUs, players, famcontacts):
    ''' Build the team contact list for auto-emailing to a head coach:
    first, last, grade, school, phone/text/email 1 & 2. '''
    # signups for this team, enriched with school (players) and contact info (family)
    roster = SUs[SUs['Team'] == team]
    roster = pd.merge(roster, players, on='Plakey', how='left', suffixes=('', '_r'))
    roster = pd.merge(roster, famcontacts, on='Famkey', how='left', suffixes=('', '_r'))
    wanted = ['First', 'Last', 'Grade', 'Gender', 'School',
              'Phone1', 'Text1', 'Email1', 'Phone2', 'Text2', 'Email2']
    # blank out NaNs for display
    return roster[wanted].fillna('')
def makebilltable(team, billlist, Mastersignups):
    ''' Create billing summary table for an individual team, returned as an html
    string for insertion into the coach e-mail; called by the e-mail loop or the
    e-mail log test. Returns '' when the team has no outstanding bills (caller
    skips the e-mail on blank). '''
    # .copy() so the added Comments column never mutates the caller's billlist
    thisteam = billlist[billlist['Teams'].str.contains(team)].copy()
    if len(thisteam) == 0:
        return ''  # pass blank string and test for it (skip e-mail)
    # Construct comment with "also owes for Sibling basketball"
    thisteam['Comments'] = ''
    for index, row in thisteam.iterrows():
        SUs = thisteam.loc[index]['SUkeys']  # this family's current season signups
        if ',' in SUs:  # need to match the sibling (not on this team)
            SUkeys = [int(i) for i in SUs.split(',')]
            theseSUs = Mastersignups[Mastersignups['SUkey'].isin(SUkeys)]
            # other family signups... summarize in comments
            otherSUs = theseSUs[theseSUs['Team'] != team]
            otherSUs = otherSUs.sort_values(['Sport'], ascending=True)
            tempstr = 'also owes for: '
            for ind, ro in otherSUs.iterrows():
                tempstr += otherSUs.loc[ind]['First'] + ' '
                tempstr += otherSUs.loc[ind]['Sport'].lower()
            # .set_value was removed in pandas 1.0; .at is the scalar replacement
            thisteam.at[index, 'Comments'] = tempstr
    mycols = ['Family', 'Players', 'Charges', 'CurrPayments', 'Balance',
              'Email1', 'Phone1', 'Comments']
    thisteam['Balance'] *= -1  # flip sign so the e-mail shows the amount owed
    thisteam = thisteam[mycols]  # summary for insertion into e-mail to team's coaches
    return thisteam.to_html()  # table with this team's currently outstanding bills
def emailrecruits(Recruits, famcontact, emailtitle, messagefile, **kwargs):
    ''' Top level messaging function for recruiting players via custom e-mail;
    one message per player (not by family); currently not including SMS.
    args:
        Recruits -- df of recruit players (needs Famkey for the contact merge)
        famcontact -- family contact info (Email1/Email2, phones, address)
        emailtitle -- subject template; $FIRST is replaced per player
        messagefile -- plain-text template path (see makerecinfomessage)
    kwargs:
        choice -- 'send' e-mails, or 'test' to write them to a local log (default)
    '''
    choice = kwargs.get('choice', 'test')  # send or test
    if choice == 'send':
        smtpObj = smtplib.SMTP('smtp.gmail.com', 587)  # port 587
        smtpObj.ehlo()  # say hello
        smtpObj.starttls()  # enable encryption for send
        print('Enter password for sponsors club gmail ')
        passwd = input()
        smtpObj.login('<EMAIL>', passwd)
    else:  # testing only... open log file
        logfile = open('player_recruit_email_log.txt', 'w', encoding='utf-8')
    # Get address, full e-mail/phone list via family merge
    Recruits = pd.merge(Recruits, famcontact, how='left', on='Famkey', suffixes=('', '_2'))
    Recruits = Recruits[pd.notnull(Recruits['Email1'])]
    Recs = Recruits.groupby(['First', 'Last'])
    for [first, last], row in Recs:
        recipients = getemailadds(row.iloc[0])  # list of recipients
        # customized email title w/ first name commonly used
        thistitle = emailtitle.replace('$FIRST', first)
        # create custom email message (can have multiple sports in df)
        try:
            thismess = makerecinfomessage(row, messagefile)
        except Exception:
            print("Rec message error for", first, last)
            # bug fix: thismess is unbound (or stale from the previous player)
            # after a failure -- skip this player instead of sending garbage
            continue
        if choice == 'send':
            # bug fix: check for an empty recipient list BEFORE attempting the send
            if not recipients:
                print('No email address for ', first, last)
                continue
            msg = MIMEText(thismess, 'plain')
            msg['Subject'] = thistitle
            msg['From'] = '<NAME> <<EMAIL>>'
            msg['To'] = ','.join(recipients)  # single e-mail or list
            try:
                smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
                print('Message sent to ', ','.join(recipients))
            except Exception:
                print('Message to ', ','.join(recipients), ' failed.')
        else:  # testing mode
            logfile.write(thistitle + '\n')
            logfile.write(thismess + '\n')
    if choice != 'send':
        logfile.close()
    else:
        smtpObj.quit()  # close the SMTP connection (was left open with a TODO)
    return
def makerecinfomessage(row, messagefile):
    ''' Build the recruiting e-mail body for one player from the template file.
    args:
        row -- groupby df (not Series) for one player; may hold multiple sports
        messagefile -- plain-text template with $SPORTS/$FIRST/$LAST/$PHONELIST/
                       $EMAILLIST/$ADDRESS placeholders
    returns: message string with all placeholders substituted
    '''
    sports = row.Sport.unique().tolist()
    # 'VB' displays as 'volleyball'; other sport codes just lower-cased
    sports = [s.lower() if s != 'VB' else 'volleyball' for s in sports]
    # collect non-NaN phone numbers and e-mail addresses from the first row
    phonelist = [row.iloc[0][col] for col in ('Phone1', 'Phone2')
                 if str(row.iloc[0][col]) != 'nan']
    emaillist = [row.iloc[0][col] for col in ('Email1', 'Email2')
                 if str(row.iloc[0][col]) != 'nan']
    # load default message and then make custom substitutions
    with open(messagefile, 'r') as file:
        message = file.read()
    message = message.replace('$SPORTS', ' and '.join(sports))
    message = message.replace('$FIRST', row.iloc[0]['First'])
    message = message.replace('$LAST', row.iloc[0]['Last'])
    # contact info is tagged in message so recruited parent can check it
    message = message.replace('$PHONELIST', ', '.join(phonelist))
    message = message.replace('$EMAILLIST', ', '.join(emaillist))
    try:
        message = message.replace('$ADDRESS', row.iloc[0]['Address'] + ', ' + str(row.iloc[0]['Zip']))
    except (KeyError, TypeError):  # missing Address column or NaN address value
        print('No address for', row.iloc[0]['First'], row.iloc[0]['Last'])
        message = message.replace('$ADDRESS', '')
    return message
def getemailadds(thisrow):
    '''Find e-mail address(es) from a series (single family row from bill or
    other df) and return them as a list; NaN/blank entries are dropped. '''
    candidates = (str(thisrow.Email1), str(thisrow.Email2))
    # a bare '@' test filters out 'nan' strings and junk values alike
    return [addr for addr in candidates if '@' in addr]
''' TESTING
i=0 team=teamlist[i]
'''
def gamecardmaker(teams, coaches, Mastersignups, sched, pastelist, gctemplate):
    ''' Somewhat generic insertion into existing excel file template
    (see Excel_python_insert_template)
    args:
        teams -- current teams df (Coach ID / AssistantIDs columns)
        coaches -- coach roster (Coach ID, Lname, Fname)
        Mastersignups -- master signups (player names and uniform numbers)
        sched -- extracted game schedules (one row per game, Team column)
        pastelist -- instructions for copy of info into the excel template
        gctemplate -- CYC game card template for this sport
    '''
    teamlist = np.ndarray.tolist(sched.Team.unique())
    # only need teams in schedule list (Cabrini CYC only)
    teams = teams[teams['Team'].isin(teamlist)]
    # get player lists (last, first), player number list, and coach list (last, first)
    teaminfo = []
    myplayers = pd.merge(teams, Mastersignups, how='inner',
                         on=['Team', 'Sport', 'Year'], suffixes=('', '_2'))
    removelist = []
    for i, team in enumerate(teamlist):
        thisteam = myplayers[myplayers['Team'] == team]
        thisteam = thisteam.sort_values(['Last'])
        plalist = []
        planumlist = []
        for index, row in thisteam.iterrows():
            plalist.append(row.Last + ', ' + row.First)
            planumlist.append(row['Uniform#'])
        # now need to get coach(es) last, first
        headID = []
        match = teams[teams['Team'] == team]
        match = match.drop_duplicates(['Team'])  # handle co-ed duplicates
        match = match[pd.notnull(match['Coach ID'])]
        if len(match) == 1:
            headID.append(match.iloc[0]['Coach ID'])
        else:
            print('No coach found for team', team)
            removelist.append(team)  # remove from teamlist below
            continue
        match = match[pd.notnull(match['AssistantIDs'])]
        # bug fix: iloc[0] raised IndexError when a team had no assistants listed
        if len(match) == 1:
            asstIDs = match.iloc[0]['AssistantIDs'].split(',')
        else:
            asstIDs = []
        hcoach = coaches[coaches['Coach ID'].isin(headID)]
        asstcoaches = coaches[coaches['Coach ID'].isin(asstIDs)]
        coachlist = []
        for index, row in hcoach.iterrows():
            coachlist.append(row.Lname + ', ' + row.Fname)
        for index, row in asstcoaches.iterrows():
            coachlist.append(row.Lname + ', ' + row.Fname)
        teaminfo.append([plalist, planumlist, coachlist])
    # drop coachless teams; teaminfo stays aligned since no entry was appended for them
    for i, team in enumerate(removelist):
        teamlist.remove(team)
    for i, team in enumerate(teamlist):
        thissch = sched[sched['Team'] == team]
        maketeamgamecards(team, thissch, teaminfo[i], gctemplate, pastelist)
    return
''' TESTING
info=teaminfo[0] team=teamlist[0]
row=thissch.loc[0]
'''
def maketeamgamecards(teamname, thissch, info, gctemplate, pastelist):
    ''' Make an xlsx file full of game cards specific to this team.
    args:
        teamname -- team name, also used for the output file name
        thissch -- this team's game schedule (Day/Date/Time/Location/Division/Home/Away)
        info -- [player list (last, first), player number list, coach name list]
        gctemplate -- workbook whose active sheet holds the basic card structure
        pastelist -- list of items and find/replace locations (see makethisgc)
    '''
    plalist = info[0]
    planums = info[1]
    planums = [str(i) if str(i) != 'nan' else '' for i in planums]  # change any np.nans
    coachlist = info[2]
    # need new excel file for this team's cards
    gcname = teamname + '_gamecards.xlsx'
    book = load_workbook(gctemplate)
    templsheet = book.active
    thissch = thissch.reset_index(drop=True)
    # TODO generic version w/o game schedule?
    for index, row in thissch.iterrows():
        # Make a new worksheet in existing opened workbook
        newsheet = book.copy_worksheet(templsheet)
        thistitle = 'Game' + str(index + 1)
        newsheet.title = thistitle
        # page margins so the card prints on one sheet
        newsheet.page_margins.left = 0.45
        newsheet.page_margins.right = 0.45
        newsheet.page_margins.top = 0.2
        newsheet.page_margins.bottom = 0.2
        newsheet.page_margins.header = 0.05
        newsheet.page_margins.footer = 0.05
        gamedt = [row.Day + " " + row.Date + " " + ':'.join(row.Time.split(':')[0:2])]
        gdate = row.Date
        gtime = ':'.join(row.Time.split(':')[0:2])
        thislocation = [row.Location]
        thisdiv = [row.Division]
        # opponent fields are 'team/coach'; JD schedules have no '/coach' part
        if 'Cabrini' in row.Home:
            homeflag = True
            try:
                oteam = [row.Away.split('/')[0]]  # all openpyxl pastes are handled as list
                ocoach = [row.Away.split('/')[1]]
            except (IndexError, AttributeError):  # for JD schedule / non-string cell
                oteam = [row.Away]
                ocoach = []
        else:
            homeflag = False
            try:
                oteam = [row.Home.split('/')[0]]
                ocoach = [row.Home.split('/')[1]]
            except (IndexError, AttributeError):
                oteam = [row.Home]
                ocoach = []
        newsheet = makethisgc(newsheet, pastelist, plalist, planums, coachlist, gamedt, gdate, gtime,
                              thislocation, thisdiv, homeflag, ocoach, oteam, teamname)
    # remove the template sheet and save as "<teamname>_gamecards.xlsx";
    # Workbook.remove_sheet was deprecated/removed in openpyxl -- use remove.
    # The template is passed explicitly (was book.active, which is presumably
    # still the template at this point -- this makes the intent unambiguous).
    book.remove(templsheet)
    book.save(gcname)
    print('Saved game cards for ', teamname)
    return
''' TESTING
row=pastelist.iloc[0]
newsheet['A1'].value
newsheet.cell(row=1,column=1).value='kapow'
newsheet['E42'].value
startrow=10 startcol=5 celldir='Down'
'''
def makethisgc(newsheet, pastelist, plalist, planums, coachlist, gamedt, gdate, gtime,
               thislocation, thisdiv, homeflag, ocoach, oteam, teamname):
    ''' Handle the find/replace pastes for one game-card worksheet; each pastelist
    row names a data tag plus the target start row/column and fill direction. '''
    our_name = ['St. <NAME>']
    # tags whose payload needs no further logic
    simple = {'plalist': plalist, 'planums': planums, 'coaches': coachlist,
              'location': thislocation, 'datetime': gamedt, 'date': gdate,
              'time': gtime, 'league': thisdiv, 'teamname': teamname}
    for index, row in pastelist.iterrows():
        tag = row.Data
        payload = None
        if tag in simple:
            payload = simple[tag]
        elif tag == 'headcoach':  # used for BBall
            payload = coachlist[0]
        elif tag == 'asstcoach1' and len(coachlist) > 1:  # used for BBall
            payload = coachlist[1]
        elif tag == 'asstcoach2' and len(coachlist) > 2:  # used for BBall
            payload = coachlist[2]
        elif tag == 'hcoach':  # home-coach slot depends on which side we are
            payload = coachlist[:1] if homeflag else ocoach
        elif tag == 'vcoach':
            payload = ocoach if homeflag else coachlist[:1]
        elif tag == 'hteam':
            payload = our_name if homeflag else oteam
        elif tag == 'vteam':
            payload = oteam if homeflag else our_name
        if payload is not None:
            newsheet = pastechunk(newsheet, payload, row.Startrow, row.Startcol, row.Direction)
    return newsheet
def pastechunk(newsheet, mylist, startrow, startcol, celldir):
    ''' Write a list of values into consecutive worksheet cells, running down a
    column when celldir is 'Down', otherwise across a row. '''
    if type(mylist) == str:
        mylist = [mylist]  # tolerate a bare string passed by mistake
    down = (celldir == 'Down')
    for offset, val in enumerate(mylist):
        if down:
            newsheet.cell(row=startrow + offset, column=startcol).value = val
        else:
            newsheet.cell(row=startrow, column=startcol + offset).value = val
    return newsheet
|
tkcroat/SC | pkg/SC_uniform_functions.py | <reponame>tkcroat/SC
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 11:06:45 2018
@author: tkc
"""
import pandas as pd
import numpy as np
import datetime
import pkg.SC_config as cnf
def checkuni_duplicates(unilist):
    ''' Check a uniform set to see if numbers are unique; prints any
    (Setname, Size, Number) combination that occurs more than once. '''
    counts = unilist.groupby(['Setname', 'Size', 'Number']).size()
    for (sname, size, num), cnt in counts.items():
        if cnt > 1:
            print(cnt, ' unis from', sname, size, num)
    return
def checkInUnis(unis, unilist):
    ''' Using results of a recent physical inventory, find uniforms that are in
    the closet but still charged out to some player (still used manually).
    args:
        unis -- each physically checked-in uniform (Setname, Size, Number, Location)
        unilist -- master list w/ status of each uni
    returns: rows of unilist marked 'out' whose inventory row says 'in'
    '''
    joined = pd.merge(unilist, unis, on=['Setname', 'Size', 'Number'],
                      suffixes=('', '_2'), how='inner')
    checkins = joined[(joined['Location'] == 'out') & (joined['Location_2'] == 'in')]
    # computed for interactive inspection only: unis with no inventory match
    # (checked out and/or missing) -- presumably examined by hand, not returned
    leftover = pd.merge(unilist, unis, on=['Setname', 'Size', 'Number'],
                        suffixes=('', '_2'), how='left')
    leftover = leftover[pd.isnull(leftover['Location_2'])]
    return checkins
def getuniinfo(teams, unilogfile, Mastersignups, unilist, year):
    '''Readback of uniform numbers, sizes, issue dates from uniform night
    forms/log and return date after season; all financial info goes manually
    into paylog.
    args:
        teams -- current season teams ('Uniforms'!='N' marks teams with unis)
        unilogfile -- xlsx log, one tab per team (3-letter sport + 3-letter team)
        Mastersignups -- master signups df; updated and returned
        unilist -- master uniform inventory (currently unused here -- TODO confirm)
        year -- season year used to select signups
    returns: updated Mastersignups
    '''
    thisyearSU = Mastersignups[Mastersignups['Year'] == year]
    # find Cabrini teams from this season needing uniforms
    uniteams = teams[teams['Uniforms'] != 'N']
    uniteamlist = np.ndarray.tolist(uniteams.Team.unique())
    # regenerate name of tab in which these are stored (lower case, 3 letter name of sport)
    mycols = ['First', 'Last', 'School', 'Issue date', 'Uniform#', 'Size', 'Amount',
              'Deposit type', 'Deposit date', 'UniReturnDate', '$ returned',
              'Comments', 'Plakey', 'Famkey', 'Sport']
    alluniplayers = pd.DataFrame(columns=mycols)
    for i, team in enumerate(uniteamlist):  # find team's tab and import new uni # info
        match = teams[teams['Team'] == team]
        sport = match.iloc[0]['Sport'].lower()
        tabname = sport[0:3] + team[0:3]  # must match name when log was generated
        # fix: the 'sheetname' keyword was removed from pandas; 'sheet_name' is current
        thisteam = pd.read_excel(unilogfile, sheet_name=tabname)
        thisteam['Sport'] = sport  # add associated sport needed to match signup
        alluniplayers = pd.concat([alluniplayers, thisteam], ignore_index=True)
    # remove entries with nan in uniform number (no info) .. will be a number or 'xx'
    # nan here should mean uniform not issued (unreported drop)
    alluniplayers = alluniplayers.dropna(subset=['Uniform#'])
    # now update the associated signups in mastersignups
    for index, row in alluniplayers.iterrows():
        plakey = int(alluniplayers.loc[index]['Plakey'])
        sport = alluniplayers.loc[index]['Sport']
        # match plakey and sport and find associated index (year already filtered)
        thisplay = thisyearSU[thisyearSU['Plakey'] == plakey]
        mask = thisplay['Sport'].str.contains(sport, case=False)
        match = thisplay.loc[mask]
        try:
            number = int(alluniplayers.loc[index]['Uniform#'])
        except (ValueError, TypeError):
            # could be 'xx' if uniform definitely issued but number unknown
            number = alluniplayers.loc[index]['Uniform#']
        size = alluniplayers.loc[index]['Size']
        issuedate = alluniplayers.loc[index]['Issue date']
        if type(issuedate) == datetime.datetime:
            issuedate = datetime.date.strftime(issuedate, '%m/%d/%Y')
        returndate = alluniplayers.loc[index]['UniReturnDate']
        if type(returndate) == datetime.datetime:
            # bug fix: this previously formatted issuedate, clobbering the return date
            returndate = datetime.date.strftime(returndate, '%m/%d/%Y')
        # Now write Uniform#, issue date and return date from log
        # bug fix: was 'if len(match==1):' which is true for ANY non-empty match
        if len(match) == 1:
            # compare numbers if uni # not np.nan or '??'
            if str(match.iloc[0]['Uniform#']) != 'nan' and str(match.iloc[0]['Uniform#']) != '??':
                if match.iloc[0]['Uniform#'] != str(number):
                    print('Player', match.iloc[0]['First'], match.iloc[0]['Last'], ' assigned uni#',
                          match.iloc[0]['Uniform#'], ' or', str(number))
                    # Interactive conflict resolution??
            else:  # assign new number, size, date, etc.
                # match.index is original row in Mastersignups;
                # .set_value was removed in pandas 1.0 -- use .loc
                Mastersignups.loc[match.index, 'Uniform#'] = number
                Mastersignups.loc[match.index, 'Size'] = size
                Mastersignups.loc[match.index, 'Issue date'] = issuedate
                Mastersignups.loc[match.index, 'UniReturnDate'] = returndate
        else:
            print('Error: Matching signup not found for player key and sport:', plakey, sport)
    return Mastersignups
def makemissingunilog(df, paylog, players, fname='missingunilist.csv'):
    '''Pass master signups and find unreturned uniforms; writes the list to csv
    and returns it as a df.
    args:
        df -- master signups (Issue date / UniReturnDate mark uniform state)
        paylog -- payment logbook with family deposit entries
        players -- player roster used to look up school
        fname -- output csv path
    '''
    df = df.dropna(subset=['Issue date'])  # only signups with uniform issued
    # keep only unreturned uniforms; .copy() so added columns stay local
    df = df.loc[pd.isnull(df['UniReturnDate'])].copy()
    df['Amount'] = ''
    df['Deposit type'] = ''
    df['Deposit date'] = ''
    df['Comments'] = ''
    df['$ returned'] = ''  # this field always blank in uni-logs..
    # get families with outstanding uniforms; deal with family's deposits and unis together
    famlist = np.ndarray.tolist(df.Famkey.unique())
    paylog = paylog.sort_values(['Date'], ascending=True)  # chronological so deposit check works
    for i, famkey in enumerate(famlist):  # now find associated deposits from family
        unis = df[df['Famkey'] == famkey].copy()  # this family's outstanding uniforms
        depmatch = paylog[paylog['Famkey'] == famkey]
        depmatch = depmatch.dropna(subset=['Deposit'])  # drop payments w/o deposits
        # find last negative value and take the positive deposits after that
        for j in range(len(depmatch) - 1, -1, -1):  # backwards through the rows
            if depmatch.iloc[j]['Deposit'] < 0:  # stop at negative value
                depmatch = depmatch.drop(depmatch.index[0:j + 1])  # drop it and all preceding
                break
        if len(depmatch) == 0:
            # no deposits (no entries, or last entry negative = returned deposit)
            unis['Comments'] = 'No deposit on file'
            unis['Amount'] = 0
        else:
            # spread total deposit amount over all outstanding uniforms (even if inadequate)
            unis['Deposit date'] = depmatch.iloc[0]['Date']  # oldest deposit if multiple
            unis['Comments'] = depmatch.iloc[0]['Depcomment']  # may miss 2nd comment
            unis['Amount'] = depmatch.Deposit.sum() / len(unis)
            # check whether deposits are all of one type (i.e. all cash or all check)
            if len(depmatch.Deptype.unique()) == 1:
                unis['Deposit type'] = depmatch.iloc[0]['Deptype']  # generally cash or check
            else:
                unis['Deposit type'] = 'mixed'  # oddball case of some cash / some check
        df.loc[unis.index, unis.columns] = unis  # copy altered subset back to main df
    # get school for each player with issued uniform
    for index, row in df.iterrows():
        plakey = df.loc[index]['Plakey']
        match = players[players['Plakey'] == plakey]
        if len(match) != 1:
            print('Error locating school for player ', plakey)
        else:
            # .set_value was removed in pandas 1.0; .loc also creates the column
            df.loc[index, 'School'] = match.iloc[0]['School']
    # organize output for this file
    mycols = ['First', 'Last', 'Gender', 'Grade', 'School', 'Issue date', 'Sport', 'Year',
              'Uniform#', 'Team', 'Amount', 'Deposit type', 'Deposit date',
              'UniReturnDate', '$ returned', 'Comments', 'Plakey', 'Famkey']
    df = df[mycols]
    df = df.sort_values(['Last'])
    df.to_csv(fname, index=False, date_format='%m-%d-%y')
    return df
def writeuniformlog(df, teams, players, season, year, paylog):
    ''' From mastersignups and teams, write the uniform log for the season into
    separate tabs of one xlsx file; autosaves to "<Season>_<year>_uniform_log.xlsx"
    in cnf._OUTPUT_DIR. One tab per team needing uniforms.
    '''
    # unreturned uniforms from prior seasons; NOTE(review): computed but never
    # used -- in the prior version the name 'missing' was clobbered by a loop
    # variable below, so this df was always lost. Kept pending intent check.
    unreturned = df.dropna(subset=['Issue date'])
    unreturned = unreturned.loc[pd.isnull(unreturned['UniReturnDate'])]
    df = df[df['Year'] == year]  # remove prior years in case of duplicate name
    df = df.reset_index(drop=True)
    # get school from players.csv
    df = pd.merge(df, players, how='left', on=['Plakey'], suffixes=('', '_r'))
    # Find Cabrini teams from this season needing uniforms
    uniformteams = teams.loc[teams['Uniforms'] != 'N']
    uniformlist = np.ndarray.tolist(uniformteams.Team.unique())
    # single uniform log per season
    contactfile = '\\' + str(season) + '_' + str(year) + '_uniform_log.xlsx'
    writer = pd.ExcelWriter(cnf._OUTPUT_DIR + contactfile, engine='openpyxl', date_format='mm/dd/yy')
    # eliminate any entries not on a team needing uniforms
    df = df[df['Team'].isin(uniformlist)]
    # Columns needed for log output
    mycols = ['First', 'Last', 'School', 'Issue date', 'Uniform#', 'Size', 'Amount',
              'Deposit type', 'Deposit date', 'UniReturnDate', '$ returned',
              'Comments', 'Plakey', 'Famkey']
    tabnamelist = []
    # TODO Find size from this year's sport signup
    for team in uniformlist:
        thismask = df['Team'].str.contains(team, case=False, na=False)
        thisteam = df.loc[thismask]  # this team's signups
        sport = thisteam.iloc[0]['Sport'].lower()
        thisteam = finddeposits(thisteam, paylog)  # this team's slice of master_signups
        # bug fix: loop variable was named 'missing', shadowing the df above
        misscols = [i for i in mycols if i not in thisteam.columns]
        for col in misscols:
            thisteam[col] = ''
        thisteam = thisteam[mycols]  # organize in correct format for xls file
        tabname = sport[0:3] + team[0:3]  # name tab with team's name..
        if tabname in tabnamelist:
            tabname += '2'  # handles two teams per grade
        tabnamelist.append(tabname)
        thisteam.to_excel(writer, sheet_name=tabname, index=False)  # overwrites existing file
    # writer.save() is deprecated in newer pandas (use close()); kept for the
    # pandas version this project targets -- TODO confirm before upgrading
    writer.save()
    return
def finddeposits(df, paylog):
    ''' Pass a single team and look up the most recent uniform deposit per family;
    if positive we have a deposit on file and copy it into the uniform log.
    Pen entries for money returned go into paylog as negative numbers in the
    Deposit field. Returns a new df; the caller's df is not mutated. '''
    df = df.copy()  # avoid SettingWithCopy when the caller passes a slice
    # columns pertaining to financials of deposits (pulled from paylog)
    df['Amount'] = ''
    df['Deposit type'] = ''
    df['Deposit date'] = ''
    df['Comments'] = ''
    df['$ returned'] = ''  # this field always blank in uni-logs..
    for index, row in df.iterrows():
        famkey = df.loc[index]['Famkey']
        last = df.loc[index]['Last']
        match = paylog[paylog['Famkey'] == famkey]
        # Knock out entries without deposits (int or float)
        match = match.dropna(subset=['Deposit'])  # drop those w/o values
        # require int or float (probably shouldn't happen)
        match = match[match['Deposit'].apply(lambda x: isinstance(x, (int, np.int64, float)))]
        if len(match) == 0:
            print('No deposit for player ', last)
            # .set_value was removed in pandas 1.0; .at is the scalar replacement
            df.at[index, 'Comments'] = 'No deposit on file'
        elif match.iloc[-1]['Deposit'] > 0:
            # last matching value positive -> deposit is on file
            df.at[index, 'Deposit date'] = match.iloc[-1]['Date']
            df.at[index, 'Amount'] = match.iloc[-1]['Deposit']
            df.at[index, 'Deposit type'] = match.iloc[-1]['Deptype']
            df.at[index, 'Comments'] = match.iloc[-1]['Depcomment']
            print('$', match.iloc[-1]['Deposit'], ' for player ', last)
        else:  # last deposit entry is negative (returned deposit) so none on file
            print('Last deposit returned for player ', last)
            retdate = match.iloc[-1]['Date']
            # bug fix: a trailing comma previously made this a tuple, not a string
            commstr = '$' + str(match.iloc[-1]['Deposit']) + ' returned on ' + str(retdate)
            df.at[index, 'Comments'] = commstr
    return df
def updateunisumm(unisumm, unilist):
    ''' Using info in unilist (master), update the excel summary sheet counts;
    the summary is just an updatable view of the master inventory.
    args:
        unisumm -- summary df: one row per Setname w/ Total, <size>total and
                   <size><status> count columns (updated in place and returned)
        unilist -- master list: one row per uniform w/ Setname, Size, Location
    '''
    # First compare the sets present on each side
    unisets = np.ndarray.tolist(unisumm.Setname.unique())
    unisets = [i for i in unisets if str(i) != 'nan']  # drop nan entries
    unisets2 = np.ndarray.tolist(unilist.Setname.unique())
    miss = [i for i in unisets if i not in unisets2]
    if miss:
        print('Uniform set(s) missing:', ",".join(miss))
    miss2 = [i for i in unisets2 if i not in unisets]
    if miss2:
        print('Uniform set(s) missing:', ",".join(miss2))
    sizes = ['YM', 'YL', 'YXL', 'S', 'M', 'L', 'XL', '2XL']
    status = ['in', 'out', 'miss']  # in closet, out to player, or missing/unassigned
    # Compare and update counts per set, per size, and per size+status.
    # (size/status loops are nested inside the per-set loop: they use the
    # per-set slices match/match2.)
    for i, val in enumerate(unisets):
        match = unisumm[unisumm['Setname'] == val]
        match2 = unilist[unilist['Setname'] == val]  # holds this set's uniforms
        if match.iloc[0]['Total'] != len(match2):
            print('Totals discrepancy for ', val)
            # .set_value was removed in pandas 1.0; .at is the scalar replacement
            unisumm.at[match.index[0], 'Total'] = len(match2)
        for size in sizes:
            thissize = match2[match2['Size'] == size]
            if match.iloc[0][size + 'total'] != len(thissize):
                print('Size discrepancy for ', val, size)
                unisumm.at[match.index[0], size + 'total'] = len(thissize)
            for stat in status:
                thisstat = thissize[thissize['Location'] == stat]
                if match.iloc[0][size + stat] != len(thisstat):
                    print('Size discrepancy for ', val, size, stat)
                    unisumm.at[match.index[0], size + stat] = len(thisstat)
    # Now update totals by status
    unisumm['Total in'] = unisumm['YMin'] + unisumm['YLin'] + unisumm['YXLin'] + unisumm['Sin'] + unisumm['Min'] + unisumm['Lin'] + unisumm['XLin'] + unisumm['2XLin']
    unisumm['Total out'] = unisumm['YMout'] + unisumm['YLout'] + unisumm['YXLout'] + unisumm['Sout'] + unisumm['Mout'] + unisumm['Lout'] + unisumm['XLout'] + unisumm['2XLout']
    unisumm['Total miss'] = unisumm['YMmiss'] + unisumm['YLmiss'] + unisumm['YXLmiss'] + unisumm['Smiss'] + unisumm['Mmiss'] + unisumm['Lmiss'] + unisumm['XLmiss'] + unisumm['2XLmiss']
    return unisumm
def transferunis(df, season, year):
    ''' Transfer an unreturned uniform from last season to this season's signup
    for the same player/sport (using mastersignups); the old signup is marked
    returned with today's date and the new one inherits issue date and number.
    args: df -- Mastersignups; season -- 'Fall'/'Winter'/'Spring'; year -- current year
    returns: updated df
    '''
    sportsdict = {'Fall': ['VB', 'Soccer'], 'Winter': ['Basketball'],
                  'Spring': ['Track', 'Softball', 'Baseball', 'T-ball']}
    sportlist = sportsdict.get(season)
    # Current signups with uniform not yet assigned
    currentSU = df[(df['Sport'].isin(sportlist)) & (df['Year'] == year) & (pd.isnull(df['Issue date']))]
    # Previously issued, unreturned uniforms from this sport-season
    priorSU = df[(df['Sport'].isin(sportlist)) & (df['Year'] == year - 1) & (pd.notnull(df['Issue date'])) & (pd.isnull(df['UniReturnDate']))]
    # Find player-sport combos in both currentSU and priorSU (unreturned uni)
    tranunis = pd.merge(priorSU, currentSU, how='inner', on=['Plakey', 'Sport'], suffixes=('', '_2'))
    thisdate = datetime.datetime.strftime(datetime.datetime.now(), '%m/%d/%y')
    for index, row in tranunis.iterrows():
        old = df[df['SUkey'] == tranunis.loc[index]['SUkey']]
        new = df[df['SUkey'] == tranunis.loc[index]['SUkey_2']]
        if len(old) == 1 and len(new) == 1:
            oldind = old.index[0]
            newind = new.index[0]
            # copy old uni info over to new signup;
            # .set_value was removed in pandas 1.0 -- .at is the scalar replacement
            df.at[newind, 'Issue date'] = df.loc[oldind]['Issue date']
            df.at[newind, 'Uniform#'] = df.loc[oldind]['Uniform#']
            # Mark old signup as effectively having uniform returned
            df.at[oldind, 'UniReturnDate'] = thisdate
            print('Uni info transferred for', tranunis.loc[index]['First'], tranunis.loc[index]['Last'], tranunis.loc[index]['Sport'])
        else:
            print('Problem transferring uni info for', tranunis.loc[index]['First'], tranunis.loc[index]['Last'])
    return df
def transferunisVBBB(df, year):
    ''' Transfer VB uniforms over to basketball in the same year for common
    players; uses an artificial 1/1 date for both the VB return and the
    basketball reissue.
    args: df -- Mastersignups; year -- season year
    returns: updated df
    '''
    # Current basketball signups with uniform not yet assigned
    currseas = df[(df['Sport'].isin(['Basketball'])) & (df['Year'] == year) & (pd.isnull(df['Issue date']))]
    # Issued, unreturned VB uniforms from the same year
    priorseas = df[(df['Sport'].isin(['VB'])) & (df['Year'] == year) & (pd.notnull(df['Issue date'])) & (pd.isnull(df['UniReturnDate']))]
    # Players appearing in both sets keep the same uniform
    tranunis = pd.merge(priorseas, currseas, how='inner', on=['Plakey'], suffixes=('', '_2'))
    thisdate = '01/01/' + str(year)  # use artificial date of 1/1 for return/reissue
    for index, row in tranunis.iterrows():
        old = df[df['SUkey'] == row.SUkey]
        new = df[df['SUkey'] == row.SUkey_2]
        if len(old) == 1 and len(new) == 1:
            oldind = old.index[0]
            newind = new.index[0]
            # copy old uni info over to new signup;
            # .set_value was removed in pandas 1.0 -- .at is the scalar replacement
            df.at[newind, 'Issue date'] = thisdate  # use artificial 1/1 date
            df.at[newind, 'Uniform#'] = df.loc[oldind]['Uniform#']
            # Mark old signup as effectively having uniform returned
            df.at[oldind, 'UniReturnDate'] = thisdate
            print('Uni info transferred for', tranunis.loc[index]['First'], tranunis.loc[index]['Last'], tranunis.loc[index]['Sport'])
        else:
            print('Problem transferring uni info for', tranunis.loc[index]['First'], tranunis.loc[index]['Last'])
    return df
|
tkcroat/SC | pkg/SC_config.py | <reponame>tkcroat/SC
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 21:29:30 2019

@author: kevin
"""
import os
from os.path import expanduser

# User's home directory (e.g. C:\Users\kevin on Windows)
homedir = expanduser("~")

# Sponsors Club data directories.  os.path.join produces the same
# backslash-separated strings on Windows as the old '\\' concatenation,
# while staying portable on other platforms.
_OUTPUT_DIR = os.path.join(homedir, "Documents", "Sponsors_Club")
_INPUT_DIR = os.path.join(_OUTPUT_DIR, "SC_files")
tkcroat/SC | SC_schedules_main.py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 3 11:22:49 2017
@author: tkc
"""
import pandas as pd
import os
import datetime
import sys
import numpy as np
import pkg.SC_messaging_functions as SCmess
import pkg.SC_schedule_functions as SCsch
import pkg.SC_config as cnf # specifies input/output file directories
#%%
from importlib import reload
reload(SCsch)
reload(SCmess)
#%% Download from google sheets Cabrini basketball schedule
sheetID = '1-uX2XfX5Jw-WPw3YBm-Ao8d2DOzou18Upw-Jb6UiPWg'
rangeName = 'Cabrini!A:G'
# NOTE(review): SCapi is not imported in this file's header — presumably a
# pkg.SC_api module loaded interactively; confirm before running this cell
cabsched = SCapi.downloadSheet(sheetID, rangeName)
#%% Load of other commonly needed info sheets
teams=pd.read_csv(cnf._INPUT_DIR +'\\Teams_2019.csv', encoding='cp437')
coaches=pd.read_csv(cnf._INPUT_DIR +'\\coaches.csv', encoding='cp437')
fields=pd.read_csv(cnf._INPUT_DIR+'\\fields.csv', encoding='cp437')
# NOTE(review): '\\\master_signups.csv' has three backslashes — '\\' + '\m';
# works on Windows only because '\m' is not an escape, but looks like a typo
Mastersignups = pd.read_csv(cnf._INPUT_DIR +'\\\master_signups.csv', encoding='cp437')
# NOTE(review): SC is also not imported above — verify module alias
players, famcontact = SC.loadProcessPlayerInfo() # version w/o signup processing
season='Winter'
year=2019
#%% Create all schedules and write to text log (non-auto emailed version)
emailtitle='Game Schedules for $TEAMNAME'
blankmess=SCmess.readMessage() # choose and read blank message from chosen *.txt
cabsched=SCsch.alterSchedule(cabsched) # day and division changes for consistency to previous
# write of all team schedules to parent_email_log (default choice)
SCmess.sendschedule(teams, cabsched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess)
#%%
# Read tentative google drive Pat Moore schedules (after delete of cols before header row)
sched=pd.read_csv(cnf._OUTPUT_DIR+'\\Schedules\\Bball2019_full_schedule.csv')
# Read of full xlxs by league file... good format for schedule extraction
# However csv file has separated schedules
sched=pd.read_csv(cnf._OUTPUT_DIR+'\\Schedules\\Bball2019_full_schedule.csv')
sched=pd.read_excel('C:\\Temp\\allTeams.xlsx')
sched=pd.read_excel(cnf._OUTPUT_DIR+'\\Schedules\\BB2019_full_schedule.xlsx')
fullsched=pd.read_excel('CYC_soccer_2019.xlsx')
# Load full schedule (Pat moore excel format)
fullsched=pd.read_excel('Soccer2019 By League.xlsx')
fullsched=pd.read_csv('Soccer.csv')
fullsched=SCsch.prepGdSchedule(fullsched, teams, 'Soccer')
# Find changed schedules, return altered games
sched=SCsch.loadSchedule() # Reload existing Cabrini-only schedule (post-processing)
sched=pd.read_csv('Cab_soccer_schedule_30Aug18.csv')
oldsch=pd.read_csv('Cab_soccer_schedule_26Aug18.csv')
changed=SCsch.compareSched(sched, oldsch)
# Load CYC full schedule and produce sub-schedule
fullsched=pd.read_excel('BB2018_schedule.xlsx')
sched=pd.read_csv('Cab_Bball_schedule_24Dec18.csv')
sched=pd.read_csv('Cabrini_2017_VB_soccer_schedule.csv')
sched2=pd.read_csv('Cabrini_VB2017_schedule.csv')
# load old teams
# Get subset of full schedule for Cabrini teams (and Cab transfer teams)
kwargs={}
kwargs.update({'div':'5G'}) # optional sub-sch for only
kwargs.update({'school':'Cabrini'}) # get cabrini schedules by school
kwargs.update({'sport':'Soccer'})
kwargs.update({'sport':'VB'})
cabsched=SCmess.getcabsch(fullsched, teams, coaches, fields, **kwargs)
cabsched.to_csv(cnf._OUTPUT_DIR + '\\Cab_Basketball2019_schedule_26Dec19.csv', index=False) # save (used for sendschedule, maketextsch, gcal, etc.)
# Compare schedule to previous and return altered rows
#%% CYC game rescheduler (prior to release)... swapping team opponents
sched=pd.read_excel(cnf._OUTPUT_DIR+'\\Schedules\\BB2019_full_schedule.xlsx')
sched=SCsch.prepSched(sched)
teamName='<NAME>-Clavin-6GD'
# NOTE(review): this file does `import datetime`, so datetime(2020,1,11)
# calls the module and raises TypeError — should be datetime.datetime(...)
badDay=datetime(2020,1,11) # Team has two impossible to play games
teamName='<NAME>-Croat-7BD1'
badDay=datetime(2020,1,25) # Team has two impossible to play games
# NOTE(review): findTeamSwap/findGymSlot/getSchedule below are unqualified —
# presumably SCsch functions imported interactively; confirm
swapOld, swapNew, swapTeam = findTeamSwap(teamName, badDay, sched, gameRank=0)
swapOld, swapNew, swapTeam = findTeamSwap(teamName, badDay, sched, **{'badTime':'7:30 PM'})
# swapping same teams to new datetime/venue.. harder
gymsched=pd.read_excel(cnf._OUTPUT_DIR+'\\Schedules\BB2019_Schedule_ByGym.xlsx')
avail = findGymSlot(gymsched, datetime(2020,2,15)) #Sun
thisSched=getSchedule(sched, 'Annunciation-Handal-6GD')
#%%
# Make sports google calendars
kwargs={}
kwargs.update({'splitcal':False}) # single jumbo calendar option
kwargs.update({'school':'Cabrini'}) # Cabrini teams only
kwargs.update({'division':'6B'})
SCmess.makegcals(cabsched, teams, coaches, fields, season, year, duration=1, **kwargs)
# make game cards from given schedule
sched=pd.read_csv('Cab_soccer_schedule_23Aug18.csv') # reload
gctemplate=cnf._INPUT_DIR+'\\game_card_soccer_template.xlsx' # for soccer
gctemplate='game_card_VB_template.xlsx'
gctemplate='game_card_bball_template.xlsx' # for soccer
pastelist=pd.read_excel(cnf._INPUT_DIR+'\\excel_python_insert_template.xlsx', sheet_name=1) # currently same for soccer and VB
pastelist=pd.read_excel('excel_python_insert_template.xlsx', sheet_name='bball')
SCmess.gamecardmaker(teams, coaches, Mastersignups, sched, pastelist, gctemplate)
# Make all available game schedules for SMS (save to txt file for send to SMS email addresses or direct text)
messagefile='parent_game_scheduleSMS.txt'
logfile='basketball_gameschedules_SMS.txt'
logfile='test.txt'
SCmess.maketextsched(sched, teams, coaches, fields, messagefile, logfile, **kwargs)
# Look for changed games between two versions
altered=SCmess.detectschchange(cabsched, sched2)
altered.to_csv('altered_games.csv',index=False)
# other optional subsets of schedule
cabsched.to_csv('Cabrini_2017_soccer_schedule_8sep17.csv', index=False)
cabsched=pd.read_csv('Cabrini_2017_soccer_schedule.csv')
# Make Cabrini sports calendar from extracted CYC schedule
makegcalCYC(thisteam,'Ethan baseball', 1.5) # single team
# Venue list
venues=np.ndarray.tolist(cabsched.Location.unique())
thisteam=SCmess.getCYCschedules(cabsched, **kwargs)
#%%
# Older version
league='4B' # grade and B or G
school='Heller' # coach or school name
thisteam=schedule[(schedule['League']==league) & (schedule['Home'].str.contains(school)|schedule['Visitor'].str.contains(school))]
thisteam.to_csv('Ethan_4B_schedule.csv', index=False)
# Load league results
# Create google calendar from single team
makegcalCYC(thisteam,'Ethan baseball', 1.5)
# Load and read from standard Epiphany Tball schedule
Tball=pd.read_excel('Epiphany_1B_Tball.xlsx')
schedule=Tball[(Tball['HOME'].str.contains('frances',case=False)) | (Tball['AWAY'].str.contains('frances',case=False))]
schedule.to_csv('Tball_2017.csv',index=False)
Tballgcal=makegcal(schedule)
#%% Pulling schedules for SFC teams from OLS
fname='C:\\Users\\tkc\\Documents\\Python_Scripts\\SC\\OLS_2017.xlsx'
Ols2017=parseOls(fname) # makes flattened OLS schedule
team='OLS/SFC/SSP KDG-1ST G'
team='OLS 2ND G'
team='OLS/SFC 1ST B 1'
team='OLS/SFC/SSP 2ND B 2'
team='OLS/SFC KDG B'
# Getting subset of Cabrini teams
team=teams[2]
thisteam=makeOLS(team,Ols)
teams=np.ndarray.tolist(Ols.Home.unique())+np.ndarray.tolist(Ols.Visitor.unique())
teams=set(teams)
teams=list(teams)
cabteams=[n for n in teams if 'SFC' in n]
Ols=Ols[Ols['Date']>datetime.date(2017, 3, 11)] # schedule going forward
Olsold=pd.read_csv('OLS_2017.csv', encoding='cp437')
thisteam=makeOLS(team,Olsold)
duration=1.5
df=thisteam
test=makegcalCYC(thisteam,'Ethan baseball', 1.5)
test.to_csv('test_cal.csv', index=False)
def makegcal(schedule):
    '''Turn an Epiphany Tball schedule into a google-calendar import frame.

    Args:
        schedule: frame with columns DATE, TIME, HOME, AWAY, FIELD
    Returns:
        frame with the standard google calendar CSV columns (Start Date,
        Start Time, End Time, All Day Event, Description, Location, Private)
    '''
    mycols=['Start Date', 'Start Time', 'End Time', 'All Day Event', 'Description', 'Location','Private']
    df=schedule.copy()
    df=df.rename(columns={'DATE':'Start Date','TIME':'Start Time'})
    # title-case team names for the event description
    df['HOME']=df['HOME'].str.title()
    df['AWAY']=df['AWAY'].str.title()
    df['All Day Event']='FALSE'
    df['Private']='FALSE'
    df['Location']='Epiphany '+ df['FIELD']
    # parse start times, then add the fixed 1-hour game duration (the old
    # code also tried str + timedelta and a nonexistent .topydatetime
    # attribute, both of which raised at runtime)
    df['End Time']=pd.to_datetime(df['Start Time']) + datetime.timedelta(hours=1)
    df['Description']='K-1 coed Tball: '+df['HOME']+' vs '+df['AWAY']
    return df[mycols]
def makeOLS(team, Ols):
    '''Pull one team's games out of the flattened OLS master schedule.

    Matches rows where the team name appears in either the Home or the
    Visitor column, writes the subset to "<team>.csv" (slashes mapped to
    dashes so the name is a valid filename), and returns it.
    Expected schedule columns: Date, Time, Home, Visitor, Court.
    '''
    name = team.strip()
    is_home = Ols['Home'].str.contains(name)
    is_away = Ols['Visitor'].str.contains(name)
    games = Ols.loc[is_home | is_away]
    games.to_csv(name.replace('/', '-') + '.csv', index=False)
    return games
# Need to finish combination scripts
# NOTE(review): scratch experiments for combining Start Date/Start Time into
# real datetimes; not every line is valid (the bare `val` below is a
# NameError leftover).  Only the last End Time expression looks usable.
pd.to_datetime(df['Start Date']+ ' ' + df['Start Time'])
df['Datetime']=datetime.datetime.combine(pd.to_datetime(df['Start Date']), df['Start Time'])
df['Start Date']+df['Start Time']
df['End Time']=df['Start Time'] + pd.Timedelta(hours=1)
val
df['End Time']=pd.to_datetime(df['Start Time']) + pd.Timedelta(hours=1)
tkcroat/SC | UnderDev/SC_db_alchemy.py | <filename>UnderDev/SC_db_alchemy.py
# -*- coding: utf-8 -*-
"""
Created on Wed May 10 08:58:05 2017
Implementation of database model (players, families) using SQLAlchemy
@author: tkc
"""
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
# new base class.. triggers creation of Table and mapper
# accessible using class.__table__ and class.__mapper__
# Shared declarative base: every ORM model below inherits from it so all
# tables register on the same metadata object.
Base = declarative_base()
class Player(Base):
    # ORM model for one player row; declarative definition of the 'player'
    # table (column attributes below become the table schema, so names and
    # types must not be changed casually).
    __tablename__ = 'player'
    # Here we define columns for the table person
    # Notice that each column is also a normal Python instance attribute.
    plakey = Column(Integer, primary_key=True)  # player key (primary key)
    first = Column(String(250), nullable=False)
    last = Column(String(250), nullable=False)
    alias = Column(String(250), nullable=False) # alt first name
    family = Column(String(250), nullable=False) # family name (necessary?)
    DOB = Column(String(250))  # date of birth kept as a string, not a Date column
    gender = Column(String(250))
    school = Column(String(250))
    grade= Column(Integer) # K=0
    gradeadj= Column(Integer) # ahead or behind of expected grade
    uninum = Column(Integer) # Tshirt uniform
    famkey = Column(Integer, ForeignKey('family.famkey'))  # FK to family table
class Family(Base):
    # ORM model for one family row; declarative definition of the 'family'
    # table, holding household contact info and up to 3 parent names.
    __tablename__ = 'family'
    # Here we define columns for the table address.
    # Notice that each column is also a normal Python instance attribute.
    famkey = Column(Integer, primary_key=True)  # family key (primary key)
    family = Column(String(250), nullable=False)
    address = Column(String(250)) # number and name as string
    city = Column(String(250))
    state = Column(String(250))
    post_code = Column(String(250), nullable=False)
    parish_registration = Column(String(250))
    parish_residence = Column(String(250))
    # up to three parents/guardians stored as flattened name pairs
    pfirst1 = Column(String(250), nullable=False)
    plast1 = Column(String(250), nullable=False)
    pfirst2 = Column(String(250), nullable=False)
    plast2 = Column(String(250), nullable=False)
    pfirst3 = Column(String(250), nullable=False)
    plast3 = Column(String(250), nullable=False)
    # How to associate multiple players w/ family
    # NOTE(review): despite the singular name, this relationship yields the
    # collection of Player rows whose famkey points here — confirm intent
    player = relationship(Player)
# Create engine to store above declarative data definitions
# NOTE(review): nothing here materializes the schema — a separate
# Base.metadata.create_all(engine) call is needed to create the tables
# inside SC_sqlalchemy.db.
engine = create_engine('sqlite:///SC_sqlalchemy.db')
|
tkcroat/SC | pkg/SC_database_transfer_scripts.py | <reponame>tkcroat/SC
# -*- coding: utf-8 -*-
"""
Created on Wed May 25 08:44:02 2016
@author: tkc
"""
#%% Transfer database tables to new pandas dataframe structure (probably one time use)
import pandas as pd
#%%
# Transfer parent names/numbers/textable from parents to famcontact
# Using phonelist and check for unique numbers
# Inputs for the one-time migration cells below: legacy parent/player
# tables plus the many-to-many link tables mapping them onto families.
parents=pd.read_csv('parents.csv', encoding='cp437') # use Excel compatible encoding cp437 instead of utf-8
players=pd.read_csv('players.csv', encoding='cp437') # use Excel compatible encoding cp437 instead of utf-8
famcontact=pd.read_csv('family_contact.csv', encoding='cp437')
mmparfam=pd.read_csv('mm_par_fam.csv', encoding='cp437') # many-to-many table for parent to family
mmplafam=pd.read_csv('mm_pla_fam.csv', encoding='cp437') # many-to-many table for player to family
#%%
# One-time migration cells pushing keys/names between the parents, players
# and famcontact tables.  NOTE(review): DataFrame.set_value (used
# throughout) was deprecated in pandas 0.21 and removed in 1.0 — these
# cells only run on the old pandas they were written against.
# add family # to parents
for i in range(0,len(parents)):
    parkey=parents.iloc[i]['Parkey'] # get parent number
    match= mmparfam[(mmparfam['Parkey']==parkey)]
    if len(match)!=1:
        print('Error: Parent # ',parkey,' matches ',len(match),' families.')
        continue
    famkey=match.iloc[0]['Famkey']
    parents=parents.set_value(i,'Famkey',famkey)
# add family # to players
# NOTE(review): loop bound uses len(parents) but rows are read from players
# via iloc — presumably should be len(players); confirm before any rerun
for i in range(0,len(parents)):
    plakey=players.iloc[i]['Plakey'] # get parent number
    match= mmplafam[(mmplafam['Plakey']==plakey)]
    if len(match)!=1:
        print('Error: Parent # ',plakey,' matches ',len(match),' families.')
        continue
    famkey=match.iloc[0]['Famkey']
    players=players.set_value(i,'Famkey',famkey)
# add family name to players
for i in range(0,len(players)):
    famkey=players.iloc[i]['Famkey'] # get parent number
    match= famcontact[(famcontact['Famkey']==famkey)]
    if len(match)!=1:
        print('Error: Player # ',i,' matches ',len(match),' families.')
        continue
    if len(match)==1:
        family=match.iloc[0]['Family']
        players=players.set_value(i,'Family',family)
# Add family name to parents
# NOTE(review): this cell reads parents rows but writes the result into
# players (looks copy-pasted from the previous cell) — presumably it
# should be parents.set_value(...); verify before reuse
for i in range(0,len(parents)):
    famkey=parents.iloc[i]['Famkey'] # get parent number
    match= famcontact[(famcontact['Famkey']==famkey)]
    if len(match)!=1:
        print('Error: Player # ',i,' matches ',len(match),' families.')
        continue
    if len(match)==1:
        family=match.iloc[0]['Family']
        players=players.set_value(i,'Family',family)
# Check if parents are at different addresses (only one address allowed per player)
for i in range(0,len(parents)):
    famkey=parents.iloc[i]['Famkey'] # get family key
    match= parents[(parents['Famkey']==famkey)]
    if len(match)>1:
        add1=match.iloc[0]['Address']
        add2=match.iloc[1]['Address']
        if add1!=add2 and type(add2)==str:
            print('Different addresses for famkey ',famkey," ", add1," ",add2)
# Check for consistent parish information between parents
for i in range(0,len(parents)):
    famkey=parents.iloc[i]['Famkey'] # get family key
    match= parents[(parents['Famkey']==famkey)]
    if len(match)>1:
        par1=match.iloc[0]['Zip']
        par2=match.iloc[1]['Zip']
        if type(par1)==str and type(par2)==str: # eliminate nan
            if par1!=par2:
                print("Family # ", famkey,' ', par1, ' ', par2)
        if type(par1)!=str and type(par2)==str: # eliminate nan
            print("Family # ", famkey,' ', par1, ' ', par2)
        if type(par2)==str and not type(par1)==str:
            parents=parents.set_value(i,'Parish_Residence',par1)
        # NOTE(review): add1/add2 below are leftovers from the previous
        # address-check loop, and par1 written above is the non-string
        # value — this cell looks unfinished; verify before reuse
        if add1!=add2 and type(add2)==str:
            print('Different addresses for famkey ',famkey," ", add1," ",add2)
''' script to transfer address, phone, email, names from parent table to matching famcontact dataframe
probably only for one time use '''
for i in range(0,len(famcontact)):
    famkey=famcontact.iloc[i]['Famkey'] # get family key
    match= parents[(parents['Famkey']==famkey)]
    # NOTE(review): match.iloc[0] below raises IndexError if a family has
    # no parent rows; assumes every famkey has at least one parent
    phonelist=[]
    textlist=[]
    emaillist=[]
    '''
    famcontact=famcontact.set_value(i,'Zip',match.iloc[0]['Zip'])
    famcontact=famcontact.set_value(i,'Address',match.iloc[0]['Address'])
    famcontact=famcontact.set_value(i,'Parish_residence',match.iloc[0]['Parish_residence'])
    famcontact=famcontact.set_value(i,'Parish_registration',match.iloc[0]['Parish_registration'])
    famcontact=famcontact.set_value(i,'City',match.iloc[0]['City'])
    famcontact=famcontact.set_value(i,'State',match.iloc[0]['State'])
    '''
    if type(match.iloc[0]['Phone1'])== str: # numbers from parent 1
        phonelist.append(match.iloc[0]['Phone1'].strip())
        textlist.append(match.iloc[0]['Text1'])
    if type(match.iloc[0]['Phone2'])== str:
        if match.iloc[0]['Phone2'].strip() not in phonelist:
            phonelist.append(match.iloc[0]['Phone2'].strip())
            textlist.append(match.iloc[0]['Text2'])
    if type(match.iloc[0]['Phone3'])== str:
        if match.iloc[0]['Phone3'].strip() not in phonelist:
            phonelist.append(match.iloc[0]['Phone3'].strip())
            textlist.append(match.iloc[0]['Text3'])
    if type(match.iloc[0]['Email1'])== str: # emails from parent 1
        emaillist.append(match.iloc[0]['Email1'].strip())
    if type(match.iloc[0]['Email2'])== str: # emails from parent 1
        if match.iloc[0]['Email2'].strip() not in emaillist:
            emaillist.append(match.iloc[0]['Email2'].strip())
    famcontact=famcontact.set_value(i,'Pfirst',match.iloc[0]['Pfirst'])
    famcontact=famcontact.set_value(i,'Plast',match.iloc[0]['Plast'])
    if(len(match)>1):# numbers from parent 1
        if type(match.iloc[1]['Phone1'])== str:
            if match.iloc[1]['Phone1'].strip() not in phonelist:
                phonelist.append(match.iloc[1]['Phone1'].strip())
                textlist.append(match.iloc[1]['Text1'])
        if type(match.iloc[1]['Phone2'])== str:
            if match.iloc[1]['Phone2'].strip() not in phonelist:
                phonelist.append(match.iloc[1]['Phone2'].strip())
                textlist.append(match.iloc[1]['Text2'])
        if type(match.iloc[1]['Phone3'])== str:
            if match.iloc[1]['Phone3'].strip() not in phonelist:
                phonelist.append(match.iloc[1]['Phone3'].strip())
                textlist.append(match.iloc[1]['Text3'])
        if type(match.iloc[1]['Email1'])== str: # email 1 from parent 2
            if match.iloc[1]['Email1'].strip() not in emaillist:
                emaillist.append(match.iloc[1]['Email1'].strip())
        if type(match.iloc[1]['Email2'])== str: # email 2 from parent 2
            if match.iloc[1]['Email2'].strip() not in emaillist:
                emaillist.append(match.iloc[1]['Email2'].strip())
        famcontact=famcontact.set_value(i,'Pfirst2',match.iloc[1]['Pfirst']) # add 2nd parent name to famcontact
        famcontact=famcontact.set_value(i,'Plast2',match.iloc[1]['Plast'])
    if(len(match)>2):# numbers from parent 3
        if type(match.iloc[2]['Phone1'])== str:
            if match.iloc[2]['Phone1'].strip() not in phonelist:
                phonelist.append(match.iloc[2]['Phone1'].strip())
                textlist.append(match.iloc[2]['Text1'])
        if type(match.iloc[2]['Phone2'])== str:
            if match.iloc[2]['Phone2'].strip() not in phonelist:
                phonelist.append(match.iloc[2]['Phone2'].strip())
                textlist.append(match.iloc[2]['Text2'])
        if type(match.iloc[2]['Phone3'])== str:
            if match.iloc[2]['Phone3'].strip() not in phonelist:
                phonelist.append(match.iloc[2]['Phone3'].strip())
                textlist.append(match.iloc[2]['Text3'])
        famcontact=famcontact.set_value(i,'Pfirst3',match.iloc[2]['Pfirst']) # add 2nd parent name to famcontact
        famcontact=famcontact.set_value(i,'Plast3',match.iloc[2]['Plast'])
    for num in range(0,len(phonelist)): # write phonelist and textlist to famcontact
        phonecol='Phone'+str(num+1)
        textcol='Text'+str(num+1)
        famcontact=famcontact.set_value(i,phonecol,phonelist[num])
        famcontact=famcontact.set_value(i,textcol,textlist[num])
    for num in range(0,len(emaillist)): # write phonelist and textlist to famcontact
        emailcol='Email'+str(num+1)
        famcontact=famcontact.set_value(i,emailcol,emaillist[num])
def addfamilies(df, famcontact, fambills):
    '''Old version of addfamilies when fambills was also used... new version
    generates bill summaries in real-time from paylog and signups.

    Appends the new families in df to the master family contact and family
    billing tables: backs both tables up (datestamped .bak files), adds the
    billing bookkeeping columns, and rewrites family_bill.csv and
    family_contact.csv.

    Args:
        df: frame of new families to add
        famcontact: master family contact table
        fambills: master family billing table
    Returns:
        (famcontact, fambills) updated frames.  The old code rebound both
        names to the None returned by to_csv and therefore returned None
        frames — fixed here.
    '''
    df=df.reset_index(drop=True) # reset index for proper for loops
    dfcon=df.copy() # independent copy for the famcontact update below
    # backup existing family contact and billing tables
    mytime=datetime.datetime.now()
    datestr='_' + str(mytime.day) + mytime.strftime("%B") + str(mytime.year)[2:] # current date as in 3Jun16
    famcontact.to_csv('family_contact'+datestr+'.bak', index=False)
    fambills.to_csv('family_bill'+datestr+'.bak', index=False)
    # update family billing
    datestr=str(mytime.month)+'/'+str(mytime.day)+'/'+str(mytime.year)
    df=dropcolumns(df,fambills) # drops all cols from df that are not in fambills
    df['Startdate']=datestr # add and initialize other necessary columns
    df['Lastupdate']=datestr
    df['Startbal']=0
    df['Currbalance']=0
    df['Billing_note']=''
    colorder=fambills.columns.tolist()
    fambills=pd.concat([fambills,df]) # same names, so column order doesn't matter
    fambills=fambills[colorder] # put back in original order
    fambills=fambills.reset_index(drop=True)
    fambills.to_csv('family_bill.csv', index=False) # to_csv returns None; don't rebind
    # update family contact
    dfcon.rename(columns={'Plakey': 'Players', 'Parish': 'Parish_registration', 'Phone': 'Phone1', 'Text': 'Text1', 'Email': 'Email1',}, inplace=True)
    dfcon=dropcolumns(dfcon, famcontact) # drop unnecessary columns from dfcon (not in famcontact)
    dfcon['City']='St. Louis'
    dfcon['State']='MO'
    dfcon['Parish_residence']=''
    dfcon['Pfirst3']=''
    dfcon['Plast3']=''
    dfcon['Phone3']=''
    dfcon['Text3']=''
    dfcon['Phone4']=''
    dfcon['Text4']=''
    dfcon['Email3']=''
    # use famcontact's own column order here (old code reused fambills')
    colorder=famcontact.columns.tolist()
    famcontact=pd.concat([famcontact,dfcon])
    famcontact=famcontact[colorder]
    famcontact=famcontact.reset_index(drop=True)
    famcontact.to_csv('family_contact.csv', index=False) # to_csv returns None; don't rebind
    return famcontact, fambills
def comparefamkeys(players, famcontact, fambills):
    '''Old utility script to cross-check family keys between the players,
    family-contact and family-billing tables (fambills has since been
    removed).

    Prints any famkey present in one table but missing from another, then
    prints any family whose name (title-cased, stripped) disagrees between
    famcontact and fambills, and between famcontact and players.
    '''
    fams_pla = np.ndarray.tolist(players.Famkey.unique())
    fams_con = np.ndarray.tolist(famcontact.Famkey.unique())
    fams_bill = np.ndarray.tolist(fambills.Famkey.unique())
    # contacts and billing should hold exactly the same families
    billonly = [k for k in fams_bill if k not in fams_con]
    cononly = [k for k in fams_con if k not in fams_bill]
    noplayers = [k for k in fams_con if k not in fams_pla]
    for val in billonly:
        print("Famkey ", val, " in family billing but not in family contacts.")
    for val in cononly:
        print("Famkey ", val, " in family contacts but not in family billing.")
    for val in noplayers:
        print("Famkey ", val, " in family contacts but not found among players.")

    def _othername(key, name, other):
        # return the other table's normalized family name when it exists
        # (exactly one match) and disagrees with the given name
        hit = other[other['Famkey'] == key]
        if len(hit) != 1:
            return None
        name2 = hit.iloc[0]['Family'].title().strip()
        return name2 if name2 != name else None

    # name discrepancies: famcontact vs fambills, then famcontact vs players
    for other in (fambills, players):
        for i in range(0, len(famcontact)):
            famkey = famcontact.iloc[i]['Famkey']
            family = famcontact.iloc[i]['Family'].title().strip()
            mismatch = _othername(famkey, family, other)
            if mismatch is not None:
                print("Family key ", str(famkey), ": ", family, " or ", mismatch)
    return
def writecontactsold(df, Teams, season, signupfile):
    '''Write per-sport (and per-gender, where split) contact sheets into the
    signup workbook.

    Old version: slices the fresh google-form signup frame by sport/gender,
    standardizes the Sport value, assigns teams, formats the contacts and
    overwrites the matching sheet in signupfile.  For paper signups some
    data will be missing and will be found from the existing player
    database.  New version starts with mastersignups with team already
    assigned.

    Args:
        df: signup frame with at least Sport and Gender columns
        Teams: team assignment table passed through to assignteams
        season: 'Fall' (soccer/VB), 'Spring' (baseball/softball/T-ball/
            track) or 'Winter' (basketball only)
        signupfile: existing .xlsx workbook whose sheets are overwritten
    '''
    from openpyxl import load_workbook
    book=load_workbook(signupfile)
    writer=pd.ExcelWriter(signupfile, engine='openpyxl')
    writer.book=book
    # map sheet names so to_excel overwrites existing sheets in place
    writer.sheets = dict((ws.title, ws) for ws in book.worksheets)

    def _writesheet(subdf, sportname, sheetname):
        # standardize sport name, assign teams, format and overwrite sheet
        subdf = subdf.copy() # avoid SettingWithCopy on the boolean slice
        subdf['Sport'] = sportname
        subdf = assignteams(subdf, Teams)
        subdf = organizecontacts(subdf)
        subdf.to_excel(writer, sheet_name=sheetname, index=False)

    if season=='Fall':
        girls = df['Gender'].str.contains('f', case=False, na=False)
        boys = df['Gender'].str.contains('m', case=False, na=False)
        soccer = df['Sport'].str.contains('soccer', case=False, na=False)
        vb = df['Sport'].str.contains('v', case=False, na=False)
        _writesheet(df.loc[soccer & girls], 'Soccer', 'Girlsoccer')
        _writesheet(df.loc[soccer & boys], 'Soccer', 'Boysoccer')
        _writesheet(df.loc[vb & boys], 'VB', 'BoyVB')
        _writesheet(df.loc[vb & girls], 'VB', 'GirlVB')
    if season=='Spring':
        # (search pattern, standardized sport value, sheet name)
        for patt, sportname, sheetname in [
                ('baseball', 'Baseball', 'Baseball'),
                ('softball', 'Softball', 'Softball'),
                ('t-ball', 'Tball', 'Tball'),
                ('track', 'Track', 'Track')]:
            mask = df['Sport'].str.contains(patt, case=False, na=False)
            _writesheet(df.loc[mask], sportname, sheetname)
    if season=='Winter': # currently only basketball (no mask by sport)
        df = df.copy()
        df['Sport']='Basketball' # set to std name
        Basketball=assignteams(df, Teams, sport='Basketball')
        for gpatt, sheetname in [('f','GirlBasketball'), ('m','BoyBasketball')]:
            thismask = Basketball['Gender'].str.contains(gpatt, case=False, na=False)
            subset = organizecontacts(Basketball.loc[thismask].copy())
            subset.to_excel(writer, sheet_name=sheetname, index=False)
    writer.save() # saves xls file with all modified data
    return
def organizecontacts(df):
    '''Arrange a sport-gender signup slice for output to the excel signup
    summary file.

    Adds an empty Team column, sorts by grade with kindergarten first, and
    limits/orders the output columns.

    Args:
        df: signup slice containing at least the columns listed in mycols
            (except Team, which is added here)
    Returns:
        new frame restricted to mycols, sorted by grade
    '''
    mycols=['First', 'Last', 'Grade', 'Gender', 'School', 'Phone', 'Text','Email', 'Phone2', 'Text2', 'Email2', 'Team', 'Plakey', 'Famkey', 'Family']
    df=df.copy() # don't mutate the caller's frame
    df['Team']=''
    # temporarily map K to 0 so kindergarten sorts before grade 1
    df.Grade=df.Grade.replace('K',0)
    df=df.sort_values(['Grade'], ascending=True)
    df.Grade=df.Grade.replace(0,'K')
    # final column selection also drops any extra columns (the old
    # dropcolumns call here was flagged "not working" and was redundant)
    df=df[mycols]
    return df
#%% Saving of various files
# Persist the migrated tables back to csv.  Note the reads above used
# encoding='cp437' but these writes use to_csv's default encoding.
famcontact.to_csv('family_contact.csv', index=False)
parents.to_csv('parents.csv', index=False)
players.to_csv('players.csv', index=False)
|
tkcroat/SC | UnderDev/SC_tk_underdev.py | <filename>UnderDev/SC_tk_underdev.py
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 15 09:43:09 2017
@author: tkc
"""
import pandas as pd
import smtplib
import numpy as np
import datetime
import tkinter as tk
import glob
import sys
import textwrap
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
if 'C:\\Users\\tkc\\Documents\\Python_Scripts\\SC' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts\\SC')
from SC_signup_functions import findcards
import math
#%%
# NOTE(review): scratch driver call — emailparent_tk, teams, signupfile and
# year are not defined in this module; presumably loaded interactively
# before running this cell.
emailparent_tk(teams, signupfile, year)
# 8/11/17 updating interactive approval of family contact changes
# not sure about status
def updatefamcon_tk(row, famcontact, **upkwargs):
    ''' Interactive approval of family contact changes
    changes directly made to famcontacts (but not yet autosaved)
    upkwargs: phone, email, address
    '''
    # NOTE(review): under-development GUI — several names used below
    # (DOB1, DOB2, ChooseDOB1, ChooseDOB2, commonschools, ser, players)
    # are never defined in this scope; the function cannot run as-is.
    root = tk.Tk()
    root.title('Update family contact info')
    choice=tk.StringVar() # must be define outside of event called functions
    rownum=0
    mytxt='Family: '+row.Family+' # '+str(row.Plakey)
    tk.Label(root, text=mytxt).grid(row=rownum, column=0)
    rownum+=1
    # Use listbox of common schools?
    if 'parlist' in upkwargs: # indicates new parent found
        parlist=upkwargs.get('parlist',[])
        # NOTE(review): listvariable expects a tk variable, not a plain
        # list, and widget p is never gridded — confirm intent
        p=tk.Listbox(master=root, listvariable=parlist)
    # create and display DOB variables
    def add1(event):
        DOB.set(datetime.datetime.strftime(DOB1,'%m/%d/%y'))
    def add2(event):
        DOB.set(datetime.datetime.strftime(DOB2,'%m/%d/%y'))
    DOB=tk.StringVar()
    DOB.set(datetime.datetime.strftime(DOB1,'%m/%d/%y')) # defaults to original
    tk.Label(root, text='Update date of birth?').grid(row=rownum, column=0)
    mytxt='current DOB:'+datetime.datetime.strftime(DOB1,'%m/%d/%y')
    b=tk.Button(master=root, text=mytxt)
    # NOTE(review): buttons bind ChooseDOB1/ChooseDOB2 but the local
    # handlers are named add1/add2 — probably meant to bind those
    b.bind('<Button-1>', ChooseDOB1)
    b.grid(row=rownum, column=1)
    mytxt='New DOB:'+datetime.datetime.strftime(DOB2,'%m/%d/%y')
    b=tk.Button(master=root, text=mytxt)
    b.bind('<Button-1>', ChooseDOB2)
    b.grid(row=rownum, column=2)
    tk.Entry(master=root, textvariable=DOB).grid(row=rownum, column=3)
    rownum+=1
    if 'school' in upkwargs:
        school=tk.StringVar()
        school.set(row.School) # default to existing value
        tk.Label(root, text='Update school?').grid(row=rownum, column=0)
        rownum+=1
        def newschool(event):
            school.set(row.School_n)
        def oldschool(event):
            school.set(row.School)
        def pickschool(event):
            # double-click to pick standard school choice
            items=lb.curselection()[0] # gets selected position in list
            school.set(commonschools[items])
        tk.Entry(root, textvariable=school).grid(row=rownum, column=2)
        mytxt='new school:'+str(row.School_n)
        b=tk.Button(master=root, text=mytxt)
        b.bind('<Button-1>', newschool)
        b.grid(row=rownum, column=1)
        mytxt='existing school:'+str(row.School)
        b=tk.Button(master=root, text=mytxt)
        b.bind('<Button-1>', oldschool)
        b.grid(row=rownum, column=0)
        # also include selectable listbox of common school choices
        lb=tk.Listbox(master=root, selectmode=tk.SINGLE)
        lb.bind("<Double-Button-1>", pickschool)
        lb.grid(row=rownum, column=3)
        for i,sch in enumerate(commonschools):
            lb.insert(tk.END, sch)
        rownum+=1
    # Now set up select/close buttons
    def skip(event):
        choice.set('skip')
        root.destroy()
    def change(event):
        choice.set('change')
        root.destroy()
    f=tk.Button(root, text='Skip')
    f.bind('<Button-1>', skip)
    f.grid(row=rownum, column=0)
    g=tk.Button(root, text='Change')
    g.bind('<Button-1>', change)
    g.grid(row=rownum, column=1)
    root.mainloop()
    mychoice=choice.get()
    if mychoice=='change':
        # Find matching row for family (needed for all changes below)
        famkey=row.Famkey
        match=famcontact[famcontact['Famkey']==famkey]
        # NOTE(review): condition looks inverted — index[0] on an empty
        # match raises IndexError; presumably should be len(match)==1
        if len(match)==0:
            thisind=match.index[0]
        else:
            print('Problem finding unique entry for famkey', str(famkey))
            return famcontact # return unaltered
        # Direct update of parent list
        parlist=parlist[0:3] # limit to 3 entries
        while len(parlist)<3:
            parlist.append([np.nan,np.nan]) # pad with nan entries if necessary
        # now reset parent name entries
        for i in range(1,4): # reset 3 existing parents entries
            fname='Pfirst'+str(i)
            lname='Plast'+str(i)
            ser=ser.set_value(fname,parlist[i-1][0])
            ser=ser.set_value(lname,parlist[i-1][1])
        try:
            # make changes directly to players after finding correct index using plakey
            plakey=row.Plakey
            match=players[players['Plakey']==plakey]
            thisind=match.index[0]
            if 'school' in upkwargs:
                players=players.set_value(thisind,'School',school.get())
            if 'DOB' in upkwargs:
                newDOB=datetime.datetime.strptime(DOB.get(),'%m/%d/%y')
                players=players.set_value(thisind,'DOB',newDOB)
    # NOTE(review): this except is indented shallower than its try above —
    # an IndentationError as written; re-indent when finishing this function
    except:
        print('Error updating info for', row.Plakey, row.First, row.Last)
    return famcontact
def update_contact(ser, famcontact):
    '''Update phone/text, e-mail and parent-name lists for one family from
    google-drive signup entries (columns suffixed _n); existing fam_contact
    values are listed first and new values are merged toward the front.

    Fixes vs. prior revision: upkwargs is now initialized locally; the
    upkwargs.update('key':val) syntax errors are corrected to dicts; the
    Pfirst2_n nan-check parenthesis bug is fixed; stale [number, nan] phone
    entries are detected by value (nan != nan broke direct list membership);
    e-mail reordering compares lower-cased addresses; the phone list keeps
    4 entries (was truncated to 3 then padded back to 4 with nans).

    Args:
        ser: pandas Series for a single family; modified and returned.
        famcontact: family contacts dataframe (unused here; kept for
            call-signature compatibility with other update functions).
    Returns:
        the updated Series.
    '''
    upkwargs = {}  # collects changes that need interactive approval
    thisfam = ser.Family
    # Build existing phone list as [number, textable Y/N] pairs
    phonelist = []
    for i in range(1, 5):  # four phone/text column pairs (Phone1..Phone4)
        phname = 'Phone' + str(i)
        txtname = 'Text' + str(i)
        if str(ser[phname]) != 'nan':
            phonelist.append([ser[phname], ser[txtname]])

    def find_stale(number):
        '''Return existing [number, nan] entry (number known but no text
        flag) or None; direct membership fails because nan != nan.'''
        for entry in phonelist:
            if entry[0] == number and str(entry[1]) == 'nan':
                return entry
        return None

    # New google-drive primary phone (required entry on the form)
    if str(ser.Phone1_n) != 'nan':
        if [ser.Phone1_n, ser.Text1_n] not in phonelist:
            stale = find_stale(ser.Phone1_n)
            if stale is not None:  # same number w/o text flag: auto-replace
                phonelist.remove(stale)
                phonelist.insert(0, [ser.Phone1_n, ser.Text1_n])
            else:  # brand-new number needs interactive approval
                upkwargs.update({'phone1': [ser.Phone1_n, ser.Text1_n]})
        else:  # already known: move pair to primary position
            phonelist.insert(0, phonelist.pop(phonelist.index([ser.Phone1_n, ser.Text1_n])))
    # Secondary google-drive phone (auto-added when new, per original logic)
    if str(ser.Phone2_n) != 'nan':
        if [ser.Phone2_n, ser.Text2_n] not in phonelist:
            stale = find_stale(ser.Phone2_n)
            if stale is not None:  # remove # present but w/o text indication
                phonelist.remove(stale)
            phonelist.insert(1, [ser.Phone2_n, ser.Text2_n])
            print('Added phone ', str(ser.Phone2_n), 'for family', thisfam)
        else:  # pair already present; flagged for review (mirrors original)
            upkwargs.update({'phone2': [ser.Phone2_n, ser.Text2_n]})
    # Construct existing list of known email addresses (lower-cased)
    emaillist = []
    for i in range(1, 4):  # three e-mail columns
        emailname = 'Email' + str(i)
        if str(ser[emailname]) != 'nan':
            emaillist.append(ser[emailname].lower())
    # New primary e-mail from google drive
    if str(ser.Email) != 'nan' and '@' in ser.Email:  # real gd-named email
        if ser.Email.lower() not in emaillist:
            emaillist.insert(0, ser.Email.lower())
            print('Added email ', str(ser.Email.lower()), 'for family', thisfam)
        else:  # already present: move to first position (compare lower-case)
            emaillist.insert(0, emaillist.pop(emaillist.index(ser.Email.lower())))
    # Secondary new e-mail
    if str(ser.Email2_n) != 'nan' and '@' in ser.Email2_n:
        if ser.Email2_n.lower() not in emaillist:
            emaillist.insert(1, ser.Email2_n.lower())
            print('Added email', ser.Email2_n.lower(), 'for family', thisfam)
    # Record updated e-mail list (max 3 entries, nan-padded)
    emaillist = emaillist[0:3]
    while len(emaillist) < 3:
        emaillist.append(np.nan)
    for i in range(1, 4):
        ser['Email' + str(i)] = emaillist[i - 1]
    # Update list of parent names (max 3 entries) from family contacts
    parlist = []
    for i in range(1, 4):
        fname = 'Pfirst' + str(i)
        lname = 'Plast' + str(i)
        if str(ser[fname]) != 'nan':
            parlist.append([ser[fname], ser[lname]])  # [first, last] pairs
    if str(ser.Pfirst1_n) != 'nan':  # guard: required entry may still be blank
        if [ser.Pfirst1_n, ser.Plast1_n] not in parlist:
            upkwargs.update({'newpar1': [ser.Pfirst1_n, ser.Plast1_n]})
            upkwargs.update({'parlist': parlist})
            parlist.insert(0, [ser.Pfirst1_n, ser.Plast1_n])
            print('added parent', ser.Pfirst1_n, ser.Plast1_n, 'for family', thisfam)
        else:  # move pair to first position
            parlist.insert(0, parlist.pop(parlist.index([ser.Pfirst1_n, ser.Plast1_n])))
    if str(ser.Pfirst2_n) != 'nan':  # fixed: was str(ser.Pfirst2_n!='nan')
        if [ser.Pfirst2_n, ser.Plast2_n] not in parlist:
            upkwargs.update({'newpar2': [ser.Pfirst2_n, ser.Plast2_n]})
            upkwargs.update({'parlist': parlist})
            parlist.insert(1, [ser.Pfirst2_n, ser.Plast2_n])
    # Interactive approval hook for anything flagged above
    if len(upkwargs) > 0:
        if 'phone1' in upkwargs or 'phone2' in upkwargs:
            upkwargs.update({'phonelist': phonelist})  # list after alterations
        # TODO need to be careful about incorporating both auto-approved and tk approved changes
    # Persist phone list (max 4 entries; older ones dropped)
    phonelist = phonelist[0:4]  # fixed: was [0:3], silently losing slot 4
    while len(phonelist) < 4:
        phonelist.append([np.nan, np.nan])  # pad with nan entries if necessary
    for i in range(1, 5):  # reset 4 phone/text column pairs
        ser['Phone' + str(i)] = phonelist[i - 1][0]
        ser['Text' + str(i)] = phonelist[i - 1][1]
    # Set parish of registration only if blank (keeps parish names consistent)
    if str(ser.Parish_registration) == 'nan' and str(ser.Parish) != 'nan':
        ser['Parish_registration'] = ser.Parish
    return ser
# TODO separate e-mail recruits script... hasn't this been done already
def email_recruits(Recruits, emailtitle, messagefile, smtpObj):
    '''Send a personalized recruiting e-mail to each player in Recruits.

    NOTE(review): this code previously dangled at module level with a broken
    "for in" line and a bare return, so the module could not even be parsed;
    it is wrapped in a function here.  Relies on getemailadds and
    makerecmessage defined elsewhere in this file -- confirm before use.

    Args:
        Recruits: dataframe of recruits (rows without Email1 are skipped)
        emailtitle: subject template; $FIRST is replaced with player's name
        messagefile: message body template passed to makerecmessage
        smtpObj: an already-connected smtplib.SMTP instance
    '''
    Recruits = Recruits[pd.notnull(Recruits['Email1'])]  # drop rows w/o e-mail
    Recs = Recruits.groupby(['First', 'Last'])
    for pla, rows in Recs:
        recipients = getemailadds(rows.iloc[0])  # list of recipients
        first = rows.iloc[0]['First']
        thistitle = emailtitle.replace('$FIRST', first)
        # create custom email message (can have multiple sports in df)
        thismess = makerecmessage(rows, recipients, thistitle, messagefile)
        thismess = thismess.encode('utf-8')
        for i, addr in enumerate(recipients):  # Send message to each in list
            try:
                smtpObj.sendmail('<EMAIL>', addr, thismess)
                print('Message sent to ', addr)
            except Exception:  # narrowed from bare except; log and continue
                print('Message to ', addr, ' failed.')
    return
|
tkcroat/SC | pkg/SC_billing_functions.py | <reponame>tkcroat/SC<filename>pkg/SC_billing_functions.py
# -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:30:01 2016
SC billing functions
@author: tkc
"""
#%%
import pandas as pd
import datetime
import smtplib
import re
import numpy as np
from email.mime.text import MIMEText
import tkinter as tk
import math
import glob
#%%
def sendbills_tk(Mastersignups, paylog, famcontact, players, season, year, teams):
    ''' Interface for billing email messages to parents (non-generic)

    Builds a tkinter dialog for choosing message templates, uniform/billing
    files, the team subset and delivery options (e-mail and/or SMS), then
    delegates the actual sending to sendebills.

    Args:
        Mastersignups, paylog, famcontact, players: main data tables
        season, year: billing period
        teams: teams dataframe used to build the team checkbox grid
    Returns:
        (ebilllist, skiplist) from sendebills, or None if aborted.
    TODO test recruit, missing unis, unireturn
    '''
    # first print out existing info in various lines
    root = tk.Tk()
    root.title('Send e-mail bills to parents')
    unifilename=tk.StringVar()
    billfilename=tk.StringVar()
    # Look for most recent missing uniform list
    try:
        unifiles=glob.glob('missingunilist*') # find most recent uniform file name
        if len(unifiles)>1:
            unifile=findrecentfile(unifiles) # return single most recent file
        else:
            unifile=unifiles[0]
        # find most recent missing uni file name
        unifilename.set(unifile)
    except Exception: # handle path error / no files found
        unifilename.set('missingunilist.csv')
    # Look for most recent billing files
    try:
        billfiles=glob.glob('billlist*') # find most recent billing file name
        if len(billfiles)>1:
            billfile=findrecentfile(billfiles) # return single most recent file
        else:
            billfile=billfiles[0]
        # find most recent billing file name
        billfilename.set(billfile)
    except Exception: # handle path error / no files found
        billfilename.set('billlist.csv')
    emailtitle=tk.StringVar() # e-mail title
    messfile=tk.StringVar() # text of e-mail message
    SMSmessfile=tk.StringVar() # find replace text for SMS message?
    mtype=tk.StringVar() # uniform night or generic
    ''' Already baked into billlist creation
    pastseasons=tk.IntVar()
    pastseasons.set(1)
    '''
    oldunibool=tk.BooleanVar()
    oldunibool.set(True)
    newunibool=tk.BooleanVar()
    newunibool.set(True)
    feesbool=tk.BooleanVar() # ask for outstanding fees
    feesbool.set(True)
    extraname=tk.StringVar() # name for additional text entry box (various uses mostly filenames)
    extraname.set('Extra file name') # default starting choice
    extravar=tk.StringVar() # value for additional text entry box
    sendemailbool=tk.BooleanVar() # regular send e-mail option
    sendemailbool.set(True)
    sendSMSbool=tk.BooleanVar() # send e-mail to SMS option
    sendSMSbool.set(False)
    choice=tk.StringVar() # abort / test / kcsendtest / send
    # E-mail title and message file name
    tk.Label(root, text='Title for e-mail').grid(row=0, column=0)
    titleentry=tk.Entry(root, textvariable=emailtitle)
    titleentry.config(width=50)
    titleentry.grid(row=0, column=1)
    tk.Label(root, text='messagefile').grid(row=1, column=0)
    messentry=tk.Entry(root, textvariable=messfile)
    messentry.config(width=50)
    messentry.grid(row=1, column=1)
    tk.Label(root, text='SMSmessagefile').grid(row=2, column=0)
    messentry=tk.Entry(root, textvariable=SMSmessfile)
    messentry.config(width=50)
    messentry.grid(row=2, column=1)
    #tk.Label(root, text='# of past seasons to include').grid(row=3, column=0)
    #tk.Entry(root, textvariable=pastseasons).grid(row=3, column=1)
    tk.Label(root, text='Uniform file name').grid(row=3, column=0)
    unientry=tk.Entry(root, textvariable=unifilename)
    unientry.grid(row=3, column=1)
    tk.Label(root, text='Billing file name').grid(row=4, column=0)
    billentry=tk.Entry(root, textvariable=billfilename)
    billentry.grid(row=4, column=1)
    # BUG FIX: tk.Entry has no 'text' option (was text=extraname, which raises
    # TclError at construction); textvariable tracks the StringVar as intended
    extranameentry=tk.Entry(root, textvariable=extraname)
    extranameentry.grid(row=5, column=0)
    extravalentry=tk.Entry(root, textvariable=extravar)
    extravalentry.grid(row=5, column=1)
    def Olduniopts():
        ''' Display relevant choices for old uniforms'''
        if oldunibool.get()==True:
            unientry.config(state=tk.NORMAL)
        else:
            unientry.config(state=tk.DISABLED)
    def Feesopts():
        ''' Display relevant choices for fees '''
        if feesbool.get()==True:
            billentry.config(state=tk.NORMAL)
        else:
            billentry.config(state=tk.DISABLED)
    tk.Checkbutton(root, variable=feesbool, text='Ask for fees?', command=Feesopts).grid(row=0, column=2)
    tk.Checkbutton(root, variable=oldunibool, text='Ask for old uni return?', command=Olduniopts).grid(row=1, column=2)
    tk.Checkbutton(root, variable=newunibool, text='Inform about new unis needed?').grid(row=2, column=2)
    tk.Checkbutton(root, variable=sendemailbool, text='Send email bills?').grid(row=3, column=2)
    tk.Checkbutton(root, variable=sendSMSbool, text='Send email bills via SMS?').grid(row=4, column=2)
    def Uninightopts():
        ''' Display relevant choices for team assignment notification/cyc card/ short team recruiting '''
        messfile.set('ebill_uninight_allseasons.txt')
        SMSmessfile.set('ebill_uninight_allseasons_SMS.txt')
        emailtitle.set('Cabrini Sports Uniform Night Info for $UNIDATETIME')
        extranameentry.config(state=tk.NORMAL)
        extravalentry.config(state=tk.NORMAL)
        extraname.set('Uniform night date-time')
        extravar.set('Sun 11/15/17 from 12-2 PM')
    def Ebillopts():
        ''' Display relevant choices for generic e-billing'''
        messfile.set('ebill_generic.txt')
        emailtitle.set('Please pay your Cabrini sports fees.')
        extranameentry.config(state=tk.DISABLED)
        extravalentry.config(state=tk.DISABLED)
        SMSmessfile.set('ebill_generic_SMS.txt')
        extraname.set('n/a')
        extravar.set('n/a')
    # Choose generic billing or uniform night billing
    tk.Radiobutton(root, text='Uniform night billing', value='Uninight', variable = mtype, command=Uninightopts).grid(row=7, column=0)
    tk.Radiobutton(root, text='Generic ebilling', value='Ebill', variable = mtype, command=Ebillopts).grid(row=7, column=1)
    # Specific team selector section using checkboxes
    teamdict=shortnamedict(teams)
    teamlist=[] # list of tk bools for each team
    # Make set of bool/int variables for each team
    for i, val in enumerate(teamdict):
        teamlist.append(tk.IntVar())
        if '#' not in val:
            teamlist[i].set(1) # Cabrini teams checked by default
        else:
            teamlist[i].set(0) # transfer team
    rownum=8
    # make checkbuttons for each team
    for i, val in enumerate(teamdict):
        thisrow=i%5+1+rownum # five rows per column
        thiscol=i//5
        thisname=teamdict.get(val,'')
        tk.Checkbutton(root, text=thisname, variable=teamlist[i]).grid(row=thisrow, column=thiscol)
    rownum+=math.ceil(len(teamlist)/5)+2
    # Decision buttons bottom row
    def chooseall(event):
        ''' Select all teams '''
        for i, val in enumerate(teamdict):
            teamlist[i].set(1)
    def clearall(event):
        ''' deselect all teams '''
        for i, val in enumerate(teamdict):
            teamlist[i].set(0)
    def abort(event):
        choice.set('abort')
        root.destroy()
    def test(event):
        choice.set('test')
        root.destroy()
    def KCsend(event):
        ''' Live send but only send to <EMAIL> or <EMAIL> '''
        choice.set('kcsendtest')
        root.destroy()
    def send(event):
        choice.set('send')
        root.destroy()
    rownum+=1
    d=tk.Button(root, text='All teams')
    d.bind('<Button-1>', chooseall)
    d.grid(row=rownum, column=0)
    d=tk.Button(root, text='Clear teams')
    d.bind('<Button-1>', clearall)
    d.grid(row=rownum, column=1)
    d=tk.Button(root, text='Abort')
    d.bind('<Button-1>', abort)
    d.grid(row=rownum, column=2)
    d=tk.Button(root, text='Test')
    d.bind('<Button-1>', test)
    d.grid(row=rownum, column=3)
    d=tk.Button(root, text='KC send test')
    d.bind('<Button-1>', KCsend)
    d.grid(row=rownum, column=4)
    d=tk.Button(root, text='Send')
    d.bind('<Button-1>', send)
    d.grid(row=rownum, column=5)
    root.mainloop()
    mychoice=choice.get()
    if mychoice!='abort':
        kwargs={}
        if mychoice=='kcsendtest':
            mychoice='send'
            kwargs.update({'kcsendtest':True})
        kwargs.update({'choice':mychoice}) # test, kcsendtest, or send
        emailtitle=emailtitle.get()
        # load blank message
        messagefile='messages\\'+messfile.get()
        try:
            with open(messagefile,'r') as file:
                blankmessage=file.read()
        except Exception:
            print("Couldn't open message file(s)")
        # load alternate SMS template once (a second, unguarded re-read of the
        # same file further down was removed as redundant/crash-prone)
        if sendSMSbool.get():
            messagefile='messages\\'+SMSmessfile.get()
            try:
                with open(messagefile,'r') as file:
                    blankSMS=file.read()
                kwargs.update({'SMS':blankSMS}) # pass blank alt SMS message in kwargs
            except Exception:
                print('Failed load of alt SMS message.')
        try:
            billlist=pd.read_csv(billfilename.get(), encoding='cp437')
        except Exception:
            print("Couldn't open billing list")
        '''
        # Filter bill list using teams? Necessary?
        selteams=[]
        for i, val in enumerate(teamdict):
            if teamlist[i].get()==1:
                selteams.append(val)
        # Filter teams based on checkbox input
        teams=teams[teams['Team'].isin(selteams)]
        # drop duplicates in case of co-ed team (m and f entries)
        teams=teams.drop_duplicates('Team')
        '''
        # handle the boolean options
        if oldunibool.get():
            kwargs.update({'oldunis':True})
        if newunibool.get():
            kwargs.update({'newunis':True})
        if feesbool.get():
            kwargs.update({'fees':True})
        if sendemailbool.get():
            kwargs.update({'ebills':True})
        ebilllist, skiplist = sendebills(billlist, Mastersignups, season, year, emailtitle, blankmessage, **kwargs)
        return ebilllist, skiplist
def loadoldteams(seasons, years):
    ''' For retroactive billing, load teams from any prior season/year
    combination or from lists of seasons & years.

    Args:
        seasons: single season name ('Fall'/'Winter'/'Spring') or list of them
        years: single starting school year (int) or list of years
    Returns:
        dataframe of matching rows from the Oldteams sheet of Teams_coaches.xlsx
    '''
    # sheet_name is the current pandas keyword (sheetname was removed in 1.0)
    teams = pd.read_excel('Teams_coaches.xlsx', sheet_name='Oldteams')
    sportsdict = {'Fall': ['VB', 'Soccer'], 'Winter': ['Basketball'],
                  'Spring': ['Track', 'Softball', 'Baseball', 'T-ball']}
    if isinstance(seasons, str):
        sportlist = sportsdict.get(seasons)
    else:  # construct sport list from list of seasons
        sportlist = []
        for seas in seasons:
            sportlist.extend(sportsdict.get(seas))
    if isinstance(years, int):
        years = [years]  # convert single year to list of years
    teams = teams.loc[(teams['Sport'].isin(sportlist)) & (teams['Year'].isin(years))]
    return teams
def getpayments(thispay, priorpay):
    '''Summarize a single family's payments for current and prior seasons.

    Args:
        thispay: dataframe slice with the family's current season payments
        priorpay: dataframe slice with the family's prior-season payments
    Returns:
        paykeys: list of int payment keys covering both periods
        currpayment: total paid this season
        priorpayment: total paid in prior seasons
        paydetail: multi-line string describing each payment
        paytext: one-item list w/ SMS-ready summary (empty if nothing paid)
    # TODO convert paykey permanently to int (avoid possible conversion problems)
    # TODO also maybe check if payment date in expected range
    '''
    paykeys = []
    currpayment = 0.0  # defaults when no payments found
    priorpayment = 0.0
    paydetail = ''
    paytext = []  # fragment list for SMS message
    if len(thispay) > 0:
        paykeys = [int(k) for k in thispay.Paykey.unique()]
        currpayment = thispay.Amount.sum()
        for _, payrow in thispay.iterrows():
            when = payrow['Date'].to_pydatetime()  # timestamp -> datetime
            paydetail += '{} {} payment: ${} on {} \n'.format(
                payrow['Season'], str(payrow['Year']), int(payrow['Amount']),
                datetime.datetime.strftime(when, "%m/%d/%Y"))
    if len(priorpay) > 0:
        paykeys.extend(int(k) for k in priorpay.Paykey.unique())
        priorpayment += priorpay.Amount.sum()  # accumulate past payments
        for _, payrow in priorpay.iterrows():
            when = payrow['Date'].to_pydatetime()
            paydetail += '{} {} payment: ${} on {} \n'.format(
                payrow['Season'], int(payrow['Year']), int(payrow['Amount']),
                datetime.datetime.strftime(when, "%m/%d/%Y"))
    if priorpayment + currpayment > 0:  # SMS string w/ combined payments
        paytext.append('minus $' + str(int(priorpayment + currpayment)) + ' prior payments received')
    return paykeys, currpayment, priorpayment, paydetail, paytext
def getmissingunis(df, famkey):
    '''Find one family's issued-but-unreturned uniforms (main billing loop).

    Args:
        df: master signups (needs Issue date / UniReturnDate / Famkey cols)
        famkey: family key to filter on
    Returns:
        unistr: printable table of outstanding uniforms ('' if none)
        unikeys: list of signup keys (SUkey) with outstanding uniforms
        oldunitext: one-item list w/ SMS fragment (empty if none)
    '''
    issued = df.dropna(subset=['Issue date'])  # only signups w/ uniform issued
    outstanding = issued.loc[pd.isnull(issued['UniReturnDate'])]  # not returned
    outstanding = outstanding[outstanding['Famkey'] == famkey]
    unikeys = np.ndarray.tolist(outstanding.SUkey.unique())
    unistr = ''
    oldunitext = []
    if len(outstanding) == 0:
        return unistr, unikeys, oldunitext
    # e-bill table of outstanding uniforms
    unistr = 'Old uniforms to return\nPlayer\tSport\tUni #\tTeam\n'
    fragments = []  # per-uniform fragments for the SMS string
    for _, unirow in outstanding.iterrows():
        fragments.append(unirow['First'] + ' ' + unirow['Sport'].lower())
        unistr += '\t'.join([unirow['First'], unirow['Sport'],
                             str(unirow['Uniform#']), unirow['Team']]) + '\n'
    suffix = ' uniform'
    if len(fragments) > 1:
        fragments[-1] = ' and ' + str(fragments[-1])  # prepend 'and' to last
        suffix += 's'
    # SMS string e.g. " return Ben soccer and Ethan vb uniforms"
    oldunitext = [' return ' + ', '.join(fragments) + suffix]
    return unistr, unikeys, oldunitext
def getnamesschool(plalist, players):
    '''Return (schools, shortened names) for a list of player keys, with
    Cabrini listed first.  Called by createbilllist.

    Args:
        plalist: list of Plakey values
        players: players dataframe (Plakey/School/First/Last columns)
    Returns:
        schoollist: unique schools, Cabrini moved to position 0 if present
        namelist: 'First L' style short names, one per matching player
    '''
    roster = players[players['Plakey'].isin(plalist)]
    schoollist = list(roster.School.unique())
    if "Cabrini" in schoollist:
        # move Cabrini to first entry
        schoollist.insert(0, schoollist.pop(schoollist.index('Cabrini')))
    # shorten player names to first name + last initial
    namelist = [prow['First'] + ' ' + prow['Last'][0]
                for _, prow in roster.iterrows()]
    return schoollist, namelist
def calccharges(df, season):
    '''Compute one family's fee for a single season's signups.

    Cheap sports (T-ball, Track) cost $10, all others $30; the bill is
    capped at the $75 family maximum when there are 3+ signups.

    Args:
        df: signups for one family for one season
        season: season name, used only in the returned text fragment
    Returns:
        (totalcharge, SMS text fragment like '$75 for Fall')
    '''
    cheapsports = ['T-ball', 'Track']
    ischeap = df['Sport'].isin(cheapsports)
    ncheap = int(ischeap.sum())
    nregular = len(df) - ncheap
    totalcharge = 10 * ncheap + 30 * nregular
    if len(df) > 2 and totalcharge > 75:
        totalcharge = 75.0  # family maximum per season
    currtext = '$' + str(int(totalcharge)) + ' for ' + season
    return totalcharge, currtext
def calcpriorcharges(df):
    '''Compute the total owed for one family's PRIOR-season signups.

    Each signup's season is derived from its sport, then each season-year
    is billed separately ($10 cheap sports, $30 others, $75 per-season cap).

    Args:
        df: one family's prior-season signups (Sport and Year columns);
            a Season column is added to it as a side effect.
    Returns:
        totalcharge: total across all prior seasons (current season excluded)
        paystr: single-line string detailing each season's signups/charge
        priortext: list of short '$X for SeasonYear' strings (for SMS)
    '''
    sportsdict = {'Fall': ['VB', 'Soccer'], 'Winter': ['Basketball'],
                  'Spring': ['Track', 'Softball', 'Baseball', 'T-ball']}
    cheapsports = ['T-ball', 'Track']
    df['Season'] = ''  # new column: season derived from sport
    totalcharge = 0
    paystr = ''
    for index, row in df.iterrows():
        sport = row['Sport']
        # look up which season this sport belongs to
        seasonlist = [seas for seas, sports in sportsdict.items() if sport in sports]
        df.loc[index, 'Season'] = seasonlist[0]  # set_value removed in pandas 1.0
    # now calculate charges for each season-year separately
    yrs = [int(i) for i in df.Year.unique()]
    priortext = []  # strings describing all prior season fees (not current)
    for year in yrs:
        yeardf = df[df['Year'] == year]
        for seas in yeardf.Season.unique():  # calc for each season separately
            thisseason = yeardf[yeardf['Season'] == seas]
            thismask = thisseason['Sport'].isin(cheapsports)
            # BUG FIX: the original rebound the cheapsports *list* to this
            # frame, corrupting the isin() lookup on subsequent iterations
            cheapdf = thisseason.loc[thismask]
            normaldf = thisseason.loc[~thismask]
            # count and list sports from cheap and normal groups
            sportstr = []
            for sport, count in cheapdf['Sport'].value_counts().items():  # iteritems removed in pandas 2.0
                sportstr.append(str(count) + 'x ' + sport)
            for sport, count in normaldf['Sport'].value_counts().items():
                sportstr.append(str(count) + 'x ' + sport)
            sportstr = ', '.join(sportstr)
            thischarge = 10 * len(cheapdf) + 30 * len(normaldf)
            if thischarge > 75:
                thischarge = 75  # reduce to max family charge per season
            totalcharge += thischarge  # grand total of all prior seasons
            priortext.append('$' + str(int(thischarge)) + ' for ' + seas + str(year))
            paystr += ' ' + seas + ' ' + str(year) + ' ' + sportstr + ': $' + str(thischarge) + ';'
    return totalcharge, paystr, priortext
def getdepositinfo(df, famkey, depneeded):
    '''Summarize one family's active uniform deposits from the payment log
    (main billing loop).  Only deposits logged after the most recent refund
    (a negative value in the Deposit column) count as active.  Differs from
    makemissingunilog in that this also handles new uniforms to issue.

    Args:
        df: payments logbook dataframe
        famkey: family key
        depneeded: deposit required for newly issued uniforms
    Returns:
        depkeys: paykeys of active deposit rows
        depamt: total active deposit on file
        depstr: multi-line description for the e-bill
        deptext: SMS fragment list (only when a deposit must be delivered)
    '''
    deps = df[df['Famkey'] == famkey].dropna(subset=['Deposit'])
    # keep only rows after the last negative (refunded) deposit entry
    negatives = [j for j in range(len(deps)) if deps.iloc[j]['Deposit'] < 0]
    if negatives:
        deps = deps.iloc[negatives[-1] + 1:]
    depkeys = []
    deptext = []  # list for SMS text message
    depstr = ''
    depamt = deps.Amount.sum()  # works even on empty frames
    if depamt == 0:  # handle both no-deposit-on-file situations
        if depneeded == 0:  # no Cabrini uniform issues: nothing to report
            return depkeys, depamt, depstr, deptext
        # new uniform deposit required
        depstr += '$' + str(int(depneeded)) + ' refundable uniform deposit required (separate check is preferred)'
        deptext.append('deliver a $' + str(int(depneeded)) + ' uniform deposit')
        return depkeys, depamt, depstr, deptext
    # deposits on file (depamt>0): list their details
    depkeys.extend(np.ndarray.tolist(deps.Paykey.unique()))
    depstr += 'Uniform deposits on file:\n'
    for _, deprow in deps.iterrows():
        depstr += str(int(deprow['Amount'])) + '\t'
        depstr += deprow['Deptype'] + '\t'  # cash, check, paypal, etc.
        depstr += datetime.date.strftime(deprow['Date'].to_pydatetime(), '%m/%d/%Y') + '\t'
        if str(deprow['Depcomment']) != 'nan':  # skip nan comments
            depstr += deprow['Depcomment'] + '\n'
        else:
            depstr += '\n'
    if depneeded == depamt:  # deposit on file matches requirement
        depstr += 'No additional deposit required if all outstanding uniforms are returned'
    elif depamt > depneeded:  # refund, hold or destroy-check situation
        deptypes = np.ndarray.tolist(deps.Deptype.unique())
        if len(deptypes) == 1 and 'cash' in deptypes:  # all-cash deposits
            depstr += '$' + str(int(depamt - depneeded)) + ' cash deposit refunded if all old uniforms are returned.'
    elif depneeded > depamt:  # some on file but more is needed
        depstr += 'Additional $' + str(int(depneeded - depamt)) + ' deposit needed: separate check preferred.'
    return depkeys, depamt, depstr, deptext
def makenewunistring(curmatch, uniteams, transferteams):
    '''Build the new-uniform section of one family's bill.

    curmatch holds the family's current-season signups with team assignment;
    each team is checked against uniteams (Cabrini teams that get issued a
    uniform, i.e. Y in the uniforms column) and transferteams (uniforms come
    from another school); anything else is treated as a Cabrini junior team.

    Args:
        curmatch: family's current signups (Team/First/Sport/SUkey columns)
        uniteams: teams whose uniforms Cabrini issues (Team column)
        transferteams: transfer teams (Team column, 'School#...' names)
    Returns:
        depstr: detailed multi-line string about new uniforms
        depneeded: total required deposit ($25 per Cabrini uniform)
        newunikeys: SUkeys of signups getting a Cabrini uniform
        unitext: SMS fragment list ('pick up ... uniform(s)')
    '''
    depneeded = 0
    newunikeys = []
    depstr = ''
    # BUG FIX: unitext was only defined inside the if-block below, raising
    # NameError at the return whenever curmatch was empty
    unitext = []
    if len(curmatch) > 0:  # new uniforms to be issued
        depstr = 'New uniforms for this season:\n'
        cabunis = pd.merge(curmatch, uniteams, how='inner', on=['Team'], suffixes=('', '_r'))
        transferunis = pd.merge(curmatch, transferteams, how='inner', on=['Team'], suffixes=('', '_r'))
        if len(cabunis) > 0:  # Cabrini uniforms will be issued
            unitextlist = []  # per-uniform strings for SMS
            depneeded = 25 * len(cabunis)  # required deposit: $25 per uniform
            newunikeys = np.ndarray.tolist(cabunis.SUkey.unique())
            # SMS string (e.g. "pick up Ben Soccer and Ethan VB uniforms")
            for index, row in cabunis.iterrows():
                unitextlist.append(row['First'] + ' ' + row['Sport'])
                depstr += row['First'] + ' \t'
                depstr += row['Sport'].lower() + '\t'
                depstr += '- Pick up at Cabrini Uniform Night\n'
            ending = ' uniform'
            if len(unitextlist) > 1:
                unitextlist[-1] = 'and ' + unitextlist[-1]  # prepend 'and'
                ending += 's'
            # easier to construct entire string if passed as list item
            unitext = ['pick up ' + ', '.join(unitextlist) + ending]
        # mention transfer-team uniform requirements
        if len(transferunis) > 0:
            for index, row in transferunis.iterrows():
                depstr += row['First'] + '\t'
                depstr += row['Sport'] + '\t'
                depstr += '- Pick up from '
                school = row['Team'].split('#')[0]  # school prefix of team name
                depstr += school + '\n'
        # Cabrini junior teams: assigned team that is neither uniform nor transfer
        jrteamlist = np.ndarray.tolist(curmatch.Team.unique())
        uniteamlist = np.ndarray.tolist(uniteams.Team.unique())
        transferteamlist = np.ndarray.tolist(transferteams.Team.unique())
        jrteamlist = [str(team) for team in jrteamlist
                      if team not in uniteamlist and team not in transferteamlist]
        jrunis = curmatch[curmatch['Team'].isin(jrteamlist)]
        if len(jrunis) > 0:
            for index, row in jrunis.iterrows():
                depstr += row['First'] + '\t'
                depstr += row['Sport'] + '\t'
                depstr += '- Use Cabrini navy uniform T-shirt (available for $10 at Uniform Night)\n'
    # depneeded is passed on to getdepositinfo for handling
    return depstr, depneeded, newunikeys, unitext
def getthisperiod(df, season, year, priorseasons):
    '''Split df rows into (current season, previous N seasons).

    Date-based slicing would be easier, but accounting here is season/year
    based.  Works on any frame with a Year column plus either a Season
    column (e.g. paylog) or a Sport column (e.g. Mastersignups, where the
    season is inferred from the sport).

    Args:
        df: dataframe to slice (paylog or master signups)
        season: current season name ('Fall'/'Winter'/'Spring')
        year: starting school year
        priorseasons: how many chronological seasons to look back
    Returns:
        (currentdf, priordf), or None if neither column exists.
    '''
    template = pd.DataFrame(columns=df.dtypes.index)  # empty frame, same cols
    seasonorder = {'Fall': 0, 'Winter': 1, 'Spring': 2}
    seasonnum = seasonorder.get(season)
    # prior seasons from the SAME school year (current season excluded)
    thisyrseasons = [s for s, num in seasonorder.items()
                     if num < seasonnum and seasonnum - num < priorseasons + 1]
    # remaining lookback spills into the PREVIOUS school year (reverse order)
    lastyrseasons = ['Spring', 'Winter', 'Fall'][0:priorseasons - len(thisyrseasons)]
    if 'Season' in df:  # paylog and similar frames w/ direct Season column
        thisyear = df[df['Year'] == year]
        currentdf = thisyear[thisyear['Season'] == season]
        priordf = pd.concat([thisyear[thisyear['Season'].isin(thisyrseasons)], template])
        lastyear = df[df['Year'] == year - 1]
        priordf = pd.concat([lastyear[lastyear['Season'].isin(lastyrseasons)], priordf])
        return currentdf, priordf
    elif 'Sport' in df:  # usually Mastersignups: infer season from sport
        sportsdict = {'Fall': ['VB', 'Soccer'], 'Winter': ['Basketball'],
                      'Spring': ['Track', 'Softball', 'Baseball', 'T-ball']}
        currentsports = sportsdict.get(season)
        thisyrsports = [sp for seas in thisyrseasons for sp in sportsdict[seas]]
        thisyear = df[df['Year'] == year]
        currentdf = thisyear[thisyear['Sport'].isin(currentsports)]
        priordf = pd.concat([thisyear[thisyear['Sport'].isin(thisyrsports)], template])
        lastyrsports = [sp for seas in lastyrseasons for sp in sportsdict[seas]]
        lastyear = df[df['Year'] == year - 1]
        priordf = pd.concat([lastyear[lastyear['Sport'].isin(lastyrsports)], priordf])
        return currentdf, priordf
    print('Error: Season and Sport columns missing from passed dataframe')
    return
def createbilllist(df, Paylog, famcontact, players, season, year, teams, priorseasons=1, fname='Billlist8Dec16.csv', **kwargs):
    ''' Build per-family billing list from signups/charges for current season and n prior seasons.
    args:
        df -- mastersignups dataframe (one row per player signup)
        Paylog -- payment logbook dataframe (Season/Year/Famkey/etc.)
        famcontact -- family contact info (merged in by Famkey)
        players -- player db (used for names/school lookup)
        season, year -- current sports season/year
        teams -- team info df (Uniforms flag, '#' marks transfer teams)
        priorseasons -- chronological lookback incl. last seasons
        fname -- output csv filename
    kwargs:
        olduni -- ask for old missing uniforms
        newuni -- info about new uniforms to be issued
    returns: fambills dataframe (one row per family), also written to fname
    unikeys/depkeys are SUkey and paykey containing outstanding uniforms and uniform deposit
    (uni info is stored in master_signups and deposit info in paylog);
    calls calccharges and calcpriorcharges separately for the set of included season-years
    '''
    season = season.title()
    season = season.strip()  # remove any spaces
    Paylog['Season'] = Paylog['Season'].str.title()
    Paylog['Season'] = Paylog['Season'].str.strip()  # remove spaces
    if len(Paylog.Season.unique()) != 3:
        print('Check payment logbook season column for improper entry.. must be Fall, Winter, or Spring')
    # Remove players that have dropped ('drop' recorded as team assignment)
    thismask = df['Team'].str.contains('drop', na=False, case=False)
    df = df.loc[~thismask]
    df = df.dropna(subset=['Team'])  # also drop those not yet assigned to a team
    CurrentSU, PriorSU = getthisperiod(df, season, year, priorseasons)  # signups in specified period
    AllSU = pd.concat([CurrentSU, PriorSU])
    Currentpay, Priorpay = getthisperiod(Paylog, season, year, priorseasons)  # payments in specified period
    # Copy currentsu df ... drop duplicates using famkey
    # Selects only active families in this billing period
    fambills = AllSU  # copy and use as template for family bills
    fambills = fambills.drop_duplicates('Famkey')  # single row per family
    fambills = fambills.reset_index(drop=True)
    # depkey is paykey containing deposit, unikey is SUkey with issued outstanding uniform
    # signups split into this season and prior season(s) with lookback set by priorseasons
    newcols = ['Feepaydetail', 'Plakeys', 'SUkeys', 'PriorSUkeys', 'Paykeys', 'Depkeys', 'Newunikeys', 'Oldunikeys', 'Unidetail', 'Textmessage', 'Teams', 'Comments', 'School', 'Players']
    for col in newcols:
        fambills[col] = ''  # init to np.nan causes later write problems so use ''
    fambills['Charges'] = 0.0
    fambills['PriorCharges'] = 0.0  # from prior sports season
    fambills['Balance'] = 0.0
    fambills['PriorBalance'] = 0.0
    fambills['CurrPayments'] = 0.0
    fambills['PriorPayments'] = 0.0
    # Get subset of teams that'll be issued new uniforms
    uniteams = teams[teams['Uniforms'] == 'Y']  # Cabrini uniform teams (also typically with hyphen)
    transferteams = teams[teams['Team'].str.contains('#')]
    # now merge in contact info from famcontact
    fambills = pd.merge(fambills, famcontact, how='inner', on=['Famkey'], suffixes=('', '_r'))
    fambills['Family'] = fambills['Family_r']  # missing from some entries in AllSU
    for index, row in fambills.iterrows():
        # get playerkeys and signup keys for this family
        famkey = fambills.iloc[index]['Famkey']
        match = AllSU[AllSU['Famkey'] == famkey]  # this family in current signups
        curmatch = CurrentSU[CurrentSU['Famkey'] == famkey]
        plalist = np.ndarray.tolist(match.Plakey.unique())  # keys of recently active players
        plalist = [i for i in plalist if str(i) != 'nan']  # shouldn't be nan but somehow occurred
        plalist = [int(i) for i in plalist]  # ensure conversion to ints
        schools, planames = getnamesschool(plalist, players)  # determine if at least one kid is from Cabrini
        tempstr = ', '.join([str(i) for i in plalist])  # comma-separated str
        # DataFrame.set_value was removed in pandas 1.0 -- use .at throughout
        fambills.at[index, 'Plakeys'] = tempstr
        sukeys = np.ndarray.tolist(curmatch.SUkey.unique())  # signup keys for this season only
        sukeys = [int(i) for i in sukeys]
        fambills.at[index, 'School'] = ', '.join(schools)
        fambills.at[index, 'Players'] = ', '.join(planames)  # list of abbrev player names
        if len(sukeys) > 0:
            tempstr = ', '.join([str(i) for i in sukeys])
            fambills.at[index, 'SUkeys'] = tempstr  # string of keys (current season only)
        teamlist = list(match.Team.unique())  # full team list for these players in specified period
        fambills.at[index, 'Teams'] = ', '.join(teamlist)  # save teams as string
        textfee = []  # list of strings for fees text message
        currentcharge, currtext = calccharges(curmatch, season)  # cost for this sports season
        textfee.append(currtext)  # info on this season's charges for text message
        priormatch = PriorSU[PriorSU['Famkey'] == famkey]  # prior signups within specified period
        textmess = []  # list of strings for text message uniform details
        priorcharge, priorfeestr, priortext = calcpriorcharges(priormatch)
        # priortext is a list of strings (one for each prior season)
        textfee.extend(priortext)  # info on prior fees for text message; merge both lists
        priorSUkeys = np.ndarray.tolist(priormatch.SUkey.unique())
        priorSUkeys = [int(i) for i in priorSUkeys]
        if len(priorSUkeys) > 0:
            tempstr = ', '.join([str(i) for i in priorSUkeys])
            fambills.at[index, 'PriorSUkeys'] = tempstr
        fambills.at[index, 'Charges'] = currentcharge  # store in billing df
        fambills.at[index, 'PriorCharges'] = priorcharge
        # Call function to generate uniform/deposit messages strings if desired
        unistuff = {}  # blank dictionary to check for optional returns
        if kwargs.get('newuni', False) or kwargs.get('olduni', False):
            textmess, unistuff = makeunidepstring(df, famkey, Paylog, curmatch, uniteams, transferteams, textmess, **kwargs)
        # note: uniforms if required are associated with each signup in mastersignups
        # Write all the optional uniform keys and such to fambills
        if 'depkeys' in unistuff:  # uniform key values from mastersignups optionally returned in list
            depkeys = unistuff.get('depkeys', [])
            tempstr = ", ".join([str(i) for i in depkeys])  # was a duplicated 'tempstr=tempstr=' assignment
            fambills.at[index, 'Depkeys'] = tempstr
        if 'oldunikeys' in unistuff:  # keys from mastersignup for old uniforms
            oldunikeys = unistuff.get('oldunikeys', [])
            tempstr = ", ".join([str(i) for i in oldunikeys])
            fambills.at[index, 'Oldunikeys'] = tempstr
        if 'newunikeys' in unistuff:
            newunikeys = unistuff.get('newunikeys', [])
            tempstr = ", ".join([str(i) for i in newunikeys])
            fambills.at[index, 'Newunikeys'] = tempstr
        if 'fullunistr' in unistuff:
            fullunistr = unistuff.get('fullunistr', '')
            fambills.at[index, 'Unidetail'] = fullunistr  # string w/ all unis (used by ebill)
        # Get payment amounts, paykeys, and paystring from current and priorpay for this family
        thispay = Currentpay[Currentpay['Famkey'] == famkey]  # this family's current payments
        priorpay = Priorpay[Priorpay['Famkey'] == famkey]  # this family's prior payments
        paykeys, currpayment, priorpayment, paydetails, paytext = getpayments(thispay, priorpay)
        textfee.extend(paytext)  # add info about prior payment to text message
        if len(paykeys) != 0:  # keep as blank if no payments
            tempstr = ', '.join([str(i) for i in paykeys])
            fambills.at[index, 'Paykeys'] = tempstr  # list of payment keys (usually 1 or 0)
        fambills.at[index, 'CurrPayments'] = currpayment  # total rec'd payments for this season
        fambills.at[index, 'PriorPayments'] = priorpayment
        fambills.at[index, 'PriorBalance'] = priorpayment - priorcharge
        balance = currpayment + priorpayment - priorcharge - currentcharge  # negative if amt owed
        fambills.at[index, 'Balance'] = balance  # update balance
        # Feepaydetail has fees from prior seasons and payment detail for current and prior
        fambills.at[index, 'Feepaydetail'] = priorfeestr + paydetails  # prior charges and pay
        # construct custom portion of text message
        # "on jan 4th from 6-7:30 at the Cabrini gym please"... message header
        # if money owed, add fee portion to long text message (includes textfee)
        if balance < 0:
            tempstr = 'pay your sports fee balance of $' + str(int(-balance)) + ' '
            tempstr += '(' + ', '.join(textfee) + ')'
            textmess.append(tempstr)  # add phrase with fees
        if len(textmess) > 1:  # prepend 'and' to last phrase
            tempstr = ' and ' + str(textmess[-1])
            textmess[-1] = tempstr
        tempstr = ', '.join(textmess)
        fambills.at[index, 'Textmessage'] = tempstr
    mycols = ['Family', 'Charges', 'PriorCharges', 'CurrPayments', 'PriorPayments', 'PriorBalance', 'Balance', 'Feepaydetail', 'Email1', 'Email2', 'Phone1', 'Phone2', 'Pfirst1', 'Plast1', 'Teams', 'SUkeys', 'PriorSUkeys', 'Plakeys', 'Paykeys', 'Depkeys', 'Newunikeys', 'Oldunikeys', 'Unidetail', 'Textmessage', 'Famkey', 'School', 'Players', 'Comments']
    fambills = fambills[mycols]
    fambills = fambills.sort_values(['Balance'], ascending=True)
    fambills.to_csv(fname, index=False)
    return fambills
def makeunidepstring(df, famkey, Paylog, curmatch, uniteams, transferteams, textmess, **kwargs):
    ''' Optional call to build message strings about old/new uniforms and deposit-on-file info.
    Returned unistuff dict can include: newunikeys, oldunikeys (key values from mastersignups),
    depneeded (amount of new deposit required), plus the component strings and a combined
    fullunistr; textmess list is extended in place with uniform/deposit phrases. '''
    unistuff = {}  # optional returns of various items to createbilllist
    # Deposit info is needed for either old or new unis (fnct only called if one of two is true)
    if kwargs.get('newuni', False):  # new uniforms and required-deposit info are linked
        newunistr, depneeded, newunikeys, unitext = makenewunistring(curmatch, uniteams, transferteams)
        # retrieve active deposits (independent of current period)
        depkeys, depamt, depstr, deptext = getdepositinfo(Paylog, famkey, depneeded)
        textmess.extend(unitext)  # info about new unis to pick up
        textmess.extend(deptext)  # info about deposit
        unistuff['newunikeys'] = newunikeys  # keys for new uniforms to be issued
        unistuff['depneeded'] = depneeded  # amount of deposit required
        unistuff['newunistr'] = newunistr
        unistuff['depkeys'] = depkeys
        unistuff['depamt'] = depamt
        unistuff['depstr'] = depstr
    if kwargs.get('olduni', False):
        # key(s) from mastersignups plus detail on outstanding uniforms (df is master_signups)
        oldunistr, oldunikeys, oldunitext = getmissingunis(df, famkey)
        textmess.extend(oldunitext)
        unistuff['oldunistr'] = oldunistr
        unistuff['oldunikeys'] = oldunikeys
    # TODO reminder to return unis from transfer teams
    # Roll old-uni, new-uni and deposit strings into one detail string (stored in Unidetail)
    fullunistr = ''
    for piece in ('oldunistr', 'newunistr', 'depstr'):
        if piece in unistuff:
            fullunistr += unistuff.get(piece, '')
    unistuff['fullunistr'] = fullunistr
    return textmess, unistuff
def matchpayment(df, players):
    '''Find famkey (and plakeys secondarily) for payments by matching player/family names.
    First field can have multiple names (comma-separated); only the first is used.
    args:
        df -- paylog-style df; rows w/ null Famkey still need matching
        players -- player db w/ First/Last/Plakey/Famkey/Family columns
    returns:
        df -- same df w/ Plakey/Famkey/Family filled in where a unique match was found
        newplayers -- df of payments w/ no obvious match (for manual handling)
    '''
    needmatch = df[df['Famkey'].isnull()]  # match only needed for unassigned payments
    # frame for payments with no obvious match
    newplayers = pd.DataFrame(columns=['Paykey', 'Date', 'First', 'Last', 'Amount', 'Paytype', 'Comment',
                                       'Sport', 'Season', 'Year', 'Famkey', 'Family name', 'Plakey', 'Email',
                                       'Phone', 'Delivered', 'Notes'])
    for index, row in needmatch.iterrows():
        last = needmatch.loc[index]['Last']
        last = last.title()  # title case
        first = needmatch.loc[index]['First']  # shouldn't have any nan values
        first = first.title()  # switch to title case
        if ',' in first:  # sometimes multiple first names in paylog entry
            first = first.split(',')[0]  # just take first of the first names
        # Check for last name match
        match = players[(players['Last'] == last)]
        if len(match) == 0:
            print('No last name match for player', last, '. Check for consistency')
            # DataFrame.append was removed in pandas 2.0 -- use concat instead
            newplayers = pd.concat([newplayers, df.loc[[index]]])
            continue
        # Check for first and last match
        match = players[(players['Last'] == last) & (players['First'].str.contains(first, na=False, case=False))]
        if len(match) == 1:  # unique first/last match with players db
            # DataFrame.set_value was removed in pandas 1.0 -- use .at instead
            df.at[index, 'Plakey'] = match.iloc[0]['Plakey']
            df.at[index, 'Famkey'] = match.iloc[0]['Famkey']
            df.at[index, 'Family'] = match.iloc[0]['Family']
            print('Payment processed for family', match.iloc[0]['Family'])
        elif len(match) > 1:  # first/last matches multiple players (name too common?)
            print('First and last matches multiple players:', first, ' ', last)
        else:  # no first/last match
            print('Recheck name against players db.. no match for player :', first, ' ', last, '.')
            newplayers = pd.concat([newplayers, df.loc[[index]]])
    return df, newplayers  # same df but with all available player keys filled in
def sendemaillogic(billlist, **kwargs):
    '''Filtering criteria that decides which families get an e-mail.
    A family is e-mailed if ANY enabled criterion applies: outstanding uniform
    (olduni), new uniform to issue (newuni), or money owed (fees).
    kwargs: olduni, newuni, fees booleans, defaulting to False, False, True
    returns: (emaillist, skiplist) -- rows to e-mail / rows to skip
    '''
    # normalize uniform-key columns so blanks and NaNs (from reimport or not) both read as ''
    billlist.Oldunikeys = billlist.Oldunikeys.fillna(value='')
    billlist.Newunikeys = billlist.Newunikeys.fillna(value='')
    billlist.Newunikeys = billlist.Newunikeys.astype(str)
    billlist.Oldunikeys = billlist.Oldunikeys.astype(str)
    if kwargs.get('olduni', False):
        # BUGFIX: was (~nonempty | notnull), which is always True after fillna above,
        # flagging every family; "has old uni to return" means non-empty AND not null
        oldmask = (billlist['Oldunikeys'] != '') & (~billlist['Oldunikeys'].isnull())
    else:  # criterion disabled: all-False mask (since combined with pandas OR)
        oldmask = pd.Series(data=False, index=billlist.index)
    if kwargs.get('newuni', False):
        # same fix as above: true means a new uni to pick up
        newmask = (billlist['Newunikeys'] != '') & (~billlist['Newunikeys'].isnull())
    else:  # criterion disabled: all-False mask
        newmask = pd.Series(data=False, index=billlist.index)
    if kwargs.get('fees', True):
        feemask = billlist['Balance'] < 0  # negative balance means fees owed
    else:  # criterion disabled: all-False mask
        feemask = pd.Series(data=False, index=billlist.index)
    fullmask = (oldmask | newmask | feemask)  # combine boolean series with OR
    emaillist = billlist.loc[fullmask]  # any criterion true (old uni, new uni, owes money)
    skiplist = billlist.loc[~fullmask]  # all criteria false
    return emaillist, skiplist
def getemailadds(thisbillrow):
    '''Collect e-mail address(es) from a single family bill row (passed as Series);
    returns a list of the addresses that look valid (contain '@').'''
    recipients = []
    for address in (str(thisbillrow.Email1), str(thisbillrow.Email2)):
        if '@' in address:
            recipients.append(address)
    return recipients
def sendebills(billlist, Mastersignups, season, year, emailtitle, blankmessage, **kwargs):
    '''From bill list of payments, balances, and signups, generate and send email bills
    (currently not including SMS sends -- SMS gateway addresses just get a shorter template).
    args:
        billlist -- per-family billing df (from createbilllist)
        Mastersignups -- signups df (used for per-player detail in message)
        season, year -- current sports season/year
        emailtitle -- subject line
        blankmessage -- message template w/ $-placeholders filled per family
    kwargs:
        choice -- 'send' to really e-mail; anything else logs messages to ebilling_log.txt
        kcsendtest -- restrict send to a single internal test family
        SMS -- alternate (shorter) blank template used when address looks like an SMS gateway
        olduni, newuni, fees -- passed through to sendemaillogic
    returns: (ebilllist, skiplist) -- families e-mailed / families skipped
    '''
    choice = kwargs.get('choice', 'test')
    logfile = None
    if choice == 'send':
        smtpObj = smtplib.SMTP('smtp.gmail.com', 587)  # port 587
        smtpObj.ehlo()  # say hello
        smtpObj.starttls()  # enable encryption for send
        print('Enter password for sponsors club gmail ')
        passwd = input()
        smtpObj.login('<EMAIL>', passwd)
        # string to record in comments that bill was sent
        commstring = 'email ' + datetime.date.strftime(datetime.datetime.now(), "%m/%d/%y")
        # email generation and send loop
        billlist.Comments = billlist.Comments.astype(str)  # info about send recorded in comments
    else:  # testing only... open log file instead of sending
        # somewhat redundant though since emails/texts recorded to text log files
        logfile = open('ebilling_log.txt', 'w', encoding='utf-8')
    # only families with valid email1 and money owed
    ebilllist = billlist[(pd.notnull(billlist['Email1'])) & (billlist['Balance'] < 0)]
    ebilllist, skiplist = sendemaillogic(ebilllist, **kwargs)  # decides who gets an e-mail
    if 'kcsendtest' in kwargs:
        # Filter bill list and only send to Croat (internal send testing option)
        ebilllist = ebilllist[ebilllist['Family'] == 'Croat']
        print('Send test to KC only.')
    if 'SMS' in kwargs:
        blankSMS = kwargs.get('SMS', '')  # blank alternate SMS message
    for index, billrow in ebilllist.iterrows():
        recipients = getemailadds(billrow)  # list of recipients
        # create custom email message (alternate SMS template if address looks like SMS gateway)
        if detectSMS(recipients) and 'SMS' in kwargs:
            thismessage = makebillmessage(billrow, Mastersignups, season, year, blankSMS)
        else:
            thismessage = makebillmessage(billrow, Mastersignups, season, year, blankmessage)
        msg = MIMEText(thismessage, 'plain')
        msg['Subject'] = emailtitle
        msg['From'] = 'Cabrini Sponsors Club <<EMAIL>>'
        msg['To'] = ','.join(recipients)
        if choice == 'send':
            # single message to both parents
            try:
                smtpObj.sendmail('<EMAIL>', recipients, msg.as_string())
                print('Message sent to ', ','.join(recipients))
            except Exception:  # was bare except -- don't swallow KeyboardInterrupt/SystemExit
                print('Message to ', ','.join(recipients), ' failed.')
            # append comment when bill is sent to valid email address
            comm = ebilllist.loc[index]['Comments']
            if str(comm) != 'nan':
                thiscomm = comm + commstring
            else:
                thiscomm = commstring
            # TODO maybe return billlist (not ebilllist) with comments
            ebilllist.at[index, 'Comments'] = thiscomm  # set_value removed in pandas 1.0
        else:  # testing mode
            logfile.write(msg.as_string() + '\n')
    if logfile is not None:
        logfile.close()  # previously left open (file handle leak)
    # copy altered ebilllist back to main billlist (with added comment about e-mail sent)
    billlist.loc[ebilllist.index, ebilllist.columns] = ebilllist
    return ebilllist, skiplist
def makebillmessage(thisbillrow, Mastersignups, season, year, blankmessage):
''' Make e-mail message for family from billrow (passed as Series)
pass family's detailed bill row (already contains signups, payments, etc,'''
balance=-thisbillrow.Balance # int or float (negative means family owes money)
# Make current signups string from SUkeys (if applicable)
SUstring=''
tempstr=str(thisbillrow.SUkeys) # signups for this season as string
if tempstr!='nan' and tempstr!='':
currSUkeys=[int(s) for s in tempstr.split(',')] # convert str of int(s) to list ofints
else:
currSUkeys=[] # no current signups (this season)
#TODO replace CR-LF ...\r\n didn't seem to work
if len(currSUkeys)>0:
SUstring+='Sports signups for this season:\n'
for i, SU in enumerate(currSUkeys):
thisSU=Mastersignups[Mastersignups['SUkey']==SU] # returns single row match
first=thisSU.iloc[0]['First']
last=thisSU.iloc[0]['Last']
sport=thisSU.iloc[0]['Sport']
thisstr=first + ' ' + last + ' - ' + sport + '\n'
SUstring+=thisstr
message=blankmessage.replace('$SUSTRING',SUstring)
# add current season charges
currcharge=int(thisbillrow.Charges)
tempstr='Current charges: $'
tempstr+=str(currcharge)
message=message.replace('$CURRENT_CHARGES',tempstr)
# add current payments
currpay=int(thisbillrow.CurrPayments)
tempstr='Payments for ' + season + ' '+ str(int(year))+': $'
tempstr+=str(currpay)+'\n'
message=message.replace('$CURRENT_PAYMENTS',tempstr)
else: # zero out stuff about current charges
message=blankmessage.replace('$SUSTRING','')
message=message.replace('$CURRENT_CHARGES','')
message=message.replace('$CURRENT_PAYMENTS','')
# If family has prior charges, insert details here
# TODO add players to prior fee list?
if thisbillrow.PriorCharges>thisbillrow.PriorPayments:
tempstr='Fees and payments from prior seasons:\n'
tempstr+=thisbillrow.Feepaydetail+'\n'
message=message.replace('$FEEPAYDETAIL',tempstr)
else:
message=message.replace('$FEEPAYDETAIL','')
# Now insert outstanding balance for all
tempstr='$ '+ str(balance)
message=message.replace('$BALANCE',tempstr)
# insert section with old uni return, new unis to use, deposits needed or on file
#TODO mention return of unis for transferred players
unistring=thisbillrow.Unidetail
if str(unistring)!='nan':
message=message.replace('$UNIDETAIL',unistring)
else:
message=message.replace('$UNIDETAIL','')
return message
def shortnamedict(teams):
    ''' From teams df, build {full team name: shortened display name} for tk display,
    e.g. '1G-Croat' or '3G-Ambrose' (grade range + gender letter + coach/school). '''
    short_names = {}
    for idx, entry in teams.iterrows():
        team_name = teams.loc[idx]['Team']
        # label is the school prefix for transfer teams ('#'), else the coach name
        if '#' in team_name:
            label = team_name.split('#')[0]
        else:
            label = str(teams.loc[idx]['Coach'])
        gender_letter = 'B' if teams.loc[idx]['Gender'] == 'm' else 'G'
        grade_range = str(teams.loc[idx]['Graderange']).replace('0', 'K')  # grade 0 shown as K
        short_names[team_name] = grade_range + gender_letter + '-' + label
    return short_names
def findrecentfile(filelist):
    ''' Return most recently dated file from list of autonamed files; embedded date
    format is always like 27Jan17 (i.e. name_27Jan17.ext).
    Falls back to the first file if the date parse/comparison fails. '''
    dates = [s.split('_')[1].split('.')[0] for s in filelist]  # pull date token from each name
    try:
        dates = [datetime.datetime.strptime(val, "%d%b%y") for val in dates]
        datepos = dates.index(max(dates))  # position of newest date (using max)
        newfile = filelist[datepos]
    except (ValueError, IndexError):  # was bare except -- only catch parse/empty-list failures
        print('File date comparison failed... using first one')
        newfile = filelist[0]
    return newfile
def detectSMS(recipients):
    '''Return True when the primary (first) address is very likely an SMS gateway,
    i.e. its local part starts with at least 9 digits; False for an empty list.'''
    if not recipients:
        return False
    local_part = recipients[0].split('@')[0]
    return bool(re.match(r'\d{9}', local_part))
|
tkcroat/SC | SC_messaging_main.py | <reponame>tkcroat/SC<filename>SC_messaging_main.py
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 17:41:41 2017
@author: tkc

Interactive (cell-mode, #%%-separated) driver script for SC messaging:
loads signups/teams/coaches, then individual cells send e-mails to parents,
recruits, all-sport parents, and coaches. Run cells selectively -- the script
is NOT meant to execute top to bottom (later cells reassign the same names).
"""
import pandas as pd
import os, sys
if 'C:\\Users\\tkc\\Documents\\Python_Scripts\\SC' not in sys.path:
    sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts\\SC')
    print ('SC folder added')
import pkg.SC_signup_functions as SC
import pkg.SC_messaging_functions as SCmess
import pkg.SC_config as cnf # specifies input/output file directories
#%%
from importlib import reload
reload(SCmess)
#%%
os.chdir('C:\\Users\\tkc\\Documents\\Python_Scripts\\SC')
# pick the signup file for the season being processed (last assignment wins)
signupfile='Winter2017_signups.xlsx'
signupfile='Spring2017_signups.xlsx'
signupfile='Fall2018_signups.xlsx'
# Load signups,player and family contact info; format names/numbers, eliminate duplicates
players, famcontact, SCsignup, season, year = SC.loadprocessfiles(signupfile)
teams=pd.read_csv(cnf._INPUT_DIR +'\\Teams_2019.csv', encoding='cp437')
coaches=pd.read_csv(cnf._INPUT_DIR +'\\coaches.csv', encoding='cp437') # common excel file encoding
#teams=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Teams')
#teams=pd.read_csv(cnf._INPUT_DIR+'\\Teams_2019.csv', encoding='cp437')
#coaches=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Coaches') # load coach info
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
# Teams folder under each season?
# google drive links substituted into message templates by placeholder key
gdrivedict={
    '$GDRIVEWINTER':'https://drive.google.com/drive/u/0/folders/1oQQUiIKneC36P7mvJrVQNfC5M70NFrDW',
    '$GDRIVEFALL':'https://drive.google.com/open?id=1DU-6x6wqOkiiAh5OvlzKAsombspgYAnq',
    '$GDRIVE_SCHEDULING':'https://docs.google.com/forms/d/e/1FAIpQLSf_f7d1eHXn8Kfm75sqM0Wvv3CKPUemI-GWRWddSkIAqdd_6Q/viewform'
    }
#%%
''' Messages to parents: 1) team assignment 2) Recruit missing players 3) missing unis
4) send schedule 5) other message 6) all parent message
'''
SCmess.emailparent_tk(teams, season, year)
# testing ssl connections/ troubleshooting
from urllib.request import urlopen
res = urlopen('https://www.howsmyssl.com/a/check').read() # tls version is 1.2
#%% Messages to coaches
# 1) missing uniforms (coach summary) 2) send team contact lists 3) send bill summary
# 4) other/generic
# missing unis will auto-load old teams
# TODO add sendschedule option
SCmess.emailcoach_tk(teams, coaches, gdrivedict)
# Testing
# NOTE(review): scratch call -- notifyfamilies, emailtitle, blankmess and kwargs are
# not defined in this file; presumably pasted from elsewhere for interactive testing
notifyfamilies(teams, Mastersignups, year, famcontact, emailtitle, blankmess, **kwargs)
teams=teams.drop_duplicates('Team')
mtype='recruit'
mtype='teamassign' # notification of team assignment and CYC card status
#%% Messages to recruits (load after editing)
Recruits=pd.read_excel(signupfile, sheetname='Recruits')
emailtitle='Cabrini-Soulard sports for $FIRST this fall?'
messagefile='messages\\player_recruiting.txt'
SCmess.emailrecruits(Recruits, emailtitle, messagefile)
#%% Messages to all sports parents (typically last 3 seasons)
# Return email list for all players this season and up to prior year of same season
emaillist=SCmess.makeemaillist(Mastersignups, famcontact, season, year, SMS=False)
emailstr=', \r\n'.join(emaillist)
emaillist.to_csv('email_list_3Oct18.csv')
#%% Messages to coaches
SCmess.emailcoach_tk(teams, coaches, gdrivedict)
# Send team billing summary to (head) coaches: team bill summary, contact list,
mtype='bills'; mtype='contacts'; mtype='unis'; # choose message type
kwargs={}
# needed for billing
emailtitle='Fees still owed by your Cabrini team'
messagefile='messages\\coach_email_outstanding_bills.txt'
kwargs.update({'asst':False}) # Optional send to asst. coaches if set to True
billlist=pd.read_csv('Billlist_18Jan17.csv', encoding='cp437') # pruned bill list current season only balances owed
Mastersignups = pd.read_csv('master_signups.csv', encoding='cp437')
kwargs.update({'bills':billlist, 'SUs':Mastersignups})
# needed for team contacts (mtype contacts)
emailtitle='Contact list for your Cabrini team'
messagefile='messages\\coach_email_contacts.txt'
gdrive='https://drive.google.com/open?id=0B9k6lJXBTjfiVDJ3cU9DRkxEMVU' # Sharable link for this season
kwargs.update({'asst':True}) # Optional send to asst. coaches if set to True
kwargs.update({'SUs':Mastersignups,'players':players,'famcontact':famcontact})
kwargs.update({'gdrive':gdrive}) # google drive link for this season
# Needed for outstanding uniform return
kwargs={}
mtype='unis'
missing=pd.read_csv('missingunilist_27Apr17.csv', encoding='cp437')
oldteams=pd.read_excel('Teams_coaches.xlsx', sheetname='Oldteams') # loads all old teams in list
kwargs.update({'mformat':'txt'}) # html or string/text message format (testing only)
kwargs.update({'oldteams':oldteams,'missing':missing})
kwargs.update({'asst':False}) # Optional send to asst. coaches if set to True
messagefile='messages\\coach_email_outstanding_unis.txt'
emailtitle='Return of uniforms for your Cabrini team'
messagefile='coach_email_log_29Apr17.html' # test send
# Write batch e-mails to coaches into html log file
# NOTE(review): SCbill is not imported in this file -- confirm module alias before running
SCbill.testcoachemail(teams, coaches, mtype, emailtitle, messagefile, **kwargs)
SCbill.emailcoaches(teams, coaches, mtype, emailtitle, messagefile, **kwargs)
|
tkcroat/SC | pkg/SC_schedule_functions.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 22 06:46:15 2017
@author: tkc
"""
import pandas as pd
import re
from datetime import datetime, timedelta
import tkinter as tk
def alterSchedule(sched):
    ''' Convert/prepare the google-docs online schedule version to work with the
    previous scheduler version -- mainly adds the Day and Division columns when missing.
    args: sched -- schedule df w/ at least Date and Team columns
    returns: same df w/ Day (weekday name) and Division (3rd hyphen field of Team) added
    '''
    def convDate(val):
        # datetime conversion for string dates; tries 2-digit then 4-digit year
        for fmt in ('%m/%d/%y', '%m/%d/%Y'):
            try:
                return datetime.strptime(val, fmt)
            except (ValueError, TypeError):  # TypeError when val is already a datetime
                continue
        print('Could not convert', val)
        return val  # unchanged on failure
    def setWeekDay(val):
        # find day-of-week name from date string/datetime; '' when unparseable
        val = convDate(val)  # always attempt datetime conversion
        days = ['Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat', 'Sun']  # order for .weekday()
        try:
            return days[val.weekday()]
        except AttributeError:
            # BUGFIX: previous handler called val.weekday() again inside the except
            # block, re-raising for unconverted (string) dates
            print('Value is', val)
            return ''
    def findDivision(val):
        # division is the third hyphen-separated field of the team name, else ''
        try:
            return val.split('-')[2]
        except (AttributeError, IndexError):
            return ''
    if not "Day" in sched.columns:
        sched['Day'] = sched['Date'].apply(lambda x: setWeekDay(x))
    if 'Division' not in sched.columns:  # mod for google drive calendar version
        sched['Division'] = sched['Team'].apply(lambda x: findDivision(x))
    return sched
def findByeWeek(teamName, sched):
    ''' Finds bye weekend(s) for a team
    args:
        teamName - str as seen in schedule (substring-matched against Home/Visitor)
        sched - full dataframe w/ team schedules; Date column holds pandas Timestamps
    returns:
        list of byes; each entry is a [first_day, last_day] datetime pair
    '''
    # all distinct game dates across the whole league schedule
    allDates=list(set(sched.Date.to_list()))
    allDates=[i.to_pydatetime() for i in allDates]  # Timestamp -> datetime
    allDates.sort()
    # Would be best to parse into weekends
    # drop rows missing either opponent (placeholder/blank games)
    sched = sched[ (pd.notnull(sched['Home'])) & (pd.notnull(sched['Visitor']))]
    gameDates= list(sched[ (sched['Home'].str.contains(teamName)) | (sched['Visitor'].str.contains(teamName)) ].Date.to_list())
    # Convert to datetime (from timestamp)
    gameDates=[i.to_pydatetime() for i in gameDates]
    # dates when the league plays but this team does not
    offDates=[i for i in allDates if i not in gameDates]
    # NOTE(review): if offDates is empty, groupConsecutiveDates returns False and the
    # comprehension below would raise TypeError -- confirm schedules always have off dates
    byes=groupConsecutiveDates(offDates)
    byes=[i for i in byes if isinstance(i, list)]  # keep only multi-day gaps (byes)
    return byes
def groupConsecutiveDates(dates):
    ''' Combine a team's off dates into consecutive ranges for finding bye weeks.
    args:
        dates -- list of datetimes (duplicates allowed; de-duped internally)
    returns:
        list whose items are either a single datetime (isolated day) or a
        [first_day, last_day] list for a consecutive run; False for empty input
    '''
    if not dates:
        return False
    ordered = sorted(set(dates))  # de-dup and sort
    grouped = []
    run_start = run_end = ordered[0]
    for day in ordered[1:]:
        if day.toordinal() - run_end.toordinal() == 1:  # consecutive -> extend run
            run_end = day
        else:  # close out current run: [start, end] of range, else singleton
            grouped.append([run_start, run_end] if run_start != run_end else run_start)
            run_start = run_end = day
    grouped.append([run_start, run_end] if run_start != run_end else run_start)
    return grouped
def prepSched(sched):
    ''' For Pat's CYC schedule: prepare the loaded excel for algorithmic searches,
    i.e. Date string -> datetime conversion and stripping whitespace from team names.
    returns: same df, modified in place '''
    def convDate(val):
        # parse m/d/YYYY strings; leave non-matching values (already datetime, NaN) untouched
        try:
            return datetime.strptime(val, '%m/%d/%Y')
        except (ValueError, TypeError):  # was bare except -- narrow to parse/type failures
            return val
    sched['Date'] = sched['Date'].apply(lambda x: convDate(x))
    sched['Visitor'] = sched['Visitor'].str.strip()
    sched['Home'] = sched['Home'].str.strip()
    return sched
def writeCabSchedule(sched):
    '''
    Convert date format to correct string and save as csv
    NOTE(review): incomplete -- Date column is converted in place, but askSavename
    is defined and never called, so nothing is actually written; returns None
    '''
    def convDate(val):
        # datetime -> m/d/YYYY string; non-datetime values become None
        try:
            return val.strftime('%m/%d/%Y')
        except:
            return None
    sched['Date']=sched['Date'].apply(lambda x:convDate(x))
    def askSavename(sched):
        # save as via pop-up (currently unused -- TODO wire this up and call to_csv)
        root=tk.Tk() # creates pop-up window
        root.update() # necessary to close tk dialog after askopenfilename is finished
        # tk dialog asks for a single station file
        full_path = tk.filedialog.asksaveasfile(title = 'Save schedule',
            filetypes=[ ('csv','*.csv')] )
        root.destroy() # closes pop up window
        return full_path
    return
def convertDts(sched):
    '''
    Strip trailing time component from Date strings (column sometimes imports as
    "10/30/2018 0:00"); unfinished stub -- mutates the Date column and returns None
    '''
    # sometimes imports as 10/30/2018 0:00
    sched['Date']=sched['Date'].str.split(' ').str[0]
    # TODO finish me
    return
def loadSchedule():
    ''' Choose a schedule file via popup dialog and load it as a DataFrame.
    returns: schedule df (csv read w/ cp437 encoding, or excel), or None if the
    chosen file is neither CSV nor Excel '''
    def get_file_path():
        '''
        Popup dialog box to find db path if non-standard
        '''
        root = tk.Tk()  # creates pop-up window
        root.update()  # necessary to close tk dialog after askopenfilename is finished
        # tk dialog asks for a single station file
        full_path = tk.filedialog.askopenfilename(title='Choose schedule name',
            filetypes=[('XLS', '*.xls*'), ('csv', '*.csv')])
        root.destroy()  # closes pop up window
        return full_path
    myPath = get_file_path()
    if myPath.endswith('.csv'):
        sched = pd.read_csv(myPath, encoding='cp437')
    elif myPath.endswith(('.xls', '.xlsx')):  # BUGFIX: original had unterminated string literals here
        sched = pd.read_excel(myPath)
    else:
        print('Schedule file must be CSV or Excel')
        return None  # previously fell through and raised UnboundLocalError on 'sched'
    return sched
def compareSched(sched, oldsch):
    '''
    Detect altered games between a new and an old schedule; prints the affected
    teams and returns the changed games (rows present in only one of the two,
    keyed on Date/Time/Location), sorted by Division/Date/Time.
    '''
    combined = pd.concat([sched, oldsch])
    changed = combined.drop_duplicates(['Date', 'Time', 'Location'], keep=False)
    changed = changed.sort_values(['Division', 'Date', 'Time'])
    affected = list(changed.Team.unique())
    if len(affected) > 0:
        print('Changed schedule for ', ', '.join(affected))
    return changed
def getTeamsDict(teams, sport):
    '''
    CYC sched has Cab teams with embedded coach name and division;
    make dict w/ coach name, div and team name as value
    NOTE(review): unfinished -- tdict is built but the function returns None;
    the sport filter is not implemented
    '''
    tdict={}
    cteams=teams[~teams['Team'].str.contains('#')]  # drop transfer teams (marked w/ '#')
    coachlist=list(cteams.Coach.unique())
    for i, coac in enumerate(coachlist):
        tdict[coac]=list(cteams[cteams['Coach']==coac].Team.unique())
    # TODO implement sport filter
    cteams=cteams[cteams['Team'].str.contains('-')] # CYC teams w/ division
    # TODO finish me
    return
def getTeamDicts(teams, sport):
    '''
    Build lookup dicts of Cabrini team names for one sport.

    Args:
        teams (pd.DataFrame): team table with Sport, Gender, Grade, Coach
            and Team columns.
        sport (str): sport used to filter the team table.

    Returns:
        tuple(dict, dict):
            teamdivdict -- division code (e.g. '3B', '5G') -> team name
            coachdict   -- coach name -> team name
    '''
    thissport = teams[teams['Sport'] == sport]
    teamdivdict = {}
    coachdict = {}
    # Ensure no duplicates for division: exactly one team per gender/grade
    for (gend, gr), group in thissport.groupby(['Gender', 'Grade']):
        if len(group) != 1:
            print('Multiple teams for ', gr, gend)
            continue
        row = group.iloc[0]
        try:
            coachdict[row['Coach']] = row['Team']
        except KeyError:
            # narrowed from a bare except: only a missing Coach/Team column
            # is tolerated here; anything else should surface
            pass
        if gend == 'm':
            # division should match that from Pat Moore schedule
            teamdivdict[str(row['Grade']) + 'B'] = row['Team']
        elif gend == 'f':
            teamdivdict[str(row['Grade']) + 'G'] = row['Team']
        else:
            print('Gend problem for', gr, gend)
    return teamdivdict, coachdict
def prepGdSchedule(sched, teams, sport):
    '''
    Convert the all-teams schedule (CYC google drive export, usually Excel)
    into the Cabrini teams format used downstream.

    Steps: normalize the export's columns (three known layouts), filter to
    games involving Cabrini, convert Date to datetime/date, sort, and attach
    the matching Cabrini team name per division code.

    Args:
        sched (pd.DataFrame): raw schedule export (9-, 10- or 13-column layout).
        teams (pd.DataFrame): Cabrini teams table (forwarded to getTeamDicts).
        sport (str): sport used by getTeamDicts to select teams.

    Returns:
        pd.DataFrame or None: normalized Cabrini schedule, or None when the
        column layout is unrecognized -- callers must handle None.
    '''
    #sched.columns=['Gamenum','Visitor', 'Vis', 'Home', 'Home2', 'Date','Time', 'Venue','Ven','Days']
    if len(sched.columns)==10:
        sched.columns=['GameNum','Away', 'Vis', 'Home', 'Home2', 'Date','Time', 'Location','Ven','Days']
    elif len(sched.columns)==9:
        sched.columns=['GameNum','Away', 'Vis', 'Home', 'Home2', 'Date','Time', 'Location','Ven']
    elif len(sched.columns)==13: # 8/2019 <NAME>re structure
        sched.columns=['GameNum','Date','Time','Day','Home','Away','Location','HScore','VScore', 'Division','Status','Assignments','Notes']
    else:
        print('Examine for new column structure')
        return
    # Filter for Cabrini teams only
    sched=sched[sched['Home'].str.contains('Cabrini') | sched['Away'].str.contains('Cabrini') ]
    ''' no longer needed w/ new division column
    sched['Division']=''
    # Find division from within name field
    for index, row in sched.iterrows():
        if re.match('\d{1}\w{2}',row.Vis):
            sched.loc[index]['Division']=re.match('\d{1}\w{2}',row.Vis).group(0)
            # sched=sched.set_value(index, 'Div', re.match('\d{1}\w{2}',row.Vis).group(0) )
    # Needed because all schedule games listed under both teams (no longer true)
    sched=sched.drop_duplicates(['Date','Time','Vis','Home2'])
    '''
    # Sorting by date requires datetime
    # TESTING val=sched.iloc[0]['Date'] datetime.strptime(val, '%m/%d/%Y')
    # NOTE(review): datetime.strptime here implies a module-level
    # "from datetime import datetime" import -- confirm at file top
    if isinstance(sched.iloc[0]['Date'],str):
        sched['Date']=sched['Date'].str.split(' ').str[0] # strip of time string
        sched.Date=sched.Date.apply(lambda x:datetime.strptime(x,'%m/%d/%Y'))
    elif isinstance(sched.iloc[0]['Date'],datetime):
        sched.Date=sched.Date.apply(lambda x:x.date()) # just convert to date
    # TODO check formatting of time column
    # Drop duplicates... can pick up same game twice (from Cab schedule and opposing team)
    sched=sched.sort_values(['Division','Date','Time'])
    # lookup of cabrini teams from division and/or coach name
    teamdivdict, coachdict=getTeamDicts(teams, sport)
    # Find day of week from date
    sched['Team']=''
    def setWeekDay(val):
        # determine day of week from date; blank string on any bad/missing date
        days=['Mon','Tues','Wed','Thurs','Fri','Sat','Sun'] # day order for .weekday()
        try:
            return days[val.weekday()]
        except:
            return ''
    sched['Day']=sched['Date'].apply(lambda x:setWeekDay(x))
    def setTeam(div):
        # Set Team column to match Cabrini team name (used by SC_messaging)
        # div will be "2BD" but teamdict match for Cabrini teams is "2B"
        # NOTE(review): the except path prints but implicitly returns None,
        # so a bad division yields Team=None rather than '' -- verify intent
        try:
            if div[0:2] in teamdivdict:
                return teamdivdict.get(div[0:2])
            else:
                return ''
        except:
            print('Problem setting Cabrini team name')
    sched['Team']=sched['Division'].apply(lambda x:setTeam(x))
    sched=sched[['Date','Day','Time','Home','Away','Division','Location','Team']]
    return sched
|
tkcroat/SC | pkg/SC_legacy_functions.py | <filename>pkg/SC_legacy_functions.py
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 29 10:24:49 2016
SC legacy funct
@author: tkc
"""
import pandas as pd
# Testing option now built in to sendebills
def testautobilling(billlist, Mastersignups, season, year, emailtitle, messagefile, **kwargs):
    ''' Dry-run e-billing and SMS billing by writing every message to log files.

    Uses payment and uniform info strings made by createbilllist and saved in
    columns Feepaydetail and Unidetail (for e-mail) and Textmessage (for SMS).
    Run and check these logs (autonamed by date) before a live send of e-mail
    bills or SMS bills. Families without an Email1 entry go to a paper log.

    Args:
        billlist (pd.DataFrame): bill rows incl. Email1, Balance, Unidetail,
            Family, Famkey, Textmessage columns.
        Mastersignups, season, year, emailtitle, messagefile: forwarded to
            makemessage for building each family's message body.

    kwargs: SMS -- contains header for text messages if SMS via e-mail is chosen
        sendemaillogic kwargs (incl. newuni, olduni, fees)
        newuni, olduni -- bool flags to decide whether to email those with new
            or old uniform issues (the message content itself with old, new or
            both is constructed by createbilllist)
        fees -- bool to include fee payment (defaults True)

    Returns:
        skiplist from sendemaillogic (families excluded from the e-mail pass).
    '''
    # autonaming of testing text log files as emaillog_date and SMSlog_date
    now=datetime.datetime.now()
    emailfile='email_test_log'+datetime.date.strftime(now, "%d%b%y")+'.txt'
    SMSfile='SMS_test_log'+datetime.date.strftime(now, "%d%b%y")+'.txt'
    paperfile='paper_bills'+datetime.date.strftime(now, "%d%b%y")+'.txt'
    thismask=billlist['Email1'].isnull()
    paperlist=billlist.loc[thismask] # just write to paper log file if no e-mail and bad SMS gateway
    ebilllist=billlist.loc[~thismask] # includes e-mail addresses and SMS gateways
    ebilllist, skiplist=sendemaillogic(ebilllist, **kwargs) # decides who gets e-mail depending on fees/uniforms/ etc.
    with open(emailfile,'w+') as emaillog, open(SMSfile,'w+') as SMSlog:
        for index, row in ebilllist.iterrows():
            # Skip send if zero balance and no info in unidetail(no outstanding unis, none to be issued)
            # NOTE(review): condition skips when Balance>0 and Unidetail is nan,
            # though the logged text says "No outstanding balance" -- confirm sign
            if ebilllist.loc[index]['Balance']>0 and str(ebilllist.loc[index]['Unidetail'])=='nan':
                thisfam=ebilllist.loc[index]['Family']
                message='Skip send for family '+ thisfam+'. No outstanding balance or uniform issues.\n'
                emaillog.write(message)
                continue
            # determine if SMSgateway or e-mail address
            thisaddress=ebilllist.loc[index]['Email1'].split('@')[0]
            # if 9 or 10 digit number it's an SMS gateway (send short text message)
            thismatch=re.match(r'\d{9}', thisaddress) # if 9 or 10 digit # it's SMS gateway
            if thismatch:
                if 'SMS' in kwargs: # switch to include or exclude SMS sending (otherwise skipped)
                    textheader=kwargs.get('SMS','Message from Cabrini')
                    message=ebilllist.loc[index]['Family']+ ' '+str(ebilllist.loc[index]['Famkey'])+'\n'
                    SMSlog.write(message) # output family name and key
                    customSMS=ebilllist.loc[index]['Textmessage'] # pulls family specific SMS from bill list
                    mySMS=textheader+customSMS+'; to '+thismatch.group(0) +'via SMS gateway\n' # combined with passed header
                    SMSlog.write(mySMS)
                    # also throw longer detail e-mail formatted message into text log file
                    thisbillrow=ebilllist.loc[index]
                    recipients=getemailadds(thisbillrow) # list of recipients
                    message=makemessage(thisbillrow, Mastersignups, season, year, recipients, emailtitle, messagefile)
                    SMSlog.write(message)
            else: # normal e-mail address(es)
                message=ebilllist.loc[index]['Family']+ ' '+str(ebilllist.loc[index]['Famkey'])+'\n'
                emaillog.write(message) # family name for log file only
                thisbillrow=ebilllist.loc[index] # this family's bill info as series
                recipients=getemailadds(thisbillrow) # list of recipients
                # create custom email message
                message=makemessage(thisbillrow, Mastersignups, season, year, recipients, emailtitle, messagefile)
                emaillog.write(message)
    with open(paperfile,'w+') as paperlog: # now process paper only ones
        for index, row in paperlist.iterrows():
            # Skip send if zero balance and no info in unidetail(no outstanding unis, none to be issued)
            message=paperlist.loc[index]['Family']+ ' '+str(paperlist.loc[index]['Famkey'])
            paperlog.write(message) # family name for log file only
            if paperlist.loc[index]['Balance']>0 and str(paperlist.loc[index]['Unidetail'])=='nan':
                thisfam=paperlist.loc[index]['Family']
                message='Skip send for family '+ thisfam+'. No outstanding balance or uniform issues.\n'
                paperlog.write(message)
                continue
            else:
                thisbillrow=paperlist.loc[index] # this family's bill info as series
                recipients='None' # list of recipients
                # create custom email message
                message=makemessage(thisbillrow, Mastersignups, season, year, recipients, emailtitle, messagefile)
                paperlog.write(message)
    return skiplist
def changeaddresses(newadd, famcontact):
    '''Apply visually-verified address changes to the family contact table.

    For each row of newadd (output of checkaddresses), copy the new address
    and zip into famcontact and blank Parish_residence for manual re-entry.
    Saves famcontact back to family_contact.csv and returns the updated frame.
    '''
    autocsvbackup(famcontact, 'family_contact', newback=True)  # Run file backup script
    for index, row in newadd.iterrows():
        # DataFrame.set_value was removed in pandas 1.0; .at is the replacement
        famcontact.at[index, 'Address'] = newadd.loc[index]['Address_n']  # change address
        famcontact.at[index, 'Zip'] = newadd.loc[index]['Zip_n']  # change Zip
        famcontact.at[index, 'Parish_residence'] = 'nan'  # delete parish of res and manually re-enter
    famcontact.to_csv('family_contact.csv', index=False)
    return famcontact
def checkaddresses(df, famcontact):
    '''Detect likely real address changes between signups and family contacts.

    Merges google-drive signup info (rows with a valid Timestamp) onto
    famcontact by Famkey, then flags families whose street NUMBER differs --
    a change in house number is strongly suggestive of an actual move.

    Args:
        df (pd.DataFrame): SCsignups with Timestamp, Famkey, Address, Zip.
        famcontact (pd.DataFrame): family contacts with Famkey, Family,
            Address, Zip.

    Returns:
        pd.DataFrame: suspected changes with old/new address columns for
        visual review.
    '''
    df.Timestamp = pd.to_datetime(df.Timestamp, errors='coerce')  # converts to naT or timestamp
    gdsignups = df.dropna(subset=['Timestamp'])  # drops manual entries (no google drive timestamp)
    faminfo = gdsignups.drop_duplicates(subset=['Famkey'])  # only process first kid from family
    tempfam = pd.merge(famcontact, faminfo, how='left', on=['Famkey'], suffixes=('', '_n'))  # same indices as famcontact
    tempfam = tempfam.dropna(subset=['Zip_n'])  # drops values with no gd info
    changelist = []
    for index, row in tempfam.iterrows():
        oldmatch = re.search(r'\d+', str(tempfam.loc[index]['Address']))
        newmatch = re.search(r'\d+', str(tempfam.loc[index]['Address_n']))
        if oldmatch is None or newmatch is None:
            # fixed: .group() was previously called unconditionally and raised
            # AttributeError on any address without a street number
            continue
        if oldmatch.group(0) != newmatch.group(0):  # change in address number strongly suggestive of actual change
            changelist.append(tempfam.loc[index]['Famkey'])
    newadd = tempfam[tempfam.Famkey.isin(changelist)]  # subset with different address number
    mycols = ['Famkey', 'Family', 'Address', 'Zip', 'Address_n', 'Zip_n']  # drop extraneous cols and reorder
    dropcollist = [s for s in newadd.dtypes.index if s not in mycols]
    newadd = newadd.drop(dropcollist, axis=1)  # drops extraneous columns
    newadd = newadd[mycols]
    return newadd  # suspected address changes
# old version before interactive tk approval
def update_contact(ser):
    '''Merge new google-drive contact info into an existing family row.

    Phones (with textable flags), emails, and parent names from the
    '_n'-suffixed google-drive columns are inserted at the front of the
    existing lists; duplicates are removed and each list is padded/truncated
    to its fixed width (4 phones, 3 emails, 3 parents). Parish of
    registration is only filled when currently blank, to keep parish names
    consistent.

    Args:
        ser (pd.Series): merged family row (famcontact cols + '_n' gd cols).

    Returns:
        pd.Series: the same row with contact fields updated.
    '''
    thisfam = ser.Family
    phonelist = []  # list of [number, textable Y/N] pairs, existing entries first
    for i in range(1, 5):  # gather 4 existing phone entries (Phone1..Phone4)
        phname = 'Phone' + str(i)
        txtname = 'Text' + str(i)
        if str(ser[phname]) != 'nan':
            phonelist.append([ser[phname], ser[txtname]])  # as phone and text y/N
    # New google drive entries arrive as Phone1_n; look for the pair in the list
    if str(ser['Phone1_n']) != 'nan' and [ser['Phone1_n'], ser['Text1_n']] not in phonelist:
        if [ser['Phone1_n'], np.nan] in phonelist:  # remove if # present but w/o text indication
            phonelist.remove([ser['Phone1_n'], np.nan])
        phonelist.insert(0, [ser['Phone1_n'], ser['Text1_n']])  # insert in first position
        print('Added phone ', str(ser['Phone1_n']), 'for family', thisfam)
    else:  # move this pair to first position (assumes Phone1_n is a required gd entry)
        phonelist.insert(0, phonelist.pop(phonelist.index([ser['Phone1_n'], ser['Text1_n']])))
        # inserts desired primary first while simultaneously removing other entry
    if str(ser.Phone2_n) != 'nan':  # check for phone2 entry (with _n suffix)
        if [ser['Phone2_n'], ser['Text2_n']] not in phonelist:  # add second phone to 2nd position if not present
            if [ser['Phone2_n'], np.nan] in phonelist:  # remove if # present but w/o text indication
                phonelist.remove([ser['Phone2_n'], np.nan])
            phonelist.insert(1, [ser['Phone2_n'], ser['Text2_n']])
            print('Added phone ', str(ser['Phone2_n']), 'for family', thisfam)
    # Truncate list to max 4 entries (older ones lost)
    phonelist = phonelist[0:4]  # fixed: was [0:3], silently discarding the 4th slot
    while len(phonelist) < 4:
        phonelist.append([np.nan, np.nan])  # pad with nan entries if necessary
    # now reset phone number/text combos in series
    for i in range(1, 5):  # reset 4 existing phone entries
        phname = 'Phone' + str(i)
        txtname = 'Text' + str(i)
        # Series.set_value was removed in pandas 1.0; .at is the replacement
        ser.at[phname] = phonelist[i - 1][0]
        ser.at[txtname] = phonelist[i - 1][1]
    # Construct existing list of known email addresses
    emaillist = []
    for i in range(1, 4):  # get 3 existing email entries
        emailname = 'Email' + str(i)
        if str(ser[emailname]) != 'nan':
            emaillist.append(ser[emailname].lower())
    # Find new email1 entry in google drive data
    if str(ser.Email) != 'nan' and '@' in ser.Email:  # real primary gd named email
        if ser.Email.lower() not in emaillist:  # add in first position if not present
            emaillist.insert(0, ser.Email.lower())
            print('Added email ', str(ser.Email.lower()), 'for family', thisfam)
        else:  # if already present move to first position
            # fixed: index lookup must use the lowercased value actually stored
            emaillist.insert(0, emaillist.pop(emaillist.index(ser.Email.lower())))
    # look for new email in email2 position and add
    if str(ser.Email2_n) != 'nan' and '@' in ser.Email2_n:
        if ser.Email2_n.lower() not in emaillist:  # add second email to 2nd position if not present
            emaillist.insert(1, ser.Email2_n.lower())
            print('Added email', ser.Email2_n.lower(), 'for family', thisfam)
    # Construct and record updated email list
    emaillist = emaillist[0:3]  # limit to 3 entries
    while len(emaillist) < 3:
        emaillist.append(np.nan)  # pad with nan entries if necessary
    for i in range(1, 4):  # reset 3 email entries
        emailname = 'Email' + str(i)
        ser.at[emailname] = emaillist[i - 1]
    # Update list of parent names (max 3 entries)
    parlist = []  # construct existing list from family contacts
    for i in range(1, 4):
        fname = 'Pfirst' + str(i)
        lname = 'Plast' + str(i)
        if str(ser[fname]) != 'nan':
            parlist.append([ser[fname], ser[lname]])  # list of lists [first, last]
    if [ser['Pfirst1_n'], ser['Plast1_n']] not in parlist:  # parent 1 is required entry
        parlist.insert(0, [ser['Pfirst1_n'], ser['Plast1_n']])  # insert in first position
        print('added parent', ser['Pfirst1_n'], ser['Plast1_n'], 'for family', thisfam)
    else:  # move this pair to first position in existing list
        parlist.insert(0, parlist.pop(parlist.index([ser['Pfirst1_n'], ser['Plast1_n']])))
        # inserts in first position while simultaneously removing other entry
    if str(ser.Pfirst2_n) != 'nan':  # fixed: paren bug str(x != 'nan') made this always true
        if [ser['Pfirst2_n'], ser['Plast2_n']] not in parlist:  # add second parent to 2nd position if not present
            parlist.insert(1, [ser['Pfirst2_n'], ser['Plast2_n']])
    parlist = parlist[0:3]  # limit to 3 entries
    while len(parlist) < 3:
        parlist.append([np.nan, np.nan])  # pad with nan entries if necessary
    # now reset parent name entries
    for i in range(1, 4):  # reset 3 existing parents entries
        fname = 'Pfirst' + str(i)
        lname = 'Plast' + str(i)
        ser.at[fname] = parlist[i - 1][0]
        ser.at[lname] = parlist[i - 1][1]
    # update parish of registration (if new gd entry and no existing entry)
    # otherwise changes are skipped to keep parish names consistent
    if str(ser.Parish_registration) == 'nan' and str(ser.Parish) != 'nan':
        ser['Parish_registration'] = ser['Parish']  # Set parish of registration
    return ser
# used before tk confirmation when siblings were added simultaneously (now done sequentially w/ famcontact update)
def processsiblings(newfams, newplayers):
    ''' Group sibling signups into a single family via shared phone number.

    Finds multiple kids from the same family (siblings/step-siblings) by
    phone match, assigns a single famkey, and generates a combined family
    name (before entering this info into family contacts). Probably no
    longer called since the switch to the tkinter select method.

    Returns:
        tuple(pd.DataFrame, pd.DataFrame): (newfams deduped by Famkey,
        newplayers with shared Famkey/Family assigned).
    '''
    # Phone contact match is probably most robust
    phonenums = np.ndarray.tolist(newfams.Phone.unique())
    phonenums.extend(np.ndarray.tolist(newfams.Phone2.unique()))
    phonenums = [str(s) for s in phonenums if str(s) != 'nan']
    processed = []  # keep track of plakeys that are already combined
    for num in phonenums:
        # matches holds the player list from the same family (phone match)
        matches = newfams[(newfams['Phone'] == num) | (newfams['Phone2'] == num)]
        matches = matches[~matches['Plakey'].isin(processed)]  # drop if already processed
        if len(matches) > 0:  # skips if already processed on different common family number
            # generate new family name from all distinct last names + primary parent
            lasts = np.ndarray.tolist(matches.Last.unique())
            lasts.extend(np.ndarray.tolist(matches.Plast1.unique()))
            lasts = [s.strip() for s in lasts if str(s) != 'nan']  # drop nan
            family = ' '.join(lasts)  # concat with spaces for entire family name
            # Assign first famkey to all players
            famkey = matches.iloc[0]['Famkey']  # grab first one
            plakeys = np.ndarray.tolist(matches.Plakey.unique())
            plakeys = [int(k) for k in plakeys]
            # reassign famkey and family for subset of plakeys in both frames
            match = newfams[newfams['Plakey'].isin(plakeys)]
            for index, row in match.iterrows():
                # DataFrame.set_value was removed in pandas 1.0; .at replaces it
                newfams.at[index, 'Famkey'] = famkey
                newfams.at[index, 'Family'] = family
            # same for newplayers
            match = newplayers[newplayers['Plakey'].isin(plakeys)]
            for index, row in match.iterrows():
                newplayers.at[index, 'Famkey'] = famkey
                newplayers.at[index, 'Family'] = family
            processed.extend(plakeys)  # Add keys to processed list
    newfams = newfams.drop_duplicates(subset='Famkey')  # only need one entry per family
    # TODO This might drop a step-dad phone # for kids w/ same mom, different dad
    return newfams, newplayers
# old way of finding associated families ... now using phonedict and last name tuple list
def findfamily(newplayers,famcontact):
''' For confirmed new players, find existing family name and key (if it exists)'''
newplayers=newplayers.reset_index(drop=True) # reset index to avoid problems
if 'Famkey' not in newplayers: # add player key column to sign-ups file if not present
newplayers.insert(1,'Famkey',0)
if 'Family' not in newplayers: # add player key column to sign-ups file if not present
newplayers.insert(0,'Family','')
for index, row in newplayers.iterrows():
first=newplayers.loc[index]['First']
last=newplayers.loc[index]['Last']
phstr='' # build phone matching string
if pd.notnull(newplayers.loc[index]['Phone']):
phstr+='|' + newplayers.loc[index]['Phone']
if pd.notnull(newplayers.loc[index]['Phone2']):
phstr+='|' + newplayers.loc[index]['Phone2']
mask = famcontact['Phone1'].str.contains(phstr, na=False, case=False) | famcontact['Phone2'].str.contains(phstr, na=False) | famcontact['Phone3'].str.contains(phstr, na=False)
match=famcontact.loc[mask] # filter df with above phone matching mask
if len(match)==1:
print('Phone match of player ', first, last,' to family ', match.iloc[0]['Family'])
newplayers=newplayers.set_value(index,'Family',match.iloc[0]['Family']) #
newplayers=newplayers.set_value(index,'Famkey',match.iloc[0]['Famkey'])
continue # famkey assigned and move to next if phone match
mask = famcontact['Family'].str.contains(last, na=False, case=False) # check for last name match
match=famcontact.loc[mask] # filter df with above phone matching mask
if len(match)>0: # check for possible family matches (even though no phone match)
for i in range(0,len(match)):
tempstr=''
tempstr=tempstr+str(match.iloc[i]['Famkey'])+' '
tempstr=tempstr+str(match.iloc[i]['Family'])+' ' # string with family key # and name for all possibles
print('Possible match of player ', first, last,' to family ', tempstr,'not yet assigned') # lists all
# don't assign number but can be done manually if reasonable
else:
print('No match for player ', first, last)
return newplayers # send back same df with family name and key
def organizesignups(df, year):
    ''' Reshape a season's signup subset into the master-signup layout.

    Sorts by gender then grade (K treated as 0 for ordering), adds the
    blank bookkeeping columns used by the master signup file, stamps the
    year, and reorders the columns.
    '''
    mycols = ['SUkey', 'First', 'Last', 'Grade', 'Gender', 'Sport', 'Year',
              'Team', 'Plakey', 'Famkey', 'Family', 'SUdate', 'Issue date',
              'Uniform #', 'Uni return date']
    # temporarily map K to 0 so kindergarten sorts ahead of grade 1
    df.Grade = df.Grade.replace('K', 0)
    df = df.sort_values(['Gender', 'Grade'], ascending=True)  # nested sort
    df.Grade = df.Grade.replace(0, 'K')  # restore the K label after sorting
    # blank bookkeeping columns filled in later in the workflow
    for newcol in ['Team', 'Issue date', 'Uniform #', 'Uni return date']:
        df[newcol] = ''
    df['Year'] = int(year)
    df['SUkey'] = 0  # unique signup key; zero means not yet assigned
    return df[mycols]  # put back in desired order
def organizesignups2(df):
    ''' Clean/reorder a master-signups frame that already has all columns.

    Drops any columns outside the master-signup set, sorts by
    year/sport/gender/grade (K sorted as 0), and restores column order.
    '''
    mycols = ['SUkey', 'First', 'Last', 'Grade', 'Gender', 'Sport', 'Year',
              'Team', 'Plakey', 'Famkey', 'Family', 'SUdate', 'Issue date',
              'Uniform #', 'Uni return date']
    extras = [col for col in df.dtypes.index if col not in mycols]
    df = df.drop(extras, axis=1)  # discard columns not in the master set
    df.Grade = df.Grade.replace('K', 0)  # K sorts as grade 0
    df = df.sort_values(['Year', 'Sport', 'Gender', 'Grade'], ascending=True)
    # restore the K label; string/regex replace mirrors the prior behavior
    df.Grade = df.Grade.replace('0', 'K', regex=True)
    return df[mycols]  # put back in desired order
# older way of organizing output from dataframes after merges and such
def organizelog(df):
    ''' Arrange a sport-gender frame for output to the excel signup summary.

    Drops columns outside the summary set (via dropcolumns) and reorders
    the remaining columns.
    '''
    mycols = ['First', 'Last', 'School', 'Issue date', 'Uniform #', 'Amount',
              'Deposit type', 'Deposit date', 'Uni return date', '$ returned',
              'Comments', 'Plakey', 'Famkey']
    template = pd.DataFrame(columns=mycols)  # empty frame used only as column template
    df = dropcolumns(df, template)  # NOTE(review): prior comment said "not working" -- verify
    df = df[mycols]  # put back in desired order
    return df
# old version before more comprehensive, sophisticated find-replace version
def makemessage(thisbillrow, signups, recipients, emailtitle, longmessage, Paylog):
    ''' Build the email bill text with amounts and signup details.

    Args:
        thisbillrow: family's bill row (uses Balance and Feepaydetail).
        signups: this family's signup rows (First, Last, Sport).
        recipients: list of email addresses.
        emailtitle: subject line.
        longmessage: boilerplate appended at the end.
        Paylog: payment logbook (unused here; kept for call compatibility).

    Returns:
        str: complete message text with headers.
    '''
    balance = -thisbillrow.Balance  # int or float; stored negative, flip for display
    lines = [row['First'] + ' ' + row['Last'] + ' - ' + row['Sport'] + '\n'
             for _, row in signups.iterrows()]
    SUstring = ''.join(lines)
    paystring = thisbillrow.Feepaydetail  # prior fees / prior payment details
    recipientstr = ','.join(recipients)  # convert list to string
    message = ('From: Cabrini Sponsors Club <<EMAIL>>\nTo: ' + recipientstr
               + '\nSubject: ' + emailtitle + '\n'
               + 'Please pay your outstanding balance of $' + str(balance)
               + ' for Cabrini sports ($30 per player per sport; $75 family max). '
               + '\n\nPlayers signed up from your family this school year:\n'
               + SUstring + '\nPayments received this year: \n' + paystring
               + '\nOutstanding balance: $' + str(balance) + '\n' + longmessage)
    return message
# find yearseason ... now wrapped in with loadprocessfiles
def findyearseason(df):
    ''' Determine year and sports season from a raw signups frame.

    Year is taken from the first valid google-drive Timestamp (a warning is
    printed if it disagrees with the system clock); season is inferred from
    substrings of the Sport column ("occer"->Fall, "rack"->Spring,
    "asket"->Winter).

    Returns:
        tuple(str or None, int): (season, year). Season is None when no
        known sport substring is found; year falls back to the current year
        when no entry parsed as a timestamp.
    '''
    now = datetime.datetime.now()
    # find the first entry that actually is a timestamp; manual rows may hold
    # plain strings (fixed: the old loop could spin forever, and
    # "val != datetime.datetime" compared a VALUE against the type)
    year = now.year  # fallback when no timestamp exists
    for val in df.Timestamp:
        if isinstance(val, datetime.datetime):
            year = val.year  # use year value from signup timestamps
            if now.year != val.year:
                print('Possible year discrepancy: Signups are from ', str(val.year))
            break
    # now find sports season from characteristic sport-name substrings
    season = None  # fixed: season was unbound when no sport matched
    if df['Sport'].str.contains('occer', na=False).any():
        season = 'Fall'
    if df['Sport'].str.contains('rack', na=False).any():
        season = 'Spring'
    if df['Sport'].str.contains('asket', na=False).any():
        season = 'Winter'
    return season, year
def dropcolumns(df1, df2):
    ''' Trim df1 down to the columns present in the template df2.

    df2 may be a DataFrame or a plain list of column names; columns of df1
    absent from df2 are dropped one at a time (each drop yields a new
    frame, so the caller's df1 is left unchanged once any drop occurs).
    '''
    have = df1.columns.tolist()
    want = df2 if type(df2) == list else df2.columns.tolist()
    trimmed = df1  # rebound on first drop, avoiding in-place modification
    for extra in [c for c in have if c not in want]:
        trimmed = trimmed.drop(extra, axis=1)
    return trimmed
# older method of calculating age in loop
def calculateage(df):
    '''Add an Age column (years, one decimal) computed from Birthdate.

    Rows with missing Birthdate (NaT/nan) are skipped and keep NaN Age.

    Args:
        df (pd.DataFrame): frame with a timestamp 'Birthdate' column
            (e.g. Juniorteams).

    Returns:
        pd.DataFrame: same frame with 'Age' filled in where computable.
    '''
    today = datetime.datetime.date(datetime.datetime.now())  # current date
    for index, row in df.iterrows():
        dob = df.loc[index]['Birthdate']
        if str(dob) == 'NaT' or str(dob) == 'nan':  # skip age calc if DOB is missing
            continue
        dob = datetime.datetime.date(dob)  # convert pandas timestamp dob to datetime.date
        delta = today - dob  # datetime timedelta
        age = round((delta.days + delta.seconds / 86400) / 365.2425, 1)  # age as float years
        # DataFrame.set_value was removed in pandas 1.0; .at is the replacement
        df.at[index, 'Age'] = age
    return df
# Old way of doing grade adjustment (now direct from SCsignup by row)
def updategradeadjust(df, year):
'''Recalc grade adjustment if blank in players.csv
only really needs to be run every new school year
'''
now=datetime.datetime.now()
for index, row in df.iterrows():
grade=df.loc[index]['Grade'] # assume all grades present in players.csv
gradeadj=df.loc[index]['Gradeadj']
dob=df.loc[index]['DOB']
if str(dob)=='NaT' or grade=='nan': # skip players with no DOB on file
continue
dob=datetime.datetime.date(dob) # conver
if str(gradeadj)=='nan': # calc gradeadj only for those with missing values
if grade=='K':
grade=0
tempyear=now.year-int(grade) # year player entered K
entryage=datetime.date(tempyear,8,1)-dob # age at Aug 1 school cutoff in year kid entered K
entryage = (entryage.days + entryage.seconds/86400)/365.2425 # age entering K
if 5 < entryage <6: # normal K age
gradeadj=0
elif 4 < entryage <5: # ahead of schedule
gradeadj=1
elif 6 < entryage <7: # 1 year back
gradeadj=-1
elif 7 < entryage <8: # working on grade school mustache
gradeadj=-2
else: # probably some entry error
first= df.loc[index]['First']
last= df.loc[index]['Last']
print('Suspected DOB or grade error for ', first, ' ', last,' Grade ', grade, 'DOB', datetime.date.strftime(dob, "%m/%d/%y") )
continue # don't make gradeadj entry
# now update grade and gradeadj in players database
df=df.set_value(index,'Gradeadj',gradeadj)
return df # updated with grades and grade adjustments
|
tkcroat/SC | SC_analysis_main.py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 24 11:30:49 2017
@author: tkc
"""
# Scratch analysis script: league results exploration for basketball.
# NOTE(review): this file defines no imports of its own -- os, pd (pandas)
# and plt (matplotlib.pyplot) must already exist in the interactive session.
os.chdir('C:\\Users\\tkc\\Documents\\Python_Scripts\\SC')
resfile='C:\\Users\\tkc\\Documents\\Sponsors_Club\\League Request Template_BBall_2018.xlsx'
stats=pd.read_excel(resfile, sheetname='Results by League')
# Derive a Level column: grades below 5 get 'X'; otherwise the third
# character of the League string is used as the level code
stats['Level']=''
for index, row in stats.iterrows():
    if row.Grade<5:
        stats=stats.set_value(index,'Level','X')
    else:
        stats=stats.set_value(index,'Level',row.League[2])
# Normalize scoring to grade/gender/level average
# NOTE(review): set_value was removed in pandas 1.0; this scratch script
# predates that and would need .at if rerun on modern pandas
grgengroup=stats.groupby(['Gender','Grade','Level'])
for [gen,gr, lev], group in grgengroup:
    avgsc=group['Avg Scrd'].mean()
    avgal=group['Avg Allwed'].mean()
    for index, row in group.iterrows():
        stats=stats.set_value(index,'Avg Scrd', row['Avg Scrd']-avgsc)
        stats=stats.set_value(index,'Avg Allwed', row['Avg Allwed']-avgal)
# Restrict to 5th-grade girls for the plots below
stats=stats[ (stats['Gender']=='G') & (stats['Grade']==5)]
lggroup=stats.groupby(['League'])
# Pts scored/allowed vs gender/grade/level
grgengroup=stats.groupby(['Gender','Grade','Level'])
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(16,9), squeeze=False)
colorlist=['b','r','g','c','m','y','k', 'olive','pink','purple']
marklist=['o','v','^','<','>','s','p','*','h','+','x','X','D','.']
grnum=0
mylegend=[]
# One errorbar point per gender/grade/level group: mean +/- std of
# points scored (x) and points allowed (y)
for [gen,gr, lev], group in grgengroup:
    mylegend.append(str(gr)+gen+lev)
    avgsc=group['Avg Scrd'].mean()
    stdsc=group['Avg Scrd'].std()
    avgal=group['Avg Allwed'].mean()
    stdal=group['Avg Allwed'].std()
    plt.errorbar(x=avgsc, y=avgal, xerr=stdsc, yerr=stdal, color=colorlist[grnum%10], marker=marklist[grnum//10])
    grnum+=1
axes[0,0].legend(mylegend, loc='best', fontsize=8)
# Look at PF, PD (differential) by league
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(16,9), squeeze=False)
colorlist=['b','r','g','c','m','y','k', 'olive','pink','purple']
marklist=['.','o','v','^','<','>','s','p','*','h','+','x','X','D']
grnum=0
for key, group in lggroup:
    group.plot.scatter(x='Avg Scrd',y='Avg Allwed', color=colorlist[grnum%10], marker=marklist[grnum//10], ax=axes[0,0])
    grnum+=1
# Results by parish
lggroup=stats.groupby(['Parish'])
# NOTE(review): the lines below reference lg, ax and cycle, none of which
# are defined in this file; 'lg.scatt' is an unfinished console fragment
lg.scatt
markers =['s','v','o','x']
for (name, group), marker in zip(lg, cycle(markers)):
    ax.plot(group.x, group.y, marker=marker, linestyle='', ms=12, label=name)
# Setting up leagues (weigh past result, geography, coach request)
|
tkcroat/SC | SC_signups_main.py |
# -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:25:19 2016
Process signups from google drive
@author: tkc
"""
#%%
import pandas as pd
import os
import pkg.SC_signup_functions as SC
import pkg.SC_config as cnf
import pkg.SC_signup_google_API_functions as SCapi
# /from pandas_ods_reader import read_ods # too slow
#%%
from importlib import reload
reload(SC)
reload(cnf)
#%% Load and process raw signup file from Fall2016_signups.xlsx
# raw tab contains unprocessed google drive signups plus selected info from paper
os.chdir('C:\\Users\\kevin\\Documents\\Python_Scripts\\SC\\')
os.chdir(cnf._OUTPUT_DIR)
signupfile='Winter2017_signups.xlsx'
signupfile='Spring2019_signups.xlsx'
signupfile='Fall2018_signups.xlsx'
signupfile=cnf._INPUT_DIR +'\\Fall2019_signups.csv'
signupfile=cnf._INPUT_DIR +'\\Fall2019_signups.xlsx'
signupfile=cnf._INPUT_DIR +'Spring2019_signups.xlsx'
#%% Testing new google sheets API download
# ID and range of Fall 2020 (up to Gkey)
sheetID = '1mexU5HW8Va1QXN43eN2zvHQysJINw6tdwJ7psOKmQng'
rangeName = 'Form Responses!A:AX' # get allinclude plakey/famkey manual mode'
# ID and range of Winter 2019 basketball
sheetID = '182QFOXdz0cjQCTlxl2Gb9b_oEqInH93Peo6EKkKod-g'
rangeName = 'Form Responses 1!A:AC' # include plakey/famkey manual mode'
# spring signups
sheetID='1lppbr8srsVbN48RYrfRr58sd7yfUnJM21sSSx2C0mG8'
rangeName = 'Form Responses!A:Z' # include plakey/famkey manual mode'
gsignups = SCapi.downloadSignups(sheetID, rangeName)
# TODO write unique Gkey column... assign values
season='Fall'
year=2020
# Load signups,player and family contact info; format names/numbers, eliminate duplicates
players, famcontact, gsignups = SC.loadProcessGfiles(gsignups, season, year)
players, famcontact = SC.loadProcessPlayerInfo() # version w/o signup processing
# Preliminary summary of signups (w/o ID or master signups assignments)
coach=SC.findCoaches(gsignups, **{'gradeGenders':
[ [0,'m'],[0,'f'],[1,'m'],[1,'f']] }) # Co-ed K-1 team
coach=SC.findCoaches(gsignups) # coach candidates all grade/genders
#%%
# Find player number and assign to signup rows
# SCsignup, players, famcontact =SC.findplayers(SCsignup, players, famcontact)
# test w/ gsignups
gsignups, players, famcontact =SC.findplayers(gsignups, players, famcontact, year)
# Save SC signups back to xls file (incl. altered names)
SC.writetoxls(SCsignup,'Raw', signupfile)
os.chdir(cnf._INPUT_DIR)
SCsignup.to_csv(signupfile,index=False) # CSV version
#TODO save method back to google signups?
# Update missing info for manually entered players (no full google drive entry info)
SCsignup = SC.findmissinginfo(gsignups, players, famcontact)
SCsignup = findmissinginfo(SCsignup, players, famcontact)
unmatch=gsignups[pd.isnull(gsignups['Plakey'])]
#%% Process data changes from google drive info... works but check/correct using log
# email, phone, parent names, address changes (house # detection)
players, famcontact=SC.processdatachanges(gsignups, players, famcontact, year)
players, famcontact=processdatachanges(gsignups, players, famcontact, year)
# load Mastersignups and add signups to master signups list (duplicates eliminated so no danger with re-run)
Mastersignups = pd.read_csv(cnf._INPUT_DIR +'\\\master_signups.csv', encoding='cp437')
Mastersignups = SC.createsignups(gsignups, Mastersignups, season, year) # new signups are auto-saved
# Summarize signups by sport-gender-grade (written into signup file)
# TODO fix... redirect to output_dir
SC.summarizesignups(Mastersignups, season, year, **{'XLSpath':signupfile}) # write to tab in excel signup file
SC.summarizesignups(Mastersignups, season, year, **{'saveCSV':True}) # save to season_yr_signup_summary.csv (not Excel)
# gsignups version
# TODO make a summary tool before/without adding to master signups
# Feasibility before official signup, but needs split of multiple signups
sportsumm=SC.summarizesignups(gsignups, season, year, **{'toDf':True})
SC.summarizesignups(gsignups, season, year) # save to csv
SC.summarizesignups(gsignups, season, year, **{'XLSpath':signupfile}) # save to sheet in xls signup
# Manually create desired teams in Teams_coaches.xlsx (teams tab should only have this sport season not older teams)
# TODO really slow... find a replacement method for .ods reads
teams=pd.read_csv(cnf._INPUT_DIR +'\\Teams_2019.csv', encoding='cp437')
#teams=pd.read_excel('Teams_coaches.xlsx', sheetname='Teams') #
# teams = read_ods(cnf._INPUT_DIR +'\\Teams_coaches.ods', 'Teams') # read ods team file
#coaches = read_ods(cnf._INPUT_DIR +'\\Teams_coaches.ods', 'Coaches') # read ods team file
#coaches=pd.read_excel('private\\Teams_coaches.xlsx', sheetname='Coaches') # load coach info
coaches=pd.read_csv(cnf._INPUT_DIR +'\\coaches.csv', encoding='cp437') # common excel file encoding
coaches.to_csv('coaches.csv', index=False)
# Update teams (manual edit or using update script)
teams=SC.updateoldteams(teams,year)
teams.to_csv('private\\Teams_2019.csv', index=False)
SC.writetoxls(teams,'Teams','teams_coaches.xlsx') # save fsupdated teams to tab in teams_coaches xls file
# Now assign this season/years players to teams based on Teams xls file
# Overwrite=True resets all existing custom player assignment (i.e. single 2nd grader playing on 3rd team)
# Overwrite=False will not change any existing team assignments (only finds team for new signups)
Mastersignups=SC.assigntoteams(Mastersignups, season, year, teams, overwrite=False)
Mastersignups=assigntoteams(Mastersignups, season, year, teams, overwrite=False)
temp=Mastersignups[(Mastersignups['Year']==2017) & (Mastersignups['Sport']=='Track')]
# Track sub-team assigned based on DOB calculation (TEAM ASSIGNMENTS NOT AUTOSAVED)
Mastersignups=SC.assigntrackgroup(Mastersignups, year, players)
Mastersignups.to_csv(cnf._INPUT_DIR + '\\master_signups.csv',index=False)
# if any players are playing up at different grade, just manually change team name in master_signups.csv (and use overwrite False)
# also manually edit select players to open status
# team contact lists to separate sport Excel tabs (Warning... this overwrites existing version)
SC.writecontacts(Mastersignups, famcontact, players, season, year)
# Make google compatible contacts list for all Cabrini teams (auto-save to csv)
SC.makegoogcont(Mastersignups, famcontact, players, season, year)
# Find missing players and add to recruits tab of signupfile
# after all new signups added, just looks for those signed up last year but not this
SC.findrecruits(Mastersignups, players, famcontact, season, year, signupfile)
# TODO fix countteamplayers for co-ed teams
teams=SC.countteamplayers(Mastersignups, teams, season, year) # summarizes players assigned to teams, autosaved to teams tab
Mastersignups.to_csv('master_signups.csv', index=False)
# Create 5 separate rosters (Cabrini CYC soccer & VB, soccer & VB transfers, junior teams (incl. age) incl. coaches
acronyms=pd.read_csv(cnf._INPUT_DIR+'\\acronyms.csv') # parish and school acronyms
SC.createrosters(Mastersignups, season, year, players, teams, coaches, famcontact, acronyms)
# Create contact lists for each team's coach
# Package transferred player info and create email messages to other directors/schools
messfile=cnf._INPUT_DIR+'\\messages\\player_transfer_director.txt'
SC.packagetransfers(teams, Mastersignups, famcontact, players, season, year, acronyms, messfile)
# Load transfers from other schools
transroster=pd.read_excel('Cecilia_to_Cabrini_fall_2017.xls')
SCsignup=loadtransfers(transroster, SCsignup)
SC.writetoxls(SCsignup,'Raw', signupfile) # saves changes to master Excel file
# TODO Add fake payments for transferred players ... done manually
# Track event registration (Pat Moore spreadsheet)
os.chdir('C:\\Users\\tkc\\Documents\\Python_Scripts\\SC')
trackevents=pd.read_excel('track_summary_2018.xlsx', sheetname='Summary')
# output file for Pat Moore (not needed but check consistency)
regsheet=pd.read_excel('Track_Registration_Form_2018.xlsx',
sheetname='RegistrationSheet', skiprows=2)
# using track summary input sheet, translate into Pat Moore format
regfile=SC.readbackevents(trackevents)
# Copy and paste this file into
regfile.to_csv('track_reg_file.csv', index=False)
# TODO finish this
SC.maketrackroster(Mastersignups, players, year)
# rename team in teams_coaches, mastersignups,
# Detect any roster changes made by <NAME>
myroster=pd.read_csv(cnf._OUTPUT_DIR+'\\Cabrini_Basketballroster2019.csv',encoding='cp437')
PMroster=pd.read_csv(cnf._OUTPUT_DIR+'\\Cabrini_Basketballroster2019_PM.csv',encoding='cp437')
myroster=pd.read_csv('Cabrini_VBroster2019.csv',encoding='cp437')
PMroster=pd.read_csv('Cabrini_VBroster2019_PM.csv',encoding='cp437')
alteredrows=SC.detectrosterchange(PMroster,myroster)
alteredrows.to_csv(cnf._OUTPUT_DIR+'\\roster_changes.csv', index=False)
test=alteredrows[alteredrows['Lname']=='Chavez']
# Make CYC card images for 3rd and up teams
missing = SC.makeCYCcards(Mastersignups, players, teams, coaches, season, year) # only good cards
missing = SC.makeCYCcards(Mastersignups, players, teams, coaches, season, year, **{'showmissing':True} )
|
tkcroat/SC | UnderDev/SCdownload_signups.py | <reponame>tkcroat/SC<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:12:03 2016
@author: tkc
"""
import gspread
import json  # FIX: json.load was used below but json was never imported
import pandas as pd
#%% Authorization
# NOTE(review): SignedJwtAssertionCredentials comes from the legacy oauth2client
# package (from oauth2client.client import SignedJwtAssertionCredentials); that
# import is missing here and the class no longer exists in modern google-auth --
# confirm which auth library this environment actually provides.
oauthfile = '/path/to/file/your-api-key.json'  # FIX: stray trailing comma made this a 1-tuple, breaking open()
scope = ['https://spreadsheets.google.com/feeds']
with open(oauthfile) as keyfp:  # close the key file instead of leaking the handle
    json_key = json.load(keyfp)
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
gspreadclient = gspread.authorize(credentials) # Authorize
#%%
# SECURITY: hard-coded plaintext account password committed to source control --
# revoke this credential and use the service-account flow above instead.
gc = gspread.login('<EMAIL>', 'tI60cR30')
book = gc.open('Spreadsheet name')
sheet = book.sheet1 #choose the first sheet
dataframe = pd.DataFrame(sheet.get_all_records())  # FIX: was 'pandas.DataFrame'; module is imported as pd
|
tkcroat/SC | SC_billing_main.py | # -*- coding: utf-8 -*-
"""
Sponsors club billing main program
Created on Sat Oct 1 13:53:36 2016
@author: tkc
"""
#%%
import pandas as pd
import os, sys
import pkg.SC_signup_functions as SC
import pkg.SC_billing_functions as SCbill
import pkg.SC_signup_google_API_functions as SCapi
import pkg.SC_config as cnf
#%%
from importlib import reload
reload(SCbill)
#%%
paylog = SCapi.readPaylog()
payPygSheet, paylog = readPaylog() # Return as pyg sheet and as dataframe
paylog_mod, newplayers=SCbill.matchpayment(paylog, players)
new=paylog[paylog['Paykey']>=349]
players, famcontact = SC.loadProcessPlayerInfo()
Mastersignups = pd.read_csv(cnf._INPUT_DIR +'\\master_signups.csv', encoding='cp437')
teams=pd.read_csv(cnf._INPUT_DIR +'\\Teams_2019.csv', encoding='cp437')
coaches=pd.read_csv(cnf._INPUT_DIR +'\\coaches.csv', encoding='cp437')
season='Winter'
year=2019
#%% New method w/ pygsheets
pygSheet, paylog = readPaylog()
#%%
# Backup of paylog to local excel file
# load old teams from any prior season/ year combination (overwrites current teams)
teams=SCbill.loadoldteams('Spring', 2017)
teams=SCbill.loadoldteams(['Fall','Winter'], [2015,2016]) # load a bunch of old teams
# Family Billing
# assign existing payments to players (needed every time new payment is entered)... does not autosave
paylog_mod, newplayers=SCbill.matchpayment(paylog, players)
# save modified paylog to tab of xls file (not autosaved )
SC.writetoxls(paylog,'Paylog','Payment_logbook.xlsx')
# load teams from prior sports season
teams=SCbill.loadoldteams('Fall', 2017) # load prior season's teams
teams=SCbill.loadoldteams(['Fall','Winter'], [2016,2017])
# New tk interface for send/test ebills
ebilllist, skiplist=SCbill.sendbills_tk(Mastersignups, paylog, famcontact, players, season, year, teams)
# Create billing list for current sports season (auto-saved to file)
kwargs={}
kwargs.update({'olduni':True}) # make message about uni return
kwargs.update({'newuni':True}) # include info about new uni pick-up (skip for later send)
bills=SCbill.createbilllist(Mastersignups, paylog, famcontact, players, season, year,
teams, priorseasons=1, fname='Billlist_26Sept18.csv', **kwargs)
# Reload if any manual edist are made
bills=pd.read_csv('Billlist_26Sept18.csv', encoding='cp437')
# Save above mastersignups after copy over of uniform night info (STILL TESTING... ensure same length)
Mastersignups.to_csv('master_signups.csv',index=False)
# Check master signups against master uniform log and update this
# Load billing test list
billlist=pd.read_csv('billlist_12Aug17.csv', encoding='cp437')\
# unilist ... is this missing uniforms list?
unilist=pd.read_csv('uni_return_no_fee_12May17.csv', encoding='cp437')
# EMAIL BILLING
messagefile='messages\\ebill_uninight_fall.txt' # longer text file with some find replace strings
messagefile='messages\\ebill_spring.txt'
messagefile='messages\\ebill_spring_uni_only_no_fee.txt'
coachmessage='messages\\ebill_uninight_coaches.txt'
emailtitle='Cabrini Sports Uniforms tomorrow at Open House 2-4 PM'
emailtitle='Cabrini Sports Fees for Spring (and Winter) are due.'
emailtitle='Info for your team on Cabrini Uniform Night Wed Jan 4 6-8PM' # for e-mail to coaches
textheader='On Jan 4th from 6-7:30PM at the Cabrini gym, please '
textheader='Please ' # generic payment or return request (but need to remove pick up of unis)
# E-MAIL LOGIC for who gets an SMS gateway or email message...
# If olduni, newuni or fees are true, having outstanding uniform, needing new uni or owing fees will trigger a message
# Last 3 true only skips people who have nothing to be contacted about (i.e. paid up junior team player)
# TEST OF FAMILY SPECIFIC E-MAIL AND SMS (write to two separate txt log files)
kwargs={}
kwargs.update({'SMS':textheader}) # if sending SMS bills, pass beginning of text message
kwargs.update({'fees':True}) # send e-mail to subset that owe fees
kwargs.update({'olduni':True}) # send e-mail to those with old uniform issues
kwargs.update({'newuni':True}) # send e-mail to those with new uniforms to pick up
SCbill.testautobilling(billlist, Mastersignups, season, year, emailtitle, messagefile, **kwargs)
skiplist=testautobilling(unilist, Mastersignups, season, year, emailtitle, messagefile, **kwargs)
billlist, skiplist=SCbill.sendebills(billlist, Mastersignups, season, year, emailtitle, messagefile, **kwargs)
# Send e-mail message to skiplist (fees paid but do have unis to return)
billlist, skiplist=sendebills(unilist, Mastersignups, season, year, emailtitle, messagefile, **kwargs)
# E-mail list of outstanding team fees to team's coach
|
tkcroat/SC | pkg/SC_signup_functions_13Julbackup.py | <reponame>tkcroat/SC
# -*- coding: utf-8 -*-
"""
Created on Sun May 22 10:30:01 2016
SC process signups functions
@author: tkc
"""
#%%
import pandas as pd
import numpy as np
import datetime
import re
#%%
def findavailablekeys(df, colname, numkeys):
    '''Return at least `numkeys` integer keys not currently used in df[colname].

    Used for players, families, signups, etc. Gaps in 1..len(df) are recycled
    first; any shortfall is filled with fresh keys counting up from
    max(used)+1 (or from 1 when the column is empty -- the original crashed on
    an empty column via max() of an empty sequence).
    '''
    used = set(df[colname].unique().tolist())
    # candidate range includes len(df) itself (original had an off-by-one and
    # never recycled the highest in-range gap)
    availkeys = [k for k in range(1, len(df) + 1) if k not in used]
    if len(availkeys) < numkeys:  # get more keys starting at max+1
        start = int(max(used)) + 1 if used else 1
        availkeys.extend(start + j for j in range(numkeys - len(availkeys)))
    return availkeys
def dropcolumns(df1, df2):
    '''Drop from df1 every column that is absent from template df2.

    Returns a new frame; df1 itself is left untouched (the commented-out
    inplace variant in the original mutated the caller's frame). The
    per-column drop loop is replaced by a single drop call.
    '''
    extras = [c for c in df1.columns if c not in df2.columns]
    return df1.drop(extras, axis=1)
def organizerecruits(df):
    '''Trim and order a sport-gender slice for the recruits tab of the excel
    signup summary (sorted by grade, kindergarten first).'''
    mycols = ['First', 'Last', 'Grade', 'Gender', 'Sport', 'School', 'Phone1',
              'Text1', 'Email1', 'Phone2', 'Text2', 'Email2', 'Plakey',
              'Famkey', 'Family']
    df = dropcolumns(df, pd.DataFrame(columns=mycols))
    # map K -> 0 so kindergarten sorts ahead of 1st grade, then restore
    df['Grade'] = df['Grade'].replace('K', 0)
    df = df.sort_values(['Grade'], ascending=True)
    df['Grade'] = df['Grade'].replace(0, 'K')
    return df[mycols]
def organizecontacts(df):
    '''Trim and order a sport-gender slice for a contacts tab of the excel
    signup summary (adds an empty Team column; sorted by grade, K first).'''
    mycols = ['First', 'Last', 'Grade', 'Gender', 'School', 'Phone', 'Text',
              'Email', 'Phone2', 'Text2', 'Email2', 'Team', 'Plakey',
              'Famkey', 'Family']
    df['Team'] = ''
    df = dropcolumns(df, pd.DataFrame(columns=mycols))
    # map K -> 0 so kindergarten sorts ahead of 1st grade, then restore
    df['Grade'] = df['Grade'].replace('K', 0)
    df = df.sort_values(['Grade'], ascending=True)
    df['Grade'] = df['Grade'].replace(0, 'K')
    return df[mycols]
def organizesignups(df, year):
    '''Format a per-sport slice of the SCsignup file for the master signup
    list: trim columns, nested sort by gender then grade (K first), and add
    the Team / Year / SUkey bookkeeping columns.'''
    mycols = ['SUkey', 'First', 'Last', 'Grade', 'Gender', 'Sport', 'Year',
              'Team', 'Plakey', 'Famkey', 'Family']
    df = dropcolumns(df, pd.DataFrame(columns=mycols))
    # map K -> 0 so kindergarten sorts ahead of 1st grade, then restore
    df['Grade'] = df['Grade'].replace('K', 0)
    df = df.sort_values(['Gender', 'Grade'], ascending=True)
    df['Grade'] = df['Grade'].replace(0, 'K')
    df['Team'] = ''
    df['Year'] = int(year)
    df['SUkey'] = 0  # zero means "no unique signup key assigned yet"
    return df[mycols]
def organizeroster(df):
    '''Rename/reorder columns of a per-sport, per-year slice into the CYC
    roster layout (Girl/Boy -> F/M; unnecessary columns dropped).'''
    colmap = {'First': 'Fname', 'Last': 'Lname', 'Address': 'Street',
              'Parish_registration': 'Parish of Registration',
              'Parish_residence': 'Parish of Residence', 'Phone1': 'Phone',
              'DOB': 'Birthdate', 'Gender': 'Sex', 'Email1': 'Email'}
    df.rename(columns=colmap, inplace=True)  # in place, as before
    df['Sex'] = df['Sex'].replace({'Girl': 'F', 'Boy': 'M'})
    mycols = ['Fname', 'Lname', 'Street', 'City', 'State', 'Zip', 'Phone',
              'Email', 'Birthdate', 'Sex', 'Role', 'Division', 'Grade',
              'Team', 'School', 'Parish of Registration',
              'Parish of Residence', 'Open/Closed', 'Coach ID']
    df = dropcolumns(df, pd.DataFrame(columns=mycols))
    return df[mycols]
def findyearseason(df):
    '''Determine (season, year) from raw google-form signups.

    Year comes from the first Timestamp entry that is actually a datetime;
    season is inferred from which sport names appear in the Sport column.
    Fixes the original's value-vs-type comparison (`val != datetime.datetime`)
    and its rescan loop, which spun forever whenever the last row held no
    timestamp (and for pandas Timestamp entries, since the exact-type test
    `type(val) != datetime.datetime` never matched a subclass).

    Raises ValueError when no datetime entry exists (the original hung).
    Returns (None, year) when no known sport is found (the original raised
    NameError).
    '''
    now = datetime.datetime.now()
    # first entry that is a real datetime (manual rows may hold strings)
    val = next((v for v in df['Timestamp'] if isinstance(v, datetime.datetime)), None)
    if val is None:
        raise ValueError('No datetime entries found in Timestamp column')
    year = val.year  # use year value from signup timestamps
    if now.year != val.year:
        print ('Possible year discrepancy: Signups are from ', str(val.year))
    # sport substrings checked in the original order; a later match wins
    season = None
    if df['Sport'].str.contains("occer", na=False).any():
        season = 'Fall'
    if df['Sport'].str.contains("rack", na=False).any():
        season = 'Spring'
    if df['Sport'].str.contains("asket", na=False).any():
        season = 'Winter'
    return season, year
def outputduplicates(df):
    '''Print the name of every player whose First/Last combination appears
    more than once (these can then be deleted from the google drive signups).'''
    dupflags = df.duplicated(['First', 'Last'])  # True on 2nd+ occurrence
    for pos, flagged in enumerate(dupflags):
        if flagged:
            first = df.iloc[pos]['First']
            last = df.iloc[pos]['Last']
            print('Duplicated signup for player: ', first, ' ', last)
    return
def formatphone(df):
    '''Normalize every Phone* column to the string form 314-xxx-xxxx and map
    yes/Yes to Y in every Text* column. Mutates and returns df.

    The per-cell DataFrame.set_value calls (removed in pandas 1.0) are
    replaced with DataFrame.at; both are label-based, so this keeps the
    original's implicit assumption of a default 0..n-1 RangeIndex.
    '''
    allcols = df.columns
    phlist = [str(s) for s in allcols if 'Phone' in s]
    for i, colname in enumerate(phlist):
        for j in range(0, len(df)):
            num = str(df.iloc[j][colname])  # raw cell value as string
            if num == 'nan':
                continue  # skip reformat if nan
            if not re.search(r'(\d+-\d+-\d+)', num):  # not already hyphenated
                num = re.sub("[^0-9]", "", num)  # strip every non-digit
                if len(num) == 7:
                    num = '314' + num  # assume 314 area code
                if len(num) == 11 and num.startswith('1'):  # remove starting 1 if present
                    num = num[1:11]
                if len(num) != 10:
                    print('Bad number: ', num)
                num = num[0:3] + '-' + num[3:6] + '-' + num[6:10]
            df.at[j, colname] = num  # write back in correct format
    # now change yes in any text field to Y
    txtlist = [str(s) for s in allcols if 'Text' in s]
    for i, colname in enumerate(txtlist):
        for j in range(0, len(df)):
            tempstr = str(df.iloc[j][colname])
            if tempstr in ('yes', 'Yes'):
                df.at[j, colname] = 'Y'
    return df
def standardizeschool(df):
    '''Standardize free-text School entries to Cabrini / Soulard / SLPS.

    Case-insensitive substring matching, applied in the original order.
    Vectorized with boolean .loc assignment; the original's per-row
    set_value calls were removed in pandas 1.0. Mutates and returns df.
    '''
    replacements = [
        ('frances' + '|' + 'cabrini' + '|' + 'sfca', 'Cabrini'),  # multiple school matching string
        ('soulard', 'Soulard'),
        ('public', 'SLPS'),
    ]
    for schstr, canonical in replacements:
        mask = df['School'].str.contains(schstr, na=False, case=False)
        df.loc[mask, 'School'] = canonical
    return df
def calculateage(df):
    '''Add a float Age column (in years, e.g. 6.1) computed from the Birthdate
    column (datetime/Timestamp); rows with missing (NaT/nan) birthdates keep
    Age 0.0. Resets the index so positional writes line up, as before.

    set_value (removed in pandas 1.0) is replaced with .at.
    '''
    df = df.reset_index(drop=True)
    today = datetime.datetime.now().date()
    df['Age'] = 0.0
    for i in range(0, len(df)):
        dob = df.iloc[i]['Birthdate']
        if str(dob) in ('NaT', 'nan'):  # skip age calc if DOB is missing
            continue
        delta = today - dob.date()  # timedelta; works for datetime and pandas Timestamp
        # 365.2425 = mean Gregorian year length
        age = round((delta.days + delta.seconds / 86400) / 365.2425, 1)
        df.at[i, 'Age'] = age
    return df
def formatnamesnumbers(df):
    '''Switch name columns to title case, standardize gender to f/m, reformat
    phone/text columns (formatphone) and standardize school names
    (standardizeschool). Mutates and returns df.

    Fixes a copy-paste bug: the 'Pfirst2' branch title-cased df['Pfirst']
    (again) instead of df['Pfirst2'], and raised KeyError when 'Pfirst' was
    absent.
    '''
    # title-case every name-type column that is actually present
    namecols = ['First', 'Last', 'Family', 'Pfirst', 'Plast', 'Pfirst2', 'Plast2']
    for col in namecols:
        if col in df:
            df[col] = df[col].str.title()
    df = df.replace('Girl', 'f')
    df = df.replace('Boy', 'm')
    df = formatphone(df)  # call phone reformatting string
    if 'School' in df:
        df = standardizeschool(df)  # use "Cabrini" and "Soulard" as school names
    return df
def writecontacts(df, season, signupfile):
    '''Write per-sport (and, in Fall, per-gender) contact sheets into tabs of
    the existing excel signup workbook.

    Each subset is organized via organizecontacts, team-assigned, and written
    to its own sheet; the heavily duplicated slice/organize/write sequence is
    factored into a local helper. Sheet names and write order are unchanged.
    '''
    from openpyxl import load_workbook  # local import, as before
    book = load_workbook(signupfile)
    writer = pd.ExcelWriter(signupfile, engine='openpyxl')
    writer.book = book
    writer.sheets = dict((ws.title, ws) for ws in book.worksheets)

    def _writesheet(subset, sheetname):
        # organize + team-assign one sport subset and overwrite its tab
        subset = organizecontacts(subset)
        # NOTE(review): assignteams is defined elsewhere in this module as
        # assignteams(df, Teams, sport) -- this 1-arg call predates that
        # signature; confirm which version is intended.
        subset = assignteams(subset)
        subset.to_excel(writer, sheet_name=sheetname, index=False)

    if season == 'Fall':
        soccer = df['Sport'].str.contains('soccer', case=False, na=False)
        vball = df['Sport'].str.contains('v', case=False, na=False)
        girls = df['Gender'].str.contains('girl', case=False, na=False)
        boys = df['Gender'].str.contains('boy', case=False, na=False)
        _writesheet(df.loc[soccer & girls], 'Girlsoccer')
        _writesheet(df.loc[soccer & boys], 'Boysoccer')
        _writesheet(df.loc[vball & boys], 'BoyVB')
        _writesheet(df.loc[vball & girls], 'GirlVB')
    if season == 'Spring':
        for pat, sheet in [('baseball', 'Baseball'), ('softball', 'Softball'),
                           ('t-ball', 'Tball'), ('track', 'Track')]:
            mask = df['Sport'].str.contains(pat, case=False, na=False)
            _writesheet(df.loc[mask], sheet)
    if season == 'Winter':  # currently only basketball
        _writesheet(df, 'Basketball')
    writer.save()  # saves xls file with all modified data
    return
def _extractsport(df, pattern, sportname, year, case=True):
    '''Slice one sport's rows out of the raw signups, tag the canonical sport
    name, and normalize the columns via organizesignups.'''
    mask = df['Sport'].str.contains(pattern, case=case, na=False)
    sub = df.loc[mask].reset_index(drop=True)
    sub['Sport'] = sportname
    return organizesignups(sub, year)

def createsignups(df, Mastersignups, season, year):
    '''Add this season's SCsignup rows to the master signup list (autosaved
    to master_signups.csv) and return the updated list.

    Duplicate (Plakey, Sport, Year) rows are dropped; new rows are prepended
    so a re-run with updated signups keeps the fresh entry. Fixes: the
    original called SC.findavailablekeys, but this module defines
    findavailablekeys locally and imports no SC (NameError); per-cell
    set_value (removed in pandas 1.0) is replaced with .at; the three
    near-identical season branches are collapsed into one merge path; the
    Winter branch no longer clobbers the Sport column of the caller's frame.
    Returns None for an unrecognized season, matching the original
    fall-through.
    '''
    df.Grade = df.Grade.replace('K', 0)  # replace K with zero to allow sorting
    if season == 'Fall':
        # note: Fall matching was case-sensitive in the original; preserved
        newframes = [_extractsport(df, "occer", 'soccer', year),
                     _extractsport(df, "olleyball", 'VB', year)]
    elif season == 'Winter':
        allbball = df.reset_index(drop=True).copy()  # no need to filter by sport in winter
        allbball['Sport'] = 'Bball'
        newframes = [organizesignups(allbball, year)]
    elif season == 'Spring':
        newframes = [_extractsport(df, "track", 'Track', year, case=False),
                     _extractsport(df, "soft", 'Softball', year, case=False),
                     _extractsport(df, "base", 'Baseball', year, case=False),
                     _extractsport(df, "t-ball", 'Tball', year, case=False)]
    else:
        return None  # unrecognized season
    colorder = Mastersignups.columns.tolist()  # desired column order
    # prepend each new frame (same concat order as the original) so that
    # drop_duplicates keeps the freshly-added row over a stale master row
    for frame in newframes:
        Mastersignups = pd.concat([frame, Mastersignups], ignore_index=True)
    Mastersignups = Mastersignups[colorder]  # put back in original order
    Mastersignups = Mastersignups.drop_duplicates(subset=['Plakey', 'Sport', 'Year'])
    # assign a unique SUkey to any signup that does not yet have one
    neededkeys = Mastersignups[(Mastersignups['SUkey'] == 0)]
    availSUkeys = findavailablekeys(Mastersignups, 'SUkey', len(neededkeys))
    for keycounter, (index, row) in enumerate(neededkeys.iterrows()):
        Mastersignups.at[index, 'SUkey'] = availSUkeys[keycounter]
    Mastersignups.to_csv('master_signups.csv', index=False)
    return Mastersignups
def _splitjuniortransfers(others):
    '''Split non-CYC rows: team names containing a digit (or no assignment,
    flagged via na=True) are junior in-house teams; the rest are transfers.'''
    digitmask = others['Team'].str.contains(r'[0-9]', na=True)
    return others.loc[~digitmask], others.loc[digitmask]

def _savejuniorteams(juniorteams, year):
    '''Write the junior (non-CYC) teams roster, with current ages, to csv.'''
    juniorteams = organizeroster(juniorteams)
    juniorteams = calculateage(juniorteams)
    juniorteams.to_csv('Cabrini_junior_teams_' + str(year) + '.csv', index=False)

def _savetransferroster(transfers, sportpat, fname):
    '''Write one sport's transferred players as a sorted CYC-style roster.'''
    sub = transfers[transfers['Sport'].str.contains(sportpat, case=False, na=False)]
    sub = organizeroster(sub)
    sub = sub.sort_values(['Team', 'Sex', 'Grade'], ascending=True)
    sub.to_csv(fname, index=False)

def createrosters(df, season, year, players, teams, famcontact):
    '''From this season's Mastersignups, write Cabrini CYC rosters, transfer
    rosters and junior-team rosters (with calculated ages) to csv files.

    Extra info is merged in from famcontact (address/phone), teams (division)
    and players (DOB/school); teams must already be assigned via the teams
    xls and assigntoteams. The merge/split logic duplicated across the three
    season branches is factored out, and the Winter transfer filter is fixed:
    it matched 'soccer' (a copy-paste slip), so basketball transfers were
    never written. NOTE: as before, the caller's `teams` frame is renamed in
    place (CYCname -> Team).
    '''
    df = df[(df['Year'] == year)]  # filter by year
    # Street/City/State/Zip/Phone/email/Parish info from famcontact
    df = pd.merge(df, famcontact, how='left', on=['Famkey'], suffixes=('', '_r'))
    teams.rename(columns={'CYCname': 'Team'}, inplace=True)
    df = pd.merge(df, teams, how='left', on=['Team'], suffixes=('', '_r2'))  # division
    df = pd.merge(df, players, how='left', on=['Plakey'], suffixes=('', '_r3'))  # DOB, school
    df['Role'] = 'Player'
    df['Open/Closed'] = 'Closed'
    df['Coach ID'] = ''
    # Cabrini CYC team names contain a hyphen
    cycmask = df['Team'].str.contains('-', case=False, na=False)
    cyc = df.loc[cycmask]
    transfers, juniors = _splitjuniortransfers(df.loc[~cycmask])
    if season == 'Fall':
        for sport in ('soccer', 'VB'):
            roster = organizeroster(cyc[cyc['Sport'].str.contains(sport, case=False, na=False)])
            roster.to_csv('Cabrini_' + sport + '_rosters_' + str(year) + '.csv', index=False)
        _savetransferroster(transfers, 'soccer', 'CYCSoccer_transfers' + str(year) + '.csv')
        _savetransferroster(transfers, 'VB', 'CYCVB_transfers' + str(year) + '.csv')
        _savejuniorteams(juniors, year)
    elif season == 'Winter':
        roster = organizeroster(cyc[cyc['Sport'].str.contains('basketball', case=False, na=False)])
        roster.to_csv('Cabrini_basketball_rosters_' + str(year) + '.csv', index=False)
        # bugfix: filter basketball (original filtered 'soccer' here)
        _savetransferroster(transfers, 'basketball', 'CYC_basketball_transfers' + str(year) + '.csv')
        _savejuniorteams(juniors, year)
    elif season == 'Spring':
        # all CYC softball/baseball teams fit in one roster file
        allcycball = organizeroster(cyc)
        allcycball.to_csv('Cabrini_allCYCball_rosters_' + str(year) + '.csv', index=False)
        # track roster, with current age for Judge Dowd
        track = df[df['Sport'].str.contains('track', case=False, na=False)]
        track = calculateage(organizeroster(track))
        track.to_csv('Track_roster_' + str(year) + '.csv', index=False)
        # no formal roster needed for Tball... just contact sheet per team
    return
def assignteams(df, Teams, sport):
    '''Assign team names to a contacts summary (similar to assigntoteams, which is
    called on mastersignups).

    Merges on Grade/Gender/Sport against the current season's Teams tab (which only
    contains names from this season/year to avoid screwing up old custom team
    assignments), then overwrites the generic Team with the CYC name where present.

    Args:
        df: contacts summary frame; modified via merge and returned trimmed.
        Teams: current season's teams table (must include CYCname).
        sport: unused here; kept for call-site compatibility.
    Returns the trimmed contact-sheet-style frame.
    '''
    Teamsmult = makemultiteam(Teams)  # duplicate multi-grade team rows so each grade merges
    Teamsmult['Grade'] = Teamsmult['Grade'].astype('str')  # grade back to string for merge
    Teamsmult.Grade = Teamsmult.Grade.replace('0', 'K', regex=True)  # 0 back to K
    df['Grade'] = df['Grade'].astype('str')
    # left merge keeps all contact entries
    df = pd.merge(df, Teamsmult, how='left', on=['Grade', 'Gender', 'Sport'], suffixes=('', '_r'))
    df['Team'] = df['Team'].astype('str')
    # Copy CYC team name over generic Team, skipping nulls as the original comment
    # intended (fix: the old `!='nan'` test let real NaN values through; per-row
    # set_value was removed in pandas 1.0 -- vectorized .loc assignment instead)
    hascyc = df['CYCname'].notna() & (df['CYCname'].astype(str) != 'nan')
    df.loc[hascyc, 'Team'] = df.loc[hascyc, 'CYCname']
    # drop extra columns: keep only the usual contact-sheet layout
    mycols = ['First', 'Last', 'Grade', 'Gender', 'School', 'Phone', 'Text', 'Email', 'Phone2', 'Text2', 'Email2', 'Team', 'Plakey', 'Famkey', 'Family']
    tempdf = pd.DataFrame(columns=mycols)  # formatted to match usual contact sheet
    df = dropcolumns(df, tempdf)
    return df
def makemultiteam(df):
    '''Utility called by assigntoteams/assignteams: for teams spanning two grades
    (Graderange like '34' or 'K2'), append a duplicate row at the lower grade so a
    plain merge on Grade matches players from either grade.

    Mutates df['Graderange'] to str (K replaced by 0) and returns df with the
    extra lower-grade rows concatenated.
    '''
    df['Graderange'] = df['Graderange'].astype('str')
    df.Graderange = df.Graderange.replace('K', '0', regex=True)  # K -> 0 for numeric handling
    # multi-grade teams have a 2+ character range string such as '34' or '02'
    mask = df['Graderange'].str.len() > 1
    # .copy() avoids SettingWithCopyWarning when mutating the slice below
    multiteams = df.loc[mask].copy()
    multiteams['Grade'] = multiteams['Grade'].astype('int')
    multiteams = multiteams.reset_index(drop=True)
    # duplicate entry goes at the next lower grade; vectorized assignment replaces
    # the per-row set_value loop (set_value was removed in pandas 1.0)
    # TODO make sure it's not 3 grades (i.e. K-2)
    multiteams['Grade'] = multiteams['Grade'] - 1
    # combine duplicates with the original frame
    df = pd.concat([df, multiteams], ignore_index=True)
    return df
def assigntoteams(df, season, year, Teams):
    '''Find each signup's CYC team name by merging mastersignups on
    Year/Grade/Gender/Sport with the current season's Teams tab (which only
    contains names from this season/year to avoid screwing up old custom team
    assignments).

    Args:
        df: master signups frame.
        season, year: unused here (Year is already a merge key); kept for
            call-site compatibility.
        Teams: current season's teams table (must include CYCname).
    Side effect: overwrites master_signups.csv. Returns the trimmed frame.
    '''
    Teamsmult = makemultiteam(Teams)  # duplicate multi-grade team rows so each grade merges
    Teamsmult['Grade'] = Teamsmult['Grade'].astype('str')  # grade back to string for merge
    Teamsmult.Grade = Teamsmult.Grade.replace('0', 'K', regex=True)  # 0 back to K
    df['Grade'] = df['Grade'].astype('str')
    # left merge keeps all master_signups entries
    df = pd.merge(df, Teamsmult, how='left', on=['Year', 'Grade', 'Gender', 'Sport'], suffixes=('', '_r'))
    df['Team'] = df['Team'].astype('str')
    # Copy CYC team name into Team; prior years/seasons merge to NaN and are
    # skipped (fix: the old `!='nan'` test let real NaN through; set_value was
    # removed in pandas 1.0 -- vectorized .loc assignment instead)
    hascyc = df['CYCname'].notna() & (df['CYCname'].astype(str) != 'nan')
    df.loc[hascyc, 'Team'] = df.loc[hascyc, 'CYCname']
    # drop extra merge columns
    mycols = ['First', 'Last', 'Grade', 'Gender', 'Sport', 'Year', 'Team', 'Plakey', 'Famkey', 'Family']
    tempdf = pd.DataFrame(columns=mycols)  # temp df for dropping unnecessary columns
    df = dropcolumns(df, tempdf)
    df.to_csv('master_signups.csv', index=False)  # save/overwrite existing csv
    return df
def findrecruits(df, players, famcontact, season, year, signupfile):
    '''Identify recruiting targets: players who signed up in year-1 but not in year.

    Fall only (soccer and VB). For other seasons an empty Recruits tab is written
    (fix: the original left Recruits undefined and raised NameError for non-Fall).
    Contact info is merged in from famcontact, school from players, and the result
    is written to a 'Recruits' sheet of signupfile.

    Args:
        df: master signups log.
        players, famcontact: master player / family-contact tables.
        season: 'Fall' produces recruits; other values yield an empty tab.
        year: current signup year.
        signupfile: existing .xlsx workbook to receive the Recruits sheet.
    '''
    mycols = df.columns.tolist()
    Recruits = pd.DataFrame(columns=mycols)  # always defined, even for non-Fall seasons
    if season == 'Fall':
        # same lapsed-player rule for both fall sports
        for sportname in ('soccer', 'VB'):
            thissport = df[df['Sport'] == sportname]
            for key in thissport.Plakey.unique().tolist():
                match = thissport[thissport['Plakey'] == key]
                # recruit: played in year-1 but not in year
                if year - 1 in match.Year.unique() and year not in match.Year.unique():
                    Recruits = pd.concat([Recruits, match[0:1]], ignore_index=True)
    Recruits.Grade = Recruits.Grade.replace('K', 0)  # K -> 0 so grades sort numerically
    Recruits.Grade = Recruits.Grade.astype(int)
    # advance to the player's current grade (vectorized; set_value was removed in pandas 1.0)
    Recruits['Grade'] = Recruits['Grade'] + 1
    # inner join on famkey adds contact info (emails, phones, etc.)
    Recruits = pd.merge(Recruits, famcontact, how='inner', on='Famkey', suffixes=('', '_r'))
    Recruits['School'] = ''  # placeholder; real school looked up from players below
    Recruits = organizerecruits(Recruits)  # reformat into same layout as Excel signups summary
    # look up school from master players list
    Recruits = pd.merge(Recruits, players, how='inner', on='Plakey', suffixes=('', '_r'))
    Recruits.drop('School', axis=1, inplace=True)
    Recruits.rename(columns={'School_r': 'School'}, inplace=True)
    Recruits = organizerecruits(Recruits)  # reformat again after the merge
    Recruits = Recruits.sort_values(['Sport', 'Gender', 'Grade'], ascending=True)
    # write/replace the Recruits tab in the master signup workbook; mode='a' with
    # if_sheet_exists='replace' supersedes the old writer.book assignment pattern
    # that was removed in pandas 1.5+
    with pd.ExcelWriter(signupfile, engine='openpyxl', mode='a', if_sheet_exists='replace') as writer:
        Recruits.to_excel(writer, sheet_name='Recruits', index=False)
def findmissinginfo(df, players, famcontact):
    '''Fill in missing fields for paper/word-of-mouth SC signup rows.

    Rows entered by hand lack a google-form datetime in Timestamp; for those, the
    player's name/DOB/school are copied from the master players list and the
    address/parish/phone/email fields from famcontact (matched on Plakey/Famkey).
    Needed before writesignupstoExcel. Returns the updated df (also mutated in place).
    '''
    for i in range(len(df)):
        if type(df.iloc[i]['Timestamp']) == datetime.datetime:
            continue  # normal google-form row: already complete
        thisplakey = df.iloc[i]['Plakey']
        thisfamkey = df.iloc[i]['Famkey']
        # first, dob, school from master players list (.at replaces set_value,
        # which was removed in pandas 1.0)
        match = players[players['Plakey'] == thisplakey]
        if len(match) == 1:
            rec = match.iloc[0]
            df.at[i, 'First'] = rec['First']  # probably same but set to CYC-card value
            df.at[i, 'DOB'] = rec['DOB']
            df.at[i, 'School'] = rec['School']
        # address, zip, parish, phones/texts/emails from famcontact
        match = famcontact[famcontact['Famkey'] == thisfamkey]
        if len(match) == 1:
            rec = match.iloc[0]
            df.at[i, 'Address'] = rec['Address']
            df.at[i, 'Zip'] = rec['Zip']
            df.at[i, 'Parish'] = rec['Parish_registration']
            df.at[i, 'Phone'] = rec['Phone1']
            df.at[i, 'Text'] = rec['Text1']
            df.at[i, 'Email'] = rec['Email1']
            df.at[i, 'Phone2'] = rec['Phone2']
            df.at[i, 'Text2'] = rec['Text2']
            df.at[i, 'Email2'] = rec['Email2']
    return df
def findplayernumbers(SCsignup, players):
    '''Attach Plakey/Famkey/Family to each signup row by matching against the
    master players list, relaxing the match in stages:

      1) First+Last+DOB exact -> assign keys
      2) Last+DOB (first-name typo, or twins) -> assign first candidate
      3) First+Last only (suspected DOB entry error) -> assign with a warning
      4) no match -> collect row as a likely new player

    Assumes SCsignup has a default RangeIndex (labels == positions).
    Returns (SCsignup with keys filled in, newplayers frame of unmatched rows).
    Fix: the original could return the pd.DataFrame *class* (not an instance)
    when no new players were found; also replaces removed set_value/append APIs.
    '''
    if 'Plakey' not in SCsignup.columns:  # add key columns on first run
        SCsignup.insert(0, 'Plakey', 0)
    if 'Famkey' not in SCsignup.columns:
        SCsignup.insert(1, 'Famkey', 0)
    if 'Family' not in SCsignup.columns:
        SCsignup.insert(2, 'Family', '')

    def _assign(row, rec):
        # copy matched player's keys into the signup row (.at replaces set_value)
        SCsignup.at[row, 'Plakey'] = rec.iloc[0]['Plakey']
        SCsignup.at[row, 'Famkey'] = rec.iloc[0]['Famkey']
        SCsignup.at[row, 'Family'] = rec.iloc[0]['Family']

    newrows = []  # positions of likely new players
    for su in range(len(SCsignup)):
        if SCsignup.iloc[su]['Plakey'] != 0:
            continue  # already keyed on a prior run
        first = SCsignup.iloc[su]['First']
        last = SCsignup.iloc[su]['Last']
        DOB = SCsignup.iloc[su]['DOB']  # usually datetime format
        # TODO case insensitive matches to avoid DeFord problem
        match = players[(players['Last'] == last) & (players['First'] == first) & (players['DOB'] == DOB)]
        if len(match) == 1:  # single perfect match on all 3
            _assign(su, match)
            continue
        if len(match) > 1:  # duplicate rows in the master players list
            print('Remove duplicated entry for ', first, " ", last, ' from players list.')
            continue
        # relax to Last+DOB (first-name error, or twins with similar names)
        match = players[(players['Last'] == last) & (players['DOB'] == DOB)]
        if len(match) >= 1:  # single hit or twins: take the first candidate
            _assign(su, match)
            continue
        # relax to First+Last (possible DOB entry error)
        match = players[(players['Last'] == last) & (players['First'] == first)]
        if len(match) == 1:
            print('DOB entry error suspected for found player: ', first, ' ', last)
            _assign(su, match)
            continue
        # TODO maybe write these to a check DOB file??
        if len(match) > 1:  # name too common to resolve automatically
            print('No DOB match. First last matches multiple players:', first, ' ', last)
        else:  # no match anywhere: likely a genuinely new player
            print('Likely new player :', first, ' ', last, '.')
            newrows.append(su)
    # build newplayers from the collected rows; always a real (possibly empty) frame
    if newrows:
        newplayers = SCsignup.iloc[newrows]
    else:
        newplayers = pd.DataFrame(columns=SCsignup.columns)
    return SCsignup, newplayers
def findfamily(newplayers, famcontact):
    '''For confirmed new players, look up an existing family name and key (if any).

    Matching order per row:
      1) any famcontact phone column contains the player's last name or phone(s)
         -> assign Family/Famkey;
      2) otherwise, famcontact Family names containing the last name are printed
         as possible matches for manual assignment.

    Returns newplayers (index reset) with Family/Famkey filled where matched.
    '''
    import re  # local import: escapes user-entered text used inside a regex

    newplayers = newplayers.reset_index(drop=True)  # reset index to avoid label problems
    if 'Famkey' not in newplayers.columns:
        newplayers.insert(1, 'Famkey', 0)
    if 'Family' not in newplayers.columns:
        newplayers.insert(0, 'Family', '')
    for su in range(len(newplayers)):
        first = newplayers.iloc[su]['First']
        last = newplayers.iloc[su]['Last']
        phone1 = newplayers.iloc[su]['Phone']
        # build a regex alternation of last name + phone(s); re.escape prevents
        # crashes on literal metacharacters such as '(' in '(314) 555-1234' (fix)
        parts = [re.escape(str(last)), re.escape(str(phone1))]
        phone2 = str(newplayers.iloc[su]['Phone2'])  # str() so nan is testable
        if phone2 != 'nan':  # include 2nd phone when entered
            parts.append(re.escape(phone2))
        phstr = '|'.join(parts)
        mask = (famcontact['Phone1'].str.contains(phstr, na=False, case=False)
                | famcontact['Phone2'].str.contains(phstr, na=False, case=False)
                | famcontact['Phone3'].str.contains(phstr, na=False, case=False))
        match = famcontact.loc[mask]
        if len(match) == 1:
            print('Phone match of player ', first, last, ' to family ', match.iloc[0]['Family'])
            # .at replaces set_value (removed in pandas 1.0)
            newplayers.at[su, 'Family'] = match.iloc[0]['Family']
            newplayers.at[su, 'Famkey'] = match.iloc[0]['Famkey']
            continue  # famkey assigned; next player
        # no phone match: list possible last-name matches for manual resolution
        mask = famcontact['Family'].str.contains(re.escape(str(last)), na=False, case=False)
        match = famcontact.loc[mask]
        if len(match) > 0:
            for i in range(len(match)):
                tempstr = str(match.iloc[i]['Famkey']) + ' ' + str(match.iloc[i]['Family']) + ' '
                print('Possible match of player ', first, last, ' to family ', tempstr, 'not yet assigned')
            # don't assign a number; can be done manually if reasonable
        else:
            print('No match for player ', first, last)
    return newplayers  # same df with family name and key where matched
def addplayers(df, players):
    ''' Reformat confirmed new-player rows (df) and prepend them to the master
    players list.

    Backs up the existing list to players_<date>.bak, overwrites players.csv,
    and returns the combined frame.
    Fix: the original rebound players to to_csv(...)'s None and returned it,
    which broke the caller (addnewplafams).
    '''
    mytime = datetime.datetime.now()
    datestr = '_' + str(mytime.day) + mytime.strftime("%B") + str(mytime.year)[2:]  # e.g. _3Jun16
    players.to_csv('players' + datestr + '.bak', index=False)  # backup existing master list
    # keep first 10 columns, minus the google-form Timestamp
    df = df.iloc[:, 0:10].drop('Timestamp', axis=1)
    df['Gradeadj'] = 0  # grade-adjust column, default to no adjustment
    colorder = players.columns.tolist()  # desired column order
    players = pd.concat([df, players])  # new rows first; column order irrelevant for concat
    players = players[colorder]  # restore master order (drops any extra columns)
    players = players.reset_index(drop=True)
    players.to_csv('players.csv', index=False)  # save without clobbering the frame
    return players  # full master list with new entries
def addfamilies(df, famcontact, fambills):
    ''' Append new families (df) to the master family contact and billing tables.

    Backs up both masters with a dated suffix, saves family_bill.csv and
    family_contact.csv, and returns the updated (famcontact, fambills) frames.
    Fixes: the original rebound both frames to to_csv(...)'s None (so it crashed
    at the later fambills.columns access and returned (None, None)); colorder for
    famcontact was taken from fambills; dfcon was an alias despite the "copy"
    comment.
    '''
    df = df.reset_index(drop=True)  # reset index for proper positional loops
    dfcon = df.copy()  # true copy of the original rows for the contact section
    # back up both master tables, e.g. family_contact_3Jun16.bak
    mytime = datetime.datetime.now()
    datestr = '_' + str(mytime.day) + mytime.strftime("%B") + str(mytime.year)[2:]
    famcontact.to_csv('family_contact' + datestr + '.bak', index=False)
    fambills.to_csv('family_bill' + datestr + '.bak', index=False)
    # --- family billing ---
    datestr = str(mytime.month) + '/' + str(mytime.day) + '/' + str(mytime.year)
    df = dropcolumns(df, fambills)  # drop all cols from df that are not in fambills
    df['Startdate'] = datestr  # initialize the billing bookkeeping columns
    df['Lastupdate'] = datestr
    df['Startbal'] = 0
    df['Currbalance'] = 0
    df['Billing_note'] = ''
    colorder = fambills.columns.tolist()
    fambills = pd.concat([fambills, df])
    fambills = fambills[colorder]  # restore master column order
    fambills = fambills.reset_index(drop=True)
    fambills.to_csv('family_bill.csv', index=False)  # save; keep the frame bound
    # --- family contact ---
    dfcon.rename(columns={'Plakey': 'Players', 'Parish': 'Parish_registration', 'Phone': 'Phone1', 'Text': 'Text1', 'Email': 'Email1'}, inplace=True)
    dfcon = dropcolumns(dfcon, famcontact)  # drop columns not in famcontact
    dfcon['City'] = 'St. Louis'  # defaults for locally-registered families
    dfcon['State'] = 'MO'
    # remaining contact columns start blank
    for col in ('Parish_residence', 'Pfirst3', 'Plast3', 'Phone3', 'Text3', 'Phone4', 'Text4', 'Email3'):
        dfcon[col] = ''
    colorder = famcontact.columns.tolist()  # fix: was fambills.columns
    famcontact = pd.concat([famcontact, dfcon])
    famcontact = famcontact[colorder]  # restore master column order
    famcontact = famcontact.reset_index(drop=True)
    famcontact.to_csv('family_contact.csv', index=False)  # save; keep the frame bound
    return famcontact, fambills
def addnewplafams(newplayers, players, famcontact, fambills):
    '''Add confirmed new players (and their families) to the master tables.

    newplayers contains google-drive entries with no match in the master players
    list; a manual/visual check should be done before calling. Assigns fresh
    player keys (and family keys for rows without one), constructs family names,
    then appends to the players, family contact and family billing masters.

    TODO: double check these haven't already been entered (re-run
    findplayernumbers on newplayers and compare lengths before committing).
    Returns updated (players, famcontact, fambills).
    '''
    # fresh unique keys (index already reset by findfamily)
    availplakeys = findavailablekeys(players, 'Plakey', len(newplayers))
    availfamkeys = findavailablekeys(famcontact, 'Famkey', len(newplayers))
    # assign new player keys (.at replaces set_value, removed in pandas 1.0)
    for i, val in enumerate(availplakeys):
        newplayers.at[i, 'Plakey'] = val
    # construct a family name for rows without an existing family key
    for i in range(len(newplayers)):
        if newplayers.iloc[i]['Famkey'] != 0:
            continue  # already matched to an existing family
        last = str(newplayers.iloc[i]['Last'])
        plast = str(newplayers.iloc[i]['Plast'])  # parent last name
        if last == plast:  # same name for parent and player
            newplayers.at[i, 'Family'] = newplayers.iloc[i]['Last']
        elif plast == 'nan':  # parent last name missing
            newplayers.at[i, 'Family'] = newplayers.iloc[i]['Last']
        elif plast in last:
            # kid's last name is already the hyphenated/combined form: use it
            # directly (removed the original's dead, unused newname computation)
            newplayers.at[i, 'Family'] = newplayers.iloc[i]['Last']
        else:  # different parent/player surnames: combine them
            newplayers.at[i, 'Family'] = last + '_' + plast
    # rows that still need a family key
    newfams = [i for i in range(len(newplayers)) if newplayers.iloc[i]['Famkey'] == 0]
    # NOTE: keys are matched by row position, so keys at positions not in newfams
    # go unused -- preserved from the original ("misses some unassigned keys but
    # that's fine")
    for i, val in enumerate(availfamkeys):
        if i in newfams:
            newplayers.at[i, 'Famkey'] = val
    # append new entries to the master players table (saves players.csv)
    players = addplayers(newplayers, players)
    # only rows with brand-new families go into the family masters
    df = newplayers[newplayers.index.isin(newfams)]
    famcontact, fambills = addfamilies(df, famcontact, fambills)
    return players, famcontact, fambills  # pass back modified versions to main
def comparefamkeys(players, famcontact, fambills):
    '''Consistency audit across players, family contacts and family billing.

    Prints famkeys present in one table but missing from another, then flags
    family-name mismatches (title-cased, stripped) for the same key between
    contacts/billing and contacts/players. Report only; returns None.
    '''
    fams_pla = set(players.Famkey.unique().tolist())
    fams_con = famcontact.Famkey.unique().tolist()
    fams_bill = fambills.Famkey.unique().tolist()
    # set membership is O(1) per lookup vs the original O(n) list scans
    conset = set(fams_con)
    billset = set(fams_bill)
    # contacts and billing should be identical key sets
    for val in [i for i in fams_bill if i not in conset]:
        print("Famkey ", val, " in family billing but not in family contacts.")
    for val in [i for i in fams_con if i not in billset]:
        print("Famkey ", val, " in family contacts but not in family billing.")
    for val in [i for i in fams_con if i not in fams_pla]:
        print("Famkey ", val, " in family contacts but not found among players.")

    def _namecheck(other):
        # flag the same famkey mapped to different normalized family names
        for i in range(len(famcontact)):
            famkey = famcontact.iloc[i]['Famkey']
            family = famcontact.iloc[i]['Family'].title().strip()
            match = other[other['Famkey'] == famkey]
            # NOTE: as in the original, the name comparison only fires when
            # exactly one row in the other table carries this key
            if len(match) == 1:
                family2 = match.iloc[0]['Family'].title().strip()
                if family != family2:  # different family names with same key
                    print("Family key ", str(famkey), ": ", family, " or ", family2)

    _namecheck(fambills)  # contacts vs billing
    _namecheck(players)   # contacts vs players
    return
#%% Legacy or one-time use functions
def createsignupsold(df, Signups, season, year):
    ''' Legacy: append google-sheet signups (df) to the master Signups log and
    return the per-sport lists of current player keys. Typically use
    writesignupstoExcel instead.

    New rows are concatenated IN FRONT of the existing log so that
    drop_duplicates (keep='first') prefers the fresh entries on a rerun.
    Side effect: overwrites master_signups.csv.

    Returns (per season):
        Fall   -> (Soccerlist, VBlist, Signups)
        Winter -> (Bballlist, Signups)
        Spring -> (Tracklist, SBlist, BBlist, TBlist, Signups)
    '''
    df.Grade = df.Grade.replace('K', 0)  # K -> 0 to allow sorting

    def _bysport(pattern, sportname, case=True):
        # filter signups whose Sport text matches pattern, normalize the Sport
        # label and reformat; returns (frame, list of unique player keys).
        # Whole-column assignment replaces the removed per-row set_value loop;
        # the original's np.column_stack([...]).any(axis=1) reduced to this mask.
        sub = df[df['Sport'].str.contains(pattern, case=case, na=False)].reset_index(drop=True)
        sub['Sport'] = sportname
        sub = organizesignups(sub, year)
        return sub, sub.Plakey.unique().tolist()

    def _mergesave(newframes):
        # prepend new frames (reversed so later list entries land first, matching
        # the original's successive-concat order), dedupe on player/sport/year
        # (keeps first occurrence, i.e. the fresh entries) and save the master csv
        colorder = Signups.columns.tolist()
        merged = pd.concat(list(reversed(newframes)) + [Signups], ignore_index=True)
        merged = merged[colorder]
        merged = merged.drop_duplicates(subset=['Plakey', 'Sport', 'Year'])
        merged.to_csv('master_signups.csv', index=False)
        return merged

    if season == 'Fall':
        Allsoccer, Soccerlist = _bysport("occer", 'soccer')
        Allvb, VBlist = _bysport("olleyball", 'VB')
        return Soccerlist, VBlist, _mergesave([Allsoccer, Allvb])
    if season == 'Winter':
        # no sport filter needed in winter; mutates df's Sport column, as the
        # original per-row loop on the aliased frame did
        df['Sport'] = 'Bball'
        AllBball = organizesignups(df, year)
        Bballlist = AllBball.Plakey.unique().tolist()
        return Bballlist, _mergesave([AllBball])
    if season == 'Spring':
        Track, Tracklist = _bysport("rack", 'Track')
        Softball, SBlist = _bysport("soft", 'Softball', case=False)
        Baseball, BBlist = _bysport("base", 'Baseball', case=False)
        Tball, TBlist = _bysport("t-ball", 'Tball', case=False)
        return Tracklist, SBlist, BBlist, TBlist, _mergesave([Track, Softball, Baseball, Tball])
|
tkcroat/SC | SC_uniforms_main.py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 10:10:08 2018
@author: tkc
"""
import os, sys
import pandas as pd
import pkg.SC_billing_functions as SCbill
import pkg.SC_uniform_functions as SCuni
import pkg.SC_signup_google_API_functions as SCapi
import pkg.SC_config as cnf
import pkg.SC_signup_functions as SC
#%%
import pandas_utilities as pdutil
from importlib import reload
reload(SCuni)
#%% Load of commonly-needed files
teams=pd.read_csv(cnf._INPUT_DIR+'\\teams_2019.csv')
Mastersignups = pd.read_csv(cnf._INPUT_DIR+'\\master_signups.csv', encoding='cp437')
players, famcontact = SC.loadProcessPlayerInfo() # version w/o signup processing
paylog=SCapi.readPaylog()
#%% Uniform inventory prior to uniform night
# Read results of uniform inventory (
# g-docs inventory sheet with setname, size and list of numbers (in closet)
unis = SCapi.readInventory()
unilist= SCapi.readUniList() # read master list of unique unis and checkout info
# Inventory
SCuni.checkuni_duplicates(unilist) # check for non-unique uniforms from master unilist
#
# Older stuff... legacy
# Load an inventory file
inventory=pd.read_excel('uniform_inventory.xlsx')
unilist=pd.read_excel(cnf._INPUT_DIR+'\\Master_uniform_logbook.xlsx',sheetname='Unilog')
unisumm=pd.read_excel(cnf._INPUT_DIR+'\\Master_uniform_logbook.xlsx',sheetname='Summary')
#%%
''' For uniform night or issue, work from season_uniform_log multi-tabbed xls like (or
google sheet format), then after event read back number/size/setname combos into
master unilist;
financial info separately entered into master payment log file (fees and deposits)... separate
process so financials in uniform_log is just a view-only (or convenient way to copy o
'''
# ISSUING UNIFORMS
# teams=pd.read_excel(cnf._INPUT_DIR+'\\Teams_coaches.xlsx', sheetname='Teams')
oldteams=SCbill.loadoldteams(['Fall','Winter'], [2015,2016,2017, 2018])
# Read back new info entered into uniform logs
# Transfer unreturned unis from prior season's signup (if unreturned) to this season's signup
Mastersignups=SCbill.transferunis(Mastersignups, season, year)
# Transfer VB uniforms to BB (for those playing both sports)
Mastersignups=SCbill.transferunisVBBB(Mastersignups, year)
# TODO double check to ensure that this works
Mastersignups.to_csv('master_signups_test.csv', index=False) # Needs manual save
# Summary of currently available uniforms (after inventory)
# (totals, in (in closet/out(with player)/ miss (missing and unassigned), sh (shorts)
unisumm=updateunisumm(unisumm,unilist)
pdutil.writetoxls(unisumm, 'Summary', 'Master_uniform_logbook.xlsx')
# New uniform log ... get/copy requested size from signup
# After team assignment get desired shirt size distributions from signups (by team)
# Possible difference between requested and assigned sizes?
# Update unilist (and unilog summary) based on master signups
# Maybe interactive comparison here (i.e uniform checked out in mastersignups)
# but inventory shows it as returned
# Uniform tracking -auto output of uniform log .. autosaved to seasonyear_uniform_log.xlsx
# uniform logs for temp storage/convenience; info stored in mastersignups w/ player and payment log (deposits)
SCuni.writeuniformlog(Mastersignups, teams, players, season, year, paylog)
# output csv file with list of outstanding uniforms (along w/ available deposit info)
missing=SCuni.makemissingunilog(Mastersignups, paylog, players, fname='missingunilist_29Dec17.csv')
# Update master signups w/ issued uniform info after uniform night (works!)
# TODO also need to update unilist in uniform log
# TODO needs conflict resolution and priority of information
Mastersignups=SCuni.getuniinfo(teams, Mastersignups,'Winter_2017_uniform_log.xlsx', year)
# TODO added print statement if numbers conflict but needs testing
# Check out uniforms from unilist (based on recent master signups)
unilist=checkoutunis(Mastersignups, teams, season, year)
# Check out uniforms from unilist (based on recent master signups)
unilist=checkoutunis(Mastersignups, teams, season, year)
# Update unilist (and unilog summary) based on master signups
# Maybe interactive comparison here (i.e uniform checked out in mastersignups)
# but inventory shows it as returned
# Update master signups w/ issued uniform info after uniform night (works!)
Mastersignups=getuniinfo(teams, Mastersignups,'Winter_2017_uniform_log.xlsx', year) |
tkcroat/SC | pkg/sc_database_setup.py | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 5 16:42:40 2019
@author: kevin
"""
# NOTE(review): `tkcreateTable` and `Mastersignups` are not defined in this
# module; this looks like a scratch/notes file of one-off statements and will
# raise NameError if executed as-is — confirm intended usage.
crtStmt = tkcreateTable(Mastersignups)

# Reference DDL for the sports-club schema.  Each assignment below rebinds the
# same `stmt` name, so only the last statement survives at runtime; they are
# kept as documentation of the intended tables.
# Fixes applied to make the SQL valid:
#   * identifiers containing spaces or '#' are double-quoted,
#   * the non-standard `int32` type is replaced with INT,
#   * trailing commas before the closing ')' are removed.

# Create statement for players table
stmt = ''' CREATE TABLE players (
    Plakey FLOAT NOT NULL,
    First VARCHAR NOT NULL,
    Last VARCHAR NOT NULL,
    DOB VARCHAR NOT NULL,
    Gender VARCHAR NOT NULL,
    School VARCHAR NOT NULL,
    Grade INT NOT NULL,
    Gradeadj FLOAT,
    "Uni#" FLOAT,
    Famkey FLOAT NOT NULL,
    Family VARCHAR,
    Alias VARCHAR,
    PRIMARY KEY (Plakey),
    FOREIGN KEY (Plakey)
        REFERENCES mastersignups (Plakey),
    FOREIGN KEY (Famkey)
        REFERENCES families (Famkey)
); '''

# Creation of families table
stmt = '''CREATE TABLE families (
    Famkey INT NOT NULL,
    Family VARCHAR NOT NULL,
    Address VARCHAR NOT NULL,
    City VARCHAR NOT NULL,
    State VARCHAR NOT NULL,
    Zip VARCHAR NOT NULL,
    Parish_registration VARCHAR,
    Parish_residence VARCHAR,
    Pfirst1 VARCHAR,
    Plast1 VARCHAR,
    Pfirst2 VARCHAR,
    Plast2 VARCHAR,
    Pfirst3 VARCHAR,
    Plast3 VARCHAR,
    Playerlist FLOAT NOT NULL,
    Phone1 VARCHAR NOT NULL,
    Text1 VARCHAR,
    Phone2 VARCHAR,
    Text2 VARCHAR,
    Phone3 VARCHAR,
    Text3 VARCHAR,
    Phone4 VARCHAR,
    Text4 FLOAT,
    Email1 VARCHAR,
    Email2 VARCHAR,
    Email3 VARCHAR,
    PRIMARY KEY (Famkey)
);'''

# Create mastersignups table
stmt = '''CREATE TABLE mastersignups (
    SUkey FLOAT NOT NULL,
    First VARCHAR NOT NULL,
    Last VARCHAR NOT NULL,
    Grade INT NOT NULL,
    Gender VARCHAR NOT NULL,
    Sport VARCHAR NOT NULL,
    Year VARCHAR NOT NULL,
    Team VARCHAR,
    Plakey FLOAT NOT NULL,
    Famkey FLOAT NOT NULL,
    Family VARCHAR,
    SUdate VARCHAR NOT NULL,
    "Issue date" VARCHAR,
    "Uniform#" VARCHAR,
    UniReturnDate VARCHAR,
    PRIMARY KEY (SUkey),
    FOREIGN KEY (Plakey)
        REFERENCES players (Plakey),
    FOREIGN KEY (Famkey)
        REFERENCES families (Famkey)
);'''

# Create teams table
stmt = '''CREATE TABLE teams (
    Teamkey INT NOT NULL,
    Year INT NOT NULL,
    Sport VARCHAR NOT NULL,
    Grade VARCHAR NOT NULL,
    Gender VARCHAR NOT NULL,
    Division VARCHAR NOT NULL,
    Level VARCHAR,
    Team VARCHAR NOT NULL,
    "Coach ID" VARCHAR NOT NULL,
    Coach VARCHAR,
    Graderange INT NOT NULL,
    AssistantIDs VARCHAR,
    Uniforms VARCHAR NOT NULL,
    Number FLOAT,
    Lower FLOAT,
    Upper FLOAT,
    Playerlist VARCHAR,
    Location VARCHAR,
    Slot VARCHAR,
    PRIMARY KEY (Teamkey),
    FOREIGN KEY ("Coach ID")
        REFERENCES coaches ("Coach ID")
);'''

# Create coaches table
stmt = '''CREATE TABLE coaches (
    Fname VARCHAR NOT NULL,
    Lname VARCHAR NOT NULL,
    Street VARCHAR NOT NULL,
    City VARCHAR NOT NULL,
    State VARCHAR NOT NULL,
    Zip INT NOT NULL,
    Phone VARCHAR NOT NULL,
    Email VARCHAR NOT NULL,
    Sex VARCHAR NOT NULL,
    School VARCHAR NOT NULL,
    "Parish of Registration" VARCHAR NOT NULL,
    "Parish of Residence" VARCHAR NOT NULL,
    "Coach ID" VARCHAR NOT NULL,
    PRIMARY KEY ("Coach ID")
);'''
podo-os/podo-std-eye | op.py | <filename>op.py
import cv2
import datetime
# Open the default camera and request MJPG so the driver can deliver the
# configured 640x480 @ 30 FPS (raw YUYV often caps at a lower rate).
cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
cam.set(cv2.CAP_PROP_FPS, 30)
# Capture 32 frames, timestamping each one.
t = []
for _ in range(32):
    _, mat = cam.read()
    print(mat.shape)
    t.append(datetime.datetime.now())
# Measure over t[1]..t[31] (30 intervals), skipping the first frame which
# includes camera warm-up latency.
print('Elapsed FPS:', 30 / (t[31] - t[1]).total_seconds())
|
craigderington/blocknav.io | forms.py | # -*- coding: utf-8 -*-
from wtforms import Form, BooleanField, StringField, PasswordField, validators
class SearchForm(Form):
    # Single required free-text field used by the navbar search.
    search = StringField('Search', [validators.DataRequired()])
class AddressSearchForm(Form):
    # Required Bitcoin address input for the /address lookup page.
    addr = StringField('Address', [validators.DataRequired()])
class RegistrationForm(Form):
    # Account sign-up form; `password` must equal the `confirm` field and the
    # terms-of-service checkbox is mandatory.
    username = StringField('Username', [validators.Length(min=4, max=25)])
    email = StringField('Email Address', [validators.Length(min=6, max=35)])
    password = PasswordField('<PASSWORD>', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords must match')
    ])
    confirm = PasswordField('<PASSWORD>')
    accept_tos = BooleanField('I accept the TOS', [validators.DataRequired()])
craigderington/blocknav.io | config.py | # -*- coding: utf-8 -*-
import os
# Flask debug mode; must be disabled in production deployments.
DEBUG = True
# Port the development server listens on.
PORT = 5550
# NOTE(review): regenerated on every process start, so sessions/signed cookies
# are invalidated across restarts and differ between workers — confirm intended.
SECRET_KEY = os.urandom(64)
# blockchain.info API key (empty string means unauthenticated requests).
API_KEY = ''
# Page scraped by the /pools view for mining-pool statistics.
STATS_URL = 'https://btc.com/stats/pool'
|
craigderington/blocknav.io | app.py | <gh_stars>0
# -*- coding: utf-8 -*-
from flask import Flask, request, render_template, redirect, flash, url_for, session
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_misaka import Misaka
from blockchain import blockexplorer as be
from blockchain import statistics
from blockchain.exceptions import APIException
from flask_qrcode import QRcode
from bs4 import BeautifulSoup
from datetime import datetime
from forms import SearchForm, AddressSearchForm, RegistrationForm
import config
import requests
import collections
# Flask application wiring: Moment.js timestamps, QR-code rendering and
# Misaka markdown support are bound to `app` below.
bootstrap = Bootstrap()
# NOTE(review): `bootstrap` is instantiated but never bound to `app`
# (no Bootstrap(app) / init_app call visible here) — confirm it is initialised.
app = Flask(__name__, static_url_path='')
moment = Moment(app)
QRcode(app)
Misaka(app)
app.secret_key = config.SECRET_KEY
app.api_code = config.API_KEY
# Row type matching the columns scraped from the btc.com pool-stats table.
MiningPool = collections.namedtuple('MiningPool', 'rank, name, p_total, h_rate, n_blocks, e_blocks, eb_percent, '
                                                  'b_size, avg_tx_fees, tx_fees_reward')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Home page: the five most recent blocks plus network statistics."""
    blocks = []
    block_count = 0
    stats = {}
    try:
        blocks = be.get_blocks(api_code=app.api_code)[:5]
        block_count = len(blocks)
        stats = get_stats()
    except APIException as e:
        flash('Sorry, we had a problem contacting the blockchain.info API. Please try again later.')
    return render_template(
        'index.html',
        blocks=blocks,
        block_count=block_count,
        stats=stats,
        # NOTE(review): 'LLL' is not a strftime directive, so the literal
        # string 'LLL' is passed to the template (likely meant for Flask-Moment).
        current_time=datetime.now().strftime('LLL')
    )
@app.route('/blocks', methods=['GET', 'POST'])
def blocks():
    """List all latest blocks reported by the blockchain.info API."""
    blocks = []
    block_count = 0
    try:
        blocks = be.get_blocks(api_code=app.api_code)
        block_count = len(blocks)
    except APIException as e:
        flash('Sorry, we had a problem contacting the blockchain.info API. Please try again later.')
    return render_template(
        'blocks.html',
        blocks=blocks,
        block_count=block_count
    )
@app.route('/block/<string:block>', methods=['GET', 'POST'])
def block(block):
    """Block detail page for a block hash taken from the URL.

    :param block: block hash string.
    :return: rendered block template, or a redirect home on API failure.
    """
    try:
        block = be.get_block(block, api_code=app.api_code)
        transactions = block.transactions
    except APIException as e:
        # Bug fix: previously execution fell through with `transactions`
        # unbound, raising UnboundLocalError at render time.
        flash('Sorry, we had a problem contacting the blockchain.info API. Please try again later.')
        return redirect(url_for('index'))
    return render_template(
        'block.html',
        block=block,
        transactions=transactions,
        current_time=datetime.now().strftime('LLL')
    )
@app.route('/block/<int:height>', methods=['GET', 'POST'])
def get_block_height(height):
    """List all blocks at a given integer height.

    :param height: block height from the URL.
    :return: rendered template, or a redirect home on API failure.
    """
    try:
        blocks = be.get_block_height(height, api_code=app.api_code)
        stats = get_stats()
        block_count = len(blocks)
        # All returned blocks share the requested height; guard the empty case.
        block_height = blocks[0].height if blocks else height
    except APIException as e:
        # Bug fix: previously the locals stayed unbound after an API error and
        # the render call below raised UnboundLocalError.
        flash('Sorry, we had a problem contacting the blockchain.info API. Please try again later.')
        return redirect(url_for('index'))
    return render_template(
        'blocks_by_height.html',
        blocks=blocks,
        stats=stats,
        block_count=block_count,
        block_height=block_height,
        current_time=datetime.now().strftime('LLL')
    )
@app.route('/address/<string:address>', methods=['GET', 'POST'])
def get_address(address):
    """Address detail page with the address's transactions.

    :param address: Bitcoin address string from the URL.
    :return: rendered template, or a redirect home on API failure.
    """
    try:
        addr = be.get_address(address, api_code=app.api_code)
        tx = addr.transactions
    except APIException as e:
        # Bug fix: previously `addr`/`tx` stayed unbound after an API error
        # and the render call raised UnboundLocalError.
        flash('Sorry, we had a problem contacting the blockchain.info API. Please try again later.')
        return redirect(url_for('index'))
    return render_template(
        'address.html',
        addr=addr,
        tx=tx,
        current_time=datetime.now().strftime('LLL')
    )
@app.route('/address', methods=['GET', 'POST'])
def address():
    """Address lookup form; on POST, validate the address and show its details.

    NOTE(review): the length-34 check only matches legacy base58 addresses;
    confirm whether bech32 (42+ chars) should also be accepted.
    """
    form = AddressSearchForm(request.form)
    if request.method == 'POST':
        addr = request.form['addr'].strip()
        try:
            clean_addr = str(addr)
            if len(clean_addr) == 34:
                try:
                    address = be.get_address(clean_addr, api_code=app.api_code)
                    return render_template(
                        '_address.html',
                        address=address,
                        search_value=addr,
                        current_time=datetime.now().strftime('LLL')
                    )
                except APIException as e:
                    flash('API Error', 'warning')
                    return redirect(url_for('address'))
            else:
                message = 'The Bitcoin address is malformed. Please check your data and try again.'
                flash(message, 'danger')
                # Bug fix: the redirect was built but never returned, so this
                # branch fell through and the view returned None (HTTP 500).
                return redirect(url_for('address'))
        except (ValueError, TypeError) as err:
            message = 'An error has occurred: ' + str(err)
            flash(message, 'danger')
            return redirect(url_for('address'))
    else:
        return render_template(
            '_address.html',
            form=form,
            current_time=datetime.now().strftime('LLL')
        )
@app.route('/tx/<string:hash>', methods=['GET', 'POST'])
def tx(hash):
    """Transaction detail page for the given transaction hash.

    NOTE(review): the parameter name shadows the builtin `hash`.
    """
    try:
        tx = be.get_tx(hash, api_code=app.api_code)
        # Fetch the containing block so the template can link to it.
        blk = be.get_block_height(tx.block_height, api_code=app.api_code)
    except APIException as e:
        message = 'There has been an API Error.'
        flash(message, 'danger')
        return redirect(url_for('index'))
    return render_template(
        'transaction.html',
        tx=tx,
        block=blk,
        current_time=datetime.now().strftime('LLL')
    )
@app.route('/pools', methods=['GET', 'POST'])
def pools():
    """Scrape btc.com for the mining-pool share table and list its rows."""
    pools = list()
    url = config.STATS_URL
    hdr = {'user-agent': 'Mozilla/5.0 (Linux i686)'}
    r = requests.get(url, headers=hdr)
    soup = BeautifulSoup(r.text, 'html.parser')
    table = soup.find('table', {'class': 'pool-panel-share-table'})
    # Robustness fix: the markup belongs to a third party and can change;
    # previously a missing table raised AttributeError on .find_all (HTTP 500).
    if table is None:
        flash('Sorry, the mining pool statistics are currently unavailable.', 'danger')
        return redirect(url_for('index'))
    rows = table.find_all('tr')
    for idx, row in enumerate(rows):
        if idx != 0:  # skip the header row
            columns = row.find_all('td')
            cols = [element.text.strip() for element in columns]
            pools.append([element for element in cols if element])
    return render_template(
        'pools.html',
        current_time=datetime.now().strftime('LLL'),
        pools=pools,
    )
@app.route('/pool/<string:pool>', methods=['GET', 'POST'])
def pool(pool):
    """Show the recent blocks mined by a single named pool."""
    blocks = None
    block_count = None
    try:
        blocks = be.get_blocks(pool_name=pool, api_code=app.api_code)
        block_count = len(blocks)
    except APIException as e:
        message = 'Sorry, an API exception occurred ' + str(e)
        flash(message, 'danger')
        return redirect(url_for('index'))
    return render_template(
        'pool.html',
        blocks=blocks,
        pool_name=pool,
        block_count=block_count,
        current_time=datetime.now().strftime('LLL')
    )
@app.route('/api', methods=['GET'])
def api_docs():
    """Render the static API documentation page."""
    return render_template(
        'api.html',
        current_time=datetime.now().strftime('LLL')
    )
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login page; no authentication logic is implemented here."""
    return render_template(
        'login.html',
        current_time=datetime.now().strftime('LLL')
    )
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Render the registration form.

    NOTE(review): POST data is never validated or persisted in this view —
    confirm that registration handling is implemented elsewhere.
    """
    form = RegistrationForm()
    return render_template(
        'register.html',
        form=form,
        current_time=datetime.now().strftime('LLL')
    )
@app.route('/search', methods=['POST'])
def search():
    """Accept the navbar search form and redirect to the results page."""
    form = SearchForm(request.form)
    if request.method == 'POST' and form.validate():
        search = request.form['search'].strip()
        return redirect(url_for('results', query=search))
    else:
        return redirect(url_for('index'))
@app.route('/results/<string:query>', methods=['GET', 'POST'])
def results(query):
    """Dispatch a search query by length: more than 63 characters is treated
    as a block hash, more than 33 as an address, anything shorter as a
    numeric block height.

    NOTE(review): `type` shadows the builtin of the same name, and API errors
    in the first two branches are only printed, leaving search_results=None.
    """
    search_results = None
    type = None
    if len(query) > 63:
        try:
            search_results = be.get_block(query, api_code=app.api_code)
            type = 'Block Info'
        except APIException as e:
            print('An API error has occurred ' + str(e))
    elif len(query) > 33:
        try:
            search_results = be.get_address(query)
            type = 'Address Info'
        except APIException as e:
            print('Error ' + str(e))
    else:
        type = 'Block Height Info'
        try:
            try:
                n = int(query)
                search_results = be.get_block_height(n, api_code=app.api_code)
            except (ValueError, TypeError) as err:
                # Non-numeric short query: flash the problem and go home.
                search_results = str('Invalid query expression. ' + str(err))
                flash(search_results, 'danger')
                return redirect(url_for('index'))
        except APIException as e:
            print('Error ' + str(e))
    return render_template(
        'results.html',
        query=query,
        search_results=search_results,
        type=type,
        current_time=datetime.now().strftime('LLL')
    )
def get_stats():
    """Fetch network-wide statistics from the blockchain.info API."""
    return statistics.get(api_code=app.api_code)
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 page with the matching status code."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
    """Serve the custom 500 page with the matching status code."""
    return render_template('500.html'), 500
@app.template_filter('datetime')
def convert_unixtime(unixtime, format='medium'):
    """Jinja filter: render a unix timestamp as 'YYYY-MM-DD HH:MM:SS'.

    The `format` parameter is accepted for template compatibility but is
    currently ignored.
    """
    stamp = datetime.fromtimestamp(int(unixtime))
    return stamp.strftime('%Y-%m-%d %H:%M:%S')
# Entry point: run the Flask development server (not suitable for production).
if __name__ == '__main__':
    app.run(
        host='0.0.0.0',
        port=config.PORT,
        debug=config.DEBUG
    )
|
alexandre-flamant/2DFEM | src/datatypes.py | from config import *
class Node:
    """2-D node of the FE model, stored compactly via __slots__."""

    __slots__ = ('x', 'y')

    def __init__(self, x=0.0, y=0.0):
        """Create a node at the given global coordinates (defaults to origin)."""
        self.x = x
        self.y = y
class Member:
    """Structural member (edge) connecting two nodes."""

    __slots__ = ('start', 'end', 'area', 'length', 'inertia')

    def __init__(self, start=None, end=None, area=0.0, length=0.0, inertia=0.0):
        """Create a member; geometric properties default to zero."""
        self.start = start      # first node (index or reference)
        self.end = end          # second node (index or reference)
        self.area = area        # cross-section area
        self.length = length    # member length
        self.inertia = inertia  # second moment of area
class Structure:
    """Container for the whole model: nodes, members and supports."""

    __slots__ = ('nodes', 'members', 'supports')

    def __init__(self, nodes=None, members=None, supports=None):
        """Create a structure; collections default to fresh empty lists."""
        self.nodes = [] if nodes is None else nodes
        self.members = [] if members is None else members
        self.supports = [] if supports is None else supports
class Support:
    """Boundary-condition flags for a node (which DOFs are restrained)."""

    __slots__ = ('ux', 'uy', 'ry')

    def __init__(self, ux=False, uy=False, ry=False):
        """Create a support; all degrees of freedom default to free."""
        self.ux = ux  # translation along x restrained
        self.uy = uy  # translation along y restrained
        self.ry = ry  # rotation restrained
|
alexandre-flamant/2DFEM | src/parser.py | <reponame>alexandre-flamant/2DFEM<filename>src/parser.py
from config import *
def parse_from_file(p, absolute: bool = True):
    """Read a structure-definition file.

    Bug fix: the original body contained `while()`, a syntax error that made
    this module unimportable, and leaked the file handle on read errors.
    The actual parsing logic is still to be written, so for now the function
    only verifies the file is readable and returns None.

    :param p: path of the file to read.
    :param absolute: whether `p` is absolute (kept for API compatibility;
        currently unused — the original annotated but never used it either).
    :return: None (parsing not yet implemented).
    :raises OSError: if the file cannot be opened or read.
    """
    with open(p, mode='r') as file:
        # TODO: parse node/member/support records from `file`.
        file.read()
    return None
|
alexandre-flamant/2DFEM | src/solver.py | <gh_stars>0
from config import *
class Solver:
    """Finite-element helpers for 2-D frame analysis."""

    @staticmethod
    def create_stiffness_matrix(l, a, e, i):
        """
        Return the local stiffness matrix of a 2-D beam element.

        Bug fix: a stiffness matrix must be symmetric, but the original had
        k[2][4] = -6*EI/l^3 (should be -6*EI/l^2, matching k[4][2]) and
        k[2][5] = 2*EI/l^2 (should be 2*EI/l, matching k[5][2]).
        ...
        :param l: length of the edge in [m]
        :param a: section area of the edge in [m^2]
        :param e: modulus of elasticity of the edge in [N/m^2]
        :param i: inertia of the edge in [m^4]
        :return: [6x6] symmetric stiffness matrix
        """
        ea_l = e * a / l
        ei_l = e * i / l
        ei_l2 = e * i / l ** 2.
        ei_l3 = e * i / l ** 3.
        k = np.array((( ea_l,           0.,           0., -ea_l,           0.,           0.),
                      (   0.,  12. * ei_l3,  6. * ei_l2,    0., -12. * ei_l3,  6. * ei_l2),
                      (   0.,   6. * ei_l2,  4. * ei_l ,    0.,  -6. * ei_l2,  2. * ei_l ),
                      (-ea_l,           0.,           0.,  ea_l,           0.,           0.),
                      (   0., -12. * ei_l3, -6. * ei_l2,    0.,  12. * ei_l3, -6. * ei_l2),
                      (   0.,   6. * ei_l2,  2. * ei_l ,    0.,  -6. * ei_l2,  4. * ei_l ),))
        return k
|
estherjsuh/airflow_practice | dags/store_DAG.py | <gh_stars>0
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.mysql_operator import MySqlOperator
from airflow.operators.email_operator import EmailOperator
from datacleaner import data_cleaner
# Default task arguments applied to every task in this DAG.
default_args = {
    "owner": "airflow",
    "start_date": datetime(2020, 11, 18),
    "retries": 1,
    "retry_delay": timedelta(minutes=5),
}
# Reports are named after the previous day, matching the daily schedule.
yesterday_date = datetime.strftime(datetime.now() - timedelta(1), '%Y-%m-%d')
dag = DAG("store_dag", default_args=default_args, schedule_interval='@daily', template_searchpath=['/usr/local/airflow/sql_files'], catchup=False)
# Fail fast when the raw transactions file is missing.
t1 = BashOperator(task_id="check_datafile_exists", bash_command='shasum ~/store_files_airflow/raw_store_transactions.csv', retries=2, retry_delay=timedelta(seconds=15), dag=dag)
t2 = PythonOperator(task_id="clean_raw_csv", python_callable=data_cleaner, dag=dag)
#first, create connection
t3 = MySqlOperator(task_id="create_mysql_table", mysql_conn_id="mysql_conn", sql="create_table.sql", dag=dag)
t4 = MySqlOperator(task_id="insert_into_table", mysql_conn_id="mysql_conn", sql="insert_into_table.sql", dag=dag)
t5 = MySqlOperator(task_id="select_from_table", mysql_conn_id="mysql_conn", sql="select_from_table.sql", dag=dag)
t6 = BashOperator(task_id="move_file", bash_command='cat ~/store_files_airflow/location_wise_profit.csv && mv ~/store_files_airflow/location_wise_profit.csv ~/store_files_airflow/location_wise_profit_%s.csv' % yesterday_date, dag=dag)
t7 = BashOperator(task_id="move_file_2", bash_command='cat ~/store_files_airflow/store_wise_profit.csv && mv ~/store_files_airflow/store_wise_profit.csv ~/store_files_airflow/store_wise_profit_%s.csv' % yesterday_date, dag=dag)
# Bug fix: the attachment paths lacked the leading '/', so the email task
# resolved 'usr/local/...' relative to the worker CWD and failed to find them.
t8 = EmailOperator(task_id="send_email", to="<EMAIL>",
                   subject="Daily report generated",
                   html_content="""<h1>Yesterday's store reports are ready.</h1>""",
                   files=['/usr/local/airflow/store_files_airflow/location_wise_profit_%s.csv' % yesterday_date, '/usr/local/airflow/store_files_airflow/store_wise_profit_%s.csv' % yesterday_date], dag=dag)
t9 = BashOperator(task_id="rename_raw", bash_command='mv ~/store_files_airflow/raw_store_transactions.csv ~/store_files_airflow/raw_store_transactions_%s.csv' % yesterday_date, dag=dag)
#use bit operators instead of set_upstream & set_downstream
t1 >> t2 >> t3 >> t4 >> t5 >> [t6,t7] >> t8 >> t9
jamesmalin/former2 | util/findImportAttributes.py | import boto3
import pprint
import json
import re
# Discover, for every CloudFormation resource type, which attributes are
# required to import an existing resource: we create an IMPORT change set with
# a bogus identifier ('x') and parse the expected-identifier names out of the
# resulting API error message.
cfnclient = boto3.client('cloudformation', region_name = "us-east-1")
# Seed list of types not present in the published spec file.
cfn_types = [
    'AWS::ApiGatewayV2::VpcLink',
    'AWS::ImageBuilder::ImagePipeline',
    'AWS::ImageBuilder::ImageRecipe',
    'AWS::ImageBuilder::Component',
    'AWS::ImageBuilder::DistributionConfiguration',
    'AWS::ImageBuilder::InfrastructureConfiguration',
    'AWS::GroundStation::MissionProfile',
    'AWS::GroundStation::Config',
    'AWS::GroundStation::DataflowEndpointGroup'
]
# Extend with every type from the local CloudFormation resource spec.
with open("util/cfnspec.json", "r") as f:
    cfn_spec = json.loads(f.read())['ResourceTypes']
for cfntype, _ in cfn_spec.items():
    cfn_types.append(cfntype)
cfn_types = list(set(cfn_types)) # dedup
cfn_types.sort()
for cfntype in cfn_types:
    try:
        # Intentionally invalid ResourceIdentifier: the service rejects it
        # with a message listing the identifiers it actually expects.
        cfnclient.create_change_set(
            StackName='importstack',
            TemplateBody='''
Resources:
    x:
        Type: {}
'''.format(cfntype),
            ChangeSetName='importchangeset',
            ChangeSetType='IMPORT',
            ResourcesToImport=[
                {
                    'ResourceType': cfntype,
                    'LogicalResourceId': 'x',
                    'ResourceIdentifier': {
                        'x': 'x'
                    }
                },
            ]
        )
    except Exception as e:
        # NOTE(review): the pattern captures at most four identifier names;
        # types expecting more would be truncated — confirm four is enough.
        p = re.compile('Expected \[([a-zA-Z0-9]+)(?:, )?([a-zA-Z0-9]+)?(?:, )?([a-zA-Z0-9]+)?(?:, )?([a-zA-Z0-9]+)?\]')
        results = p.findall(str(e))
        if len(results) > 0:
            print(cfntype, results[0])
        else:
            #print(cfntype)
            #print("Not importable")
            pass
srounet/pystormlib | pystormlib/structure.py | <filename>pystormlib/structure.py
import ctypes
class MPQFileData(ctypes.Structure):
    """File-search result record returned by the StormLib find functions.

    NOTE(review): presumably mirrors StormLib's SFILE_FIND_DATA struct — the
    layout must match the exact DLL build being loaded; verify against the
    StormLib headers.  The `(ctypes.c_int, 32)` entries declare 32-bit
    bitfields, which for a 32-bit int behave like plain c_int fields.
    """
    _fields_ = [
        ('filename', ctypes.c_char * 1024),
        ('plainpath', ctypes.c_char_p),
        ('hashindex', ctypes.c_int, 32),
        ('blockindex', ctypes.c_int, 32),
        ('filesize', ctypes.c_int, 32),
        ('fileflags', ctypes.c_int, 32),
        ('compsize', ctypes.c_int, 32),
        ('filetimelo', ctypes.c_int, 32),
        ('filetimehi', ctypes.c_int, 32),
        ('locale', ctypes.c_int, 32)
    ]
|
srounet/pystormlib | pystormlib/utils.py | import ctypes
import pystormlib.winerror
def raise_for_error(func, *args, **kwargs):
    """Small helper around GetLastError.

    Clears the thread's last-error value, calls `func`, and converts any
    non-zero Win32 error code it set into a wrapper exception.

    :param func: a function using SetLastError internally
    :type func: callable
    :param args: Arbitrary Argument Lists
    :param kwargs: Keyword Arguments
    :return: func result
    :raise: PyStormException in case something went wrong with stormlib
    """
    ctypes.windll.kernel32.SetLastError(0)
    result = func(*args, **kwargs)
    error_code = ctypes.windll.kernel32.GetLastError()
    if error_code:
        # Bug fix: the previous default was the `exceptions` dict itself, so
        # any unmapped error code raised TypeError ("'dict' object is not
        # callable") instead of a storm error.  Fall back to the base class.
        exception = pystormlib.winerror.exceptions.get(
            error_code, pystormlib.winerror.PyStormException
        )
        raise exception(error_code)
    return result
srounet/pystormlib | pystormlib/__init__.py | <reponame>srounet/pystormlib
# -*- coding: UTF-8 -*-
"""Python wrapper for stormlib
Credits goes to <NAME> for the initial python code version (linux)
https://github.com/vjeux/pyStormLib
"""
import ctypes
import os
import pystormlib.structure
import pystormlib.winerror
from pystormlib.utils import raise_for_error
# Load the bundled 32-bit StormLib DLL, located relative to this package.
try:
    dllpath = os.path.join(os.path.dirname(__file__), 'ressources\\stormlib_x86.dll')
    _stormlib = ctypes.WinDLL(dllpath)
# NOTE(review): the bare except discards the real cause (missing DLL vs wrong
# architecture vs non-Windows platform) — consider chaining the original error.
except:
    raise RuntimeError("PyStormLib: can't locate stormlib_x86.dll")
class PyStormLib(object):
    """High-level wrapper around the StormLib MPQ-archive DLL (Windows only)."""
    def __init__(self, filepath=None):
        """PyStormLib wrapper for stormlib.
        :param filepath: A filepath to an MPQ file.
        :type filepath: str
        :raise: IOError if filename does not exists
        :raise: PyStormException in case something when wrong with stormlib
        """
        # Archive handle filled in by SFileOpenArchive.
        self.handle = ctypes.c_int()
        self.filepath = filepath
        if self.filepath:
            self.open_mpq_archive(self.filepath)
    def open_mpq_archive(self, filepath):
        """Opens a MPQ archive.
        :param filepath: A filepath to an MPQ file.
        :type filepath: str
        :raise: IOError if filename does not exists
        :raise: PyStormException in case something when wrong with stormlib
        """
        self.filepath = filepath
        # StormLib expects a byte string; accept str for convenience.
        if isinstance(filepath, str):
            filepath = filepath.encode('ascii')
        if not os.path.exists(filepath):
            raise IOError('{} not found'.format(filepath))
        raise_for_error(
            _stormlib.SFileOpenArchive, filepath, 0, 0, ctypes.byref(self.handle)
        )
    def search(self, pattern=None):
        """Search for a glob expression within an open MPQ Archive.
        ..code-block:: python
            adt_files = pystorm.search('*.adt')
            dbc_files = pystorm.search('*.dbc')
        :param pattern: a string representing a filename to search
        :type pattern: str
        :return: yields MPQFileData
        :rtype: generator
        :raise: PyStormException in case something when wrong with stormlib
        """
        if not pattern:
            pattern = '*'
        pattern = pattern.encode('ascii')
        file = pystormlib.structure.MPQFileData()
        # `result` is the find handle used to continue the enumeration.
        # NOTE(review): the find handle is never released via SFileFindClose —
        # confirm whether this leaks a handle per search.
        result = raise_for_error(
            _stormlib.SFileFindFirstFile, self.handle, pattern, ctypes.byref(file), None
        )
        yield file
        file = pystormlib.structure.MPQFileData()
        try:
            # SFileFindNextFile sets ERROR_NO_MORE_FILES when enumeration ends.
            while raise_for_error(_stormlib.SFileFindNextFile, result, ctypes.byref(file)):
                yield file
        except pystormlib.winerror.ErrorNoMoreFiles:
            pass
    def extract(self, filepath, destination):
        """Extract a file from an MPQ archive.
        If the destination directory doest not exists, the function will handle its creation by itself.
        ..code-block:: python
            pystorm.extract('World\\Maps\\Shadowfang\\Shadowfang_25_33.adt', "C:\\test\\")
        :param filepath: a string representing a filepath within an MPQ File
        :type filepath: str
        :param destination: a string representing a local directory to extract file.
        :type destination: str
        :return: yields MPQFileData
        :rtype: generator
        :raise: PyStormException in case something when wrong with stormlib
        """
        if isinstance(filepath, str):
            filepath = filepath.encode('ascii')
        if isinstance(destination, str):
            destination = destination.encode('ascii')
        # Normalise to forward slashes for the local filesystem write.
        destination = destination.replace(b'\\', b'/')
        try: os.makedirs(os.path.dirname(destination))
        except OSError:
            pass
        # StormLib may report EOF at the end of a successful extraction.
        try: raise_for_error(_stormlib.SFileExtractFile, self.handle, filepath, destination, 0)
        except pystormlib.winerror.ErrorHandleEOF:
            pass
    def contains(self, filepath):
        """Search for a MPQ archive filename within the current opened MQP archive
        ..code-block:: python
            pystorm.contains('World\\Maps\\Shadowfang\\Shadowfang_25_33.adt')
        :param filepath: a string representing a filepath within an MPQ File
        :type filepath: str
        :return: True if filepath exsits
        :rtype: boolean
        :raise: PyStormException in case something when wrong with stormlib
        """
        if isinstance(filepath, str):
            filepath = filepath.encode('ascii')
        # Any storm error (not just "file not found") is treated as absence.
        try: raise_for_error(_stormlib.SFileHasFile, self.handle, filepath)
        except pystormlib.winerror.PyStormException:
            return False
        return True
    def read(self, filepath):
        """Reads and return the content of a file from an MPQ archive.
        ..code-block:: python
            Shadowfang_25_33 = pystorm.read('World\\Maps\\Shadowfang\\Shadowfang_25_33.adt')
        :param filepath: a string representing a filepath within an MPQ File
        :type filepath: str
        :return: True if filepath exsits
        :rtype: boolean
        :raise: PyStormException in case something when wrong with stormlib
        """
        # NOTE(review): unlike extract/contains, `filepath` is not encoded to
        # bytes here before being handed to the DLL — confirm str input works.
        # Open the file
        file = ctypes.c_int()
        raise_for_error(_stormlib.SFileOpenFileEx, self.handle, filepath, 0, ctypes.byref(file))
        # Get the Size
        high = ctypes.c_int()
        low = raise_for_error(_stormlib.SFileGetFileSize, file, ctypes.byref(high))
        size = high.value * pow(2, 32) + low
        # Read the File
        data = ctypes.c_buffer(size)
        read = ctypes.c_int()
        raise_for_error(_stormlib.SFileReadFile, file, data, size, ctypes.byref(read), None)
        # Close and Return
        raise_for_error(_stormlib.SFileCloseFile, file)
        content = data.raw
        return content
|
srounet/pystormlib | setup.py | # -*- coding: utf-8 -*-
import setuptools
# Packaging metadata; the bundled stormlib DLL ships via package_data (*.dll).
# NOTE(review): `license` says BSD but the classifier says MIT — reconcile.
setuptools.setup(
    name='PyStormLib',
    version='0.1',
    description='Wrapper around Stormlib',
    author='<NAME>',
    author_email='<EMAIL>',
    license = "BSD",
    url = "https://github.com/srounet/pystormlib",
    packages = setuptools.find_packages(),
    package_data = {
        '': ['*.dll'],
    },
    classifiers=[
        'Programming Language :: Python :: 3.4',
        'Environment :: Win32 (MS Windows)',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License'
    ],
)
|
srounet/pystormlib | pystormlib/winerror.py | <gh_stars>0
class PyStormException(Exception):
    """Base class for all stormlib wrapper errors.

    Subclasses override `message`; the Win32 error code is appended to the
    exception text at construction time.
    """

    message = 'Oops, something bad happend'

    def __init__(self, error_code):
        super().__init__('{}, ErrorCode: {}'.format(self.message, error_code))
class ErrorNoMoreFiles(PyStormException):
    # Win32 ERROR_NO_MORE_FILES (18): file enumeration is exhausted.
    message = 'No more files'
class ErrorInvalidParameter(PyStormException):
    # Win32 ERROR_INVALID_PARAMETER (87).
    message = 'The parameter is incorrect.'
class ErrorHandleEOF(PyStormException):
    # Win32 ERROR_HANDLE_EOF (38): read reached the end of the file.
    message = 'Reached the end of the file.'
class ErrorPathNotFound(PyStormException):
    # Win32 ERROR_PATH_NOT_FOUND (3).
    message = 'The system cannot find the path specified.'
class ErrorSharingViolation(PyStormException):
    # Win32 ERROR_SHARING_VIOLATION (32): file is locked by another process.
    message = 'The process cannot access the file because it is being used by another process.'
class ErrorFileNotFound(PyStormException):
    # Win32 ERROR_FILE_NOT_FOUND (2).
    message = 'The system cannot find the file specified.'
# Map of Win32 error codes to wrapper exceptions; codes not listed here are
# handled by the caller's fallback (see utils.raise_for_error).
exceptions = {
    2: ErrorFileNotFound,
    3: ErrorPathNotFound,
    18: ErrorNoMoreFiles,
    32: ErrorSharingViolation,
    87: ErrorInvalidParameter,
    38: ErrorHandleEOF
}
|
exifna/gitHack | src/gitTools.py | <gh_stars>0
import subprocess
import traceback
from typing import List
import requests
import re
from src import tables, crud
from src.types import FirstHash, SimpleGitObject, GitObjectType
import os
class Git:
    """HTTP helper for enumerating and downloading objects from exposed
    `.git` directories on remote web sites.

    NOTE(review): the constructor chdir's into `gitFiles/.git`, so the
    relative paths used later (`objects/...`, `../tmp/...`) and the
    `git cat-file` subprocess calls all assume that working directory.
    """
    def __init__(self, proxies : dict = None, timeout: int = 5):
        self.session = requests.session()
        self.session.headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.3538.77 Safari/537.36'
        self.session.proxies = proxies
        # NOTE(review): requests.Session has no `timeout` attribute; this
        # assignment is silently ignored and requests run without a timeout —
        # pass timeout= on each .get() call to actually enforce it.
        self.session.timeout = timeout
        try:
            os.chdir('gitFiles/.git')
        except:
            pass
    def detectGitFile(self, site_url : str) -> bool:
        """Return True if `<site>/.git/HEAD` is exposed and starts with a ref."""
        try:
            check_url = self.reformat_url(site_url) + '.git/HEAD'
            request = self.session.get(check_url)
            if not request.ok:
                return False
            request.encoding = 'utf-8'
            return request.text.startswith('ref: refs/')
        except:
            # Any network/parse failure is treated as "not exposed".
            return False
    def getFirstHashes(self, site_url: str) -> List[FirstHash]:
        """Parse `.git/logs/HEAD` reflog entries into FirstHash records.

        Returns None when the log is unreachable or an unexpected error occurs.
        """
        try:
            find = False
            text = str()
            groups = None  # NOTE(review): unused variable
            # Reflog line: <old-hash> <new-hash> <user> <email> <time> <tz> <msg>
            re_search = r'(?P<first_hash>\w{40}) (?P<hash>\w{40}) (?P<user>[^<]{1,}) <(?P<mail>\S{1,})> (?P<time>\d{8,15}) (?P<hour>\S{0,6}) (?P<commit>.{1,})'
            checked_url = [
                self.reformat_url(site_url) + '.git/logs/HEAD',
                self.reformat_url(site_url) + '.git/LOGS/HEAD'
            ]
            for url in checked_url:
                try:
                    request = self.session.get(url)
                    if not request.ok:
                        continue
                    tmp_text = self.space_replacer(request.text)
                    if re.search(re_search, tmp_text):
                        text = tmp_text
                        find = True
                        break
                except:
                    pass
            if not find:
                return None
            return_lst: List[FirstHash] = list()
            for string in text.split('\n'):
                r_data = re.search(re_search, string.replace('\n', ''))
                if r_data:
                    return_lst.append(FirstHash(
                        r_data.group('first_hash'),
                        r_data.group('hash'),
                        r_data.group('user'),
                        r_data.group('mail'),
                        r_data.group('time'),
                        r_data.group('hour'),
                        r_data.group('commit')
                    ))
            return return_lst
        except:
            print(traceback.format_exc())
            return None
    def parseTreeHash(self, site: str, _hash : str) -> List[SimpleGitObject]:
        """Download a commit object and return the entries of its root tree.

        Returns None when any download or parse step fails.
        """
        try:
            if not self.downloadObject(site, _hash):
                print('not download')
                return None
            tmp = self.getObjectData(_hash)
            # Extract the root tree hash from the commit body.
            r = re.search(r'tree (?P<hash>\S{40})', self.space_replacer(tmp))
            if not r:
                print('not found r')
                return None
            self.downloadObject(site, r.group('hash'))
            tmp = self.getObjectData(r.group('hash'))
            if not tmp:
                print('not found tmp')
                return None
            returnList = self._parseTreeHash(tmp)
            return returnList
        except:
            print(traceback.format_exc())
            return None
    def _parseTreeHash(self, text : str) -> List[SimpleGitObject]:
        """Parse `git cat-file -p <tree>` output into SimpleGitObject entries."""
        returnList = list()
        for i in self.space_replacer(text).split('\n'):
            # Tree entry: <mode> <type> <hash> <name>
            r = re.search("\d{1,10} (?P<fType>\S{4}) (?P<hash>\S{40}) (?P<fName>.{1,})", i)
            if r:
                if r.group('fType') not in ['tree', 'blob']:
                    continue
                returnList.append(SimpleGitObject(
                    GitObjectType.tree if r.group('fType') == 'tree' else GitObjectType.blob,
                    r.group('hash'),
                    r.group('fName')
                ))
        return returnList
    def downloadObject(self, site: str, _hash: str) -> bool:
        """Fetch one loose object into the local objects/ tree.

        NOTE(review): annotated -> bool but returns None (not False) when the
        HTTP response is not OK; callers rely only on truthiness.
        """
        try:
            url = f'{self.reformat_url(site)}.git/objects/{_hash[:2]}/{_hash[2:]}'
            request = self.session.get(url)
            if not request.ok:
                return None
            request.encoding = 'utf-8'
            try:
                os.mkdir(f'objects/{_hash[:2]}')
            except:
                pass
            with open(f'objects/{_hash[:2]}/{_hash[2:]}', 'wb') as f:
                f.write(request.content)
            return True
        except:
            return False
    def getObjectData(self, _hash : str) -> str:
        """Return `git cat-file -p <hash>` output, or None on failure."""
        try:
            return subprocess.getoutput(f'git cat-file -p {_hash}')
        except:
            return None
    def dumpHash(self, _hash : str):
        """Write the pretty-printed object contents into ../tmp/<name>."""
        try:
            text = subprocess.getoutput(f'git cat-file -p {_hash}')
            with open(f'../tmp/{crud.getFileNameByHash(_hash)}', 'w', encoding='utf-8') as f:
                f.write(text)
        except:
            # Fallback: let the shell do the redirection directly.
            try:
                os.system(f'git cat-file -p {_hash} > ../tmp/{crud.getFileNameByHash(_hash)}')
            except:
                pass
    # dev tools
    def reformat_url(self, url : str) -> str:
        """Normalise a site URL: ensure a scheme and a trailing slash."""
        site_url = url if '://' in url else 'https://' + url
        site_url += '/' if not site_url.endswith('/') else ''
        return site_url
    def space_replacer(self, text : str) -> str:
        """Collapse runs of whitespace to single spaces, line by line."""
        t = ''
        for i in text.split('\n'):
            t += re.sub(r'\s{1,}', ' ', i) + '\n'
        if t[-1] == '\n':
            t = t[:-1]
        return t
|
exifna/gitHack | console.py | import os
import subprocess
import time
import traceback
from datetime import datetime
from time import sleep
import faker
from pick import pick
from src import crud, types
from src.gitTools import Git
from googlesearch import search
# Main-menu prompt and entries for the interactive console (Russian UI text).
title = types.label + 'Выбери с чем ты хочешь работать'
options = ['Мои сайты', 'Просканировать сайт', 'Конфигурация', 'Google AutoSearch']
def clear():
    """Clear the terminal: ``cls`` on Windows, ``clear`` elsewhere."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def recurser(site_id: int, _hash : str, path: str, name: str):
    """Recursively download a tree object and all its children.

    Records every tree/blob in the database via crud.add_object, skipping
    blob names whose extension is listed in types.ignore.

    NOTE(review): a new Git() (with its chdir side effect) is created on
    every recursion level — confirm this is intended.
    """
    try:
        proxy = crud.get_config()['proxies']
        proxies = {'http' : proxy, 'https' : proxy}
        git = Git(proxies=proxies)
        site = crud.get_site(site_id).site_name
        print(f'Качаю "{path}{name}"')
        obj = git.downloadObject(site, _hash)
        crud.add_object(site_id, types.GitObjectType.tree.value, bool(obj), _hash, path, name)
        if not obj:
            return
        txt = git.getObjectData(_hash)
        if not txt:
            return
        for i in git._parseTreeHash(txt):
            if i.Type == types.GitObjectType.tree:
                # Descend into the sub-tree.
                recurser(site_id, i._hash, path + name + '/', i.name)
            if i.Type == types.GitObjectType.blob:
                if True in [i.name.endswith(x) for x in types.ignore]:
                    continue
                print(f'> Качаю {path + name}/{i.name}')
                t = git.downloadObject(site, i._hash)
                crud.add_object(site_id, types.GitObjectType.blob.value, bool(t), i._hash, path + name + '/', i.name)
    except:
        # Broad catch so one bad object does not abort the whole crawl.
        print(traceback.format_exc().replace('\n', ' | '))
def recurse_dump(site_id: int, path: str):
    """Recursively mirror a site's recorded objects into ``../dumps/<site>/``.

    Trees become directories; blobs are written from the local git object
    store via ``git cat-file -p``. Objects that were never downloaded get a
    ``'-'`` placeholder file.

    :param site_id: database id of the site to dump
    :param path: folder (relative to the site root) to dump, e.g. ``'/'``
    """
    print(f'> Check folder: {path}')
    for i in crud.get_folder_data(site_id, path):
        if i.object_type == types.GitObjectType.tree.value:
            try:
                os.mkdir(f'../dumps/{crud.get_site(site_id).site_name}{path}{i.name}')
                recurse_dump(site_id, path + i.name + '/')
            except:
                # mkdir fails if the directory already exists; skip silently.
                pass
        if i.object_type == types.GitObjectType.blob.value:
            print(f'> Dump {i.path}{i.name}...')
            if not i.download:
                # Object was never downloaded; write a placeholder.
                text = '-'
                with open(f'../dumps/{crud.get_site(site_id).site_name}{path}{i.name}', 'w', encoding='utf-8') as f:
                    f.write(text)
            else:
                # NOTE(review): this Git() instance is never used below;
                # presumably its constructor has a side effect (e.g. cwd
                # change) -- confirm before removing.
                git = Git()
                try:
                    text = subprocess.getoutput(f'git cat-file -p {i._hash}')
                    with open(f'../dumps/{crud.get_site(site_id).site_name}{path}{i.name}', 'w', encoding='utf-8') as f:
                        f.write(text)
                except:
                    # Fallback: shell redirection.
                    try:
                        os.system(f'git cat-file -p {i._hash} > ../dumps/{crud.get_site(site_id).site_name}{path}{i.name}')
                    except:
                        print(f'> Can\'t dump {i.path}{i.name} | {i._hash}')
# Main interactive menu loop. Top-level options (by index):
#   0 -> browse previously scanned sites, 1 -> scan a new site,
#   2 -> configuration (trigger files / proxy), 3 -> Google auto-search.
while True:
    option, index = pick(options, title, indicator='=>')
    # --- "My sites": browse, search, dump or delete a scanned site -------
    if index == 0:
        while True:
            tmp_data = crud.get_my_sites()
            sites = ['Назад'] + [x.site_name for x in tmp_data]
            option, index = pick(sites, types.label + 'Выбери сайт', indicator='=>')
            options_ = ['Назад'] + ['Просмотр всех файлов', 'Поиск файлов по любой информации', 'Просмотр триггер-файлов' ,
                       'Dump всех скачанных файлов в папку', 'Удалить всю информацию']
            if not index:
                break
            clear()
            print(types.label + '=> Плыз вейт, подгруражаю информацию...')
            site_name = tmp_data[index - 1].site_name
            site_id = tmp_data[index-1].id
            txt = str(types.label +
                f'\n=> Ты работаешь с сайтом {site_name}'
                f'\n=> Файлов найдено: {crud.get_project_files_count(tmp_data[index - 1].id)}'
                f'\n=> Файлов с триггер названиями: {crud.get_interested_files_count(tmp_data[index - 1].id)}')
            while True:
                option, index_ = pick(options_, txt, indicator='=>')
                if not index_:
                    break
                # -- browse all recorded files like a file manager --------
                if index_ == 1:
                    path = '/'
                    git = Git()
                    while True:
                        clear()
                        files = crud.get_folder_data(site_id, path)
                        option, index__ = pick(['Назад'] + [f'[{"+ скачан" if x.download else "- нету "} {"/" if x.object_type == types.GitObjectType.tree.value else " "}] ' + str(x.path + x.name) + str("/" if x.object_type == types.GitObjectType.tree.value else "") for x in files], types.label + f'Выбери файл для просмотра', indicator='=>')
                        if not index__:
                            # "Назад": at the root leave the browser,
                            # otherwise go one directory up.
                            if path == '/':
                                break
                            path = str('/'.join(path.split('/')[:-2]) + '/').replace('//', '/')
                            continue
                        if files[index__ - 1].object_type == types.GitObjectType.tree.value:
                            # Enter the selected directory.
                            path = files[index__ - 1].path + files[index__ - 1].name + '/'
                            files = crud.get_folder_data(site_id, path)
                            continue
                        text = git.getObjectData(files[index__ - 1]._hash)
                        clear()
                        print_text = types.label + f'> Нажми <enter> чтобы выйти, или напиши download чтобы скачать файл.\n> {"=" * 65} <\n\n{text if text else "> Не удалось получить исходный код..."}\n\n> {"=" * 65} <\n> Нажми <enter> чтобы выйти, или напиши download чтобы скачать файл.\n(input)=> '
                        tmp = input(print_text)
                        clear()
                        if tmp == 'download':
                            print(types.label + f'> Подожди, загружаю файл... После загрузки он должен был появится в папке gitFiles/tmp/ с названием "{files[index__ - 1].name}"')
                            git.dumpHash(files[index__ - 1]._hash)
                            input('> Дамп завершён... Нажми <enter> чтобы продолжить.')
                # -- search files by any known information (hash/name/path)
                if index_ == 2: # поиск файла по информации
                    clear()
                    data = input(types.label + f'> Введи любую известную тебе информацию о файле (хеш, имя, путь): ')
                    if not data:
                        continue
                    clear()
                    print(types.label + '> Подожди, получаю данные от базы данных...')
                    files = crud.find_objects_by_data(site_id, data)
                    t = ['Назад'] + [f'[{"+ скачан" if x.download else "- нету "} {"/" if x.object_type == types.GitObjectType.tree.value else " "}] ' + str(x.path + x.name) + str("/" if x.object_type == types.GitObjectType.tree.value else "") for x in files]
                    if not len(t):
                        clear()
                        input(types.label + '> Ничего не найдено...')
                        break
                    _, index__ = pick(t, types.label + 'Выбери пункт, содержащий нужную тебе информацию. Уточнение: по папкам переходить нельзя', indicator='=>')
                    if not index__:
                        continue
                    if files[index__ - 1].object_type == types.GitObjectType.tree.value:
                        clear()
                        input(types.label + f'Путь: {files[index__ - 1].path + files[index__ - 1].name}/')
                        break
                    git = Git()
                    text = git.getObjectData(files[index__ - 1]._hash)
                    clear()
                    print_text = types.label + f'> Нажми <enter> чтобы выйти, или напиши download чтобы скачать файл.\n> {"=" * 65} <\n\n{text if text else "> Не удалось получить исходный код..."}\n\n> {"=" * 65} <\n> Нажми <enter> чтобы выйти, или напиши download чтобы скачать файл.\n(input)=> '
                    tmp = input(print_text)
                    clear()
                    if tmp == 'download':
                        print(types.label + f'> Подожди, загружаю файл... После загрузки он должен был появится в папке gitFiles/tmp/ с названием "{files[index__ - 1].name}"')
                        git.dumpHash(files[index__ - 1]._hash)
                        input('> Дамп завершён... Нажми <enter> чтобы продолжить.')
                # -- list only files whose names match a trigger ----------
                if index_ == 3:
                    files = crud.get_triggers_site_files(site_id)
                    t = ['Назад'] + [f'[{"+ скачан" if x.download else "- нету "} {"/" if x.object_type == types.GitObjectType.tree.value else " "}] ' + str(x.path + x.name) + str("/" if x.object_type == types.GitObjectType.tree.value else "") for x in files]
                    if not len(t):
                        clear()
                        input(types.label + '> Ничего не найдено...')
                        break
                    _, index__ = pick(t, types.label + 'Выбери пункт, содержащий нужную тебе информацию. Уточнение: по папкам переходить нельзя', indicator='=>')
                    if not index__:
                        continue
                    if files[index__ - 1].object_type == types.GitObjectType.tree.value:
                        clear()
                        input(types.label + f'Путь: {files[index__ - 1].path + files[index__ - 1].name}/')
                        break
                    git = Git()
                    text = git.getObjectData(files[index__ - 1]._hash)
                    clear()
                    print_text = types.label + f'> Нажми <enter> чтобы выйти, или напиши download чтобы скачать файл.\n> {"=" * 65} <\n\n{text if text else "> Не удалось получить исходный код..."}\n\n> {"=" * 65} <\n> Нажми <enter> чтобы выйти, или напиши download чтобы скачать файл.\n(input)=> '
                    tmp = input(print_text)
                    clear()
                    if tmp == 'download':
                        print(types.label + f'> Подожди, загружаю файл... После загрузки он должен был появится в папке gitFiles/tmp/ с названием "{files[index__ - 1].name}"')
                        git.dumpHash(files[index__ - 1]._hash)
                        input('> Дамп завершён... Нажми <enter> чтобы продолжить.')
                # -- dump every downloaded file into ../dumps/<site>/ -----
                if index_ == 4:
                    path = '/'
                    git = Git()
                    os.mkdir(f'../dumps/{crud.get_site(site_id).site_name}')
                    recurse_dump(site_id, path)
                    print('\n\n> Done.')
                # -- delete the site and everything recorded about it -----
                if index_ == 5:
                    if input('> Вы уверены? ').lower() in ['y', 'yes', 'д', 'да']:
                        crud.remove_site(site_id)
                        break
    # --- Scan a new site: fetch git log, pick a commit, crawl its tree ---
    if index == 1:
        clear()
        site = input(types.label + '> Введи url сайта, который хочешь просканировать: ').split('://')[-1]
        if not site:
            continue
        proxy = crud.get_config()['proxies']
        proxies = {'http' : proxy, 'https' : proxy}
        git = Git(proxies=proxies)
        data = git.getFirstHashes(site)
        if not data:
            input('> Ничего не найдено...')
            continue
        _, index_ = pick(['Назад'] + [f'{x._hash} | {datetime.utcfromtimestamp(int(x.time)).strftime("%d-%m-%Y %H:%M:%S")} | {x.mail} | {x.commit_message}' for x in list(reversed(data))], types.label + '> Выбери дерево, с которым хочешь работать', indicator='=>')
        if not index_:
            continue
        s_data = crud.add_site(site)
        site_id = s_data.id
        site_name = s_data.site_name
        path = '/'
        first_hashes = git.parseTreeHash(site_name, data[index_-1]._hash)
        if not first_hashes:
            input('> Не удалось получить хеш дерева... (<enter>)')
            continue
        # First pass: download the root-level blobs.
        for i in first_hashes:
            try:
                if i.Type == types.GitObjectType.tree:
                    continue
                print(f'Скачивая файл "{i.name}"... | ', end="")
                t = git.downloadObject(site_name, i._hash)
                crud.add_object(site_id, i.Type.value, bool(t), i._hash, '/', i.name)
                print('Удалось' if t else 'Не удалось')
            except:
                print(f'Произошла ошибка: {traceback.format_exc()}. {i.name} - {i._hash}')
        # Second pass: recurse into the root-level trees.
        for i in first_hashes:
            try:
                if i.Type == types.GitObjectType.tree:
                    recurser(site_id, i._hash, '/', i.name)
            except:
                print(f'Произошла ошибка: {traceback.format_exc()}. {i.name} - {i._hash}')
    # --- Configuration: manage trigger file names and the proxy ----------
    if index == 2:
        actions = ['Назад', 'Триггер-файлы', 'Прокси']
        while True:
            option, index_ = pick(actions, types.label + f'> Выбери чтобы ты хотел просмотреть/изменить',
                                  indicator='=>')
            if index_ == 0:
                break
            if index_ == 1:
                while True:
                    data = crud.getTriggerData()
                    actions_ = ['Назад', 'Добавить триггер-файл'] + ['[*] ' + x.name for x in data]
                    option, index__ = pick(actions_, types.label + f'> Выбери какой триггер добавить', indicator='=>')
                    if not index__:
                        break
                    if index__ == 1:
                        clear()
                        t = input(types.label + '> Введи триггер (оставь строку пустой если хочешь отменить): ')
                        if not t:
                            continue
                        crud.addTrigger(t)
                        continue
                    # Any other entry: confirm and delete that trigger.
                    clear()
                    if input(types.label + '> Подтверди удаление [y/д]: ').lower() in ['y', 'yes', 'д', 'да']:
                        crud.deleteTrigger(data[index__ - 2].id)
            if index_ == 2:
                clear()
                t = input(types.label + f'> Прокси сейчас: "{crud.get_config()["proxies"]}"\n> Введи новые прокси, если не хочешь, то оставь поле пустым: ')
                if not t:
                    continue
                crud.editProxies(t)
    # --- Google auto-search: probe each result site for an exposed .git --
    if index == 3:
        clear()
        t = input(types.label + f'> Введи запрос, если хочешь отменить - просто жми <enter>: ')
        if not t:
            continue
        clear()
        print(types.label + '> Подожди, получаю информацию...')
        result = search(t, num_results=1000)
        res = [{'status': 'wait', 'site' : x.split('://')[-1].split('/')[0]} for x in result]
        # De-duplicate while preserving order.
        res1 = list()
        for i in res:
            if i not in res1:
                res1.append(i)
        result = res1
        proxy = crud.get_config()['proxies']
        proxies = {'http' : proxy, 'https' : proxy}
        git = Git(proxies = proxies)
        clear()
        print(types.label + f'> Результатов получено: {len(result)}\n')
        tmp = 1
        max_ = len(max([x["site"] for x in res], key=len))
        # Log file named with a short random prefix plus a timestamp.
        out_file = f'../tmp/' + faker.Faker().md5()[:5] + '_' +datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '.txt'
        f = open(out_file, 'w', encoding='utf-8')
        def_txt = "=" * 40 + "\n\n" + f' Сканирование по запросу "{t}" дало {len(res)} результатов.\n\n' + "=" * 40 + '\n\n> Ниже будет лог сайтов на которых были найдены хеши.\n\n'
        f.write(def_txt)
        f.close()
        print(types.label + f'> Проверено: {tmp}/{len(result)}\n> Выходной файл с результатами: gitFiles/tmp/{out_file.split("/")[-1]}\n\n' + '\n'.join([
            f'({tmp}/{len(result)})> {x["site"]}' + ' ' * (max_ - len(x["site"]) + 5) + x["status"] for x in res
        ]))
        for i in res:
            try:
                print(f'> Checked: {i["site"]}')
                check = git.getFirstHashes(i['site'])
                print(f'> Checked: {i["site"]} - {check}')
                status = 'Detect' if check else 'Not found'
                i['status'] = status
                def_txt += f'[{f"{status}":>9}] > {i["site"]} > Found {check} hashes\n'
                # Rewrite the whole log after every probe so progress
                # survives an interruption.
                with open(out_file, 'w', encoding='utf-8') as f:
                    f.write(def_txt)
                clear()
                print(types.label + f'> Проверено: {tmp}/{len(result)}\n> Выходной файл с результатами: gitFiles/tmp/{out_file.split("/")[-1]}\n\n' + '\n'.join([
                    f'({tmp}/{len(result)})> {x["site"]}' + ' ' * (max_ - len(x["site"]) + 5) + x["status"] for x in res
                ]))
            except:
                print(traceback.format_exc())
            tmp += 1
        print(types.label + f'> Проверено: {tmp}/{len(result)}\n\n\n' + '\n'.join([
            f'({tmp}/{len(result)})> {x["site"]}' + ' ' * (max_ - len(x["site"]) + 5) + x["status"] for x in res
        ]) + f'\n\n>> Done.\n>> Check file gitFiles/tmp/{out_file.split("/")[-1]}\n\n>> Press <enter>.')
        input()
|
exifna/gitHack | tests.py | <gh_stars>0
from src import crud
from src.gitTools import Git
from src.types import GitObjectType
# Ad-hoc smoke test: build a Git client over a local SOCKS proxy (Tor's
# default port) and print the parsed configuration.
proxy = 'socks5://127.0.0.1:9050'
git = Git(proxies={
    'http' : proxy,
    'https' : proxy
})
print(crud.get_config())
exifna/gitHack | src/crud.py | import configparser
from typing import List
from src.database import session
from src.gitTools import Git
from src.tables import *
def get_my_sites(session = session) -> List[Site]:
    """Return every stored site, newest first."""
    query = session.query(Site).order_by(-Site.id)
    return query.all()
def get_interested_files(session = session) -> List[str]:
    """Return the names of all configured trigger file names."""
    names = []
    for row in session.query(InterestedFileName).all():
        names.append(row.name)
    return names
def get_project_files_count(site_id : int, session = session) -> int:
    """Count the blob (file) objects recorded for the given site."""
    # Two filter args in one call is an implicit AND, same as chaining.
    query = session.query(GitObject).filter(
        GitObject.site_id == site_id,
        GitObject.object_type == GitObjectType.blob.value,
    )
    return query.count()
def get_interested_files_count(site_id : int, session = session) -> int:
    """Count a site's objects whose name matches a trigger file name.

    :param site_id: id of the site whose objects are inspected
    :return: number of objects whose name appears in the trigger list
    """
    # Fetch the trigger names once, up front. The original called
    # get_interested_files() (a fresh DB query) inside the membership test
    # for every object -- O(objects) redundant queries plus O(n*m) scans.
    interested = set(get_interested_files())
    data = session.query(GitObject).filter(
        GitObject.site_id == site_id
    ).all()
    return sum(1 for obj in data if obj.name in interested)
def remove_site(site_id : int, session = session):
    """Delete a site and every git object recorded for it, then commit."""
    # A plain loop instead of a list comprehension used only for its side
    # effects (the comprehension built and discarded a throwaway list).
    for obj in session.query(GitObject).filter(GitObject.site_id == site_id).all():
        session.delete(obj)
    session.delete(session.query(Site).filter(Site.id == site_id).one())
    session.commit()
def get_last_sites(session = session) -> List[Site]:
    """Return the ten most recently added sites, newest first."""
    query = session.query(Site).order_by(-Site.id).limit(10)
    return query.all()
def find_sites(string:str, session = session) -> List[Site]:
    """Return up to ten newest sites whose name starts with *string*."""
    matches = session.query(Site).filter(Site.site_name.startswith(string))
    return matches.order_by(-Site.id).limit(10).all()
def get_triggers_site_files(site_id : int, session = session) -> List[GitObject]:
    """Return the site's objects whose name equals any trigger file name."""
    result = list()
    for trigger_name in get_interested_files():
        matches = session.query(GitObject).filter(GitObject.site_id == site_id).filter(GitObject.name == trigger_name).all()
        result.extend(matches)
    return result
def add_site(site_name: str, session = session) -> Site:
    """Persist a new Site row and return it (id populated after commit)."""
    new_site = Site(site_name=site_name)
    session.add(new_site)
    session.commit()
    return new_site
def get_folder_data(site_id: int, folder: str, session = session) -> List[GitObject]:
    """Return all of a site's objects located directly inside *folder*."""
    query = session.query(GitObject).filter(GitObject.site_id == site_id)
    return query.filter(GitObject.path == folder).all()
def find_objects_by_data(site_id: int, data: str, session = session) -> List[GitObject]:
    """Return the site's objects whose name, path or hash contains *data*.

    Matches are collected per column (name, then path, then hash), so an
    object matching several columns appears once per matching column.
    """
    base = session.query(GitObject).filter(GitObject.site_id == site_id)
    result = list()
    for column in (GitObject.name, GitObject.path, GitObject._hash):
        result.extend(base.filter(column.contains(data)).all())
    return result
def get_site_objects_by_id(site_id: int, session = session) -> List[GitObject]:
    """Return every git object recorded for the given site."""
    query = session.query(GitObject).filter(GitObject.site_id == site_id)
    return query.all()
def getFileNameByHash(_hash: str, session = session) -> str:
    """Return the stored file name of the object with the given hash.

    Raises if zero or more than one object carries that hash (``.one()``).
    """
    match = session.query(GitObject).filter(GitObject._hash == _hash).one()
    return match.name
def get_site(site_id : int, session = session) -> Site:
    """Return the Site with the given id, or None if it does not exist."""
    query = session.query(Site).filter(Site.id == site_id)
    return query.one_or_none()
def add_object(site_id : int, object_type: GitObjectType, download: bool, _hash: str, path: str, name : str, session = session):
    """Persist one git object (blob or tree) belonging to a site.

    :param download: whether the object's content was successfully fetched
    """
    new_object = GitObject(
        site_id=site_id,
        object_type=object_type,
        download=download,
        _hash=_hash,
        path=path,
        name=name,
    )
    session.add(new_object)
    session.commit()
def addTrigger(trigger : str, session = session):
    """Store a new trigger file name and commit."""
    new_trigger = InterestedFileName(name=trigger)
    session.add(new_trigger)
    session.commit()
def deleteTrigger(trigger_id : int, session = session):
    """Remove the trigger file name with the given id and commit."""
    row = session.query(InterestedFileName).filter(InterestedFileName.id == trigger_id).one()
    session.delete(row)
    session.commit()
def getTriggerData(session = session) -> List[InterestedFileName]:
    """Return every stored trigger file name row."""
    query = session.query(InterestedFileName)
    return query.all()
def editProxies(proxies : str):
    """Overwrite the ``[PROXIES]`` section of config.cfg with *proxies*.

    :param proxies: proxy URL string, e.g. ``socks5://127.0.0.1:9050``
    """
    # NOTE(review): this Git instance is never used; presumably its
    # constructor has a side effect (e.g. changing the working directory so
    # the relative '../../config.cfg' path resolves) -- confirm before
    # removing.
    git = Git()
    parser = configparser.ConfigParser()
    parser['PROXIES'] = {'proxy' : proxies}
    with open('../../config.cfg', 'w', encoding='utf-8') as f:
        parser.write(f)
def get_config() -> dict:
    """Read config.cfg and return it as a plain dict.

    :return: ``{'proxies': <proxy string from [PROXIES]/proxy>}``
    """
    # NOTE(review): this Git instance is never used; presumably its
    # constructor has a side effect needed for the relative config path --
    # confirm before removing (same pattern as editProxies).
    git = Git()
    parser = configparser.ConfigParser()
    parser.read('../../config.cfg')
    d = dict()
    d['proxies'] = parser['PROXIES']['proxy']
    return d
|
exifna/gitHack | src/database.py | <gh_stars>0
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Module-level database bootstrap: importing this module creates the SQLite
# schema and a shared session used as the default everywhere in crud.py.
Base = declarative_base()
# init all tables
from src.tables import *
engine = create_engine('sqlite:///data.db', echo=False, connect_args={'check_same_thread': False}) # connect to db
Base.metadata.create_all(engine) # run init
DBSession = sessionmaker(autoflush=False, bind=engine)
# Single module-wide session shared by all crud helpers.
session = DBSession()
exifna/gitHack | src/types.py | from enum import Enum
class FirstHash:
    """One parsed commit record (hash, author, timestamp, message)."""
    def __init__(self, last_hash: str, _hash: str, user: str, mail: str, time: int, hour: str, commit_message: str):
        self.last_hash = last_hash  # hash of the preceding commit
        self._hash = _hash  # hash of this commit
        self.user = user  # author user name
        self.mail = mail  # author e-mail
        self.time = time  # unix timestamp (seconds)
        self.hour = hour  # timezone/hour field as parsed from the log
        self.commit_message = commit_message
class LogTypes(Enum):
    """Categories for log records (stored as integers)."""
    System = 0
    Exception = 1
    Info = 2
class GitObjectType(Enum):
    """The two git object kinds tracked: file (blob) or directory (tree)."""
    blob = 0
    tree = 1
class SimpleGitObject:
    """A (type, hash, name) entry parsed out of a git tree object."""
    def __init__(self, Type: GitObjectType, _hash: str, name: str):
        self.Type = Type  # GitObjectType.blob or GitObjectType.tree
        self._hash = _hash  # SHA-1 of the object
        self.name = name  # file or directory name within the tree
# File extensions skipped when downloading blobs (static assets of no
# interest -- see the endswith() check in console.recurser).
ignore = [
    '.css', '.js', '.ttf', '.png', '.ico', '.jpeg', '.jpg'
]
# ASCII-art banner prepended to every menu title.
label = """  _____ _ _   _    _            _
 / ____(_) | | |  | |          | |
| |  __ _| |_| |__| | __ _  ___| | __
| | |_ | | __|  __  |/ _` |/ __| |/ /
| |__| | | |_| |  | | (_| | (__|   <
 \_____|_|\__|_|  |_|\__,_|\___|_|\_\\
"""
exifna/gitHack | src/tables.py | <filename>src/tables.py
from enum import Enum
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
from src.database import Base
from src.types import GitObjectType
class Site(Base):
    """A website that has been (or is being) scanned."""
    __tablename__ = 'sites'
    id: int = Column(Integer, nullable=False, unique=True, primary_key=True, autoincrement=True)
    # Host name of the site, e.g. 'example.com'.
    site_name: str = Column('site_name', String)
class Log(Base):
    """A log record (category per types.LogTypes, stored as an int)."""
    __tablename__ = 'logs'
    id: int= Column(Integer, nullable=False, unique=True, primary_key=True, autoincrement=True)
    time: int = Column('time', Integer)  # unix timestamp
    log_type: int = Column('log_type', Integer)  # types.LogTypes value
    text: str = Column('text', String)
class GitObject(Base):
    """One git object (blob or tree) discovered on a scanned site."""
    __tablename__ = 'git_objects'
    id: int = Column(Integer, nullable=False, unique=True, primary_key=True, autoincrement=True)
    site_id: int = Column('site_id', ForeignKey('sites.id'))
    # Stored as the integer value of types.GitObjectType (0=blob, 1=tree).
    object_type: GitObjectType = Column('type', Integer)
    # Whether the object's content was successfully downloaded.
    download: bool = Column('download', Boolean)
    _hash: str = Column('hash', String)
    path: str = Column('path', String)  # parent folder, e.g. '/src/'
    name: str = Column('name', String)  # file or directory name
class InterestedFileName(Base):
    """A 'trigger' file name that marks an object as interesting."""
    __tablename__ = 'interested_filename'
    id: int = Column(Integer, nullable=False, unique=True, primary_key=True, autoincrement=True)
    name: str = Column('name', String)
|
HypoChloremic/fcsan | src/indexer.py | <reponame>HypoChloremic/fcsan
from analyze import Analyze
import argparse
# ap = argparse.ArgumentParser()
# ap.addargument("-f", "--folder")
# opts = ap.parse_args()
# Module-level setup: one Analyze instance; run.read() populates run.files
# with every .fcs file found under the configured parent directory.
run = Analyze()
run.read()
files = run.files
def indexer():
    """Write FACS_INDEX.txt listing file name ($FIL) and date ($DATE) of
    every discovered .fcs file (one CSV-ish line per file)."""
    with open("FACS_INDEX.txt", "w") as file:
        for i in files:
            # re-parse each file so run.meta holds its metadata
            run.read(i)
            meta = run.meta
            str_to_save = f"File: {meta['$FIL']},Date: {meta['$DATE']},\n"
            file.write(str_to_save)
indexer()
HypoChloremic/fcsan | src/test.py | <gh_stars>0
# Test of fcsreader.py
# (cc) 2017 <NAME>
# Formagna
import fcsreader
import matplotlib.pyplot as plt
# Smoke test: parse data.fcs and show an SSC-A vs FSC-A scatter plot.
process = fcsreader.fcsReader("data.fcs")
# NOTE(review): in fcsreader.py, ``data`` is an attribute (a DataFrame),
# not a method -- this call looks broken; verify.
s = process.data()
print(s.columns)
s.plot(x = "SSC-A", y="FSC-A", kind="scatter")
plt.show()
HypoChloremic/fcsan | src/example_runs.py | # print(run.meta)
# Scratchpad of example invocations against a pre-built ``run`` (an
# Analyze instance); all but the saveplots/kmeans call are kept commented
# out for reference.
# run.kmeans(channels=['FSC-A', 'SSC-A', 'FSC-H', 'FSC-W', 'SSC-H', 'SSC-W', 'FITC-A', 'FITC-H', 'PE-A', 'PE-H', 'PE-Cy7-A', 'PE-Cy7-H', 'UV1-A', 'UV1-H', 'UV2-A', 'UV2-H', 'APC-Cy7-A', 'APC-Cy7-H', 'APC-A', 'APC-H', 'PE-Cy5-A', 'PE-Cy5-H'], logx=False, logy=True, transpose=False, nclusters=5)
# run.plot(x="FSC-A", y="SSC-A", kind="scatter", transpose=False)
# run.plot(x="FSC-A", y="SSC-A", yfunc=_log, kind="scatter", transpose=False)
# freq = run.freq("FSC-A", scope=500)
# run.plot(x="FSC-A", y="SSC-A", yfunc=_log, kind="scatter", transpose=False, save=True)
# plt.show()
# plt.savefig("blah.png")
# print(freq[])
# run.plot(x=freq[0], y=run.dataset["FSC-A"], kind="scatter")
# run.saveplots(run.freq, column="FSC-A", scope=500, rdata=True, delimiter='\\')
# run.saveplots(run.plot, x="FSC-A", y="SSC-A", yfunc=_log, kind="scatter", transpose=False, save=True, description="FSC-A_ON_SSC-A")
# print(run.dataset["SSC-A"])
run.saveplots(run.kmeans, channels=['FSC-A', 'SSC-A', 'FSC-H', 'FSC-W', 'SSC-H', 'SSC-W', 'FITC-A', 'FITC-H', 'PE-A', 'PE-H', 'PE-Cy7-A', 'PE-Cy7-H', 'UV1-A', 'UV1-H', 'UV2-A', 'UV2-H', 'APC-Cy7-A', 'APC-Cy7-H', 'APC-A', 'APC-H', 'PE-Cy5-A', 'PE-Cy5-H'], logx=False, logy=True, transpose=False, nclusters=5, description="kmeanS2", limit_dataset=None)
# run.saveplots(run.plot_3d, x="FSC-A", z="APC-A", y="FITC-A", yfunc=_log, kind="scatter", transpose=False, save=True, description="FITC")
# run.limiter(channels=["SSC-A", "FSC-A"], xmax=2000)
# run.plot(x="FSC-A", y="SSC-A", yfunc=_log, kind="scatter")
# run.saveplots(run.limiter, channels=["FSC-A", "SSC-A"], xmax=25000, save=True, description="limite")
HypoChloremic/fcsan | src/fcsreader.py | <reponame>HypoChloremic/fcsan
# Reader of fcs files
# (cc) 2017 <NAME>
# Formagna
from pandas import DataFrame as df
import fcsparser
import numpy as np
__version__ = "0.1"
class fcsReader:
    """Thin wrapper around ``fcsparser`` exposing the metadata and event
    data of a single .fcs flow-cytometry file.

    The file is parsed once on construction; ``rdata``/``rmeta`` re-parse
    and return the data frame / metadata respectively.
    """

    def __init__(self, path, **kwargs):
        self.path = path
        # Parse immediately so .meta and .data are available right away.
        self.meta, self.data = fcsparser.parse(path,
                                               meta_data_only=False,
                                               reformat_meta=True)

    def _resolve_path(self, path):
        """Return the effective file path, preferring the one from __init__.

        :raises ValueError: if no path is available at all.
        """
        if self.path:
            return self.path
        if path:
            self.path = path
            return path
        # The original raised a plain string, which is itself a TypeError
        # in Python 3 ("exceptions must derive from BaseException");
        # raise a proper exception instead.
        raise ValueError("Path to .fcs file has not been provided")

    def rdata(self, path=None, **kwargs):
        """Re-parse the file and return the event data.

        :return: channel data as a pandas.DataFrame
        """
        path = self._resolve_path(path)
        self.meta, self.data = fcsparser.parse(path,
                                               meta_data_only=False,
                                               reformat_meta=True)
        return self.data

    def rmeta(self, path=None, **kwargs):
        """Re-parse the file and return its metadata (channel names etc.).

        See the README for details on the channels involved.
        """
        path = self._resolve_path(path)
        self.meta, self.data = fcsparser.parse(path,
                                               meta_data_only=False,
                                               reformat_meta=True)
        return self.meta

    def save_data(self):
        # Not implemented yet.
        pass
|
HypoChloremic/fcsan | src/analyze.py | <reponame>HypoChloremic/fcsan<filename>src/analyze.py
# Test of fcsreader.py
# (cc) 2017 <NAME>
# Formagna
# An illustration of how access the arguments.
# Den här kommer att spara det the user har entered, vilket är värt att förstå.
# Alltså, när användaren skriver exempelvis python 1.py -a hello world (för att -a
# accepterar två arguemnts) kommer man att spara skiten i en Namespace lista,
# verkar det som, där man har equatat argumenten utan bindestreck med the values
# man entered som en string!
# We should look on SSC-A on FSC-A, then FSC-H on FSC-W, then
# APC-A on SSC-A, count on APC-A, then APC-A on SSC-A
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from pandas import DataFrame as df
from pandas import concat, cut
from fcsreader import fcsReader
from subprocess import call
from math import log
from mpld3 import plugins, utils
from matplotlib import use
from bs4 import BeautifulSoup as bs
from plotly.graph_objs import Scatter, Layout
import numpy as np
import plotly as ply
import matplotlib.pyplot as plt, mpld3
import seaborn
import os
__version__ = "0.2"
class Analyze:
    """Pipeline of plotting/clustering helpers over .fcs files.

    The config file maps keys to values with ``KEY: VALUE`` lines; the
    ``PARENT`` key points at the directory tree that is scanned for .fcs
    files, and ``OUTPUT`` is where saveplots() writes images.
    """
    def __init__(self, config="config/config.yaml", pos=0, name=None, *args, **kwargs):
        self.pos = pos  # index of the file to select when no name is given
        self.name = name  # substring used to pick a specific file, if any
        print("[GLOBAL] Starting the analysis")
        print("[GLOBAL] Opening config")
        with open(config, "r") as file:
            self.config = {i.split(": ")[0]: i.split(": ")[1].replace("\n", "") for i in file.readlines()}
        self.path = self.config["PARENT"]
        print(f"[GLOBAL] The parent-path: {self.path}")
    def read(self, file=None, **kwargs):
        """Parse one .fcs file into self.meta / self.dataset (defaults to the file selected by __files)."""
        if not file:
            self.__files()
            file=self.file
        process = fcsReader(file)
        self.meta = process.meta
        self.dataset = process.data
    def __files(self, top=None, delimiter="\\", **kwargs):
        # Walk the parent directory and collect every .fcs file path.
        if not top: top=self.path
        self.files = [f"{i[0]}{delimiter}{k}" for i in os.walk(top) for k in i[-1] if k.endswith(".fcs")]
        if self.name: self.file = [i for i in self.files if self.name in i][0]
        else:
            self.file = self.files[self.pos]
        # Display names: "<parent folder>_<file name>".
        self.names = [f"{i.split(delimiter)[-2]}_{i.split(delimiter)[-1]}" for i in self.files]
    ########################
    ### Analysis methods ###
    ########################
    def kmeans(self, dataset=None, nclusters=2, logx=False, logy=False, limit_dataset=None, transpose=False, channels=None, **kwargs):
        """Cluster the dataset with KMeans and scatter-plot the first two channels colored by cluster."""
        print("[KMeans] Running KMeans clustering")
        if dataset is None: dataset = self.dataset
        if limit_dataset: pr_dataset = df(dataset, index=dataset.index, columns=limit_dataset)
        elif not limit_dataset: pr_dataset = df(dataset, index=dataset.index)
        # Necessary for seaborn, providing the number of
        # clusters to color
        predict = KMeans(n_clusters=nclusters, **kwargs).fit(pr_dataset)
        predicted = df(predict.predict(pr_dataset), columns=["mapping"])
        dataset = concat([dataset, predicted], axis=1)
        if channels and not transpose: x,y=channels[0],channels[1]
        elif channels and transpose: x,y=channels[1],channels[0]
        print(f"[KMeans] x: {x}, y: {y}")
        if logx is True: dataset[x] = dataset[x].apply(func=lambda x: self.log(x))
        if logy is True: dataset[y] = dataset[y].apply(func=lambda y: self.log(y))
        # Using seaborn so as to color map the scatter plot
        order = [i for i in range(nclusters)]
        fg = seaborn.FacetGrid(data=dataset, hue="mapping", hue_order=order, aspect=1.61)
        fg.map(plt.scatter, x, y).add_legend()
    def histo(self, dataset=None, channels=None):
        # Not implemented yet.
        if dataset is None: dataset=self.dataset
        if channels is None: channels=self.channels
    def log(self, i):
        """Natural log of i, or None for non-positive values (kept out of plots)."""
        if i > 0: return log(i)
        else: return None
    def plot(self, dataset=None, xfunc=None, yfunc=None, transpose=False, save=False, threeD=False, **kwargs):
        # We are using the plot method associated with the dataframe
        # Note that that transposing the
        if not dataset: dataset = self.dataset
        if transpose:
            temp = kwargs["x"]
            kwargs["x"] = kwargs["y"]
            kwargs["y"] = temp
        if xfunc: dataset[kwargs["x"]] = dataset[kwargs["x"]].apply(func=lambda x: xfunc(x))
        if yfunc: dataset[kwargs["y"]] = dataset[kwargs["y"]].apply(func=lambda y: yfunc(y))
        self.dataset.plot(**kwargs)
        if not save: plt.show()
    def plot_3d(self, dataset=None, xfunc=None, yfunc=None, zfunc=None, save=False, threeD=False, kind="scatter", transpose=False, **kwargs):
        # We are using the plot method associated with the dataframe
        # Note that that transposing the
        if not dataset: dataset = self.dataset
        if transpose:
            temp = kwargs["x"]
            kwargs["x"] = kwargs["y"]
            kwargs["y"] = temp
        if xfunc: dataset[kwargs["x"]] = dataset[kwargs["x"]].apply(func=lambda x: xfunc(x))
        if yfunc: dataset[kwargs["y"]] = dataset[kwargs["y"]].apply(func=lambda y: yfunc(y))
        # NOTE(review): the z column is transformed with yfunc, and the
        # guard also checks yfunc -- looks like it should be zfunc; verify.
        if yfunc: dataset[kwargs["z"]] = dataset[kwargs["z"]].apply(func=lambda z: yfunc(z))
        threedee = plt.figure().gca(projection="3d")
        if kind=="scatter": threedee.scatter(dataset[kwargs["x"]],dataset[kwargs["y"]],dataset[kwargs["z"]])
        threedee.set_xlabel(kwargs["x"])
        threedee.set_ylabel(kwargs["y"])
        threedee.set_zlabel(kwargs["z"])
        if not save: plt.show()
    def freq(self, column, dataset=None, scope = 64, func=None, *args, **kwargs):
        """Return a histogram (counts per bin) of *column*, with *scope* equal-width bins."""
        print("[FREQ] Running frequency method")
        if not dataset: dataset=self.dataset
        if func: dataset[column] = dataset[column].apply(func=lambda x: func(x))
        # Converting it to a more
        _min, _max = dataset[column].min(), dataset[column].max()
        res = (_max - _min)/scope
        frequency = dataset[column].groupby(cut(dataset[column], np.arange(_min,_max,res))).count()
        return frequency
    def saveplots(self, func=None, folder=None, rdata=False, delimiter="\\", description=None, log_overwrite=True, logfile="log.txt", *args, **kwargs):
        """Run *func* for every discovered .fcs file and save each figure as a PNG in *folder*."""
        if not folder: folder=self.config["OUTPUT"]
        use("Agg")
        for pos, file in enumerate(self.files):
            self.read(file=file)
            name = f"{folder}{self.names[pos].replace('.fcs','')}{description}{pos}.png"
            if not func:
                # NOTE(review): ``dataset`` is not defined in this scope --
                # the no-func branch would raise NameError; verify.
                dataset.plot(**kwargs)
            elif func and not rdata:
                func(*args, **kwargs)
            elif func and rdata:
                # rdata: the callable returns data that is plotted here.
                data = func(*args, **kwargs)
                data.plot()
            plt.savefig(name)
            plt.close()
            if log_overwrite and pos==0:
                # Truncate the log file once, on the first iteration.
                with open(f"{folder}{logfile}", "w"):
                    pass
            # self.logger(file=name, logfile=f"{folder}{logfile}")
    def limiter(self, channels, dataset=None, xmax=None, xmin=None,ymax=None, ymin=None, nclusters=2, save=False, **kwargs):
        """Color points by whether they fall inside the given x/y bounds (writes a 0/1 "mapping" column)."""
        print("[LIMITER] Running the limiter")
        if not dataset: dataset = self.dataset
        x = channels[0]
        y = channels[1]
        # For convenience; who gives a shit about overhead.
        upper_limit = lambda name, _max: dataset[name].apply(lambda k: 1 if k <= _max else 0)
        lower_limit = lambda name, _min: dataset[name].apply(lambda k: 1 if k >= _min else 0)
        # NOTE(review): each assignment overwrites "mapping", so only the
        # last provided bound takes effect; verify intent.
        if xmax: dataset["mapping"] = upper_limit(x, xmax)
        if ymax: dataset["mapping"] = upper_limit(y, ymax)
        if xmin: dataset["mapping"] = lower_limit(x, xmin)
        if ymin: dataset["mapping"] = lower_limit(y, ymin)
        order = [i for i in range(nclusters)]
        fg = seaborn.FacetGrid(data=dataset, hue="mapping", hue_order=order, aspect=1.61)
        fg.map(plt.scatter, x, y).add_legend()
        if not save: plt.show()
    def plot_map(self, x=None, y=None, dataset=None, xfunc=None, yfunc=None, transpose=False, save=False, threeD=False, nclusters=2, **kwargs):
        """Scatter-plot x vs y colored by an existing "mapping" column."""
        print("[MAPPER] Running the map plotter")
        if not dataset: dataset = self.dataset
        order = [i for i in range(nclusters)]
        fg = seaborn.FacetGrid(data=dataset, hue="mapping", hue_order=order, aspect=1.61)
        fg.map(plt.scatter, x, y).add_legend()
        if not save: plt.show()
    def logger(self, file, clusters=2, dataset=None, map="mapper", logfile="log.txt", state="a", folder=None, **kwargs):
        """Append the ratio of mapping==1 rows ("debris/total") for *file* to the log file."""
        if not dataset: dataset = self.dataset
        data = list(dataset["mapping"])
        first = 0
        tot = len(data)
        for i in data:
            if i == 1: first +=1
        ratio = f"{(first/tot)*100}%"
        to_write=f"**************\nSample: {file}\n debris/total: {ratio}\n"
        # Note: the context variable shadows the ``file`` parameter here.
        with open(logfile, state) as file:
            file.write(to_write)
    def gen_html(self, dataset=None, channels=["FSC-A", "SSC-A"]):
        """Export an interactive mpld3 scatter plot as HTML plus a separate JS file."""
        if not dataset: dataset = self.dataset
        data = [dataset[i].values for i in channels]
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plot = ax.scatter(data[0], data[1])
        plugins.clear(fig)
        plugins.connect(fig, plugins.LinkedBrush(plot), plugins.ClickSendToBack(plot))
        the_html = mpld3.fig_to_html(fig)
        with open("initialfigure.html", "w") as file:
            file.write(the_html)
        # Pull the inline <script> out into its own .js file and reference it.
        o = bs(open("initialfigure.html"), "html.parser")
        script = str(o.find_all("script")[0])
        script_2 = script.replace("<script>","").replace("</script>","")
        with open("the_figure.js", "w") as file:
            file.write(script_2)
        with open("the_figure.html", "w") as file:
            the_html = the_html.replace(script, "<script src='.\\the_figure.js'></script>")
            file.write(the_html)
    def gen_html_ply(self, dataset=None, channels=["FSC-A", "SSC-A"]):
        """Open an interactive plotly scatter plot of the first two channels."""
        if not dataset: dataset = self.dataset
        data = [dataset[i].values for i in channels]
        # Note that u should be looking for the zoomlayer class,
        # to get the box selection
        ply.offline.plot({"data":[Scatter(x=data[0], y=data[1], mode="markers")]}, )
def _log(i):
if i > 0: return log(i)
else: return None
if __name__ == '__main__':
    # Headless backend so no display is required when run as a script.
    use("Agg")
    run = Analyze()
    run.read(file="C:\\Users\\<NAME>\\Desktop\\Emb_data\\exporteddebrisembla\\160420_O8-289\\72307.fcs")
    run.gen_html_ply()
HypoChloremic/fcsan | src/dash_server.py | from dash.dependencies import Input, Output
from analyze import Analyze
import dash
import dash_core_components as dcc
import dash_html_components as html
import json
# Build the Analyze pipeline once at import time and create the Dash app.
analyze_run = Analyze()
# NOTE(review): Analyze.read() returns None, so ``data`` is unused/None;
# the dataset is accessed via analyze_run.dataset below -- verify.
data = analyze_run.read()
print(analyze_run.meta)
app = dash.Dash(__name__)
app.layout = html.Div([
dcc.Graph(
id='basic-interactions',
figure={
'data': [
{
'x': analyze_run.dataset["FSC-A"].values,
'y': analyze_run.dataset["SSC-A"].values,
'mode': 'markers',
},
]
}
),
html.Div([
dcc.Markdown("""
**Hover Data**
Mouse over values in the graph.
""".replace(' ', '')),
html.Pre(id='hover-data')
]),
html.Div([
dcc.Markdown("""
**Click Data**
Click on points in the graph.
""".replace(' ', '')),
html.Pre(id='click-data'),
]),
html.Div([
dcc.Markdown("""
**Selection Data**
Choose the lasso or rectangle tool in the graph's menu
bar and then select points in the graph.
""".replace(' ', '')),
html.Pre(id='selected-data'),
])
])
@app.callback(
    Output('hover-data', 'children'),
    [Input('basic-interactions', 'hoverData')])
def display_hover_data(hoverData):
    """Render the graph's hover payload as pretty-printed JSON."""
    #
    # This is where you can access the hover data
    # This function will get called automatically when you hover over points
    # hoverData will be equal to an object with that data
    # You can compute something off of this data, and return it to the front-end UI
    #
    return json.dumps(hoverData, indent=2)
@app.callback(
    Output('click-data', 'children'),
    [Input('basic-interactions', 'clickData')])
def display_click_data(clickData):
    """Render the graph's click payload as pretty-printed JSON."""
    # Similarly for data when you click on a point
    return json.dumps(clickData, indent=2)
@app.callback(
    Output('selected-data', 'children'),
    [Input('basic-interactions', 'selectedData')])
def display_selected_data(selectedData):
    """Echo the lasso/box selection payload as pretty-printed JSON.

    Also prints it to stdout, mirroring the original server-side trace.
    """
    print(selectedData)
    serialized = json.dumps(selectedData, indent=2)
    return serialized
# Start the built-in development server (debug=True enables hot reload).
app.run_server(debug=True)
HypoChloremic/fcsan | src/dropdown_plotly.py | <filename>src/dropdown_plotly.py<gh_stars>0
import plotly
from plotly.graph_objs import Scattergl, Layout
import plotly.figure_factory as ff
import plotly.graph_objs as go
import pandas as pd
plotly.offline.init_notebook_mode(connected=True)
# Demo data: three equal-length series selectable on either axis.
columns = ["x","y","z"]
x = [1,2,3,4,5,6,7,8,9,10]
y = [2,3,4,5,6,7,8,9,10,11]
z = [3,4,5,6,7,8,9,10,11,12]
d = [x,y,z]
data = []
# One trace per (series, series) pair. Note the f-string interpolates the
# raw lists, so legend names are the full number lists, e.g. "[1, 2, ...]vs[...]".
for i in d:
    for k in d:
        data.append(go.Scatter(x=i, y=k, name=f"{i}vs{k}"))
# Pink gradient offered by the colour dropdown.
colors = ['#ffaeb9', '#ffb6c0', '#ffbec7', '#ffc6ce', '#ffced5',
          '#ffd6dc', '#ffdee3', '#ffe6ea', '#ffeef1', '#fff6f8']
color_buttons = []
column_buttons_x = []
column_buttons_y = []
# Build the three updatemenu button groups: line colour, x column, y column.
for i in colors:
    color_buttons.append(
        dict(args=['line.color', i],
             label=i, method='restyle')
    )
for j in columns:
    column_buttons_x.append(
        dict(args=['x',j],
             label=j,method='update')
    )
for k in columns:
    column_buttons_y.append(
        dict(args=['y',k],
             label=k,method='update')
    )
# Dropdowns are placed in the left margin (negative paper coordinates).
layout = Layout(
    annotations=[dict(text='Change Color',
                      x=-0.25, y=0.83,
                      xref='paper', yref='paper',
                      showarrow=False)],
    updatemenus=list([
        dict(x=-0.1, y=0.7,
             yanchor='middle',
             bgcolor='c7c7c7',
             buttons=list(color_buttons)),
        dict(x=-0.1,y=0.5,
             yanchor = 'middle',
             bgcolor = 'c7c7c7',
             buttons=list(column_buttons_x)),
        dict(x=-0.1,y=0.3,
             yanchor = 'middle',
             bgcolor = 'c7c7c7',
             buttons=list(column_buttons_y))
    ])
)
# NOTE(review): `trace` uses the final values of the loop variables j/k and
# is never added to `data` or the figure — confirm whether it can be removed.
trace = go.Scatter(
    x=[j],
    y=[k],
    mode='markers'
)
fig = dict(data=data, layout=layout)
plotly.offline.plot(fig)
ConnectionMaster/YCSB | vitess/scripts/workload-runner-generator.py | <reponame>ConnectionMaster/YCSB<filename>vitess/scripts/workload-runner-generator.py
import json
import sys
def main(unused_argv):
    """Generate per-YCSB-instance shell scripts for the workloads in a JSON config.

    Reads its real arguments from sys.argv (the parameter is unused):
      1: vtgate host
      2: number of YCSB instances (one workload-runner<N>.sh is written per instance)
      3: workload JSON file (default 'workloads.json')
      4: shard list (default '0')

    Each script runs every workload in order; 'load' actions get insertstart
    offsets so instances write disjoint key ranges.
    """
    vtgate_host = sys.argv[1]
    ycsb_count = int(sys.argv[2])
    workload_file = sys.argv[3] if len(sys.argv) > 3 else 'workloads.json'
    # Guard the optional argument: the original `sys.argv[4] or '0'` raised
    # IndexError instead of defaulting when fewer than five args were given.
    shards = sys.argv[4] if len(sys.argv) > 4 else '0'
    with open(workload_file) as data_file:
        data = json.load(data_file)
    # Kept as xrange: this script targets Python 2 (see original has_key usage).
    for i in xrange(ycsb_count):
        with open('workload-runner%d.sh' % i, 'w') as cmd_file:
            for index, workload in enumerate(data["workloads"]):
                action = workload["action"]
                create_table = "'CREATE TABLE usertable (YCSB_KEY VARCHAR (255) PRIMARY KEY, field0 TEXT, field1 TEXT, keyspace_id BIGINT unsigned NOT NULL)'"
                cmd = 'YCSB/bin/ycsb %s vitess -P YCSB/workloads/workload%s -p hosts=%s -p shards=%s -p keyspace=test_keyspace -p insertorder=ordered -p fieldcount=2 -threads %s -s' % (
                    action, workload["workload"], vtgate_host, shards, workload["threads"])
                if action == 'run':
                    cmd += ' -p operationcount=%s' % workload['operationcount']
                else:
                    # Offset each instance so loads cover disjoint key ranges.
                    cmd += ' -p recordcount=%s -p insertcount=%s -p insertstart=%d' % (workload['recordcount'], workload['recordcount'], i * int(workload['recordcount']))
                # `key in dict` replaces dict.has_key(), which Python 3 removed;
                # it behaves identically on Python 2.
                if workload.get('createtable') == 'True':
                    cmd += ' -p createTable=%s -p doCreateTable=true' % create_table
                if 'maxexecutiontime' in workload:
                    cmd += ' -p maxexecutiontime=%s' % workload['maxexecutiontime']
                # sleep for 1 min before each run so the preceding load settles
                if action == 'run':
                    cmd_file.write('sleep 60\n')
                cmd_file.write('%s > ~/workloadlogs/workload%s%02d.txt\n' % (cmd, workload["workload"], index))
                if action == 'load' and 'wait' in workload:
                    cmd_file.write('sleep %s\n' % workload['wait'])
if __name__ == '__main__':
    # main() ignores its argument and reads sys.argv directly.
    main(sys.argv)
|
xeruvimov/admins_pain | replacer.py | <reponame>xeruvimov/admins_pain
from datetime import date
import math
# Publication targets understood by prepare_text().
SITE = 'site'
TWITTER = 'twitter'
INSTAGRAM = 'instagram'
TELEGRAM = 'tlg'
def prepare_text(text, target, img):
    """Adapt a VK post's text for the given publication target.

    :param text: raw post text (may contain the community's own @-tag)
    :param target: one of SITE, TWITTER, INSTAGRAM, TELEGRAM
    :param img: image URLs; only consulted for the SITE target
    :return: the prepared text, or None for an unknown target
    """
    res_text = remove_tag(text)
    # res_text = replace_tag_with_link(res_text)
    # Compare with == rather than `is`: identity checks on strings only work
    # by accident of CPython interning and break for equal-but-distinct objects.
    if target == SITE:
        return prepare_text_for_site(res_text, img)
    if target in (TWITTER, INSTAGRAM):
        # Short-form targets get only the headline sentence.
        return get_first_sentence(res_text)
    if target == TELEGRAM:
        return res_text
    return None
def replace_tag_with_link(text):
    """Turn VK @-mentions in *text* into vk.com links.

    Returns *text* unchanged when it contains no '@'. (The original fell
    off the end of the function and returned None in that case.)
    """
    if '@' in text:
        return text.replace('@', 'vk.com/')
    return text
def remove_tag(text):
    """Strip every occurrence of the community's own @-tag from *text*."""
    community_tag = '@samara_it_community'
    return ''.join(text.split(community_tag))
def prepare_text_for_site(text, img):
    """Wrap *text* in the static site's +++ front-matter block.

    The title is the post's first line, the date is today, and the body
    follows a ``<!-- more -->`` marker.
    """
    title = get_first_sentence(text)
    today = date.today().strftime("%Y-%m-%d")
    pieces = [
        '+++\n title = "', title, '" \n date = ', today,
        '\ndescription = "test"',
        '\n+++',
        '\n<!-- more -->\n',
        text, '\n',
    ]
    # NOTE(review): one blank line is appended per image URL but the URLs
    # themselves are never written — confirm whether image markup is missing.
    if img is not None:
        pieces.extend('\n' for _ in img)
    return ''.join(pieces)
# def find_pos_of_first_separator(text):
# return text.find('\n')
# dot = text.find('.')
# vosk = text.find('!')
# vop = text.find('?')
# dot=math.inf if dot == -1 else dot
# vosk = math.inf if vosk == -1 else vosk
# vop = math.inf if vop == -1 else vop
# return min(dot, vosk, vop)
def get_first_sentence(text):
    """Return everything before the first newline (the whole text if there is none)."""
    first_line, _, _ = text.partition('\n')
    return first_line
# return text[0:find_pos_of_first_separator(text)]
# if __name__ == '__main__':
# print(prepare_text_for_site(long_string,[])) |
xeruvimov/admins_pain | main.py | import threading
import traceback
from datetime import date
from datetime import datetime
import telebot
import vk_api
from telebot.types import InputMediaPhoto
from vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType
import data
import replacer
start_msg = "Start listen just now"
# Preferred break characters when chunking long Telegram messages.
message_breakers = [':', ' ', '\n']
# Telegram caps messages at 4096 chars; keep a little headroom for splitting.
max_message_length = 4091
bot = telebot.TeleBot(data.BOT_TOKEN)
# Lazily-initialised singletons (see get_session / get_longpoll).
vk_session = None
vk_long_poll = None
repost_image_urls = None
original_post_img_urls = None
def get_session():
    """Return the process-wide vk_api session, creating it on first use."""
    global vk_session
    if vk_session is None:
        vk_session = vk_api.VkApi(token=data.GROUP_TOKEN)
    # Both branches of the original returned the same object; a single
    # return after the lazy init is equivalent and clearer.
    return vk_session
def get_longpoll():
    """Return the process-wide VK long-poll listener, creating it on first use.

    Uses a 60-second wait, matching the original construction.
    """
    global vk_long_poll
    if vk_long_poll is None:
        vk_long_poll = VkBotLongPoll(get_session(), data.GROUP_ID, 60)
    # Redundant duplicate return branches collapsed into one.
    return vk_long_poll
def check_posts_vk(chat_id):
    """
    Listen to the VK group long-poll forever and mirror each new wall post
    to the Telegram chat *chat_id* and to the static site.

    :param chat_id: Telegram chat/channel id that receives the repost
    """
    global bot
    global original_post_img_urls
    longpoll = get_longpoll()
    print(start_msg)
    bot.send_message(data.MY_CHAT_ID, start_msg)
    # Blocks indefinitely; each iteration handles one long-poll event.
    for event in longpoll.listen():
        if event.type == VkBotEventType.WALL_POST_NEW:
            post = event.obj
            print('------------------------------------------------------------------------------------------------')
            print(post)
            text = post['text']
            images = []       # photo attachment objects
            links = []        # video player URLs (NOTE(review): collected but never used below)
            doc = []          # flat [title, url, title, url, ...] pairs of document attachments
            attachments = []  # URLs of other attachment kinds (NOTE(review): also unused below)
            if 'attachments' in post:
                attach = post['attachments']
                for add in attach:
                    if add['type'] == 'photo':
                        img = add['photo']
                        images.append(img)
                    elif add['type'] == 'video':
                        video = add['video']
                        if 'player' in video:
                            links.append(video['player'])
                    elif add['type'] == 'doc':
                        docs = add['doc']
                        if 'url' in docs:
                            doc.append(docs['title'])
                            doc.append(docs['url'])
                    else:
                        # Unknown attachment kinds: keep any URL found in their payload.
                        for (key, value) in add.items():
                            if key != 'type' and 'url' in value:
                                attachments.append(value['url'])
            print(doc, '\n')
            if len(doc) != 0:
                # Append document titles/URLs to the post text, one per line.
                text += '\n'
                text += '\n'.join(doc)
            send_posts_text(replacer.prepare_text(text, replacer.TELEGRAM, None), chat_id)
            if len(images) > 0:
                # Pick one URL per photo. "sizes" entries carry VK's one-letter
                # size type. NOTE(review): taking the lexicographic max of that
                # letter may not select the largest rendition — confirm.
                original_post_img_urls = list(
                    map(lambda img: max(img["sizes"], key=lambda size: size["type"])["url"], images))
                print(original_post_img_urls)
                bot.send_media_group(chat_id, map(lambda url: InputMediaPhoto(url), original_post_img_urls))
            bot.send_message(data.MY_CHAT_ID, "News posted on telegram")
            create_site_post(text)
            bot.send_message(data.MY_CHAT_ID, "News posted on site")
def create_site_post(text):
    """Render *text* as a static-site page and write it to the pages folder.

    The file name combines today's date with the current hour/minute/second
    so multiple posts on the same day do not overwrite each other.
    """
    site_text = replacer.prepare_text(text, replacer.SITE, original_post_img_urls)
    print(site_text)
    # Capture the clock once: three separate datetime.now() calls could
    # straddle a second boundary and yield an inconsistent name.
    now = datetime.now()
    file_path = data.PATH_TO_SITE_PAGES + str(date.today().strftime("%Y-%m-%d")) + str(now.hour) + str(
        now.minute) + str(now.second) + ".md"
    print(file_path)
    # `with` guarantees the handle is closed even if write() raises
    # (the original left the file open on error).
    with open(file_path, 'w+', encoding='utf-8') as page_file:
        page_file.write(site_text)
def send_posts_text(text, chat_id):
    """Send *text* to *chat_id*, chunked to respect Telegram's length limit."""
    global bot
    # Guard clause: nothing to send for an empty post body.
    if text == '':
        print('no text')
        return
    for chunk in split(text):
        bot.send_message(chat_id, chunk)
def split(text):
    """Split *text* into chunks short enough for Telegram.

    Prefers breaking at the last break character (see ``message_breakers``)
    within ``max_message_length``. When a chunk that long contains no break
    character at all, it is cut hard at the limit — the original recursed
    forever in that case, because every rfind() returned -1 and the
    "remainder" slice was the whole text again.
    """
    global message_breakers
    global max_message_length
    if len(text) < max_message_length:
        return [text]
    last_index = max(
        map(lambda separator: text.rfind(separator, 0, max_message_length), message_breakers))
    if last_index < 0:
        # No break character found: hard-split, keeping every character.
        return [text[:max_message_length]] + split(text[max_message_length:])
    # Drop the break character itself, as before.
    good_part = text[:last_index]
    bad_part = text[last_index + 1:]
    return [good_part] + split(bad_part)
@bot.message_handler(commands=['test'])
def test(message):
    """Health-check command: reply to /test so the operator knows the bot is alive."""
    chat = message.chat.id
    bot.send_message(chat, "I`m still work")
if __name__ == '__main__':
    # Telegram command polling runs on its own thread so the blocking VK
    # long-poll loop below can own the main thread.
    bot_polling_thread = threading.Thread(target=bot.polling, args=())
    bot_polling_thread.start()
    try:
        check_posts_vk(data.CHANEL_ID)
    except Exception:
        # First failure: report the traceback to the admin chat, then retry once.
        bot.send_message(data.MY_CHAT_ID, traceback.format_exc())
        bot.send_message(data.MY_CHAT_ID, "Stop check new posts")
        bot.send_message(data.MY_CHAT_ID, "Attempt to restore work")
        try:
            check_posts_vk(data.CHANEL_ID)
        except Exception:
            # Second failure: give up and ask for a manual restart.
            bot.send_message(data.MY_CHAT_ID, traceback.format_exc())
            bot.send_message(data.MY_CHAT_ID, "Stop check new posts\nRestart me")
|
xeruvimov/admins_pain | data.py | <reponame>xeruvimov/admins_pain<filename>data.py
import configparser
# All secrets and paths come from an ini file next to the code.
config_path = 'settings.ini'
config = configparser.ConfigParser()
# NOTE(review): ConfigParser.read() silently ignores a missing file; the
# get() calls below would then raise NoSectionError. Consider checking the
# list returned by read() for a clearer startup error.
config.read(config_path)
# VK group credentials
GROUP_TOKEN = config.get('VK', 'GROUP_TOKEN')
GROUP_ID = config.get('VK', 'GROUP_ID')
# Telegram bot credentials and destination chats
BOT_TOKEN = config.get('Telegram', 'BOT_TOKEN')
MY_CHAT_ID = config.get('Telegram', 'MY_CHAT_ID')
CHANEL_ID = config.get('Telegram', 'CHANEL_ID')
# Output directory for generated static-site pages
PATH_TO_SITE_PAGES = config.get('Site', 'PATH_TO_PAGES')
|
rt-phb/Spooq | src/spooq2/spooq2_logger.py | <reponame>rt-phb/Spooq
"""
Global Logger instance used by Spooq2.
Example
-------
>>> import logging
>>> logga = logging.getLogger("spooq2")
<logging.Logger at 0x7f5dc8eb2890>
>>> logga.info("Hello World")
[spooq2] 2020-03-21 23:55:48,253 INFO logging_example::<module>::4: Hello World
"""
import os
import sys
import logging
from spooq2._version import __version__ as version_number
initialized = False
def initialize():
    """
    Initializes the global "spooq2" logger with a pre-defined stdout handler.

    No input parameters are needed, as the configuration is received via
    :py:meth:`get_logging_level`. Calling this more than once is a no-op.

    Note
    ----
    The output format is defined as:

    | "[%(name)s] %(asctime)s %(levelname)s %(module)s::%(funcName)s::%(lineno)d: %(message)s"
    | For example "[spooq2] 2020-03-11 15:40:59,313 DEBUG newest_by_group::__init__::53: group by columns: [u'user_id']"
    """
    global initialized
    if initialized:
        return

    logging_level = get_logging_level()
    logger = logging.getLogger("spooq2")
    logger.setLevel(logging_level)

    # Only attach a handler once, even if the module is re-imported; the
    # list's truthiness replaces the redundant `not len(...)` of the original.
    if not logger.handlers:
        formatter = logging.Formatter(
            "[%(name)s] %(asctime)s %(levelname)s %(module)s::%(funcName)s::%(lineno)d: %(message)s"
        )
        # STDOUT Handler
        ch_out = logging.StreamHandler(sys.stdout)
        ch_out.setLevel(logging_level)
        ch_out.setFormatter(formatter)
        logger.addHandler(ch_out)

    initialized = True
    logger.info(f"Thank you for choosing Spooq {version_number}!")
def get_logging_level():
    """
    Returns the logging level depending on the environment variable `SPOOQ_ENV`.

    Note
    ----
    If SPOOQ_ENV starts with

    * **dev** -> "DEBUG"
    * **test** -> "ERROR"
    * **pr** -> "WARN"
    * anything else -> "INFO"

    Returns
    -------
    :any:`str`
        Logging level
    """
    env = os.getenv('SPOOQ_ENV', "default").lower()
    # First matching prefix wins; the order mirrors the original if/elif chain.
    for prefix, level in (("dev", "DEBUG"), ("test", "ERROR"), ("pr", "WARN")):
        if env.startswith(prefix):
            return level
    return "INFO"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.