repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
pwinslow/change-org-app | get_data.py | """
This file takes two cmd line arguments at runtime:
1) Path to list of change.org petition urls
2) API key
in the following format:
python get_data.py --url_list_path=path/to/list/of/urls --api_key=api/key
It then makes multiple API calls to change.org to perform the following tasks for each petition url:
1) Obtain a unique petition id. This will used for all further API calls to collect petition data.
2) Obtain petition reasons. Each person that signs the petition has an option to leave a reason for signing.
This can help gauge the public response to the cause or the text of the petition.
3) Obtain petition updates. The person who owns the petition has the option of providing updates on the petition
at any point in time. This can tell us information about the time victory or loss occurred as well as gauge
the progress of the petition over time.
4) Obtain petition data and meta data. This is the meat of the data collection, it contains the petition text
itself as well as any other data specified at the time of creation.
Once all the above data has been collected for a given petition, add a new row to a postgresSQL database.
"""
# Import for parsing cmd line arguments
import argparse
# Imports for obtaining and parsing API data
import requests
from time import sleep
import json
# Miscellaneous imports
from sys import exit
import pandas as pd
def is_json(data):
    """Return True when *data* parses as JSON, False otherwise."""
    try:
        json.loads(data)
        return True
    except ValueError:
        return False
class GetData(object):
"""Class for gathering petition data from the Change.org API."""
def __init__(self):
# Read path to url list and api key from cmd line args
self.url_list_path, self.api_key = self.get_cmdline_args()
# Read url list from path
with open(self.url_list_path, "r+") as f:
self.url_list = f.readlines()
@staticmethod
def get_cmdline_args():
# Initialize arg parse object and define desired cmd line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--url_list_path", help="Path to list of change.org petition urls", type=str)
parser.add_argument("--api_key", help="API key to make calls with", type=str)
# Get path to list of petition urls and API key
args = parser.parse_args()
path, api = args.url_list_path, args.api_key
# Check that all arguments are non-null
if path and api:
return path, api
else:
print "Missing some arguments. Please review usage..."
print parser.print_help()
exit()
def output_filename(self):
# Extract the appropriate file name from the path
file_path = "/".join(self.url_list_path.split("/")[:-1])
file_name = self.url_list_path.split("/")[-1].split("-")[1].split(".")[0] + "_data.csv"
return "/".join([file_path, file_name])
def get_response(self, input_url, first_try=True):
# Make an api call and handle HTTP errors
try:
r = requests.request("GET", input_url.strip())
r.raise_for_status()
return r
except requests.exceptions.HTTPError as err:
return "HTTP Error: {}".format(err.response.status_code)
except requests.exceptions.ConnectionError as err:
if first_try:
sleep(5)
self.get_response(input_url, first_try=False)
else:
return "Connection Error: {}".format(err.response.status_code)
def get_petition_id(self, petition_url):
# Make api call for petition id
id_url = ("https://api.change.org/v1/petitions/"
"get_id?petition_url={0}&api_key={1}").format(petition_url.strip(),
self.api_key)
response = self.get_response(id_url)
try:
id_json = json.loads(response.text)
petition_id = id_json["petition_id"]
return petition_id
except AttributeError:
return response
def reasons_updates(self, petition_id, data="reasons"):
# Check to make sure data flag is valid
if data not in ["reasons", "updates"]:
print ("Please choose valid data flag for reasons_updates method.\n"
"Valid choices: reasons/updates")
exit(2)
# Initialize array to store reasons/updates
arr = []
# Define initial API endpoint url for list of reasons/updates
data_url = ("https://api.change.org/v1/petitions/{0}"
"/{1}?page_size=100&sort=time_asc&api_key={2}").format(petition_id,
data,
self.api_key)
# Make initial API call
response = self.get_response(data_url)
# Start data collection
try:
# Get first batch of data
data_json = json.loads(response.text)
arr.extend(data_json[data])
# Get total_pages value
total_pages = int(data_json["total_pages"])
# Make additional calls to collect data from remaining pages if they exist
if total_pages > 1:
for page in range(2, total_pages+1, 1):
# Form next API endpoint url
next_url = data_url + "&page={}".format(page)
# Make next call
response = self.get_response(next_url)
# Collect data
try:
data_json = json.loads(response.text)
arr.extend(data_json[data])
except AttributeError:
continue
# Convert full reasons array into single json object
data_json = json.dumps(arr)
return data_json
except AttributeError:
return response
def petitions(self, petition_id):
# Specify fields to collect from petition data
fields = ",".join(["title",
"status",
"targets",
"overview",
"letter_body",
"signature_count",
"category",
"goal",
"created_at",
"end_at",
"creator_name",
"creator_url",
"organization_name",
"organization_url"])
# Define initial url for api call for petition data
data_url = ("https://api.change.org/v1/petitions/{0}"
"?fields={1}&api_key={2}").format(petition_id,
fields,
self.api_key)
# Make api call
response = self.get_response(data_url)
try:
data_json = json.loads(response.text)
return data_json
except AttributeError:
return response
def main():
    """Entry point: collect id, reasons, updates and data for each petition
    url, checkpointing the frame to CSV every 25 processed petitions."""
    # Initialize GetData object
    get_data = GetData()
    df = pd.DataFrame(columns=("id", "reasons", "updates", "data"))
    # Loop over urls
    for cnt, url in enumerate(get_data.url_list):
        # Get petition id
        _id = get_data.get_petition_id(url)
        # A non-int id means get_petition_id returned an error string; skip.
        if isinstance(_id, int):
            # Get reasons for signing petition
            reasons = get_data.reasons_updates(_id, data="reasons")
            # Get updates for petition
            updates = get_data.reasons_updates(_id, data="updates")
            # Get petition data
            data = get_data.petitions(_id)
            if is_json(reasons) and is_json(updates):
                # Organize all data into instance and add to data frame
                df.loc[cnt] = [str(_id), reasons, updates, data]
                print "Done with {}".format(_id)
                # Periodic checkpoint so progress survives crashes.
                if (cnt + 1) % 25 == 0:
                    df.to_csv(get_data.output_filename(), index=False)
            else:
                continue
        else:
            continue
    # Final write with everything collected.
    df.to_csv(get_data.output_filename(), index=False)
if __name__ == "__main__":
main()
|
chandansingh1653/python-application-CI-CD | tests/test_app.py | from app import index_page
def test_index_page():
    # The index route must return its fixed greeting verbatim.
    assert index_page() == "This is index page"
|
chandansingh1653/python-application-CI-CD | src/app.py | <filename>src/app.py<gh_stars>0
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index_page():
    """Root route: return a fixed plain-text page."""
    return "This is index page"
|
tdruiva/cerificates | generate_certificate.py | <reponame>tdruiva/cerificates
# -*- coding: utf-8 -*-
import csv, re
import codecs, hashlib, os.path, unicodedata, string
from subprocess import Popen, PIPE
class Certificate:
    """Render a per-person certificate by substituting the name into an
    SVG template and converting it to PDF with inkscape.

    NOTE(review): Python 2 code (`name.decode`, print statements elsewhere).
    """

    def __init__(self, name):
        # Load the SVG template and substitute the %%NOMBRE%% placeholder.
        svg_filename = 'certificate.svg'
        svg_file = codecs.open(svg_filename, "rb", "utf8")
        svg_content = svg_file.read()
        # `name` is a utf-8 byte string from the CSV; decode for templating.
        content = svg_content.replace("%%NOMBRE%%", name.decode('utf-8'))
        self.normalized_name = self._normalize_name(name)
        # Write the personalized SVG to /tmp for inkscape to consume.
        self.in_file = os.path.join("/tmp", self.normalized_name + ".svg")
        tmp_file = codecs.open(self.in_file, "w", "utf8")
        tmp_file.write(content)
        tmp_file.close()

    def _normalize_name(self, name):
        # Collapse non-word runs to '_', lowercase, cap at 30 chars.
        return re.sub(r"\W+", '_', name).strip().lower()[:30]

    def as_pdf(self):
        """Convert the generated SVG to PDF; return the output file path."""
        inkscape = '/usr/bin/inkscape'
        output_dir = os.getcwd() + '/to_print/pdf'
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        out_file = os.path.join(output_dir, self.normalized_name + ".pdf")
        # -z: no GUI, -f: input file, -A: export as PDF (inkscape 0.9x flags)
        p = Popen([inkscape, '-z', '-f', self.in_file, '-A', out_file], stdin=PIPE, stdout=PIPE)
        p.wait()
        return out_file
# Generate one certificate per row of the tab-separated names file.
CSV_PATH = 'names_to_print.csv'
with open(CSV_PATH, "rb") as csv_file:
    rows = csv.reader(csv_file, delimiter='\t')
    for row in rows:
        # First column carries the recipient's name.
        name = row[0]
        result = Certificate(name).as_pdf()
        # Report success/failure based on whether the PDF was produced.
        if os.path.isfile(result):
            print 'Certificate: ' + name + ' ========= OK'
        else:
            print 'Certificate: ' + name + ' ########## FAIL'
|
pwrliang/libvineyard | python/vineyard/data/base.py | <reponame>pwrliang/libvineyard<filename>python/vineyard/data/base.py<gh_stars>0
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import re
import vineyard
from vineyard._C import Object, ObjectMeta
from .utils import normalize_dtype
class ObjectSet:
    """Convenience view over the metadata of a vineyard object set."""

    def __init__(self, object_or_meta):
        # Accept either a vineyard Object or its metadata directly.
        if isinstance(object_or_meta, Object):
            self.meta = object_or_meta.meta
        else:
            self.meta = object_or_meta

    @property
    def num_of_instances(self):
        """Number of instances this set spans."""
        return int(self.meta['num_of_instances'])

    @property
    def num_of_objects(self):
        """Number of member objects in the set."""
        return int(self.meta['num_of_objects'])

    def get_member(self, idx):
        ''' Return the member of ith element.
        '''
        return self.meta.get_member('object_%d' % idx)

    def __getitem__(self, idx):
        ''' Return the member of ith element.
        '''
        return self.get_member(idx)

    def get_member_meta(self, idx):
        ''' Return the member of ith element.
        '''
        return self.meta['object_%d' % idx]
def int_builder(client, value):
    """Build vineyard::Scalar<int> metadata wrapping *value*."""
    meta = ObjectMeta()
    meta['typename'] = 'vineyard::Scalar<int>'
    meta['value_'] = value
    # `type(value).__name__` is the direct idiom; getattr was redundant.
    meta['type_'] = type(value).__name__
    # scalar payload lives in the metadata itself, no blob attached
    meta['nbytes'] = 0
    return client.create_metadata(meta)
def double_builder(client, value):
    """Build vineyard::Scalar<double> metadata wrapping *value*."""
    meta = ObjectMeta()
    meta['typename'] = 'vineyard::Scalar<double>'
    meta['value_'] = value
    # `type(value).__name__` is the direct idiom; getattr was redundant.
    meta['type_'] = type(value).__name__
    # scalar payload lives in the metadata itself, no blob attached
    meta['nbytes'] = 0
    return client.create_metadata(meta)
def string_builder(client, value):
    """Build vineyard::Scalar<std::string> metadata wrapping *value*."""
    meta = ObjectMeta()
    meta['typename'] = 'vineyard::Scalar<std::basic_string<char,std::char_traits<char>,std::allocator<char>>>'
    meta['value_'] = value
    # `type(value).__name__` is the direct idiom; getattr was redundant.
    meta['type_'] = type(value).__name__
    # scalar payload lives in the metadata itself, no blob attached
    meta['nbytes'] = 0
    return client.create_metadata(meta)
def tuple_builder(client, value, builder):
    """Build vineyard metadata for a python tuple.

    A 2-tuple maps to vineyard::Pair; any other length maps to
    vineyard::Tuple with one member per element.
    """
    if len(value) == 2:
        # use pair
        meta = ObjectMeta()
        meta['typename'] = 'vineyard::Pair'
        meta.add_member('first_', builder.run(client, value[0]))
        meta.add_member('second_', builder.run(client, value[1]))
        return client.create_metadata(meta)
    else:
        meta = ObjectMeta()
        meta['typename'] = 'vineyard::Tuple'
        # BUG FIX: the element count was hard-coded to 3; record the actual
        # tuple length so tuples of other sizes resolve correctly.
        meta['size_'] = len(value)
        for i, item in enumerate(value):
            meta.add_member('__elements_-%d' % i, builder.run(client, item))
        meta['__elements_-size'] = len(value)
        return client.create_metadata(meta)
def scalar_resolver(obj):
    """Resolve a vineyard::Scalar object into the python value it wraps.

    Returns None for unrecognized scalar typenames.
    """
    meta = obj.meta
    typename = obj.typename
    if typename == 'vineyard::Scalar<std::basic_string<char,std::char_traits<char>,std::allocator<char>>>':
        return meta['value_']
    elif typename == 'vineyard::Scalar<int>':
        return int(meta['value_'])
    elif typename in ('vineyard::Scalar<float>', 'vineyard::Scalar<double>'):
        return float(meta['value_'])
    return None
def pair_resolver(obj, resolver):
    """Resolve a vineyard::Pair into a python 2-tuple of resolved members."""
    members = (obj.member('first_'), obj.member('second_'))
    return tuple(resolver.run(m) for m in members)
def tuple_resolver(obj, resolver):
    """Resolve a vineyard::Tuple into a python tuple, element by element."""
    size = int(obj.meta['__elements_-size'])
    return tuple(resolver.run(obj.member('__elements_-%d' % i))
                 for i in range(size))
def array_resolver(obj):
    """Reconstruct a 1-D numpy array from a vineyard::Array object."""
    typename = obj.typename
    # Extract the element type from e.g. 'vineyard::Array<int>'.
    value_type = normalize_dtype(re.match(r'vineyard::Array<([^>]+)>', typename).groups()[0])
    # Zero-copy view over the array's buffer member.
    return np.frombuffer(memoryview(obj.member("buffer_")), dtype=value_type)
def object_set_resolver(obj):
    # Wrap the raw vineyard object into the ObjectSet convenience view.
    return ObjectSet(obj)
def register_base_types(builder_ctx=None, resolver_ctx=None):
    """Register the builtin scalar/tuple builders and resolvers on the
    given contexts (either context may be omitted)."""
    if builder_ctx is not None:
        builder_ctx.register(int, int_builder)
        builder_ctx.register(float, double_builder)
        builder_ctx.register(str, string_builder)
        builder_ctx.register(tuple, tuple_builder)
    if resolver_ctx is not None:
        resolver_ctx.register('vineyard::Scalar', scalar_resolver)
        resolver_ctx.register('vineyard::Pair', pair_resolver)
        resolver_ctx.register('vineyard::Tuple', tuple_resolver)
        resolver_ctx.register('vineyard::Array', array_resolver)
        resolver_ctx.register('vineyard::ObjectSet', object_set_resolver)
|
pwrliang/libvineyard | python/vineyard/data/utils.py | <filename>python/vineyard/data/utils.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
def normalize_dtype(dtype):
    ''' Normalize a descriptive C++ type to numpy.dtype.

    Unrecognized names are returned unchanged.
    '''
    if isinstance(dtype, np.dtype):
        return dtype
    if dtype in ['i32', 'int', 'int32', 'int32_t']:
        return np.dtype('int32')
    if dtype in ['u32', 'uint', 'uint_t', 'uint32', 'uint32_t']:
        return np.dtype('uint32')
    if dtype in [int, 'i64', 'int64', 'long long', 'int64_t']:
        return np.dtype('int64')
    if dtype in ['u64', 'uint64', 'uint64_t']:
        return np.dtype('uint64')
    if dtype in ['float', 'float32']:
        # BUG FIX: np.dtype('float') is float64; a C++ "float"/"float32"
        # is 4 bytes wide, so it must map to numpy float32.
        return np.dtype('float32')
    if dtype in [float, 'double', 'float64']:
        return np.dtype('double')
    return dtype
def build_buffer(client, address, size):
    """Create a sealed vineyard blob from *size* raw bytes at *address*.

    An empty blob is returned when size == 0.
    """
    if size == 0:
        return client.create_empty_blob()
    buffer = client.create_blob(size)
    # copy the caller's memory into the blob starting at offset 0
    buffer.copy(0, address, size)
    return buffer.seal(client)
def build_numpy_buffer(client, array):
    """Build a sealed vineyard blob holding *array*'s raw bytes (C order)."""
    if not array.flags['C_CONTIGUOUS']:
        # raw-pointer copy below requires contiguous memory
        array = np.ascontiguousarray(array)
    # base address of the array's data buffer
    address, _ = array.__array_interface__['data']
    return build_buffer(client, address, array.nbytes)
|
pwrliang/libvineyard | python/vineyard/core/driver.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
import functools
from sortedcontainers import SortedDict
from .utils import find_most_precise_match
class DriverContext():
    """Registry that attaches driver methods to resolved objects,
    dispatching on typename prefixes."""

    def __init__(self):
        # method name -> (typename prefix -> callable); prefixes kept
        # sorted so the most precise match can be found efficiently
        self.__factory = defaultdict(SortedDict)

    def register(self, typename_prefix, meth, func):
        # Register *func* as method *meth* for objects whose typename
        # starts with *typename_prefix*.
        self.__factory[meth][typename_prefix] = func

    def resolve(self, obj, typename):
        # Attach every registered method whose prefix matches *typename*
        # to *obj* as a bound-style partial, then return obj.
        for meth_name, methods in self.__factory.items():
            prefix, method = find_most_precise_match(typename, methods)
            if prefix is not None:
                meth = functools.partial(method, obj)
                # it shouldn't fail, since it has already been wrapped during resolving
                setattr(obj, meth_name, meth)
        return obj
default_driver_context = DriverContext()
def repartition(g):
    # Placeholder driver: graph repartitioning is not implemented yet.
    raise NotImplementedError('No repartition method implementation yet')
def register_builtin_drivers(ctx):
    """Register the built-in drivers (currently only graph repartition)."""
    assert isinstance(ctx, DriverContext)
    ctx.register('vineyard::Graph', 'repartition', repartition)
def registerize(func):
    ''' Registerize a method, add a `__factory` attribute and a `register`
    interface to a method.
    multiple-level register is automatically supported, users can
    >>> open.register(local_io_adaptor)
    >>> open.register(oss_io_adaptor)
    OR
    >>> open.register('file', local_io_adaptor)
    >>> open.register('odps', odps_io_adaptor)
    OR
    >>> open.register('file', 'csv', local_csv_reader)
    >>> open.register('file', 'tsv', local_tsv_reader)
    '''
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        return func(*args, **kwargs)
    # __factory is None until first registration, then either a flat list
    # (key-less form) or a nested dict of lists keyed by leading arguments.
    setattr(wrap, '__factory', None)
    def register(*args):
        if len(args) == 1:
            # flat form: register(handler)
            if wrap.__factory is None:
                wrap.__factory = []
            # the two forms cannot be mixed on the same function
            if not isinstance(wrap.__factory, list):
                raise RuntimeError('Invalid arguments: inconsistent with existing registerations')
            wrap.__factory.append(args[0])
        else:
            # keyed form: register(key, ..., handler)
            if wrap.__factory is None:
                wrap.__factory = {}
            if not isinstance(wrap.__factory, dict):
                raise RuntimeError('Invalid arguments: inconsistent with existing registerations')
            root = wrap.__factory
            # walk/create the nested dicts for all but the last key
            for arg in args[:-2]:
                if arg not in root:
                    root[arg] = dict()
                root = root[arg]
            # the last key maps to a list of handlers
            if args[-2] not in root:
                root[args[-2]] = list()
            root[args[-2]].append(args[-1])
    setattr(wrap, 'register', register)
    return wrap
__all__ = ['default_driver_context', 'register_builtin_drivers', 'registerize']
|
pwrliang/libvineyard | python/vineyard/cli.py | <reponame>pwrliang/libvineyard
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from argparse import ArgumentParser
import sys
import vineyard
def vineyard_argument_parser():
parser = ArgumentParser(add_help=True)
parser.add_argument('--ipc_socket')
parser.add_argument('--rpc_host')
parser.add_argument('--rpc_port', type=int)
parser.add_argument('--rpc_endpoint')
cmd_parsers = parser.add_subparsers(title='commands', dest='cmd')
ls_opt = cmd_parsers.add_parser('ls', parents=[parser], add_help=False, help='List objects')
ls_opt.add_argument('--limit', default=5, type=int)
get_opt = cmd_parsers.add_parser('get', parents=[parser], add_help=False, help='Get object')
get_opt.add_argument('--object_id')
del_opt = cmd_parsers.add_parser('del', parents=[parser], add_help=False, help='Delete object')
del_opt.add_argument('--object_id')
del_opt.add_argument('--recursive', default=False, type=bool)
del_opt.add_argument('--force', default=False, type=bool)
return parser
optparser = vineyard_argument_parser()
def exit_with_help():
    # Print the usage text to stderr and abort with a non-zero status.
    optparser.print_help(sys.stderr)
    sys.exit(-1)
__vineyard_client = None
def connect_vineyard(args):
    """Create a vineyard RPC client from the parsed CLI arguments and
    remember it in the module-level __vineyard_client."""
    if args.ipc_socket is not None:
        client = vineyard.connect(args.ipc_socket)
        # force use rpc client in cli tools
        client = vineyard.connect(*client.rpc_endpoint.split(':'))
    elif args.rpc_endpoint is not None:
        client = vineyard.connect(*args.rpc_endpoint.split(':'))
    elif args.rpc_host is not None and args.rpc_port is not None:
        client = vineyard.connect(args.rpc_host, args.rpc_port)
    else:
        # no usable connection information was supplied
        exit_with_help()
    global __vineyard_client
    __vineyard_client = client
    return client
def as_object_id(object_id):
    """Interpret *object_id* as a plain integer id when possible,
    otherwise as a stringified vineyard ObjectID."""
    try:
        value = int(object_id)
    except ValueError:
        # not numeric: treat it as a wrapped object-id string
        return vineyard.ObjectID.wrap(object_id)
    return value
def ls(client, limit):
    """Print up to *limit* objects known to the vineyard server."""
    objects = client.list(limit=limit)
    print(objects)
def get(client, object_id):
    """Fetch and print the value of *object_id*; exits with usage help
    when the id is missing."""
    if object_id is None:
        exit_with_help()
    value = client.get(as_object_id(object_id))
    print(value)
def delete(client, object_id, recursive):
    """Delete *object_id* (recursively when requested); exits with usage
    help when the id is missing."""
    if object_id is None:
        exit_with_help()
    client.delete(as_object_id(object_id), deep=recursive)
def main():
    """CLI entry point: parse arguments, connect, dispatch the sub-command."""
    args = optparser.parse_args()
    if args.cmd is None:
        exit_with_help()
    client = connect_vineyard(args)
    if args.cmd == 'ls':
        return ls(client, args.limit)
    elif args.cmd == 'get':
        return get(client, args.object_id)
    elif args.cmd == 'del':
        # NOTE(review): --force is parsed but never forwarded here — confirm.
        return delete(client, args.object_id, args.recursive)
if __name__ == "__main__":
main()
|
pwrliang/libvineyard | python/vineyard/data/default.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pickle
try:
import pyarrow as pa
except ImportError:
pa = None
def default_builder(client, value):
    ''' Default builder: serialize the object, using pyarrow if it presents,
    otherwise with pickle, then build a blob object for it.
    '''
    if pa is not None:
        payload = pa.serialize(value).to_buffer().to_pybytes()
        serialization = 'pyarrow'
    else:
        payload = pickle.dumps(value)
        serialization = 'pickle'
    buffer = client.create_blob(len(payload))
    buffer.copy(0, payload)
    # record how the payload was encoded so default_resolver can decode it
    buffer['serialization'] = serialization
    return buffer.seal(client).id
def default_resolver(obj):
    """Deserialize a blob produced by default_builder.

    Returns the blob itself when the serialization tag is missing or
    unsupported.
    """
    view = memoryview(obj)
    serialization = obj.meta['serialization']
    if serialization:
        if pa is not None and serialization == 'pyarrow':
            return pa.deserialize(view)
        if serialization == 'pickle':
            # NOTE(review): pickle.loads on shared data — only safe when
            # the blob's producer is trusted.
            return pickle.loads(view, fix_imports=True)
    # fallback: still returns the blob
    return obj
def register_default_types(builder_ctx=None, resolver_ctx=None):
    """Install the catch-all builder/resolver pair."""
    if builder_ctx is not None:
        # `object` matches any value without a more specific builder
        builder_ctx.register(object, default_builder)
    if resolver_ctx is not None:
        resolver_ctx.register('vineyard::Blob', default_resolver)
|
pwrliang/libvineyard | python/vineyard/data/graph.py | <filename>python/vineyard/data/graph.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from vineyard._C import Object, ObjectMeta
class Graph:
    """Convenience view over the metadata of a vineyard::Graph object."""

    def __init__(self, object_or_meta):
        # Accept either a vineyard Object or a bare metadata instance.
        meta = object_or_meta.meta if isinstance(object_or_meta, Object) else object_or_meta
        self.meta = meta

    def attr(self, key, default=None):
        """Look up *key* in the graph metadata, falling back to *default*."""
        return self.meta.get(key, default=default)
def vineyard_graph_resolver(obj):
    # Wrap the raw vineyard object into the Graph convenience view.
    return Graph(obj)
def register_graph_types(builder_ctx, resolver_ctx):
    """Register the Graph resolver (no graph builder exists yet)."""
    if resolver_ctx is not None:
        resolver_ctx.register('vineyard::Graph', vineyard_graph_resolver)
|
pwrliang/libvineyard | python/vineyard/data/tensor.py | <filename>python/vineyard/data/tensor.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import numpy as np
from vineyard._C import ObjectMeta
from .utils import build_numpy_buffer, normalize_dtype
def numpy_ndarray_builder(client, value, **kw):
    """Build vineyard::Tensor metadata plus a data blob from a numpy array."""
    meta = ObjectMeta()
    meta['typename'] = 'vineyard::Tensor<%s>' % value.dtype.name
    meta['value_type_'] = value.dtype.name
    # shape and partition index are stored as JSON strings in the metadata
    meta['shape_'] = json.dumps(value.shape)
    meta['partition_index_'] = json.dumps(kw.get('partition_index', []))
    meta['nbytes'] = value.nbytes
    # raw array bytes live in the attached buffer member
    meta.add_member('buffer_', build_numpy_buffer(client, value))
    return client.create_metadata(meta)
def tensor_resolver(obj):
    """Reconstruct a numpy ndarray from a vineyard::Tensor object."""
    meta = obj.meta
    value_type = normalize_dtype(meta['value_type_'])
    shape = json.loads(meta['shape_'])
    # zero-copy view over the tensor's buffer, reshaped to the stored shape
    return np.frombuffer(memoryview(obj.member("buffer_")), dtype=value_type).reshape(shape)
def register_tensor_types(builder_ctx, resolver_ctx):
    """Register numpy ndarray <-> vineyard::Tensor conversions."""
    if builder_ctx is not None:
        builder_ctx.register(np.ndarray, numpy_ndarray_builder)
    if resolver_ctx is not None:
        resolver_ctx.register('vineyard::Tensor', tensor_resolver)
|
pwrliang/libvineyard | modules/io/adaptors/write_hdfs_bytes.py | <reponame>pwrliang/libvineyard<gh_stars>0
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import sys
from urllib.parse import urlparse
import vineyard
from hdfs3 import HDFileSystem
import pyarrow as pa
from vineyard.io.byte import ByteStreamBuilder
def write_hdfs_bytes(vineyard_socket, stream_id, path, proc_num, proc_index):
    """Drain this worker's byte stream from vineyard and write it to HDFS.

    *path* is an hdfs:// url whose netloc carries the namenode host:port.
    Raises ValueError when the stream list does not match proc_num/index.
    """
    client = vineyard.connect(vineyard_socket)
    streams = client.get(stream_id)
    if len(streams) != proc_num or streams[proc_index] is None:
        raise ValueError(f'Fetch stream error with proc_num={proc_num},proc_index={proc_index}')
    # each worker consumes exactly its own stream partition
    instream = streams[proc_index]
    reader = instream.open_reader(client)
    host, port = urlparse(path).netloc.split(':')
    hdfs = HDFileSystem(host=host, port=int(port))
    # strip scheme/netloc: HDFS API wants only the path component
    path = urlparse(path).path
    with hdfs.open(path, 'wb') as f:
        while True:
            try:
                buf = reader.next()
            except vineyard.StreamDrainedException:
                # stream exhausted (close() here is redundant under `with`)
                f.close()
                break
            f.write(bytes(memoryview(buf)))
if __name__ == '__main__':
if len(sys.argv) < 6:
print('usage: ./write_hdfs_bytes <ipc_socket> <stream_id> <hdfs path> <proc_num> <proc_index>')
exit(1)
ipc_socket = sys.argv[1]
stream_id = sys.argv[2]
hdfs_path = sys.argv[3]
proc_num = int(sys.argv[4])
proc_index = int(sys.argv[5])
write_hdfs_bytes(ipc_socket, stream_id, hdfs_path, proc_num, proc_index)
|
pwrliang/libvineyard | python/vineyard/data/tests/test_base.py | <gh_stars>0
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import vineyard
from vineyard.core import default_builder_context, default_resolver_context
from vineyard.data import register_builtin_types
register_builtin_types(default_builder_context, default_resolver_context)
def test_int(vineyard_client):
    # round-trip an int through vineyard put/get
    object_id = vineyard_client.put(1)
    assert vineyard_client.get(object_id) == 1
def test_double(vineyard_client):
    # round-trip a float; approx compare to tolerate float representation
    object_id = vineyard_client.put(1.234)
    assert vineyard_client.get(object_id) == pytest.approx(1.234)
def test_string(vineyard_client):
    # round-trip a string through vineyard put/get
    object_id = vineyard_client.put('abcde')
    assert vineyard_client.get(object_id) == 'abcde'
def test_pair(vineyard_client):
    # a 2-tuple is stored as vineyard::Pair and must round-trip as a tuple
    object_id = vineyard_client.put((1, "2"))
    assert vineyard_client.get(object_id) == (1, "2")
def test_tuple(vineyard_client):
    # a 3-tuple is stored as vineyard::Tuple and must round-trip element-wise
    object_id = vineyard_client.put((1, "2", 3.456))
    assert vineyard_client.get(object_id) == (1, "2", pytest.approx(3.456))
|
SuvodipDey/FGA | compute_accuracy_trippy.py | #-----------------------------------
# Run Command
# python compute_accuracy_trippy.py
#-----------------------------------
import math
import json
import os
def loadJson(data_file):
    """Parse *data_file* as JSON and return the result.

    NOTE(review): silently yields None when the path does not exist,
    so callers fail later — confirm this is intended.
    """
    if not os.path.isfile(data_file):
        return None
    with open(data_file, 'r') as fp:
        return json.load(fp)
def getErrorIndexes(val):
    """Return the set of positions whose slot prediction is wrong (== 0)."""
    return {i for i, v in enumerate(val) if v == 0}
def load_dataset_config(dataset_config):
    """Load the dataset config JSON; return (class_types, slots, label_maps)."""
    with open(dataset_config, "r", encoding='utf-8') as cfg_file:
        cfg = json.load(cfg_file)
    return tuple(cfg[key] for key in ('class_types', 'slots', 'label_maps'))
def getNotNoneIndexes(gt, slot_dict):
    """Map every (domain, slot) present in the ground truth *gt* to its
    global slot index via *slot_dict* ("book " prefixes are dropped)."""
    return [
        slot_dict[dom + "-" + sl.replace("book ", "")]
        for dom in gt
        for sl in gt[dom]
    ]
def getAvgGoalAccuracy(id_list, val):
    """Average of val[i] over the indices in *id_list*; -1 when empty."""
    if not id_list:
        return -1
    return sum(val[i] for i in id_list) / float(len(id_list))
# --- Load TripPy per-turn slot-correctness vectors and accumulate metrics ---
filename = os.path.join("trippy", "trippy_result.json")
data = loadJson(filename)
total = 0                 # number of evaluated turns
cor = 0                   # fully-correct turns (joint accuracy numerator)
fga_cor = [0, 0, 0, 0]    # FGA accumulators, one per lambda below
turn_cor = 0              # turns counted correct by the FGA criterion
slot_acc = 0              # running sum of per-turn slot accuracy
lst_lambda = [0.25, 0.5, 0.75, 1.0]
for k in data:
    #Ignoring PMUL1455.json since it is not part of the official MultiWOZ test data
    if k=='PMUL1455.json':
        continue
    fga_prev = None
    err_set = []
    err_turn = 0
    for turn in data[k]:
        val = data[k][turn]
        # c stays 1 only when every slot in this turn is predicted correctly
        c = 1
        for v in val:
            c = c*v
        total+=1
        cor+=c
        for l in range(len(lst_lambda)):
            fga = 1
            if(c==0):
                if(int(turn)==0):
                    #Type 1 error
                    #First turn is wrong
                    fga = 0
                elif(fga_prev==1):
                    #Type 1 error
                    #Last turn was correct i.e the error in current turn
                    fga = 0
                else:
                    err = getErrorIndexes(val)
                    diff = err_set.symmetric_difference(err)
                    if(len(diff)>0):
                        #Type 1 error
                        #There exists some undetected/false positive intent in the current prediction
                        fga = 0
                    else:
                        #Type 2 error
                        #Current turn is correct but source of the error is some previous turn
                        turn_diff = int(turn)-err_turn
                        fga = (1-math.exp(-lst_lambda[l]*turn_diff))
            fga_cor[l]+=fga
        # NOTE(review): indentation reconstructed — the error bookkeeping and
        # turn counter below are taken to be per-turn (the fga==0 condition is
        # lambda-independent); confirm against the original file layout.
        if(fga==0):
            err_set = getErrorIndexes(val)
            err_turn = int(turn)
        else:
            turn_cor+=1
        # 30 slots per turn in MultiWOZ 2.1
        sa = sum(val)/30.0
        slot_acc+=sa
        fga_prev = fga
# --- Average goal accuracy, using SOM-DST ground truth for slot presence ---
filename = os.path.join("som-dst", "som-dst_result.json")
data_som = loadJson(filename)
dataset_config = os.path.join("trippy", "multiwoz21.json")
class_types, slots, label_maps = load_dataset_config(dataset_config)
# Map "domain-slot" (book_ prefix stripped) to its index in the slot vector.
slot_dict = {}
i = 0
for slot in slots:
    arr = slot.split("-")
    dom = arr[0]
    sl = arr[1].lower()
    sl = sl.replace("book_", "")
    sl_key = dom+"-"+sl
    slot_dict[sl_key] = i
    i+=1
avgGoalAcc = []
for k in data_som:
    for turn in data_som[k]:
        # only slots present in the ground truth contribute to goal accuracy
        gt = data_som[k][turn]['gt']
        id_list = getNotNoneIndexes(gt, slot_dict)
        val = data[k][turn]
        aga = getAvgGoalAccuracy(id_list, val)
        if(aga>=0):
            avgGoalAcc.append(aga)
avg_goal_acc = round(sum(avgGoalAcc)*100.0/len(avgGoalAcc),2)
print("-"*40)
print(f"Total: {total}, Exact Match: {cor}, Turn Match: {turn_cor}")
print(f"Joint Acc = {round(cor*100.0/total,2)}, Slot Acc = {round(slot_acc*100.0/total,2)}, Avg. Goal Acc = {avg_goal_acc}")
for l in range(len(lst_lambda)):
    fga_acc = round(fga_cor[l]*100.0/total,2)
    print(f"FGA L={lst_lambda[l]} : {fga_acc}")
print("-"*40)
SuvodipDey/FGA | compute_accuracy_hi-dst.py | <gh_stars>0
#-----------------------------------
# Run Command
# python compute_accuracy_hi-dst.py
#-----------------------------------
# Computes Joint/Slot/Average-Goal accuracy and Flexible Goal Accuracy (FGA)
# for the HI-DST model's MultiWOZ 2.1 predictions.
import os
import json
import pandas as pd
import argparse
import re
import math
from datetime import datetime
# Per-dialogue prediction log produced by the HI-DST model.
result_file = os.path.join("hi-dst", "log_test_1.json")
print("Path of the result : {}".format(result_file))
if not os.path.isfile(result_file):
    print ("Result file does not exist. Please provide correct key.")
    exit(0)
# Dataset config supplies the slot list and value-synonym maps (label_maps)
# used by isMatch for fuzzy value comparison.
dataset_config = os.path.join("hi-dst","multiwoz21.json")
with open(dataset_config, "r", encoding='utf-8') as f:
    raw_config = json.load(f)
class_types = raw_config['class_types']
slot_list = raw_config['slots']
label_maps = raw_config['label_maps']
# Slots holding clock times get special normalization/tolerance in isMatch.
time_slots = ["time", "leave", "arrive"]
#-----------------------------------
def loadJson(data_file):
    """Load and return the JSON content of *data_file*.

    Raises FileNotFoundError when the file is missing. (The previous
    version fell through and raised a confusing UnboundLocalError on the
    bare ``return data`` instead.)
    """
    if not os.path.isfile(data_file):
        raise FileNotFoundError(f"JSON file not found: {data_file}")
    with open(data_file, 'r') as read_file:
        return json.load(read_file)
# Slot Accuracy Computation taken from TRADE model
def getSlotAcc(gt, pr):
    """TRADE-style slot accuracy over a fixed budget of 30 slots.

    A gold slot counts as missed when it is absent from the prediction or
    its value fails the fuzzy isMatch comparison; a predicted slot counts
    as spurious when it names a slot not present in the gold state.
    """
    missed = [g for g in gt if g not in pr or not isMatch(gt[g], pr[g], g)]
    spurious = sum(1 for p in pr if p not in gt and p not in missed)
    return (30 - len(missed) - spurious) / float(30)
def normalize_time(text):
    """Canonicalize free-text time expressions to zero-padded 24h "HH:MM".

    NOTE: the substitutions are order-dependent (separators must be fixed
    before the leading-zero and am/pm passes); do not reorder them.
    """
    text = re.sub("(\d{1})(a\.?m\.?|p\.?m\.?)", r"\1 \2", text) # am/pm without space
    text = re.sub("(^| )(\d{1,2}) (a\.?m\.?|p\.?m\.?)", r"\1\2:00 \3", text) # am/pm short to long form
    text = re.sub("(^| )(at|from|by|until|after) ?(\d{1,2}) ?(\d{2})([^0-9]|$)", r"\1\2 \3:\4\5", text) # Missing separator
    text = re.sub("(^| )(\d{2})[;.,](\d{2})", r"\1\2:\3", text) # Wrong separator
    #text = re.sub("(^| )(\d{1})[;.,](\d{2})", r" \2:\3", text) # Wrong separator
    text = re.sub("(^| )(\d{2}):(\d{2})/", r"\1\2:\3", text) # Wrong separator
    text = re.sub("(^| )(\d{1}) (\d{2})", r"\1\2:\3", text) # Wrong separator
    text = re.sub("(^| )(\d{2}):!(\d{1})", r"\1\2:1\3", text) # Wrong format
    text = re.sub("(^| )(at|from|by|until|after) ?(\d{1,2})([;., ]|$)", r"\1\2 \3:00\4", text) # normalize simple full hour time
    text = re.sub("(^| )(\d{1}:\d{2})", r"\g<1>0\2", text) # Add missing leading 0
    # Map 12 hour times to 24 hour times
    text = re.sub("(\d{2})(:\d{2}) ?p\.?m\.?", lambda x: str(int(x.groups()[0]) + 12 if int(x.groups()[0]) < 12 else int(x.groups()[0])) + x.groups()[1], text)
    text = re.sub("(^| )24:(\d{2})", r"\g<1>00:\2", text) # Correct times that use 24 as hour
    return text
def isMatch(v1, v2, key):
    """Fuzzy equality for two slot values.

    Tries, in order: exact/substring match; match after light normalization
    ("b and b" expansion, stripping "the"/spaces/punctuation); for time slots
    ("domain-slot" with slot in time_slots) a normalized-clock comparison with
    a 15-minute tolerance; otherwise a lookup through the label_maps synonym
    table loaded from the dataset config.
    """
    is_match = False
    if(v1==v2 or v1 in v2 or v2 in v1):
        is_match = True
    else:
        # Normalize both sides before re-trying the substring test.
        v3 = re.sub("b and b","bed and breakfast", v1)
        v3 = re.sub("(^the | |-|'|\"|:)", "", v3)
        v4 = re.sub("b and b","bed and breakfast", v2)
        v4 = re.sub("(^the | |-|'|\"|:)", "", v4)
        if(v3==v4 or v3 in v4 or v4 in v3):
            is_match = True
        else:
            slot = key.split("-")[1]
            if (slot in time_slots):
                # NOTE(review): normalization is asymmetric — v1 goes through
                # normalize_time() while v2 only has spaces stripped; this
                # appears to assume v2 is already in HH:MM form. Confirm.
                v3 = normalize_time(v1.lower())
                v4 = v2.replace(" ","")
                if(v3==v4):
                    is_match = True
                else:
                    try:
                        if(":" in v3 and ":" in v4):
                            v3 = v3.replace(" : ",":")
                            v4 = v4.replace("24:","00:")
                            t1 = datetime.strptime(v3, '%H:%M')
                            t2 = datetime.strptime(v4, '%H:%M')
                            # Accept clock values within 15 minutes of each other.
                            t_diff = abs((t1 - t2).total_seconds() / 60.0)
                            if(t_diff<=15):
                                is_match = True
                    except:
                        # Unparseable time strings: log and fall through as a mismatch.
                        print("Err :: {} {} {}".format(key, v1, v2))
            else:
                v1 = re.sub("^the ", "", v1)
                v2 = re.sub("^the ", "", v2)
                v2 = v2.replace(" - ","-")
                # Check both directions of the synonym table.
                if v1 in label_maps:
                    for value_label_variant in label_maps[v1]:
                        if (v2 in value_label_variant or value_label_variant in v2):
                            is_match = True
                if(not is_match and v2 in label_maps):
                    for value_label_variant in label_maps[v2]:
                        if (v1 in value_label_variant or value_label_variant in v1):
                            is_match = True
    return is_match
def getMatch(gt, pr):
    """Return 1 when the two belief-state dicts agree on every slot
    (values compared fuzzily via isMatch), else 0."""
    if len(gt) != len(pr):
        return 0
    if not gt:
        # Both empty: trivially a match.
        return 1
    if set(gt).symmetric_difference(pr):
        # Slot names differ, no need to compare values.
        return 0
    return 1 if all(isMatch(gt[key], pr[key], key) for key in gt) else 0
# Average Goal Accuracy
def getAvgGoalAccuracy(gt, pr):
    """Fraction of gold slots whose predicted value fuzzily matches.

    Returns -1 when the gold state is empty so callers can skip the turn.
    """
    if not gt:
        return -1
    shared = set(gt) & set(pr)
    hits = sum(1 for key in shared if isMatch(gt[key], pr[key], key))
    return hits / float(len(gt))
def getDiff(bs1, bs2):
    """Return the bs1 slot keys that are wrong w.r.t. bs2: keys missing from
    bs2 plus keys present in both whose values fail isMatch."""
    missing = set(bs1) - set(bs2)
    mismatched = {
        key for key in bs1
        if key not in missing and not isMatch(bs1[key], bs2[key], key)
    }
    return missing | mismatched
# Slot Accuracy
def getSlotAcc2(gt, pr):
    """Alternate slot accuracy over the fixed 30-slot budget.

    Counts errors in both directions via getDiff; keys wrong in both
    directions are added back once so they are not double-penalized.
    """
    gt_errors = getDiff(gt, pr)
    pr_errors = getDiff(pr, gt)
    #s1 = set([d.rsplit("-", 1)[0] for d in d1])
    #s2 = set([d.rsplit("-", 1)[0] for d in d2])
    overlap = gt_errors & pr_errors
    return (30 - len(gt_errors) - len(pr_errors) + len(overlap)) / 30.0
def checkSubset(bs1, bs2):
    """True iff every slot of bs1 appears in bs2 with a fuzzily-matching value."""
    for key, value in bs1.items():
        if key not in bs2 or not isMatch(value, bs2[key], key):
            return False
    return True
# Flexible Goal Accuracy
def getFGA(gt_list, pr_list, gt_turn, pr_turn, turn_diff, L):
    """Flexible Goal Accuracy credit for the latest turn.

    Full credit (1) for an exact match; zero for a "Type 1" error, where the
    mistake originates in the current turn; partial credit
    1 - exp(-L * turn_diff) for a "Type 2" error inherited from a turn
    turn_diff steps back.
    """
    gt, pr = gt_list[-1], pr_list[-1]
    if getMatch(gt, pr) == 1:
        # Exact match.
        return 1
    if len(gt_list) == 1:
        # Type 1: the very first turn is already wrong.
        return 0
    if getMatch(gt_list[-2], pr_list[-2]) == 1:
        # Type 1: previous turn was correct, so the error is new this turn.
        return 0
    if not checkSubset(gt_turn, pr) or not checkSubset(pr_turn, gt):
        # Type 1: the current turn itself has an undetected or false-positive intent.
        return 0
    # Type 2: current turn is correct; the error was inherited from earlier.
    return (1 - math.exp(-L * turn_diff))
def modifyBS(bs):
    """Normalize a belief-state dict by tightening " '" to "'" in every value
    (e.g. "rosa 's" -> "rosa's").

    Removes the dead ``if(True):`` wrapper from the original loop.
    """
    return {slot_key: bs[slot_key].replace(" '", "'") for slot_key in bs}
def modifyTurnPrediction(pr, pred_slots):
    """Normalize a turn's predicted slot values.

    Time-valued slots (slot name in the module-level time_slots) are kept
    verbatim; all other values have " '" tightened to "'".

    *pred_slots* is kept for interface compatibility; the original only read
    ``pred_slots[slot_key][0]`` into an unused local (and crashed with a
    KeyError when a slot was missing), so the lookup has been removed.
    """
    pr_turn = {}
    for slot_key, value in pr.items():
        slot = slot_key.split("-")[1]
        if slot in time_slots:
            pr_turn[slot_key] = value
        else:
            pr_turn[slot_key] = value.replace(" '", "'")
    return pr_turn
def isUnseen(slot_key, slot_val, bs):
    """True when (slot_key, slot_val) is new relative to belief state *bs*.

    A value already present — either verbatim or as a label_maps synonym in
    either direction — counts as seen.
    """
    if slot_key not in bs:
        return True
    existing = bs[slot_key]
    if slot_val == existing:
        return False
    for variant in label_maps.get(existing, []):
        if slot_val == variant:
            return False
    for variant in label_maps.get(slot_val, []):
        if existing == variant:
            return False
    return True
def getTurnPrediction(bs, bs_prev):
    """Return only the slot assignments in *bs* that are new or changed
    relative to *bs_prev* (per isUnseen)."""
    return {
        slot_key: slot_val
        for slot_key, slot_val in bs.items()
        if isUnseen(slot_key, slot_val, bs_prev)
    }
# Model Accuracy
def getModelAccuracy(result_file):
    """Walk every dialogue/turn in the HI-DST result log and print Joint,
    Slot, Average Goal and Flexible Goal Accuracy (one FGA per lambda)."""
    dst_res = loadJson(result_file)
    total = 0
    c1 = 0   # turns with an exact (joint) belief-state match
    c2 = 0   # turns whose *turn-level* prediction matches (computed but not printed)
    c3=0     # accumulated slot accuracy (sa_list carries the same values)
    sa_list = []
    avgGoalAcc = []
    turn_cor = 0
    # FGA decay rates; fga[l] accumulates credit for lst_lambda[l].
    lst_lambda = [0.25, 0.5, 0.75, 1.0]
    fga = [0, 0, 0, 0]
    for idx in dst_res:
        pr = {}        # cumulative predicted belief state for this dialogue
        gt_prev = {}
        pr_prev = {}
        gt_list = []
        pr_list = []
        error_turn = 0
        for turn in dst_res[idx]:
            total+=1
            gt = modifyBS(dst_res[idx][turn]['gt'])
            gt_turn = getTurnPrediction(gt, gt_prev)
            pr_turn = modifyTurnPrediction(dst_res[idx][turn]['pr_turn'], dst_res[idx][turn]['slots'])
            # Fold this turn's predictions into the cumulative state.
            for slot_key in pr_turn:
                pr[slot_key] = pr_turn[slot_key]
            pr_turn = getTurnPrediction(pr, pr_prev)
            gt_list.append(gt)
            # NOTE(review): this appends a reference to the *same* dict that
            # keeps being mutated above, so every entry of pr_list aliases the
            # current state; getFGA's pr_list[-2] therefore sees the current
            # turn, not the previous one. Likely should be pr.copy() — confirm
            # before changing, as it alters reported FGA numbers.
            pr_list.append(pr)
            m = getMatch(gt, pr)
            c1+=m
            sa = getSlotAcc(gt, pr)
            c3+=sa
            sa_list.append(sa)
            aga = getAvgGoalAccuracy(gt, pr)
            if(aga>=0):
                avgGoalAcc.append(aga)
            n = getMatch(gt_turn, pr_turn)
            c2+=n
            turn_diff = int(turn)-error_turn
            w=0
            for l in range(len(lst_lambda)):
                w= getFGA(gt_list, pr_list, gt_turn, pr_turn, int(turn)-error_turn, lst_lambda[l])
                fga[l]+=w
            # w == 0 means a Type 1 error: restart the decay clock here.
            if(w==0):
                error_turn = int(turn)
            else:
                turn_cor+=1
            gt_prev = gt.copy()
            pr_prev = pr.copy()
    print(f"Total: {total}, Exact Match: {c1}, Turn Match: {turn_cor}")
    joint_acc = c1*100.0/total
    turn_acc= c2*100.0/total  # computed but not included in the report below
    slot_acc = sum(sa_list)*100.0/len(sa_list)
    avg_goal_acc = sum(avgGoalAcc)*100.0/len(avgGoalAcc)
    print(f"Joint Acc = {round(joint_acc,2)}, Slot Acc = {round(slot_acc,2)}, Avg. Goal Acc = {round(avg_goal_acc,2)}")
    for l in range(len(lst_lambda)):
        fga_acc = round(fga[l]*100.0/total,2)
        print(f"FGA L={lst_lambda[l]} : {fga_acc}")
    print("-"*40)
# Entry point: evaluate the HI-DST result file configured at the top of the script.
getModelAccuracy(result_file)
print("-"*40) |
SuvodipDey/FGA | compute_accuracy_trade_somdst.py | #-----------------------------------
# Run Command
# python compute_accuracy_trade_somdst.py
#-----------------------------------
import os
import json
import pandas as pd
import math
def loadJson(data_file):
    """Load and return the JSON content of *data_file*.

    Raises FileNotFoundError when the file is missing. (The previous
    version fell through and raised a confusing UnboundLocalError on the
    bare ``return data`` instead.)
    """
    if not os.path.isfile(data_file):
        raise FileNotFoundError(f"JSON file not found: {data_file}")
    with open(data_file, 'r') as read_file:
        return json.load(read_file)
def getBeliefSet(ds):
    """Flatten a nested {domain: {slot: value}} belief state into a set of
    "domain-slot-value" strings."""
    return {
        dom + "-" + slot + "-" + ds[dom][slot]
        for dom in ds
        for slot in ds[dom]
    }
# Slot Accuracy
def getSlotAccuracy(gt, pr):
    """Slot accuracy over a fixed 30-slot budget for set-form belief states.

    Entries are "domain-slot-value" strings; an entry wrong in both
    directions on the same "domain-slot" is only penalized once.
    """
    gt_only = gt - pr
    pr_only = pr - gt
    gt_slots = {entry.rsplit("-", 1)[0] for entry in gt_only}
    pr_slots = {entry.rsplit("-", 1)[0] for entry in pr_only}
    shared_slots = gt_slots & pr_slots
    return (30 - len(gt_only) - len(pr_only) + len(shared_slots)) / 30.0
# Slot Accuracy Computation taken from TRADE model
def compute_acc(gold, pred):
    """TRADE-style slot accuracy for set-form belief states ("domain-slot-value").

    A predicted entry on a slot that already counted as missed is not
    penalized again.
    """
    missed_entries = [g for g in gold if g not in pred]
    missed_slots = {g.rsplit("-", 1)[0] for g in missed_entries}
    wrong_pred = sum(
        1 for p in pred
        if p not in gold and p.rsplit("-", 1)[0] not in missed_slots
    )
    return (30 - len(missed_entries) - wrong_pred) / float(30)
# Average Goal Accuracy
def getAvgGoalAccuracy(gt, pr):
    """Fraction of gold entries present in the prediction; -1 for an empty
    gold state so callers can skip the turn."""
    if not gt:
        return -1
    return len(gt & pr) / float(len(gt))
# Flexible Goal Accuracy
def getFGA(gt_list, pr_list, turn_diff, L):
    """Flexible Goal Accuracy credit for the latest turn (set-form states).

    Full credit (1) for an exact match; zero for a "Type 1" error, where the
    mistake originates in the current turn; partial credit
    1 - exp(-L * turn_diff) for a "Type 2" error inherited from a turn
    turn_diff steps back.
    """
    gt, pr = gt_list[-1], pr_list[-1]
    if gt == pr:
        # Exact match.
        return 1
    if len(gt_list) == 1:
        # Type 1: the very first turn is already wrong.
        return 0
    if gt_list[-2] == pr_list[-2]:
        # Type 1: previous turn was correct, so the error is new this turn.
        return 0
    new_gold = gt - gt_list[-2]
    new_pred = pr - pr_list[-2]
    if not (new_gold <= pr and new_pred <= gt):
        # Type 1: this turn itself has an undetected or false-positive intent.
        return 0
    # Type 2: current turn is correct; the error was inherited from earlier.
    return (1 - math.exp(-L * turn_diff))
def getModifiedBS(bs):
    """Return a copy of a nested belief state with "book " prefixes stripped
    from slot names (e.g. "book people" -> "people")."""
    result = {}
    for dom, slots in bs.items():
        result[dom] = {}
        for slot, value in slots.items():
            name = slot.split(' ')[1] if "book" in slot else slot
            result[dom][name] = value
    return result
def getModelAccuracy(model_name, dialog_data):
    """Evaluate <model_name>/<model_name>_result.json and print Joint, Slot,
    Average Goal and Flexible Goal Accuracy (one FGA per lambda).

    dialog_data is the raw MultiWOZ data.json; its utterances are only used
    by the commented-out debug prints below.
    """
    dst_res_path = os.path.join(model_name, model_name+"_result.json")
    dst_res = loadJson(dst_res_path)
    joint_acc = 0
    slot_acc = 0
    avgGoalAcc = []
    # fga[l] accumulates FGA credit for lst_lambda[l].
    fga = [0, 0, 0, 0]
    turn_acc = 0
    total = 0
    lst_lambda = [0.25, 0.5, 0.75, 1.0]
    for idx in dst_res:
        res = dst_res[idx]
        log = dialog_data[idx]['log']
        sys = " "
        gt_list = []
        pr_list = []
        # -1 so the first turn (turn 0) gets turn_diff = 1.
        error_turn = -1
        for turn in res:
            total+=1
            # User turns sit at even log indices, system turns at odd ones.
            i = 2*int(turn)
            usr = log[i]['text'].strip()
            if(i>0):
                sys = log[i-1]['text'].strip()
            gt = getBeliefSet(res[turn]['gt'])
            pr = getBeliefSet(res[turn]['pr'])
            gt_list.append(gt)
            pr_list.append(pr)
            #print(f"Sys_{turn}: {sys}")
            #print(f"Usr_{turn}: {usr}")
            #print(f"GT_{turn}: {getModifiedBS(res[turn]['gt'])}")
            #print(f"PR_{turn}: {getModifiedBS(res[turn]['pr'])}")
            #print("-"*40)
            diff = gt.symmetric_difference(pr)
            m = 1 if len(diff)==0 else 0
            joint_acc+=m
            #sa = getSlotAccuracy(gt, pr)
            sa = compute_acc(gt, pr)
            slot_acc+=sa
            aga = getAvgGoalAccuracy(gt, pr)
            if(aga>=0):
                avgGoalAcc.append(aga)
            m = 0
            for l in range(len(lst_lambda)):
                m = getFGA(gt_list, pr_list, int(turn)-error_turn, lst_lambda[l])
                fga[l]+=m
            # m == 0 means a Type 1 error: restart the decay clock here.
            if(m==0):
                error_turn = int(turn)
            else:
                turn_acc+=1
    print(f"Total: {total}, Exact Match: {joint_acc}, Turn Match: {turn_acc}")
    joint_acc = round(joint_acc*100.0/total,2)
    slot_acc = round(slot_acc*100.0/total,2)
    avg_goal_acc = round(sum(avgGoalAcc)*100.0/len(avgGoalAcc),2)
    print(f"Joint Acc = {joint_acc}, Slot Acc = {slot_acc}, Avg. Goal Acc = {avg_goal_acc}")
    for l in range(len(lst_lambda)):
        fga_acc = round(fga[l]*100.0/total,2)
        print(f"FGA with L={lst_lambda[l]} : {fga_acc}")
#-----------------------------------
#Load raw data
# NOTE(review): os.path.join with a single argument is a no-op — this is
# equivalent to just 'data.json'.
dialog_data_file = os.path.join('data.json')
dialog_data = loadJson(dialog_data_file)
# Evaluate both models against the same raw dialogue data.
print("-"*40)
print("Trade :-")
getModelAccuracy("trade", dialog_data)
print("-"*40)
print("SOM-DST :-")
getModelAccuracy("som-dst", dialog_data)
print("-"*40)
#----------------------------------- |
camran89/covid-prevention | LambdaFunction/PDA_Registration.py | <gh_stars>0
import boto3
dynamoDB = boto3.resource('dynamodb')
table = dynamoDB.Table('users')
def lambda_handler(event, context):
    """Registration endpoint: persist the request's query-string parameters
    as a new item in the DynamoDB 'users' table, keyed by email."""
    print(event)
    params = event['queryStringParameters']
    email = params['email']
    data = {"email": email}
    print("This is email: " + email)
    print("This is data")
    print(data)
    table.put_item(Item=params)
    return {
        'statusCode': 200,
        'headers': {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin": "*"
        },
        'body': 'Registration successful'
    }
# return {"code":200, 'headers': {"Content-Type": "application/json","Access-Control-Allow-Origin":"*"},"message":""}
|
camran89/covid-prevention | LambdaFunction/PDA_CountBloodTypes.py | <reponame>camran89/covid-prevention
import json
import boto3
from boto3.dynamodb.conditions import Key
dynamoDB = boto3.resource('dynamodb')
table = dynamoDB.Table('users')
def lambda_handler(event, context):
    """Return the number of registered users for each of the eight blood groups.

    Follows DynamoDB scan pagination (LastEvaluatedKey): a single scan() call
    stops after 1 MB of data, so the previous single-call version silently
    undercounted on larger tables.
    """
    grps = ["O Positive","A Positive","B Positive","AB Positive","O Negative","A Negative","B Negative","AB Negative"]
    vals = []
    for grp in grps:
        count = 0
        scan_kwargs = {"FilterExpression": Key('blood_type').eq(grp)}
        while True:
            response = table.scan(**scan_kwargs)
            count += len(response['Items'])
            last_key = response.get('LastEvaluatedKey')
            if not last_key:
                break
            scan_kwargs['ExclusiveStartKey'] = last_key
        vals.append(count)
    return {
        'statusCode': 200,
        'headers': {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin": "*"
        },
        'body': json.dumps(vals)
    }
|
camran89/covid-prevention | LambdaFunction/PDA_Request.py | <gh_stars>0
import json
import boto3
from boto3.dynamodb.conditions import Key
dynamoDB = boto3.resource('dynamodb')
table = dynamoDB.Table('users')
def lambda_handler(event, context):
    """Return all users with the blood type given in the query string.

    Follows DynamoDB scan pagination (LastEvaluatedKey) so donors beyond the
    first 1 MB scan page are not silently dropped.
    """
    params = event['queryStringParameters']
    blood = params['blood']
    print(blood)
    items = []
    scan_kwargs = {"FilterExpression": Key('blood_type').eq(blood)}
    while True:
        response = table.scan(**scan_kwargs)
        items.extend(response['Items'])
        last_key = response.get('LastEvaluatedKey')
        if not last_key:
            break
        scan_kwargs['ExclusiveStartKey'] = last_key
    print(items)
    return {
        'statusCode': 200,
        'headers': {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin": "*"
        },
        'body': json.dumps(items)
    }
# return response['Items'] |
def errorHandling(value):
    """Return True when *value* is an API-gateway error payload, i.e. a dict
    whose 'message' field is 'Internal server error'.

    Fixes the original ``hasattr(value, 'message')`` check — dicts expose
    keys, not attributes, so that test could never succeed (the production
    copy in UI/app.py correctly uses ``'message' in value``). Also defers
    the membership test until we know value is a dict, since ``'message' in
    value`` raises TypeError for non-container inputs.
    """
    print(value)
    if isinstance(value, dict):
        print('message' in value)
        return value.get('message') == 'Internal server error'
    return False
# Ad-hoc check: the key is 'messages' (note the typo), so this must NOT be
# treated as an error payload.
some_value={'messages': 'Internal server error'}
print (errorHandling(some_value)) |
camran89/covid-prevention | LambdaFunction/PDA_GetUserData.py | <reponame>camran89/covid-prevention
import boto3
import json
dynamoDB = boto3.resource('dynamodb')
table = dynamoDB.Table('users')
def lambda_handler(event, context):
    """Return the stored user record for the email given in the query string.

    Responds 404 when no such user exists — get_item omits the 'Item' key
    entirely for a missing record, so the previous unconditional
    ``resp['Item']`` access crashed with a KeyError (an opaque 502 to the
    caller).
    """
    params = event['queryStringParameters']
    email = params['email']
    print(email)
    resp = table.get_item(Key={"email": email})
    headers = {
        "Content-Type": "application/json",
        "Access-Control-Allow-Origin": "*"
    }
    if 'Item' not in resp:
        return {
            'statusCode': 404,
            'headers': headers,
            'body': json.dumps({'message': 'User not found'})
        }
    return {
        'statusCode': 200,
        'headers': headers,
        'body': json.dumps(resp['Item'])
    }
#return resp['Item']
|
camran89/covid-prevention | UI/app.py | from flask import Flask, render_template, request, redirect, url_for,flash, make_response,session
import requests
from datetime import date
import datetime
app = Flask(__name__)
# NOTE(review): hard-coded secret key checked into source; move it to an
# environment variable before deploying.
app.secret_key = 'GribeshDhakal'
# Sessions (login flag, cached reviews) expire after one day.
app.permanent_session_lifetime = datetime.timedelta(days=1)
@app.route('/')
def hello():
    """Home page: fetch all reviews from the API, cache them in the session,
    and seed the login flag for later routes."""
    url = "https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-getreviews"
    response = requests.request("GET",url)
    r=""
    r = response.json()
    # print("From / hello printing isUser ", session['isUser'])
    # print(r)
    # Default the login flag so routes that read it do not KeyError.
    if('isUser' in session):
        print(session['isUser'])
    else:
        session['isUser'] = False
    # NOTE(review): both branches assign the same value; the if/else only
    # differs by the debug print and could be collapsed.
    if('user_reviews' in session):
        session['user_reviews']=r
        print(session['user_reviews'])
    else:
        session['user_reviews']=r
    return render_template('index.html',isUser=session['isUser'], response=r)
def check(email):
    """Fetch the stored user record for *email* from the user-data API.

    Returns the decoded JSON payload: the user dict when found, or an
    API-gateway error dict (see errorHandling) when the lookup fails.
    """
    url = "https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-getuserdata"
    # Let requests URL-encode the email; the old string concatenation broke
    # for addresses containing '+', '&', spaces, etc.
    status = requests.request("GET", url, params={"email": email})
    return status.json()
def errorHandling(value):
    """Return True when *value* is an API-gateway error payload: a dict whose
    'message' field equals 'Internal server error'."""
    print(value)
    if isinstance(value, dict):
        return value.get('message') == 'Internal server error'
    return False
@app.route('/login')
def login():
    """Render the login page; already-authenticated users go straight to the
    dashboard."""
    if 'isUser' not in session:
        # First visit: seed the login flag so later routes can test it.
        session['isUser'] = False
        print(session['isUser'])
        return render_template('login.html', pred="Please login!")
    if session['isUser']:
        return redirect(url_for('dashboard'))
    return render_template('login.html', pred="Please login!")
@app.route('/loginpage',methods=['POST'])
def loginpage():
    """Validate submitted credentials against the stored user record and
    start a session on success."""
    user = request.form['user']
    passw = request.form['passw']
    # print(user,passw)
    data = check(user)
    # SECURITY NOTE(review): the full stored record (including the password)
    # is printed to the server log, and passwords are compared in plain text;
    # they should be hashed server-side (e.g. werkzeug.security).
    print("[INFO]: Confidential Data")
    print(data)
    # errorHandling(...) is True when the lookup errored, i.e. no such user.
    if(errorHandling(data)):
        return render_template('login.html', pred="The username is not found, recheck the spelling or please register.")
    else:
        if(passw==data['password']):
            print("[Info] making isUser True")
            session['isUser']=True
            session['user']=data['name']
            print(session['isUser'])
            return redirect(url_for('dashboard'))
        else:
            return render_template('login.html', pred="Login unsuccessful. You have entered the wrong password.")
@app.route('/registration')
def register_home():
    """Render the blank registration form."""
    return render_template('register.html')
@app.route('/registration',methods=['POST'])
def register():
    """Create a new user unless the email is already registered.

    NOTE(review): relies on the registration form posting its fields in a
    fixed order (name, email, phone, city, isInfected, blood_type, password).
    """
    x = [x for x in request.form.values()]
    payload = {
        "name": x[0],
        "email": x[1],
        "phone": x[2],
        "city": x[3],
        "isInfected": x[4],
        "blood_type": x[5],
        "password": x[6],
    }
    # errorHandling(...) is True when the lookup errored, i.e. the email is
    # NOT registered yet.
    if(errorHandling(check(x[1]))):
        url = "https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-registration"
        # params= URL-encodes the values; the old manual string concatenation
        # broke on spaces and special characters in names/passwords.
        response = requests.get(url, params=payload)
        return render_template('register.html', pred="Registration Successful, please login using your details")
    else:
        return render_template('register.html', pred="You are already a member, please login using your details")
@app.route('/requester')
def requester():
    """Show the blood-request form to logged-in users; others go to login.

    Uses session.get so a visitor who hits /requester directly (before any
    route has seeded session['isUser']) is redirected instead of crashing
    with a KeyError.
    """
    if session.get('isUser'):
        return render_template('requester.html')
    return redirect(url_for('login'))
@app.route('/requested',methods=['POST'])
def requested():
    """Broadcast a blood request: SMS each matching donor, then send one
    email notification to the full recipient list."""
    bloodgrp = request.form['bloodgrp']
    #print(bloodgrp)
    url = "https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-request?blood="+bloodgrp
    status = requests.request("GET",url)
    a=status.json()
    # print(a)
    email=[]
    phone=[]
    for i in a:
        phonenos={
            "phone":str(i['phone'])
        }
        print("Sent message to "+str(phonenos))
        # Uncomment this line to send SMS to user
        respo= requests.post('https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-sendsms/', json = phonenos)
        print(respo)
        email.append(i['email'])
        # NOTE(review): this re-issues the same donor lookup once per donor
        # and discards the result — it looks redundant; confirm before removing.
        result=requests.request("GET",url)
        print(result)
        phone.append(i['phone'])
    # print(email)
    payload={
        "email" :email
    }
    # Single notification call carrying every matched email address.
    resp= requests.post('https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-sendnotification/', json = payload)
    # print(resp)
    # print(phone)
    return render_template('requester.html', pred="Your request is sent to the concerned people.")
@app.route('/dashboard')
def dashboard():
    """Show blood-type counts; anonymous visitors are sent to the login page.

    Uses session.get so a direct visit before session['isUser'] has been
    seeded redirects instead of crashing with a KeyError.
    """
    if session.get('isUser'):
        url = "https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-countbloodtypes"
        response = requests.request("GET",url)
        # r is the list of 8 counts, in the blood-group order the count API uses.
        r = response.json()
        # print(r)
        return render_template('dashboard.html',b=sum(r),b1=str(r[0]),b2=str(r[1]),b3=str(r[2]),b4=str(r[3]),b5=str(r[4]),b6=str(r[5]),b7=str(r[6]),b8=str(r[7]))
    return redirect(url_for('login'))
@app.route('/writereview')
def writereview():
    """Show the review form, pre-filled with the logged-in user's prior review.

    session.get(...) guards against visiting this page before '/' or a login
    has seeded 'isUser'/'user_reviews'/'user' — previously a KeyError/500.
    """
    if session.get('isUser'):
        reviews = session.get('user_reviews', [])
        print(reviews)
        userreview = ""
        for resp in reviews:
            if resp['username'] == session.get('user'):
                userreview = resp
                break
        print(userreview)
        return render_template('review.html', pastreview=userreview)
    return render_template('review.html')
@app.route('/wrotereview',methods=['POST'])
def wrotereview():
    """Persist a new review through the write-reviews API and confirm it to
    the user; empty usernames are stored as "Anonymous"."""
    username = request.form['username'] or "Anonymous"
    payload = {
        'username': username,
        'review': request.form['review'],
        'date': date.today().strftime("%d/%m/%Y"),
    }
    print(payload)
    respo = requests.post('https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-writereviews/', json=payload)
    return render_template('review.html', pred="You have written review.")
@app.route('/handleupdates',methods=['POST'])
def handleupdates():
    """Update or delete an existing review, chosen by the submitted action."""
    print(request.form)
    username=request.form['username']
    review=request.form['review']
    ids=request.form['id']
    action=request.form['action']
    if(not username):
        username="Anonymous"
    params={
        'id':ids,
        'username':username,
        'review':review,
        'date':date.today().strftime("%d/%m/%Y")
    }
    print(params)
    if action=='Update':
        respo= requests.put('https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-updatereviews/', json=params)
        print('update')
    if action=='Delete':
        params={'id':ids}
        print('Delete')
        respo= requests.delete('https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-deletereviews/', json=params)
        return render_template('review.html', pred="You have deleted review.")
    # NOTE(review): for action == 'Update' control reaches here and POSTs to
    # the update endpoint a second time (after the PUT above) — confirm
    # whether the extra POST is intentional before removing it.
    respo= requests.post('https://259juy4wy8.execute-api.us-east-2.amazonaws.com/prod/pda-updatereviews/', json=params)
    return render_template('review.html', pred="You have updated review.")
@app.route('/logout')
def logout():
    """Clear the whole session (login flag, cached reviews) and return to the
    login page."""
    session.clear()
    return redirect(url_for('login'))
if __name__ == '__main__':
app.run(debug=True)
|
CODER-KILLERS/Encode | tes_enc.py |
# Obfuscated By <NAME> | HTR-TECH
# Github : https://www.github.com/htr-tech
# Instagram : https://instagr.am/tahmid.rayat
# --------------------------------------------
# Time : Tue Jan 4 18:50:13 2022
# Platform : Linux aarch64
# --------------------------------------------
# SECURITY WARNING: this stub reassembles a hidden base64 payload and
# executes it at import time via compile/exec. Do not run it without first
# inspecting the decoded source (base64-decode the joined string offline
# instead of letting the final eval/exec fire).
import codecs,base64
# ASCII codes spelling the first chunk of the base64 payload.
htr = [99, 72, 74, 112, 98, 110, 81, 103, 75, 67, 74, 111, 90, 87, 120]
# Second chunk of the payload, ROT13-scrambled.
tahmid = 'folO3o3WxVvxX'
# Hex escapes for the literal string 'rot_13' (codec name).
pizza = '\x72\x6f\x74\x5f\x31\x33'
# The eval('\x..') calls just resolve the variable names above.
mobile = codecs.decode(eval('\x74\x61\x68\x6d\x69\x64'), eval('\x70\x69\x7a\x7a\x61'))
burger = base64.b64decode(''.join([chr(tech) for tech in htr])+eval('\x6d\x6f\x62\x69\x6c\x65'))
eval(compile(eval("\x62\x75\x72\x67\x65\x72"),"<tahm1d>","exec"))
|
colebob9/NichiRadioW | main.py | <reponame>colebob9/NichiRadioW
import datetime
import pandas as pd
import pandas_read_xml as pdx
# import matplotlib
import matplotlib.pyplot as plt
import os
def make_graph(xmlFile):
    """Parse a NicoNico comment XML file, count 'w' laugh characters per
    comment, and save a per-video-position line chart as <xmlFile>.png."""
    df = pdx.read_xml(
        xmlFile, ['packet', 'chat'], encoding='utf-8')
    # ------------------------------- Clean up xml -------------------------- #
    print(df)
    # drop unnecessary columns
    df.drop(['@thread', '@no', '@premium', '@anonymity', '@user_id', '@mail',
             '@leaf', '@score', '@deleted', '@date_usec', '@date'],
            axis=1, inplace=True)
    # drop rows without anything in the text column
    df.dropna(subset=['#text'], inplace=True)
    # rename columns
    df.rename(columns={"@vpos": "vpos", "#text": "text"}, inplace=True)
    # to numeric for accurate sorting
    df.vpos = df.vpos.astype(float)
    df.sort_index(inplace=True)
    # sort by vpos (stable mergesort keeps original order for ties)
    df.sort_values(by='vpos', kind="mergesort",
                   inplace=True, ignore_index=True)
    df['vpos'] = pd.to_numeric(df['vpos'], downcast='integer')  # back to int
    print(df)
    # --------------------- Loop through text columns for w's --------------- #
    # NOTE(review): the first two entries render identically here — they are
    # presumably distinct codepoints (half-width vs full-width 'w'); confirm.
    # dict.fromkeys would silently merge them if they are truly equal.
    w_list = ['w', 'w', 'W']
    w_stats = dict.fromkeys(w_list, 0)
    w_amounts = []
    for index, row in df.iterrows():
        # print(row['vpos'], row['text'])
        w_total_amount = 0
        for w in w_list:
            w_count = row['text'].count(w)
            if w_count > 0:
                # print(f"Found {w} in DF index {index}. Counted {w_count} times.")
                # print(f"vpos: {row['vpos']}")
                # print(row['text'])
                # print('')
                w_stats[w] = w_stats[w] + w_count
                # NOTE(review): '=' overwrites instead of accumulating — a
                # comment mixing variants only keeps the last variant's count
                # in the plot; confirm whether '+=' was intended.
                w_total_amount = w_count
        w_amounts.append(w_total_amount)
    print(w_stats)
    # print(w_amounts)
    print(len(w_amounts))
    # --------------------------- matplotlib graphing ----------------------- #
    # MPL setup
    plt.figure(figsize=(25, 15))
    plt.title("Number of W's in video")
    plt.xlabel("Video Pos")
    plt.ylabel("Number of W's")
    # NOTE(review): no artists carry labels, so this legend call only emits a
    # matplotlib warning.
    plt.legend()
    # make vpos into timestamps
    vpos_list = df['vpos'].values.tolist()
    print(len(vpos_list))
    # print(vpos_list)
    # vpos_stamps is computed but not used by the plot below.
    vpos_stamps = []
    for v in vpos_list:
        vpos_stamps.append(datetime.datetime.fromtimestamp(v/1000.0))
    x = vpos_list
    y = w_amounts
    plt.plot(x, y)
    # plt.bar(x, y, align='center')
    # plt.scatter(x,y)
    # for i in range(len(y)):
    #     plt.hlines(y[i], 0, x[i]) # Here you are drawing the horizontal lines
    plt.savefig(xmlFile + '.png', dpi=300, orientation='landscape')
    # plt.show()
# Generate one chart per XML file under NichiXML/, including files in nested
# subdirectories. os.path.join(subdir, file) replaces the hard-coded
# "NichiXML/" + file concatenation, which produced wrong paths for any file
# found below the top level (and relied on '/' as the separator).
for subdir, dirs, files in os.walk("NichiXML"):
    for file in files:
        make_graph(os.path.join(subdir, file))
|
culibraries/ir-exportq | ir-exportq/tasks/tasks.py | <reponame>culibraries/ir-exportq
from celery.task import task
import json
import requests
import csv
import time
from datetime import datetime
import boto3
import os
from botocore.exceptions import ClientError
# Maps Hyrax work-type model names (values of has_model_ssim in the catalog
# JSON) to the URL path segment scholar.colorado.edu uses for that work type.
workTypeDict = {
    'GraduateThesisOrDissertation': 'graduate_thesis_or_dissertations',
    'UndergraduateHonorsThesis': 'undergraduate_honors_theses',
    'Dataset': 'datasets',
    'Article': 'articles',
    'Presentation': 'presentations',
    'ConferenceProceeding': 'conference_proceedings',
    'Book': 'books',
    'BookChapter': 'book_chapters',
    'Report': 'reports',
    'Default': 'defaults'
}
def uploadToS3(countRecords):
    """Upload today's CSV export to the cubl-ir-reports bucket, delete the
    local copy, and return a status-message dict.

    Fixes the "File is not exits" typo and includes the ClientError detail
    in the failure message instead of discarding it.
    """
    s3_client = boto3.client('s3')
    filePath = datetime.now().strftime('%Y-%m-%d') + '-ir-export.csv'
    if not os.path.isfile(filePath):
        return {'message': 'File does not exist'}
    try:
        s3_client.upload_file(filePath, 'cubl-ir-reports', filePath)
        os.remove(filePath)
    except ClientError as e:
        return {'message': 'unable to upload to s3. Check log for more information. Error: ' + str(e)}
    return {'message': 'Total: ' + str(countRecords) + ' records have been exported. File: ' + filePath + ' has been uploaded to S3.'}
@task()
def runExport():
    """Celery task: page through the scholar.colorado.edu catalog JSON API,
    collect title/affiliation/type/URL for every record, write them to a
    dated CSV, and upload it to S3 via uploadToS3()."""
    url = 'https://scholar.colorado.edu/catalog.json?per_page=100&q=&search_field=all_fields'
    initPage = url + '&page=1'
    total_pages = requests.get(initPage).json()[
        'response']['pages']['total_pages']
    # NOTE(review): 'Affilation' is a typo, but it is the CSV header string
    # consumers may already depend on — coordinate before correcting it.
    fields = ['Title', 'Academic Affilation', 'Resource Type', 'URL']
    rows = []
    links = []
    fileName = datetime.now().strftime('%Y-%m-%d') + '-ir-export.csv'
    countRecords = 0
    for pageNumber in range(total_pages):
        pageNumber = pageNumber + 1
        pageURL = url + '&page=' + str(pageNumber)
        # Crude rate limiting between page fetches.
        time.sleep(1)
        for doc in requests.get(pageURL).json()['response']['docs']:
            links = []
            # NOTE(review): the bare excepts below record 'error' for any
            # failure (missing field, unexpected type) — they also swallow
            # KeyboardInterrupt; narrowing to (KeyError, TypeError) would be
            # safer.
            try:
                title = ', '.join(doc['title_tesim'])
            except:
                title = 'error'
            try:
                if 'Collection' in doc['has_model_ssim']:
                    academic = ''
                else:
                    academic = ', '.join(doc['academic_affiliation_tesim'])
            except:
                academic = 'error'
            try:
                if 'Collection' in doc['has_model_ssim']:
                    resource = ''
                else:
                    resource = ', '.join(doc['resource_type_tesim'])
            except:
                resource = 'error'
            try:
                # Collections link directly; works link per model type via
                # the workTypeDict path-segment map.
                if 'Collection' in doc['has_model_ssim']:
                    link = 'https://scholar.colorado.edu/collections/' + \
                        doc['id']
                else:
                    works = doc['has_model_ssim']
                    for work in works:
                        links.append('https://scholar.colorado.edu/concern/' +
                                     workTypeDict[work] + '/' + doc['id'])
                    link = ', '.join(links)
            except:
                link = 'error'
            rows.append([title, academic, resource, link])
            countRecords = countRecords + 1
    with open(fileName, 'w') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(fields)
        csvwriter.writerows(rows)
    return uploadToS3(countRecords)
|
culibraries/ir-exportq | setup.py | <reponame>culibraries/ir-exportq
from setuptools import setup, find_packages
# Minimal packaging config for the ir-exportq Celery task package.
setup(name='ir-exportq',
      version='1.0',
      packages=find_packages(),
      install_requires=[
          'requests',
          'boto3'
      ],
      )
|
culibraries/ir-exportq | ir-exportq/__init__.py | <filename>ir-exportq/__init__.py
from .tasks import __init__
|
le717/vss365today | src/views/search.py | <gh_stars>0
from flask import flash, redirect, render_template, session, url_for
from requests.exceptions import HTTPError
from src.blueprints import search
from src.core import api, forms
from src.core.filters.date import create_datetime, format_datetime_ymd
@search.get("/")
def index():
    """Display the search landing page with all three search forms."""
    return render_template(
        "search/search.html",
        form_date=forms.PromptSearchByDate(),
        form_host=forms.PromptSearchByHost(),
        form_word=forms.PromptSearchByWord(),
    )
@search.post("/date")
def by_date():
    """Search for a specific day's Prompt."""
    session["search_type"] = "date"
    form = forms.PromptSearchByDate()
    # Invalid form: tell the user and send them back to the search page.
    if not form.validate_on_submit():
        flash(
            f"We were unable to find a prompt for {form.data['query']}. "
            "Please select a different date.",
            "error",
        )
        return redirect(url_for("search.index"))
    return redirect(url_for("root.view_date", date=form.data["query"]))
@search.post("/host")
def by_host():
    """Search for Prompts from a specific Host."""
    session["search_type"] = "host"
    form = forms.PromptSearchByHost()
    if form.validate_on_submit():
        query = form.data["query"]
        try:
            response = api.get("search", params={"host": query})
            # Zero results are funneled into the same error path as an API
            # failure by raising HTTPError ourselves.
            if response["total"] == 0:
                raise HTTPError
            # The search was not successful
        except HTTPError:
            flash(f"No prompts from {query} could be found.", "error")
            return redirect(url_for("search.index"))
        # Cache the result payload for the results view.
        session.update(response)
        # We got a single result, go directly to the prompt
        if response["total"] == 1:
            date = create_datetime(response["prompts"][0]["date"])
            return redirect(url_for("root.view_date", date=format_datetime_ymd(date)))
        # More than one result came back, display them all
        return redirect(url_for("search.results", query=query))
    # That Host was not provided
    flash("A Host name must be provided to search.", "error")
    return redirect(url_for("search.index"))
@search.post("/word")
def by_word():
    """Search for Prompts by a specific word."""
    session["search_type"] = "word"
    form = forms.PromptSearchByWord()
    if form.validate_on_submit():
        query = form.data["query"]
        # Connect to the API to search
        try:
            response = api.get("search", params={"prompt": query})
            # Zero results are funneled into the same error path as an API
            # failure by raising HTTPError ourselves.
            if response["total"] == 0:
                raise HTTPError
            # The search was not successful
        except HTTPError:
            flash(f"No prompts containing {query} could be found.", "error")
            return redirect(url_for("search.index"))
        # Cache the result payload for the results view.
        session.update(response)
        # We got multiple search results
        if response["total"] >= 2:
            return redirect(url_for("search.results", query=query))
        # We got a single response back, go directly to the prompt
        if response["total"] == 1:
            date = create_datetime(response["prompts"][0]["date"])
            return redirect(url_for("root.view_date", date=format_datetime_ymd(date)))
    # No search results were returned
    flash(
        f"We were unable to find prompts containing {form.data['query']}. "
        "Please try using a different term.",
        "error",
    )
    return redirect(url_for("search.index"))
@search.get("/results")
def results():
    """Render the multi-result search listing cached in the session."""
    render_opts = {
        "form_date": forms.PromptSearchByDate(),
        "form_host": forms.PromptSearchByHost(),
        "form_word": forms.PromptSearchByWord(),
        **session,
    }
    return render_template("search/results.html", **render_opts)
|
le717/vss365today | src/extensions.py | from flask_wtf.csrf import CSRFProtect
# Application-wide CSRF protection instance; bound to the app in init_extensions().
csrf = CSRFProtect()
def init_extensions(app):
    """Load app extensions."""
    # Enables Flask-WTF CSRF token validation on all form submissions.
    csrf.init_app(app)
|
le717/vss365today | src/views/root.py | from collections import namedtuple
from random import randrange
from flask import abort, flash, redirect, render_template, session, url_for
from num2words import num2words
from requests.exceptions import HTTPError
from src.blueprints import root
from src.core import api
from src.core.filters import date as date_format
from src.core import forms
@root.post("form-subscribe")
def form_subscribe():
    """Handle the subscribe form: verify the "is human" sum generated by
    subscribe(), then register the email with the API."""
    form = forms.SubscribeForm()
    # The magic "is human" numbers do not exist, don't continue on
    if "SUBSCRIBE_NUM" not in session or not form.validate_on_submit():
        flash("We were unable to add you to #vss365 notifications.", "error")
        return redirect(url_for("root.index"))
    # The magic numbers were not summed correctly
    if form.number.data != session["SUBSCRIBE_NUM"][0] + session["SUBSCRIBE_NUM"][1]:
        flash("We were unable to add you to #vss365 notifications.", "error")
        return redirect(url_for("root.index"))
    # Attempt to record the email
    email = form.email.data
    try:
        api.post("subscription/", params={"email": email})
        flash(
            f"{email} has been added to #vss365 notifications! "
            "Tomorrow's prompt will be in your inbox!",
            "info",
        )
    except HTTPError:
        flash(f"We were unable to add {email} to #vss365 notifications.", "error")
    return redirect(url_for("root.index"))
@root.get("subscribe")
def subscribe():
    """Render the subscribe page with a simple arithmetic "is human" check."""
    # Generate two random numbers to use for a basic "is human" check.
    # Once generated, add them to the session for confirmation on form submit.
    # We generate these numbers on every page load unconditionally
    # so we don't persist anything
    second_num = randrange(16)  # 0-15; stored as an int and as its word form
    random_nums = [randrange(1, 21), second_num, num2words(second_num)]
    session["SUBSCRIBE_NUM"] = random_nums
    # Build up the input label to contain the math equation to be solved
    # and remove any prior input the browser might have preserved (*@ Firefox...*)
    form = forms.SubscribeForm()
    form.number.data = None
    # The label shows the first number as digits and the second spelled out
    form.number.label.text = f"{random_nums[0]} + {random_nums[2]} ="
    render_opts = {"form_subscribe": form}
    return render_template("root/subscribe.html", **render_opts)
@root.post("form-unsubscribe")
def form_unsubscribe():
    """Handle an unsubscribe form submission."""
    form = forms.UnsubscribeForm()
    if form.validate_on_submit():
        addr = form.email.data
        # Ask the API to drop the subscription record
        try:
            api.delete("subscription/", params={"email": addr})
        except HTTPError:
            flash(f"We were unable to remove {addr} from #vss365 notifications.", "error")
            return redirect(url_for("root.unsubscribe"))
        flash(f"{addr} has been removed from #vss365 notifications.", "info")
        return redirect(url_for("root.index"))
    flash("We were unable to remove you from #vss365 notifications.", "error")
    return redirect(url_for("root.unsubscribe"))
@root.get("unsubscribe")
def unsubscribe():
    """Render the unsubscribe page."""
    return render_template(
        "root/unsubscribe.html", form_unsubscribe=forms.UnsubscribeForm()
    )
@root.get("about")
def about():
    """Render the static About page."""
    return render_template("root/about.html")
@root.get("browse")
def browse():
    """Render the top-level browse page with the list of recorded years."""
    # Handle the archive file possibly being unavailable
    try:
        archive = api.get("archive")
    except HTTPError:
        archive = None
    return render_template(
        "root/browse.html",
        years=api.get("browse", "years"),
        archive=archive,
    )
@root.get("browse/<year>")
def browse_by_year(year: str):
    """Render the browse page for one year's worth of prompts."""
    # A year with no recorded prompts is a 404
    try:
        months: list = api.get("browse", "months", params={"year": year})
    except HTTPError:
        abort(404)
    return render_template("root/browse-year.html", months=months, year=year)
@root.get("browse/<year>/<month>")
def browse_by_year_month(year: str, month: str) -> str:
    """Render the browse page for a specific month of a year."""
    try:
        found: dict = api.get("browse", params={"year": year, "month": month})
    except HTTPError:
        abort(404)
    return render_template(
        "root/browse-month.html",
        date=date_format.format_month_year(f"{year}-{month}-01"),
        month_prompts=found["prompts"],
    )
@root.get("donate")
def donate():
    """Render the donation page with a breakdown of site running costs."""
    Costs = namedtuple("Costs", ["cost", "month_freq"])
    # NOTE(review): month_freq presumably is how many months each payment
    # covers (1 = monthly, 12 = yearly) -- confirm against the template
    site_costs = {
        "domain": Costs(8, 1),
        "email": Costs(12, 12),
        "server": Costs(6, 12),
    }
    return render_template("root/donate.html", site_costs=site_costs)
@root.get("/")
def index():
    """Render the latest prompt(s) on the landing page."""
    # Some older days have multiple prompts, so the API hands back a list.
    # Replace each ISO date string with a real datetime for the template.
    prompts = api.get("prompt")
    for p in prompts:
        p["date"] = date_format.create_datetime(p["date"])
    render_opts = {
        "prompts": prompts,
        "previous": prompts[0]["previous"],
        "next": None,
    }
    return render_template("root/tweet.html", **render_opts)
def view_one_year(prompt_info: dict):
    """Build out the special 1 year anniversary prompt page."""
    return render_template(
        "root/one-year.html",
        prompts=prompt_info["prompts"],
        previous=prompt_info["previous"],
        next=prompt_info["next"],
        host=prompt_info["writer_handle"],
    )
@root.get("view/<date>")
def view_date(date: str):
    """Build out the daily prompt page."""
    # Try to get the prompt for this day
    try:
        available_prompts = api.get(
            "prompt", params={"date": date_format.create_datetime(date).isoformat()}
        )
    # There is no prompt for this day
    except HTTPError:
        abort(404)
    # Load the special 1 year prompt archive page if requested
    # NOTE(review): view_one_year() indexes its argument like a dict
    # ("prompts"/"previous"/"next"), while the loop below iterates the same
    # response as a list -- confirm the API response shape for this date
    if date == "2017-09-05":
        return view_one_year(available_prompts)
    # Create a proper date object for each prompt
    # There are some older days that have multiple prompts,
    # and we need to handle these special cases
    prompts = []
    for prompt in available_prompts:
        prompt["date"] = date_format.create_datetime(prompt["date"])
        prompts.append(prompt)
    render_opts = {
        "prompts": prompts,
        "previous": prompts[0]["previous"],
        "next": prompts[0]["next"],
    }
    return render_template("root/tweet.html", **render_opts)
|
le717/vss365today | src/core/filters/__init__.py | from src.core.filters.date import (
create_datetime,
format_date_pretty,
format_datetime_ymd,
format_month_year,
)
from src.core.helpers import format_content
# Define the filters we want to export.
# Maps Jinja filter names to their implementations; consumed when the app
# registers its template filters.
ALL_FILTERS = {
    "create_datetime": create_datetime,
    "format_datetime_ymd": format_datetime_ymd,
    "format_content": format_content,
    "format_date_pretty": format_date_pretty,
    "format_month_year": format_month_year,
}
|
le717/vss365today | src/blueprints.py | from typing import Callable, Optional
from flask import Blueprint
def _factory(
    partial_module_string: str,
    url_prefix: str,
    protected: bool = False,
    auth_function: Optional[Callable] = None,
) -> Blueprint:
    """Construct a Blueprint for a view module, optionally guarded by an auth routine.

    :param partial_module_string: module name under ``src.views``
    :param url_prefix: URL prefix for every route in the blueprint
    :param protected: when True, ``auth_function`` runs before every request
    :param auth_function: authorization callable; required if ``protected``
    :raises NotImplementedError: if ``protected`` without an ``auth_function``
    """
    # Protected endpoints must have an authorization method
    if protected and auth_function is None:
        raise NotImplementedError(
            "An authorization method must be given for protected endpoints!"  # noqa
        )
    bp = Blueprint(
        partial_module_string,
        f"src.views.{partial_module_string}",
        url_prefix=url_prefix,
    )
    # Protect the endpoint with an authorization routine
    if protected:
        bp.before_request(auth_function)
    return bp
# The site's blueprints, one per view module
root = _factory("root", "/")
search = _factory("search", "/search")
shortcuts = _factory("shortcuts", "/")
# Convenience collection for registering every blueprint in the app factory
all_blueprints = (root, search, shortcuts)
|
le717/vss365today | src/middleware.py | from datetime import datetime
from typing import Callable, Dict
from flask import current_app, flash, render_template, request, url_for
@current_app.before_request
def global_alert():
    """Flash a configured site-wide alert message before each page request."""
    # Display a global alert message if
    # 1. We have one to display
    # 2. We are loading a route and not anything else
    # 3. We aren't coming from a shortcut (which are redirects)
    if (
        (alert_msg := current_app.config.get("GLOBAL_ALERT")) is not None
        and request.blueprint
        and request.blueprint != "shortcuts"
    ):
        # alert_msg is a (message, category) pair
        flash(alert_msg[0], alert_msg[1])
@current_app.context_processor
def inject_current_date() -> Dict[str, datetime]:
    """Expose the current datetime to every template as `current_date`."""
    return {"current_date": datetime.now()}
@current_app.context_processor
def nav_cur_page() -> Dict[str, Callable]:
    """Expose a template helper that marks the active navigation item.

    The helper returns "active" when the nav slug `has` is contained in the
    lowercased page `title`, otherwise an empty string.
    """
    # NOTE(review): `has` is stripped but not lowercased while `title` is --
    # callers presumably pass lowercase slugs; confirm in the templates
    return {
        "nav_cur_page": lambda title, has: (
            "active" if has.strip() in title.strip().lower() else ""
        )
    }
@current_app.context_processor
def create_url() -> Dict[str, Callable]:
    """Expose a template helper that builds a tweet's permalink URL."""

    def _make_link(prompt: dict) -> str:
        handle = prompt["writer_handle"]
        return f"https://twitter.com/{handle}/status/{prompt['id']}"

    return {"create_url": _make_link}
@current_app.context_processor
def get_static_url() -> Dict[str, Callable]:
    """Generate a URL to static assets based on dev/prod status."""

    def _func(filename: str) -> str:
        # If this config key is present, we are running in prod,
        # which means we should pull the files from a URL
        if (static_url := current_app.config.get("STATIC_FILES_URL")) is not None:
            # Bug fix: the requested filename must be part of the built URL
            # (previously the literal text "(unknown)" was returned instead)
            return f"{static_url}/{filename}"
        # Otherwise, we're running locally, so we pull the files
        # from the local filesystem
        return url_for("static", filename=filename)

    return {"get_static_url": _func}
@current_app.errorhandler(404)
def page_not_found(exc) -> tuple:
    """Render the custom 404 page for unknown routes."""
    return render_template("partials/errors/404.html"), 404
@current_app.errorhandler(500)
def server_error(exc) -> tuple:
    """Render the custom 500 page for unhandled server errors."""
    return render_template("partials/errors/500.html"), 500
|
le717/vss365today | src/views/shortcuts.py | <filename>src/views/shortcuts.py
from flask import current_app, redirect, url_for
from src.blueprints import shortcuts
@shortcuts.get("today")
def today():
    """Shortcut link to latest Prompt (redirects to the site root)."""
    return redirect(url_for("root.index"))
@shortcuts.get("privacy")
def privacy():
    """Shortcut link to site privacy notice (About page, #privacy anchor)."""
    return redirect(url_for("root.about", _anchor="privacy"))
@shortcuts.get("abuse")
def abuse():
    """Shortcut link to file an email complaint (mailto: redirect)."""
    return redirect(f'mailto:{current_app.config["ABUSE_EMAIL_ADDR"]}')
|
le717/vss365today | src/core/filters/date.py | from datetime import datetime
from typing import Union
__all__ = [
"create_datetime",
"format_datetime_ymd",
"format_date_pretty",
"format_month_year",
]
def create_datetime(date_str: str) -> datetime:
    """Create a datetime object from an ISO 8601 date string.

    Surrounding whitespace in the input is ignored.
    """
    cleaned = date_str.strip()
    return datetime.fromisoformat(cleaned)
def format_date_pretty(datetime_obj: Union[datetime, str]) -> str:
    """Pretty format a date in MM DD, YYYY (e.g. "July 02, 2020")."""
    when = (
        datetime_obj
        if isinstance(datetime_obj, datetime)
        else create_datetime(datetime_obj)
    )
    return when.strftime("%B %d, %Y")
def format_month_year(date: Union[str, datetime]) -> str:
    """Format a date as MM YYYY (e.g. "July 2020")."""
    if isinstance(date, datetime):
        return date.strftime("%B %Y")
    # String input: append a dummy day when only "YYYY-MM" was given,
    # then convert it to a datetime object
    if date.count("-") == 1:
        date = f"{date}-01"
    return create_datetime(date).strftime("%B %Y")
def format_datetime_ymd(datetime_obj: Union[datetime, str]) -> str:
    """Format a date as YYYY-MM-DD."""
    when = (
        datetime_obj
        if isinstance(datetime_obj, datetime)
        else create_datetime(datetime_obj)
    )
    return when.strftime("%Y-%m-%d")
|
le717/vss365today | src/core/helpers.py | <filename>src/core/helpers.py
import re
from html import unescape
import markupsafe
__all__ = [
"format_content",
"get_all_hashtags",
"make_hashtags",
"make_mentions",
"make_urls",
]
def format_content(text: str) -> str:
    """Convert raw tweet text into display-ready HTML.

    Wraps each non-blank line in a <p> tag, unescapes HTML entities, and
    turns hashtags, @mentions, and t.co links into clickable anchors.
    """
    # Wrap all non-blank lines in paragraphs
    split_text = text.split("\n")
    split_text = [
        f"<p>{para.strip()}</p>"
        for para in split_text
        if para  # false-y value means blank line
    ]
    # Rejoin the lines and make all links clickable.
    # Entities are unescaped *after* paragraph wrapping, then the link
    # helpers insert their own markup.
    new_text = "\n".join(split_text)
    new_text = unescape(new_text)
    new_text = make_hashtags(new_text)
    new_text = make_mentions(new_text)
    new_text = make_urls(new_text)
    return new_text
def get_all_hashtags(text: str) -> tuple:
    """Return every #hashtag occurrence in *text*, in order (duplicates kept)."""
    found = re.findall(r"(#\w+)", text, re.I)
    return tuple(found)
def make_hashtags(text: str) -> str:
    """Convert every #hashtag in *text* into a clickable Twitter hashtag link.

    Each distinct hashtag is processed exactly once: re.sub already replaces
    all occurrences in a single pass, and running a duplicated hashtag
    through a second pass would match the anchor text inserted by the first
    pass, producing nested links.
    """
    # dict.fromkeys de-duplicates while preserving first-seen order
    for ht in dict.fromkeys(get_all_hashtags(text)):
        html = f'<a href="https://twitter.com/hashtag/{ht[1:]}">{ht}</a>'
        text = re.sub(fr"({ht})\b", html, text)
    return markupsafe.soft_str(markupsafe.Markup(text))
def make_mentions(text: str) -> str:
    """Convert every @mention in *text* into a clickable Twitter profile link.

    Mentions are de-duplicated (a repeated mention would otherwise be
    re-linked inside the anchor inserted for its first occurrence), and the
    substitution is word-bounded so that replacing e.g. "@ab" cannot corrupt
    a longer mention such as "@abc".
    """
    # Start by finding all possible @mentions
    mentions = re.findall(r"(@\w+)", text, re.I)
    if not mentions:
        return text
    # Go through each distinct mention and make it a clickable link
    for mention in dict.fromkeys(mentions):
        html = markupsafe.Markup(
            f'<a href="https://twitter.com/{mention[1:]}">{mention}</a>'
        )
        text = re.sub(fr"{re.escape(mention)}\b", html, text)
    return markupsafe.soft_str(text)
def make_urls(text: str) -> str:
    """Convert all t.co text links in a tweet into an HTML link.

    Links are de-duplicated (a repeated link would otherwise be re-linked
    inside its own anchor) and substituted with a word boundary so a link
    that is a prefix of a longer one cannot corrupt it.
    """
    # Start by finding all possible t.co text links
    links = re.findall(r"(https://t\.co/[a-z0-9]+)", text, re.I)
    if not links:
        return text
    # Go through each distinct url and make it a clickable link
    for link in dict.fromkeys(links):
        html = markupsafe.Markup(f'<a href="{link}">{link}</a>')
        text = re.sub(re.escape(link) + r"\b", html, text)
    return markupsafe.soft_str(text)
|
le717/vss365today | src/core/forms.py | from datetime import datetime
from src.core import api
from src.core.filters.date import format_datetime_ymd
from flask_wtf import FlaskForm
from wtforms.fields.simple import SubmitField
from wtforms.fields.html5 import EmailField, IntegerField, SearchField, DateField
from wtforms.validators import InputRequired, Email
from wtforms_components import SelectField
__all__ = [
"PromptSearchByDate",
"PromptSearchByHost",
"PromptSearchByWord",
"SubscribeForm",
"UnsubscribeForm",
]
class PromptSearchByDate(FlaskForm):
    """Search form for finding a Prompt by its exact date."""

    # NOTE(review): "max" is computed once, at import time, so it will lag
    # behind the real current date on long-running processes -- confirm
    query = DateField(
        "Date search",
        validators=[InputRequired()],
        render_kw={
            "placeholder": "2020-07-02",
            "pattern": r"\d{4}-\d{2}-\d{2}",
            "max": format_datetime_ymd(datetime.now()),
        },
    )
class PromptSearchByHost(FlaskForm):
    """Search form for finding Prompts by their host's handle."""

    # NOTE(review): the choices list is fetched from the API once, at import
    # time; newly added hosts will not appear until the app restarts
    query = SelectField(
        "Host search",
        id="input-search-host",
        validators=[InputRequired()],
        choices=[
            (host["handle"], host["handle"])
            for host in api.get("host", params={"all": True})
        ],
    )
class PromptSearchByWord(FlaskForm):
    """Search form for finding Prompts containing a specific word."""

    query = SearchField(
        "Word search",
        validators=[InputRequired()],
        render_kw={"placeholder": "braid"},
    )
class SubscribeForm(FlaskForm):
    """Notification email subscribe form."""

    email = EmailField(
        "Email",
        validators=[InputRequired(), Email()],
        render_kw={
            "placeholder": "<EMAIL>",
            "autocomplete": "email",
            "inputmode": "email",
        },
    )
    # Answer to the arithmetic "is human" check; the label text is filled in
    # by the subscribe view at render time
    number = IntegerField(
        validators=[InputRequired()],
        render_kw={"inputmode": "numeric"},
    )
    submit = SubmitField("Subscribe")
class UnsubscribeForm(FlaskForm):
    """Notification email unsubscribe form."""

    email = EmailField(
        "Unsubscribe from daily #vss365 notifications",
        validators=[InputRequired(), Email()],
        render_kw={
            "placeholder": "<EMAIL>",
            "autocomplete": "email",
            "inputmode": "email",
        },
    )
    submit = SubmitField("Unsubscribe")
|
xuerong/MMServerEngine | code/MMServerEngine/others/proto/idcreator.py | #!/usr/bin/python
#-*- coding: utf-8 -*-
import os
import sys
# Generate a Java constants class that assigns a sequential int ID to every
# `message` declared in a .proto file.
# Usage: idcreator.py <proto file> <java class file name> <first id>
# NOTE: this is a Python 2 script (print statement below).
proto_file_name = sys.argv[1]
java_class_name = sys.argv[2]
start_index = int(sys.argv[3])
java_class_path = "com/protocol/"+java_class_name
proto_file = open(proto_file_name,"r")
lines = proto_file.readlines()
# Delete the old file
if os.path.exists(java_class_path):
    os.remove(java_class_path)
# Initialize the Java file
newJavaFile = open(java_class_path,"wb")
newJavaFile.write("package com.protocol;\n")
newJavaFile.write("//Auto Generate File, Do NOT Modify!!!!!!!!!!!!!!!\n")
newJavaFile.write("public class %s {\n" % (java_class_name.split('.')[0]))
# Iterate over the proto lines and generate one ID per message
for line in lines:
    if line.startswith("message"):
        # The message name is the second token; strip a trailing newline
        # and a trailing "{" if present (e.g. "message Foo{")
        text = line.split(' ')
        if text[1].find("\n") > 0:
            message_name = text[1].split("\n")[0]
        else:
            message_name = text[1]
        if message_name.find("{") > 0:
            message_name = message_name.split("{")[0]
        newJavaFile.write( "\tpublic static final int %s = %s;\n" % (message_name,start_index))
        start_index = start_index + 1
        print message_name
# End of the Java file
newJavaFile.write("\n}\n")
newJavaFile.close()
#
proto_file.close()
xuerong/MMServerEngine | code/MMServerEngine/others/proto/csharp/idcreator.py | #!/usr/bin/python
#-*- coding: utf-8 -*-
#CSharp所需要的MessageID文件
import os
import sys
# Generate the C# MessageID enum that assigns a sequential int ID to every
# `message` declared in a .proto file.
# Usage: idcreator.py <proto file> <cs enum file name> <first id> <builder class name>
# NOTE: this is a Python 2 script (print statement below).
proto_file_name = sys.argv[1]
cs_class_name = sys.argv[2]
start_index = int(sys.argv[3])
builder_class_name = sys.argv[4]
proto_file = open(proto_file_name,"r")
lines = proto_file.readlines()
# Delete the old file
if os.path.exists(cs_class_name):
    os.remove(cs_class_name)
# Initialize the C# file
newCSharplines = []
newCSharpFile = open(cs_class_name,"wb")
newCSharpFile.write("//Auto Generate File, Do NOT Modify!!!!!!!!!!!!!!!\n")
newCSharpFile.write("using System;\n")
newCSharpFile.write("namespace com.protocol\n")
newCSharpFile.write("{\n")
newCSharpFile.write("\tpublic enum %s\n" % (cs_class_name.replace("csharpfile\\","").split('.')[0]))
newCSharpFile.write("\t{\n")
# Iterate over the proto lines and generate one enum member per message
for line in lines:
    #if line.startswith("//"):
    #newCSharpFile.write( "%s" % (line))
    if line.startswith("message"):
        # The message name is the second token; strip a trailing newline
        # and a trailing "{" if present (e.g. "message Foo{")
        text = line.split(' ')
        if text[1].find("\n") > 0:
            message_name = text[1].split("\n")[0]
        else:
            message_name = text[1]
        if message_name.find("{") > 0:
            message_name = message_name.split("{")[0]
        newCSharpFile.write( "\t\t%s = %s,\n" % (message_name,start_index))
        start_index = start_index + 1
        print message_name
# End of the C# file
newCSharpFile.write("\n\t}\n")
newCSharpFile.write("}\n")
newCSharpFile.close()
proto_file.close()
|
xuerong/MMServerEngine | code/MMServerEngine/others/sysPara/sysPara.py | #!/usr/bin/python
#-*- coding: utf-8 -*-
import os
import sys
class Properties(object):
    """Parses a Java-style .properties file into a (possibly nested) dict.

    Dotted keys ("a.b.c=v") become nested dictionaries.
    NOTE: Python 2 code (``except Exception, e`` syntax below).
    """
    def __init__(self, fileName):
        # Path of the .properties file to parse
        self.fileName = fileName
        # Parsed key/value data, populated by getProperties()
        self.properties = {}
    def __getDict(self,strName,dictName,value):
        # Recursively expand a dotted key ("a.b.c") into nested dicts,
        # storing *value* at the innermost level
        if(strName.find('.')>0):
            k = strName.split('.')[0]
            dictName.setdefault(k,{})
            return self.__getDict(strName[len(k)+1:],dictName[k],value)
        else:
            dictName[strName] = value
            return
    def getProperties(self):
        """Read the file and return the parsed properties dict."""
        try:
            pro_file = open(self.fileName, 'Ur')
            for line in pro_file.readlines():
                line = line.strip().replace('\n', '')
                # Drop inline comments introduced by '#'
                if line.find("#")!=-1:
                    line=line[0:line.find('#')]
                if line.find('=') > 0:
                    strs = line.split('=')
                    # Keep everything after the FIRST '=' as the value,
                    # so values may themselves contain '='
                    strs[1]= line[len(strs[0])+1:]
                    self.__getDict(strs[0].strip(),self.properties,strs[1].strip())
        except Exception, e:
            raise e
        else:
            pro_file.close()
        return self.properties
def createJavaFile(name,content):
    """Write *content* to com/sys/<name>.java, replacing any existing file."""
    java_class_path = "com/sys/"+name+".java"
    # Delete the old file
    if os.path.exists(java_class_path):
        os.remove(java_class_path)
    newJavaFile = open(java_class_path,"wb")
    newJavaFile.write(content)
    newJavaFile.close()
# Build com/sys/SysPara.java from sysPara.properties: a String constant per
# key plus a static map of key -> value.
# NOTE: Python 2 code (print statement); the local name `map` shadows the
# builtin within this script.
dictProperties=Properties("sysPara.properties").getProperties()
print dictProperties
sb = "package com.sys;\n//Auto Generate File, Do NOT Modify!!!!!!!!!!!!!!!\npublic final class SysPara{"
map = "\n\tpublic static java.util.Map<String,String> paras = new java.util.HashMap<String,String>(){\n\t\t{\n"
key = "\n"
for k in dictProperties.keys():
    # One map entry and one String constant per property key
    map = map+"\t\t\tput(\""+k+"\",\""+dictProperties[k]+"\");\n"
    key = key+"\tpublic static final String "+k+" = \""+k+"\";\n"
    # sb = sb+"\n\tpublic static String "+k+" = \""+dictProperties[k]+"\";"
map = map+"\t\t}\n\t};"
sb = sb+map+key+"\n}"
createJavaFile("SysPara",sb)
|
dmyersturnbull/dscience | dscience/ml/decision_frame.py | from __future__ import annotations
from typing import Sequence
from pathlib import Path
import numpy as np
import pandas as pd
from dscience.core.exceptions import *
from dscience.core.extended_df import *
from dscience.ml.confusion_matrix import *
from kale.ml.accuracy_frames import *
class DecisionFrame(OrganizingFrame):
    """
    An n × m matrix of probabilities (or scores) from a classifier.
    The n rows are samples, and the m columns are predictions. The values are the confidence or probability of prediction.
    The two index levels are named 'label' (the correct label) and 'sample_id'; the columns are the class labels.
    Practically, this is a Pandas wrapper around a scikit-learn decision_function
    that also has the predicted and correct class labels.
    """

    @classmethod
    def required_index_names(cls) -> Sequence[str]:
        # The two MultiIndex levels every DecisionFrame must carry
        return ["label", "sample_id"]

    @classmethod
    def of(
        cls,
        correct_labels: Sequence[str],
        labels: Sequence[str],
        decision_function: np.array,
        sample_ids: Sequence[Any],
    ) -> DecisionFrame:
        """
        Wraps a decision function numpy array into a DecisionFrame instance complete with labels as names and columns.
        :param correct_labels: A length-n list of the correct labels for each of the n samples
        :param labels: A length-m list of class labels matching the predictions (columns) on `probabilities`
        :param decision_function: An n × m matrix of probabilities (or scores) from the classifier.
            The rows are samples, and the columns are predictions.
            scikit-learn decision_functions (ex model.predict_proba) will output this.
        :param sample_ids: IDs (or names) of training examples for later reference; should be unique
        :return: A DecisionFrame
        """
        decision_function = pd.DataFrame(decision_function)
        # Two-level index: (correct label, sample id)
        decision_function.index = [correct_labels, sample_ids]
        decision_function.columns = labels
        decision_function.index.names = ["label", "sample_id"]
        return cls.convert(decision_function)

    def confusion(self) -> ConfusionMatrix:
        """Sum the scores into a label × label ConfusionMatrix, normalized by column sums."""
        labels = self.columns
        correct_labels = self.index.get_level_values("label")
        if self.shape[0] != len(correct_labels):
            raise LengthMismatchError(
                "Number of rows of decision function of shape {} is not the length of the correct labels {}".format(
                    self.shape, len(correct_labels)
                )
            )
        if self.shape[1] != len(labels):
            raise LengthMismatchError(
                "Number of columns of decision function of shape {} is not the length of the class labels {}".format(
                    self.shape, len(labels)
                )
            )
        # Accumulate score mass for every (correct, predicted) label pair
        correct_confused_with = {c: {p: 0.0 for p in labels} for c in labels}
        for r, row in enumerate(self.index):
            correct_name = correct_labels[r]
            for c, column in enumerate(self.columns):
                confused_name = labels[c]
                correct_confused_with[correct_name][confused_name] += self.iat[r, c]
        correct_confused_with = pd.DataFrame(correct_confused_with)
        # Normalize so that each column sums to 1
        correct_confused_with /= correct_confused_with.sum()
        return ConfusionMatrix(correct_confused_with)

    def accuracy(self) -> AccuracyFrame:
        """Build an AccuracyFrame with, per sample, the predicted label and the
        scores (×100) for both the true label and the predicted label."""
        actual_labels = self.index.get_level_values("label").values
        sample_ids = self.index.get_level_values("sample_id").values
        # Drop sample_id so each row is indexed by its correct label only
        stripped = self.reset_index().drop("sample_id", axis=1).set_index("label")
        # The prediction is the column with the highest score
        predicted_labels = stripped.idxmax(axis=1).values
        predicted_probs = stripped.max(axis=1).values
        # The score assigned to the *correct* label of each row
        actual_probs = stripped.apply(lambda r: r.loc[r.name], axis=1).values
        return AccuracyFrame(
            {
                "label": actual_labels,
                "sample_id": sample_ids,
                "prediction": predicted_labels,
                "score": actual_probs * 100.0,
                "score_for_prediction": predicted_probs * 100.0,
            }
        )

    @classmethod
    def read_csv(cls, path: PathLike, *args, **kwargs) -> DecisionFrame:
        """Read a CSV written by to_csv(), restoring the two-level index."""
        df = pd.read_csv(Path(path)).set_index(cls.required_index_names())
        return cls(df)

    def to_csv(self, path: PathLike, *args, **kwargs):
        """Write this frame to CSV with the index levels as labeled columns."""
        self.to_vanilla().to_csv(path, index_label=self.__class__.required_index_names())
__all__ = ["DecisionFrame"]
|
dmyersturnbull/dscience | dscience/calc/stats.py | from typing import Tuple as Tup
import scipy
from scipy import stats
from statsmodels.nonparametric.kde import KDEUnivariate
import numpy as np
import pandas as pd
from dscience.core.exceptions import LengthMismatchError, NullValueError
class StatTools:
    """Statistical helper routines (KDE and two-sample t-tests)."""

    @classmethod
    def kde(
        cls, a: np.array, kernel: str = "gau", bw: str = "normal_reference"
    ) -> Tup[np.array, np.array]:
        """
        Calculates a univariate KDE via statsmodels, returning (support, density).

        (Thin wrapper around statsmodels' KDEUnivariate. Note that scipy uses
        statsmodels for KDE when available and otherwise silently falls back
        to its own implementation, which is hazardous.)
        """
        values = a.values if isinstance(a, pd.Series) else a
        estimator = KDEUnivariate(values)
        estimator.fit(kernel=kernel, bw=bw)
        return estimator.support, estimator.density

    @classmethod
    def ttest_pval(cls, z: pd.Series, a: str, b: str) -> float:
        """Calculates a p-value from a Welch t-test between the groups labeled a and b
        in the "name" index level of *z*."""
        frame = pd.DataFrame(z)
        group_names = frame.index.get_level_values("name")
        neg_values = frame[group_names == a].values
        if len(neg_values) < 2:
            raise LengthMismatchError("Too few ({}) values for {}".format(len(neg_values), a), minimum=2)
        pos_values = frame[group_names == b].values
        if len(pos_values) < 2:
            raise LengthMismatchError("Too few ({}) values for {}".format(len(pos_values), b), minimum=2)
        # equal_var=False -> Welch's t-test; the single-column frame yields a length-1 result
        pval = scipy.stats.ttest_ind(pos_values, neg_values, equal_var=False).pvalue
        if isinstance(pval, float) and np.isnan(pval):
            raise NullValueError("NaN for {} and {}".format(a, b))
        return pval[0]
__all__ = ["StatTools"]
|
dmyersturnbull/dscience | dscience/__init__.py | <reponame>dmyersturnbull/dscience
"""
Metadata for dscience.
"""
from pathlib import Path
from importlib_metadata import metadata as __load
# Load this package's installed-distribution metadata; the distribution name
# is taken from this directory's name, and the dunder attributes below are
# re-exported from it.
metadata = __load(Path(__file__).parent.name)
__status__ = "Development"
__copyright__ = "Copyright (2015–2020)"
__date__ = "2020-04-30"
__uri__ = metadata["home-page"]
__title__ = metadata["name"]
__summary__ = metadata["summary"]
__license__ = metadata["license"]
__version__ = metadata["version"]
__author__ = metadata["author"]
__maintainer__ = metadata["maintainer"]
__contact__ = metadata["maintainer"]
|
dmyersturnbull/dscience | dscience/ml/confusion_matrix.py | <reponame>dmyersturnbull/dscience
from __future__ import annotations
import logging
from typing import Set, Sequence, Union, Callable, Mapping
from copy import deepcopy
import numpy as np
import pandas as pd
from clana.visualize_cm import simulated_annealing
from dscience.core import PathLike
from dscience.core.exceptions import *
from dscience.core.extended_df import *
from dscience.core.chars import *
logger = logging.getLogger("dscience")
class ConfusionMatrix(SimpleFrame):
    """
    A wrapper around a confusion matrix as a Pandas DataFrame.
    The rows are the correct labels, and the columns are the predicted labels.
    """

    def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
        super().__init__(data=data, index=index, columns=columns, dtype=dtype, copy=copy)

    def warn_if_asymmetric(self) -> None:
        """Log a warning if the row labels and column labels differ."""
        if self.rows != self.cols:
            logger.warning("Rows {} != columns {}".format(self.rows, self.columns))

    def is_symmetric(self) -> bool:
        """Whether the row labels equal the column labels (including order)."""
        return self.rows == self.cols

    def _repr_html_(self) -> str:
        # Fix: the original passed a stray, unused 5th argument (len(self))
        # to format(); the template has only four placeholders.
        return "<strong>{}: {} {}</strong>\n{}".format(
            self.__class__.__name__,
            self._dims(),
            Chars.check if self.rows == self.cols else Chars.x,
            pd.DataFrame._repr_html_(self),
        )

    def sub(self, names: Set[str]) -> ConfusionMatrix:
        """Return the sub-matrix restricted to the given labels (rows and columns)."""
        return ConfusionMatrix(self.loc[names][names])

    def shuffle(self) -> ConfusionMatrix:
        """
        Returns a copy with every value mapped to a new location.
        Destroys the correct links between labels and values.
        Useful for permutation tests.
        :return: A copy
        """
        cp = deepcopy(self.flatten())
        np.random.shuffle(cp)
        vals = cp.reshape((len(self.rows), len(self.columns)))
        return ConfusionMatrix(vals, index=self.rows, columns=self.columns)

    def diagonals(self) -> np.array:
        """
        Returns diagonal elements.
        """
        return np.array([self.iat[i, i] for i in range(len(self))])

    def off_diagonals_quantiles(self, q: float = 0.5) -> np.array:
        """Per row, the q-quantile over that row's values (diagonal included)."""
        lst = []
        for i in range(len(self)):
            lst.append(np.quantile([self.iat[i, j] for j in range(len(self))], q))
        return np.array(lst)

    def off_diagonals_means(self) -> np.array:
        """Per row, the mean over that row's values (diagonal included)."""
        lst = []
        for i in range(len(self)):
            lst.append(np.mean([self.iat[i, j] for j in range(len(self))]))
        return np.array(lst)

    def flatten(self) -> np.array:
        """All values as a flat 1-D numpy array (row-major)."""
        return self.values.flatten()

    def sum_diagonal(self) -> float:
        """Sum of the diagonal elements."""
        s = 0
        for i in range(len(self)):
            s += self.iat[i, i]
        return s

    def sum_off_diagonal(self) -> float:
        """Sum of every element not on the diagonal."""
        s = 0
        for i in range(len(self)):
            for j in range(len(self)):
                if i != j:
                    s += self.iat[i, j]
        return s

    def score_df(self) -> pd.DataFrame:
        """
        Get the diagonal elements as a Pandas DataFrame with columns 'name' and 'score'.
        :return: A Pandas DataFrame
        """
        # NOTE(review): scores() is presumably provided by the parent frame class
        sers = []
        for score, name in self.scores():
            sers.append(pd.Series({"name": name, "score": score}))
        return pd.DataFrame(sers)

    def symmetrize(self) -> ConfusionMatrix:
        """Averages with its transpose, forcing it to be symmetric. Returns a copy."""
        return ConfusionMatrix(0.5 * (self + self.T))

    def triagonalize(self) -> ConfusionMatrix:
        """
        NaNs out the upper triangle, returning a copy.
        You may consider calling symmetrize first.
        WARNING: Do NOT call a sorting method after this.
        """
        # Fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        return ConfusionMatrix(self.where(np.tril(np.ones(self.shape)).astype(bool)))

    def log(self) -> ConfusionMatrix:
        """Takes the log10 of every value in this ConfusionMatrix, returning a new one."""
        return ConfusionMatrix(self.applymap(np.log10))

    def negative(self) -> ConfusionMatrix:
        """Negate every value, returning a new ConfusionMatrix."""
        return ConfusionMatrix(self.applymap(lambda x: -x))

    def sort(self, **kwargs) -> ConfusionMatrix:
        """
        Sorts this confusion matrix to show clustering. The same ordering is applied to the rows and columns.
        Call this first. Do not call symmetrize(), log(), or triagonalize() before calling this.
        Returns a copy.
        :param: kwargs Passed to simulated_annealing: `steps`, `cooling_factor`, `temp`, `deterministic`
        :return: A new ConfusionMatrix, sorted
        """
        permutation = self.permutation(**kwargs)
        return self.sort_with(permutation)

    def permutation(self, **kwargs) -> Mapping[str, int]:
        """
        Sorts this confusion matrix to show clustering. The same ordering is applied to the rows and columns.
        Returns the sorting. Does not alter this ConfusionMatrix.
        :param: kwargs Passed to simulated_annealing: `steps`, `cooling_factor`, `temp`, `deterministic`
        :return: A dictionary mapping class names to their new positions (starting at 0)
        """
        if not self.is_symmetric():
            raise AmbiguousRequestError(
                "Unclear how to sort because rows {} and columns {} differ".format(
                    self.rows, self.columns
                )
            )
        optimized = simulated_annealing(self.values, **kwargs)
        perm = list(reversed(optimized["perm"]))
        perm = {name: perm.index(i) for i, name in enumerate(self.rows)}
        logger.info("Permutation for rows {}: {}".format(self.rows, perm))
        return perm

    def sort_alphabetical(self) -> ConfusionMatrix:
        """
        Sort by the labels alphabetically.
        """
        labels = sorted(self.rows)
        return self.sort_first(labels)

    def sort_with(self, permutation: Union[Sequence[str], Mapping[str, int]]) -> ConfusionMatrix:
        """
        Sorts this ConfusionMatrix's rows and columns by a predefined ordering. Returns a copy.
        :param permutation: Maps names (strings) to their 0-indexed positions (integers)
            If a mapping, takes as-is; these are returned by permutation()
            If a sequence, must contain exactly the row labels, in the desired order
        :return: A new ConfusionMatrix with sorted rows and columns
        """
        if not self.is_symmetric():
            raise AmbiguousRequestError(
                "Unclear how to sort because rows {} and columns {} differ".format(
                    self.rows, self.columns
                )
            )
        if isinstance(permutation, Sequence):
            # Bug fix: this condition was inverted -- a sequence covering
            # exactly the row labels used to raise, which also broke
            # sort_first() and sort_alphabetical().
            if set(permutation) == set(self.rows):
                permutation = {name: i for i, name in enumerate(permutation)}
            else:
                raise RefusingRequestError(
                    "{} permutation elements instead of {}. See `sort_first`.".format(
                        len(permutation), len(self)
                    )
                )
        data = self.reindex(sorted(self.rows, key=lambda s: permutation[s]), axis=1)
        xx = [permutation[name] for name in self.rows]
        data["__sort"] = xx
        data = data.sort_values("__sort").drop("__sort", axis=1)
        return ConfusionMatrix(data)

    def sort_first(self, first: Sequence[str]) -> ConfusionMatrix:
        """
        Put these elements first.
        """
        first = [*first, *[r for r in self.rows if r not in first]]
        permutation = {name: i for i, name in enumerate(first)}
        return self.sort_with(permutation)

    @property
    def rows(self):
        """Row labels as a plain list."""
        return self.index.tolist()

    @property
    def cols(self):
        """Column labels as a plain list."""
        return self.columns.tolist()

    @property
    def length(self) -> int:
        """Get a safe length, verifying that the len(rows) == len(cols)."""
        if len(self.rows) != len(self.cols):
            raise LengthMismatchError("{} rows != {} cols".format(len(self.rows), len(self.cols)))
        return len(self.rows)

    def __repr__(self) -> str:
        return "ConfusionMatrix({} labels @ {})".format(len(self.rows), hex(id(self)))

    def __str__(self) -> str:
        return repr(self)

    @classmethod
    def read_csv(cls, path: PathLike, *args, **kwargs):
        """Read a ConfusionMatrix from CSV, taking the first column as the label index."""
        path = Path(path)
        df = pd.read_csv(path, index_col=0)
        df.index.name = "name"
        return ConfusionMatrix(df)
class ConfusionMatrices:
    """Static utilities for building and aggregating ConfusionMatrix objects."""

    @classmethod
    def average(cls, matrices: Sequence[ConfusionMatrix]) -> ConfusionMatrix:
        """
        Averages a list of confusion matrices.
        :param matrices: An iterable of ConfusionMatrices (does not need to be a list)
        :return: A new ConfusionMatrix
        """
        if len(matrices) < 1:
            raise EmptyCollectionError("Cannot average 0 matrices")
        # unsort() produces copies, so the in-place accumulation below
        # cannot mutate the caller's matrices
        matrices = [m.unsort() for m in matrices]
        rows, cols, mx0 = matrices[0].rows, matrices[0].cols, matrices[0]
        if any((not m.is_symmetric() for m in matrices)):
            # Fix: the implicitly-concatenated message was missing a space
            # ("becausefor")
            raise RefusingRequestError(
                "Refusing to average matrices because "
                "for at least one matrix the rows and columns are different"
            )
        for m in matrices[1:]:
            if m.rows != rows:
                # Fix: missing space in the concatenated message ("another(or")
                raise RefusingRequestError(
                    "At least one confusion matrix has different rows than another "
                    "(or different columns than another)"
                )
            mx0 += m
        # noinspection PyTypeChecker
        return ConfusionMatrix((1.0 / len(matrices)) * mx0)

    @classmethod
    def agg_matrices(
        cls,
        matrices: Sequence[ConfusionMatrix],
        aggregation: Callable[[Sequence[pd.DataFrame]], pd.DataFrame],
    ) -> ConfusionMatrix:
        """
        Aggregates a list of confusion matrices.
        :param matrices: An iterable of ConfusionMatrices (does not need to be a list)
        :param aggregation: The aggregation to perform over the matrices, such as np.mean
        :return: A new ConfusionMatrix
        """
        if len(matrices) < 1:
            raise EmptyCollectionError("Cannot aggregate 0 matrices")
        matrices = [mx.unsort() for mx in matrices]
        # Fix: compare the label *lists* (.cols) rather than a list against a
        # pandas Index, which raised an ambiguous-truth-value error
        rows, cols, mx = matrices[0].rows, matrices[0].cols, matrices[0]
        if rows != cols:
            raise RefusingRequestError(
                "Refusing to aggregate matrices because for at least one matrix the rows and columns are different"
            )
        # Fix: the first matrix was previously excluded from the aggregation
        ms = [mx]
        for m in matrices[1:]:
            if m.rows != rows or m.cols != cols:
                raise RefusingRequestError(
                    "At least one confusion matrix has different rows than another (or different columns than another)"
                )
            ms.append(m)
        return ConfusionMatrix(aggregation(ms))

    @classmethod
    def zeros(cls, classes: Sequence[str]) -> ConfusionMatrix:
        """A classes × classes matrix of all zeros."""
        return ConfusionMatrix(
            pd.DataFrame(
                [pd.Series({"class": r, **{c: 0.0 for c in classes}}) for r in classes]
            ).set_index("class")
        )

    @classmethod
    def perfect(cls, classes: Sequence[str]) -> ConfusionMatrix:
        """The identity matrix over the classes: every prediction correct."""
        return ConfusionMatrix(
            pd.DataFrame(
                [
                    pd.Series({"class": r, **{c: 1.0 if r == c else 0.0 for c in classes}})
                    for r in classes
                ]
            ).set_index("class")
        )

    @classmethod
    def uniform(cls, classes: Sequence[str]) -> ConfusionMatrix:
        """A matrix where every class is confused with every class equally (1/n)."""
        return ConfusionMatrix(
            pd.DataFrame(
                [
                    pd.Series({"class": r, **{c: 1.0 / len(classes) for c in classes}})
                    for r in classes
                ]
            ).set_index("class")
        )
__all__ = ["ConfusionMatrix", "ConfusionMatrices"]
|
dmyersturnbull/dscience | dscience/ml/__init__.py | from dscience.core import SmartEnum
class ErrorBehavior(SmartEnum):
    """
    Policy choices for reacting to an error: fail outright, log it at one of
    three severities, or ignore it entirely.
    NOTE(review): the exact handling is decided by whatever code consumes this
    enum; only the member names/values are defined here.
    """
    FAIL = 1
    LOG_ERROR = 2
    LOG_WARNING = 3
    LOG_CAUTION = 4
    IGNORE = 5
__all__ = ["ErrorBehavior"]
|
dmyersturnbull/dscience | dscience/calc/peak_finding.py | <reponame>dmyersturnbull/dscience
"""
Functions for identifying peaks in signals.
"""
import numpy as np
from scipy.stats import scoreatpercentile
from scipy.signal.wavelets import cwt, ricker
from scipy.signal._peak_finding_utils import _peak_prominences
from dscience.core.exceptions import OutOfRangeError, WrongDimensionError
class PeakFinder:
    """Wavelet-based peak identification utilities (adapted from scipy.signal)."""

    @classmethod
    def peak_prominences(cls, x, peaks, wlen=None):
        """
        Calculate the prominence of each peak in a signal.
        The prominence of a peak measures how much a peak stands out from the
        surrounding baseline of the signal and is defined as the vertical distance
        between the peak and its lowest contour line.

        Parameters
        ----------
        x : sequence
            A signal with peaks.
        peaks : sequence
            Indices of peaks in `x`.
        wlen : int or float, optional
            A window length in samples that optionally limits the evaluated area for
            each peak to a subset of `x`. The peak is always placed in the middle of
            the window therefore the given length is rounded up to the next odd
            integer. This parameter can speed up the calculation (see Notes).

        Returns
        -------
        prominences : ndarray
            The calculated prominences for each peak in `peaks`.
        left_bases, right_bases : ndarray
            The peaks' bases as indices in `x` to the left and right of each peak.
            The higher base of each pair is a peak's lowest contour line.

        Raises
        ------
        ValueError
            If an index in `peaks` does not point to a local maximum in `x`.

        See Also
        --------
        find_peaks
            Find peaks inside a signal based on peak properties.
        peak_widths
            Calculate the width of peaks.

        Notes
        -----
        Strategy to compute a peak's prominence:
        1. Extend a horizontal line from the current peak to the left and right
           until the line either reaches the window border (see `wlen`) or
           intersects the signal again at the slope of a higher peak. An
           intersection with a peak of the same height is ignored.
        2. On each side find the minimal signal value within the interval defined
           above. These points are the peak's bases.
        3. The higher one of the two bases marks the peak's lowest contour line. The
           prominence can then be calculated as the vertical difference between the
           peaks height itself and its lowest contour line.

        Searching for the peak's bases can be slow for large `x` with periodic
        behavior because large chunks or even the full signal need to be evaluated
        for the first algorithmic step. This evaluation area can be limited with the
        parameter `wlen` which restricts the algorithm to a window around the
        current peak and can shorten the calculation time if the window length is
        short in relation to `x`.
        However this may stop the algorithm from finding the true global contour
        line if the peak's true bases are outside this window. Instead a higher
        contour line is found within the restricted window leading to a smaller
        calculated prominence. In practice this is only relevant for the highest set
        of peaks in `x`. This behavior may even be used intentionally to calculate
        "local" prominences.

        .. warning::
            This function may return unexpected results for data containing NaNs. To
            avoid this, NaNs should either be removed or replaced.

        References
        ----------
        .. [1] Wikipedia Article for Topographic Prominence:
           https://en.wikipedia.org/wiki/Topographic_prominence

        Examples
        --------
        >>> from scipy.signal import find_peaks, peak_prominences
        >>> x = np.linspace(0, 6 * np.pi, 1000)
        >>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
        >>> peaks, _ = find_peaks(x)
        >>> prominences = peak_prominences(x, peaks)[0]

        A peak at index 5 with several edge cases:

        >>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0])
        >>> peaks = np.array([5])
        >>> peak_prominences(x, peaks)  # -> (prominences, left_bases, right_bases)
        (array([3.]), array([2]), array([6]))

        Restricting to a window changes the result:

        >>> peak_prominences(x, peaks, wlen=3.1)
        (array([2.]), array([4]), array([6]))
        """
        # Inner function expects `x` to be C-contiguous
        x = np.asarray(x, order="C", dtype=np.float64)
        if x.ndim != 1:
            raise WrongDimensionError("`x` must have exactly one dimension")
        peaks = np.asarray(peaks)
        if peaks.size == 0:
            # Empty arrays default to np.float64 but are valid input
            peaks = np.array([], dtype=np.intp)
        try:
            # Safely convert to C-contiguous array of type np.intp
            peaks = peaks.astype(np.intp, order="C", casting="safe", subok=False, copy=False)
        except TypeError as exc:
            # Improvement: chain the original error so its cause is not lost
            raise TypeError("Cannot safely cast `peaks` to dtype('intp')") from exc
        if peaks.ndim != 1:
            raise WrongDimensionError("`peaks` must have exactly one dimension")
        if wlen is None:
            wlen = -1  # Inner function expects int -> None == -1
        elif 1 < wlen:
            # Round up to next positive integer; rounding up to next odd integer
            # happens implicitly inside the inner function
            wlen = int(np.ceil(wlen))
        else:
            # Give feedback if wlen has unexpected value
            # Bug fix: message previously read "must be at larger than 1"
            raise OutOfRangeError("`wlen` must be larger than 1, was " + str(wlen))
        return _peak_prominences(x, peaks, wlen)

    @classmethod
    def _boolrelextrema(cls, data, comparator, axis=0, order=1, mode="clip"):
        """
        Calculate the relative extrema of `data`.
        Relative extrema are calculated by finding locations where
        ``comparator(data[n], data[n+1:n+order+1])`` is True.

        Parameters
        ----------
        data : ndarray
            Array in which to find the relative extrema.
        comparator : callable
            Function to use to compare two data points.
            Should take two arrays as arguments.
        axis : int, optional
            Axis over which to select from `data`. Default is 0.
        order : int, optional
            How many points on each side to use for the comparison
            to consider ``comparator(n,n+x)`` to be True.
        mode : str, optional
            How the edges of the vector are treated. 'wrap' (wrap around) or
            'clip' (treat overflow as the same as the last (or first) element).
            Default 'clip'. See numpy.take

        Returns
        -------
        extrema : ndarray
            Boolean array of the same shape as `data` that is True at an extrema,
            False otherwise.

        Examples
        --------
        >>> testdata = np.array([1,2,3,2,1])
        >>> PeakFinder._boolrelextrema(testdata, np.greater, axis=0)
        array([False, False, True, False, False], dtype=bool)
        """
        if (int(order) != order) or (order < 1):
            raise OutOfRangeError("Order must be an int >= 1")
        datalen = data.shape[axis]
        locs = np.arange(0, datalen)
        results = np.ones(data.shape, dtype=bool)
        main = data.take(locs, axis=axis, mode=mode)
        for shift in range(1, order + 1):
            plus = data.take(locs + shift, axis=axis, mode=mode)
            minus = data.take(locs - shift, axis=axis, mode=mode)
            results &= comparator(main, plus)
            results &= comparator(main, minus)
            # Bitwise ~ on a numpy bool acts as logical not: stop early
            # once no candidate positions remain
            if ~results.any():
                return results
        return results

    @classmethod
    def _identify_ridge_lines(cls, matr, max_distances, gap_thresh):
        """
        Identify ridges in the 2-D matrix.
        Expect that the width of the wavelet feature increases with increasing row
        number.

        Parameters
        ----------
        matr : 2-D ndarray
            Matrix in which to identify ridge lines.
        max_distances : 1-D sequence
            At each row, a ridge line is only connected
            if the relative max at row[n] is within
            `max_distances`[n] from the relative max at row[n+1].
        gap_thresh : int
            If a relative maximum is not found within `max_distances`,
            there will be a gap. A ridge line is discontinued if
            there are more than `gap_thresh` points without connecting
            a new relative maximum.

        Returns
        -------
        ridge_lines : tuple
            Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
            ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none
            found. Each ridge-line will be sorted by row (increasing), but the
            order of the ridge lines is not specified.

        References
        ----------
        Bioinformatics (2006) 22 (17): 2059-2065.
        :doi:`10.1093/bioinformatics/btl355`
        http://bioinformatics.oxfordjournals.org/content/22/17/2059.long

        Examples
        --------
        >>> data = np.random.rand(5,5)
        >>> ridge_lines = PeakFinder._identify_ridge_lines(data, 1, 1)

        Notes
        -----
        This function is intended to be used in conjunction with `cwt`
        as part of `find_peaks_cwt`.
        """
        if len(max_distances) < matr.shape[0]:
            raise OutOfRangeError(
                "Max_distances must have at least as many rows as matr",
                value=len(max_distances),
                minimum=matr.shape[0],
            )
        all_max_cols = PeakFinder._boolrelextrema(matr, np.greater, axis=1, order=1)
        # Highest row for which there are any relative maxima
        has_relmax = np.where(all_max_cols.any(axis=1))[0]
        if len(has_relmax) == 0:
            return []
        start_row = has_relmax[-1]
        # Each ridge line is a 3-tuple:
        # rows, cols, Gap number
        ridge_lines = [[[start_row], [col], 0] for col in np.where(all_max_cols[start_row])[0]]
        final_lines = []
        rows = np.arange(start_row - 1, -1, -1)
        cols = np.arange(0, matr.shape[1])
        for row in rows:
            this_max_cols = cols[all_max_cols[row]]
            # Increment gap number of each line,
            # set it to zero later if appropriate
            for line in ridge_lines:
                line[2] += 1
            # XXX These should always be all_max_cols[row]
            # But the order might be different. Might be an efficiency gain
            # to make sure the order is the same and avoid this iteration
            prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
            # Look through every relative maximum found at current row
            # Attempt to connect them with existing ridge lines.
            for ind, col in enumerate(this_max_cols):
                # If there is a previous ridge line within
                # the max_distance to connect to, do so.
                # Otherwise start a new one.
                line = None
                if len(prev_ridge_cols) > 0:
                    diffs = np.abs(col - prev_ridge_cols)
                    closest = np.argmin(diffs)
                    if diffs[closest] <= max_distances[row]:
                        line = ridge_lines[closest]
                if line is not None:
                    # Found a point close enough, extend current ridge line
                    line[1].append(col)
                    line[0].append(row)
                    line[2] = 0
                else:
                    new_line = [[row], [col], 0]
                    ridge_lines.append(new_line)
            # Remove the ridge lines with gap_number too high
            # XXX Modifying a list while iterating over it.
            # Should be safe, since we iterate backwards, but
            # still tacky.
            # Bug fix: `xrange` does not exist in Python 3; use `range`
            for ind in range(len(ridge_lines) - 1, -1, -1):
                line = ridge_lines[ind]
                if line[2] > gap_thresh:
                    final_lines.append(line)
                    del ridge_lines[ind]
        out_lines = []
        for line in final_lines + ridge_lines:
            sortargs = np.array(np.argsort(line[0]))
            rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
            rows[sortargs] = line[0]
            cols[sortargs] = line[1]
            out_lines.append([rows, cols])
        return out_lines

    @classmethod
    def _filter_ridge_lines(
        cls, cwt, ridge_lines, window_size=None, min_length=None, min_snr=1, noise_perc=10
    ):
        """
        Filter ridge lines according to prescribed criteria. Intended
        to be used for finding relative maxima.

        Parameters
        ----------
        cwt : 2-D ndarray
            Continuous wavelet transform from which the `ridge_lines` were defined.
        ridge_lines : 1-D sequence
            Each element should contain 2 sequences, the rows and columns
            of the ridge line (respectively).
        window_size : int, optional
            Size of window to use to calculate noise floor.
            Default is ``cwt.shape[1] / 20``.
        min_length : int, optional
            Minimum length a ridge line needs to be acceptable.
            Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
        min_snr : float, optional
            Minimum SNR ratio. Default 1. The signal is the value of
            the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
            noise is the `noise_perc`th percentile of datapoints contained within a
            window of `window_size` around ``cwt[0, loc]``.
        noise_perc : float, optional
            When calculating the noise floor, percentile of data points
            examined below which to consider noise. Calculated using
            scipy.stats.scoreatpercentile.

        References
        ----------
        Bioinformatics (2006) 22 (17): 2059-2065. :doi:`10.1093/bioinformatics/btl355`
        http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
        """
        num_points = cwt.shape[1]
        if min_length is None:
            min_length = np.ceil(cwt.shape[0] / 4)
        if window_size is None:
            window_size = np.ceil(num_points / 20)
        window_size = int(window_size)
        hf_window, odd = divmod(window_size, 2)
        # Filter based on SNR
        row_one = cwt[0, :]
        noises = np.zeros_like(row_one)
        for ind, val in enumerate(row_one):
            window_start = max(ind - hf_window, 0)
            window_end = min(ind + hf_window + odd, num_points)
            noises[ind] = scoreatpercentile(row_one[window_start:window_end], per=noise_perc)

        def filt_func(line):
            # Keep only lines that are long enough and whose base-scale SNR
            # clears the threshold
            if len(line[0]) < min_length:
                return False
            snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
            if snr < min_snr:
                return False
            return True

        return list(filter(filt_func, ridge_lines))

    @classmethod
    def find_peaks_cwt(
        cls,
        vector,
        widths,
        wavelet=None,
        max_distances=None,
        gap_thresh=None,
        min_length=None,
        min_snr=1,
        noise_perc=10,
        noise_window_size=None,
    ):
        """
        Find peaks in a 1-D array with wavelet transformation.
        The general approach is to smooth `vector` by convolving it with
        `wavelet(width)` for each width in `widths`. Relative maxima which
        appear at enough length scales, and with sufficiently high SNR, are
        accepted.

        Parameters
        ----------
        vector : ndarray
            1-D array in which to find the peaks.
        widths : sequence
            1-D array of widths to use for calculating the CWT matrix. In general,
            this range should cover the expected width of peaks of interest.
        wavelet : callable, optional
            Should take two parameters and return a 1-D array to convolve
            with `vector`. The first parameter determines the number of points
            of the returned wavelet array, the second parameter is the scale
            (`width`) of the wavelet. Should be normalized and symmetric.
            Default is the ricker wavelet.
        max_distances : ndarray, optional
            At each row, a ridge line is only connected if the relative max at
            row[n] is within ``max_distances[n]`` from the relative max at
            ``row[n+1]``. Default value is ``widths/4``.
        gap_thresh : float, optional
            If a relative maximum is not found within `max_distances`,
            there will be a gap. A ridge line is discontinued if there are more
            than `gap_thresh` points without connecting a new relative maximum.
            Default is the first value of the widths array i.e. widths[0].
        min_length : int, optional
            Minimum length a ridge line needs to be acceptable.
            Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
        min_snr : float, optional
            Minimum SNR ratio. Default 1. The signal is the value of
            the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
            noise is the `noise_perc`th percentile of datapoints contained within a
            window of `window_size` around ``cwt[0, loc]``.
        noise_perc : float, optional
            When calculating the noise floor, percentile of data points
            examined below which to consider noise. Calculated using
            `stats.scoreatpercentile`. Default is 10.
        noise_window_size : int, optional
            Window size used for the noise-floor estimate; forwarded to
            ``_filter_ridge_lines``. Default is ``len(vector) / 20``.

        Returns
        -------
        peaks_indices : ndarray
            Indices of the locations in the `vector` where peaks were found.
            The list is sorted.

        See Also
        --------
        cwt
            Continuous wavelet transform.
        find_peaks
            Find peaks inside a signal based on peak properties.

        Notes
        -----
        This approach was designed for finding sharp peaks among noisy data,
        however with proper parameter selection it should function well for
        different peak shapes.
        The algorithm is as follows:
        1. Perform a continuous wavelet transform on `vector`, for the supplied
           `widths`. This is a convolution of `vector` with `wavelet(width)` for
           each width in `widths`. See `cwt`
        2. Identify "ridge lines" in the cwt matrix. These are relative maxima
           at each row, connected across adjacent rows. See identify_ridge_lines
        3. Filter the ridge_lines using filter_ridge_lines.

        References
        ----------
        .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
           :doi:`10.1093/bioinformatics/btl355`
           http://bioinformatics.oxfordjournals.org/content/22/17/2059.long

        Examples
        --------
        >>> from scipy import signal
        >>> xs = np.arange(0, np.pi, 0.05)
        >>> data = np.sin(xs)
        >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
        >>> peakind, xs[peakind], data[peakind]
        ([32], array([ 1.6]), array([ 0.9995736]))
        """
        widths = np.asarray(widths)
        if gap_thresh is None:
            gap_thresh = np.ceil(widths[0])
        if max_distances is None:
            max_distances = widths / 4.0
        if wavelet is None:
            wavelet = ricker
        if noise_window_size is None:
            noise_window_size = np.ceil(len(vector) / 20)
        cwt_dat = cwt(vector, wavelet, widths)
        ridge_lines = PeakFinder._identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
        filtered = PeakFinder._filter_ridge_lines(
            cwt_dat,
            ridge_lines,
            min_length=min_length,
            min_snr=min_snr,
            noise_perc=noise_perc,
            window_size=noise_window_size,
        )
        max_locs = np.asarray([x[1][0] for x in filtered])
        max_locs.sort()
        return max_locs
__all__ = ["PeakFinder"]
|
dmyersturnbull/dscience | dscience/ml/sklearn_utils.py | import subprocess
from pathlib import Path
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from dscience.core import PathLike
from typing import Sequence
class ClassificationUtils:
    @classmethod
    def viz_tree(
        cls, tree: DecisionTreeClassifier, classes: Sequence[str], path: PathLike, **kwargs
    ) -> Path:
        """
        Plots a single tree from a DecisionTreeClassifier to a path.

        Renders via Graphviz ``dot``; the output format is taken from the
        suffix of ``path`` (e.g. ``.png`` or ``.svg``).
        :param tree: A fitted DecisionTreeClassifier
        :param classes: Class names shown in the plot
        :param path: Output image path
        :param kwargs: Forwarded to ``sklearn.tree.export_graphviz``
        :return: The output path
        :raises subprocess.CalledProcessError: If the ``dot`` command fails
        """
        path = Path(path)
        dotpath = path.with_suffix(".dot")
        export_graphviz(tree, class_names=classes, out_file=str(dotpath), label="none", **kwargs)
        try:
            command = ["dot", "-T" + path.suffix.lstrip("."), str(dotpath), "-o", str(path)]
            subprocess.check_output(command)
        finally:
            # Bug fix: the intermediate .dot file leaked when `dot` failed
            dotpath.unlink()
        return path
__all__ = ["ClassificationUtils"]
|
dmyersturnbull/dscience | dscience/ml/accuracy_frame.py | <reponame>dmyersturnbull/dscience<filename>dscience/ml/accuracy_frame.py
from __future__ import annotations
import logging
from typing import Sequence, Union, Iterable
import pandas as pd
from dscience.core.extended_df import *
logger = logging.getLogger("dscience")
class AccuracyCountFrame(SimpleFrame):
    """A frame of per-label score aggregates (counts or percentages)."""
    pass
class AccuracyFrame(OrganizingFrame):
    """
    Has columns 'label', 'score', 'prediction', and 'score_for_prediction', with one row per prediction.
    """

    @classmethod
    def required_columns(cls) -> Sequence[str]:
        return ["label", "prediction", "score", "score_for_prediction"]

    def counts(self) -> AccuracyCountFrame:
        """Number of correct predictions (label == prediction) per label."""
        df = self.copy()
        df["score"] = df["label"] == df["prediction"]
        # Bug fix: previously grouped `self` (discarding the correctness
        # column just computed on `df`) and wrapped the result twice
        df = df.groupby("label").sum()[["score"]]
        return AccuracyCountFrame(df.reset_index())

    def means(self) -> AccuracyCountFrame:
        """Percent of correct predictions per label."""
        df = self.copy()
        df["score"] = df["label"] == df["prediction"]
        # Bug fix: previously grouped `self` instead of `df`
        df = df.groupby("label").mean()[["score"]] * 100.0
        return AccuracyCountFrame(df.reset_index())

    def with_label(self, label: Union[str, Iterable[str]]) -> AccuracyFrame:
        """Subset to rows whose label is `label` (or is in `label`)."""
        if isinstance(label, str):
            return self.__class__.retype(self[self["label"] == label])
        else:
            return self.__class__.retype(self[self["label"].isin(label)])

    def boot_mean(self, b: int, q: float = 0.95) -> BaseFrame:
        """
        Calculates a confidence interval of the mean from bootstrap over the rows.
        :param b: The number of bootstrap samples
        :param q: The high quantile, between 0 and 1.
        :return: A DataFrame with columns 'label', 'lower', and 'upper'.
        """
        data = []
        for _ in range(b):
            samples = self.sample(len(self), replace=True)
            data.append(samples.groupby("label")[["score"]].mean().reset_index())
        # Bug fix: `upper` was accidentally a 1-tuple (trailing comma), and the
        # rename targeted the 'label' column, which no longer exists after the
        # groupby — the subsequent merge could never succeed.
        pooled = pd.concat(data)
        upper = pooled.groupby("label")["score"].quantile(q).rename("upper")
        lower = pooled.groupby("label")["score"].quantile(1 - q).rename("lower")
        return BaseFrame(pd.merge(upper, lower, left_index=True, right_index=True))
class AccuracyFrames:
    """Operations over multiple ``AccuracyFrame`` instances."""

    @classmethod
    def concat(cls, *views: Sequence[AccuracyFrame]) -> AccuracyFrame:
        """Stack the given frames vertically into a single AccuracyFrame."""
        combined = pd.concat(views, sort=False)
        return AccuracyFrame(combined)
__all__ = ["AccuracyFrame", "AccuracyFrames"]
|
dmyersturnbull/dscience | dscience/calc/morphology.py | import numpy as np
from PIL import Image
from skimage.io import imread
from skimage.color import rgb2gray
from skimage.morphology import convex_hull_image
class ConvexHullCropper:
    """
    Auto-crops images based on their convex hull, assuming a white or transparent background.
    Idea & code from <NAME>'s answer on
    https://stackoverflow.com/questions/14211340/automatically-cropping-an-image-with-python-pil
    """

    def crop(self, im_array: np.array) -> Image:
        """Crop the array to the bounding box of its convex hull."""
        # Threshold the inverted grayscale into a binary mask
        mask = 1 - rgb2gray(im_array)
        mask[mask <= 0.5] = 0
        mask[mask > 0.5] = 1
        # Convex hull of the foreground, then its bounding box
        hull = convex_hull_image(mask)
        bbox = Image.fromarray((hull * 255).astype(np.uint8)).getbbox()
        return Image.fromarray(im_array).crop(bbox)

    def crop_file(self, from_path: str, to_path: str) -> None:
        """Read an image file, crop it, and save the result."""
        cropped_image = self.crop(imread(from_path))
        cropped_image.save(to_path)

    def __repr__(self):
        return type(self).__name__

    def __str__(self):
        return repr(self)
__all__ = ["ConvexHullCropper"]
|
dmyersturnbull/dscience | dscience/ml/trainable.py | import abc
from datetime import datetime
from pathlib import Path
from typing import Type
import numpy as np
import pandas as pd
from dscience.tools.filesys_tools import FilesysTools
from dscience.core import PathLike
class AbstractSaveLoad(metaclass=abc.ABCMeta):
    """Interface for objects that can persist themselves to, and restore from, a path."""

    def save(self, path: PathLike) -> None:
        """Persist this object at ``path``. Must be overridden."""
        raise NotImplementedError()

    # noinspection PyAttributeOutsideInit
    def load(self, path: PathLike):
        """Restore state from ``path``; subclasses conventionally return ``self``."""
        raise NotImplementedError()
class SaveableTrainable(AbstractSaveLoad):
    """
    A simpler saveable.
    Saves and loads a .info file with these properties.
    To implement, just override save() and load(), and have each call its supermethod
    """

    def __init__(self):
        # Arbitrary metadata persisted alongside the model
        self.info = {}

    def save(self, path: PathLike) -> None:
        """Write ``self.info`` as JSON next to ``path`` (suffix + '.info')."""
        # Bug fix: load() coerced to Path but save() assumed one already
        path = Path(path)
        FilesysTools.save_json(self.info, path.with_suffix(path.suffix + ".info"))

    def load(self, path: PathLike):
        """Read the .info JSON and revive datetimes and arrays; returns self."""
        path = Path(path)
        self.info = FilesysTools.load_json(path.with_suffix(path.suffix + ".info"))

        def fix(key, value):
            # JSON stores datetimes as ISO strings and arrays as lists
            if key in ["started", "finished"]:
                # Bug fix: was datetime.isoformat(value), which cannot parse
                # the serialized string back into a datetime
                return datetime.fromisoformat(value)
            elif isinstance(value, list):
                return np.array(value)
            else:
                return value

        self.info = {k: fix(k, v) for k, v in self.info.items()}
        return self
class SaveableTrainableCsv(SaveableTrainable, metaclass=abc.ABCMeta):
    """
    A SaveableTrainable whose tabular ``data`` attribute is persisted as CSV
    alongside the .info JSON written by the superclass.
    NOTE(review): save() writes the DataFrame index but load() does not pass
    index_col, so a round-trip may introduce an unnamed index column — confirm
    this is intended.
    """

    def save(self, path: PathLike):
        """Save the .info JSON (via super) and ``self.data`` as CSV at ``path``."""
        path = Path(path)
        super().save(path)
        self.data.to_csv(path)

    # noinspection PyAttributeOutsideInit
    def load(self, path: PathLike):
        """Load the .info JSON (via super) and the CSV into ``self.data``; returns self."""
        path = Path(path)
        super().load(path)
        self.data = pd.read_csv(path)
        return self
class SaveLoadCsv(AbstractSaveLoad, metaclass=abc.ABCMeta):
    """
    Has an attribute (property) called `data`, saved and loaded as CSV.
    """

    @property
    @abc.abstractmethod
    def data(self) -> pd.DataFrame:
        raise NotImplementedError()

    @data.setter
    def data(self, df: pd.DataFrame):
        raise NotImplementedError()

    @property
    def df_class(self) -> Type[pd.DataFrame]:
        """The DataFrame subclass used to wrap loaded data."""
        return pd.DataFrame

    def save(self, path: PathLike):
        """Write ``self.data`` to ``path`` as CSV, checking its type first."""
        if not isinstance(self.data, self.df_class):
            raise TypeError("Type {} is not a {}".format(type(self.data), self.df_class))
        path = Path(path)
        pd.DataFrame(self.data).to_csv(path)

    def load(self, path: PathLike):
        """Read ``path`` as CSV into ``self.data`` (wrapped in ``df_class``)."""
        path = Path(path)
        self.data = self.df_class(pd.read_csv(path))
        # Consistency fix: sibling loaders (e.g. SaveableTrainableCsv) return self
        return self
__all__ = ["AbstractSaveLoad", "SaveableTrainable", "SaveableTrainableCsv", "SaveLoadCsv"]
|
ab93/Depression-Identification | src/main/config.py | <reponame>ab93/Depression-Identification
import os
# All paths are relative to the project root.

# --- Participant splits for classification ---
TRAIN_SPLIT_FILE = os.path.join('data','classification_data','training_split.csv')
TEST_SPLIT_FILE = os.path.join('data','classification_data','test_split.csv')
VAL_SPLIT_FILE = os.path.join('data','classification_data','dev_split.csv')
# --- Question-category feature directories ---
# NOTE(review): presumably discriminative/non-discriminative and
# positive/negative question splits — confirm against the data pipeline.
D_ND_DIR = os.path.join('data','disc_nondisc')
POS_NEG_DIR = os.path.join('data','pos_neg')
# --- Selected features, regular scale (classification and estimation) ---
SEL_FEAT_TRAIN_REGULAR_CLASSIFY = os.path.join('data','selected_features','regular','classify','train')
SEL_FEAT_TEST_REGULAR_CLASSIFY = os.path.join('data','selected_features','regular','classify','test')
SEL_FEAT_VAL_REGULAR_CLASSIFY = os.path.join('data','selected_features','regular','classify','val')
SEL_FEAT_TRAIN_REGULAR_ESTIMATE = os.path.join('data','selected_features','regular','estimate','train')
SEL_FEAT_VAL_REGULAR_ESTIMATE = os.path.join('data','selected_features','regular','estimate','val')
SEL_FEAT_TEST_REGULAR_ESTIMATE = os.path.join('data','selected_features','regular','estimate','test')
# --- All features, regular scale ---
ALL_FEAT_TRAIN_REGULAR_CLASSIFY = os.path.join('data','all_features','regular','classify','train')
ALL_FEAT_TEST_REGULAR_CLASSIFY = os.path.join('data','all_features','regular','classify','test')
ALL_FEAT_VAL_REGULAR_CLASSIFY = os.path.join('data','all_features','regular','classify','val')
ALL_FEAT_TRAIN_REGULAR_ESTIMATE = os.path.join('data','all_features','regular','estimate','train')
ALL_FEAT_VAL_REGULAR_ESTIMATE = os.path.join('data','all_features','regular','estimate','val')
ALL_FEAT_TEST_REGULAR_ESTIMATE = os.path.join('data','all_features','regular','estimate','test')
# --- Selected features, normalized ---
SEL_FEAT_TRAIN_NORMALIZED_CLASSIFY = os.path.join('data','selected_features','normalize','classify','train')
SEL_FEAT_VAL_NORMALIZED_CLASSIFY = os.path.join('data','selected_features','normalize','classify','val')
SEL_FEAT_TRAIN_NORMALIZED_ESTIMATE = os.path.join('data','selected_features','normalize','estimate','train')
SEL_FEAT_VAL_NORMALIZED_ESTIMATE = os.path.join('data','selected_features','normalize','estimate','val')
# --- Result directories ---
RESULTS_CLASSIFY = os.path.join('results','grid_search','classification')
RESULTS_ESTIMATE = os.path.join('results','grid_search','regression')
SEL_FEAT = os.path.join('data','selected_features')
ALL_FEAT = os.path.join('data','all_features')
ANOVA_DIR = os.path.join('results','anova')
GRID_SEARCH_CLF_DIR = os.path.join('results','grid_search','classification')
GRID_SEARCH_REG_DIR = os.path.join('results','grid_search','regression')
|
ab93/Depression-Identification | src/obsolete/grid_search_dt_lr.py | import src.main.config
import os
import pandas as pd
import numpy as np
from pprint import pprint
def main(mode, classifier):
    """
    Load a grid-search result table and return the top-5 rows by score.

    :param mode: Feature modality, e.g. "acoustic", "visual", or "linguistic"
    :param classifier: "DT" (tab-separated .txt, score in column 4) or
                       anything else for LR-style csv (score in column 6)
    :return: DataFrame with the 5 highest-scoring parameter rows
    """
    if classifier == "DT":
        file_type = ".txt"
        separator = "\t"
        column = 4
    else:
        file_type = ".csv"
        separator = ","
        column = 6
    data = pd.read_csv(
        src.main.config.GRID_SEARCH_CLF_DIR + "/" + mode + "_" + classifier + "_PN" + file_type,
        sep=separator,
        header=None,
    )
    # Bug fix: DataFrame.sort() was removed in pandas 0.20; use sort_values
    result = data.sort_values(by=column, ascending=False)
    result = result.iloc[0:5]
    return result
def write():
    """
    Combine the per-modality top-5 grid-search results for LR and DT, write
    them to "refined" CSVs, and return the unique values per parameter column.

    :return: (res_LR, res_DT) dicts mapping column index -> unique values
    """
    # Bug fix: DataFrame.append() was removed in pandas 2.0; use pd.concat
    result_LR = pd.concat(
        [main("acoustic", "LR"), main("visual", "LR"), main("linguistic", "LR")]
    )
    result_LR.to_csv(src.main.config.GRID_SEARCH_CLF_DIR + "/" + "refined_LR.csv", index=None)
    result_DT = pd.concat(
        [main("acoustic", "DT"), main("visual", "DT"), main("linguistic", "DT")]
    )
    result_DT.to_csv(src.main.config.GRID_SEARCH_CLF_DIR + "/" + "refined_DT.csv", index=None)
    # Unique values per parameter column (the last column is the score)
    res_LR = {}
    res_DT = {}
    for i in range(result_LR.shape[1] - 1):
        res_LR[i] = result_LR[i].unique()
    for i in range(result_DT.shape[1] - 1):
        res_DT[i] = result_DT[i].unique()
    return res_LR, res_DT
def ret_func():
    """
    Merge related hyper-parameter value sets across (and within) the LR and
    DT grid-search tables, returning the consolidated dicts.
    """
    res_LR, res_DT = write()

    def merged(a, b):
        # Same set construction as before, just factored out
        return list(set(a).union(set(b)))

    res_LR[0] = merged(res_LR[0], res_DT[0])
    del res_DT[0]
    res_LR[1] = merged(res_LR[1], res_DT[3])
    del res_DT[3]
    res_LR[2] = merged(res_LR[2], res_LR[3])
    del res_LR[3]
    res_LR[4] = merged(res_LR[4], res_LR[5])
    del res_LR[5]
    res_DT[1] = merged(res_DT[1], res_DT[2])
    del res_DT[2]
    return res_LR, res_DT
#ret_func()
|
ab93/Depression-Identification | src/models/regressor.py | import sys
import operator
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, clone
from sklearn.externals import six
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
class MetaRegressor(BaseEstimator, RegressorMixin):
    """ A combined multi-class regressor
    Parameters
    ----------
    regressors : array-like, shape = [n_regressors]
    weights : array-like, shape = [n_regressors]
        Optional, default: None
        If a list of `int` or `float` values are
        provided, the regressors are weighted by importance;
        Uses uniform weights if `weights=None`.
    """

    def __init__(self, regressors, weights=None):
        self.regressors = regressors
        self.weights = weights

    def fit(self, Xs, ys, nested=True):
        """ Fit regressors
        Parameters
        ----------
        Xs : List of {array-like, sparse matrix},
            length = number of regressors
            List of matrices of training samples
        ys : List of array-like,
            length = number of regressors
            List of vectors of target class labels
        nested: Bool (default = True)
            If True, each element of Xs/ys is itself a list of chunks,
            stacked (vstack/hstack) before fitting.
        Returns
        -------
        self : object
        """
        if (not isinstance(Xs, list)) or (not isinstance(ys, list)):
            # Bug fix: previously raised a bare TypeError followed by an
            # unreachable sys.exit() call
            raise TypeError("Xs and ys must be lists")
        assert len(Xs) == len(ys) == len(self.regressors)
        if nested:
            # Bug fix: map() is lazy in Python 3; materialize the results
            # because Xs[i]/ys[i] are indexed below
            Xs = list(map(np.vstack, Xs))
            ys = list(map(np.hstack, ys))
        self.regressors_ = []
        for i, reg in enumerate(self.regressors):
            fitted_reg = clone(reg).fit(Xs[i], ys[i])
            self.regressors_.append(fitted_reg)
        return self

    def predict(self, Xs):
        """ Predict class labels.
        Parameters
        ----------
        Xs : List of {array-like, sparse matrix},
            length = number of regressors
            List of matrices of training samples
        Returns
        -------
        weighted_pred : array-like, shape = [n_samples]
            Predicted (weighted) target values
        """
        preds = []
        for index, X in enumerate(Xs):
            # Average the per-chunk predictions of regressor `index`
            pred = [np.mean(self.regressors_[index].predict(P), axis=0) for P in X]
            preds.append(pred)
        preds = np.asarray(preds)
        weighted_pred = np.average(preds, axis=0, weights=self.weights)
        return weighted_pred

    def score(self, Xs, y_true, scoring='rmse'):
        """
        Returns the RMSE by default; also supports 'r2' and 'mean_abs_error'.
        Parameters
        ----------
        Xs : List of {array-like, sparse matrix},
            length = number of regressors
            List of matrices of training samples
        y_true: Single vectors of true y values
        scoring: One of 'rmse', 'r2', 'mean_abs_error'
        """
        y_true = np.asarray(y_true)
        if scoring == 'r2':
            return r2_score(y_true, self.predict(Xs))
        elif scoring == 'mean_abs_error':
            return mean_absolute_error(y_true, self.predict(Xs))
        elif scoring == 'rmse':
            return np.sqrt(mean_squared_error(y_true, self.predict(Xs)))
class LateFusionRegressor(BaseEstimator, RegressorMixin):
    """
    Weighted Combined Regressor (late fusion over modalities).

    Trains one regressor per modality and averages their predictions,
    optionally weighting each modality.
    """
    def __init__(self, regressors, weights=None):
        self.regressors = regressors  # list of regressors, one per modality
        self.weights = weights  # weights for each of the regressors

    def fit(self, Xs, ys):
        """
        Trains on the data.
        Xs = [[], [], []] (one matrix for each mode)
        ys = [[], [], []]

        Returns: self

        Raises TypeError when Xs/ys are not lists — the original silently
        skipped fitting in that case, leaving the model unusable.
        """
        if not (isinstance(Xs, list) and isinstance(ys, list)):
            raise TypeError("Xs and ys must be lists, one entry per modality")
        assert(len(Xs) == len(ys) == len(self.regressors))
        self.regressors_ = []  # store trained regressors
        for idx, reg in enumerate(self.regressors):
            # clone() keeps the configured regressors untouched
            fitted_reg = clone(reg).fit(Xs[idx], ys[idx])
            self.regressors_.append(fitted_reg)
        return self

    def predict(self, Xs):
        """
        Predicts new data instances

        Args:
            Xs = [[], [], []] (one matrix for each mode)

        Returns:
            weighted_pred: Weighted prediction of the target
        """
        preds = []
        for mode_idx, reg in enumerate(self.regressors_):
            preds.append(reg.predict(Xs[mode_idx]))
        preds = np.asarray(preds)
        weighted_preds = np.average(preds, axis=0, weights=self.weights)
        return weighted_preds

    def score(self, Xs, y_true, scoring='rmse'):
        """
        Returns the RMSE score by default.

        Parameters
        ----------
        Xs : List of {array-like, sparse matrix},
            length = number of regressors
            List of matrices of training samples
        y_true: Single vectors of true y values
        scoring: one of 'r2', 'mean_abs_error' or 'rmse' (default)

        Raises
        ------
        ValueError for an unknown `scoring` option (the original
        silently returned None).
        """
        y_true = np.asarray(y_true)
        # Predict once and reuse for whichever metric was requested
        preds = self.predict(Xs)
        if scoring == 'r2':
            return r2_score(y_true, preds)
        elif scoring == 'mean_abs_error':
            return mean_absolute_error(y_true, preds)
        elif scoring == 'rmse':
            return np.sqrt(mean_squared_error(y_true, preds))
        raise ValueError("Unknown scoring option: %r" % (scoring,))
|
ab93/Depression-Identification | src/feature_extract/extract_COVAREP_FORMANT.py | import pandas as pd
from glob import glob
import numpy as np
import re
import csv
import sys
# Utterance-category lookups populated by readHelperData() from
# IdentifyingFollowUps.csv (keyed by the first CSV column, value is the
# second column -- presumably utterance text; verify against the CSV).
followUp = {}  # follow-up prompts; extend a running answer span
ack = {}  # acknowledgement utterances; extend a running answer span
nonIntimate = {}  # non-intimate questions; close the current answer span
intimate = {}  # intimate questions; may start a new answer span
# (participantNo, question) -> [startTime, endTime] of the participant's
# answer, filled in by readTranscript()
featureList = {}
'''headers for COVAREP features'''
header = ["video", "question", "starttime", "endtime", 'F0_mean', 'VUV_mean', 'NAQ_mean', 'QOQ_mean', 'H1H2_mean',
'PSP_mean', 'MDQ_mean', 'peakSlope_mean', 'Rd_mean', 'Rd_conf_mean', 'creak_mean', 'MCEP_0_mean',
'MCEP_1_mean', 'MCEP_2_mean', 'MCEP_3_mean', 'MCEP_4_mean', 'MCEP_5_mean', 'MCEP_6_mean', 'MCEP_7_mean',
'MCEP_8_mean', 'MCEP_9_mean', 'MCEP_10_mean', 'MCEP_11_mean', 'MCEP_12_mean', 'MCEP_13_mean', 'MCEP_14_mean',
'MCEP_15_mean', 'MCEP_16_mean', 'MCEP_17_mean', 'MCEP_18_mean', 'MCEP_19_mean', 'MCEP_20_mean',
'MCEP_21_mean', 'MCEP_22_mean', 'MCEP_23_mean', 'MCEP_24_mean', 'HMPDM_0_mean', 'HMPDM_1_mean',
'HMPDM_2_mean', 'HMPDM_3_mean', 'HMPDM_4_mean', 'HMPDM_5_mean', 'HMPDM_6_mean', 'HMPDM_7_mean',
'HMPDM_8_mean', 'HMPDM_9_mean', 'HMPDM_10_mean', 'HMPDM_11_mean', 'HMPDM_12_mean', 'HMPDM_13_mean',
'HMPDM_14_mean', 'HMPDM_15_mean', 'HMPDM_16_mean', 'HMPDM_17_mean', 'HMPDM_18_mean', 'HMPDM_19_mean',
'HMPDM_20_mean', 'HMPDM_21_mean', 'HMPDM_22_mean', 'HMPDM_23_mean', 'HMPDM_24_mean', 'HMPDD_0_mean',
'HMPDD_1_mean', 'HMPDD_2_mean', 'HMPDD_3_mean', 'HMPDD_4_mean', 'HMPDD_5_mean', 'HMPDD_6_mean',
'HMPDD_7_mean', 'HMPDD_8_mean', 'HMPDD_9_mean', 'HMPDD_10_mean', 'HMPDD_11_mean', 'HMPDD_12_mean',
'F0_stddev', 'VUV_stddev', 'NAQ_stddev', 'QOQ_stddev', 'H1H2_stddev', 'PSP_stddev', 'MDQ_stddev',
'peakSlope_stddev', 'Rd_stddev', 'Rd_conf_stddev', 'creak_stddev', 'MCEP_0_stddev', 'MCEP_1_stddev',
'MCEP_2_stddev', 'MCEP_3_stddev', 'MCEP_4_stddev', 'MCEP_5_stddev', 'MCEP_6_stddev', 'MCEP_7_stddev',
'MCEP_8_stddev', 'MCEP_9_stddev', 'MCEP_10_stddev', 'MCEP_11_stddev', 'MCEP_12_stddev', 'MCEP_13_stddev',
'MCEP_14_stddev', 'MCEP_15_stddev', 'MCEP_16_stddev', 'MCEP_17_stddev', 'MCEP_18_stddev', 'MCEP_19_stddev',
'MCEP_20_stddev', 'MCEP_21_stddev', 'MCEP_22_stddev', 'MCEP_23_stddev', 'MCEP_24_stddev', 'HMPDM_0_stddev',
'HMPDM_1_stddev', 'HMPDM_2_stddev', 'HMPDM_3_stddev', 'HMPDM_4_stddev', 'HMPDM_5_stddev', 'HMPDM_6_stddev',
'HMPDM_7_stddev', 'HMPDM_8_stddev', 'HMPDM_9_stddev', 'HMPDM_10_stddev', 'HMPDM_11_stddev', 'HMPDM_12_stddev',
'HMPDM_13_stddev', 'HMPDM_14_stddev', 'HMPDM_15_stddev', 'HMPDM_16_stddev', 'HMPDM_17_stddev',
'HMPDM_18_stddev', 'HMPDM_19_stddev', 'HMPDM_20_stddev', 'HMPDM_21_stddev', 'HMPDM_22_stddev',
'HMPDM_23_stddev', 'HMPDM_24_stddev', 'HMPDD_0_stddev', 'HMPDD_1_stddev', 'HMPDD_2_stddev', 'HMPDD_3_stddev',
'HMPDD_4_stddev', 'HMPDD_5_stddev', 'HMPDD_6_stddev', 'HMPDD_7_stddev', 'HMPDD_8_stddev', 'HMPDD_9_stddev',
'HMPDD_10_stddev', 'HMPDD_11_stddev', 'HMPDD_12_stddev', 'gender']
'''headers for FORMANT features'''
header_f = ["video", "question", "starttime", "endtime", 'formant1_mean', 'formant2_mean', 'formant3_mean',
'formant4_mean', 'formant5_mean', 'formant1_stddev', 'formant2_stddev', 'formant3_stddev',
'formant4_stddev', 'formant5_stddev', 'gender']
questionType_DND = {}  # question -> annotation from DND_Annotations.csv ('D' = discriminative; other values treated as non-discriminative)
questionType_PN = {}  # question -> annotation from PN_Annotations.csv ('P' = positive; other values treated as negative)
questionAnswers = {}  # unused in this module -- NOTE(review): confirm whether any caller relies on it
'''
Reads DND questions and PN questions.
Retrieves acknowledgements, follow ups, intimate and non intimate questions and stores in global variables
'''
def readHelperData():
    """Load the annotation CSVs into the module-level lookup dicts.

    Fills questionType_DND / questionType_PN with the question
    annotations, and followUp / ack / nonIntimate / intimate with the
    utterances tagged in IdentifyingFollowUps.csv.
    """
    global followUp, ack, nonIntimate, intimate, questionType_PN, questionType_DND
    utterrances = pd.read_csv('data/misc/IdentifyingFollowUps.csv')
    disc_nondisc = pd.read_csv('data/misc/DND_Annotations.csv')
    pos_neg = pd.read_csv('data/misc/PN_Annotations.csv')
    # Discriminative/Non-discriminative annotations
    for question, qType in zip(disc_nondisc['Questions'], disc_nondisc['Annotations']):
        questionType_DND[question] = qType
    # Positive/Negative annotations
    for question, qType in zip(pos_neg['Questions'], pos_neg['Annotations']):
        questionType_PN[question] = qType
    # Dispatch each tagged utterance into its category dict; tags are
    # mutually exclusive, so this matches the original elif chain.
    tag_to_dict = {"#follow_up": followUp, "#ack": ack,
                   "#non_int": nonIntimate, "#int": intimate}
    for item in utterrances.itertuples():
        target = tag_to_dict.get(item[3])
        if target is not None and item[1] not in target:
            target[item[1]] = item[2]
'''
Reads transcripts, captures the start and end times of the answers for most frequent intimate questions. Also captures the start and end times of follow up questions that are following most frequent intimate questions
'''
def readTranscript():
    """Scan every transcript and record answer time spans per question.

    For each participant transcript under sys.argv[1], finds each
    intimate question with a DND annotation and records the
    [startTime, endTime] window of the participant's answer in the
    global featureList, keyed by (participantNo, question).  Follow-up
    and acknowledgement prompts extend the running span; non-intimate
    or unannotated intimate questions close it.
    """
    global featureList
    transcriptFiles = glob(sys.argv[1] + '[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
    for i in range(0, len(transcriptFiles)):
        t = pd.read_csv(transcriptFiles[i], delimiter=',|\t', engine='python')
        t = t.fillna("")
        captureStarted = False
        startTime = 0.0
        endTime = 0.0
        prevQuestion = ""
        # Participant id is embedded in the path: .../NNN_TRANSCRIPT.csv
        participantNo = transcriptFiles[i][-18:-15]
        for j in xrange(len(t)):
            # Ellie's utterances may embed the canonical question in
            # trailing parentheses; fall back to the raw value otherwise.
            question = re.search(".*\((.*)\)$", t.iloc[j]['value'])
            if question is not None:
                question = question.group(1)
            else:
                question = t.iloc[j]['value']
            question = question.strip()
            if t.iloc[j]['speaker'] == 'Ellie':
                endTime = t.iloc[j]['start_time']
                if question in nonIntimate and captureStarted:
                    # A non-intimate prompt closes the current answer span
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    captureStarted = False
                elif question in intimate and question in questionType_DND and captureStarted:
                    # New annotated question: close the previous span and
                    # immediately start capturing the new one
                    endTime = t.iloc[j]['start_time']
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    startTime = t.iloc[j]['start_time']
                    endTime = t.iloc[j]['stop_time']
                    prevQuestion = question
                elif question in intimate and question in questionType_DND and not captureStarted:
                    startTime = t.iloc[j]['start_time']
                    endTime = t.iloc[j]['stop_time']
                    prevQuestion = question
                    captureStarted = True
                elif question in intimate and question not in questionType_DND and captureStarted:
                    # Intimate but unannotated question also closes the span
                    endTime = t.iloc[j]['start_time']
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    captureStarted = False
                elif (question in followUp or question in ack) and captureStarted:
                    # BUG FIX: 'and' binds tighter than 'or', so the original
                    # condition parsed as "followUp or (ack and captureStarted)"
                    # and extended endTime for follow-ups even when no
                    # capture was in progress.
                    endTime = t.iloc[j]['stop_time']
            elif t.iloc[j]['speaker'] == 'Participant' and captureStarted:
                # endTime=t.iloc[j]['stop_time']
                continue
'''
Generates features from FORMANT files considering the start and end times for each frequent intimate questions from PN list.
Features are generated by taking mean and std dev of all the features for every question for every video
'''
def readFORMANT_DND():
print 'FORMANT DND'
groupByQuestion = {}
gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
dFile = open('data/disc_nondisc/discriminative_FORMANT.csv', 'w')
ndFile = open('data/disc_nondisc/nondiscriminative_FORMANT.csv', 'w')
dWriter = csv.writer(dFile)
ndWriter = csv.writer(ndFile)
dWriter.writerow(header_f)
ndWriter.writerow(header_f)
for item in featureList:
if item[0] not in groupByQuestion:
groupByQuestion[item[0]] = [(item[1], featureList[item])]
else:
groupByQuestion[item[0]].append((item[1], featureList[item]))
for item in groupByQuestion:
fileName = sys.argv[1] + item + '_P/' + item + '_FORMANT.csv'
f = pd.read_csv(fileName, delimiter=',|\t', engine='python')
print item
for instance in groupByQuestion[item]:
startTime = instance[1][0]
endTime = instance[1][1]
startFrame = startTime * 100
endFrame = endTime * 100
features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
vector = instance[1][:]
vector += features_mean
vector += features_stddev
vector.insert(0, item)
vector.insert(1, instance[0])
vector.append(gender[item])
vector = np.asarray(vector)
if questionType_DND[instance[0]] == 'D':
dWriter.writerow(vector)
else:
ndWriter.writerow(vector)
'''
Generates features from FORMANT files considering the start and end times for each frequent intimate questions from PN list.
Features are generated by taking mean and std dev of all the features for every question for every video
'''
def readFORMANT_PN():
print 'FORMANT PN'
groupByQuestion = {}
gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
pFile = open('data/pos_neg/positive_FORMANT.csv', 'w')
nFile = open('data/pos_neg/negative_FORMANT.csv', 'w')
pWriter = csv.writer(pFile)
nWriter = csv.writer(nFile)
pWriter.writerow(header_f)
nWriter.writerow(header_f)
for item in featureList:
if item[0] not in groupByQuestion:
groupByQuestion[item[0]] = [(item[1], featureList[item])]
else:
groupByQuestion[item[0]].append((item[1], featureList[item]))
for item in groupByQuestion:
fileName = sys.argv[1] + item + '_P/' + item + '_FORMANT.csv'
f = pd.read_csv(fileName, delimiter=',|\t', engine='python')
print item
for instance in groupByQuestion[item]:
startTime = instance[1][0]
endTime = instance[1][1]
startFrame = startTime * 100
endFrame = endTime * 100
features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
vector = instance[1][:]
vector += features_mean
vector += features_stddev
vector.insert(0, item)
vector.insert(1, instance[0])
vector.append(gender[item])
vector = np.asarray(vector)
if questionType_PN[instance[0]] == 'P':
pWriter.writerow(vector)
else:
nWriter.writerow(vector)
'''
Generates features from COVAREP files considering the start and end times for each frequent intimate questions from DND list.
Features are generated by taking mean and std dev of all the features for every question for every video
'''
def readCOVAREP_DND():
print 'COVAREP DND'
groupByQuestion = {}
gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
dFile = open('data/disc_nondisc/discriminative_COVAREP.csv', 'w')
ndFile = open('data/disc_nondisc/nondiscriminative_COVAREP.csv', 'w')
dWriter = csv.writer(dFile)
ndWriter = csv.writer(ndFile)
dWriter.writerow(header)
ndWriter.writerow(header)
for item in featureList:
if item[0] not in groupByQuestion:
groupByQuestion[item[0]] = [(item[1], featureList[item])]
else:
groupByQuestion[item[0]].append((item[1], featureList[item]))
for item in groupByQuestion:
fileName = sys.argv[1] + item + '_P/' + item + '_COVAREP.csv'
f = pd.read_csv(fileName, delimiter=',|\t', engine='python')
print item
for instance in groupByQuestion[item]:
startTime = instance[1][0]
endTime = instance[1][1]
startFrame = startTime * 100
endFrame = endTime * 100
features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
vector = instance[1][:]
vector += features_mean
vector += features_stddev
vector.insert(0, item)
vector.insert(1, instance[0])
vector.append(gender[item])
vector = np.asarray(vector)
if questionType_DND[instance[0]] == 'D':
dWriter.writerow(vector)
else:
ndWriter.writerow(vector)
'''
Generates features from COVAREP files considering the start and end times for each frequent intimate questions from PN list.
Features are generated by taking mean and std dev of all the features for every question for every video
'''
def readCOVAREP_PN():
print 'COVAREP PN'
groupByQuestion = {}
gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
pFile = open('data/pos_neg/positive_COVAREP.csv', 'w')
nFile = open('data/pos_neg/negative_COVAREP.csv', 'w')
pWriter = csv.writer(pFile)
nWriter = csv.writer(nFile)
pWriter.writerow(header)
nWriter.writerow(header)
for item in featureList:
if item[0] not in groupByQuestion:
groupByQuestion[item[0]] = [(item[1], featureList[item])]
else:
groupByQuestion[item[0]].append((item[1], featureList[item]))
for item in groupByQuestion:
fileName = sys.argv[1] + item + '_P/' + item + '_COVAREP.csv'
f = pd.read_csv(fileName, delimiter=',|\t', engine='python')
print item
for instance in groupByQuestion[item]:
startTime = instance[1][0]
endTime = instance[1][1]
startFrame = startTime * 100
endFrame = endTime * 100
features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
vector = instance[1][:]
vector += features_mean
vector += features_stddev
vector.insert(0, item)
vector.insert(1, instance[0])
vector.append(gender[item])
vector = np.asarray(vector)
if questionType_PN[instance[0]] == 'P':
pWriter.writerow(vector)
else:
nWriter.writerow(vector)
if __name__ == "__main__":
    # Pipeline: load question annotations, locate answer time spans in the
    # transcripts (sys.argv[1] = dataset root directory), then dump FORMANT
    # and COVAREP summary features for both annotation schemes.
    readHelperData()
    readTranscript()
    readFORMANT_DND()
    readFORMANT_PN()
    readCOVAREP_DND()
    readCOVAREP_PN()
|
ab93/Depression-Identification | src/helpers/plot.py | import os
import re
import sys
import csv
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
from src.main import config
def get_combined_data(file1, file2):
feature_df = pd.read_csv(file1)
feature_df = feature_df.append(pd.read_csv(file2))
train_split_df = pd.read_csv(config.TRAIN_SPLIT_FILE,
usecols=['Participant_ID', 'PHQ_Binary'])
feature_df = feature_df[feature_df['video'].isin(train_split_df['Participant_ID'])]
print "features: ", feature_df.shape
print "train_split: ", train_split_df.shape
train_split_dict = train_split_df.set_index('Participant_ID').T.to_dict()
del train_split_df
labels = np.zeros(feature_df.shape[0])
for i in xrange(feature_df.shape[0]):
video_id = feature_df.iat[i,0]
labels[i] = train_split_dict[video_id]['PHQ_Binary']
feature_df['label'] = pd.Series(labels, index=feature_df.index)
try:
feature_df.drop(['video','question','starttime','endtime'], inplace=True, axis=1)
except ValueError:
feature_df.drop(['video','question'], inplace=True, axis=1)
return feature_df
def plot_boxplot(df, dir_name):
    """Save one box plot per feature comparing the two label groups.

    Args:
        df: DataFrame whose last column is 'label'; every other column
            is treated as a feature to plot.
        dir_name: sub-directory of 'plots/' the .png files are saved to
            (must already exist -- savefig does not create it).
    """
    columns = df.columns[:-1]  # all feature columns (last one is 'label')
    grouped_df = df.groupby(by='label')
    for feature in columns:
        data = []
        # Collect this feature's values per class, in label order
        for key, item in grouped_df:
            temp_df = grouped_df.get_group(key)
            print temp_df.loc[:,feature].describe()
            #raw_input()
            data.append(temp_df.loc[:,feature].values)
        print len(data[0]), len(data[1])
        plt.clf()
        fig = plt.figure(1, figsize=(9, 6))  # reuse figure 1 across features
        fig.clf()
        ax = fig.add_subplot(111)
        bp = ax.boxplot(data, notch=True, sym='+', vert=True, whis=1.5,
                        patch_artist=True)
        ax.set_xticklabels(['Non-depressed','Depressed'])
        ax.set_ylabel(feature)
        ax.set_xlabel('Class Label')
        plt.grid(axis='y',
                linestyle='--',
                which='major',
                color='black',
                alpha=0.25)
        # Style the box artists: class 0 green, class 1 red
        colors = ['green', 'red']
        for box,color in zip(bp['boxes'],colors):
            box.set(color='black', linewidth=0.5)
            box.set_facecolor(color)
        for whisker in bp['whiskers']:
            whisker.set(color='grey', linewidth=1.5, linestyle='--')
        for cap in bp['caps']:
            cap.set(color='black', linewidth=2)
        for median in bp['medians']:
            median.set(color='black', linewidth=3)
        for flier in bp['fliers']:
            flier.set(marker='o', color='green', alpha=0.7)
        #plt.show()
        #sys.exit(1)
        fig.savefig('plots/' + dir_name + '/' + feature + '.png')
def calculate_anova(df, filename):
    """Run a one-way ANOVA per feature between the two label groups.

    Writes one (feature, p-value, F-value) row per feature to
    '<filename>.csv' under config.ANOVA_DIR.
    """
    out_path = os.path.join(config.ANOVA_DIR, filename + '.csv')
    by_label = df.groupby(by='label')
    with open(out_path, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(["Feature","P-value","F-value"])
        for feature in df.columns[:-1]:
            # One value array per class, in label order
            samples = [group.loc[:,feature].values for _, group in by_label]
            f_val, p_val = stats.f_oneway(samples[0], samples[1])
            writer.writerow([feature, p_val, f_val])
def main():
    """CLI entry: argv[1], argv[2] = feature CSVs under config.D_ND_DIR;
    argv[3] = output name used for the ANOVA CSV and the plot directory."""
    out_name = sys.argv[3]
    combined_df = get_combined_data(os.path.join(config.D_ND_DIR, sys.argv[1]),
                                    os.path.join(config.D_ND_DIR, sys.argv[2]))
    calculate_anova(combined_df, out_name)
    plot_boxplot(combined_df, out_name)

if __name__ == '__main__':
    main()
|
ab93/Depression-Identification | src/feature_extract/check_features.py | import pandas as pd
def get_test_data():
test_data = pd.read_csv('data/classification_data/dev_split.csv')
#print test_data
test = test_data['Participant_ID'].tolist()
#print test
#test.append(video)
clm_d = pd.read_csv('data/disc_nondisc/discriminative_CLM.csv')
covarep_d = pd.read_csv('data/disc_nondisc/discriminative_COVAREP.csv')
liwc_d = pd.read_csv('data/disc_nondisc/discriminative_LIWC.csv')
clm_nd = pd.read_csv('data/disc_nondisc/nondiscriminative_CLM.csv')
covarep_nd = pd.read_csv('data/disc_nondisc/nondiscriminative_COVAREP.csv')
liwc_nd = pd.read_csv('data/disc_nondisc/nondiscriminative_LIWC.csv')
for key in test:
if not ((clm_nd['video'] == key).any() ):
print "visual ",key
if not ((covarep_nd['video'] == key).any() ):
print "acoustic ", key
#print key
if not((liwc_nd['video'] == key).any()):
print "liwc ", key
get_test_data()
|
ab93/Depression-Identification | src/obsolete/utils.py | import numpy as np
import pandas as pd
from src.obsolete.read_labels import features
def oversample(X, y):
    """Duplicate every positive-class (label == 1) row once.

    Args:
        X: list of 2D arrays (stacked with np.vstack).
        y: list of 1D label arrays (stacked with np.hstack).

    Returns:
        (X, y): 2D feature array and 1D label array with each label-1
        row appearing twice; unchanged when there are no positives.
    """
    X = np.vstack(X)
    y = np.hstack(y)
    df = pd.DataFrame(X)
    df['labels'] = y
    # Find the positive-class rows, if any exist
    positive_df = None
    for key, dframe in df.groupby('labels'):
        if key == 1:
            positive_df = dframe
    if positive_df is not None:
        # pd.concat works on both old and new pandas (DataFrame.append
        # was removed in pandas 2.0); the guard also avoids the original
        # NameError when no label-1 group exists.
        df = pd.concat([df, positive_df])
    data = df.values
    X, y = data[:, :-1], data[:, -1]
    return X, y
def get_single_mode_data(mode='acoustic', count="all", select="select", category='PN', problem_type='C', feature_scale='False'):
    """
    Get training and validation data for a single mode in
    a particular category.

    Args:
        mode: str -- 'acoustic', 'visual' or 'linguistic'
        count: passed through to features() for the training split
        select: feature-selection flag passed through to features()
        category: 'PN' (positive/negative) or anything else for
            discriminative/nondiscriminative
        problem_type: passed through to features()
        feature_scale: passed through to features()
    Returns:
        X_train: 3D numpy array
        y_train: 2D numpy array
        X_val: 3D numpy array
        y_val: 2D numpy array
    """
    if category == 'PN':
        cat_1 = "positive"
        cat_2 = "negative"
    else:
        cat_1 = "discriminative"
        cat_2 = "nondiscriminative"
    # NOTE: the original computed every split twice -- once without the
    # `select` argument and once with it -- and discarded the first
    # result, doubling the (expensive) features() calls.  Only the
    # select-aware calls are kept.
    X_train = [map(np.asarray, features(mode, cat_1, "train", problem_type, feature_scale, count=count, select=select)[0]),
               map(np.asarray, features(mode, cat_2, "train", problem_type, feature_scale, count=count, select=select)[0])]
    y_train = [map(np.asarray, features(mode, cat_1, "train", problem_type, feature_scale, count=count, select=select)[1]),
               map(np.asarray, features(mode, cat_2, "train", problem_type, feature_scale, count=count, select=select)[1])]
    X_val = [map(np.asarray, features(mode, cat_1, "val", problem_type, feature_scale, select=select)[0]),
             map(np.asarray, features(mode, cat_2, "val", problem_type, feature_scale, select=select)[0])]
    y_val = [map(np.asarray, features(mode, cat_1, "val", problem_type, feature_scale, select=select)[1]),
             map(np.asarray, features(mode, cat_2, "val", problem_type, feature_scale, select=select)[1])]
    return X_train, y_train, X_val, y_val
def get_multi_data(count="all", select="select", category='PN', problem_type='C', feature_scale='regular'):
    """Load train/validation data for all three modes.

    Returns (Xs, ys, Xs_val, ys_val), each a 3-element list ordered
    [acoustic, visual, linguistic].
    """
    Xs, ys, Xs_val, ys_val = [], [], [], []
    for mode in ('acoustic', 'visual', 'linguistic'):
        X_tr, y_tr, X_v, y_v = get_single_mode_data(mode, count, select, category,
                                                    problem_type, feature_scale)
        Xs.append(X_tr)
        ys.append(y_tr)
        Xs_val.append(X_v)
        ys_val.append(y_v)
    return Xs, ys, Xs_val, ys_val
|
ab93/Depression-Identification | src/feature_extract/convert_CSV_to_txt.py | <filename>src/feature_extract/convert_CSV_to_txt.py
import os
import fnmatch
import sys
def renameCSVToTxt(dir):
    """Rename every '*CLM*.csv' file under `dir` (recursively) to '.txt'.

    Args:
        dir: root directory to walk.  (Parameter name kept for backward
             compatibility even though it shadows the builtin.)
    """
    for root, dirnames, filenames in os.walk(dir):
        for filename in fnmatch.filter(filenames, '*CLM*.csv'):
            # os.path.join(root, name) instead of the original
            # single-argument join of a hand-concatenated path
            src = os.path.join(root, filename)
            # swap the '.csv' extension for '.txt' in place
            dst = os.path.join(root, filename[:-4] + '.txt')
            os.rename(src, dst)

if __name__ == "__main__":
    dirPath = sys.argv[1]
    renameCSVToTxt(dirPath)
|
ab93/Depression-Identification | src/obsolete/classify.py | <filename>src/obsolete/classify.py
import os
import config
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from src.models.classifier import MetaClassifier, LateFusionClassifier
from src.obsolete import read_labels
from ..feature_extract.read_labels import features
def oversample(X, y):
    """Duplicate every positive-class (label == 1) row once.

    Kept consistent with the identical helper in src/obsolete/utils.py.

    Args:
        X: list of 2D arrays (stacked with np.vstack).
        y: list of 1D label arrays (stacked with np.hstack).

    Returns:
        (X, y): 2D feature array and 1D label array with each label-1
        row appearing twice; unchanged when there are no positives.
    """
    X = np.vstack(X)
    y = np.hstack(y)
    df = pd.DataFrame(X)
    df['labels'] = y
    # Find the positive-class rows, if any exist
    positive_df = None
    for key, dframe in df.groupby('labels'):
        if key == 1:
            positive_df = dframe
    if positive_df is not None:
        # pd.concat works on both old and new pandas (DataFrame.append
        # was removed in pandas 2.0); the guard also avoids the original
        # NameError when no label-1 group exists.
        df = pd.concat([df, positive_df])
    data = df.values
    X, y = data[:,:-1], data[:,-1]
    return X, y
def get_classifiers():
    """Return the two L1 LogisticRegression base classifiers used by
    MetaClassifier (discriminative first, non-discriminative second).

    Alternatives tried during development (SVC, RandomForest,
    DecisionTree, GaussianNB) were abandoned in favour of these.
    """
    disc_clf = LogisticRegression(C=1, penalty='l1', n_jobs=-1, class_weight={1:4})
    nondisc_clf = LogisticRegression(C=1, penalty='l1', n_jobs=-1, class_weight={1:1.5})
    return [disc_clf, nondisc_clf]
def get_data():
    """Assemble [acoustic, visual, linguistic] train/validation splits.

    Returns (Xs, ys, Xs_val, ys_val), each ordered
    [acoustic, visual, linguistic] with a [disc, nondisc] pair inside.
    """
    lin_data = read_labels.return_lin_pn([['word76', 'word87',
                                           '50cogproc_(Cognitive_Processes)',
                                           '31posemo_(Positive_Emotions)']])
    acou_data = read_labels.return_acou_dnd([['MCEP_11', 'F0', 'HMPDM_10', 'HMPDM_9', 'HMPDD_9', 'HMPDD_11'],
                                             []])
    vis_data = read_labels.return_vis_dnd([['x2', 'x3', 'x4', 'x5', 'x6'],
                                           ['Z9','Z54','Z64','Z10'],
                                           [],
                                           ['Rx','Ry','Tz'],
                                           ['AU17Evidence']])

    def as_arrays(data, i, j):
        # Pair split i (discriminative) with split j (non-discriminative),
        # converting each instance to an ndarray.
        return [map(np.asarray, data[i]), map(np.asarray, data[j])]

    # Index layout of each data tuple: 0/2 = train X, 1/3 = train y,
    # 4/6 = val X, 5/7 = val y (disc/nondisc interleaved).
    Xs = [as_arrays(acou_data, 0, 2), as_arrays(vis_data, 0, 2), as_arrays(lin_data, 0, 2)]
    ys = [as_arrays(acou_data, 1, 3), as_arrays(vis_data, 1, 3), as_arrays(lin_data, 1, 3)]
    Xs_val = [as_arrays(acou_data, 4, 6), as_arrays(vis_data, 4, 6), as_arrays(lin_data, 4, 6)]
    ys_val = [as_arrays(acou_data, 5, 7), as_arrays(vis_data, 5, 7), as_arrays(lin_data, 5, 7)]
    return Xs, ys, Xs_val, ys_val
def late_fusion_classify():
# Read the data
Xs_train, ys_train, Xs_val, ys_val = get_data()
clf_A_D = LogisticRegression(C=0.0316, penalty='l1', class_weight={1:4})
clf_A_ND = LogisticRegression(C=0.0316, penalty='l1', class_weight={1:4})
clf_V_D = LogisticRegression(C=1.0, penalty='l2', class_weight={1:3})
clf_V_ND = LogisticRegression(C=1.0, penalty='l2', class_weight={1:3})
clf_L_D = LogisticRegression(C=1.0, penalty='l2', class_weight={1:3})
clf_L_ND = LogisticRegression(C=1.0, penalty='l2', class_weight={1:3})
clf_A = MetaClassifier(classifiers=[clf_A_D, clf_A_ND])
clf_V = MetaClassifier(classifiers=[clf_V_D, clf_V_ND])
clf_L = MetaClassifier(classifiers=[clf_L_D, clf_L_ND])
lf_clf = LateFusionClassifier(classifiers=[clf_A, clf_V, clf_L], weights=[0.6,0.2,0.1])
lf_clf.fit(Xs_train, ys_train)
print lf_clf.predict(Xs_val)
preds = lf_clf.predict_proba(Xs_val, get_all=True)
y_true = map(int,map(np.mean,ys_val[0][0]))
print lf_clf.score(Xs_val,y_true,scoring='f1')
for i in xrange(len(y_true)):
print preds[0][i], preds[1][i], preds[2][i], y_true[i]
def grid_search_lf():
Xs_train, ys_train, Xs_val, ys_val = get_data()
y_true_val = map(int,map(np.mean,ys_val[0][0]))
clf_A_D = LogisticRegression(C=0.0316, penalty='l1', class_weight={1:4})
clf_A_ND = LogisticRegression(C=0.0316, penalty='l1', class_weight={1:4})
clf_V_D = LogisticRegression(C=1.0, penalty='l1', class_weight={1:4})
clf_V_ND = LogisticRegression(C=0.0316, penalty='l1', class_weight={1:4})
clf_L_D = LogisticRegression(C=31.623, penalty='l2', class_weight={1:4})
clf_L_ND = LogisticRegression(C=0.0316, penalty='l1', class_weight={1:4})
clf_A = MetaClassifier(classifiers=[clf_A_D, clf_A_ND])
clf_V = MetaClassifier(classifiers=[clf_V_D, clf_V_ND])
clf_L = MetaClassifier(classifiers=[clf_L_D, clf_L_ND], weights=[0.3,0.7])
mode_weights = [None, [0.6, 0.3, 0.1], [0.3, 0.6, 0.1], [0.4, 0.4, 0.2],
[0.5, 0.4, 0.1], [0.4, 0.5, 0.1], [0.25, 0.25, 0.5]]
with open(os.path.join(config.GRID_SEARCH_DIR, 'late_fusion.csv'),'w') as outfile:
outfile.write('A_wt' + ',' + 'V_wt' + ',' + 'L_wt' + ',' + 'f1_score' + '\n')
for mode_wt in mode_weights:
lf_clf = LateFusionClassifier(classifiers=[clf_A, clf_V, clf_L], weights=mode_wt)
lf_clf.fit(Xs_train, ys_train)
f1_score = lf_clf.score(Xs_val,y_true_val,scoring='f1')
if not mode_wt:
mode_wt = [0.3, 0.3, 0.3]
outfile.write(str(mode_wt[0]) + ',' + str(mode_wt[1]) + ',' +
str(mode_wt[2]) + ',' +str(f1_score) + '\n')
print f1_score
def grid_search_meta(mode='acoustic',category='DND'):
# Read data
# if mode == 'acoustic':
# data_d = features(mode,"discriminative","train")
# data_nd = features(mode,"nondiscriminative","train")
# elif mode == 'visual':
# data = read_labels.return_vis_dnd([['x2','x3','x4','x5','x6'],
# ['Z9','Z54','Z64','Z10'],
# [],
# ['Rx','Ry','Tz'],
# ['AU17Evidence']])
# else:
# data = read_labels.return_lin_pn([['word76','word87',
# '50cogproc_(Cognitive_Processes)',
# '31posemo_(Positive_Emotions)']])
if category == 'PN':
cat_1 = "positive"
cat_2 = "negative"
else:
cat_1 = "discriminative"
cat_2 = "nondiscriminative"
X_train = [map(np.asarray, features(mode,cat_1,"train")[0]),
map(np.asarray, features(mode,cat_2,"train")[0])]
y_train = [map(np.asarray,features(mode,cat_1,"train")[1] ),
map(np.asarray, features(mode,cat_2,"train")[1])]
X_val = [map(np.asarray, features(mode,cat_1,"val")[0]),
map(np.asarray, features(mode,cat_2,"val")[0])]
y_val = [map(np.asarray, features(mode,cat_1,"val")[1]),
map(np.asarray, features(mode,cat_2,"val")[1])]
# Set y_true for validation
y_true_val = map(int,map(np.mean,y_val[0]))
# Set parameters for GridSearch
class_weights = np.arange(2,5)
clf_weights = [None, [0.7,0.3], [0.3,0.7]]
C_vals = np.logspace(-3,3,num=5)
gammas = np.logspace(-3,3,num=5)
penalties = ('l1','l2')
num_neighbors = np.arange(3,13,2)
weights = ['uniform', 'distance']
p_vals = np.arange(1,5)
#num_estimators = np.arange(50,100,10)
#estimators = [LogisticRegression, SVC]
#svm_vals = [{'C':x, 'gamma':y, 'class_weight':z} for x in C_vals for y in gammas for z in class_weights ]
#max_ent_vals = [{'C':x, 'penalty':y, 'class_weight':z} for x in C_vals for y in penalties for z in class_weights]
# def get_params(param_list):
# idx = 0
# while idx < len(param_list):
# yield param_list[idx]
# idx += 1
# results = {}
# max_ent_gen = get_params(max_ent_vals)
# svm_gen = get_params(svm_vals)
# est_gen = {LogisticRegression: max_ent_gen, SVC: svm_gen}
#with open(os.path.join(config.GRID_SEARCH_DIR, mode + '.csv'),'w') as outfile:
#for clf_wt in clf_weights:
# for est1 in estimators:
# for est2 in estimators:
# clf1 = est1(**(est_gen[est1].next()))
# clf2 = est2(**(est_gen[est2].next()))
# print clf1
# print clf2
# #meta_clf = MetaClassifier(classifiers=[clf1, clf2], weights=clf_wt)
# #meta_clf.fit(X_train, y_train)
# raw_input()
# #f1_score = meta_clf.score(X_val, y_true_val)
# #print f1_score
# with open(os.path.join(config.GRID_SEARCH_DIR, mode + '_KNN.csv'),'w') as outfile:
# for n1 in num_neighbors:
# for n2 in num_neighbors:
# for p1 in p_vals:
# for p2 in p_vals:
# for w1 in weights:
# for w2 in weights:
# clf_D = KNeighborsClassifier(n_jobs=-1, n_neighbors=n1, p=p1, weights=w1)
# clf_ND = KNeighborsClassifier(n_jobs=-1, n_neighbors=n2, p=p2, weights=w2)
# meta_clf = MetaClassifier(classifiers=[clf_D, clf_ND])
# meta_clf.fit(X_train, y_train)
# f1_score = meta_clf.score(X_val, y_true_val)
# outfile.write(str(n1) + ',' + str(n2) + ',' +
# str(p1) + ',' + str(p2) + ',' + str(w1) + ','
# + str(w2) + ',' +str(f1_score) + '\n')
# print f1_score
with open(os.path.join(config.GRID_SEARCH_DIR, mode + '_' + category + '.csv'),'w') as outfile:
for p1 in penalties:
for p2 in penalties:
for clf_wt in clf_weights:
for class_wt in class_weights:
for C1 in C_vals:
for C2 in C_vals:
clf_D = LogisticRegression(C=C1, penalty=p1, n_jobs=-1, class_weight={1:class_wt})
clf_ND = LogisticRegression(C=C2, penalty=p2, n_jobs=-1, class_weight={1:class_wt})
meta_clf = MetaClassifier(classifiers=[clf_D, clf_ND], weights=clf_wt)
meta_clf.fit(X_train, y_train)
f1_score = meta_clf.score(X_val, y_true_val)
if not clf_wt:
clf_wt = [0.5, 0.5]
outfile.write(str(clf_wt[0]) + ' ' + str(clf_wt[1]) + ',' +
str(class_wt) + ',' + str(C1) + ',' + str(C2) + ','
+ p1 + ',' + p2 + ','
+ str(f1_score) + '\n')
print f1_score
def train_classify():
    """Train a two-expert MetaClassifier on linguistic PN features and print validation scores.

    Loads a hand-picked LIWC/word feature subset, fits a MetaClassifier whose
    two experts cover the two question groups with fixed weights [0.9, 0.1],
    then prints the default score, accuracy, and per-sample probabilities on
    the validation split.  (Python 2 code: `print` statements, `map` lists.)
    """
    # Hand-picked linguistic features; presumably return_lin_pn yields
    # (X_D, y_D, X_ND, y_ND, X_D_val, y_D_val, X_ND_val, y_ND_val) -- TODO confirm
    data = read_labels.return_lin_pn([['word76', 'word87',
                                       '50cogproc_(Cognitive_Processes)',
                                       '31posemo_(Positive_Emotions)']])
    #data = read_labels.return_acou_dnd([[ 'MCEP_11','F0', 'HMPDM_10','HMPDM_9','HMPDD_9','HMPDD_11'],
    #                                     []])
    # Index 0: first question group, index 1: second question group.
    X_A_train = [map(np.asarray, data[0]), map(np.asarray, data[2])]
    y_A_train = [map(np.asarray, data[1]), map(np.asarray, data[3])]
    # X_D, y_D = oversample(X_A_train[0], y_A_train[0])
    # X_ND, y_ND = oversample(X_A_train[1], y_A_train[1])
    # X_A_train = [X_D, X_ND]
    # y_A_train = [y_D, y_ND]
    X_A_val = [map(np.asarray, data[4]), map(np.asarray, data[6])]
    y_A_val = [map(np.asarray, data[5]), map(np.asarray, data[7])]
    #X_A_val = [map(np.asarray, val_data[4]), map(np.asarray, val_data[6])]
    #y_A_val = [map(np.asarray, val_data[5]), map(np.asarray, val_data[7])]
    print len(X_A_train[0]), len(X_A_train[1])
    clfs = get_classifiers()
    # Heavily favour the first expert when fusing the two predictions.
    meta_clf = MetaClassifier(classifiers=clfs, weights=[0.9, 0.1])
    meta_clf.fit(X_A_train, y_A_train)
    print len(X_A_train[0]), len(X_A_train[1])
    print len(X_A_val[0]), len(X_A_val[1])
    #raw_input()
    # print "\nTraining data...\n"
    # preds = meta_clf.predict_proba(X_A_train, get_all=True)
    # y_true = map(int,map(np.mean,y_A_train[0]))
    # print "F1_score:",meta_clf.score(X_A_train, y_true, scoring='f1')
    # print "Accuracy:",meta_clf.score(X_A_train, y_true, scoring='accuracy')
    # for i in xrange(len(y_true)):
    #     print preds[0][i], preds[1][i], y_true[i]
    print "\nTesting data...\n"
    preds = meta_clf.predict_proba(X_A_val, get_all=True)
    #print meta_clf.predict(X_A_val)
    # Participant-level label = mean of per-segment labels, cast to int.
    y_true = map(int,map(np.mean,y_A_val[0]))
    print meta_clf.score(X_A_val, y_true)
    print meta_clf.score(X_A_val, y_true, scoring='accuracy')
    for i in xrange(len(y_true)):
        print preds[0][i], preds[1][i], y_true[i]
def main():
    """Entry point: run the acoustic PN grid search (other experiments kept commented out)."""
    #train_classify()
    #late_fusion_classify()
    #grid_search_meta(mode='visual', category='PN')
    grid_search_meta(mode='acoustic', category='PN')
    #grid_search_meta(mode='linguistic', category='PN')
    #grid_search_lf()
if __name__ == '__main__':
main() |
ab93/Depression-Identification | src/obsolete/test.py | <reponame>ab93/Depression-Identification<filename>src/obsolete/test.py
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
import src.main.config
import src.obsolete.utils
from src.models import classifier
# Smoke test: fit a weighted MetaClassifier (decision tree + L1 logistic
# regression) on normalized acoustic features and print the validation score.
X_train, y_train, X_val, y_val = src.obsolete.utils.get_single_mode_data(mode="acoustic", normalize='normalize')
# Participant-level label = mean of per-segment labels, cast to int.
y_true = map(int, map(np.mean, y_val[0]))
clf1 = DecisionTreeClassifier(class_weight={1:3},max_features = 13,min_samples_leaf=5, max_depth=5)
clf2 = LogisticRegression(C=1.0, class_weight={1: 3}, penalty='l1')
# Fixed 0.7/0.3 fusion of the two experts.
meta_clf = classifier.MetaClassifier(classifiers=[clf1,clf2], weights=[0.7,0.3])
meta_clf.fit(X_train, y_train)
f1_score = meta_clf.score(X_val, y_true)
print f1_score
ab93/Depression-Identification | src/helpers/add_test_labels.py | <filename>src/helpers/add_test_labels.py
import pandas as pd
from ..main import config
import os
def add_labels():
    """Rewrite the test-split file with PTSD labels from the combined metadata csv.

    Reads the participant ids listed in config.TEST_SPLIT_FILE, pulls their
    PTSD columns from the combined WoZ/AI/Framing/PDHA metadata, renames the
    columns to the PHQ-style schema, and writes the result back over the
    split file.
    """
    test_ids = pd.read_csv(config.TEST_SPLIT_FILE)['participant_ID'].tolist()
    meta_path = os.path.join('data', 'classification_data',
                             'combined_WoZ+AI+Framing+PDHA_meta.csv')
    meta_df = pd.read_csv(meta_path)
    # Keep only the test participants and the three label columns.
    labels = meta_df.loc[meta_df['Participant'].isin(test_ids),
                         ['Participant', 'PTSD_binary', 'PTSD_score']]
    labels = labels.rename(columns={'Participant': 'Participant_ID',
                                    'PTSD_binary': 'PHQ_Binary',
                                    'PTSD_score': 'PHQ_Score'})
    labels.to_csv(config.TEST_SPLIT_FILE, index=False)
if __name__ == '__main__':
add_labels() |
ab93/Depression-Identification | src/helpers/questions.py | import pandas as pd
import os,re,operator
import json
import sys
# Scan a DAIC transcript tree (root passed as argv[1]) and tally how often
# each of Ellie's utterances/questions appears, accumulating the counts in
# dictionary.json across runs.  (Python 2 script.)
filenames = []
p=re.compile('.*_TRANSCRIPT.csv')
# Collect every *_TRANSCRIPT.csv under the given root.
for (path,dir,files) in os.walk(sys.argv[1]):
    for each in files:
        if p.match(each):
            filenames.append(path+"/"+each)
print filenames
print len(filenames)
# Filler utterances that should not be counted as questions.
stopwords = ['okay','cool','nice','awesome','really','mhm','[laughter]']
data_file = open('dictionary.json','r')
questions = json.load(data_file)
for file in filenames:
    df = pd.read_csv(file,sep='\t')
    # Skip everything up to (and including) the interview kickoff line.
    i = 0
    while i<len(df):
        if df.iloc[i]['value'].find("ask a few questions to get us started") > -1:
            break
        i += 1
    print i
    df = df[i+1:]
    if len(df) == 0:
        print "hiiiii"
        # NOTE(review): this `break` aborts processing of ALL remaining
        # transcripts when one file lacks the kickoff line -- `continue`
        # looks intended.  TODO confirm before changing.
        break
    i = 0
    while i < len(df):
        if df.iloc[i]['speaker'] == "Ellie":
            try:
                curr = i
                # Two consecutive Ellie rows: keep the second one.
                if df.iloc[i+1]['speaker'] == "Ellie":
                    curr = i+1
                    i += 1
                # Prefer the canonical form in trailing parentheses, if any.
                val = re.search(".*\((.*)\)$", df.iloc[curr]['value'])
                if val != None:
                    val = val.group(1)
                else:
                    val = df.iloc[curr]['value']
                if val not in stopwords:
                    if val in questions:
                        questions[val] = questions[val] + 1
                    else:
                        questions[val] = 1
            # NOTE(review): bare except masks more than the intended
            # end-of-frame IndexError.
            except:
                print "index out of bound"
        i += 1
data_file.close()
# Persist the updated counts for the next run.
data_file = open('dictionary.json','w')
json.dump(questions,data_file)
data_file.close()
#print (questions)
# sorted_questions = sorted(questions.items(), key=operator.itemgetter(1),reverse=True)
# ques = open('questions.csv', 'wb')
# ques.write("Questions")
# ques.write(",")
# ques.write("Count")
# ques.write("\n")
# for q in sorted_questions:
# ques.write(q[0])
# ques.write(",")
# ques.write(str(q[1]))
# ques.write("\n")
|
ab93/Depression-Identification | src/main/train.py | <reponame>ab93/Depression-Identification
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression, Ridge, Lasso
from src.models.classifier import MetaClassifier, LateFusionClassifier
from src.models.regressor import MetaRegressor, LateFusionRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn import metrics
import config
from feature_data import Data
class TrainRegressor(object):
def __init__(self, category, modality='acoustic', feature_scale=False, feature_select=False):
self.category = category
self.feature_scale = feature_scale
self.feature_select = feature_select
self.modality = modality
self.data = Data(category=category, feature_scale=feature_scale,
feature_select=feature_select, problem_type='R')
self.reg_weights = [None, [0.7, 0.3], [0.3, 0.7]]
# Ridge and Lasso params
alpha_vals = np.logspace(-3, 3, num=5)
lr_params = [{'alpha': alpha, 'normalize': True} for alpha in alpha_vals]
# Decision Tree Regressor params
max_features_ = np.arange(3, 20, 5)
max_depths = np.arange(3, 6)
min_samples_leaves = np.arange(2, 6)
dt_params = [{'max_features': x, 'max_depth': y, 'min_samples_leaf': z, 'random_state': 42}
for x in max_features_ for y in max_depths
for z in min_samples_leaves]
self.params = {'DT': dt_params,
'Ridge': lr_params,
'Lasso': lr_params}
self.regs = {'DT': DecisionTreeClassifier,
'Ridge': Ridge,
'Lasso': Lasso}
def grid_search_meta(self, model='DT'):
x_train, y_train, x_val, y_val = self.data.get_data(self.modality)
y_true_train = map(int, map(np.mean, y_train[0]))
y_true_val = map(int, map(np.mean, y_val[0]))
print x_train[0][0].shape
# raw_input()
feature_select = 'sel' if self.feature_select else 'all'
print "Performing regression grid search for {}".format(self.modality)
with open(os.path.join(config.GRID_SEARCH_REG_DIR,
'{}_{}_{}_{}.txt'.format(self.modality, model, self.category, feature_select)),
'w') as outfile:
for reg_wt in self.reg_weights:
for param1 in self.params[model]:
for param2 in self.params[model]:
reg_1 = self.regs[model](**param1)
reg_2 = self.regs[model](**param2)
meta_reg = MetaRegressor(regressors=[reg_1, reg_2], weights=reg_wt)
meta_reg.fit(x_train, y_train)
val_score = meta_reg.score(x_val, y_true_val)
train_score = meta_reg.score(x_train, y_true_train)
if not reg_wt:
reg_wt = [0.5, 0.5]
print "val RMSE:", val_score, "train RMSE:", train_score
outfile.write(str(reg_wt[0]) + ' ' + str(reg_wt[1]) + '\t' +
str(param1) + '\t' + str(param2) + '\t' +
str(val_score) + '\t' +
str(train_score) + '\n')
def grid_search_late_fusion(self):
Xs_train, ys_train, Xs_val, ys_val = self.data.get_multi_data()
y_true_val = map(int, map(np.mean, ys_val[0][0]))
reg_A_1 = DecisionTreeRegressor(max_depth=5, max_features=3,
min_samples_leaf=2, random_state=42)
reg_A_2 = DecisionTreeRegressor(max_depth=5, max_features=13,
min_samples_leaf=5, random_state=42)
reg_V_1 = DecisionTreeRegressor(max_depth=5, max_features=8,
min_samples_leaf=2, random_state=42)
reg_V_2 = DecisionTreeRegressor(max_depth=5, max_features=18,
min_samples_leaf=2, random_state=42)
reg_L_1 = DecisionTreeRegressor(max_depth=5, max_features=3,
min_samples_leaf=2, random_state=42)
reg_L_2 = DecisionTreeRegressor(max_depth=4, max_features=3,
min_samples_leaf=2, random_state=42)
reg_A = MetaRegressor(regressors=[reg_A_1, reg_A_2])
reg_V = MetaRegressor(regressors=[reg_V_1, reg_V_2], weights=[0.7, 0.3])
reg_L = MetaRegressor(regressors=[reg_L_1, reg_L_2])
mode_weights = [None, [0.6, 0.3, 0.1], [0.3, 0.6, 0.1], [0.4, 0.4, 0.2],
[0.5, 0.4, 0.1], [0.4, 0.5, 0.1], [0.25, 0.25, 0.5],
[0.2, 0.7, 0.1], [0.2, 0.55, 0.25]]
with open(os.path.join(config.GRID_SEARCH_REG_DIR, '_' + self.category + '_lf.txt'), 'w') as outfile:
outfile.write('A_wt' + ',' + 'V_wt' + ',' + 'L_wt' + ',' + 'score' + '\n')
for mode_wt in mode_weights:
lf_reg = LateFusionRegressor(regressors=[reg_A, reg_V, reg_L], weights=mode_wt)
lf_reg.fit(Xs_train, ys_train)
score = lf_reg.score(Xs_val, y_true_val)
print mode_wt
print "LF:", lf_reg.predict(Xs_val)
print "A :", lf_reg.regressors_[0].predict(Xs_val[0])
print "V :", lf_reg.regressors_[1].predict(Xs_val[1])
print "L :", lf_reg.regressors_[2].predict(Xs_val[2])
print "Y: ", np.array(y_true_val)
if not mode_wt:
mode_wt = [0.3, 0.3, 0.3]
outfile.write(str(mode_wt[0]) + ',' + str(mode_wt[1]) + ',' +
str(mode_wt[2]) + ',' + str(score) + '\n')
print "LF:", score
print "A :", lf_reg.regressors_[0].score(Xs_val[0], y_true_val)
print "V :", lf_reg.regressors_[1].score(Xs_val[1], y_true_val)
print "L :", lf_reg.regressors_[2].score(Xs_val[2], y_true_val), '\n'
def test_late_fusion(self):
x_train, y_train = self.data.get_full_train_multi()
x_test, y_test = self.data.get_test_data_multi()
print y_train
raw_input()
print y_test
raw_input()
y_true_test = map(int, map(np.mean, y_test[0][0]))
y_true_train = map(int, map(np.mean, y_train[0][0]))
reg_a = MetaRegressor(regressors=[Lasso(alpha=0.0316, normalize=True),
Lasso(alpha=0.0316, normalize=True)],
weights=[0.7, 0.3])
reg_v = MetaRegressor(regressors=[Lasso(alpha=0.001, normalize=True),
Lasso(alpha=0.0316, normalize=True)],
weights=[0.7, 0.3])
reg_l = MetaRegressor(regressors=[Lasso(alpha=0.001, normalize=True),
Lasso(alpha=0.0316, normalize=True)],
weights=[0.7, 0.3])
for mode_wt in (None, [0.3, 0.3, 0.4], [0.5, 0.2, 0.3], [0.2, 0.6, 0.2], [0.3, 0.2, 0.5]):
lf_reg = LateFusionRegressor(regressors=[reg_a, reg_v, reg_l], weights=mode_wt)
lf_reg.fit(x_train, y_train)
score = lf_reg.score(x_test, y_true_test)
print mode_wt
print "Y :", np.array(y_true_test)
print "LF:", lf_reg.predict(x_test)
print "A :", lf_reg.regressors_[0].predict(x_test[0])
print "V :", lf_reg.regressors_[1].predict(x_test[1])
print "L :", lf_reg.regressors_[2].predict(x_test[2])
print "RMSE :", score
print "A :", lf_reg.regressors_[0].score(x_test[0], y_true_test)
print "V :", lf_reg.regressors_[1].score(x_test[1], y_true_test)
print "L :", lf_reg.regressors_[2].score(x_test[2], y_true_test), '\n'
class TrainClassifier(object):
def __init__(self, category, modality='acoustic', feature_scale=False, feature_select=False):
self.category = category
self.feature_scale = feature_scale
self.modality = modality
self.feature_select = feature_select
self.data = Data(category=category, feature_scale=feature_scale,
feature_select=feature_select, problem_type='C')
self.class_weights = np.arange(3, 6)
self.clf_weights = [None, [0.6, 0.4], [0.4, 0.6]]
# Logistic Regression params
c_values = np.logspace(-3, 3, num=5)
penalties = ('l1', 'l2')
max_ent_params = [{'C': x, 'penalty': y, 'random_state': 42} for x in c_values for y in penalties]
# Decision Tree params
max_features_ = np.arange(3, 20, 5)
max_depths = np.arange(3, 6)
min_samples_leaves = np.arange(2, 6)
dt_params = [{'max_features': x, 'max_depth': y, 'min_samples_leaf': z, 'random_state': 42}
for x in max_features_ for y in max_depths
for z in min_samples_leaves]
# AdaBoost params
base_estimators = [DecisionTreeClassifier(max_depth=1),
DecisionTreeClassifier(max_depth=3)]
num_estimators = np.arange(50, 200, 50)
boost_params = [{'base_estimator': x, 'n_estimators': y}
for x in base_estimators for y in num_estimators]
self.params = {'DT': dt_params,
'LR': max_ent_params,
'AdaBoost': boost_params}
self.clfs = {'DT': DecisionTreeClassifier,
'LR': LogisticRegression,
'AdaBoost': AdaBoostClassifier}
def grid_search_meta(self, model='DT'):
x_train, y_train, x_val, y_val = self.data.get_data(self.modality)
print x_train[0][0].shape
raw_input()
y_true_train = map(int, map(np.mean, y_train[0]))
y_true_val = map(int, map(np.mean, y_val[0]))
feature_select = 'sel' if self.feature_select else 'all'
print "Performing classification grid search for {}".format(self.modality)
with open(os.path.join(config.GRID_SEARCH_CLF_DIR,
'{}_{}_{}_{}.txt'.format(self.modality, model, self.category, feature_select)), 'w') as outfile:
for clf_wt in self.clf_weights:
for class_wt in self.class_weights:
for param1 in self.params[model]:
for param2 in self.params[model]:
clf_1 = self.clfs[model](class_weight={1: class_wt}, **param1)
clf_2 = self.clfs[model](class_weight={1: class_wt}, **param2)
meta_clf = MetaClassifier(classifiers=[clf_1, clf_2], weights=clf_wt)
meta_clf.fit(x_train, y_train)
val_f1_score = meta_clf.score(x_val, y_true_val)
train_f1_score = meta_clf.score(x_train, y_true_train)
if not clf_wt:
clf_wt = [0.5, 0.5]
print "val f1:", val_f1_score, "train f1:", train_f1_score
outfile.write(str(clf_wt[0]) + ' ' + str(clf_wt[1]) + '\t' + str(class_wt) + '\t' +
str(param1) + '\t' + str(param2) + '\t' +
str(val_f1_score) + '\t' +
str(train_f1_score) + '\n')
def grid_search_late_fusion(self):
Xs_train, ys_train, Xs_val, ys_val = self.data.get_multi_data()
y_true_val = map(int, map(np.mean, ys_val[0][0]))
y_true_train = map(int, map(np.mean, ys_train[0][0]))
clf_A = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 5}, max_depth=5, max_features=13,
min_samples_leaf=2, random_state=42),
DecisionTreeClassifier(class_weight={1: 5}, max_depth=5, max_features=18,
min_samples_leaf=2, random_state=42)],
weights=[0.6, 0.4])
clf_V = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 4}, max_depth=4,
max_features=13, min_samples_leaf=2,
random_state=42),
DecisionTreeClassifier(class_weight={1: 4}, max_depth=5,
max_features=3, min_samples_leaf=5,
random_state=42)],
weights=[0.6, 0.4])
clf_L = MetaClassifier(classifiers=[LogisticRegression(C=0.03162, penalty='l2', class_weight={1: 4},
random_state=42),
LogisticRegression(C=0.001, penalty='l2', class_weight={1: 4},
random_state=42)],
weights=[0.4, 0.6])
mode_weights = [None, [0.6, 0.3, 0.1], [0.3, 0.6, 0.1], [0.4, 0.4, 0.2],
[0.5, 0.4, 0.1], [0.4, 0.5, 0.1], [0.25, 0.25, 0.5],
[0.2, 0.7, 0.1], [0.2, 0.55, 0.25]]
with open(os.path.join(config.GRID_SEARCH_CLF_DIR, '_' + self.category + '_lf.txt'), 'w') as outfile:
outfile.write('A_wt' + ',' + 'V_wt' + ',' + 'L_wt' + ',' + 'f1_score' + '\n')
for mode_wt in mode_weights:
lf_clf = LateFusionClassifier(classifiers=[clf_A, clf_V, clf_L], weights=mode_wt)
lf_clf.fit(Xs_train, ys_train)
f1_score = lf_clf.score(Xs_val, y_true_val, scoring='f1')
print mode_wt
print "LF:", lf_clf.predict(Xs_val)
print "A :", lf_clf.classifiers_[0].predict(Xs_val[0])
print "V :", lf_clf.classifiers_[1].predict(Xs_val[1])
print "L :", lf_clf.classifiers_[2].predict(Xs_val[2])
print "Y: ", np.array(y_true_val)
if not mode_wt:
mode_wt = [0.3, 0.3, 0.3]
outfile.write(str(mode_wt[0]) + ',' + str(mode_wt[1]) + ',' +
str(mode_wt[2]) + ',' + str(f1_score) + '\n')
print "LF:", f1_score
print "A :", lf_clf.classifiers_[0].score(Xs_val[0], y_true_val)
print "V :", lf_clf.classifiers_[1].score(Xs_val[1], y_true_val)
print "L :", lf_clf.classifiers_[2].score(Xs_val[2], y_true_val), '\n'
def grid_search_early_fusion(self, model='DT'):
Xs_train, ys_train, Xs_val, ys_val = self.data.get_multi_data()
x_train, y_train = self.data.concat_features(Xs_train[0], Xs_train[1], Xs_train[2], ys_train[0])
x_val, y_val = self.data.concat_features(Xs_val[0], Xs_val[1], Xs_val[2], ys_val[0])
# print ys_train[0][1][116].shape, y_train[1][116].shape
y_true_train = map(int, map(np.mean, y_train[0]))
y_true_val = map(int, map(np.mean, y_val[0]))
feature_select = 'sel' if self.feature_select else 'all'
print "Performing early fusion classification grid search for {}".format(self.modality)
with open(os.path.join(config.GRID_SEARCH_CLF_DIR,
'{}_{}_{}_{}.txt'.format(model, self.category, feature_select, 'EF')),
'w') as outfile:
for clf_wt in self.clf_weights:
for class_wt in self.class_weights:
for param1 in self.params[model]:
for param2 in self.params[model]:
clf_1 = self.clfs[model](class_weight={1: class_wt}, **param1)
clf_2 = self.clfs[model](class_weight={1: class_wt}, **param2)
meta_clf = MetaClassifier(classifiers=[clf_1, clf_2], weights=clf_wt)
meta_clf.fit(x_train, y_train)
val_f1_score = meta_clf.score(x_val, y_true_val)
train_f1_score = meta_clf.score(x_train, y_true_train)
if not clf_wt:
clf_wt = [0.5, 0.5]
print "val f1:", val_f1_score, "train f1:", train_f1_score
outfile.write(str(clf_wt[0]) + ' ' + str(clf_wt[1]) + '\t' + str(class_wt) + '\t' +
str(param1) + '\t' + str(param2) + '\t' +
str(val_f1_score) + '\t' +
str(train_f1_score) + '\n')
def test_early_fusion(self):
x_train, y_train = self.data.get_full_train_multi()
x_train, y_train = self.data.concat_features(x_train[0], x_train[1], x_train[2], y_train[0])
x_test, y_test = self.data.get_test_data_multi()
x_test, y_test = self.data.concat_features(x_test[0], x_test[1], x_test[2], y_test[0])
y_true_test = map(int, map(np.mean, y_test[0]))
ef_clf = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 5}, max_depth=3, max_features=13,
min_samples_leaf=2, random_state=42),
DecisionTreeClassifier(class_weight={1: 4}, max_depth=5, max_features=3,
min_samples_leaf=5, random_state=42)],
weights=[0.4, 0.6])
ef_clf.fit(x_train, y_train)
score = ef_clf.score(x_test, y_true_test)
print "F1 score:", score
print "Y :", np.array(y_true_test)
print "LF:", ef_clf.predict(x_test)
def test_late_fusion(self):
x_train, y_train = self.data.get_full_train_multi()
x_test, y_test = self.data.get_test_data_multi()
y_true_test = map(int, map(np.mean, y_test[0][0]))
y_true_train = map(int, map(np.mean, y_train[0][0]))
clf_a = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 4}, max_depth=3, max_features=18,
min_samples_leaf=2, random_state=42),
DecisionTreeClassifier(class_weight={1: 4}, max_depth=4, max_features=13,
min_samples_leaf=3, random_state=42)],
weights=[0.4, 0.6])
clf_v = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 3}, max_depth=3,
max_features=18, min_samples_leaf=5,
random_state=42),
DecisionTreeClassifier(class_weight={1: 3}, max_depth=5,
max_features=13, min_samples_leaf=4,
random_state=42)])
clf_l = MetaClassifier(classifiers=[LogisticRegression(C=0.03162, penalty='l2', class_weight={1: 4}),
LogisticRegression(C=0.03162, penalty='l1', class_weight={1: 4})])
mode_weights = [None, [0.6, 0.3, 0.1], [0.3, 0.6, 0.1], [0.4, 0.4, 0.2],
[0.5, 0.4, 0.1], [0.4, 0.5, 0.1], [0.25, 0.25, 0.5],
[0.2, 0.7, 0.1], [0.2, 0.55, 0.25]]
for mode_wt in mode_weights:
lf_clf = LateFusionClassifier(classifiers=[clf_a, clf_v, clf_l], weights=mode_wt)
lf_clf.fit(x_train, y_train)
score = lf_clf.score(x_test, y_true_test)
print mode_wt
print "Y :", np.array(y_true_test)
print "LF:", lf_clf.predict(x_test)
print "A :", lf_clf.classifiers_[0].predict(x_test[0])
print "V :", lf_clf.classifiers_[1].predict(x_test[1])
print "L :", lf_clf.classifiers_[2].predict(x_test[2])
print "F1 score:", score
print "A :", lf_clf.classifiers_[0].score(x_test[0], y_true_test)
print "V :", lf_clf.classifiers_[1].score(x_test[1], y_true_test)
print "L :", lf_clf.classifiers_[2].score(x_test[2], y_true_test), '\n'
def plot_learning_curve(self):
steps = np.arange(56, 187, 10)
if self.modality == 'acoustic':
clf = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 4}, max_depth=5, max_features=3,
min_samples_leaf=2, random_state=42),
DecisionTreeClassifier(class_weight={1: 4}, max_depth=5, max_features=13,
min_samples_leaf=5, random_state=42)])
elif self.modality == 'visual':
clf = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 3}, max_depth=5, max_features=8,
min_samples_leaf=2, random_state=42),
DecisionTreeClassifier(class_weight={1: 3}, max_depth=5, max_features=18,
min_samples_leaf=2, random_state=42)],
weights=[0.7, 0.3])
elif self.modality == 'linguistic':
clf = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 4}, max_depth=5, max_features=3,
min_samples_leaf=2, random_state=42),
DecisionTreeClassifier(class_weight={1: 4}, max_depth=4, max_features=3,
min_samples_leaf=2, random_state=42)])
train_scores, val_scores = [], []
for train_count in steps:
x_train, y_train, x_val, y_val = self.data.get_data(self.modality, size=train_count)
y_true_train = map(int, map(np.mean, y_train[0]))
y_true_val = map(int, map(np.mean, y_val[0]))
clf.fit(x_train, y_train)
val_scores.append(clf.score(x_val, y_true_val))
train_scores.append(clf.score(x_train, y_true_train))
print val_scores
print train_count
plt.figure()
plt.plot(steps, train_scores, 'o-', color="r",
label="Training score")
plt.plot(steps, val_scores, 'o-', color="g",
label="Validation score")
plt.title('Learning Curve for {}'.format(self.modality))
plt.xlabel("Training examples")
plt.ylabel("F1 Score")
plt.grid()
plt.legend(loc='best')
plt.show()
def plot_roc(self):
# Xs_train, ys_train = self.data.get_full_train_multi()
# Xs_val, ys_val = self.data.get_test_data_multi()
#
# y_true_val = map(int, map(np.mean, ys_val[0][0]))
# y_true_train = map(int, map(np.mean, y_train[0][0]))
Xs_train, ys_train, Xs_val, ys_val = self.data.get_multi_data()
y_true_val = map(int, map(np.mean, ys_val[0][0]))
probs = {}
clf_A = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 4}, max_depth=3, max_features=18,
min_samples_leaf=2, random_state=42),
DecisionTreeClassifier(class_weight={1: 4}, max_depth=4, max_features=13,
min_samples_leaf=3, random_state=42)],
weights=[0.4, 0.6])
clf_V = MetaClassifier(classifiers=[DecisionTreeClassifier(class_weight={1: 3}, max_depth=3,
max_features=18, min_samples_leaf=5,
random_state=42),
DecisionTreeClassifier(class_weight={1: 3}, max_depth=5,
max_features=13, min_samples_leaf=4,
random_state=42)])
clf_L = MetaClassifier(classifiers=[LogisticRegression(C=0.03162, penalty='l2', class_weight={1: 4}),
LogisticRegression(C=0.03162, penalty='l1', class_weight={1: 4})])
clf_A.fit(Xs_train[0], ys_train[0])
probs['acoustic'] = clf_A.predict_proba(Xs_val[0])[:, 1]
clf_V.fit(Xs_train[1], ys_train[1])
probs['visual'] = clf_V.predict_proba(Xs_val[1])[:, 1]
clf_L.fit(Xs_train[2], ys_train[2])
probs['linguistic'] = clf_L.predict_proba(Xs_val[2])[:, 1]
lf_clf = LateFusionClassifier(classifiers=[clf_A, clf_V, clf_L], weights=None)
lf_clf.fit(Xs_train, ys_train)
probs['lateFusion'] = lf_clf.predict_proba(Xs_val)[:, 1]
labels = ['acoustic', 'visual', 'linguistic', 'lateFusion']
fpr, tpr, roc_auc = {}, {}, {}
plt.figure()
colors = ['aqua', 'darkorange', 'cornflowerblue', 'green']
for lbl, c in zip(labels, colors):
fpr[lbl], tpr[lbl], _ = metrics.roc_curve(y_true_val, probs[lbl], pos_label=1)
roc_auc[lbl] = metrics.auc(fpr[lbl], tpr[lbl])
plt.plot(fpr[lbl], tpr[lbl], color=c, lw=2,
label='ROC curve of {0} (area = {1:0.2f})'.format(lbl, roc_auc[lbl]))
plt.plot([0, 1], [0, 1], color='black', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristics')
plt.legend(loc="lower right")
plt.show()
if __name__ == '__main__':
trn = TrainClassifier(category='PN', feature_scale=True, feature_select=False, modality='acoustic')
# trn = TrainRegressor(category='PN', feature_scale=False, feature_select=False, modality='linguistic')
# trn.grid_search_meta(model='Ridge')
# trn.grid_search_early_fusion(model='LR')
# trn.grid_search_late_fusion()
trn.plot_roc()
# trn.plot_learning_curve()
# trn.test_late_fusion()
# trn.test_early_fusion()
# trn = TrainRegressor(category='PN', feature_scale=False, feature_select=True, modality='acoustic')
# trn.grid_search_meta(model='Ridge')
# trn.grid_search_late_fusion()
|
ab93/Depression-Identification | src/obsolete/plot_roc.py | <filename>src/obsolete/plot_roc.py
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import src.main.config
import src.obsolete.utils
import src.obsolete.utils
from src.models import classifier
def plot_roc_curve(fpr_a,tpr_a,roc_area_a,fpr_v,tpr_v,roc_area_v,fpr_l,tpr_l,roc_area_l):
    """Draw the acoustic, visual and linguistic ROC curves on one figure and show it."""
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    # One (x, y, legend-label) triple per modality, plotted in order.
    specs = ((fpr_a, tpr_a, 'ROC Acoustic (area = %0.2f)' % roc_area_a),
             (fpr_v, tpr_v, 'ROC Visual (area = %0.2f)' % roc_area_v),
             (fpr_l, tpr_l, 'ROC Linguistic (area = %0.2f)' % roc_area_l))
    for xs, ys, curve_label in specs:
        plt.plot(xs, ys, lw=2, label=curve_label)
    plt.legend(loc="best")
    plt.show()
def plot_roc_latefusion(fpr,tpr,roc_area):
    """Draw the single ROC curve of the late-fusion classifier and show it."""
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    curve_label = 'ROC Late Fusion(area = %0.2f)' % roc_area
    plt.plot(fpr, tpr, lw=2, label=curve_label)
    plt.legend(loc="best")
    plt.show()
class_names = ["Non-Depressed","Depressed"]
def late_fusion_model(clf1_a,clf2_a,clf1_v,clf2_v,clf1_l,clf2_l):
    """Fuse per-modality expert pairs into a LateFusionClassifier and report metrics.

    Builds one MetaClassifier per modality from the supplied expert pairs,
    fits the fusion model with fixed modality weights [0.4, 0.4, 0.2], then
    prints F1/accuracy/ROC area and plots the ROC curve and confusion matrix
    on the validation split.
    """
    # Read the data
    Xs_train, ys_train, Xs_val, ys_val = src.obsolete.utils.get_multi_data()
    clf_A = classifier.MetaClassifier(classifiers=[clf1_a, clf2_a])
    clf_V = classifier.MetaClassifier(classifiers=[clf1_v, clf2_v])
    clf_L = classifier.MetaClassifier(classifiers=[clf1_l, clf2_l])
    lf_clf = classifier.LateFusionClassifier(classifiers=[clf_A, clf_V, clf_L], weights=[0.4, 0.4, 0.2])
    lf_clf.fit(Xs_train, ys_train)
    # Participant-level label = mean of per-segment labels, cast to int.
    y_true = map(int, map(np.mean, ys_val[0][0]))
    f1 = lf_clf.score(Xs_val, y_true, scoring='f1')
    accuracy = lf_clf.score(Xs_val, y_true, scoring='accuracy')
    preds = lf_clf.predict_proba(Xs_val, get_all=False)
    preds_label = lf_clf.predict(Xs_val)
    # Probability of the positive (depressed) class, for the ROC curve.
    preds_positive_labels = []
    for i in preds:
        preds_positive_labels.append(i[1])
    fpr, tpr, thresholds = roc_curve(y_true, preds_positive_labels)
    roc_area = roc_auc_score(y_true, preds_positive_labels)
    plot_roc_latefusion(fpr,tpr,roc_area)
    print "F1 score: ",f1
    print "Accuracy: ",accuracy
    print "ROC Area: ",roc_area
    cnf_matrix = confusion_matrix(y_true,preds_label)
    plot_confusion_matrix(cnf_matrix,classes = class_names,title="Confusion Matrix Late Fusion")
def plot_confusion_matrix(cm, classes,title='Confusion matrix',cmap=plt.cm.Blues):
    """Render the confusion matrix *cm* as an annotated heatmap with class-name ticks."""
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    # Cells above half the maximum count get white text for contrast.
    cutoff = cm.max() / 2.
    for r in range(cm.shape[0]):
        for c in range(cm.shape[1]):
            shade = "white" if cm[r, c] > cutoff else "black"
            plt.text(c, r, cm[r, c],
                     horizontalalignment="center",
                     color=shade)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
def logistic_model(mode,inter_clf_weights,clf1,clf2):
    """Fit a two-expert MetaClassifier described by string-serialized settings.

    *inter_clf_weights*, *clf1* and *clf2* arrive as strings read from a
    grid-search results file and are eval()'d back into Python objects.
    Returns (clf1, clf2, f1, accuracy, fpr, tpr, thresholds, roc_area,
    confusion_matrix) computed on the validation split.
    """
    # print clf1
    # print clf2
    # SECURITY NOTE(review): eval() on text read from a results file is safe
    # only as long as that file is trusted, locally generated output.
    inter_clf_weights = eval(inter_clf_weights)
    temp = []
    for i in inter_clf_weights:
        #print i
        temp.append(float(i))
    #print temp
    X_train, y_train, X_val, y_val = src.obsolete.utils.get_single_mode_data(mode=mode, normalize='normalize')
    print X_train
    #print y_train
    #print X_val
    # NOTE(review): debug pause -- blocks unattended runs.
    raw_input()
    # Participant-level label = mean of per-segment labels, cast to int.
    y_true = map(int, map(np.mean, y_val[0]))
    clf1 = eval(clf1)
    clf2 = eval(clf2)
    meta_clf = classifier.MetaClassifier(classifiers=[clf1,clf2], weights=temp)
    meta_clf.fit(X_train, y_train)
    preds = meta_clf.predict_proba(X_val, get_all=False)
    preds_label = meta_clf.predict(X_val)
    # Probability of the positive class, for the ROC curve.
    preds_positive_labels = []
    for i in preds:
        preds_positive_labels.append(i[1])
    f1 = meta_clf.score(X_val, y_true)
    accuracy = meta_clf.score(X_val, y_true, scoring='accuracy')
    fpr, tpr, thresholds = roc_curve(y_true, preds_positive_labels)
    roc_area = roc_auc_score(y_true, preds_positive_labels)
    cnf_matrix = confusion_matrix(y_true, preds_label)
    return clf1,clf2,f1,accuracy,fpr,tpr,thresholds,roc_area,cnf_matrix
def call_logistic_model(mode):
    """Load the best grid-search row for *mode* and evaluate it.

    Picks the row with maximum validation score (column 4) from the
    tab-separated results file and forwards its serialized settings to
    logistic_model().  Columns: 0 = expert weights, 1 = clf1, 3 = clf2.
    """
    classify = pd.read_csv(src.main.config.RESULTS_CLASSIFY + "/" + mode + "_test_PN.txt", sep="\t", header=None)
    data = classify.iloc[classify[4].argmax()]
    return logistic_model(mode,data[0],data[1],data[3])
def main():
    """Evaluate the best per-modality models, print their metrics, then run late fusion."""
    print "Acoustic"
    clf1_a, clf2_a, f1_a, accuracy_a, fpr_a, tpr_a, thresholds_a, roc_area_a, cnf_matrix_a = call_logistic_model('acoustic')
    print "F1 score: ",f1_a
    print "Accuracy: ",accuracy_a
    print "ROC Area: ",roc_area_a
    print "\n"
    print "Visual"
    clf1_v, clf2_v,f1_v, accuracy_v, fpr_v, tpr_v, thresholds_v, roc_area_v,cnf_matrix_v = call_logistic_model('visual')
    print "F1 score: ",f1_v
    print "Accuracy: ",accuracy_v
    print "ROC Area: ",roc_area_v
    print "\n"
    print "linguistic"
    clf1_l, clf2_l,f1_l, accuracy_l, fpr_l, tpr_l, thresholds_l, roc_area_l,cnf_matrix_l = call_logistic_model('linguistic')
    print "F1 score: ",f1_l
    print "Accuracy: ",accuracy_l
    print "ROC Area: ",roc_area_l
    print "\n"
    print "Late Fusion"
    # plot_roc_curve(fpr_a, tpr_a, roc_area_a, fpr_v, tpr_v, roc_area_v, fpr_l, tpr_l, roc_area_l)
    # plot_confusion_matrix(cnf_matrix_a, classes=class_names,title="Confusion Matrix Acoustic")
    # plot_confusion_matrix(cnf_matrix_v, classes=class_names,title="Confusion Matrix Visual")
    # plot_confusion_matrix(cnf_matrix_l, classes=class_names,title="Confusion Matrix Linguistic")
    # Fuse the already-selected per-modality experts.
    late_fusion_model(clf1_a,clf2_a,clf1_v,clf2_v,clf1_l,clf2_l)
main()
|
ab93/Depression-Identification | src/helpers/normalized_features.py | <reponame>ab93/Depression-Identification<gh_stars>10-100
import pandas as pd
import os
from sklearn import preprocessing
from ..main import config
import numpy as np
import sys
def get_normalized_features(filename):
    """Fit a StandardScaler on one train csv and apply it to train/val/test.

    *filename* must be a "...train..." path under a "regular" directory; the
    matching val/test files are derived by substring replacement and the
    normalized copies are written to the parallel "normalize" paths, keeping
    the 'video'/'label'/'score' columns untouched.
    """
    filename_train = filename
    data_train = pd.read_csv(filename_train)
    # Sibling split files share the same name with "train" swapped out.
    filename_val = filename.replace("train","val")
    filename_test = filename.replace("train", "test")
    data_val = pd.read_csv(filename_val)
    data_test = pd.read_csv(filename_test)
    # Drop the leading id column and the two trailing label columns; only
    # the feature columns in between are scaled.
    columns = data_train.columns[1:]
    column = columns[:-2]
    #print column
    #print filename_train
    X_train = data_train.as_matrix(columns=column)
    X_val = data_val.as_matrix(columns = column)
    X_test = data_test.as_matrix(columns = column)
    # Scaler is fit on train only, then reused for val/test (no leakage).
    scalar = preprocessing.StandardScaler().fit(X_train)
    transformed_train = scalar.transform(X_train)
    transformed_val = scalar.transform(X_val)
    #print X_test
    #print np.isfinite(X_test).all()
    # for i in X_test:
    #     for j in i:
    #         if j == True:
    #             print i,j
    # NaNs in the test matrix are zero-filled before scaling.
    X_test[np.isnan(X_test)] = 0
    transformed_test = scalar.transform(X_test)
    data_normalized_train = pd.DataFrame(transformed_train,columns=column)
    data_normalized_val = pd.DataFrame(transformed_val, columns=column)
    data_normalized_test = pd.DataFrame(transformed_test, columns=column)
    # Re-attach the unscaled id/label columns.
    data_normalized_train[['video','label','score']] = data_train[['video','label','score']]
    data_normalized_val[['video','label','score']] = data_val[['video','label','score']]
    data_normalized_test[['video','label','score']] = data_test[['video','label','score']]
    # Output goes to the parallel "normalize" directory tree.
    write_path_file_train = filename_train.replace("regular","normalize")
    write_path_file_val = filename_val.replace("regular","normalize")
    write_path_file_test = filename_test.replace("regular", "normalize")
    #print write_path_file_train
    #print filename_val
    #print write_path_file_val
    #print filename_test
    #print write_path_file_test
    data_normalized_train.to_csv(write_path_file_train,index=None)
    data_normalized_val.to_csv(write_path_file_val,index=None)
    data_normalized_test.to_csv(write_path_file_test,index=None)
def normalize_features(select = "select"):
if select == "select":
path_classify = config.SEL_FEAT_TRAIN_REGULAR_CLASSIFY
path_estimate = config.SEL_FEAT_TRAIN_REGULAR_ESTIMATE
else:
path_classify = config.ALL_FEAT_TRAIN_REGULAR_CLASSIFY
path_estimate = config.ALL_FEAT_TRAIN_REGULAR_ESTIMATE
list_train_classify = [os.path.join(path_classify, fn) for fn in next(os.walk(config.SEL_FEAT_TRAIN_REGULAR_CLASSIFY))[2]]
print list_train_classify
for i in range(len(list_train_classify)):
get_normalized_features(list_train_classify[i])
list_train_estimate = [os.path.join(path_estimate, fn) for fn in next(os.walk(config.SEL_FEAT_TRAIN_REGULAR_ESTIMATE))[2]]
print list_train_estimate
for i in range(len(list_train_estimate)):
get_normalized_features(list_train_estimate[i])
#normalize_features()
if __name__ == '__main__':
    # Optional single CLI argument chooses "select"ed vs all features;
    # defaults to the selected-feature directories.
    select = "select"
    if len(sys.argv) == 2:
        select = sys.argv[1]
    print select
    normalize_features(select)
ab93/Depression-Identification | src/feature_extract/extract_FACET.py | <filename>src/feature_extract/extract_FACET.py
import pandas as pd
from pprint import pprint
from glob import glob
import numpy as np
import re
import csv
import sys
# Ellie-utterance lookup tables keyed by utterance text (filled by readHelperData).
followUp = {}
ack = {}
nonIntimate = {}
intimate = {}
# (participantNo, question) -> [startTime, endTime] answer spans (filled by readTranscript).
featureList={}
# question -> annotation label ('D'/... and 'P'/...), from the annotation CSVs.
questionType_DND={}
questionType_PN={}
# NOTE(review): these accumulators are never written in this file.
discriminativeVectors=[]
nonDiscriminativeVectors=[]
def readHelperData():
    """Load the annotation CSVs and populate the module-level lookup dicts.

    Fills questionType_DND and questionType_PN from the annotation files,
    and buckets each Ellie utterance into followUp / ack / nonIntimate /
    intimate according to its tag (first occurrence wins).
    """
    global followUp, ack, nonIntimate, intimate, questionType_PN, questionType_DND
    utterrances = pd.read_csv('data/misc/IdentifyingFollowUps.csv')
    disc_nondisc = pd.read_csv('data/misc/DND_Annotations.csv')
    pos_neg = pd.read_csv('data/misc/PN_Annotations.csv')
    # Discriminative / non-discriminative annotations.
    for _, row in disc_nondisc.iterrows():
        questionType_DND[row['Questions']] = row['Annotations']
    # Positive / negative annotations.
    for _, row in pos_neg.iterrows():
        questionType_PN[row['Questions']] = row['Annotations']
    # Route each utterance to the dict matching its tag; keep the first value seen.
    tag_to_dict = {"#follow_up": followUp, "#ack": ack,
                   "#non_int": nonIntimate, "#int": intimate}
    for item in utterrances.itertuples():
        bucket = tag_to_dict.get(item[3])
        if bucket is not None and item[1] not in bucket:
            bucket[item[1]] = item[2]
def readTranscript():
    """Index the answer time span of each annotated intimate question.

    Scans every transcript under sys.argv[1] and fills the module-level
    featureList dict: (participantNo, question) -> [startTime, endTime].
    A span opens on an intimate question present in questionType_DND, is
    extended by follow-ups/acknowledgements and participant turns, and is
    closed when Ellie asks a non-intimate or un-annotated question.
    """
    global featureList
    transcriptFiles=glob(sys.argv[1]+'[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
    for i in range(0,len(transcriptFiles)):
        t=pd.read_csv(transcriptFiles[i], delimiter='\t')
        t = t.fillna("")
        captureStarted=False
        startTime=0.0
        endTime=0.0
        prevQuestion=""
        # Participant id is embedded in the path: .../<id>_P/<id>_TRANSCRIPT.csv
        participantNo=transcriptFiles[i][-18:-15]
        for j in xrange(len(t)):
            # Ellie's utterances may carry the canonical question in trailing parentheses.
            question=re.search(".*\((.*)\)$", t.iloc[j]['value'])
            if question is not None:
                question=question.group(1)
            else:
                question=t.iloc[j]['value']
            question=question.strip()
            if t.iloc[j]['speaker']=='Ellie':
                if question in nonIntimate and captureStarted:
                    # Non-intimate question closes the current span.
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)]=[startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1]=endTime
                    captureStarted=False
                elif question in intimate and question in questionType_DND and captureStarted:
                    # New annotated intimate question: flush the previous span, restart.
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)]=[startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1]=endTime
                    startTime=t.iloc[j]['start_time']
                    endTime=t.iloc[j]['stop_time']
                    prevQuestion=question
                elif question in intimate and question in questionType_DND and not captureStarted:
                    startTime=t.iloc[j]['start_time']
                    endTime=t.iloc[j]['stop_time']
                    prevQuestion=question
                    captureStarted=True
                elif question in intimate and question not in questionType_DND and captureStarted:
                    # Un-annotated intimate question also closes the span.
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)]=[startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1]=endTime
                    captureStarted=False
                # BUG FIX: parenthesized the disjunction. `and` binds tighter
                # than `or`, so `a or b and c` ran this branch for any follow-up
                # even when no capture was active, clobbering endTime.
                elif (question in followUp or question in ack) and captureStarted:
                    endTime=t.iloc[j]['stop_time']
            elif t.iloc[j]['speaker']=='Participant' and captureStarted:
                endTime=t.iloc[j]['stop_time']
def readFACET_DND():
    """Aggregate FACET features per (video, question) answer span and write
    the rows into discriminative / non-discriminative CSVs per questionType_DND.

    Each row is [video, question, startTime, endTime, mean-of-FACET-columns...].
    """
    groupByQuestion={}
    dFile=open('data/disc_nondisc/discriminative_FACET.csv','w')
    ndFile=open('data/disc_nondisc/nondiscriminative_FACET.csv','w')
    dWriter=csv.writer(dFile)
    ndWriter=csv.writer(ndFile)
    header=["video","question","starttime","endtime","frametime","Face X","Face Y","Face Width","Face Height","angerEvidence","contemptEvidence","disgustEvidence","joyEvidence","fearEvidence","baselineEvidence","sadnessEvidence","surpriseEvidence","confusionEvidence","frustrationEvidence","AU1Evidence","AU2Evidence","AU4Evidence","AU5Evidence","AU6Evidence","AU7Evidence","AU9Evidence","AU10Evidence","AU12Evidence","AU14Evidence","AU15Evidence","AU17Evidence","AU18Evidence","AU20Evidence","AU23Evidence","AU24Evidence","AU25Evidence","AU26Evidence","AU28Evidence","AU43Evidence","hasGlassesEvidence","isMaleEvidence"]
    dWriter.writerow(header)
    ndWriter.writerow(header)
    # Regroup featureList: participant -> [(question, [startTime, endTime]), ...]
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]]=[(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName=sys.argv[1]+item+'_P/'+item+'_FACET_features.csv'
        f=pd.read_csv(fileName, delimiter=',')
        for instance in groupByQuestion[item]:
            startTime=instance[1][0]
            endTime=instance[1][1]
            # Row indices of the frames nearest the span boundaries.
            # NOTE(review): DataFrame.ix is removed in modern pandas -- this
            # assumes the old pandas API the repo was written against.
            startFrame=f.ix[(f['Frametime']-startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame=f.ix[(f['Frametime']-endTime).abs().argsort()[:1]].index.tolist()[0]
            # Column-wise mean over the answer span.
            features=f.ix[startFrame:endFrame].mean(0).tolist()
            vector=instance[1][:]
            vector+=features
            # Prepend question, then video id -> matches the header layout.
            vector.insert(0,instance[0])
            vector.insert(0, item)
            vector=np.asarray(vector)
            #print item, instance[0], startTime, endTime
            if questionType_DND[instance[0]]=='D':
                dWriter.writerow(vector)
            else:
                ndWriter.writerow(vector)
    dFile.close()
    ndFile.close()
def readFACET_PN():
    """Aggregate FACET features per (video, question) answer span and write
    the rows into positive / negative CSVs per questionType_PN.

    Mirrors readFACET_DND but splits on the PN annotations.
    """
    groupByQuestion={}
    pFile=open('data/pos_neg/positive_FACET.csv','w')
    nFile=open('data/pos_neg/negative_FACET.csv','w')
    pWriter=csv.writer(pFile)
    nWriter=csv.writer(nFile)
    header=["video","question","starttime","endtime","frametime","Face X","Face Y","Face Width","Face Height","angerEvidence","contemptEvidence","disgustEvidence","joyEvidence","fearEvidence","baselineEvidence","sadnessEvidence","surpriseEvidence","confusionEvidence","frustrationEvidence","AU1Evidence","AU2Evidence","AU4Evidence","AU5Evidence","AU6Evidence","AU7Evidence","AU9Evidence","AU10Evidence","AU12Evidence","AU14Evidence","AU15Evidence","AU17Evidence","AU18Evidence","AU20Evidence","AU23Evidence","AU24Evidence","AU25Evidence","AU26Evidence","AU28Evidence","AU43Evidence","hasGlassesEvidence","isMaleEvidence"]
    pWriter.writerow(header)
    nWriter.writerow(header)
    # Regroup featureList: participant -> [(question, [startTime, endTime]), ...]
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]]=[(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName=sys.argv[1]+item+'_P/'+item+'_FACET_features.csv'
        f=pd.read_csv(fileName, delimiter=',')
        for instance in groupByQuestion[item]:
            startTime=instance[1][0]
            endTime=instance[1][1]
            # Frames nearest the span boundaries (old pandas .ix API).
            startFrame=f.ix[(f['Frametime']-startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame=f.ix[(f['Frametime']-endTime).abs().argsort()[:1]].index.tolist()[0]
            features=f.ix[startFrame:endFrame].mean(0).tolist()
            vector=instance[1][:]
            vector+=features
            vector.insert(0,instance[0])
            vector.insert(0, item)
            vector=np.asarray(vector)
            #print item, instance[0], startTime, endTime
            # NOTE(review): assumes every captured question appears in the PN
            # annotations; a missing key would raise KeyError here -- confirm.
            if questionType_PN[instance[0]]=='P':
                pWriter.writerow(vector)
            else:
                nWriter.writerow(vector)
    pFile.close()
    nFile.close()
if __name__=="__main__":
    # Pipeline: load annotations, index answer spans, then emit the
    # DND- and PN-split FACET feature CSVs.
    readHelperData()
    readTranscript()
    readFACET_DND()
    readFACET_PN()
ab93/Depression-Identification | src/obsolete/estimate.py | <reponame>ab93/Depression-Identification
import os
from copy import deepcopy
import numpy as np
from sklearn.linear_model import Lasso
import src.main.config
import src.main.feature_select
from src.models.regressor import MetaRegressor
from src.obsolete.utils import get_single_mode_data
def grid_search_meta(mode='acoustic',category='PN'):
    """Grid-search Lasso hyper-parameters for a two-regressor MetaRegressor.

    Args:
        mode: feature modality ('acoustic', 'visual' or 'linguistic').
        category: question category (e.g. 'PN').

    Writes one CSV line per configuration (weights, normalize flag, alphas,
    validation MAE) into GRID_SEARCH_REG_DIR/<mode>_<category>.csv and tracks
    the configuration with the lowest mean absolute error.
    """
    X_train, y_train, X_val, y_val = get_single_mode_data(mode, category,
                                                        problem_type='R')
    # Pooled train+val copies (kept for parity with the other experiments).
    X_data = deepcopy(X_train)
    X_data[0].extend(X_val[0])
    X_data[1].extend(X_val[1])
    y_data = deepcopy(y_train)
    y_data[0].extend(y_val[0])
    y_data[1].extend(y_val[1])
    # Ground truth for validation: per-participant mean label, as int.
    y_true_val = map(int,map(np.mean,y_val[0]))
    # None -> MetaRegressor's uniform weighting.
    reg_weights = [None, [0.7,0.3], [0.3,0.7]]
    # Lasso parameters
    l_alphas = np.logspace(-4,4,5)
    # BUG FIX: was initialised to 0.0, so `mean_abs_error < min_mae` could
    # never be true (MAE >= 0) and the best configuration was never recorded.
    min_mae = float('inf')
    max_reg1 = None
    max_reg2 = None
    max_weights = []
    with open(os.path.join(src.main.config.GRID_SEARCH_REG_DIR, mode + '_' + category + '.csv'), 'w') as outfile:
        for reg_wt in reg_weights:
            # BUG FIX: iterating `None` raised TypeError; treat None as uniform
            # [0.5, 0.5] for logging while still passing None to the model.
            temp = [float(w) for w in reg_wt] if reg_wt else [0.5, 0.5]
            for alpha_1 in l_alphas:
                for alpha_2 in l_alphas:
                    for is_normalized in (True, False):
                        reg_1 = Lasso(alpha=alpha_1, normalize=is_normalized)
                        reg_2 = Lasso(alpha=alpha_2, normalize=is_normalized)
                        meta_reg = MetaRegressor(regressors=[reg_1, reg_2], weights=reg_wt)
                        meta_reg.fit(X_train, y_train)
                        mean_abs_error = meta_reg.score(X_val, y_true_val, scoring='mean_abs_error')
                        # Keep the configuration with the lowest validation MAE.
                        if mean_abs_error < min_mae:
                            min_mae = mean_abs_error
                            max_reg1 = reg_1
                            max_reg2 = reg_2
                            max_weights = temp[:]
                        # BUG FIX: dropped the undefined r2_score (its
                        # computation was commented out, so writing/printing it
                        # raised NameError on the first iteration).
                        outfile.write(str(temp[0]) + ' ' + str(temp[1]) +
                                    ',' + str(is_normalized) + ',' + str(alpha_1) +
                                    ',' + str(alpha_2) + ',' +
                                    str(mean_abs_error) + '\n')
def main():
    """Run the regression grid search for each single modality (PN category)."""
    #print "Selecting features...\n"
    #feature_select.feature_select("R")
    #print "Normalizing features...\n"
    #normalize_features()
    print "Performing Grid Search for visual...\n"
    grid_search_meta(mode='visual', category='PN')
    print "Performing Grid Search for acoustic...\n"
    grid_search_meta(mode='acoustic', category='PN')
    print "Performing Grid Search for linguistic...\n"
    grid_search_meta(mode='linguistic', category='PN')
    #print "Performing Grid Search for Late Fusion...\n"
    #grid_search_lf(category='PN')
# Script entry point.
if __name__ == '__main__':
    main()
|
ab93/Depression-Identification | src/models/classifier.py | import sys
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, f1_score
from sklearn.pipeline import _name_estimators
class MetaClassifier(BaseEstimator, ClassifierMixin):
    """ A combined multi-class classifier

    Parameters
    ----------
    classifiers : array-like, shape = [n_classifiers]
    vote : str, {'classlabel', 'probability'}
    weights : array-like, shape = [n_classifiers]
        If a list of `int` or `float` values are
        provided, the classifiers are weighted by
        importance; Uses uniform weights if `weights=None`.
    method: str, {'stacking', 'majority_voting'}
    """
    def __init__(self, classifiers, vote='probability',
                weights=None, method='majority_voting'):
        self.classifiers = classifiers
        # name -> estimator map, mirroring sklearn's ensemble convention
        self.named_classifiers = {k: v for k, v in _name_estimators(classifiers)}
        self.vote = vote
        self.weights = weights
        self.method = method

    def fit(self, X_list, y_list, nested=True):
        """ Fit classifiers.

        Parameters
        ----------
        X_list : List of {array-like, sparse matrix},
                length = number of classifiers
                List of matrices of training samples
        y_list : List of array-like,
                length = number of classifiers
                List of vectors of target class labels
        nested: Bool (default = True)
            When True, each entry is a list of per-participant arrays that
            is flattened (vstack/hstack) before fitting.

        Returns
        -------
        self : object
        """
        assert(len(X_list) == len(y_list) == len(self.classifiers))
        if (not isinstance(X_list, list)) or (not isinstance(y_list, list)):
            # BUG FIX: the bare `raise TypeError` was followed by an
            # unreachable sys.exit(); raise a descriptive error instead.
            raise TypeError("X_list and y_list must be lists")
        self.lablenc_ = LabelEncoder()
        if nested:
            # Flatten per-participant nesting into flat sample matrices.
            # list(...) keeps this correct on Python 3, where map() is lazy.
            X_list = list(map(np.vstack, X_list))
            y_list = list(map(np.hstack, y_list))
        self.lablenc_.fit(y_list[0])    # make sure both y vectors have both the classes
        self.classes_ = self.lablenc_.classes_
        self.classifiers_ = []
        for i, clf in enumerate(self.classifiers):
            fitted_clf = clone(clf).fit(X_list[i],
                        self.lablenc_.transform(y_list[i]))
            self.classifiers_.append(fitted_clf)
        return self

    def _averaged_probas(self, X_list):
        """Per-classifier participant-level probabilities and their weighted average.

        For each classifier, a participant's probability is the mean of its
        per-response predict_proba rows. Returns (per-classifier preds,
        weighted average across classifiers).
        """
        preds = []
        for index, X in enumerate(X_list):
            pred = [np.mean(self.classifiers_[index].predict_proba(P), axis=0) for P in X]
            preds.append(pred)
        preds = np.asarray(preds)
        weighted_proba = np.average(preds, axis=0, weights=self.weights)
        return preds, weighted_proba

    def predict(self, X_list):
        """ Predict class labels

        Parameters
        ----------
        X_list : List of {array-like, sparse matrix},
                length = number of classifiers
                List of matrices of training samples

        Returns
        -------
        maj_vote : array-like, shape = [n_samples]
                Predicted class labels
        """
        _, weighted_proba = self._averaged_probas(X_list)
        return np.argmax(weighted_proba, axis=1)

    def predict_proba(self, X_list, get_all=False):
        """ Predict class probabilities.

        Parameters
        ----------
        X_list : List of {array-like, sparse matrix},
                length = number of classifiers
                List of matrices of training samples
        get_all : Bool (default = False)
                Also return each classifier's probabilities.

        Returns
        -------
        weighted_proba : array-like,shape = [n_samples, n_classes]
                Weighted average probability
                for each class per sample.
        """
        preds, weighted_proba = self._averaged_probas(X_list)
        if get_all:
            return preds[0], preds[1], weighted_proba
        return weighted_proba

    def score(self, Xs, y_true, scoring='f1'):
        """
        Returns the f1 score by default

        Parameters
        ----------
        Xs : List of {array-like, sparse matrix},
                length = number of classifiers
                List of matrices of training samples
        y_true: Single vectors of target class labels.
        scoring: 'f1' (binary) or 'accuracy'.
        """
        y_true = np.asarray(y_true)
        if scoring == 'f1':
            return f1_score(y_true, self.predict(Xs), average='binary')
        elif scoring == 'accuracy':
            return accuracy_score(y_true, self.predict(Xs))
class LateFusionClassifier(BaseEstimator, ClassifierMixin):
    """
    Plurality/Majority voting based Combined Classifier. Supports both
    single feature set/multiple feature set based Classification.
    """
    def __init__(self,classifiers,vote='classlabel',weights=None):
        self.classifiers = classifiers # list of classifiers
        self.vote = vote # 'probability' or 'classlabel'
        self.named_classifiers = {key: value for key, value in _name_estimators(classifiers)}
        self.weights = weights # weights for each of the classifiers
    def fit(self,Xs,ys):
        """
        Trains on the data.
        Xs = [[], [], []]  -- one training set per modality classifier
        ys = [[], [], []]  -- matching label vectors
        Returns: self
        """
        # NOTE(review): when Xs/ys are not both lists the sanity check is
        # skipped silently -- confirm whether raising was intended instead.
        if isinstance(Xs,list) and isinstance(ys,list):
            assert(len(Xs) == len(ys) == len(self.classifiers))
        self.classifiers_ = [] # store trained classifiers
        for idx, clf in enumerate(self.classifiers):
            # clone() gives each fit a fresh, unfitted copy of the estimator.
            fitted_clf = clone(clf).fit(Xs[idx],ys[idx])
            self.classifiers_.append(fitted_clf)
        return self
    def predict(self,Xs):
        """
        Predicts new data instances.
        Args:
            Xs = [[], [], []]
        Returns:
            maj_vote: Predicted class
        """
        if self.vote == 'probability':
            # Class with the highest weighted-average probability.
            maj_vote = np.argmax(self.predict_proba(Xs),axis=1)
        else: # classlabel
            # One predicted-label column per modality classifier.
            predictions = np.asarray([clf.predict(Xs[mode_idx]) for mode_idx,clf in enumerate(self.classifiers_)]).T
            ## print '\n',predictions
            # Weighted plurality vote across modalities for each sample.
            maj_vote = np.apply_along_axis(lambda x: np.argmax(np.bincount(x,
                                        weights=self.weights)),axis=1,arr=predictions)
        return maj_vote
    def predict_proba(self, Xs, get_all=False):
        """
        Predicts new data instances.
        Args:
            Xs = [[], [], []]
            get_all: also return each modality's probabilities.
        Returns:
            avg_proba: Average probabilities of the class
        """
        probas = np.asarray([clf.predict_proba(Xs[mode_idx])
                            for mode_idx,clf in enumerate(self.classifiers_)])
        avg_proba = np.average(probas, axis=0, weights=self.weights)
        if get_all:
            return probas[0], probas[1], probas[2], avg_proba
        return avg_proba
    def score(self,Xs,y_true,scoring='f1'):
        """
        Returns the weighted F1-score (default) or accuracy.
        """
        if scoring == 'f1':
            return f1_score(y_true,self.predict(Xs),average='binary')
        elif scoring == 'accuracy':
            return accuracy_score(y_true, self.predict(Xs))
|
ab93/Depression-Identification | src/feature_extract/extract_all_featues.py | import pandas as pd
from pprint import pprint
from glob import glob
import numpy as np
import re
import csv
import sys
# Ellie-utterance lookup tables keyed by utterance text (filled by readHelperData).
followUp = {}
ack = {}
nonIntimate = {}
intimate = {}
# (participantNo, question) -> [startTime, endTime] answer spans (filled by readTranscript).
featureList={}
# question -> annotation label maps from the annotation CSVs.
questionType_DND={}
questionType_PN={}
# NOTE(review): these accumulators are never written in this file.
discriminativeVectors=[]
nonDiscriminativeVectors=[]
def readHelperData():
    """Load the annotation CSVs and populate the module-level lookup dicts.

    Fills questionType_DND and questionType_PN from the annotation files,
    and buckets each Ellie utterance into followUp / ack / nonIntimate /
    intimate according to its tag (first occurrence wins).
    """
    global followUp, ack, nonIntimate, intimate, questionType_PN, questionType_DND
    utterrances = pd.read_csv('data/misc/IdentifyingFollowUps.csv')
    disc_nondisc = pd.read_csv('data/misc/DND_Annotations.csv')
    pos_neg = pd.read_csv('data/misc/PN_Annotations.csv')
    # Discriminative / non-discriminative annotations.
    for _, row in disc_nondisc.iterrows():
        questionType_DND[row['Questions']] = row['Annotations']
    # Positive / negative annotations.
    for _, row in pos_neg.iterrows():
        questionType_PN[row['Questions']] = row['Annotations']
    # Route each utterance to the dict matching its tag; keep the first value seen.
    tag_to_dict = {"#follow_up": followUp, "#ack": ack,
                   "#non_int": nonIntimate, "#int": intimate}
    for item in utterrances.itertuples():
        bucket = tag_to_dict.get(item[3])
        if bucket is not None and item[1] not in bucket:
            bucket[item[1]] = item[2]
def readTranscript():
    """Index the answer time span of each annotated intimate question.

    Scans every transcript under sys.argv[1] and fills the module-level
    featureList dict: (participantNo, question) -> [startTime, endTime].
    A span opens on an intimate question present in questionType_DND, is
    extended by follow-ups/acknowledgements and participant turns, and is
    closed when Ellie asks a non-intimate or un-annotated question.
    """
    global featureList
    transcriptFiles=glob(sys.argv[1]+'[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
    for i in range(0,len(transcriptFiles)):
        t=pd.read_csv(transcriptFiles[i], delimiter='\t')
        t = t.fillna("")
        captureStarted=False
        startTime=0.0
        endTime=0.0
        prevQuestion=""
        # Participant id is embedded in the path: .../<id>_P/<id>_TRANSCRIPT.csv
        participantNo=transcriptFiles[i][-18:-15]
        for j in xrange(len(t)):
            # Ellie's utterances may carry the canonical question in trailing parentheses.
            question=re.search(".*\((.*)\)$", t.iloc[j]['value'])
            if question is not None:
                question=question.group(1)
            else:
                question=t.iloc[j]['value']
            question=question.strip()
            if t.iloc[j]['speaker']=='Ellie':
                if question in nonIntimate and captureStarted:
                    # Non-intimate question closes the current span.
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)]=[startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1]=endTime
                    captureStarted=False
                elif question in intimate and question in questionType_DND and captureStarted:
                    # New annotated intimate question: flush the previous span, restart.
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)]=[startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1]=endTime
                    startTime=t.iloc[j]['start_time']
                    endTime=t.iloc[j]['stop_time']
                    prevQuestion=question
                elif question in intimate and question in questionType_DND and not captureStarted:
                    startTime=t.iloc[j]['start_time']
                    endTime=t.iloc[j]['stop_time']
                    prevQuestion=question
                    captureStarted=True
                elif question in intimate and question not in questionType_DND and captureStarted:
                    # Un-annotated intimate question also closes the span.
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)]=[startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1]=endTime
                    captureStarted=False
                # BUG FIX: parenthesized the disjunction. `and` binds tighter
                # than `or`, so `a or b and c` ran this branch for any follow-up
                # even when no capture was active, clobbering endTime.
                elif (question in followUp or question in ack) and captureStarted:
                    endTime=t.iloc[j]['stop_time']
            elif t.iloc[j]['speaker']=='Participant' and captureStarted:
                endTime=t.iloc[j]['stop_time']
|
ab93/Depression-Identification | src/tests/clf_test.py | import unittest
import numpy as np
from sklearn.linear_model import LogisticRegression
from ..models.classifier import MetaClassifier, LateFusionClassifier
from ..feature_extract.read_labels import features
from ..main.classify import get_single_mode_data, get_multi_data
class MetaClassifierTest(unittest.TestCase):
    """
    Tests for the models.MetaClassifier class
    """
    def _get_dummy_data(self):
        # Nested layout: one matrix of per-response feature vectors per participant.
        x1 = np.array([ np.array([[1,5,7], [1,2,4], [1,8,9]]), # [r1,r2,r3] for p1
                        np.array([[2,8,6], [2,0,3]]), # [r1,r2] for p2
                        np.array([[3,7,5], [3,4,3], [3,9,7]]) # [r1,r2,r3] for p3
                        ])
        # for non discriminative
        x2 = np.array([ np.array([[1,5,7], [1,2,4]]),
                        np.array([[2,8,6], [2,0,3], [2,5,5]]),
                        np.array([[3,7,5], [3,4,3], [3,9,7]])
                        ])
        # Per-response labels, one vector per participant.
        y1 = np.array([ np.array([1,1,1]),
                        np.array([1,1]),
                        np.array([0,0,0])
                        ])
        y2 = np.array([ np.array([0,0]),
                        np.array([0,0,0]),
                        np.array([1,1,1])
                        ])
        X = [x1,x2]
        y = [y1,y2]
        return X,y
    def _get_classifiers(self):
        # Class 1 up-weighted to counter the class imbalance in the real data.
        clf1 = LogisticRegression(n_jobs=-1, class_weight={1:4})
        clf2 = LogisticRegression(n_jobs=-1, class_weight={1:4})
        return [clf1, clf2]
    def test_fit_predict(self):
        # Smoke test: fit on the dummy nested data and print the labels.
        X_list, y_list = self._get_dummy_data()
        clfs = [LogisticRegression(C=100, penalty='l2'), LogisticRegression(C=10,penalty='l1')]
        meta_clf = MetaClassifier(clfs)
        meta_clf.fit(X_list,y_list)
        print "\npredict:",meta_clf.predict(X_list)
    def test_fit_predict_proba(self):
        # Smoke test: fit and print the weighted class probabilities.
        X_list, y_list = self._get_dummy_data()
        clfs = [LogisticRegression(C=100, penalty='l2'), LogisticRegression(C=10,penalty='l1')]
        meta_clf = MetaClassifier(clfs)
        meta_clf.fit(X_list,y_list)
        print "\npredict:",meta_clf.predict_proba(X_list)
    def test_fit_score(self):
        # Smoke test: score against a fixed participant-level ground truth.
        X_list, y_list = self._get_dummy_data()
        clfs = [LogisticRegression(C=100, penalty='l2'), LogisticRegression(C=10,penalty='l1')]
        meta_clf = MetaClassifier(clfs)
        meta_clf.fit(X_list,y_list)
        y_true = np.array([1,0,0])
        print "\nscore:",meta_clf.score(X_list, y_true)
    def test_model(self):
        # Integration test against the real single-mode data on disk.
        X_train, y_train, X_val, y_val = get_single_mode_data()
        # Participant-level truth: mean of the per-response labels, as int.
        y_true = map(int,map(np.mean,y_val[0]))
        clfs = self._get_classifiers()
        meta_clf = MetaClassifier(classifiers=clfs, weights=[0.9, 0.1])
        meta_clf.fit(X_train, y_train)
        print "\nTesting data..."
        preds = meta_clf.predict_proba(X_val, get_all=True)
        print "F1-score: ", meta_clf.score(X_val, y_true)
        print "Accuracy: ", meta_clf.score(X_val, y_true, scoring='accuracy')
        for i in xrange(len(y_true)):
            print preds[0][i], preds[1][i], y_true[i]
class LateFusionClassifierTest(unittest.TestCase):
    """
    Tests for the models.LateFusionClassifierTest class
    """
    def _get_dummy_data(self):
        # Same nested dummy data, duplicated across three modalities.
        x1 = np.array([ np.array([[1,5,7], [1,2,4], [1,8,9]]),
                        np.array([[2,8,6], [2,0,3]]),
                        np.array([[3,7,5], [3,4,3], [3,9,7]])
                        ])
        x2 = np.array([ np.array([[1,5,7], [1,2,4]]),
                        np.array([[2,8,6], [2,0,3], [2,5,5]]),
                        np.array([[3,7,5], [3,4,3], [3,9,7]])
                        ])
        y1 = np.array([ np.array([1,1,1]),
                        np.array([1,1]),
                        np.array([0,0,0])
                        ])
        y2 = np.array([ np.array([0,0]),
                        np.array([0,0,0]),
                        np.array([1,1,1])
                        ])
        X_acou, y_acou = [x1,x2], [y1,y2]
        X_vis, y_vis = [x1,x2], [y1,y2]
        X_lin, y_lin = [x1,x2], [y1,y2]
        return [X_acou, X_vis, X_lin], [y_acou, y_vis, y_lin]
    def _get_fitted_clf(self,Xs,ys):
        # A MetaClassifier already fitted on one modality's dummy data.
        clfs = [LogisticRegression(C=100, penalty='l2'), LogisticRegression(C=10,penalty='l1')]
        meta_clf = MetaClassifier(clfs)
        meta_clf.fit(Xs,ys)
        return meta_clf
    def test_fit_predict(self):
        # Smoke test: fuse three fitted modality classifiers and print outputs.
        Xs, Ys = self._get_dummy_data()
        clf1 = self._get_fitted_clf(Xs[0],Ys[0])
        clf2 = self._get_fitted_clf(Xs[1],Ys[1])
        clf3 = self._get_fitted_clf(Xs[2],Ys[2])
        lf_clf = LateFusionClassifier(classifiers=[clf1,clf2,clf3])
        lf_clf.fit(Xs,Ys)
        print "\npredict:\n", lf_clf.predict(Xs)
        print "\npredict_proba:\n",lf_clf.predict_proba(Xs)
    def test_scores(self):
        # Smoke test: fused score against a fixed ground truth.
        Xs, Ys = self._get_dummy_data()
        clf1 = self._get_fitted_clf(Xs[0],Ys[0])
        clf2 = self._get_fitted_clf(Xs[1],Ys[1])
        clf3 = self._get_fitted_clf(Xs[2],Ys[2])
        lf_clf = LateFusionClassifier(classifiers=[clf1,clf2,clf3])
        lf_clf.fit(Xs,Ys)
        y_true = np.array([1,0,0])
        print "\npredict:\n", lf_clf.predict(Xs)
        print "\nscore:", lf_clf.score(Xs,y_true)
    def test_late_fusion_model(self):
        # Integration test against the real multi-modal data on disk.
        # Read the data
        Xs_train, ys_train, Xs_val, ys_val = get_multi_data()
        # Per-modality D/ND classifiers with imbalance-compensating weights.
        clf_A_D = LogisticRegression(C=1, penalty='l2', class_weight={1:4})
        clf_A_ND = LogisticRegression(C=0.001, penalty='l1', class_weight={1:4})
        clf_V_D = LogisticRegression(C=1.0, penalty='l2', class_weight={1:4})
        clf_V_ND = LogisticRegression(C=1.0, penalty='l2', class_weight={1:4})
        clf_L_D = LogisticRegression(C=1.0, penalty='l2', class_weight={1:3})
        clf_L_ND = LogisticRegression(C=1.0, penalty='l2', class_weight={1:3})
        clf_A = MetaClassifier(classifiers=[clf_A_D, clf_A_ND])
        clf_V = MetaClassifier(classifiers=[clf_V_D, clf_V_ND])
        clf_L = MetaClassifier(classifiers=[clf_L_D, clf_L_ND])
        lf_clf = LateFusionClassifier(classifiers=[clf_A, clf_V, clf_L], weights=[0.6,0.2,0.1])
        lf_clf.fit(Xs_train, ys_train)
        print lf_clf.predict(Xs_val)
        preds = lf_clf.predict_proba(Xs_val, get_all=True)
        y_true = map(int,map(np.mean,ys_val[0][0]))
        print lf_clf.score(Xs_val,y_true,scoring='f1')
        for i in xrange(len(y_true)):
            print preds[0][i], preds[1][i], preds[2][i], y_true[i]
# Test-runner entry point.
if __name__ == '__main__':
    unittest.main()
|
ab93/Depression-Identification | src/feature_extract/extract_OPENFACE.py | <reponame>ab93/Depression-Identification
import pandas as pd
from pprint import pprint
from glob import glob
import numpy as np
import re
import csv
import sys
# Ellie-utterance lookup tables keyed by utterance text (filled by readHelperData).
followUp = {}
ack = {}
nonIntimate = {}
intimate = {}
# (participantNo, question) -> [startTime, endTime] answer spans (filled by readTranscript).
featureList = {}
# question -> annotation label maps from the annotation CSVs.
questionType_DND = {}
questionType_PN = {}
# NOTE(review): these accumulators are never written in this file.
discriminativeVectors = []
nonDiscriminativeVectors = []
'''headers for OPENFACE features'''
# Output CSV column layout shared by readOPENFACE_DND and readOPENFACE_PN:
# identifiers, mean of each OPENFACE column, std-dev of each, then gender.
header = ["video", "question", "starttime", "endtime", 'pose_Tx_mean', 'pose_Ty_mean', 'pose_Tz_mean', 'pose_Rx_mean', 'pose_Ry_mean',
          'pose_Rz_mean', 'AU01_r_mean', 'AU02_r_mean', 'AU04_r_mean', 'AU05_r_mean', 'AU06_r_mean', 'AU07_r_mean',
          'AU09_r_mean', 'AU10_r_mean', 'AU12_r_mean', 'AU14_r_mean', 'AU15_r_mean', 'AU17_r_mean', 'AU20_r_mean',
          'AU23_r_mean', 'AU25_r_mean', 'AU26_r_mean', 'AU45_r_mean', 'AU01_c_mean', 'AU02_c_mean', 'AU04_c_mean',
          'AU05_c_mean', 'AU06_c_mean', 'AU07_c_mean', 'AU09_c_mean', 'AU10_c_mean', 'AU12_c_mean', 'AU14_c_mean',
          'AU15_c_mean', 'AU17_c_mean', 'AU20_c_mean', 'AU23_c_mean', 'AU25_c_mean', 'AU26_c_mean', 'AU28_c_mean',
          'AU45_c_mean', 'pose_Tx_stddev',
          'pose_Ty_stddev', 'pose_Tz_stddev', 'pose_Rx_stddev', 'pose_Ry_stddev', 'pose_Rz_stddev', 'AU01_r_stddev',
          'AU02_r_stddev', 'AU04_r_stddev', 'AU05_r_stddev', 'AU06_r_stddev', 'AU07_r_stddev', 'AU09_r_stddev',
          'AU10_r_stddev', 'AU12_r_stddev', 'AU14_r_stddev', 'AU15_r_stddev', 'AU17_r_stddev', 'AU20_r_stddev',
          'AU23_r_stddev', 'AU25_r_stddev', 'AU26_r_stddev', 'AU45_r_stddev', 'AU01_c_stddev', 'AU02_c_stddev',
          'AU04_c_stddev', 'AU05_c_stddev', 'AU06_c_stddev', 'AU07_c_stddev', 'AU09_c_stddev', 'AU10_c_stddev',
          'AU12_c_stddev', 'AU14_c_stddev', 'AU15_c_stddev', 'AU17_c_stddev', 'AU20_c_stddev', 'AU23_c_stddev',
          'AU25_c_stddev', 'AU26_c_stddev', 'AU28_c_stddev', 'AU45_c_stddev','gender']
'''
Reads DND questions and PN questions.
Retrieves acknowledgements, follow ups, intimate and non intimate questions and stores in global variables
'''
def readHelperData():
    """Load the annotation CSVs and populate the module-level lookup dicts.

    Fills questionType_DND and questionType_PN from the annotation files,
    and buckets each Ellie utterance into followUp / ack / nonIntimate /
    intimate according to its tag (first occurrence wins).
    """
    global followUp, ack, nonIntimate, intimate, questionType_PN, questionType_DND
    utterrances = pd.read_csv('data/misc/IdentifyingFollowUps.csv')
    disc_nondisc = pd.read_csv('data/misc/DND_Annotations.csv')
    pos_neg = pd.read_csv('data/misc/PN_Annotations.csv')
    # Discriminative / non-discriminative annotations.
    for _, row in disc_nondisc.iterrows():
        questionType_DND[row['Questions']] = row['Annotations']
    # Positive / negative annotations.
    for _, row in pos_neg.iterrows():
        questionType_PN[row['Questions']] = row['Annotations']
    # Route each utterance to the dict matching its tag; keep the first value seen.
    tag_to_dict = {"#follow_up": followUp, "#ack": ack,
                   "#non_int": nonIntimate, "#int": intimate}
    for item in utterrances.itertuples():
        bucket = tag_to_dict.get(item[3])
        if bucket is not None and item[1] not in bucket:
            bucket[item[1]] = item[2]
'''
Reads transcripts, captures the start and end times of the answers for most frequent intimate questions. Also captures the start and end times of follow up questions that are following most frequent intimate questions
'''
def readTranscript():
    """Index answer time spans for every frequent intimate DND question.

    For each transcript under sys.argv[1], fills the module-level featureList:
        (participantNo, question) -> [startTime, endTime]
    A span opens on an intimate question present in questionType_DND, is
    extended by follow-ups/acknowledgements, and closes when Ellie asks a
    non-intimate or un-annotated question (that question's start_time is
    used as the end of the span).
    """
    global featureList
    transcriptFiles = glob(sys.argv[1] + '[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
    for i in range(0, len(transcriptFiles)):
        # Some transcripts are comma- and some tab-separated; accept both.
        t = pd.read_csv(transcriptFiles[i], delimiter=',|\t', engine='python')
        t = t.fillna("")
        captureStarted = False
        startTime = 0.0
        endTime = 0.0
        prevQuestion = ""
        # Participant id is embedded in the path: .../<id>_P/<id>_TRANSCRIPT.csv
        participantNo = transcriptFiles[i][-18:-15]
        for j in xrange(len(t)):
            # Ellie's utterances may carry the canonical question in trailing parentheses.
            question = re.search(".*\((.*)\)$", t.iloc[j]['value'])
            if question is not None:
                question = question.group(1)
            else:
                question = t.iloc[j]['value']
            question = question.strip()
            if t.iloc[j]['speaker'] == 'Ellie':
                if question in nonIntimate and captureStarted:
                    # Non-intimate question closes the current span.
                    endTime = t.iloc[j]['start_time']
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    captureStarted = False
                elif question in intimate and question in questionType_DND and captureStarted:
                    # New annotated intimate question: flush the previous span, restart.
                    endTime = t.iloc[j]['start_time']
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    startTime = t.iloc[j]['start_time']
                    endTime = t.iloc[j]['stop_time']
                    prevQuestion = question
                elif question in intimate and question in questionType_DND and not captureStarted:
                    startTime = t.iloc[j]['start_time']
                    endTime = t.iloc[j]['stop_time']
                    prevQuestion = question
                    captureStarted = True
                elif question in intimate and question not in questionType_DND and captureStarted:
                    # Un-annotated intimate question also closes the span.
                    endTime = t.iloc[j]['start_time']
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    captureStarted = False
                # BUG FIX: parenthesized the disjunction. `and` binds tighter
                # than `or`, so `a or b and c` ran this branch for any follow-up
                # even when no capture was active, clobbering endTime.
                elif (question in followUp or question in ack) and captureStarted:
                    endTime = t.iloc[j]['stop_time']
            elif t.iloc[j]['speaker'] == 'Participant' and captureStarted:
                # Participant turns are ignored in this variant; spans end at
                # Ellie's next question.
                continue
                # endTime = t.iloc[j]['stop_time']
'''
Generates features from OPENFACE files considering the start and end times for each frequent intimate questions from DND list.
Features are generated by taking mean and std dev of all the features for every question for every video
'''
def readOPENFACE_DND():
    """Write per-(video, question) OPENFACE feature rows, split into the
    discriminative / non-discriminative CSVs according to questionType_DND.

    Each row is the mean and std-dev of the OPENFACE columns over the answer
    span recorded in featureList, plus the participant's gender (see the
    module-level `header` for the exact layout).
    """
    print "DND"
    groupByQuestion = {}
    # participant id -> gender.
    # NOTE(review): Series.from_csv is removed in modern pandas; this relies
    # on the old pandas API the repo was written against.
    gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
    dFile = open('data/disc_nondisc/discriminative_OPENFACE.csv', 'w')
    ndFile = open('data/disc_nondisc/nondiscriminative_OPENFACE.csv', 'w')
    dWriter = csv.writer(dFile)
    ndWriter = csv.writer(ndFile)
    dWriter.writerow(header)
    ndWriter.writerow(header)
    # Regroup featureList: participant -> [(question, [startTime, endTime]), ...]
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]] = [(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName = sys.argv[1] + item + '_P/' + item + '_OPENFACE.txt'
        f = pd.read_csv(fileName, delimiter=', ', engine='python')
        print item
        for instance in groupByQuestion[item]:
            startTime = instance[1][0]
            endTime = instance[1][1]
            # Row indices of the frames nearest the span boundaries (old .ix API).
            startFrame = f.ix[(f['timestamp'] - startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame = f.ix[(f['timestamp'] - endTime).abs().argsort()[:1]].index.tolist()[0]
            features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
            features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
            # Keep only the first 45 statistics from each aggregate.
            if len(features_mean)>45:
                features_mean = features_mean[:45]
            if len(features_stddev)>45:
                features_stddev = features_stddev[:45]
            vector = instance[1][:]
            vector += features_mean
            vector += features_stddev
            #drop frame_mean, frame_stddev, timestamp_mean, timestamp_stddev, success_mean,
            #success_stddev, confidence_mean, confidence_stddev as they don't make sense.
            #vector = instance[1][0:2] + instance[1][6:47] + instance[1][51:]
            for i in range(4):
                del(vector[2])
            for i in range(4):
                del(vector[43])
            # Prepend video id and question, append gender -> matches `header`.
            vector.insert(0, item)
            vector.insert(1, instance[0])
            vector.append(gender[item])
            vector = np.asarray(vector)
            if questionType_DND[instance[0]] == 'D':
                dWriter.writerow(vector)
            else:
                ndWriter.writerow(vector)
    dFile.close()
    ndFile.close()
'''
Generates features from OPENFACE files considering the start and end times for each frequent intimate questions from PN list.
Features are generated by taking mean and std dev of all the features for every question for every video
'''
def readOPENFACE_PN():
    """Write per-(video, question) OPENFACE feature rows, split into the
    positive / negative CSVs according to questionType_PN.

    Mirrors readOPENFACE_DND but splits on the PN annotations.
    """
    print "PN"
    groupByQuestion = {}
    # participant id -> gender (old pandas Series.from_csv API).
    gender=pd.Series.from_csv('data/misc/gender.csv').to_dict()
    pFile = open('data/pos_neg/positive_OPENFACE.csv', 'w')
    nFile = open('data/pos_neg/negative_OPENFACE.csv', 'w')
    pWriter = csv.writer(pFile)
    nWriter = csv.writer(nFile)
    pWriter.writerow(header)
    nWriter.writerow(header)
    # Regroup featureList: participant -> [(question, [startTime, endTime]), ...]
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]] = [(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName = sys.argv[1] + item + '_P/' + item + '_OPENFACE.txt'
        f = pd.read_csv(fileName, delimiter=', ', engine='python')
        print item
        for instance in groupByQuestion[item]:
            startTime = instance[1][0]
            endTime = instance[1][1]
            # Row indices of the frames nearest the span boundaries (old .ix API).
            startFrame = f.ix[(f['timestamp'] - startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame = f.ix[(f['timestamp'] - endTime).abs().argsort()[:1]].index.tolist()[0]
            features_mean = f.ix[startFrame:endFrame].mean(0).tolist()
            features_stddev = f.ix[startFrame:endFrame].std(0).tolist()
            # Keep only the first 45 statistics from each aggregate.
            if len(features_mean)>45:
                features_mean = features_mean[:45]
            if len(features_stddev)>45:
                features_stddev = features_stddev[:45]
            vector = instance[1][:]
            vector += features_mean
            vector += features_stddev
            #drop frame_mean, frame_stddev, timestamp_mean, timestamp_stddev, success_mean,
            #success_stddev, confidence_mean, confidence_stddev as they don't make sense.
            for i in range(4):
                del(vector[2])
            for i in range(4):
                del(vector[43])
            # Prepend video id and question, append gender -> matches `header`.
            vector.insert(0, item)
            vector.insert(1, instance[0])
            vector.append(gender[item])
            vector = np.asarray(vector)
            # print item, instance[0], startTime, endTime
            # NOTE(review): assumes every captured question appears in the PN
            # annotations; a missing key would raise KeyError here -- confirm.
            if questionType_PN[instance[0]] == 'P':
                pWriter.writerow(vector)
            else:
                nWriter.writerow(vector)
    pFile.close()
    nFile.close()
if __name__ == "__main__":
    # Pipeline: load annotations, derive per-question time spans from the
    # transcripts, then write the D/ND and P/N OPENFACE feature CSVs.
    readHelperData()
    readTranscript()
    readOPENFACE_DND()
    readOPENFACE_PN()
|
ab93/Depression-Identification | src/feature_extract/extract_CLM.py | import pandas as pd
from pprint import pprint
from glob import glob
import numpy as np
import re
import csv
import sys
# Utterance-category lookups (utterance text -> value), filled by readHelperData()
followUp = {}
ack = {}
nonIntimate = {}
intimate = {}
# (participant id, question text) -> [start_time, end_time], filled by readTranscript()
featureList = {}
# question text -> annotation ('D'/'ND' and 'P'/'N' respectively)
questionType_DND = {}
questionType_PN = {}
discriminativeVectors = []
nonDiscriminativeVectors = []
def readHelperData():
    """Populate the module-level lookup tables from the annotation CSVs.

    Fills questionType_DND / questionType_PN (question -> annotation) and the
    followUp / ack / nonIntimate / intimate utterance dictionaries.
    """
    global followUp, ack, nonIntimate, intimate, questionType_PN, questionType_DND
    utterrances = pd.read_csv('data/misc/IdentifyingFollowUps.csv')
    disc_nondisc = pd.read_csv('data/misc/DND_Annotations.csv')
    pos_neg = pd.read_csv('data/misc/PN_Annotations.csv')
    # Discriminative/Non-discriminative annotations
    for question, qType in zip(disc_nondisc['Questions'], disc_nondisc['Annotations']):
        questionType_DND[question] = qType
    # Positive/Negative annotations
    for question, qType in zip(pos_neg['Questions'], pos_neg['Annotations']):
        questionType_PN[question] = qType
    # Route each tagged utterance into its dictionary; only the first
    # occurrence of an utterance is recorded per category.
    tag_to_dict = {"#follow_up": followUp, "#ack": ack,
                   "#non_int": nonIntimate, "#int": intimate}
    for row in utterrances.itertuples():
        target = tag_to_dict.get(row[3])
        if target is not None and row[1] not in target:
            target[row[1]] = row[2]
def readTranscript():
    # Parse every participant transcript and record, for each (participant,
    # annotated intimate question), a [start_time, end_time] span covering the
    # question and the participant's answer. Follow-ups, acknowledgements and
    # participant turns extend the span. Results accumulate in featureList.
    global featureList
    transcriptFiles = glob(sys.argv[1] + '[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
    for i in range(0, len(transcriptFiles)):
        t = pd.read_csv(transcriptFiles[i], delimiter='\t')
        t = t.fillna("")
        captureStarted = False
        startTime = 0.0
        endTime = 0.0
        prevQuestion = ""
        # participant id is the "NNN" prefix of "NNN_TRANSCRIPT.csv"
        participantNo = transcriptFiles[i][-18:-15]
        for j in xrange(len(t)):
            # Ellie's utterances may embed the canonical question in trailing parentheses
            question = re.search(".*\((.*)\)$", t.iloc[j]['value'])
            if question is not None:
                question = question.group(1)
            else:
                question = t.iloc[j]['value']
            question = question.strip()
            if t.iloc[j]['speaker'] == 'Ellie':
                if question in nonIntimate and captureStarted:
                    # a non-intimate question closes the current capture window
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    captureStarted = False
                elif question in intimate and question in questionType_DND and captureStarted:
                    # a new annotated intimate question: flush the previous
                    # span, then immediately start capturing this one
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    startTime = t.iloc[j]['start_time']
                    endTime = t.iloc[j]['stop_time']
                    prevQuestion = question
                elif question in intimate and question in questionType_DND and not captureStarted:
                    startTime = t.iloc[j]['start_time']
                    endTime = t.iloc[j]['stop_time']
                    prevQuestion = question
                    captureStarted = True
                elif question in intimate and question not in questionType_DND and captureStarted:
                    # an unannotated intimate question: flush and stop capturing
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    captureStarted = False
                # NOTE(review): 'and' binds tighter than 'or', so a follow-up
                # match reaches this branch even when captureStarted is False;
                # confirm `(followUp or ack) and captureStarted` was intended.
                elif question in followUp or question in ack and captureStarted:
                    endTime = t.iloc[j]['stop_time']
            elif t.iloc[j]['speaker'] == 'Participant' and captureStarted:
                endTime = t.iloc[j]['stop_time']
def readCLM_DND():
    # Write one row per (participant, intimate question) to the discriminative
    # or non-discriminative CLM feature CSV according to the question's D/ND
    # annotation. Each row = time span + the mean of every CLM column over the
    # span. Relies on module globals featureList and questionType_DND;
    # sys.argv[1] is the data root directory.
    groupByQuestion = {}
    dFile = open('data/disc_nondisc/discriminative_CLM.csv', 'w')
    ndFile = open('data/disc_nondisc/nondiscriminative_CLM.csv', 'w')
    dWriter = csv.writer(dFile)
    ndWriter = csv.writer(ndFile)
    # identifying columns, raw CLM columns, then 68 x/y landmark coordinates
    header = ["video", "question", "starttime", "endtime","frame", "timestamp", "confidence", "success", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9",
              "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24",
              "x25", "x26", "x27", "x28", "x29", "x30", "x31", "x32", "x33", "x34", "x35", "x36", "x37", "x38", "x39",
              "x40", "x41", "x42", "x43", "x44", "x45", "x46", "x47", "x48", "x49", "x50", "x51", "x52", "x53", "x54",
              "x55", "x56", "x57", "x58", "x59", "x60", "x61", "x62", "x63", "x64", "x65", "x66", "x67", "y0", "y1", "y2",
              "y3", "y4", "y5", "y6", "y7", "y8", "y9", "y10", "y11", "y12", "y13", "y14", "y15", "y16", "y17", "y18",
              "y19", "y20", "y21", "y22", "y23", "y24", "y25", "y26", "y27", "y28", "y29", "y30", "y31", "y32", "y33",
              "y34", "y35", "y36", "y37", "y38", "y39", "y40", "y41", "y42", "y43", "y44", "y45", "y46", "y47", "y48",
              "y49", "y50", "y51", "y52", "y53", "y54", "y55", "y56", "y57", "y58", "y59", "y60", "y61", "y62", "y63",
              "y64", "y65", "y66", "y67"]
    dWriter.writerow(header)
    ndWriter.writerow(header)
    # regroup featureList by participant so each CLM file is read only once
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]] = [(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName = sys.argv[1] + item + '_P/' + item + '_CLM_features.txt'
        # FIX: a multi-character separator requires the python parsing engine
        # (the C engine warns and falls back); matches the OPENFACE reader.
        f = pd.read_csv(fileName, delimiter=', ', engine='python')
        for instance in groupByQuestion[item]:
            startTime = instance[1][0]
            endTime = instance[1][1]
            # rows whose timestamps are closest to the span boundaries
            startFrame = f.ix[(f['timestamp'] - startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame = f.ix[(f['timestamp'] - endTime).abs().argsort()[:1]].index.tolist()[0]
            features = f.ix[startFrame:endFrame].mean(0).tolist()
            vector = instance[1][:]
            vector += features
            vector.insert(0, instance[0])
            vector.insert(0, item)
            vector = np.asarray(vector)
            # print item, instance[0], startTime, endTime
            if questionType_DND[instance[0]] == 'D':
                dWriter.writerow(vector)
            else:
                ndWriter.writerow(vector)
    dFile.close()
    ndFile.close()
def readCLM_PN():
    # Write one row per (participant, intimate question) to the positive or
    # negative CLM feature CSV according to the question's P/N annotation.
    # Each row = time span + the mean of every CLM column over the span.
    # Relies on module globals featureList and questionType_PN; sys.argv[1]
    # is the data root directory.
    groupByQuestion = {}
    pFile = open('data/pos_neg/positive_CLM.csv', 'w')
    nFile = open('data/pos_neg/negative_CLM.csv', 'w')
    pWriter = csv.writer(pFile)
    nWriter = csv.writer(nFile)
    # identifying columns, raw CLM columns, then 68 x/y landmark coordinates
    header = ["video", "question", "starttime", "endtime","frame", "timestamp", "confidence", "success", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9",
              "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24",
              "x25", "x26", "x27", "x28", "x29", "x30", "x31", "x32", "x33", "x34", "x35", "x36", "x37", "x38", "x39",
              "x40", "x41", "x42", "x43", "x44", "x45", "x46", "x47", "x48", "x49", "x50", "x51", "x52", "x53", "x54",
              "x55", "x56", "x57", "x58", "x59", "x60", "x61", "x62", "x63", "x64", "x65", "x66", "x67", "y0", "y1", "y2",
              "y3", "y4", "y5", "y6", "y7", "y8", "y9", "y10", "y11", "y12", "y13", "y14", "y15", "y16", "y17", "y18",
              "y19", "y20", "y21", "y22", "y23", "y24", "y25", "y26", "y27", "y28", "y29", "y30", "y31", "y32", "y33",
              "y34", "y35", "y36", "y37", "y38", "y39", "y40", "y41", "y42", "y43", "y44", "y45", "y46", "y47", "y48",
              "y49", "y50", "y51", "y52", "y53", "y54", "y55", "y56", "y57", "y58", "y59", "y60", "y61", "y62", "y63",
              "y64", "y65", "y66", "y67"]
    pWriter.writerow(header)
    nWriter.writerow(header)
    # regroup featureList by participant so each CLM file is read only once
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]] = [(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName = sys.argv[1] + item + '_P/' + item + '_CLM_features.txt'
        # FIX: a multi-character separator requires the python parsing engine
        # (the C engine warns and falls back); matches the OPENFACE reader.
        f = pd.read_csv(fileName, delimiter=', ', engine='python')
        for instance in groupByQuestion[item]:
            startTime = instance[1][0]
            endTime = instance[1][1]
            # rows whose timestamps are closest to the span boundaries
            startFrame = f.ix[(f['timestamp'] - startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame = f.ix[(f['timestamp'] - endTime).abs().argsort()[:1]].index.tolist()[0]
            features = f.ix[startFrame:endFrame].mean(0).tolist()
            vector = instance[1][:]
            vector += features
            vector.insert(0, instance[0])
            vector.insert(0, item)
            vector = np.asarray(vector)
            # print item, instance[0], startTime, endTime
            if questionType_PN[instance[0]] == 'P':
                pWriter.writerow(vector)
            else:
                nWriter.writerow(vector)
    pFile.close()
    nFile.close()
if __name__ == "__main__":
    # Pipeline: load annotations, derive per-question time spans from the
    # transcripts, then write the D/ND and P/N CLM feature CSVs.
    readHelperData()
    readTranscript()
    readCLM_DND()
    readCLM_PN()
ab93/Depression-Identification | src/helpers/Q_extract.py | <reponame>ab93/Depression-Identification<filename>src/helpers/Q_extract.py
import pandas as pd
import os,re
import numpy as np
# Interactive helper: for each question of interest, show the transcript
# context around every occurrence and wait for manual confirmation.
filenames = []
p=re.compile('.*_TRANSCRIPT.csv')
# collect every transcript file under the data directory
for (path,dir,files) in os.walk('../../data/'):
    for each in files:
        if p.match(each):
            filenames.append(path+"/"+each)
print filenames
print len(filenames)
list_of_qs = ["what are you","what do you do now"]
#print len(list_of_qs)
#for each in list_of_qs:
#    print each
#data_file = open('dictionary.json','r')
#questions = json.load(data_file)
#print questions
for each in list_of_qs:
    print "Q: ",each
    for file in filenames:
        df = pd.read_csv(file,sep='\t')
        df = df.replace(np.nan,"",regex=True)
        i = 0
        found = 0
        while i < len(df):
            # Ellie's utterances may embed the canonical question in trailing parentheses
            val = re.search(r".*\((.*)\)$",df.iloc[i]['value'])
            if val != None:
                val = val.group(1)
            else:
                val = df.iloc[i]['value']
            if df.iloc[i]['speaker'] == "Ellie" and val==each:
                context = ""
                # show up to 5 transcript turns of context on either side
                beg=i-5 if i-5>=0 else 0
                end=i+5 if i+5<len(df) else len(df)-1
                print "File: ",file
                print df.iloc[beg:end][['speaker','value']]
                # any input other than 'y' stops scanning this question entirely
                inp=raw_input()
                if inp !='y':
                    found = 1
                    break
            i += 1
        if found==1:
            break
ab93/Depression-Identification | src/helpers/generate_full_liwc.py | #<NAME>
#Extract LIWC features using 2015 English version
from __future__ import division
from collections import defaultdict
import re
import os
from glob import glob
import sys
import pandas as pd
import csv
# Accumulated output rows and the (side-effect-populated) category name list.
features=[]
category_names=[]
""" Get features using LIWC 2015. categories in total."""
categoryIDs = {} #keep track of each category number
liwcD = {} #create liwc dictionary where each liwc dictionary word is a key that maps to a list that contains the liwc categories for that word
liwc_file = 'data/misc/LIWC2015_English.dic'#path to LIWC dict
read = open(liwc_file,'r').readlines()
# lines 1..76 of the .dic file declare "<number> <category name>" pairs
header = read[1:77] #change this number depending on how many liwc categories you want to use
for line in header:
    items = line.strip().split()
    number,category_name = items[0],items[1]
    categoryIDs[number]=category_name
# remaining lines declare "<word>\t<cat>\t<cat>..." entries
liwc_words = read[88:]#liwc dictionary words
for line in liwc_words:
    items = line.strip().split('\t')
    word = items[0].replace('(','').replace(')','')
    word_cats = items[1:]
    liwcD[word] = word_cats
def liwc(words):#words is a list of words
    """Return normalized LIWC category frequencies for a list of words.

    Each value is (number of matches for the category) / (number of words),
    ordered by category id. The module-level ``category_names`` list is
    refreshed as a side effect so it matches the returned vector's order.
    """
    global category_names, categoryIDs, liwcD, liwc_words
    total_words = len(words)
    line = ' '.join(words)
    feats = defaultdict(int)#keep track of liwc frequencies
    for word in sorted(liwcD.keys()): #first 9 words are emojis with special characters TODO: treat them separately
        cats = liwcD[word] #list of liwc categories
        # NOTE(review): the word is interpolated unescaped into the regex, and
        # the non-wildcard pattern requires a space on both sides, so the first
        # and last word of the line can never match -- confirm intended.
        if '*' in word:
            pattern = re.compile(' %s'%word.replace('*',''))
        else:
            pattern = re.compile(' %s '%word)
        matches = [(m.start(0), m.end(0)) for m in re.finditer(pattern, line)] #check is liwc word is in sentence
        if matches != []: #count matches
            for C in cats:
                feats[int(C)]+=len(matches)
        else:
            # still register the category so the output vector has a fixed size
            for C in cats:
                feats[int(C)] += 0
    if total_words != 0: #if 0 zero words in sentence - create zero vector
        liwc_features = [(float(feats[key])/total_words) for key in sorted(feats)]
    else:
        # FIX: was ','.join([0]*73), which raises TypeError (joining ints) and
        # would have returned a string instead of a vector. Emit a zero vector
        # with one entry per observed category, matching the normal path.
        liwc_features = [0.0 for _ in sorted(feats)]
    category_names = [categoryIDs[str(c)] for c in sorted(feats)]
    return liwc_features
if __name__=='__main__':
    # sys.argv: [1] data root, [2]/[3] start and end indices into the matched
    # transcript file list (allows processing participants in slices).
    ext=int(sys.argv[2])
    ext1=int(sys.argv[3])
    transcriptFiles = glob(sys.argv[1] + '[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
    for i in range(ext,ext1):
        # delimiter is a regex (comma or tab)
        t = pd.read_csv(transcriptFiles[i], delimiter=',|\t')
        t = t.fillna("")
        # participant id is the "NNN" prefix of "NNN_TRANSCRIPT.csv"
        participantNo=transcriptFiles[i][-18:-15]
        print participantNo
        for j in xrange(len(t)):
            if t.iloc[j]['speaker']=='Participant':
                # utterances may embed the canonical text in trailing parentheses
                utterance=re.search(".*\((.*)\)$", t.iloc[j]['value'])
                if utterance is not None:
                    utterance=utterance.group(1)
                else:
                    utterance=t.iloc[j]['value']
                utterance=utterance.strip()
                split_utterance=utterance.split(" ")
                feature=liwc(split_utterance)
                features.append([participantNo, utterance]+feature)
    # one output file per processed slice
    with open('liwc_new'+str(ext)+'_'+str(ext1)+'.csv','w') as f:
        writer=csv.writer(f)
        writer.writerow(['video','question']+category_names)
        for item in features:
            writer.writerow(item)
|
ab93/Depression-Identification | src/feature_extract/extract_CLM_3D.py | <filename>src/feature_extract/extract_CLM_3D.py
import pandas as pd
from pprint import pprint
from glob import glob
import numpy as np
import re
import csv
import sys
# Utterance-category lookups (utterance text -> value), filled by readHelperData()
followUp = {}
ack = {}
nonIntimate = {}
intimate = {}
# (participant id, question text) -> [start_time, end_time], filled by readTranscript()
featureList = {}
# question text -> annotation ('D'/'ND' and 'P'/'N' respectively)
questionType_DND = {}
questionType_PN = {}
discriminativeVectors = []
nonDiscriminativeVectors = []
def readHelperData():
    # Load the annotation CSVs and populate the module-level lookup tables:
    # questionType_DND / questionType_PN (question -> annotation) and the
    # followUp / ack / nonIntimate / intimate utterance dictionaries.
    global followUp, ack, nonIntimate, intimate, questionType_PN, questionType_DND
    utterrances = pd.read_csv('data/misc/IdentifyingFollowUps.csv')
    disc_nondisc = pd.read_csv('data/misc/DND_Annotations.csv')
    pos_neg = pd.read_csv('data/misc/PN_Annotations.csv')
    # Discriminative/Non-discriminative annotations
    for i in xrange(len(disc_nondisc)):
        question = disc_nondisc.iloc[i]['Questions']
        qType = disc_nondisc.iloc[i]['Annotations']
        questionType_DND[question] = qType
    # Positive/Negative annotations
    for i in xrange(len(pos_neg)):
        question = pos_neg.iloc[i]['Questions']
        qType = pos_neg.iloc[i]['Annotations']
        questionType_PN[question] = qType
    # itertuples rows: item[1] = utterance text, item[3] = tag; item[2] is the
    # stored value (presumably an identifier -- confirm against the CSV).
    # Only the first occurrence of an utterance is recorded per category.
    for item in utterrances.itertuples():
        if item[3] == "#follow_up" and item[1] not in followUp:
            followUp[item[1]] = item[2]
        elif item[3] == "#ack" and item[1] not in ack:
            ack[item[1]] = item[2]
        elif item[3] == "#non_int" and item[1] not in nonIntimate:
            nonIntimate[item[1]] = item[2]
        elif item[3] == "#int" and item[1] not in intimate:
            intimate[item[1]] = item[2]
def readTranscript():
    # Parse every participant transcript and record, for each (participant,
    # annotated intimate question), a [start_time, end_time] span covering the
    # question and the participant's answer. Follow-ups, acknowledgements and
    # participant turns extend the span. Results accumulate in featureList.
    global featureList
    transcriptFiles = glob(sys.argv[1] + '[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
    for i in range(0, len(transcriptFiles)):
        t = pd.read_csv(transcriptFiles[i], delimiter='\t')
        t = t.fillna("")
        captureStarted = False
        startTime = 0.0
        endTime = 0.0
        prevQuestion = ""
        # participant id is the "NNN" prefix of "NNN_TRANSCRIPT.csv"
        participantNo = transcriptFiles[i][-18:-15]
        for j in xrange(len(t)):
            # Ellie's utterances may embed the canonical question in trailing parentheses
            question = re.search(".*\((.*)\)$", t.iloc[j]['value'])
            if question is not None:
                question = question.group(1)
            else:
                question = t.iloc[j]['value']
            question = question.strip()
            if t.iloc[j]['speaker'] == 'Ellie':
                if question in nonIntimate and captureStarted:
                    # a non-intimate question closes the current capture window
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    captureStarted = False
                elif question in intimate and question in questionType_DND and captureStarted:
                    # a new annotated intimate question: flush the previous
                    # span, then immediately start capturing this one
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    startTime = t.iloc[j]['start_time']
                    endTime = t.iloc[j]['stop_time']
                    prevQuestion = question
                elif question in intimate and question in questionType_DND and not captureStarted:
                    startTime = t.iloc[j]['start_time']
                    endTime = t.iloc[j]['stop_time']
                    prevQuestion = question
                    captureStarted = True
                elif question in intimate and question not in questionType_DND and captureStarted:
                    # an unannotated intimate question: flush and stop capturing
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)] = [startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1] = endTime
                    captureStarted = False
                # NOTE(review): 'and' binds tighter than 'or', so a follow-up
                # match reaches this branch even when captureStarted is False;
                # confirm `(followUp or ack) and captureStarted` was intended.
                elif question in followUp or question in ack and captureStarted:
                    endTime = t.iloc[j]['stop_time']
            elif t.iloc[j]['speaker'] == 'Participant' and captureStarted:
                endTime = t.iloc[j]['stop_time']
# Output CSV header shared by readCLM3D_DND/readCLM3D_PN: identifying columns,
# raw CLM columns, then 68 facial landmarks as X/Y/Z coordinate columns.
header = ["video", "question", "starttime", "endtime", "frame", "timestamp", "confidence", "success", "X0", "X1", "X2",
          "X3", "X4", "X5", "X6", "X7", "X8", "X9",
          "X10", "X11", "X12", "X13", "X14", "X15", "X16", "X17", "X18", "X19", "X20", "X21", "X22", "X23", "X24",
          "X25", "X26", "X27", "X28", "X29", "X30", "X31", "X32", "X33", "X34", "X35", "X36", "X37", "X38", "X39",
          "X40", "X41", "X42", "X43", "X44", "X45", "X46", "X47", "X48", "X49", "X50", "X51", "X52", "X53", "X54",
          "X55", "X56", "X57", "X58", "X59", "X60", "X61", "X62", "X63", "X64", "X65", "X66", "X67", "Y0", "Y1", "Y2",
          "Y3", "Y4", "Y5", "Y6", "Y7", "Y8", "Y9", "Y10", "Y11", "Y12", "Y13", "Y14", "Y15", "Y16", "Y17", "Y18",
          "Y19", "Y20", "Y21", "Y22", "Y23", "Y24", "Y25", "Y26", "Y27", "Y28", "Y29", "Y30", "Y31", "Y32", "Y33",
          "Y34", "Y35", "Y36", "Y37", "Y38", "Y39", "Y40", "Y41", "Y42", "Y43", "Y44", "Y45", "Y46", "Y47", "Y48",
          "Y49", "Y50", "Y51", "Y52", "Y53", "Y54", "Y55", "Y56", "Y57", "Y58", "Y59", "Y60", "Y61", "Y62", "Y63",
          "Y64", "Y65", "Y66", "Y67", "Z0", "Z1", "Z2", "Z3", "Z4", "Z5", "Z6", "Z7", "Z8", "Z9", "Z10", "Z11", "Z12",
          "Z13", "Z14", "Z15", "Z16", "Z17", "Z18", "Z19", "Z20", "Z21", "Z22", "Z23", "Z24", "Z25", "Z26", "Z27",
          "Z28",
          "Z29", "Z30", "Z31", "Z32", "Z33", "Z34", "Z35", "Z36", "Z37", "Z38", "Z39", "Z40", "Z41", "Z42", "Z43",
          "Z44",
          "Z45", "Z46", "Z47", "Z48", "Z49", "Z50", "Z51", "Z52", "Z53", "Z54", "Z55", "Z56", "Z57", "Z58", "Z59",
          "Z60", "Z61", "Z62", "Z63", "Z64", "Z65", "Z66", "Z67"]
def readCLM3D_DND():
    # Write one row per (participant, intimate question) to the discriminative
    # or non-discriminative CLM-3D feature CSV according to the question's
    # D/ND annotation. Each row = time span + mean of every CLM-3D column over
    # the span. Relies on module globals featureList, header, questionType_DND;
    # sys.argv[1] is the data root directory.
    groupByQuestion = {}
    dFile = open('data/disc_nondisc/discriminative_CLM_3D.csv', 'w')
    ndFile = open('data/disc_nondisc/nondiscriminative_CLM_3D.csv', 'w')
    dWriter = csv.writer(dFile)
    ndWriter = csv.writer(ndFile)
    dWriter.writerow(header)
    ndWriter.writerow(header)
    # regroup featureList by participant so each CLM-3D file is read only once
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]] = [(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName = sys.argv[1] + item + '_P/' + item + '_CLM_features3D.txt'
        # FIX: separator was ' ,' (space-comma), inconsistent with the ', '
        # used by readCLM3D_PN on the very same files; also use the python
        # engine, which multi-character separators require.
        f = pd.read_csv(fileName, delimiter=', ', engine='python')
        for instance in groupByQuestion[item]:
            startTime = instance[1][0]
            endTime = instance[1][1]
            # rows whose timestamps are closest to the span boundaries
            startFrame = f.ix[(f['timestamp'] - startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame = f.ix[(f['timestamp'] - endTime).abs().argsort()[:1]].index.tolist()[0]
            features = f.ix[startFrame:endFrame].mean(0).tolist()
            vector = instance[1][:]
            vector += features
            vector.insert(0, instance[0])
            vector.insert(0, item)
            vector = np.asarray(vector)
            # print item, instance[0], startTime, endTime
            if questionType_DND[instance[0]] == 'D':
                dWriter.writerow(vector)
            else:
                ndWriter.writerow(vector)
    dFile.close()
    ndFile.close()
def readCLM3D_PN():
    # Write one row per (participant, intimate question) to the positive or
    # negative CLM-3D feature CSV according to the question's P/N annotation.
    # Each row = time span + mean of every CLM-3D column over the span.
    # Relies on module globals featureList, header and questionType_PN;
    # sys.argv[1] is the data root directory.
    groupByQuestion = {}
    pFile = open('data/pos_neg/positive_CLM_3D.csv', 'w')
    nFile = open('data/pos_neg/negative_CLM_3D.csv', 'w')
    pWriter = csv.writer(pFile)
    nWriter = csv.writer(nFile)
    pWriter.writerow(header)
    nWriter.writerow(header)
    # regroup featureList by participant so each CLM-3D file is read only once
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]] = [(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName = sys.argv[1] + item + '_P/' + item + '_CLM_features3D.txt'
        # FIX: a multi-character separator requires the python parsing engine
        # (the C engine warns and falls back); matches the OPENFACE reader.
        f = pd.read_csv(fileName, delimiter=', ', engine='python')
        for instance in groupByQuestion[item]:
            startTime = instance[1][0]
            endTime = instance[1][1]
            # rows whose timestamps are closest to the span boundaries
            startFrame = f.ix[(f['timestamp'] - startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame = f.ix[(f['timestamp'] - endTime).abs().argsort()[:1]].index.tolist()[0]
            features = f.ix[startFrame:endFrame].mean(0).tolist()
            vector = instance[1][:]
            vector += features
            vector.insert(0, instance[0])
            vector.insert(0, item)
            vector = np.asarray(vector)
            # print item, instance[0], startTime, endTime
            if questionType_PN[instance[0]] == 'P':
                pWriter.writerow(vector)
            else:
                nWriter.writerow(vector)
    pFile.close()
    nFile.close()
if __name__ == "__main__":
    # Pipeline: load annotations, derive per-question time spans from the
    # transcripts, then write the D/ND and P/N CLM-3D feature CSVs.
    readHelperData()
    readTranscript()
    readCLM3D_DND()
    readCLM3D_PN()
ab93/Depression-Identification | src/feature_extract/struct2csv.py | <reponame>ab93/Depression-Identification<gh_stars>10-100
import scipy.io as sio
import csv
import os
import math
# Convert the COVAREP .mat struct of each session (numbered >= 600) into a
# per-session FORMANT CSV containing the five formant tracks, NaNs zeroed.
sessions_path=os.path.join('..','..','..','Data_new')
sessions=os.listdir(sessions_path)
#header=['F0','VUV','NAQ','QOQ','H1H2','PSP','MDQ','peakSlope','Rd','Rd_conf','creak','MCEP_0','MCEP_1','MCEP_2','MCEP_3','MCEP_4','MCEP_5','MCEP_6','MCEP_7','MCEP_8','MCEP_9','MCEP_10','MCEP_11','MCEP_12','MCEP_13','MCEP_14','MCEP_15','MCEP_16','MCEP_17','MCEP_18','MCEP_19','MCEP_20','MCEP_21','MCEP_22','MCEP_23','MCEP_24','HMPDM_0','HMPDM_1','HMPDM_2','HMPDM_3','HMPDM_4','HMPDM_5','HMPDM_6','HMPDM_7','HMPDM_8','HMPDM_9','HMPDM_10','HMPDM_11','HMPDM_12','HMPDM_13','HMPDM_14','HMPDM_15','HMPDM_16','HMPDM_17','HMPDM_18','HMPDM_19','HMPDM_20','HMPDM_21','HMPDM_22','HMPDM_23','HMPDM_24','HMPDD_0','HMPDD_1','HMPDD_2','HMPDD_3','HMPDD_4','HMPDD_5','HMPDD_6','HMPDD_7','HMPDD_8','HMPDD_9','HMPDD_10','HMPDD_11','HMPDD_12']
header=['F1','F2','F3','F4','F5']
# skip the first directory entry (presumably a non-session item -- confirm)
sessions=sessions[1:]
for session in sessions:
    # session folders start with the 3-digit participant number
    if int(session[0:3]) >= 600:
        print session
        covarep_file=os.path.join(sessions_path,session,session[0:3]+'_COVAREP.mat')
        mat_contents=sio.loadmat(covarep_file)
        data=[]
        for h in header:
            # each field is stored as a column vector inside the .mat struct
            current_feature=mat_contents['covarep'][h][0][0]
            zipped_feature=zip(*current_feature)
            final_feature=list(zipped_feature[0])
            # replace NaNs with 0 so downstream CSV consumers get numbers
            for i in range(0,len(final_feature)):
                if math.isnan(final_feature[i]):
                    final_feature[i]=0
            data.append(final_feature)
        # transpose: one row per frame, one column per formant
        data=[list(a) for a in zip(*data)]
        print len(data)
        print len(data[0])
        print data[0][0]
        csv_path=os.path.join(sessions_path,session,session[0:3]+'_FORMANT.csv')
        with open(csv_path,'w') as f:
            writer=csv.writer(f)
            for row in data:
                writer.writerow(row)
ab93/Depression-Identification | src/main/feature_data.py | import os
import numpy as np
import pandas as pd
import config as cfg
from copy import deepcopy
class Data(object):
    # Loader for pre-extracted feature CSVs, grouped per participant video.
    # `category` selects the question grouping ('PN' -> positive/negative,
    # otherwise discriminative/nondiscriminative); `problem_type` 'C' uses the
    # 'label' column as the target, anything else uses 'score'.
    def __init__(self, category, feature_scale=False, feature_select=False, problem_type='C'):
        self.category = category
        self.problem_type = problem_type
        self.feature_scale = feature_scale
        self.feature_select = feature_select
    def _scale_features(self):
        # placeholder; scaling is currently baked into the CSVs on disk
        raise NotImplementedError("Not implemented yet!")
    def _select_data(self, modality, q_category, split, size='all'):
        # Resolve the on-disk CSV for (modality, question category, split)
        # based on the scale/select/problem-type flags, optionally trim the
        # train split to the first `size` participants, and group by video.
        scale = 'normalize' if self.feature_scale else 'regular'
        p_type = 'classify' if self.problem_type == 'C' else 'estimate'
        if self.feature_select:
            file_ = os.path.join(cfg.SEL_FEAT, scale, p_type, split,
                                 '{}_{}_{}.csv'.format(q_category, modality, split))
        else:
            file_ = os.path.join(cfg.ALL_FEAT, scale, p_type, split,
                                 '{}_{}_{}.csv'.format(q_category, modality, split))
        data = pd.read_csv(file_)
        if split == "train" and size != "all":
            split_file = cfg.TRAIN_SPLIT_FILE
            split_df = pd.read_csv(split_file, usecols=['Participant_ID'])
            split_df = split_df.loc[:int(size) - 1]
            data = data[data['video'].isin(split_df['Participant_ID'])]
        return self._group_features(data)
    def _group_features(self, data):
        # Group rows by participant: X[i] is that participant's feature rows
        # (all columns except the leading 'video' and the trailing two), y[i]
        # the matching per-row labels/scores.
        y_label = 'label' if self.problem_type == 'C' else 'score'
        grouped = data.groupby('video')
        X = []
        y = []
        for video, group in grouped:
            X_person = []
            y_person = []
            for i in range(len(group)):
                X_person.append(group.iloc[i].tolist()[1:-2])
                y_person.append(group.iloc[i][y_label])
            X.append(X_person)
            y.append(y_person)
        return X, y
    def get_full_train(self, modality):
        # Train + validation merged, per question category (index 0 and 1).
        x_train, y_train, x_val, y_val = self.get_data(modality)
        x_train[0].extend(x_val[0])
        x_train[1].extend(x_val[1])
        y_train[0].extend(y_val[0])
        y_train[1].extend(y_val[1])
        return x_train, y_train
    def get_test_data(self, modality):
        # Test split as ([cat1_X, cat2_X], [cat1_y, cat2_y]) of per-person arrays.
        if self.category == 'PN':
            cat_1 = "positive"
            cat_2 = "negative"
        else:
            cat_1 = "discriminative"
            cat_2 = "nondiscriminative"
        print "Reading test data for {}".format(modality)
        x_test = [map(np.asarray, self._select_data(modality, cat_1, "test")[0]),
                  map(np.asarray, self._select_data(modality, cat_2, "test")[0])]
        y_test = [map(np.asarray, self._select_data(modality, cat_1, "test")[1]),
                  map(np.asarray, self._select_data(modality, cat_2, "test")[1])]
        return x_test, y_test
    def get_test_data_multi(self):
        # Test data for all three modalities: acoustic, visual, linguistic.
        x_a_test, y_a_test = self.get_test_data('acoustic')
        x_v_test, y_v_test = self.get_test_data('visual')
        x_l_test, y_l_test = self.get_test_data('linguistic')
        return [x_a_test, x_v_test, x_l_test], [y_a_test, y_v_test, y_l_test]
    def get_full_train_multi(self):
        # Merged train+val data for all three modalities.
        x_a_train, y_a_train = self.get_full_train('acoustic')
        x_v_train, y_v_train = self.get_full_train('visual')
        x_l_train, y_l_train = self.get_full_train('linguistic')
        return [x_a_train, x_v_train, x_l_train], [y_a_train, y_v_train, y_l_train]
    def get_data(self, modality, size='all'):
        # Train and validation splits for one modality, each split shaped
        # [cat1, cat2] with per-person numpy arrays.
        if self.category == 'PN':
            cat_1 = "positive"
            cat_2 = "negative"
        else:
            cat_1 = "discriminative"
            cat_2 = "nondiscriminative"
        print "Reading data for {}".format(modality)
        x_train = [map(np.asarray, self._select_data(modality, cat_1, "train", size=size)[0]),
                   map(np.asarray, self._select_data(modality, cat_2, "train", size=size)[0])]
        y_train = [map(np.asarray, self._select_data(modality, cat_1, "train", size=size)[1]),
                   map(np.asarray, self._select_data(modality, cat_2, "train", size=size)[1])]
        x_val = [map(np.asarray, self._select_data(modality, cat_1, "val")[0]),
                 map(np.asarray, self._select_data(modality, cat_2, "val")[0])]
        y_val = [map(np.asarray, self._select_data(modality, cat_1, "val")[1]),
                 map(np.asarray, self._select_data(modality, cat_2, "val")[1])]
        return x_train, y_train, x_val, y_val
    def get_multi_data(self, size='all'):
        # Train/val data for all three modalities.
        X_A_train, y_A_train, X_A_val, y_A_val = self.get_data('acoustic', size)
        X_V_train, y_V_train, X_V_val, y_V_val = self.get_data('visual', size)
        X_L_train, y_L_train, X_L_val, y_L_val = self.get_data('linguistic', size)
        Xs = [X_A_train, X_V_train, X_L_train]
        ys = [y_A_train, y_V_train, y_L_train]
        Xs_val = [X_A_val, X_V_val, X_L_val]
        ys_val = [y_A_val, y_V_val, y_L_val]
        return Xs, ys, Xs_val, ys_val
    @staticmethod
    def concat_features(x1, x2, x3, y):
        # Horizontally stack three modalities per person/category. When the
        # per-person row counts disagree, truncate all three (and y) to the
        # smallest count so the stack lines up.
        if not len(x1) == len(x2) == len(x3) == 2:
            raise RuntimeError('Data sizes are not equal')
        elif not len(x1[0]) == len(x2[0]) == len(x3[0]):
            raise RuntimeError('Number of samples not equal')
        num_samples = len(x1[0])
        x = [[], []]
        y = deepcopy(y)
        for cat_idx in range(len(x)):
            for idx in xrange(num_samples):
                try:
                    stacked_data = np.hstack((x1[cat_idx][idx], x2[cat_idx][idx], x3[cat_idx][idx]))
                    x[cat_idx].append(stacked_data)
                except ValueError:
                    num_min_samples = min([data[cat_idx][idx].shape[0] for data in (x1, x2, x3)])
                    stacked_data = np.hstack((x1[cat_idx][idx][:num_min_samples, :],
                                              x2[cat_idx][idx][:num_min_samples, :],
                                              x3[cat_idx][idx][:num_min_samples, :]))
                    x[cat_idx].append(stacked_data)
                    y[cat_idx][idx] = y[cat_idx][idx][:num_min_samples]
        return x, y
if __name__ == '__main__':
    # Ad-hoc smoke test: load the PN acoustic test split and inspect a shape.
    feat_data = Data('PN', feature_select=True, feature_scale=False, problem_type='C')
    # x_train, y_train, x_val, y_val = feat_data.get_data(modality='acoustic')
    X, Y = feat_data.get_test_data(modality='acoustic')
    # X, y = feat_data.get_full_train(modality='acoustic')
    print X[1][0].shape
    exit()
    # unreachable below exit(): kept for manual experimentation
    X_A_train, y_A_train, X_A_val, y_A_val = feat_data.get_data('acoustic')
    X_V_train, y_V_train, X_V_val, y_V_val = feat_data.get_data('visual')
    X_L_train, y_L_train, X_L_val, y_L_val = feat_data.get_data('linguistic')
    feat_data.concat_features(X_A_train, X_V_train, X_L_train)
|
ab93/Depression-Identification | src/helpers/remove_empty_rows.py | import csv
# Copy discriminativeFACET_o.csv to discriminativeFACET.csv, dropping rows
# that carry no data.
input = open('../data/discriminativeFACET_o.csv', 'rb')
output = open('../data/discriminativeFACET.csv', 'wb')
writer = csv.writer(output)
for row in csv.reader(input):
    # FIX: the original test `row or any(row) or any(...)` was true for every
    # non-empty list -- even one of all-blank fields -- making the any()
    # clauses dead code. Keep a row only if some field has real content.
    if any(field.strip() for field in row):
        writer.writerow(row)
input.close()
output.close()
ab93/Depression-Identification | src/obsolete/read_labels.py | import pandas as pd
from sklearn import preprocessing
import pprint
from src.main import config
def get_features(data, split, classifier_type="C"):
    """Group per-utterance feature rows by participant video.

    For train/val splits returns (X, y): X[i] is one participant's feature
    rows (every column except the leading 'video' and the trailing two
    label/score columns) and y[i] the matching per-row targets ('label' when
    classifier_type == "C", otherwise 'score'). For the 'test' split only X
    is returned, keeping every column after 'video'.
    """
    target_col = 'label' if classifier_type == "C" else 'score'
    X, y = [], []
    for _, group in data.groupby('video'):
        rows = [group.iloc[idx] for idx in range(len(group))]
        if split == "test":
            X.append([row.tolist()[1:] for row in rows])
        else:
            X.append([row.tolist()[1:-2] for row in rows])
            y.append([row[target_col] for row in rows])
    if split == "test":
        return X
    return X, y
def features(mode, category, split, problem_type='C', feature_scale=False, count="all", select = "select"):
    # Resolve the feature CSV path for (modality, question category, split)
    # from the normalize/classify/select flags, optionally trim the train
    # split to the first `count` participants of the split file, and return
    # the grouped data via get_features().
    normalize = 'normalize' if feature_scale else 'regular'
    if problem_type == "C":
        directory = "classify"
    else:
        directory = "estimate"
    if select == "select":
        sel = "selected_features"
    else:
        sel = "all_features"
    # mode picks the modality segment of the file name
    if mode == "visual":
        file_ = "data/"+sel+"/"+normalize+"/"+directory+"/"+split+"/"+category+"_visual_"+split+".csv"
    elif mode == "acoustic":
        file_ = "data/"+sel+"/"+normalize+"/"+directory+"/"+split+"/"+category+"_acoustic_"+split+".csv"
    elif mode == "linguistic":
        file_ = "data/"+sel+"/"+normalize+"/"+directory+"/"+split+"/"+category+"_linguistic_"+split+".csv"
    data = pd.read_csv(file_)
    if split == "train" and count != "all":
        split_file = config.TRAIN_SPLIT_FILE
        split_df = pd.read_csv(split_file,usecols=['Participant_ID'])
        split_df = split_df.loc[:int(count)-1]
        data = data[data['video'].isin(split_df['Participant_ID'])]
    return get_features(data, split, problem_type)
|
ab93/Depression-Identification | src/tests/reg_test.py | import unittest
import numpy as np
from sklearn.linear_model import LinearRegression
from ..models.regressor import MetaRegressor, LateFusionRegressor
from ..feature_extract.read_labels import features
from ..main.classify import get_single_mode_data, get_multi_data
class MetaRegressorTest(unittest.TestCase):
"""
Tests for models.MetaRegressor class
"""
def _get_dummy_data(self):
x1 = np.array([ np.array([[1,5,7], [1,2,4], [1,8,9]]), # [r1,r2,r3] for p1
np.array([[2,8,6], [2,0,3]]), # [r1,r2] for p2
np.array([[3,7,5], [3,4,3], [3,9,7]]) # [r1,r2,r3] for p3
])
# for non discriminative
x2 = np.array([ np.array([[1,5,7], [1,2,4]]),
np.array([[2,8,6], [2,0,3], [2,5,5]]),
np.array([[3,7,5], [3,4,3], [3,9,7]])
])
y1 = np.array([ np.array([5.53,5.53,5.53]),
np.array([7.2,7.2]),
np.array([2,74,2.74])
])
y2 = np.array([ np.array([6.3,6.3]),
np.array([3.9,3.9,3.9]),
np.array([9.2,9.2,9.2])
])
X = [x1,x2]
y = [y1,y2]
return X,y
def _get_regressors(self):
reg1 = LinearRegression(normalize=True)
reg2 = LinearRegression(normalize=True)
return [reg1, reg2]
def test_fit_predict(self):
print "\nMetaRegressor...\n"
X_list, y_list = self._get_dummy_data()
y_true = np.array([5.0,11.0,13.5])
meta_reg = MetaRegressor(self._get_regressors())
meta_reg.fit(X_list,y_list)
preds = meta_reg.predict(X_list)
print "preds:\n", preds
print "R2 score:\n",meta_reg.score(X_list,y_true)
print "Mean abs error:\n",meta_reg.score(X_list,y_true,scoring='mean_abs_error')
class LateFusionRegressorTest(unittest.TestCase):
"""
Tests for model.LateFusionRegressor
"""
def _get_dummy_data(self):
x1 = np.array([ np.array([[1,5,7], [1,2,4], [1,8,9]]), # [r1,r2,r3] for p1
np.array([[2,8,6], [2,0,3]]), # [r1,r2] for p2
np.array([[3,7,5], [3,4,3], [3,9,7]]) # [r1,r2,r3] for p3
])
# for non discriminative
x2 = np.array([ np.array([[1,5,7], [1,2,4]]),
np.array([[2,8,6], [2,0,3], [2,5,5]]),
np.array([[3,7,5], [3,4,3], [3,9,7]])
])
y1 = np.array([ np.array([5.53,5.53,5.53]),
np.array([7.2,7.2]),
np.array([2,74,2.74])
])
y2 = np.array([ np.array([6.3,6.3]),
np.array([3.9,3.9,3.9]),
np.array([9.2,9.2,9.2])
])
X_acou, y_acou = [x1,x2], [y1,y2]
X_vis, y_vis = [x1,x2], [y1,y2]
X_lin, y_lin = [x1,x2], [y1,y2]
return [X_acou, X_vis, X_lin], [y_acou, y_vis, y_lin]
def _get_meta_reg(self,Xs,ys):
regs = [LinearRegression(), LinearRegression()]
meta_reg = MetaRegressor(regs)
return meta_reg
def test_fit_predict(self):
print "\nLateFusionRegressor...\n"
Xs, Ys = self._get_dummy_data()
y_true = np.array([5.0,11.0,13.5])
reg1 = self._get_meta_reg(Xs[0],Ys[0])
reg2 = self._get_meta_reg(Xs[1],Ys[1])
reg3 = self._get_meta_reg(Xs[2],Ys[2])
lf_reg = LateFusionRegressor(regressors=[reg1,reg2,reg3],
weights=[0.4,0.5,0.1])
lf_reg.fit(Xs,Ys)
preds = lf_reg.predict(Xs)
print "preds:\n", preds
print "R2 score:\n",lf_reg.score(Xs,y_true)
print "Mean abs error:\n",lf_reg.score(Xs,y_true,scoring='mean_abs_error')
if __name__ == '__main__':
    # Discover and run both regressor test cases.
    unittest.main()
|
ab93/Depression-Identification | src/main/feature_select.py | <reponame>ab93/Depression-Identification
import os
import numpy as np
import pandas as pd
import sys
import config
from sklearn.feature_selection import VarianceThreshold
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.ensemble import RandomForestClassifier
def get_feature_df(train, file_, *files):
    '''Obtain feature df of train/validation/test split

    Reads one or more per-question feature CSVs, restricts the rows to the
    participants of the requested split, and attaches 'label' (PHQ_Binary)
    and 'score' (PHQ_Score) columns taken from the split file.

    Parameters
    ----------
    train : str, {'train','val','test'}
    file_ : str, Path to first feature file
    files : array[str], Paths to more feature files
        NOTE: callers pass a single *list* of paths, so only files[0] is
        iterated below.
    Returns
    -------
    feature_df : dataframe, Features obtained from input files based on split
    '''
    # Set split_file based on input - 'train'/'val'/'test'
    if train == 'train':
        split_file = config.TRAIN_SPLIT_FILE
    elif train == "val":
        split_file = config.VAL_SPLIT_FILE
    else:
        split_file = config.TEST_SPLIT_FILE
    # Append feature file columns to a single feature data frame
    feature_df = pd.read_csv(file_,error_bad_lines=False)
    feature_df = feature_df.fillna(0)
    if len(files):
        for f in files[0]:
            feature_df_second = pd.read_csv(f)
            feature_df_second = feature_df_second.fillna(0)
            feature_df = pd.concat([feature_df, feature_df_second], axis=1)
        # Drop duplicated columns (e.g. 'video' repeated per input file).
        feature_df = feature_df.T.drop_duplicates().T
    # Trim data frame to hold only train/validation records based on split_file
    #if train == "test":
        #split_df = pd.read_csv(split_file, usecols=['Participant_ID'])
        #feature_df = feature_df[feature_df['video'].isin(split_df['Participant_ID'])]
    #else:
    split_df = pd.read_csv(split_file,usecols=['Participant_ID', 'PHQ_Binary','PHQ_Score'])
    feature_df = feature_df[feature_df['video'].isin(split_df['Participant_ID'])]
    # Populate labels and scores accordingly from split_df
    # split_dict: Participant_ID -> {'PHQ_Binary': ..., 'PHQ_Score': ...}
    split_dict = split_df.set_index('Participant_ID').T.to_dict()
    del split_df
    labels = np.zeros(feature_df.shape[0])
    scores = np.zeros(feature_df.shape[0])
    for i in xrange(feature_df.shape[0]):
        # Column 0 is the participant/video id.
        video_id = feature_df.iat[i, 0]
        labels[i] = split_dict[video_id]['PHQ_Binary']
        scores[i] = split_dict[video_id]['PHQ_Score']
    feature_df['label'] = pd.Series(labels, index=feature_df.index)
    feature_df['score'] = pd.Series(scores, index=feature_df.index)
    # Drop common (unwanted) columns - question, starttime, endtime
    # (LIWC files carry only 'question'; the ValueError fallback handles them.)
    try:
        feature_df.drop(['question','starttime','endtime'], inplace=True, axis=1)
    except ValueError:
        feature_df.drop(['question'], inplace=True, axis=1)
    return feature_df
def remove_low_variance(df):
    '''Remove low variance features

    Fits a VarianceThreshold(0.95) selector and keeps only the columns
    whose variance clears the threshold.

    Parameters
    ----------
    df : dataframe, Features to select from

    Returns
    -------
    final_df : dataframe, Features with only selected feature columns
    '''
    names = list(df.columns.values)
    selector = VarianceThreshold(0.95)
    selector.fit(df.as_matrix())
    # Map the surviving column indices back to their names.
    kept = [names[idx] for idx in selector.get_support(indices=True)]
    return df[kept]
def perform_l1(df,labels):
    '''Perform L1 norm feature selection

    Fits an L1-penalised logistic regression and keeps the columns whose
    coefficients survive SelectFromModel's threshold.

    Parameters
    ----------
    df : dataframe, Features to select from
    labels : array-like, Labels/scores to select based on

    Returns
    -------
    final_df : dataframe, Features with selected feature columns
    '''
    names = list(df.columns.values)
    sparse_model = LogisticRegression(C=1., penalty='l1', dual=False).fit(df.as_matrix(), labels)
    selector = SelectFromModel(sparse_model, prefit=True)
    # Translate the selector's surviving indices into column names.
    kept = [names[idx] for idx in selector.get_support(indices=True)]
    return df[kept]
def select_best_K(df,labels,K):
    '''Obtain K best features

    Ranks features by ANOVA F-score (f_classif) and keeps the top K,
    ordered best-first.

    Parameters
    ----------
    df : dataframe, Features to select from
    labels : array-like, Labels/scores to select based on
    K : int, Number of features to select

    Returns
    -------
    final_df : dataframe, Features with selected feature columns
    '''
    # Store feature names
    column_names = list(df.columns.values)
    # Score every feature with the ANOVA F-test.
    kbest = SelectKBest(f_classif, k=K)
    kbest.fit(df.as_matrix(), labels)
    score_list = kbest.scores_
    # Rank feature indices by descending F-score (stable sort, so ties keep
    # their original column order) and keep the top K.
    ranked_idxs = sorted(range(len(score_list)), key=lambda i: score_list[i], reverse=True)
    selected_feature_idxs = ranked_idxs[:K]
    # Obtain feature name list for indices
    selected_features = [column_names[i] for i in selected_feature_idxs]
    # Return data frame with selected features
    return df[selected_features]
def perform_random_forest(df,labels,N):
    '''Perform Random Forest feature selection

    Fits a 100-tree random forest and keeps the N most important features,
    ordered by importance (highest first).

    Parameters
    ----------
    df : dataframe, Features to select from
    labels : array-like, Labels/scores to select based on
    N : int, Number of features to select

    Returns
    -------
    final_df : dataframe, Features with selected feature columns
    '''
    names = list(df.columns.values)
    forest = RandomForestClassifier(n_estimators = 100)
    forest = forest.fit(df.as_matrix(), labels)
    importances = forest.feature_importances_
    # Rank indices by descending importance (stable sort: ties keep their
    # original column order) and take the first N.
    ranking = sorted(range(len(importances)), key=lambda i: importances[i], reverse=True)
    top_idxs = ranking[:N]
    kept = [names[i] for i in top_idxs]
    return df[kept]
def main(qtype,mode,classifier_type, choice = "select"):
    '''Performs feature selection for given category and mode

    Loads train/val/test feature frames, runs a mode-specific selection
    pipeline on train+val, and writes the trimmed train/val/test frames to
    the configured output directories.

    Parameters
    ----------
    qtype : str, {'D','ND','P','N'}
    mode : str, {'A','V','L'}
    classifier_type : str, {'C','R'}
    choice : str, {'select','all'}
        'select' runs the selection pipeline; anything else keeps all features.
    Returns
    -------
    final_feature_list : array-like, Selected features' names
    '''
    # Determine file name prefixes based on Question Type passed
    if qtype=="D":
        file_prefix="discriminative"
    elif qtype=="ND":
        file_prefix="nondiscriminative"
    elif qtype=="P":
        file_prefix="positive"
    else:
        file_prefix="negative"
    # Determine file_list based on Mode
    if mode == "V":
        file_list = ["_OPENFACE.csv"]
    elif mode == "A":
        file_list = ["_COVAREP.csv","_FORMANT.csv"]
    else:
        file_list = ["_LIWC.csv"]
    # Determine final file list for Question Type and Mode passed
    for i in range(len(file_list)):
        file_list[i] = file_prefix+file_list[i]
    print "File List: ",file_list
    # Determine directory
    if qtype=="D" or qtype=="ND":
        dir=config.D_ND_DIR
    else:
        dir=config.POS_NEG_DIR
    # Obtain file list with complete path
    file1 = os.path.join(dir,file_list[0])
    files = [os.path.join(dir,arg) for arg in file_list[1:]]
    # Obtain data frame containing all features from determined file list for TRAINING SET
    TRAIN = "train"
    df = get_feature_df(TRAIN,file1,files)
    # Obtain data frame containing all features from determined file list for VALIDATION SET
    TRAIN = "val"
    val_df = get_feature_df(TRAIN,file1, files)
    # Obtain data frame containing all features from determined file list for TEST SET
    TRAIN = "test"
    test_df = get_feature_df(TRAIN,file1,files)
    '''
    # If mode is visual, drop the extra columns from file - standardizes structure of data frame between all modes
    if mode=="V":
        df = df.drop(['frame', 'timestamp', 'confidence', 'success'], axis=1)
        val_df = val_df.drop(['frame', 'timestamp', 'confidence', 'success'], axis=1)
        test_df = test_df.drop(['frame', 'timestamp', 'confidence', 'success'], axis=1)
    '''
    # Create selection_df containing features from train and validation to perform feature selection on
    selection_df = pd.concat([df,val_df])
    # Obtain labels
    labels = selection_df['label'].values
    scores = selection_df['score'].values
    # Remove 'video' and 'label' column from selection_df
    selection_df.drop(['video', 'label','score'], inplace=True , axis=1)
    # Pick 'N' to pick from Random Forest method, based on Mode
    if mode=="A":
        N = 20
    else:
        N = 50
    # Set 'K' to pick from Select Best K method
    K = 20
    # Set feature_type to contain labels or scores based on classifier_type
    # ('C' = classification targets, 'R' = regression targets).
    if(classifier_type == "C"):
        feature_type = labels
    elif(classifier_type == "R"):
        feature_type = scores
    # Call pipeline of feature selection methods on data frame - different pipeline for each Question Type and Mode combination
    if choice == "select":
        if mode=="V":
            selection_df = perform_random_forest(selection_df,feature_type,N)
        elif mode == "A":
            selection_df = perform_l1(selection_df,feature_type)
            selection_df = perform_random_forest(selection_df,feature_type,N)
        else:
            selection_df = perform_random_forest(selection_df,feature_type,N)
            selection_df = select_best_K(selection_df,feature_type,K)
    # Obtain Final feature list
    final_feature_list = list(selection_df.columns.values)
    print "Final Feature List (Sorted): ",final_feature_list
    # Obtain data frame (for TRAIN, VALIDATION AND TEST) to write into files
    final_selection = ['video']
    final_selection.extend(final_feature_list)
    final_selection.extend(['label'])
    final_selection.extend(['score'])
    op_df = df[final_selection]
    op_val_df = val_df[final_selection]
    #final_selection.remove('label')
    #final_selection.remove('score')
    op_test_df = test_df[final_selection]
    # To construct Output File Name
    if mode=="V":
        output_file="_visual"
    elif mode=="A":
        output_file="_acoustic"
    else:
        output_file="_linguistic"
    file_suffix_train="_train.csv"
    file_suffix_val="_val.csv"
    file_suffix_test = "_test.csv"
    # Write output dfs into output files - TRAIN, VALIDATION AND TEST
    # Output directory depends on classifier_type and on whether selection ran.
    if classifier_type == "C":
        if choice == "select":
            directory_path_train = config.SEL_FEAT_TRAIN_REGULAR_CLASSIFY
            directory_path_val = config.SEL_FEAT_VAL_REGULAR_CLASSIFY
            directory_path_test = config.SEL_FEAT_TEST_REGULAR_CLASSIFY
        else:
            directory_path_train = config.ALL_FEAT_TRAIN_REGULAR_CLASSIFY
            directory_path_val = config.ALL_FEAT_VAL_REGULAR_CLASSIFY
            directory_path_test = config.ALL_FEAT_TEST_REGULAR_CLASSIFY
    else:
        if choice == "select":
            directory_path_train = config.SEL_FEAT_TRAIN_REGULAR_ESTIMATE
            directory_path_val = config.SEL_FEAT_VAL_REGULAR_ESTIMATE
            directory_path_test = config.SEL_FEAT_TEST_REGULAR_ESTIMATE
        else:
            directory_path_train = config.ALL_FEAT_TRAIN_REGULAR_ESTIMATE
            directory_path_val = config.ALL_FEAT_VAL_REGULAR_ESTIMATE
            directory_path_test = config.ALL_FEAT_TEST_REGULAR_ESTIMATE
    fileOP = os.path.join(directory_path_train,file_prefix + output_file + file_suffix_train)
    op_df.to_csv(fileOP,sep=",",index=False)
    fileOP = os.path.join(directory_path_val, file_prefix + output_file + file_suffix_val)
    op_val_df.to_csv(fileOP, sep=",", index=False)
    fileOP = os.path.join(directory_path_test, file_prefix + output_file + file_suffix_test)
    op_test_df.to_csv(fileOP, sep=",", index=False)
    return final_feature_list
def feature_select(classifier_type, choice = "select"):
'''Calls feature selection method for all categories and modes, with given classifier_type
Parameters
----------
classifier_type : str, {'C','R'}
choice : str, {'select','all'}
'''
all_feature_lists = []
# Call feature select function for all question types and modes
question_types = ["D","ND","P","N"]
modes = ["V","A","L"]
for qtype in question_types:
for mode in modes:
print "Feature Selection for ",qtype," and ",mode
feature_list = main(qtype,mode,classifier_type,choice = choice)
all_feature_lists.append(feature_list)
print "All features: ",all_feature_lists
# Write all feature lists into output file
file = os.path.join(config.SEL_FEAT, classifier_type+"_all_selected_features.csv")
fileOP = open(file,"w")
for each_list in all_feature_lists:
for feature in each_list:
fileOP.write(feature)
fileOP.write(",")
fileOP.write("\n")
if __name__ == '__main__':
    # Optional single cmd-line arg overrides the default 'select'
    # ('all' keeps every feature instead of running selection).
    choice = "select"
    if len(sys.argv) == 2:
        choice = sys.argv[1]
    # Call feature select function for both classification and regression
    feature_select("C",choice = choice)
feature_select("R",choice = choice) |
ab93/Depression-Identification | src/helpers/split_LIWC.py | import csv
# Split the raw LIWC feature rows into two CSVs -- one for depressed and one
# for non-depressed participants -- based on the training-split labels.
depressed=[]
not_depressed=[]
count=0
instance=1
with open('data/training_split.csv','r') as f:
    reader=csv.reader(f)
    # Skip the header row (Python 2 csv API).
    reader.next()
    for row in reader:
        count+=1
        # Column 1 is PHQ_Binary: '0' => not depressed.
        if row[1]=='0':
            not_depressed.append(row[0])
        else:
            depressed.append(row[0])
d_f=open('data/liwc_depressed.csv','w')
nd_f=open('data/liwc_notdepressed.csv','w')
d_csv=csv.writer(d_f)
nd_csv=csv.writer(nd_f)
with open('data/liwc.csv','r') as f:
    reader=csv.reader(f)
    reader.next()
    for row in reader:
        # NOTE(review): 'instance' is never incremented, so this guard is
        # always true -- presumably it was meant to cap rows at 12179; confirm.
        if instance<12179:
            # Keep the participant id, drop columns 1 and 7, keep the rest.
            if row[0] in depressed:
                d_csv.writerow([row[0]]+row[2:7]+row[8:])
            elif row[0] in not_depressed:
                nd_csv.writerow([row[0]]+row[2:7]+row[8:])
d_f.close()
nd_f.close()
|
ab93/Depression-Identification | src/feature_extract/extract_CLM_Gaze.py | import pandas as pd
from pprint import pprint
from glob import glob
import numpy as np
import re
import csv
import os
import fnmatch
import sys
# Utterance-tag lookups populated from IdentifyingFollowUps.csv by readHelperData().
followUp = {}
ack = {}
nonIntimate = {}
intimate = {}
# (participantNo, question) -> [startTime, endTime] of the participant's answer,
# populated by readTranscript().
featureList = {}
# question -> 'D'/'ND' and question -> 'P'/'N' annotations respectively.
questionType_DND={}
questionType_PN={}
def readHelperData():
    """Load question annotations and utterance tags into the module globals.

    Fills questionType_DND / questionType_PN from the annotation CSVs and
    followUp / ack / nonIntimate / intimate from IdentifyingFollowUps.csv.
    """
    global followUp, ack, nonIntimate, intimate, questionType_PN, questionType_DND
    utterrances = pd.read_csv('data/misc/IdentifyingFollowUps.csv')
    disc_nondisc=pd.read_csv('data/misc/DND_Annotations.csv')
    pos_neg=pd.read_csv('data/misc/PN_Annotations.csv')
    #Discriminative/Non-discriminative annotations
    for i in xrange(len(disc_nondisc)):
        question=disc_nondisc.iloc[i]['Questions']
        qType=disc_nondisc.iloc[i]['Annotations']
        questionType_DND[question]=qType
    #Positive/Negative annotations
    for i in xrange(len(pos_neg)):
        question=pos_neg.iloc[i]['Questions']
        qType=pos_neg.iloc[i]['Annotations']
        questionType_PN[question]=qType
    # Route each utterance into the dict matching its tag (first occurrence wins).
    for item in utterrances.itertuples():
        if item[3]=="#follow_up" and item[1] not in followUp:
            followUp[item[1]]=item[2]
        elif item[3]=="#ack" and item[1] not in ack:
            ack[item[1]]=item[2]
        elif item[3]=="#non_int" and item[1] not in nonIntimate:
            nonIntimate[item[1]]=item[2]
        elif item[3]=="#int" and item[1] not in intimate:
            intimate[item[1]]=item[2]
def readTranscript():
    """Scan every interview transcript and record, per (participant, question),
    the [startTime, endTime] span of the participant's answer in featureList.

    Capture starts when Ellie asks an intimate DND-annotated question and ends
    at the next non-intimate or unannotated intimate question.
    """
    global featureList
    transcriptFiles=glob(sys.argv[1]+'[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
    for i in range(0,len(transcriptFiles)):
        t=pd.read_csv(transcriptFiles[i], delimiter='\t')
        t = t.fillna("")
        captureStarted=False
        startTime=0.0
        endTime=0.0
        prevQuestion=""
        # Participant id is embedded in the file path (e.g. .../300_P/300_TRANSCRIPT.csv).
        participantNo=transcriptFiles[i][-18:-15]
        for j in xrange(len(t)):
            # Normalise an utterance like "... (question)" to just "question".
            question=re.search(".*\((.*)\)$", t.iloc[j]['value'])
            if question is not None:
                question=question.group(1)
            else:
                question=t.iloc[j]['value']
            question=question.strip()
            if t.iloc[j]['speaker']=='Ellie':
                if question in nonIntimate and captureStarted:
                    # Non-intimate question ends the current capture.
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)]=[startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1]=endTime
                    captureStarted=False
                elif question in intimate and question in questionType_DND and captureStarted:
                    # New annotated intimate question: flush the previous span, restart.
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)]=[startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1]=endTime
                    startTime=t.iloc[j]['start_time']
                    endTime=t.iloc[j]['stop_time']
                    prevQuestion=question
                elif question in intimate and question in questionType_DND and not captureStarted:
                    startTime=t.iloc[j]['start_time']
                    endTime=t.iloc[j]['stop_time']
                    prevQuestion=question
                    captureStarted=True
                elif question in intimate and question not in questionType_DND and captureStarted:
                    # Unannotated intimate question also ends the capture.
                    if (participantNo, prevQuestion) not in featureList:
                        featureList[(participantNo, prevQuestion)]=[startTime, endTime]
                    else:
                        featureList[(participantNo, prevQuestion)][1]=endTime
                    captureStarted=False
                elif question in followUp or question in ack and captureStarted:
                    # NOTE(review): 'and' binds tighter than 'or', so this reads as
                    # followUp OR (ack AND captureStarted) -- a follow-up updates
                    # endTime even when no capture is active; confirm intent.
                    endTime=t.iloc[j]['stop_time']
            elif t.iloc[j]['speaker']=='Participant' and captureStarted:
                # Participant speech extends the current answer span.
                endTime=t.iloc[j]['stop_time']
def readCLM_DND():
    """Average each participant's CLM gaze features over every answer span and
    write one row per (participant, question) into the discriminative or
    non-discriminative CSV, according to questionType_DND.

    Requires featureList and questionType_DND to be populated first.
    """
    groupByQuestion = {}
    dFile2 = open('data/disc_nondisc/discriminative_CLM_Gaze.csv', 'w')
    ndFile2 = open('data/disc_nondisc/nondiscriminative_CLM_Gaze.csv', 'w')
    dWriter2 = csv.writer(dFile2)
    ndWriter2 = csv.writer(ndFile2)
    header = ["video","question","starttime","endtime",'frame', 'timestamp', 'confidence', 'success', 'x_0', 'y_0', 'z_0', 'x_1', 'y_1', 'z_1', 'x_h0', 'y_h0', 'z_h0', 'x_h1', 'y_h1', 'z_h1']
    dWriter2.writerow(header)
    ndWriter2.writerow(header)
    # Regroup featureList by participant: id -> [(question, [start, end]), ...]
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]] = [(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName2 = sys.argv[1] + item + '_P/' + item + '_CLM_gaze.txt'
        f2 = pd.read_csv(fileName2, delimiter=', ')
        for instance in groupByQuestion[item]:
            startTime = instance[1][0]
            endTime = instance[1][1]
            # Frame rows whose timestamps are nearest the span boundaries.
            startFrame = f2.ix[(f2['timestamp'] - startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame = f2.ix[(f2['timestamp'] - endTime).abs().argsort()[:1]].index.tolist()[0]
            # Column-wise mean of every gaze feature over the answer span.
            features = f2.ix[startFrame:endFrame].mean(0).tolist()
            vector = instance[1][:]
            vector += features
            vector.insert(0, instance[0])
            vector.insert(0, item)
            vector = np.asarray(vector)
            if questionType_DND[instance[0]] == 'D':
                dWriter2.writerow(vector)
            else:
                ndWriter2.writerow(vector)
    # Close output files so buffered rows are flushed (leaked in the original).
    dFile2.close()
    ndFile2.close()
def readCLM_PN():
    """Average each participant's CLM gaze features over every answer span and
    write one row per (participant, question) into the positive or negative
    CSV, according to questionType_PN.

    Requires featureList and questionType_PN to be populated first.
    """
    groupByQuestion = {}
    pFile2 = open('data/pos_neg/positive_CLM_Gaze.csv', 'w')
    nFile2 = open('data/pos_neg/negative_CLM_Gaze.csv', 'w')
    pWriter2 = csv.writer(pFile2)
    nWriter2 = csv.writer(nFile2)
    header = ["video","question","starttime","endtime",'frame', 'timestamp', 'confidence', 'success', 'x_0', 'y_0', 'z_0', 'x_1', 'y_1', 'z_1', 'x_h0', 'y_h0', 'z_h0', 'x_h1', 'y_h1', 'z_h1']
    pWriter2.writerow(header)
    nWriter2.writerow(header)
    # Regroup featureList by participant: id -> [(question, [start, end]), ...]
    for item in featureList:
        if item[0] not in groupByQuestion:
            groupByQuestion[item[0]] = [(item[1], featureList[item])]
        else:
            groupByQuestion[item[0]].append((item[1], featureList[item]))
    for item in groupByQuestion:
        fileName2 = sys.argv[1] + item + '_P/' + item + '_CLM_gaze.txt'
        f2 = pd.read_csv(fileName2, delimiter=', ')
        for instance in groupByQuestion[item]:
            startTime = instance[1][0]
            endTime = instance[1][1]
            # Frame rows whose timestamps are nearest the span boundaries.
            startFrame = f2.ix[(f2['timestamp'] - startTime).abs().argsort()[:1]].index.tolist()[0]
            endFrame = f2.ix[(f2['timestamp'] - endTime).abs().argsort()[:1]].index.tolist()[0]
            # Column-wise mean of every gaze feature over the answer span.
            features = f2.ix[startFrame:endFrame].mean(0).tolist()
            vector = instance[1][:]
            vector += features
            vector.insert(0, instance[0])
            vector.insert(0, item)
            vector = np.asarray(vector)
            if questionType_PN[instance[0]] == 'P':
                pWriter2.writerow(vector)
            else:
                nWriter2.writerow(vector)
    # Close output files so buffered rows are flushed (leaked in the original).
    pFile2.close()
    nFile2.close()
if __name__ == "__main__":
    # Pipeline: load annotations, compute answer spans, then write gaze features.
    readHelperData()
    readTranscript()
    readCLM_DND()
readCLM_PN() |
ab93/Depression-Identification | src/obsolete/select_test_features.py | <reponame>ab93/Depression-Identification
import numpy as np
from src.main import config
import pandas as pd
import os
def get_feature_df(train, file_, *files):
    """Obsolete test-split variant of feature_select.get_feature_df.

    Ignores *train* and always reads the TEST split; attaches 'label'
    (PHQ_Binary) and 'score' (PHQ_Score) columns from the split file.
    """
    # Set directory based on Train and Validation
    # if train == 'y':
    #     split_file = config.TRAIN_SPLIT_FILE
    # else:
    #     split_file = config.VAL_SPLIT_FILE
    split_file=config.TEST_SPLIT_FILE
    # Append file columns to a single data frame
    feature_df = pd.read_csv(file_)
    if len(files):
        # NOTE: callers pass a single list of paths, so only files[0] is iterated.
        for f in files[0]:
            print f
            feature_df = pd.concat([feature_df, pd.read_csv(f)], axis=1)
        feature_df = feature_df.T.drop_duplicates().T
    # Trim data frame to hole only train/validation records
    split_df = pd.read_csv(split_file,usecols=['Participant_ID', 'PHQ_Binary','PHQ_Score'])
    feature_df = feature_df[feature_df['video'].isin(split_df['Participant_ID'])]
    # Populate labels accordingly
    # split_dict: Participant_ID -> {'PHQ_Binary': ..., 'PHQ_Score': ...}
    split_dict = split_df.set_index('Participant_ID').T.to_dict()
    del split_df
    labels = np.zeros(feature_df.shape[0])
    scores = np.zeros(feature_df.shape[0])
    for i in xrange(feature_df.shape[0]):
        video_id = feature_df.iat[i,0]
        labels[i] = split_dict[video_id]['PHQ_Binary']
        scores[i] = split_dict[video_id]['PHQ_Score']
    feature_df['label'] = pd.Series(labels, index=feature_df.index)
    feature_df['score'] = pd.Series(scores, index=feature_df.index)
    # Drop common (unwanted) columns - question, starttime, endtime
    # (LIWC files carry only 'question'; the ValueError fallback handles them.)
    try:
        feature_df.drop(['question','starttime','endtime'], inplace=True, axis=1)
    except ValueError:
        feature_df.drop(['question'], inplace=True, axis=1)
    return feature_df
def main(qtype,mode,feature_list):
# Determine file name prefixes based on Question Type passed
if qtype=="P":
file_prefix="positive"
else:
file_prefix="negative"
# Determine file_list based on Mode
if mode == "V":
#file_list = ["_CLM.csv","_CLM_3D.csv","_CLM_Gaze.csv","_CLM_pose.csv","_FACET.csv"]
file_list = ["_OPENFACE.csv"]
elif mode == "A":
file_list = ["_COVAREP.csv","_FORMANT.csv"]
else:
file_list = ["_LIWC.csv"]
# Determine final file list for Question Type and Mode passed
for i in range(len(file_list)):
file_list[i] = file_prefix+file_list[i]
print "File List: ",file_list
# Determine directory
dir=config.POS_NEG_DIR
# Obtain file list with complete path
file1 = os.path.join(dir,file_list[0])
files = [os.path.join(dir,arg) for arg in file_list[1:]]
# Obtain data frame containing all features from determined file list for TESTING SET
TEST = "y"
feature_list.append("video")
feature_list.append("label")
feature_list.append("score")
df = get_feature_df(TEST,file1,files)
df_new = df[feature_list]
df_new.to_csv(config.SEL_FEAT)
def some_func():
    """Run main() for every (category, mode) pair using hard-coded feature
    lists (previously selected positive/negative acoustic, visual and
    linguistic features).
    """
    p_v = ['joyEvidence', 'AU9Evidence', 'confusionEvidence', 'isMaleEvidence', 'Z12', 'Z13', 'Z37', 'Z41', 'Z36',
           'Z11', 'Z19', 'Z14', 'Z44', 'Z3', 'Z45', 'Z52', 'Z51', 'AU43Evidence', 'Z5', 'Z27']
    p_a = ['F0', 'MCEP_7', 'MCEP_4', 'MCEP_8', 'MCEP_12', 'MCEP_17', 'HMPDD_2', 'MCEP_13', 'MCEP_11', 'MCEP_6',
           'MCEP_10', 'MCEP_5', 'MCEP_2', 'HMPDM_10', 'HMPDM_8', 'HMPDD_0', 'MCEP_0', 'Rd', 'formant1', 'HMPDM_11']
    p_l = ['avg_wordlen', 'coherence', 'word30', 'word83', 'word98', 'word54', 'word35', 'word66', 'word18', 'word28',
           'word5', 'word0', 'word52', 'word62', 'word32', 'word77', 'root_deps', 'word20', 'word53', 'word37']
    n_v = ['joyEvidence', 'Z21', 'Z20', 'Z19', 'Z37', 'Z38', 'Z40', 'Z18', 'Z1', 'Z2', 'isMaleEvidence', 'Z27', 'Z43',
           'Z4', 'Z44', 'Z47', 'AU43Evidence', 'Z13', 'Z28', 'Z51']
    n_a = ['MCEP_7', 'MCEP_8', 'MCEP_13', 'F0', 'MCEP_4', 'HMPDD_3', 'MCEP_6', 'MCEP_10', 'HMPDD_1', 'MCEP_3',
           'HMPDD_2', 'MCEP_2', 'formant4', 'formant5', 'formant1', 'HMPDM_9', 'MCEP_1', 'HMPDM_24', 'HMPDM_11',
           'HMPDM_10']
    n_l = ['word83', 'word49', 'word20', 'word35', 'word97', 'word42', 'word28', 'x_tag', 'word4', 'word92', 'word98',
           'word91', 'word75', 'word93', 'word60', 'word16', 'word79', 'word46', 'word82', 'word77']
    # One call per (category, mode): P/N x acoustic, visual, linguistic.
    main("P","A",p_a)
    main("N", "A", n_a)
    main("P", "V", p_v)
    main("N", "V", n_v)
    main("P", "L", p_l)
main("N", "L", n_l) |
ab93/Depression-Identification | src/obsolete/get_prediction_labels.py | <reponame>ab93/Depression-Identification
import os
import numpy as np
from sklearn.externals import joblib
import src.main.config
import src.obsolete.utils
from src.obsolete import read_labels
def final_classifier(mode,category="PN",problem_type="C",normalize="normalize"):
    """Load the pickled grid-searched classifier for *mode* and predict labels
    on the test split.

    mode : str, {'acoustic','visual','linguistic','late_fusion'}
    category : str, currently only 'PN' (positive/negative) is handled
    problem_type : str, forwarded to read_labels.features ('C' = classification)
    normalize : str, forwarded to read_labels.features
    """
    # NOTE(review): cat_1/cat_2 are only bound when category == "PN"; any other
    # value raises NameError below -- confirm the intended category set.
    if category == "PN":
        cat_1 = "positive"
        cat_2 = "negative"
    if mode == "late_fusion":
        # One [positive, negative] feature pair per modality (acoustic, visual, linguistic).
        X_test = [ [ map(np.asarray, read_labels.features("acoustic", cat_1, "test", problem_type, normalize)),
                     map(np.asarray, read_labels.features("acoustic", cat_2, "test", problem_type, normalize))
                     ],
                   [ map(np.asarray, read_labels.features("visual", cat_1, "test", problem_type, normalize)),
                     map(np.asarray, read_labels.features("visual", cat_2, "test", problem_type, normalize))
                     ],
                   [ map(np.asarray, read_labels.features("linguistic", cat_1, "test", problem_type, normalize)),
                     map(np.asarray, read_labels.features("linguistic", cat_2, "test", problem_type, normalize))
                     ]
                   ]
    else:
        X_test = [map(np.asarray, read_labels.features(mode, cat_1, "test", problem_type, normalize)),
                  map(np.asarray, read_labels.features(mode, cat_2, "test", problem_type, normalize))]
    # Pickle file name encodes the modality and category, e.g. acoustic_picklePN.pkl.
    clf = joblib.load(os.path.join(src.main.config.GRID_SEARCH_CLF_DIR, mode + '_pickle' + category + '.pkl'))
    preds_label = clf.predict(X_test)
    return preds_label
def main():
    """Print test-split predictions for each single modality and for late fusion."""
    print "acoustic"
    print final_classifier("acoustic")
    print "visual"
    print final_classifier("visual")
    print "linguistic"
    print final_classifier("linguistic")
    print "late_fusion"
    print final_classifier("late_fusion")
# Runs at import time as well -- the module has no __main__ guard.
main()
|
ab93/Depression-Identification | src/feature_extract/extract_LIWC.py | <filename>src/feature_extract/extract_LIWC.py
import scipy.stats
import pandas as pd
from pprint import pprint
from glob import glob
import numpy as np
import re
import csv
import sys
# Utterance-tag lookups populated from IdentifyingFollowUps.csv by readHelperData().
followUp = {}
ack = {}
nonIntimate = {}
intimate = {}
# NOTE(review): featureList is checked in readTranscript but never populated
# in this module -- membership tests on it are always False; confirm intent.
featureList={}
# question -> 'D'/'ND' and question -> 'P'/'N' annotations respectively.
questionType_DND={}
questionType_PN={}
discriminativeVectors=[]
nonDiscriminativeVectors=[]
# (participantNo, question) -> list of participant utterances forming the answer.
questionAnswers={}
# participantNo -> [(utterance, LIWC vector), ...]
liwcVectors={}
listofParticipants=[]
'''
Reads the following data:
IdentifyingFollowUps.csv : Reads tags for each question in the corpus where the tag is intimate, non-intimate,
acknowledgement, follow-up
DND_Annotations.csv : Reads category for each question in the corpus where the category is Discriminative, Non-discriminative
PN_Annotations.csv : Reads category for each question in the corpus where the category is Positive, Negative
'''
def readHelperData():
    """Load question annotations and utterance tags into the module globals.

    Fills questionType_DND / questionType_PN from the annotation CSVs and
    followUp / ack / nonIntimate / intimate from IdentifyingFollowUps.csv.
    """
    global followUp, ack, nonIntimate, intimate, questionType_PN, questionType_DND
    utterrances = pd.read_csv('data/misc/IdentifyingFollowUps.csv')
    disc_nondisc = pd.read_csv('data/misc/DND_Annotations.csv')
    pos_neg = pd.read_csv('data/misc/PN_Annotations.csv')
    #Discriminative/Non-discriminative annotations
    for i in xrange(len(disc_nondisc)):
        question=disc_nondisc.iloc[i]['Questions']
        qType=disc_nondisc.iloc[i]['Annotations']
        questionType_DND[question]=qType
    #Positive/Negative annotations
    for i in xrange(len(pos_neg)):
        question=pos_neg.iloc[i]['Questions']
        qType=pos_neg.iloc[i]['Annotations']
        questionType_PN[question]=qType
    # Route each utterance into the dict matching its tag (first occurrence wins).
    for item in utterrances.itertuples():
        if item[3]=="#follow_up" and item[1] not in followUp:
            followUp[item[1]]=item[2]
        elif item[3]=="#ack" and item[1] not in ack:
            ack[item[1]]=item[2]
        elif item[3]=="#non_int" and item[1] not in nonIntimate:
            nonIntimate[item[1]]=item[2]
        elif item[3]=="#int" and item[1] not in intimate:
            intimate[item[1]]=item[2]
'''
Reads the transcript of each interview and collects the answer for each question asked to a participant
This is done for all the interviews.
At the end, the dictionary questionAnswers has a list of utterances by the participant (the utterances together make up the answer)
and this is stored as [(300,'when was the last time you felt happy)] = ['i last felt happy','um','yesterday]
In this manner, responses are collected for all questions, for all interviews
'''
def readTranscript():
    """Collect, per (participant, question), the participant's answer
    utterances into the questionAnswers global, for every transcript file.
    """
    global featureList
    transcriptFiles=glob(sys.argv[1]+'[0-9][0-9][0-9]_P/[0-9][0-9][0-9]_TRANSCRIPT.csv')
    for i in range(0,len(transcriptFiles)):
        t=pd.read_csv(transcriptFiles[i], delimiter=',|\t')
        t = t.fillna("")
        captureStarted=False
        prevUtterance=""
        # Participant id is embedded in the file path (e.g. .../300_P/300_TRANSCRIPT.csv).
        participantNo=transcriptFiles[i][-18:-15]
        listofParticipants.append(participantNo)
        responses=[]
        for j in xrange(len(t)):
            # Normalise an utterance like "... (text)" to just "text".
            utterance=re.search(".*\((.*)\)$", t.iloc[j]['value'])
            if utterance is not None:
                utterance=utterance.group(1)
            else:
                utterance=t.iloc[j]['value']
            utterance=utterance.strip()
            '''
            We start capturing a response right after Ellie asks an intimate question.
            If a question is a follow-up question, we continue capturing participant response after Ellie
            asks the follow-up question.
            If Ellie gives an acknowledgement, we skip and go on. If we are capturing a response already, we continue
            capturing from the next participant utterance.
            If a question is a non-intimate question, then there are two cases:
                1. Previous question was an intimate question: In this case, we have been capturing participant
                responses. So, we stop capture and store it in the dictionary. We skip the non-intimate question
                and wait until next time Ellie asks an intimate question or conversation ends, whichever is first.
                2. Previous question was a non-intimate question: In this case, we skip this question and wait till
                Ellie asks an intimate question or conversation ends, whichever is first.
            '''
            if t.iloc[j]['speaker']=='Ellie':
                if utterance in nonIntimate and captureStarted:
                    # NOTE(review): featureList is never populated in this module,
                    # so this membership check is always False and the first
                    # branch always runs (overwriting, never extending); the
                    # check was presumably meant against questionAnswers -- confirm.
                    if (participantNo, prevUtterance) not in featureList:
                        questionAnswers[(participantNo, prevUtterance)]=responses
                    else:
                        questionAnswers[(participantNo, prevUtterance)]+=responses
                    captureStarted=False
                    responses=[]
                elif utterance in intimate and utterance in questionType_DND and captureStarted:
                    if (participantNo, prevUtterance) not in featureList:
                        questionAnswers[(participantNo, prevUtterance)]=responses
                    else:
                        questionAnswers[(participantNo, prevUtterance)]+=responses
                    prevUtterance=utterance
                    responses=[]
                elif utterance in intimate and utterance in questionType_DND and not captureStarted:
                    prevUtterance=utterance
                    captureStarted=True
                elif utterance in intimate and utterance not in questionType_DND and captureStarted:
                    if (participantNo, prevUtterance) not in featureList:
                        questionAnswers[(participantNo, prevUtterance)]=responses
                    else:
                        questionAnswers[(participantNo, prevUtterance)]+=responses
                    captureStarted=False
                    responses=[]
                elif utterance in followUp or utterance in ack and captureStarted:
                    # NOTE(review): 'and' binds tighter than 'or', so this reads
                    # as followUp OR (ack AND captureStarted) -- confirm intent.
                    continue
            elif t.iloc[j]['speaker']=='Participant' and captureStarted:
                responses.append(utterance)
'''
Reads LIWC features for all questions that are either Discriminative or Non-discriminative
and writes it to file.
'''
def readLIWC_DND():
    """Aggregate per-utterance LIWC vectors into one word-count-weighted
    vector per (participant, question) and write it to the discriminative or
    non-discriminative CSV according to questionType_DND.

    Requires questionAnswers and listofParticipants to be populated first.
    """
    global listofParticipants
    dFile=open('data/disc_nondisc/discriminative_LIWC.csv','w')
    ndFile=open('data/disc_nondisc/nondiscriminative_LIWC.csv','w')
    dWriter=csv.writer(dFile)
    ndWriter=csv.writer(ndFile)
    f=open('data/misc/liwc_new.csv')
    reader=csv.reader(f)
    header=['video','question']
    header+=reader.next()[2:]
    dWriter.writerow(header)
    ndWriter.writerow(header)
    listofParticipants=[int(i) for i in listofParticipants]
    listofParticipants.sort()
    # Keep only LIWC rows whose participant id lies in the observed id range.
    for row in reader:
        if int(row[0])>=listofParticipants[0] and int(row[0])<=listofParticipants[-1]:
            if row[0] not in liwcVectors:
                liwcVectors[row[0]]=[(row[1], row[2:])]
            else:
                liwcVectors[row[0]].append((row[1], row[2:]))
    #questionAnswers: [(participantNo, question)]=[list of answers]
    #liwcVectors: participantNo: [list of (answer, vector)]
    # Each utterance's LIWC values are weighted by the utterance's word count,
    # summed over all utterances of the answer, then divided by the answer's
    # total word count -- i.e. a per-word average over the whole response.
    for item in questionAnswers:
        participant_number=item[0]
        current_question=item[1]
        lines_for_this_question=[]
        answer_length=0.0
        for answer in questionAnswers[item]:
            vectors=liwcVectors[participant_number]
            for vector in vectors:
                if answer==vector[0]:
                    utterance_length=len(answer.split(" "))
                    answer_length+=utterance_length
                    feature_vector=[float(i)*utterance_length for i in vector[1]]
                    lines_for_this_question.append(feature_vector)
        # NOTE(review): if no utterance matched, answer_length stays 0 and this
        # would divide by zero on a non-empty zip -- assumed unreachable; confirm.
        final_vector=[sum(value)/answer_length for value in zip(*lines_for_this_question)]
        final_vector.insert(0,current_question)
        final_vector.insert(0,str(participant_number))
        if questionType_DND[current_question]=='D':
            dWriter.writerow(final_vector)
        elif questionType_DND[current_question]=='ND':
            ndWriter.writerow(final_vector)
    # Close all handles so buffered rows are flushed (leaked in the original;
    # the unused local answerQuestion was also dropped).
    f.close()
    dFile.close()
    ndFile.close()
'''
Reads LIWC features for all questions that are either Positive or Negative
and writes it to file.
'''
def readLIWC_PN():
    """Write per-response, word-count-averaged LIWC vectors for Positive ('P')
    and Negative ('N') questions to positive_LIWC.csv / negative_LIWC.csv.

    Relies on module globals populated earlier in the pipeline:
    listofParticipants, questionAnswers ({(participant, question): [answers]}),
    liwcVectors ({participant: [(answer, vector)]}) and questionType_PN.
    """
    global listofParticipants
    answerQuestion={}  # NOTE(review): never used below -- looks like dead code
    pFile=open('data/pos_neg/positive_LIWC.csv','w')
    nFile=open('data/pos_neg/negative_LIWC.csv','w')
    pWriter=csv.writer(pFile)
    nWriter=csv.writer(nFile)
    f=open('data/misc/liwc_new.csv')
    reader=csv.reader(f)
    # Output header: participant/question ids followed by the LIWC feature names
    # taken from the liwc_new.csv header (first two columns are ids).
    header=['video','question']
    header+=reader.next()[2:]  # Python 2 only; would be next(reader) on Python 3
    pWriter.writerow(header)
    nWriter.writerow(header)
    listofParticipants=[int(i) for i in listofParticipants]
    listofParticipants.sort()
    # Keep only rows whose participant id falls inside the participant id range.
    for row in reader:
        if int(row[0])>=listofParticipants[0] and int(row[0])<=listofParticipants[-1]:
            if row[0] not in liwcVectors:
                liwcVectors[row[0]]=[(row[1], row[2:])]
            else:
                liwcVectors[row[0]].append((row[1], row[2:]))
    #questionAnswers: [(participantNo, question)]=[list of answers]
    #liwcVectors: participantNo: [list of (answer, vector)]
    for item in questionAnswers:
        participant_number=item[0]
        current_question=item[1]
        lines_for_this_question=[]
        answer_length=0.0  # float so the averaging division below is true division on Python 2
        for answer in questionAnswers[item]:
            vectors=liwcVectors[participant_number]
            for vector in vectors:
                if answer==vector[0]:
                    # Weight each utterance's LIWC vector by its word count...
                    utterance_length=len(answer.split(" "))
                    answer_length+=utterance_length
                    feature_vector=[float(i)*utterance_length for i in vector[1]]
                    lines_for_this_question.append(feature_vector)
        # ...then average feature-wise over the total word count of the response.
        # NOTE(review): if no utterance matched, answer_length stays 0.0 and the
        # zip(*[]) makes final_vector empty -- the row written below would then
        # contain only the two id columns.
        final_vector=[sum(value)/answer_length for value in zip(*lines_for_this_question)]
        final_vector.insert(0,current_question)
        final_vector.insert(0,str(participant_number))
        if questionType_PN[current_question]=='P':
            pWriter.writerow(final_vector)
        elif questionType_PN[current_question]=='N':
            nWriter.writerow(final_vector)
if __name__=="__main__":
    # Pipeline order matters: helper tables and transcripts must be loaded
    # first because the two LIWC passes read them via module globals.
    readHelperData()
    readTranscript()
    readLIWC_DND()
    readLIWC_PN()
SparshaMishra/Stroop_Effect | Stroop Effect Analysis.py | <gh_stars>1-10
# coding: utf-8
# # Stroop Effect
# In a Stroop task, participants are presented with a list of words, with each word displayed in a color of ink. The participant’s task is to say out loud the color of the ink in which the word is printed. The task has two conditions: a congruent words condition, and an incongruent words condition. In the congruent words condition, the words being displayed are color words whose names match the colors in which they are printed: for example RED, BLUE. In the incongruent words condition, the words displayed are color words whose names do not match the colors in which they are printed: for example PURPLE, ORANGE. In each case, we measure the time it takes to name the ink colors in equally-sized lists. Each participant will go through and record a time from each condition.
# ### Q1. What is our independent variable? What is our dependent variable?
# ANS - The independent variable is the word which is printed to manipulate our decision. Basically here we will be trying to figure the difference between the two conditions - congruent condition and the incongruent condition.
# The dependent variable is the time that the participant takes to read out the color of the ink of the word that is printed.
# ### Q2. What is an appropriate set of hypotheses for this task? What kind of statistical test do you expect to perform? Justify your choices.
# ANS - The appropriate set of hypothesis for this task in my case is Null Hypothesis (H0) which means that there is no change between the two conditions. A Null Hypothesis says that there is no difference between the time taken to perform both the tasks. But according to the Stroop Task, the Alternative Hypothesis (H1) would be that the incongruent task takes more time than the congruent task. It is a harder task for the participant to say the color out loud without reading the word that is printed with it. Here we are assuming that the incongruent task will be taking more time than the congruent task.
# The mathematical notation for the above statement is as follows:
#
# (Null Hypothesis) H0: μcongruent = μincongruent
#
# (Alternative Hypothesis) H1: μincongruent > μcongruent
# In this case, I will be performing a t-test because we do not know the population standard deviation and the sample set is less than 30. The t-test that will be performed is a one tailed t-test where the Alternative Hypothesis is that the participant's incongruent sample mean is going to be larger than the participant's congruent sample mean. As here our participants took the test at different times, we have dependent samples which is why we can compare the averages of both the dependent samples with the one tailed t-test.
# ### Q3. Report some descriptive statistics regarding this dataset. Include at least one measure of central tendency and at least one measure of variability.
# ANS - The Sample Size is - 24
#
# The Mean is - $xbar = \Sigma{x}/n$
# (Here the xbar is the Sample Mean, x is the value and n is the sample Size)
#
# Congruent: 14.05
# Incongruent: 22.02
#
# Median of the table is -
# Congruent: 14.3565
# Incongruent: 21.0175
#
# Sample Standard Deviation: $\sigma = \Sigma{(x - xbar)^2}/n$
#
# Congruent: 3.559358
# Incongruent: 4.797057
# In[51]:

# Import the libraries used for statistics, math and plotting.
from scipy import stats
import math
from scipy.stats import t
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')

# Enable inline viewing of graphs (Jupyter/IPython only; fails in a plain script)
get_ipython().run_line_magic('matplotlib', 'inline')

# Load the data from csv file (fetched over the network from GitHub)
stroop_path = "https://raw.githubusercontent.com/SparshaMishra/Stroop_Effect/master/stroopdata.csv"

# Parse the csv into pandas data structures
df = pd.read_csv(stroop_path)
congruentSeries = df['Congruent']
incongruentSeries = df['Incongruent']

# Display the table (bare expression: rendered only in a notebook cell)
df
# In[43]:

# Summary statistics (count, mean, std, quartiles) for both conditions.
df.describe()

# ###### Central Tendency
# As we can observe from the above table that the mean of the congruent condition is 14.051125 and the mean of the incongruent condition is 22.015917
# ###### Variability
# Here we can observe that the standard deviation for the congruent condition is 3.559358 and for incongruent condition is 4.797057
# ###### Q4. Provide one or two visualizations that show the distribution of the sample data. Write one or two sentences noting what you observe about the plot or plots.
# In[24]:

# Box plots for both the conditions
title = 'Box Plot for both the Conditions'
kind = 'box'
# BUG FIX: the original called ``dataFrame.plot`` but no ``dataFrame`` name
# exists -- the loaded table is bound to ``df``.
df.plot(title=title, kind=kind)
ylabel = plt.ylabel('Time (in seconds)')
# The above visualization of a box plot shows that the incongruent condition has two outliners, where both the outliner participants took longer time than the other participants in incongruent conditions. The result of these two conditions are shown in the form of a histogram below, where we can observe a uniform right shift from the congruent condition to the incongruent condition.
# In[38]:

# Histogram of the Congruent Condition
title = 'Histogram of the Congruent Condition'
kind = 'hist'
plot = congruentSeries.plot(title=title, kind=kind, bins=7)
xLabel = plt.xlabel('Time (in seconds)')
# Fix the axes to [0, 48] x [0, 7] so all three histograms are directly comparable.
window = plt.axis([0,48,0,7])

# In[37]:

# Histogram of the Incongruent Condition
title = 'Histogram of the Incongruent Condition'
kind = 'hist'
plot = incongruentSeries.plot(title=title, kind=kind)
xLabel = plt.xlabel('Time (in seconds)')
window = plt.axis([0,48,0,7])

# In[36]:

# Histogram of Both the Conditions (overlaid, semi-transparent)
title = 'Histogram of Both the Conditions'
kind = 'hist'
alpha = 0.5
plot = df.plot(title=title, kind=kind, alpha=alpha, bins=12)
xLabel = plt.xlabel('Time (in seconds)')
window = plt.axis([0,48,0,7])
# ### Q5. Now, perform the statistical test and report your results. What is your confidence level and your critical statistic value? Do you reject the null hypothesis or fail to reject it? Come to a conclusion in terms of the experiment task. Did the results match up with your expectations?
# In[48]:

# t-critical value for a one-tailed test at 95% confidence with 23 d.f.
t.ppf(.95, 23)

# Here we can see that for confidence level of 95%, the t-critical value is 1.7139
# The point estimate for the difference of the mean is 22.02 - 14.05 = 7.97

# In[49]:

# FIX: compute the paired difference in the direction of the alternative
# hypothesis (Incongruent - Congruent), matching the positive point estimate
# 7.97 quoted above; the original had the operands reversed. The standard
# deviation reported below is unaffected by the sign.
df['Difference'] = df['Incongruent'] - df['Congruent']
df['Difference'].std(axis=0)
# We observe here that our t-statistic is 8.02 which is much greater than our critical value which is 1.713. This is why we will be rejecting the Null Hypothesis.
# So it is proved that it does take a much longer time in the incongruent condition performed by the participants than in the congruent condition.
# I can now say that the results do match up to my expectations
# ### 6. Optional: What do you think is responsible for the effects observed? Can you think of an alternative or similar task that would result in a similar effect? Some research about the problem will be helpful for thinking about these two questions!
# In my opinion, I guess it is our subconsciousness and our stimuli response in the brain which automatically registers the word that is right in front of us. It is easily registered by the brain as the color of the word and the word displayed match with each other. But when the color and the word that is displayed are not the same, our brain takes time to register and first registers the word and then focuses on the color which causes error and thus takes more time to provide the correct response.
#
#
# Another similar task can be that we have to say the number of times the word is present in front of us.
# Like for example "one" , here our answer will be 1.
# But when it is displayed "one one", our mind registers the word "one" first and the count which is 2, later.
# ### Reference
#
# https://en.wikipedia.org/wiki/Stroop_effect
#
# http://www.statisticshowto.com/t-statistic/
#
# http://www.statisticssolutions.com/manova-analysis-paired-sample-t-test/
|
Ken2yLiu/ImageDT | imagedt/file/parse_annotation.py | # coding: utf-8
import os
import csv
import math
import numpy as np
from lxml import etree
class Anno_OP(object):
    """Helpers for reading, converting and writing Pascal-VOC-style XML
    annotations and ICDAR-style polygon text files.

    NOTE(review): parts of this class (``unicode``, writing ``bytes`` to files
    opened in text mode) are Python-2 only; those lines are kept as-is.
    """

    def __init__(self):
        super(Anno_OP, self).__init__()

    def read_xml(self, fxml):
        """
        :param fxml: xml file path.
        :return: root element; iterate objects via annotation.iter('object').
        """
        annotation = etree.parse(fxml).getroot()
        return annotation

    def parse_bndx_object(self, bndbox_object):
        """Return the bndbox as [xmin, ymin, xmax, ymax] floats."""
        rectangle = [bndbox_object.find(coord).text for coord in ['xmin', 'ymin', 'xmax', 'ymax']]
        # FIX: wrap in list() -- on Python 3 ``map`` is lazy and callers index
        # the result; on Python 2 this is identical to the old behaviour.
        return list(map(float, rectangle))

    def read_txt_bndbox(self, fpath):
        """Read a comma-separated bndbox file; one list of fields per line."""
        with open(fpath, 'r') as f:
            lines = f.readlines()
        return [line.strip().split(',') for line in lines]

    def parse_lab_xml(self, fxml):
        """Return every object's [xmin, ymin, xmax, ymax] from a VOC xml."""
        annotation = self.read_xml(fxml)
        return [self.parse_bndx_object(iter_object.find('bndbox')) for iter_object in annotation.iter('object')]

    def load_annoataion(self, poly_file):
        '''
        Load polygon annotations from a text file.

        Each line: x1,y1,x2,y2,x3,y3,x4,y4,label. A label of '*' or '###'
        marks an unreadable/ignored region (tag True).

        :param poly_file: path to the polygon text file
        :return: (polys float32 array of shape (n, 4, 2), tags bool array)
        '''
        text_polys = []
        text_tags = []
        if not os.path.exists(poly_file):
            # FIX: the missing-file path used to return a single array while the
            # normal path returns a tuple, so callers unpacking two values crashed.
            return np.array(text_polys, dtype=np.float32), np.array(text_tags, dtype=bool)
        with open(poly_file, 'r') as f:
            reader = csv.reader(f)
            for line in reader:
                label = line[-1]
                # strip BOM. \ufeff for python3, \xef\xbb\xbf for python2
                line = [i.strip('\ufeff').strip('\xef\xbb\xbf') for i in line]
                x1, y1, x2, y2, x3, y3, x4, y4 = list(map(float, line[:8]))
                text_polys.append([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
                if label == '*' or label == '###':
                    text_tags.append(True)
                else:
                    text_tags.append(False)
            # FIX: np.bool was removed in NumPy >= 1.24; the builtin bool is the
            # documented replacement and behaves identically here.
            return np.array(text_polys, dtype=np.float32), np.array(text_tags, dtype=bool)

    def parse_poly_locations(self, loca_info):
        """Axis-aligned integer bounds of a 4-point polygon: uses the first
        (top-left) and third (bottom-right) vertices."""
        xmin, ymin = map(math.floor, loca_info[0])
        xmax, ymax = map(math.ceil, loca_info[2])
        return xmin, ymin, xmax, ymax

    def convert_poly_to_Xml(self, poly_file):
        """Convert one polygon txt file into a VOC-style xml under a sibling
        'Annotations_xml' directory (class '2' = blurred text, '1' = readable)."""
        loca_infos, has_fonts = self.load_annoataion(poly_file)
        xml_dir = os.path.join(os.path.dirname(os.path.dirname(poly_file)), 'Annotations_xml')
        xml_name = os.path.join(xml_dir, os.path.basename(poly_file).replace('.txt', '.xml'))
        if not os.path.exists(xml_dir):
            os.mkdir(xml_dir)
        annotation = etree.Element("Annotation")
        for index, loca_info in enumerate(loca_infos):
            vocObject = etree.SubElement(annotation, "object")
            name = etree.SubElement(vocObject, "name")
            if has_fonts[index] == True:
                # fonts are blur
                name.text = '2'
            else:
                name.text = '1'
            desc = etree.SubElement(vocObject, "desc")
            desc.text = unicode('文字文本', 'utf-8')  # noqa: F821 -- Python 2 only
            xmins, ymins, xmaxs, ymaxs = map(str, self.parse_poly_locations(loca_info))
            bndbox = etree.SubElement(vocObject, "bndbox")
            xmin = etree.SubElement(bndbox, "xmin")
            xmin.text = xmins
            ymin = etree.SubElement(bndbox, "ymin")
            ymin.text = ymins
            xmax = etree.SubElement(bndbox, "xmax")
            xmax.text = xmaxs
            ymax = etree.SubElement(bndbox, "ymax")
            ymax.text = ymaxs
        xml = etree.tostring(annotation, pretty_print=True, encoding='UTF-8')
        # NOTE(review): writes bytes to a text-mode file -- works on Python 2 only.
        with open(xml_name, "w") as xmlfile:
            xmlfile.write(xml)

    def reset_xml_cls(self, xml_dir, name='3488', desc=u'圆柱圆台形' ):
        """Rewrite every xml in ``xml_dir`` so all objects get class ``name``
        and (when present) description ``desc``."""
        for index, f_path in enumerate(os.listdir(xml_dir)):
            xml_path = os.path.join(xml_dir, f_path)
            an_objects = self.read_xml(xml_path)
            for tag in an_objects.iter('object'):
                tag.find('name').text = name
                if tag.find('desc') is not None:
                    tag.find('desc').text = desc
            xml = etree.tostring(an_objects, pretty_print=True, encoding='UTF-8')
            with open(xml_path, "w") as xmlfile:
                xmlfile.write(xml)
            print("finished reset {0}/{1}".format(index+1, len(os.listdir(xml_dir))))

    def write_xml(self, bndboxs, scores, xmlname, thresh=0.1, classes='3488'):
        """Write detection boxes with confidence >= ``thresh`` to a VOC-style xml.

        ``bndboxs`` entries are (xmin, ymin, xmax, ymax); ``scores`` aligns by index.
        """
        annotation = etree.Element("Annotation")
        for index, loca_info in enumerate(bndboxs):
            if scores[index] < thresh:
                continue
            vocObject = etree.SubElement(annotation, "object")
            name = etree.SubElement(vocObject, "name")
            name.text = classes
            desc = etree.SubElement(vocObject, "desc")
            desc.text = unicode('object', 'utf-8')  # noqa: F821 -- Python 2 only
            confi = etree.SubElement(vocObject, "confidence")
            confi.text = str(scores[index])
            # xmin ,ymin, xmax, ymax
            xmins, ymins, xmaxs, ymaxs = map(str, loca_info)
            # ymin, xmin, ymax, xmax
            # ymins, xmins, ymaxs, xmaxs = map(str, loca_info)
            bndbox = etree.SubElement(vocObject, "bndbox")
            xmin = etree.SubElement(bndbox, "xmin")
            xmin.text = xmins
            ymin = etree.SubElement(bndbox, "ymin")
            ymin.text = ymins
            xmax = etree.SubElement(bndbox, "xmax")
            xmax.text = xmaxs
            ymax = etree.SubElement(bndbox, "ymax")
            ymax.text = ymaxs
        xml = etree.tostring(annotation, pretty_print=True, encoding='UTF-8')
        with open(xmlname, "w") as xmlfile:
            xmlfile.write(xml)

# Module-level singleton; NOTE(review): deliberately shadows the class name,
# so ``from ... import Anno_OP`` yields the instance, not the class.
Anno_OP = Anno_OP()
Ken2yLiu/ImageDT | imagedt/tools/metrics/__init__.py | # coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
from .detect_eval import * |
Ken2yLiu/ImageDT | setup.py | <reponame>Ken2yLiu/ImageDT<filename>setup.py<gh_stars>0
#!/usr/bin/env python
from setuptools import setup, find_packages
# Packaging metadata for the ``imagedt`` library.
setup(
    name = 'imagedt',
    version = '0.0.22',
    keywords = ('imagedt'),  # NOTE(review): parentheses alone do not make a tuple; this is just the string 'imagedt'
    description = 'a python lib for neural networks, file and image processing etc. ',
    long_description="##[Details](https://github.com/hanranCode/ImageDT)",
    long_description_content_type="text/markdown",
    license = 'Apache License 2.0',
    url = 'https://github.com/hanranCode/ImageDT',
    author = 'pytorch_fans11',
    author_email = '<EMAIL>',
    packages = find_packages(),  # auto-discover all imagedt subpackages
    include_package_data = True,
    platforms = 'any',
    # NOTE(review): runtime deps (cv2, lxml, tensorflow, ...) are not declared
    # here and must be preinstalled by the user.
    install_requires = [],
)
|
Ken2yLiu/ImageDT | imagedt/tensorflow/__init__.py | <reponame>Ken2yLiu/ImageDT<filename>imagedt/tensorflow/__init__.py
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
# from . import network
# from . import optim
# from . import trainer
from . import tools
from . import lite |
Ken2yLiu/ImageDT | test/Test_tensorflow.py | <filename>test/Test_tensorflow.py<gh_stars>0
# coding: utf-8
import sys
sys.path.append('./')
import os
os.environ['CUDA_VISIBLE_DIVICES'] = '1'
import cv2
import numpy as np
import tensorflow as tf
import nose.tools as ntools
from imagedt.tensorflow.tools import DataInterface
from imagedt.tensorflow.tools import data_provider
from imagedt.tensorflow.tools import RecordWriter
import imagedt
class Test_Own_Tensorflow(object):
    """Manual smoke tests for the imagedt.tensorflow tooling.

    NOTE(review): several methods depend on hard-coded dataset paths that only
    exist on the original author's machine; only the in-memory and local-file
    cases run elsewhere.
    """
    def __init__(self):
        super(Test_Own_Tensorflow, self).__init__()
        self.data_instance = DataInterface.DataSets()
        self.gen_datasets()
    def gen_datasets(self):
        # Train split: all zeros; test split: all ones -- so the batches are
        # trivially distinguishable in test_get_next_batch_datas below.
        self.data_instance.train.set_x(np.zeros([32,10]))
        self.data_instance.train.set_y(np.zeros([32]))
        self.data_instance.test.set_x(np.ones([32, 10]))
        self.data_instance.test.set_y(np.ones([32]))
    def test_get_next_batch_datas(self):
        # Batches drawn from the train split must be zeros, from test all ones.
        train_datas, train_labels = self.data_instance.train.next_batch(8, isFixed=False)
        ntools.assert_equal(train_datas[0][0], 0)
        ntools.assert_equal(train_labels[0], 0)
        test_datas, test_labels = self.data_instance.test.next_batch(8, isFixed=False)
        ntools.assert_equal(test_datas[0][0], 1)
        ntools.assert_equal(test_labels[0], 1)
    def test_data_provider(self):
        # train_file line: image [space] label
        train_file = '/data/dataset/pg_one/PG/PG_data0726v2/train_datas/train_datasets_original/train.txt'
        Data_Provider = data_provider.DataProvider(train_file, test_percent=0.01)
        datas = Data_Provider.get_data_provider
        images, labels = datas.train.next_batch(32)
        images, labels = data_provider.dynamically_loaded_data(images, labels)
    def write_class_tfrecords(self):
        # write_cls_records(train_image_file, labels_file, save_dir)
        # train.train_images_file: image.name label[split by ' ']
        # labels_file: class_name label[split by '\t']
        train_images_file = '/data/dataset/liby_offline/train_renet50/balance_trainset/train.txt'
        labels_file = '/data/dataset/liby_offline/train_renet50/balance_trainset/labels.txt'
        save_dir = '/ssd_data/liby/cls_eval_records/'
        RecordWriter.write_cls_records(train_images_file, labels_file, save_dir)
    def write_detect_tfrecords(self):
        # data_dir: contains {Jpg, Annotations}
        data_dir = './test/sources/data_dir/'
        save_dir = '/ssd_data/tmp'
        RecordWriter.write_detect_records(data_dir, save_dir)
    def noise_padd_op(self):
        # Run the tf_noise_padd graph op on one local test image and dump
        # before/after jpgs for visual inspection.
        img_file = imagedt.dir.loop('./test/sources/data_dir/', ['.jpg'])
        cvmat = cv2.imread(img_file[0])
        with tf.Session() as sess:
            noise_image = imagedt.tensorflow.tools.data_provider.tf_noise_padd(cvmat)
            image = sess.run(noise_image)
        cv2.imwrite('/ssd_data/tmp/origin.jpg', cvmat)
        cv2.imwrite('/ssd_data/tmp/noise_image.jpg', image)
if __name__ == '__main__':
    # Only the noise-padding op is exercised here; the other cases need
    # datasets that live on specific machines.
    test_suite = Test_Own_Tensorflow()
    test_suite.noise_padd_op()
|
Ken2yLiu/ImageDT | imagedt/image/process.py | <gh_stars>0
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
import os
import cv2
import numpy as np
from ..dir.dir_loop import loop
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP']
def reduce_imagenet_mean(cv_mat):
    """Subtract the per-channel ImageNet mean from a BGR OpenCV image.

    :param cv_mat: HxWx3 BGR image (OpenCV channel order).
    :return: image with the ImageNet mean removed, channel-wise.
    """
    imagenet_mean = {'R': 123., 'G': 117., 'B': 104.}
    blue, green, red = cv2.split(cv_mat)
    centered = [blue - imagenet_mean['B'],
                green - imagenet_mean['G'],
                red - imagenet_mean['R']]
    return cv2.merge(centered)
def noise_padd(img, edge_size=224, start_pixel_value=0):
    """
    Resize ``img`` so its longer side equals ``edge_size`` and pad the shorter
    dimension symmetrically with random noise, producing a square image.

    img: cvMat (HxWx3)
    edge_size: target square edge length
    start_pixel_value: lower bound (inclusive) of the random noise values
    return: cvMat [rectangle, height=width=edge_size]
    """
    h, w, _ = img.shape
    width_ratio = float(w) / edge_size
    height_ratio = float(h) / edge_size
    if width_ratio > height_ratio:
        resize_width = edge_size
        resize_height = int(round(h / width_ratio))
        # keep the padding total even so both noise strips have equal height
        if (edge_size - resize_height) % 2 == 1:
            resize_height += 1
    else:
        resize_height = edge_size
        resize_width = int(round(w / height_ratio))
        if (edge_size - resize_width) % 2 == 1:
            resize_width += 1
    img = cv2.resize(img, (int(resize_width), int(resize_height)), interpolation=cv2.INTER_LINEAR)
    channels = 3
    # Fill the ends of the short dimension with random noise.
    # BUG FIX: use floor division -- under Python 3 the former ``/ 2`` produced
    # a float and np.random.randint rejects float shape entries.
    if width_ratio > height_ratio:
        padding = (edge_size - resize_height) // 2
        noise_size = (padding, edge_size)
        if channels > 1:
            noise_size += (channels,)
        noise = np.random.randint(start_pixel_value, 256, noise_size).astype('uint8')
        img = np.concatenate((noise, img, noise), axis=0)
    else:
        padding = (edge_size - resize_width) // 2
        noise_size = (edge_size, padding)
        if channels > 1:
            noise_size += (channels,)
        noise = np.random.randint(start_pixel_value, 256, noise_size).astype('uint8')
        img = np.concatenate((noise, img, noise), axis=1)
    return img
def remove_broken_image(data_dir):
    """Delete image files under ``data_dir`` that OpenCV cannot decode.

    A file is removed when cv2.imread returns None (undecodable) or when
    reading it raises an error.
    """
    image_files = loop(data_dir, IMG_EXTENSIONS)
    for image_file in image_files:
        try:
            img_mat = cv2.imread(image_file)
            if img_mat is None:
                os.remove(image_file)
                print('remove broken image {0}'.format(image_file))
        # FIX: a bare ``except:`` also swallowed KeyboardInterrupt/SystemExit,
        # making the scan impossible to interrupt; catch only ordinary errors.
        except Exception:
            os.remove(image_file)
            print('remove broken file {0}'.format(image_file))
def resize_with_scale(cvmat, max_length):
    """Shrink ``cvmat`` so its longer side is at most ``max_length``,
    preserving the aspect ratio; images already small enough pass through."""
    height, width, _ = cvmat.shape
    longest = max(height, width)
    if longest > max_length:
        scale = float(longest) / max_length
        new_size = (int(width / scale), int(height / scale))
        cvmat = cv2.resize(cvmat, new_size)
    return cvmat
def padd_pixel(img, edge_size_w=200, edge_size_h=96):
    """Fit ``img`` into an ``edge_size_w`` x ``edge_size_h`` canvas, shrinking
    as needed and padding the short dimension with light (230-239) noise."""
    h, w, _ = img.shape
    width_ratio = float(w) / edge_size_w
    if width_ratio > 1:
        img = cv2.resize(img, (int(w/width_ratio), int(h/width_ratio)))
        h, w, _ = img.shape
        width_ratio = float(w) / edge_size_w
    height_ratio = float(h) / edge_size_h
    if height_ratio > 1:
        img = cv2.resize(img, (int(w/height_ratio), int(h/height_ratio)))
        h, w, _ = img.shape
        height_ratio = float(h) / edge_size_h
    if width_ratio > height_ratio:
        resize_width = edge_size_w
        resize_height = int(round(h / width_ratio))
        # keep the padding total even so both noise strips match
        if (edge_size_h - resize_height) % 2 == 1:
            resize_height += 1
    else:
        resize_height = edge_size_h
        resize_width = int(round(w / height_ratio))
        if (edge_size_w - resize_width) % 2 == 1:
            resize_width += 1
    img = cv2.resize(img, (int(resize_width), int(resize_height)), interpolation=cv2.INTER_LINEAR)
    channels = 3
    # BUG FIX: floor division -- under Python 3 ``/ 2`` yields a float and
    # np.random.randint rejects float shape entries.
    if width_ratio > height_ratio:
        padding = (edge_size_h - resize_height) // 2
        noise_size = (padding, edge_size_w)
        if channels > 1:
            noise_size += (channels,)
        noise = np.random.randint(230, 240, noise_size).astype('uint8')
        img = np.concatenate((noise, img, noise), axis=0)
    else:
        padding = (edge_size_w - resize_width) // 2
        noise_size = (edge_size_h, padding)
        if channels > 1:
            noise_size += (channels,)
        noise = np.random.randint(230, 240, noise_size).astype('uint8')
        img = np.concatenate((noise, img, noise), axis=1)
    return img
Ken2yLiu/ImageDT | imagedt/tensorflow/tools/data_provider.py | <reponame>Ken2yLiu/ImageDT
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
import cv2
import numpy as np
import tensorflow as tf
from .DataInterface import *
from ...image.process import noise_padd
class DataProvider(object):
    """Splits an image/label listing file into train and test DataSets.

    Each line of the listing is ``image<sep>label`` where the separator is a
    space if the first line contains one, otherwise a comma.
    """
    def __init__(self, train_file, test_percent=0):
        super(DataProvider, self).__init__()
        self.train_file = train_file
        self.test_percent = test_percent
        self._check_infos()
        self.load_image_label_pairs()

    def _check_infos(self):
        # test_percent is a fraction of the listing, so it must lie in [0, 1].
        assert 0 <= self.test_percent <= 1

    def load_image_label_pairs(self):
        """Parse the listing into ``self.train_infos`` (n x 2 array of
        [path, label] strings)."""
        print("loading data......")
        with open(self.train_file, 'r') as handle:
            rows = handle.readlines()
        delimiter = ' ' if ' ' in rows[0] else ','
        self.train_infos = np.array([row.strip().split(delimiter) for row in rows])

    @property
    def get_data_provider(self):
        """Return a DataSets whose train split holds the leading
        (1 - test_percent) fraction and whose test split holds the rest."""
        total = len(self.train_infos)
        num_test = int(total * self.test_percent)
        num_train = total - num_test
        datasets = DataSets()
        datasets.train.set_x(self.train_infos[:, 0][:num_train])
        datasets.train.set_y(self.train_infos[:, 1][:num_train])
        datasets.test.set_x(self.train_infos[:, 0][num_train:])
        datasets.test.set_y(self.train_infos[:, 1][num_train:])
        return datasets
def dynamically_loaded_data(image_paths, labels, height=224, width=224):
    """Load images from disk into a single float32 batch array.

    FIXES: honour the ``height``/``width`` parameters (the original hard-coded
    224 regardless of them) and materialise ``labels`` as a list (``map`` is a
    lazy iterator on Python 3).

    :param image_paths: iterable of image file paths
    :param labels: iterable of label strings/ints
    :return: (images float32 array, labels list of ints)
    """
    labels = list(map(int, labels))
    # max(x, 128): the pre-allocated batch is at least 128 entries.
    # NOTE(review): this still grows with len(image_paths) -- it does not cap
    # memory as the original comment suggested.
    images = np.zeros([max(len(image_paths), 128), height, width, 3], np.float32)
    for index, image_path in enumerate(image_paths):
        cvmat = cv2.imread(image_path)
        h, w, c = cvmat.shape
        if h != height or w != width:
            # noise_padd produces a square image, so this is only meaningful
            # when height == width (as with the 224 defaults).
            cvmat = noise_padd(cvmat, edge_size=height, start_pixel_value=0)
        images[index] = np.array(cvmat)
    return images, labels
def resize_image_keep_aspect(image, lo_dim=224):
    """Graph op: scale ``image`` so its longer side equals ``lo_dim`` while
    keeping the aspect ratio."""
    dim0 = tf.shape(image)[0]
    dim1 = tf.shape(image)[1]
    # Scale factor derived from the larger dimension.
    longest = tf.maximum(dim0, dim1)
    scale = tf.to_float(longest) / tf.constant(lo_dim, dtype=tf.float32)
    target0 = tf.to_int32(tf.to_float(dim0) / scale)
    target1 = tf.to_int32(tf.to_float(dim1) / scale)
    return tf.image.resize_images(image, [target0, target1])
def tf_noise_padd(images, max_edge=224, start_pixel=0):
    """Graph op: resize ``images`` so the longer side equals ``max_edge`` and
    pad the shorter dimension with uniform random noise to a square.

    TensorFlow counterpart of imagedt.image.process.noise_padd; returns the
    (not yet evaluated) padded-image op.
    """
    # resize image with scale
    images = resize_image_keep_aspect(images, max_edge)
    height = tf.shape(images)[0]
    width = tf.shape(images)[1]
    channels = 3
    # height > width: pad left/right. The second strip absorbs any odd
    # remainder so the final width is exactly max_edge.
    def case_height_width():
        left_pad_size = tf.div(tf.subtract(max_edge, width), 2)
        right_pad_size = tf.subtract(tf.subtract(max_edge, width), left_pad_size)
        noise_left = tf.random_uniform((height, left_pad_size, channels), minval=start_pixel, maxval=255,dtype=tf.float32)
        noise_right = tf.random_uniform((height, right_pad_size, channels), minval=start_pixel, maxval=255, dtype=tf.float32)
        merge = tf.concat([noise_left, images, noise_right], axis=1)
        return merge
    # width > height: pad top/bottom symmetrically (same odd-remainder rule).
    def case_width_height():
        top_padd_size = tf.div(tf.subtract(max_edge, height), 2)
        bottom_padd_size = tf.subtract(tf.subtract(max_edge, height), top_padd_size)
        noise_top = tf.random_uniform((top_padd_size, width, channels), minval=start_pixel, maxval=255,dtype=tf.float32)
        noise_bottom = tf.random_uniform((bottom_padd_size, width, channels), minval=start_pixel, maxval=255, dtype=tf.float32)
        merge = tf.concat([noise_top, images, noise_bottom], axis=0)
        return merge
    # Branch selection happens at graph-execution time via tf.cond.
    padd_noise_op = tf.cond(tf.greater(height, width), case_height_width, case_width_height)
    return padd_noise_op
|
Ken2yLiu/ImageDT | imagedt/file/file_write.py | # coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
import csv
import json
def write_txt(data, path):
    """Write ``data`` to a text file at ``path``.

    Strings are written verbatim; a list is written one string item per line
    (non-string items are skipped); anything else is JSON-serialised.

    FIX: the original referenced the Python-2-only ``unicode`` builtin, which
    raises NameError on Python 3; this shim keeps both versions working.
    """
    try:
        string_types = (str, unicode)  # noqa: F821 -- Python 2
    except NameError:
        string_types = (str,)
    with open(path, "w") as text_file:
        if isinstance(data, string_types):
            text_file.write(str(data))
        elif isinstance(data, list):
            for line in data:
                if isinstance(line, string_types):
                    text_file.write(str(line) + '\n')
        else:
            text_file.write(json.dumps(data))
def write_csv(data, path, is_excel=False):
    """Write an iterable of rows to a CSV file at ``path``.

    :param is_excel: when True, prefix a UTF-8 BOM so Excel auto-detects the
        encoding.

    FIX: the original used the Python-2-only ``file()`` builtin (removed in
    Python 3) and binary mode, which breaks the Python 3 csv module; the mode
    is now chosen per interpreter version.
    """
    import sys
    if sys.version_info[0] >= 3:
        csvfile = open(path, 'w', newline='')
    else:
        csvfile = open(path, 'wb')
    try:
        if is_excel:
            csvfile.write(u"\ufeff")
        writer = csv.writer(csvfile)
        for row in data:
            writer.writerow(row)
    finally:
        csvfile.close()
Ken2yLiu/ImageDT | imagedt/tensorflow/tools/tfmodel_wrapper.py | # coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import tensorflow as tf
class TFmodel_Wrapper(object):
    """Loads a frozen TensorFlow .pb graph and exposes single-image top-1
    classification via :meth:`predict`.

    The input/output node names must match tensors in the frozen graph;
    they are validated at construction time with a helpful error message.
    """
    def __init__(self, pbmodel_path, input_nodename='input', output_nodename='softmax',):
        super(TFmodel_Wrapper, self).__init__()
        self.pbmodel_path = pbmodel_path
        self.input_node = input_nodename
        self.output_node = output_nodename
        self._load_model()
        self._check_node_name()
        self._set_output_node()
    def _load_model(self):
        # easy way! load the frozen GraphDef into the default graph, then keep
        # one long-lived session for all predict() calls.
        print("load tfmodel {0}".format(self.pbmodel_path))
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.pbmodel_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        self.sess = tf.Session()
    def _check_node_name(self):
        # checking input and output node name, notice, notice node name error
        self.ops = tf.get_default_graph().get_operations()
        all_tensor_names = [output.name for op in self.ops for output in op.outputs]
        for node_name in [self.input_node, self.output_node]:
            if not isinstance(node_name, str):
                raise ValueError("Node name: {0} should be string, like: 'input' 'data' 'softmax'.".format(node_name))
            # Tensor names are "<op>:0" for the op's first output.
            if node_name+':0' not in all_tensor_names:
                print("##### Print some node name : {0}~{1}. #####".format(all_tensor_names[:2], all_tensor_names[-2:]))
                raise ValueError("node_name: {0} not in graph, please check input or output node name!".format(node_name))
    def _set_output_node(self):
        # Get handles to input and output tensors
        self.tensor_dict = {}
        intput_tensor = self.input_node + ':0'
        output_tensor = self.output_node + ':0'
        self.image_tensor = tf.get_default_graph().get_tensor_by_name(intput_tensor)
        self.tensor_dict[self.output_node] = tf.get_default_graph().get_tensor_by_name(output_tensor)
    def predict(self, image):
        """Run inference on one image (HWC array, batched internally).

        :return: (top-1 class index, its confidence score)
        """
        output_dict = self.sess.run(self.tensor_dict,
            feed_dict={self.image_tensor: np.expand_dims(image, axis=0)})
        # get top 1 class and confidence
        predict_cls = output_dict[self.output_node][0].argsort()[::-1][0]
        conf = output_dict[self.output_node][0][predict_cls]
        return predict_cls, conf
# tf.Session(config=tf.ConfigProto(allow_soft_placement=False,
# log_device_placement=True,
# )) |
alexnaoki/LHC_Hidroweb_Kivy | main.py | <reponame>alexnaoki/LHC_Hidroweb_Kivy
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.network.urlrequest import UrlRequest
from kivy.uix.popup import Popup
from kivy.properties import ObjectProperty, StringProperty
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.filechooser import FileChooserListView
from kivy.uix.button import Button
from kivy.base import ExceptionHandler, ExceptionManager
from kivy.clock import mainthread
from mapview import MapMarker, MapView
from mapview.clustered_marker_layer import ClusteredMarkerLayer
from mapview.geojson import GeoJsonMapLayer
from shapely.geometry import shape,mapping, Point, Polygon, MultiPolygon
import shapefile
from json import dumps
from requests.models import PreparedRequest
import xml.etree.ElementTree as ET
import datetime
import calendar
import threading
import csv
import time
import os
class MainScreen(Screen):
    """Landing screen holding the main MapView; acts as the navigation hub to
    the inventory/shapefile/bbox/download screens."""
    # Class attribute: a single cluster layer shared by all instances, built
    # at import time.
    layer = ClusteredMarkerLayer(cluster_node_size=4,cluster_radius=200)
    popup = ObjectProperty()
    mapa = ObjectProperty()
    bbox = 0  # last map bounding box; 0 until one is captured
    def change_toInventario(self):
        # Navigate to the inventory-CSV chooser screen.
        self.manager.current = 'inventario'
    def change_toShape(self):
        self.manager.current = 'shapefilescreen'
    def change_toBbox(self):
        self.manager.current = 'bboxscreen'
    def change_toDownload_shp(self):
        self.manager.current = 'downloadscreen_shp'
    # def get_bbox(self):
    #     bbox = self.manager.get_screen('bboxscreen').ids['mapbbox'].bbox
    #     # self.manager.get_screen('bboxscreen').ids.labelbbox.text = str(self.manager.get_screen('main').ids['map'].bbox)
    #     self.manager.get_screen('bboxscreen').ids.labelbbox.text = f'Latitude : [{bbox[2]:.3f} {bbox[0]:.3f}]\nLongitude: [{bbox[3]:.3f} {bbox[1]:.3f}]'
    #     print(self.manager.get_screen('bboxscreen').ids['mapbbox'].bbox)
    #     self.bbox = bbox
    def _donwload_teste(self, req, result):
        # UrlRequest on-success callback (NOTE(review): "donwload" typo kept --
        # renaming would break any .kv / UrlRequest wiring referencing it).
        print('sucesso')
        print(result)
    def _download_error(self, *args):
        # UrlRequest on-error/on-failure callback.
        print('ERRO download')
    @mainthread
    def set_screen(self):
        # Screen switches must happen on the UI thread, hence @mainthread.
        self.manager.current = 'main'
    def show_codes(self):
        # Debug helper: dump the station codes collected by ShapefileScreen.
        print(self.manager.get_screen('shapefilescreen').codes)
class WindowManager(ScreenManager):
    """Root screen manager; the app's screens are registered on it (see .kv)."""
class InventarioScreen(Screen):
    """Screen for picking the station-inventory CSV and clustering its
    stations onto the main map."""
    # Shared, class-level cluster layer (one instance for the whole class).
    layer = ClusteredMarkerLayer(cluster_node_size=4,cluster_radius=200)
    inventario_path = StringProperty('')
    def teste(self):
        # Debug helper: drop a single marker at a fixed location on the map.
        mark = MapMarker(lat=50, lon=10)
        self.manager.get_screen('main').ids['map'].add_marker(mark)
    def show_inventarioCluster(self, selection, *args):
        """Read the chosen inventory CSV and add one cluster marker per station.

        ``selection`` comes from the FileChooser; only the first entry is used.
        The CSV must have 'Latitude' and 'Longitude' columns.
        """
        # print(selection)
        # self.manager.current = 'loading'
        # print(self.popup.inventario_path)
        # print(filechooserscreen.selection[0])
        # self.ids['progressbar'].value = 25
        self.inventario_path = selection[0]
        with open(self.inventario_path, encoding='utf8') as csvfile:
            data = csv.DictReader(csvfile)
            for row in data:
                self.layer.add_marker(lat=float(row['Latitude']), lon=float(row['Longitude']))
        print('Added to layer')
        # self.ids['map'].add_widget(self.layer)
        self.manager.get_screen('main').ids['map'].add_widget(self.layer)
        self.layer.reposition()
        print('Added Layer')
        self.manager.current = 'main'
        self.manager.transition.direction = "left"
        # self.ids['progressbar'].value = 50
class ShapefileScreen(Screen):
    """Filters inventory stations to those inside the polygons of a
    user-selected shapefile and marks them on the main map."""
    # NOTE(review): class-level mutable list -- shared by all instances and
    # never cleared between selections.
    codes = []
    def get_codes(self, selection, *args):
        """For each polygon in the selected shapefile, mark every inventory
        station inside it and collect its 'Codigo'."""
        print(selection[0])
        shp_path = selection[0]
        shp = shapefile.Reader(shp_path)
        print(shp)
        all_shapes = shp.shapes()
        all_records = shp.records()  # NOTE(review): read but unused below
        print(all_shapes)
        print(all_records)
        print(self.manager.get_screen('inventario').ids.filechooserscreen.selection[0])
        for i in range(len(all_shapes)):
            boundary = all_shapes[i]
            # Convert to a shapely geometry for point-in-polygon tests.
            boundary = shape(boundary)
            print(boundary)
            # Re-reads the inventory CSV once per polygon (O(polygons * rows)).
            with open(self.manager.get_screen('inventario').ids.filechooserscreen.selection[0], encoding='utf8') as csvfile:
                data = csv.DictReader(csvfile)
                for row in data:
                    # shapely points are (x, y) = (longitude, latitude)
                    if Point((float(row['Longitude']), float(row['Latitude']))).within(boundary):
                        print('Dentro')
                        print(float(row['Latitude']), float(row['Longitude']), int(row['Codigo']))
                        mark = MapMarker(lat=float(row['Latitude']), lon=float(row['Longitude']), source='marker.png')
                        self.manager.get_screen('main').ids['map'].add_marker(mark)
                        self.codes.append(int(row['Codigo']))
                    else:
                        pass
        print(self.codes)
        self.manager.current = 'main'
        self.manager.transition.direction = "down"
class BBoxScreen(Screen):
    """Screen that selects inventory stations inside a map bounding box."""

    codes = []

    def get_bbox(self):
        """Read the current bbox from the bbox map widget and show it."""
        screen = self.manager.get_screen('bboxscreen')
        bbox = screen.ids['mapbbox'].bbox
        screen.ids.labelbbox.text = f'Latitude : [{bbox[2]:.3f} {bbox[0]:.3f}]\nLongitude: [{bbox[3]:.3f} {bbox[1]:.3f}]'
        print(screen.ids['mapbbox'].bbox)
        self.bbox = bbox

    def get_codes(self):
        """Collect station codes inside the bbox and mark them on the main map."""
        bbox = self.manager.get_screen('bboxscreen').ids['mapbbox'].bbox
        print(bbox)
        inventario_path = self.manager.get_screen('inventario').ids.filechooserscreen.selection[0]
        print(inventario_path)
        with open(inventario_path, encoding='utf8') as csvfile:
            for record in csv.DictReader(csvfile):
                lat = float(record['Latitude'])
                lon = float(record['Longitude'])
                # Keep: lon strictly between bbox[1]/bbox[3], lat between bbox[0]/bbox[2].
                if bbox[1] < lon < bbox[3] and bbox[0] < lat < bbox[2]:
                    print(record['Longitude'], record['Latitude'], record['Codigo'])
                    marker = MapMarker(lat=lat, lon=lon, source='marker.png')
                    self.manager.get_screen('main').ids['map'].add_marker(marker)
                    self.codes.append(int(record['Codigo']))
        print(len(self.codes))
        self.manager.current = 'main'
        self.manager.transition.direction = 'up'
class DownloadScreenShp(Screen):
    """Screen that downloads ANA Hidroweb historical series (rainfall or flow)
    for the previously selected stations, writing one CSV per station into a
    ``dados_LHC_hidroweb`` folder under the user-chosen directory.

    The two public entry points, download_ANA_station (rain, tipoDados=2) and
    download_ANA_station_vazao (flow, tipoDados=3), were near-duplicates; the
    shared logic now lives in private helpers.
    """

    # Base endpoint of the ANA historical-series web service.
    _ANA_API = 'http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroSerieHistorica'

    def selected(self, directory, filename):
        # Show only the directory part of the chosen path as the save target.
        self.ids.downloadShpPath.text = os.path.dirname(filename)

    def teste(self):
        print(self.manager.get_screen('main').ids.toggle1.state)

    def _ensure_save_folder(self):
        """Create the output folder if needed and remember it in self.save_folder."""
        self.save_folder = os.path.join(self.ids.downloadShpPath.text,
                                        'dados_LHC_hidroweb')
        if not os.path.exists(self.save_folder):
            print('nao existe, criando pasta')
            os.mkdir(self.save_folder)
        else:
            print('existe')

    def _selected_station_codes(self):
        """Return station codes from whichever selection mode is toggled.

        toggle1 -> shapefile selection, toggle2 -> bounding-box selection.
        If both are down, the bounding-box codes win (same precedence as the
        original code).  Returns [] when neither toggle is down (the original
        raised NameError in that case).
        """
        print(self.manager.get_screen('main').ids.toggle1.state)
        print(self.manager.get_screen('main').ids.toggle2.state)
        list_codes = []
        if self.manager.get_screen('main').ids.toggle1.state == 'down':
            list_codes = self.manager.get_screen('shapefilescreen').codes
        if self.manager.get_screen('main').ids.toggle2.state == 'down':
            print('toggle2 down')
            list_codes = self.manager.get_screen('bboxscreen').codes
        print(list_codes)
        return list_codes

    def _request_series(self, data_type, on_success):
        """Fire one asynchronous UrlRequest per selected station.

        data_type: 2 = rainfall ('chuva'), 3 = flow ('vazao').
        NOTE(review): self.params/self.req are overwritten per station while
        requests are in flight; safe only because every request of one batch
        uses the same data_type — confirm before mixing batches.
        """
        self._ensure_save_folder()
        for station in self._selected_station_codes():
            self.params = {'codEstacao': station, 'dataInicio': '',
                           'dataFim': '', 'tipoDados': '{}'.format(data_type),
                           'nivelConsistencia': ''}
            url_req = PreparedRequest()
            url_req.prepare_url(self._ANA_API, self.params)
            self.req = UrlRequest(
                url_req.url,
                on_success=on_success,
                on_error=self._download_error,
                on_failure=self._download_error,
            )
            print(station)

    def download_ANA_station(self):
        """Download rainfall ('chuva', tipoDados=2) series for the selection."""
        self._request_series(2, self._download_sucess)

    def download_ANA_station_vazao(self):
        """Download flow ('vazao', tipoDados=3) series for the selection."""
        self._request_series(3, self._download_sucess_vazao)

    def _save_station_csv(self, result, tag_prefix, cast_consistencia):
        """Parse one station's XML payload and write Date/Consistence/Data CSV.

        tag_prefix: 'Chuva' or 'Vazao' — daily value tags are e.g. 'Chuva01'.
        cast_consistencia: callable applied to the NivelConsistencia text
        (str for rainfall, int for flow — preserves the original behavior).
        """
        tree = ET.ElementTree(ET.fromstring(result))
        root = tree.getroot()
        list_data = []
        list_consistenciaF = []
        list_month_dates = []
        codigo = None
        for serie in root.iter('SerieHistorica'):
            codigo = serie.find('EstacaoCodigo').text
            consistencia = serie.find('NivelConsistencia').text
            date = datetime.datetime.strptime(serie.find('DataHora').text,
                                              '%Y-%m-%d %H:%M:%S')
            # One XML element covers a whole month; expand to daily dates.
            last_day = calendar.monthrange(date.year, date.month)[1]
            month_dates = [date + datetime.timedelta(days=d)
                           for d in range(last_day)]
            data = []
            list_consistencia = []
            for day in range(last_day):
                value = '{}{:02}'.format(tag_prefix, day + 1)
                try:
                    data.append(float(serie.find(value).text))
                    list_consistencia.append(cast_consistencia(consistencia))
                except TypeError:
                    # Tag present but with empty text: keep the raw value.
                    data.append(serie.find(value).text)
                    list_consistencia.append(cast_consistencia(consistencia))
                except AttributeError:
                    # Tag missing for this day of the month.
                    data.append(None)
                    list_consistencia.append(cast_consistencia(consistencia))
            list_data += data
            list_consistenciaF += list_consistencia
            list_month_dates += month_dates
        print(list_data)
        if list_data:
            typedata = self.params['tipoDados']
            rows = zip(list_month_dates, list_consistenciaF, list_data)
            out_path = os.path.join(self.save_folder,
                                    '{}_{}.csv'.format(typedata, codigo))
            with open(out_path, 'w', newline='') as f:
                writer = csv.writer(f)
                writer.writerow(('Date',
                                 'Consistence_{}_{}'.format(typedata, codigo),
                                 'Data_{}_{}'.format(typedata, codigo)))
                for row in rows:
                    writer.writerow(row)

    def _donwload_teste(self, req, result):
        # Debug success callback (misspelled name kept for compatibility).
        print('sucesso')
        print(result)

    def _download_sucess(self, req, result):
        """Success callback for rainfall downloads (consistencia kept as str)."""
        try:
            self._save_station_csv(result, 'Chuva', str)
        except Exception:
            # Original used a bare except that swallowed everything; keep the
            # best-effort behavior but let SystemExit/KeyboardInterrupt through.
            print('ERRO')

    def _download_sucess_vazao(self, req, result):
        """Success callback for flow downloads (consistencia cast to int)."""
        try:
            self._save_station_csv(result, 'Vazao', int)
        except Exception:
            print('ERRO')

    def _download_error(self, *args):
        print('ERRO download')
class RootApp(App):
    """Application entry point; the whole widget tree lives in main.kv."""

    def build(self):
        root_widget = Builder.load_file('main.kv')
        return root_widget
if __name__ == '__main__':
    # Only launch the GUI when executed as a script, not when imported.
    RootApp().run()
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyMicrodata/registry.py | <filename>lib/rdflib/plugins/parsers/pyMicrodata/registry.py
# -*- coding: utf-8 -*-
"""
Hardcoded version of the current microdata->RDF registry. There is also a local registry to include some test cases.
Finally, there is a local dictionary for prefix mapping for the registry items; these are the preferred prefixes
for those vocabularies, and are used to make the output nicer.
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: registry.py,v 1.5 2012/09/05 16:40:43 ivan Exp $
$Date: 2012/09/05 16:40:43 $
"""
import sys
# Unpack the interpreter version once; used at the bottom of this module to
# choose between the stdlib json decoder and simplejson.
(py_v_major, py_v_minor, py_v_micro, py_v_final, py_v_serial) = sys.version_info
# To be added soon:
# "Class" : {"subPropertyOf" : "http://www.w3.org/2000/01/rdf-schema#Class"},
# "Property" : {"subPropertyOf" : "http://www.w3.org/1999/02/22-rdf-syntax-ns#Property"}
_registry = """
{
"http://schema.org/": {
"propertyURI": "vocabulary",
"multipleValues": "unordered",
"properties": {
"additionalType": {"subPropertyOf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"},
"blogPosts": {"multipleValues": "list"},
"breadcrumb": {"multipleValues": "list"},
"byArtist": {"multipleValues": "list"},
"creator": {"multipleValues": "list"},
"episode": {"multipleValues": "list"},
"episodes": {"multipleValues": "list"},
"event": {"multipleValues": "list"},
"events": {"multipleValues": "list"},
"founder": {"multipleValues": "list"},
"founders": {"multipleValues": "list"},
"itemListElement": {"multipleValues": "list"},
"musicGroupMember": {"multipleValues": "list"},
"performerIn": {"multipleValues": "list"},
"actor": {"multipleValues": "list"},
"actors": {"multipleValues": "list"},
"performer": {"multipleValues": "list"},
"performers": {"multipleValues": "list"},
"producer": {"multipleValues": "list"},
"recipeInstructions": {"multipleValues": "list"},
"season": {"multipleValues": "list"},
"seasons": {"multipleValues": "list"},
"subEvent": {"multipleValues": "list"},
"subEvents": {"multipleValues": "list"},
"track": {"multipleValues": "list"},
"tracks": {"multipleValues": "list"}
}
},
"http://microformats.org/profile/hcard": {
"propertyURI": "vocabulary",
"multipleValues": "unordered"
},
"http://microformats.org/profile/hcalendar#": {
"propertyURI": "vocabulary",
"multipleValues": "unordered",
"properties": {
"categories": {"multipleValues": "list"}
}
}
}
"""
vocab_names = {
"http://schema.org/" : "schema",
"http://xmlns.com/foaf/0.1/" : "foaf",
"http://microformats.org/profile/hcard#" : "hcard",
"http://microformats.org/profile/hcalendar#" : "hcalendar"
}
# This is the local version, added mainly for testing
_myRegistry = """
{
"http://vocabulary.list/": {
"propertyURI": "vocabulary",
"multipleValues": "list",
"properties": {
"list": {"multipleValues": "list"},
"typed": {"datatype": "http://typed"}
}
},
"http://vocabulary.unordered/": {
"propertyURI": "vocabulary",
"multipleValues": "unordered",
"properties": {
"list": {"multipleValues": "list"},
"typed": {"datatype": "http://typed"}
}
},
"http://contextual.unordered/": {
"propertyURI": "contextual",
"multipleValues": "unordered",
"properties": {
"list": {"multipleValues": "list"},
"typed": {"datatype": "http://typed"}
}
},
"http://contextual.list/": {
"propertyURI": "contextual",
"multipleValues": "list",
"properties": {
"list": {"multipleValues": "list"},
"typed": {"datatype": "http://typed"}
}
},
"http://n.whatwg.org/work": {
"propertyURI" : "contextual",
"multipleValues" : "list"
}
}
"""
# Decode the JSON registries.  The stdlib json module exists on Python 2.6+
# and 3.x; older interpreters fall back to the simplejson package.
if py_v_major >= 3 or (py_v_major == 2 and py_v_minor >= 6):
    import json
    _decode = json.loads
else:
    import simplejson
    _decode = simplejson.loads

registry = _decode(_registry)
myRegistry = _decode(_myRegistry)

# Local (test) entries override or extend the hardcoded registry.
for key, value in list(myRegistry.items()):
    registry[key] = value
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyRdfa/extras/httpheader.py | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
""" Utility functions to work with HTTP headers.
This module provides some utility functions useful for parsing
and dealing with some of the HTTP 1.1 protocol headers which
are not adequately covered by the standard Python libraries.
Requires Python 2.2 or later.
The functionality includes the correct interpretation of the various
Accept-* style headers, content negotiation, byte range requests,
HTTP-style date/times, and more.
There are a few classes defined by this module:
* class content_type -- media types such as 'text/plain'
* class language_tag -- language tags such as 'en-US'
* class range_set -- a collection of (byte) range specifiers
* class range_spec -- a single (byte) range specifier
The primary functions in this module may be categorized as follows:
* Content negotiation functions...
* acceptable_content_type()
* acceptable_language()
* acceptable_charset()
* acceptable_encoding()
* Mid-level header parsing functions...
* parse_accept_header()
* parse_accept_language_header()
* parse_range_header()
* Date and time...
* http_datetime()
* parse_http_datetime()
* Utility functions...
* quote_string()
* remove_comments()
* canonical_charset()
* Low level string parsing functions...
* parse_comma_list()
* parse_comment()
* parse_qvalue_accept_list()
* parse_media_type()
* parse_number()
* parse_parameter_list()
* parse_quoted_string()
* parse_range_set()
* parse_range_spec()
* parse_token()
* parse_token_or_quoted_string()
And there are some specialized exception classes:
* RangeUnsatisfiableError
* RangeUnmergableError
* ParseError
See also:
* RFC 2616, "Hypertext Transfer Protocol -- HTTP/1.1", June 1999.
<http://www.ietf.org/rfc/rfc2616.txt>
Errata at <http://purl.org/NET/http-errata>
* RFC 2046, "(MIME) Part Two: Media Types", November 1996.
<http://www.ietf.org/rfc/rfc2046.txt>
* RFC 3066, "Tags for the Identification of Languages", January 2001.
<http://www.ietf.org/rfc/rfc3066.txt>
Note: I have made a small modification on the regexp for internet date,
to make it more liberal (ie, accept a time zone string of the form +0000)
Ivan Herman <http://www.ivan-herman.net>, March 2011.
Have added statements to make it (hopefully) Python 3 compatible.
I<NAME> <http://www.ivan-herman.net>, August 2012.
"""
__author__ = "<NAME> <http://deron.meranda.us/>"
__date__ = "2012-08-31"
__version__ = "1.02"
__credits__ = """Copyright (c) 2005 <NAME> <http://deron.meranda.us/>
Licensed under GNU LGPL 2.1 or later. See <http://www.fsf.org/>.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
# Character classes from RFC 2616 section 2.2
SEPARATORS = '()<>@,;:\\"/[]?={} \t'
LWS = ' \t\n\r' # linear white space
CRLF = '\r\n'
DIGIT = '0123456789'
HEX = '0123456789ABCDEFabcdef'

import sys
PY3 = (sys.version_info[0] >= 3)  # selects str-vs-unicode handling below

# Try to get a set/frozenset implementation if possible
try:
    type(frozenset)
except NameError:
    try:
        # The demset.py module is available at http://deron.meranda.us/
        from demset import set, frozenset
        __emulating_set = True # So we can clean up global namespace later
    except ImportError:
        pass

try:
    # Turn character classes into set types (for Python 2.4 or greater)
    SEPARATORS = frozenset([c for c in SEPARATORS])
    LWS = frozenset([c for c in LWS])
    CRLF = frozenset([c for c in CRLF])
    DIGIT = frozenset([c for c in DIGIT])
    HEX = frozenset([c for c in HEX])
    # NOTE(review): on Python 3 the comprehension variable does not leak, so
    # `del c` raises NameError here; the frozenset assignments above have
    # already completed, so the except branch below is a harmless no-op.
    del c
except NameError:
    # Python 2.3 or earlier, leave as simple strings
    pass
def _is_string(obj):
    """Return True when *obj* is a text string (str, plus unicode on Python 2)."""
    if PY3:
        return isinstance(obj, str)
    return isinstance(obj, (str, unicode))
def http_datetime( dt=None ):
    """Formats a datetime as an HTTP 1.1 Date/Time string.

    Takes a standard Python datetime object and returns a string
    formatted according to the HTTP 1.1 date/time format (RFC 1123,
    e.g. 'Sun, 06 Nov 1994 08:49:37 GMT').

    If no datetime is provided (or None) then the current time is used.

    ABOUT TIMEZONES: If the passed in datetime object is naive it is
    assumed to be in UTC already.  But if it has a tzinfo component,
    the returned timestamp string will have been converted to UTC
    automatically.  So if you use timezone-aware datetimes, you need
    not worry about conversion to UTC.
    """
    import datetime
    if not dt:
        dt = datetime.datetime.utcnow()
    else:
        try:
            # Aware datetime: shift by its UTC offset to get UTC wall time.
            dt = dt - dt.utcoffset()
        except TypeError:
            # Naive datetime: utcoffset() is None, so the subtraction raises
            # TypeError; assume the value is already UTC.  (The original used
            # a bare `except:` here, which also hid unrelated errors.)
            pass
    # NOTE: %a/%b are locale-dependent; HTTP requires English names, which
    # is what the default C locale produces.
    return dt.strftime('%a, %d %b %Y %H:%M:%S GMT')
def parse_http_datetime( datestring, utc_tzinfo=None, strict=False ):
    """Returns a datetime object from an HTTP 1.1 Date/Time string.

    HTTP dates are always in UTC, so the returned datetime object is in
    UTC as well.  You can optionally pass in a tzinfo object representing
    the UTC timezone, and the returned datetime will then be
    timezone-aware.

    If 'strict' is True, then only the RFC 1123 format is recognized.
    Otherwise the backwards-compatible RFC 1036 and Unix asctime(3)
    formats are also recognized.

    The day-of-the-week is not validated.  Two-digit years, although not
    HTTP 1.1 compliant, are treated according to recommended Y2K rules.

    Raises ValueError when the string cannot be parsed or is not in GMT.
    """
    import re, datetime
    # RFC 1123: 'Sun, 06 Nov 1994 08:49:37 GMT'
    m = re.match(r'(?P<DOW>[a-z]+), (?P<D>\d+) (?P<MON>[a-z]+) (?P<Y>\d+) (?P<H>\d+):(?P<M>\d+):(?P<S>\d+(\.\d+)?) (?P<TZ>[a-zA-Z0-9_+]+)$',
                 datestring, re.IGNORECASE)
    if not m and not strict:
        # Unix asctime(3): 'Sun Nov 16 08:49:37 1994' (no timezone field).
        m = re.match(r'(?P<DOW>[a-z]+) (?P<MON>[a-z]+) (?P<D>\d+) (?P<H>\d+):(?P<M>\d+):(?P<S>\d+) (?P<Y>\d+)$',
                     datestring, re.IGNORECASE)
        if not m:
            # RFC 1036: 'Sunday, 06-Nov-94 08:49:37 GMT'
            m = re.match(r'(?P<DOW>[a-z]+), (?P<D>\d+)-(?P<MON>[a-z]+)-(?P<Y>\d+) (?P<H>\d+):(?P<M>\d+):(?P<S>\d+(\.\d+)?) (?P<TZ>\w+)$',
                         datestring, re.IGNORECASE)
    if not m:
        raise ValueError('HTTP date is not correctly formatted')
    try:
        tz = m.group('TZ').upper()
    except IndexError:
        # The asctime pattern defines no TZ group; asctime times are GMT.
        tz = 'GMT'
    if tz not in ('GMT','UTC','0000','00:00'):
        raise ValueError('HTTP date is not in GMT timezone')
    monname = m.group('MON').upper()
    mdict = {'JAN':1, 'FEB':2, 'MAR':3, 'APR':4, 'MAY':5, 'JUN':6,
             'JUL':7, 'AUG':8, 'SEP':9, 'OCT':10, 'NOV':11, 'DEC':12}
    month = mdict.get(monname)
    if not month:
        raise ValueError('HTTP date has an unrecognizable month')
    y = int(m.group('Y'))
    if y < 100:
        # Y2K rule: two-digit years map into the current or previous century.
        # BUGFIX: use integer division; true division made `century` a float
        # on Python 3, and datetime() then raised TypeError on the year.
        century = datetime.datetime.utcnow().year // 100
        if y < 50:
            y = century * 100 + y
        else:
            y = (century - 1) * 100 + y
    d = int(m.group('D'))
    hour = int(m.group('H'))
    minute = int(m.group('M'))
    # BUGFIX: seconds may carry a fraction; datetime needs integer seconds
    # plus microseconds (the original passed a float, raising TypeError).
    sec = float(m.group('S'))
    second = int(sec)
    microsecond = int(round((sec - second) * 1000000))
    return datetime.datetime( y, month, d, hour, minute, second,
                              microsecond, tzinfo=utc_tzinfo )
class RangeUnsatisfiableError(ValueError):
    """Raised when a byte range lies outside the file size boundaries."""
    def __init__(self, reason=None):
        # Fall back to a generic message when no reason is supplied.
        ValueError.__init__(self, reason or 'Range is unsatisfiable')
class RangeUnmergableError(ValueError):
    """Raised when byte ranges are noncontiguous and can not be merged."""
    def __init__(self, reason=None):
        # Fall back to a generic message when no reason is supplied.
        ValueError.__init__(self, reason or 'Ranges can not be merged together')
class ParseError(ValueError):
    """String parsing error, carrying the input and the failure position."""
    def __init__(self, args, input_string, at_position):
        ValueError.__init__(self, args)
        self.input_string = input_string
        self.at_position = at_position
    def __str__(self):
        pos = self.at_position
        text = self.input_string
        if pos >= len(text):
            return '%s\n\tOccured at end of string' % self.args[0]
        # Show up to 16 characters of context at the failure position.
        return '%s\n\tOccured near %s' % (self.args[0], repr(text[pos:pos + 16]))
def is_token(s):
    """Determines if the string is a valid HTTP token.

    RFC 2616 section 2.2 defines token characters as any CHAR (octets
    0-127) except CTLs (0-31 and DEL, 127) and the separator characters.
    """
    for c in s:
        # BUGFIX: the original tested `ord(c) > 128`, which accepted DEL
        # (127) and octet 128; tokens may only contain printable ASCII
        # (32-126) that is not a separator.
        if ord(c) < 32 or ord(c) > 126 or c in SEPARATORS:
            return False
    return True
def parse_comma_list(s, start=0, element_parser=None, min_count=0, max_count=0):
    """Parses a comma-separated list with optional whitespace.

    Takes an optional callback function `element_parser`, which
    is assumed to be able to parse an individual element.  It
    will be passed the string and a `start` argument, and
    is expected to return a tuple (parsed_result, chars_consumed).

    If no element_parser is given, then either single tokens or
    quoted strings will be parsed.

    If min_count > 0, then at least that many non-empty elements
    must be in the list, or an error is raised.

    If max_count > 0, then no more than that many non-empty elements
    may be in the list, or an error is raised.

    Returns a tuple (results, chars_consumed); raises ParseError on
    count violations or a bad starting position.
    """
    if min_count > 0 and start == len(s):
        raise ParseError('Comma-separated list must contain some elements',s,start)
    elif start >= len(s):
        raise ParseError('Starting position is beyond the end of the string',s,start)
    if not element_parser:
        element_parser = parse_token_or_quoted_string
    results = []
    pos = start
    while pos < len(s):
        e = element_parser( s, pos )
        if not e or e[1] == 0:
            break # end of data?
        else:
            results.append( e[0] )
            pos += e[1]
        # Skip linear whitespace following the element.
        while pos < len(s) and s[pos] in LWS:
            pos += 1
        # Anything other than a comma here ends the list.
        if pos < len(s) and s[pos] != ',':
            break
        while pos < len(s) and s[pos] == ',':
            # skip comma and any "empty" elements
            pos += 1 # skip comma
            while pos < len(s) and s[pos] in LWS:
                pos += 1
    if len(results) < min_count:
        raise ParseError('Comma-separated list does not have enough elements',s,pos)
    elif max_count and len(results) > max_count:
        raise ParseError('Comma-separated list has too many elements',s,pos)
    return (results, pos-start)
def parse_token(s, start=0):
    """Parses a single HTTP token beginning at position `start`.

    A token is a string defined by RFC 2616 section 2.2 as:
       token = 1*<any CHAR except CTLs or separators>

    Returns a tuple (token, chars_consumed), or ('', 0) if no token
    starts at the given string position.  On a syntax error, a
    ParseError exception will be raised.
    """
    # Delegate to the combined parser with quoting disabled.
    return parse_token_or_quoted_string(s, start, allow_token=True, allow_quoted=False)
def quote_string(s, always_quote=True):
    """Produces a quoted string according to HTTP 1.1 rules.

    If always_quote is False and the string is also a valid token,
    then this function may return the string without quotes.
    """
    pieces = []
    must_quote = False
    for ch in s:
        if ord(ch) < 32 or ord(ch) > 127 or ch in SEPARATORS:
            # Characters outside the token set get backslash-escaped and
            # force the quoted form.
            pieces.append('\\' + ch)
            must_quote = True
        else:
            pieces.append(ch)
    body = ''.join(pieces)
    if must_quote or always_quote:
        return '"' + body + '"'
    return body
def parse_quoted_string(s, start=0):
    """Parses a quoted string beginning at position `start`.

    Returns a tuple (string, chars_consumed).  The surrounding quote
    marks are removed and every backslash-escape is replaced with the
    character it represents.
    """
    # Delegate to the combined parser with token parsing disabled.
    return parse_token_or_quoted_string(s, start, allow_token=False, allow_quoted=True)
def parse_token_or_quoted_string(s, start=0, allow_quoted=True, allow_token=True):
    """Parses a token or a quoted-string.

    's' is the string to parse, while start is the position within the
    string where parsing should begin.  It will returns a tuple
    (token, chars_consumed), with all \-escapes and quotation already
    processed.

    Syntax is according to BNF rules in RFC 2616 section 2.2,
    specifically the 'token' and 'quoted-string' declarations.
    Syntax errors in the input string will result in ParseError
    being raised.

    If allow_quoted is False, then only tokens will be parsed instead
    of either a token or quoted-string.

    If allow_token is False, then only quoted-strings will be parsed
    instead of either a token or quoted-string.
    """
    if not allow_quoted and not allow_token:
        raise ValueError('Parsing can not continue with options provided')
    if start >= len(s):
        raise ParseError('Starting position is beyond the end of the string',s,start)
    # A leading double-quote selects quoted-string mode for the whole parse.
    has_quote = (s[start] == '"')
    if has_quote and not allow_quoted:
        raise ParseError('A quoted string was not expected', s, start)
    if not has_quote and not allow_token:
        raise ParseError('Expected a quotation mark', s, start)
    s2 = ''
    pos = start
    if has_quote:
        pos += 1
    while pos < len(s):
        c = s[pos]
        if c == '\\' and has_quote:
            # Note this is NOT C-style escaping; the character after the \ is
            # taken literally.
            pos += 1
            if pos == len(s):
                raise ParseError("End of string while expecting a character after '\\'",s,pos)
            s2 += s[pos]
            pos += 1
        elif c == '"' and has_quote:
            # Unescaped closing quote terminates the quoted-string.
            break
        elif not has_quote and (c in SEPARATORS or ord(c)<32 or ord(c)>127):
            # Token mode: stop (without consuming) at the first non-token char.
            break
        else:
            s2 += c
            pos += 1
    if has_quote:
        # Make sure we have a closing quote mark
        if pos >= len(s) or s[pos] != '"':
            raise ParseError('Quoted string is missing closing quote mark',s,pos)
        else:
            pos += 1
    return s2, (pos - start)
def remove_comments(s, collapse_spaces=True):
    """Removes any ()-style comments from a string.

    In HTTP, ()-comments can nest, and this function will correctly
    deal with that.

    If 'collapse_spaces' is True, then if there is any whitespace
    surrounding the comment, it will be replaced with a single space
    character.  Whitespace also collapses across multiple comment
    sequences, so that "a (b) (c) d" becomes just "a d".

    Otherwise, if 'collapse_spaces' is False then all whitespace which
    is outside any comments is left intact as-is.
    """
    if '(' not in s:
        return s # simple case
    # A collects the output fragments; dostrip marks that the next text piece
    # should be left-stripped; added_comment_space tracks whether the single
    # collapsed space for the current comment run has already been emitted.
    A = []
    dostrip = False
    added_comment_space = False
    pos = 0
    if collapse_spaces:
        # eat any leading spaces before a comment
        i = s.find('(')
        if i >= 0:
            while pos < i and s[pos] in LWS:
                pos += 1
            if pos != i:
                pos = 0
            else:
                dostrip = True
                added_comment_space = True # lie
    while pos < len(s):
        if s[pos] == '(':
            cmt, k = parse_comment( s, pos )
            pos += k
            if collapse_spaces:
                dostrip = True
                if not added_comment_space:
                    if len(A) > 0 and A[-1] and A[-1][-1] in LWS:
                        # previous part ended with whitespace
                        A[-1] = A[-1].rstrip()
                        A.append(' ') # comment becomes one space
                        added_comment_space = True
        else:
            # Copy literal text up to the next comment (or end of string).
            i = s.find( '(', pos )
            if i == -1:
                if dostrip:
                    text = s[pos:].lstrip()
                    if s[pos] in LWS and not added_comment_space:
                        A.append(' ')
                        added_comment_space = True
                else:
                    text = s[pos:]
                if text:
                    A.append(text)
                    dostrip = False
                    added_comment_space = False
                break # end of string
            else:
                if dostrip:
                    text = s[pos:i].lstrip()
                    if s[pos] in LWS and not added_comment_space:
                        A.append(' ')
                        added_comment_space = True
                else:
                    text = s[pos:i]
                if text:
                    A.append(text)
                    dostrip = False
                    added_comment_space = False
                pos = i
    # Drop any trailing whitespace left after a final stripped comment.
    if dostrip and len(A) > 0 and A[-1] and A[-1][-1] in LWS:
        A[-1] = A[-1].rstrip()
    return ''.join(A)
def _test_comments():
    """A self-test on comment processing.  Returns number of test failures."""
    def _testrm( a, b, collapse ):
        # One test case: removing comments from `a` must yield exactly `b`.
        b2 = remove_comments( a, collapse )
        if b != b2:
            print( 'Comment test failed:' )
            print( '   remove_comments( %s, collapse_spaces=%s ) -> %s' % (repr(a), repr(collapse), repr(b2)) )
            print( '   expected %s' % repr(b) )
            return 1
        return 0
    failures = 0
    failures += _testrm( r'', '', False )
    failures += _testrm( r'(hello)', '', False)
    failures += _testrm( r'abc (hello) def', 'abc  def', False)
    failures += _testrm( r'abc (he(xyz)llo) def', 'abc  def', False)
    failures += _testrm( r'abc (he\(xyz)llo) def', 'abc llo) def', False)
    failures += _testrm( r'abc(hello)def', 'abcdef', True)
    failures += _testrm( r'abc (hello) def', 'abc def', True)
    failures += _testrm( r'abc (hello)def', 'abc def', True)
    failures += _testrm( r'abc(hello) def', 'abc def', True)
    failures += _testrm( r'abc(hello) (world)def', 'abc def', True)
    failures += _testrm( r'abc(hello)(world)def', 'abcdef', True)
    failures += _testrm( r' (hello) (world) def', 'def', True)
    failures += _testrm( r'abc (hello) (world) ', 'abc', True)
    return failures
def parse_comment(s, start=0):
    """Parses a ()-style comment from a header value.

    Returns tuple (comment, chars_consumed), where the comment will
    have had the outer-most parentheses and white space stripped.  Any
    nested comments will still have their parentheses and whitespace
    left intact.

    All \-escaped quoted pairs will have been replaced with the actual
    characters they represent, even within the inner nested comments.

    You should note that only a few HTTP headers, such as User-Agent
    or Via, allow ()-style comments within the header value.

    A comment is defined by RFC 2616 section 2.2 as:
       comment = "(" *( ctext | quoted-pair | comment ) ")"
       ctext   = <any TEXT excluding "(" and ")">
    """
    if start >= len(s):
        raise ParseError('Starting position is beyond the end of the string',s,start)
    if s[start] != '(':
        raise ParseError('Comment must begin with opening parenthesis',s,start)
    s2 = ''
    nestlevel = 1
    pos = start + 1
    # Skip whitespace immediately inside the opening parenthesis.
    while pos < len(s) and s[pos] in LWS:
        pos += 1
    while pos < len(s):
        c = s[pos]
        if c == '\\':
            # Note this is not C-style escaping; the character after the \ is
            # taken literally.
            pos += 1
            if pos == len(s):
                raise ParseError("End of string while expecting a character after '\\'",s,pos)
            s2 += s[pos]
            pos += 1
        elif c == '(':
            nestlevel += 1
            s2 += c
            pos += 1
        elif c == ')':
            nestlevel -= 1
            pos += 1
            if nestlevel >= 1:
                # Closing a nested comment: keep its parenthesis.
                s2 += c
            else:
                # Closed the outermost comment: done.
                break
        else:
            s2 += c
            pos += 1
    if nestlevel > 0:
        raise ParseError('End of string reached before comment was closed',s,pos)
    # Now rstrip s2 of all LWS chars.
    while len(s2) and s2[-1] in LWS:
        s2 = s2[:-1]
    return s2, (pos - start)
class range_spec(object):
"""A single contiguous (byte) range.
A range_spec defines a range (of bytes) by specifying two offsets,
the 'first' and 'last', which are inclusive in the range. Offsets
are zero-based (the first byte is offset 0). The range can not be
empty or negative (has to satisfy first <= last).
The range can be unbounded on either end, represented here by the
None value, with these semantics:
* A 'last' of None always indicates the last possible byte
(although that offset may not be known).
* A 'first' of None indicates this is a suffix range, where
the last value is actually interpreted to be the number
of bytes at the end of the file (regardless of file size).
Note that it is not valid for both first and last to be None.
"""
__slots__ = ['first','last']
    def __init__(self, first=0, last=None):
        # Delegates to set() so construction and mutation share validation.
        self.set( first, last )

    def set(self, first, last):
        """Sets the value of this range given the first and last offsets.

        Raises ValueError if first > last, or if both are None.
        """
        if first is not None and last is not None and first > last:
            raise ValueError("Byte range does not satisfy first <= last.")
        elif first is None and last is None:
            raise ValueError("Byte range can not omit both first and last offsets.")
        self.first = first
        self.last = last

    def __repr__(self):
        # e.g. 'modulename.range_spec(0,499)'
        return '%s.%s(%s,%s)' % (self.__class__.__module__, self.__class__.__name__,
                                 self.first, self.last)

    def __str__(self):
        """Returns a string form of the range as would appear in a Range: header."""
        # Produces 'first-last', 'first-' (open-ended) or '-last' (suffix).
        if self.first is None and self.last is None:
            return ''
        s = ''
        if self.first is not None:
            s += '%d' % self.first
        s += '-'
        if self.last is not None:
            s += '%d' % self.last
        return s
    def __eq__(self, other):
        """Compare ranges for equality.

        Note that if non-specific ranges are involved (such as 34- and -5),
        they could compare as not equal even though they may represent
        the same set of bytes in some contexts.
        """
        return self.first == other.first and self.last == other.last

    def __ne__(self, other):
        """Compare ranges for inequality.

        Note that if non-specific ranges are involved (such as 34- and -5),
        they could compare as not equal even though they may represent
        the same set of bytes in some contexts.
        """
        return not self.__eq__(other)

    # Ranges have no total order, so all relational comparisons are refused.
    def __lt__(self, other):
        """< operator is not defined"""
        raise NotImplementedError('Ranges can not be relationally compared')
    def __le__(self, other):
        """<= operator is not defined"""
        raise NotImplementedError('Ranges can not be relationally compared')
    def __gt__(self, other):
        """> operator is not defined"""
        raise NotImplementedError('Ranges can not be relationally compared')
    def __ge__(self, other):
        """>= operator is not defined"""
        raise NotImplementedError('Ranges can not be relationally compared')
def copy(self):
"""Makes a copy of this range object."""
return self.__class__( self.first, self.last )
def is_suffix(self):
"""Returns True if this is a suffix range.
A suffix range is one that specifies the last N bytes of a
file regardless of file size.
"""
return self.first == None
def is_fixed(self):
"""Returns True if this range is absolute and a fixed size.
This occurs only if neither first or last is None. Converse
is the is_unbounded() method.
"""
return first is not None and last is not None
def is_unbounded(self):
"""Returns True if the number of bytes in the range is unspecified.
This can only occur if either the 'first' or the 'last' member
is None. Converse is the is_fixed() method.
"""
return self.first is None or self.last is None
def is_whole_file(self):
"""Returns True if this range includes all possible bytes.
This can only occur if the 'last' member is None and the first
member is 0.
"""
return self.first == 0 and self.last is None
def __contains__(self, offset):
"""Does this byte range contain the given byte offset?
If the offset < 0, then it is taken as an offset from the end
of the file, where -1 is the last byte. This type of offset
will only work with suffix ranges.
"""
if offset < 0:
if self.first is not None:
return False
else:
return self.last >= -offset
elif self.first is None:
return False
elif self.last is None:
return True
else:
return self.first <= offset <= self.last
def fix_to_size(self, size):
"""Changes a length-relative range to an absolute range based upon given file size.
Ranges that are already absolute are left as is.
Note that zero-length files are handled as special cases,
since the only way possible to specify a zero-length range is
with the suffix range "-0". Thus unless this range is a suffix
range, it can not satisfy a zero-length file.
If the resulting range (partly) lies outside the file size then an
error is raised.
"""
if size == 0:
if self.first is None:
self.last = 0
return
else:
raise RangeUnsatisfiableError("Range can satisfy a zero-length file.")
if self.first is None:
# A suffix range
self.first = size - self.last
if self.first < 0:
self.first = 0
self.last = size - 1
else:
if self.first > size - 1:
raise RangeUnsatisfiableError('Range begins beyond the file size.')
else:
if self.last is None:
# An unbounded range
self.last = size - 1
return
def merge_with(self, other):
"""Tries to merge the given range into this one.
The size of this range may be enlarged as a result.
An error is raised if the two ranges do not overlap or are not
contiguous with each other.
"""
if self.is_whole_file() or self == other:
return
elif other.is_whole_file():
self.first, self.last = 0, None
return
a1, z1 = self.first, self.last
a2, z2 = other.first, other.last
if self.is_suffix():
if z1 == 0: # self is zero-length, so merge becomes a copy
self.first, self.last = a2, z2
return
elif other.is_suffix():
self.last = max(z1, z2)
else:
raise RangeUnmergableError()
elif other.is_suffix():
if z2 == 0: # other is zero-length, so nothing to merge
return
else:
raise RangeUnmergableError()
assert a1 is not None and a2 is not None
if a2 < a1:
# swap ranges so a1 <= a2
a1, z1, a2, z2 = a2, z2, a1, z1
assert a1 <= a2
if z1 is None:
if z2 is not None and z2 + 1 < a1:
raise RangeUnmergableError()
else:
self.first = min(a1, a2)
self.last = None
elif z2 is None:
if z1 + 1 < a2:
raise RangeUnmergableError()
else:
self.first = min(a1, a2)
self.last = None
else:
if a2 > z1 + 1:
raise RangeUnmergableError()
else:
self.first = a1
self.last = max(z1, z2)
return
class range_set(object):
    """A collection of range_specs, with units (e.g., bytes)."""
    __slots__ = ['units', 'range_specs']

    def __init__(self):
        # Default to byte ranges, the only units HTTP defines (RFC 2616 14.35).
        self.units = 'bytes'
        self.range_specs = []  # a list of range_spec objects

    def __str__(self):
        """Renders the set as it would appear in a Range: header."""
        return self.units + '=' + ', '.join([str(s) for s in self.range_specs])

    def __repr__(self):
        """Python representation of this object."""
        return '%s.%s(%s)' % (self.__class__.__module__,
                              self.__class__.__name__,
                              repr(self.__str__()) )

    def from_str(self, s, valid_units=('bytes','none')):
        """Sets this range set based upon a string, such as the Range: header.

        You can also use the parse_range_set() function for more control.

        If a parsing error occurs, the pre-existing value of this range
        set is left unchanged (the new value is committed only after
        parsing fully succeeds).
        """
        r, k = parse_range_set( s, valid_units=valid_units )
        if k < len(s):
            raise ParseError("Extra unparsable characters in range set specifier",s,k)
        self.units = r.units
        self.range_specs = r.range_specs

    def is_single_range(self):
        """Does this range specifier consist of only a single range set?"""
        return len(self.range_specs) == 1

    def is_contiguous(self):
        """Can the collection of range_specs be coalesced into a single contiguous range?"""
        if len(self.range_specs) <= 1:
            return True
        merged = self.range_specs[0].copy()
        for s in self.range_specs[1:]:
            try:
                merged.merge_with(s)
            except Exception:
                # BUGFIX: narrowed from a bare "except:" which also
                # swallowed KeyboardInterrupt/SystemExit.  Any merge
                # failure means the ranges are not contiguous.
                return False
        return True

    def fix_to_size(self, size):
        """Changes all length-relative range_specs to absolute range_specs based upon given file size.

        If none of the range_specs in this set can be satisfied, then the
        entire set is considered unsatisfiable and an error is raised.
        Otherwise any unsatisfiable range_specs will simply be removed
        from this set.
        """
        for i in range(len(self.range_specs)):
            try:
                self.range_specs[i].fix_to_size( size )
            except RangeUnsatisfiableError:
                self.range_specs[i] = None  # mark for removal below
        self.range_specs = [s for s in self.range_specs if s is not None]
        if len(self.range_specs) == 0:
            raise RangeUnsatisfiableError('No ranges can be satisfied')

    def coalesce(self):
        """Collapses all consecutive range_specs which together define a contiguous range.

        Note though that this method will not re-sort the range_specs, so a
        potentially contiguous range may not be collapsed if they are
        not sorted.  For example the ranges:
            10-20, 30-40, 20-30
        will not be collapsed to just 10-40.  However if the ranges are
        sorted first as with:
            10-20, 20-30, 30-40
        then they will collapse to 10-40.
        """
        if len(self.range_specs) <= 1:
            return
        # BUGFIX: keep merging into the last surviving spec instead of
        # indexing pairwise; the old pairwise loop lost the merge anchor,
        # so the documented example 10-20, 20-30, 30-40 did NOT collapse
        # all the way down to 10-40.
        kept = self.range_specs[0]
        for i in range(1, len(self.range_specs)):
            current = self.range_specs[i]
            try:
                kept.merge_with( current )
                self.range_specs[i] = None # to be deleted later
            except RangeUnmergableError:
                # Not mergeable; this spec becomes the new anchor.
                kept = current
        self.range_specs = [r for r in self.range_specs if r is not None]
def parse_number( s, start=0 ):
    """Parses a positive decimal integer number from the string.

    A tuple is returned (number, chars_consumed).  If the string is not
    a valid decimal number, then (None,0) is returned.

    Raises ParseError if 'start' is past the end of the string.
    """
    if start >= len(s):
        raise ParseError('Starting position is beyond the end of the string',s,start)
    if s[start] not in DIGIT:
        return (None,0) # not a number
    # Scan forward over the run of digits, then convert in one step.
    end = start
    while end < len(s) and s[end] in DIGIT:
        end += 1
    return int(s[start:end]), end - start
def parse_range_spec( s, start=0 ):
    """Parses a (byte) range_spec such as "0-499", "500-", or "-200".

    Returns a tuple (range_spec, chars_consumed).

    Raises ParseError if the string is not a valid range spec.
    """
    if start >= len(s):
        raise ParseError('Starting position is beyond the end of the string',s,start)
    if s[start] not in DIGIT and s[start] != '-':
        raise ParseError("Invalid range, expected a digit or '-'",s,start)
    first, last = None, None
    pos = start
    first, k = parse_number( s, pos )
    pos += k
    # BUGFIX: guard the index; input like "500" (digits with no '-')
    # previously raised IndexError here instead of ParseError.
    if pos < len(s) and s[pos] == '-':
        pos += 1
        if pos < len(s):
            last, k = parse_number( s, pos )
            pos += k
    else:
        raise ParseError("Byte range must include a '-'",s,pos)
    if first is None and last is None:
        raise ParseError('Byte range can not omit both first and last indices.',s,start)
    R = range_spec( first, last )
    return R, pos-start
def parse_range_header( header_value, valid_units=('bytes','none') ):
    """Parses the value of an HTTP Range: header.

    Pass in the header's value only, without the "Range:" name itself.

    Returns a range_set object; raises ParseError if the whole value
    can not be consumed.
    """
    ranges, consumed = parse_range_set( header_value, valid_units=valid_units )
    if consumed < len(header_value):
        raise ParseError('Range header has unexpected or unparsable characters',
                         header_value, consumed)
    return ranges
def parse_range_set( s, start=0, valid_units=('bytes','none') ):
    """Parses a (byte) range set specifier.

    The expected syntax is "<units>=<range_spec>[, <range_spec>...]",
    e.g. "bytes=0-499, 500-999".

    Returns a tuple (range_set, chars_consumed).
    Raises ParseError on malformed input or an unsupported units token.
    """
    if start >= len(s):
        raise ParseError('Starting position is beyond the end of the string',s,start)
    pos = start
    # The units token, e.g. "bytes".
    units, k = parse_token( s, pos )
    pos += k
    if valid_units and units not in valid_units:
        raise ParseError('Unsupported units type in range specifier',s,start)
    # Optional linear white space is allowed around the '='.
    while pos < len(s) and s[pos] in LWS:
        pos += 1
    if pos < len(s) and s[pos] == '=':
        pos += 1
    else:
        raise ParseError("Invalid range specifier, expected '='",s,pos)
    while pos < len(s) and s[pos] in LWS:
        pos += 1
    # At least one comma-separated range spec is required.
    range_specs, k = parse_comma_list( s, pos, parse_range_spec, min_count=1 )
    pos += k
    # Make sure no trash is at the end of the string
    while pos < len(s) and s[pos] in LWS:
        pos += 1
    if pos < len(s):
        raise ParseError('Unparsable characters in range set specifier',s,pos)
    ranges = range_set()
    ranges.units = units
    ranges.range_specs = range_specs
    return ranges, pos-start
def _split_at_qfactor( s ):
    """Splits a string at the quality factor (;q=) parameter.

    Returns the left and right substrings as a two-member tuple; the
    right substring begins with the qvalue itself (everything after
    the '=' and any white space).  If no q parameter is found, the
    whole string is returned as the left member and '' as the right.
    """
    # It may be faster, but incorrect, to use s.split(';q=',1), since
    # HTTP allows any amount of linear white space (LWS) to appear
    # between the parts, so it could also be "; q = ".
    # We do this parsing 'manually' for speed rather than using a
    # regex, which would be r';[ \t\r\n]*q[ \t\r\n]*=[ \t\r\n]*'
    pos = 0
    while 0 <= pos < len(s):
        pos = s.find(';', pos)
        if pos < 0:
            break # no more parameters
        startpos = pos
        pos = pos + 1
        while pos < len(s) and s[pos] in LWS:
            pos = pos + 1
        if pos < len(s) and s[pos] == 'q':
            pos = pos + 1
            while pos < len(s) and s[pos] in LWS:
                pos = pos + 1
            if pos < len(s) and s[pos] == '=':
                pos = pos + 1
                while pos < len(s) and s[pos] in LWS:
                    pos = pos + 1
                # Found ";q=": split just before the ';' and just
                # after the '=' (plus any trailing white space).
                return ( s[:startpos], s[pos:] )
    return (s, '')
def parse_qvalue_accept_list( s, start=0, item_parser=parse_token ):
    """Parses any of the Accept-* style headers with quality factors.

    This is a low-level function.  It returns a list of tuples, each like:
       (item, item_parms, qvalue, accept_parms)

    You can pass in a function which parses each of the item strings, or
    accept the default where the items must be simple tokens.  Note that
    your parser should not consume any parameters (past the special "q"
    parameter anyway).

    The item_parms and accept_parms are each lists of (name,value) tuples.

    The qvalue is the quality factor, a number from 0 to 1 inclusive.

    Returns a tuple (itemlist, chars_consumed).
    """
    itemlist = []
    pos = start
    if pos >= len(s):
        raise ParseError('Starting position is beyond the end of the string',s,pos)
    item = None
    while pos < len(s):
        item, k = item_parser(s, pos)
        pos += k
        # Skip linear white space following the item.
        while pos < len(s) and s[pos] in LWS:
            pos += 1
        if pos >= len(s) or s[pos] in ',;':
            itemparms, qvalue, acptparms = [], None, []
            if pos < len(s) and s[pos] == ';':
                pos += 1
                while pos < len(s) and s[pos] in LWS:
                    pos += 1
                # Parameters appearing before the "q" parameter belong to
                # the item; any appearing after it are accept-extension
                # parameters.
                parmlist, k = parse_parameter_list(s, pos)
                for p, v in parmlist:
                    if p == 'q' and qvalue is None:
                        try:
                            qvalue = float(v)
                        except ValueError:
                            raise ParseError('qvalue must be a floating point number',s,pos)
                        if qvalue < 0 or qvalue > 1:
                            raise ParseError('qvalue must be between 0 and 1, inclusive',s,pos)
                    elif qvalue is None:
                        itemparms.append( (p,v) )
                    else:
                        acptparms.append( (p,v) )
                pos += k
            if item:
                # Add the item to the list
                if qvalue is None:
                    qvalue = 1    # no q given: default preference is 1
                itemlist.append( (item, itemparms, qvalue, acptparms) )
                item = None
            # skip commas
            while pos < len(s) and s[pos] == ',':
                pos += 1
            while pos < len(s) and s[pos] in LWS:
                pos += 1
        else:
            break
    return itemlist, pos - start
def parse_accept_header( header_value ):
    """Parses the Accept: header.

    Pass in the header's value only, without the header name itself.
    This will parse the value of any of the similarly-formatted HTTP
    headers "Accept", "Accept-Charset", "Accept-Encoding", or
    "Accept-Language": a list of items, each with an optional quality
    factor (qvalue) in the inclusive range 0.0 to 1.0.

    Returns a list of tuples, one per item and in the order the items
    appear in the header:

        ( item, qvalue, accept_parms )

    For the Accept header each item is a content_type object (its
    parameters, such as charset, are set on the object itself);
    accept_parms is a list of (attribute,value) tuples occurring after
    the "q=" attribute, usually empty.  For example,

        text/plain; charset="utf-8"; q=.5; columns=80

    produces a content_type for 'text/plain; charset=utf-8', a qvalue
    of 0.5, and accept_parms of [('columns','80')].

    Note that a qvalue of 0 means the item is explicitly NOT acceptable
    to the user agent, and should be handled specially by the caller.

    Raises ParseError if the header can not be fully parsed.
    """
    def parse_mt_only(s, start):
        # Item parser: a bare media type, leaving all parameters to the
        # qvalue machinery in parse_qvalue_accept_list().
        media, consumed = parse_media_type(s, start, with_parameters=False)
        ct = content_type()
        ct.major = media[0]
        ct.minor = media[1]
        return ct, consumed
    alist, consumed = parse_qvalue_accept_list( header_value, item_parser=parse_mt_only )
    if consumed < len(header_value):
        raise ParseError('Accept header is invalid',header_value,consumed)
    results = []
    for ct, ctparms, q, acptparms in alist:
        if ctparms:
            # Attach the pre-q parameters (e.g. charset) to the type itself.
            ct.set_parameters( dict(ctparms) )
        results.append( (ct, q, acptparms) )
    return results
def parse_media_type(media_type, start=0, with_parameters=True):
    """Parses a media type (MIME type) designator into its parts.

    Given a media type string, returns a nested tuple of its parts:

        ((major, minor, parmlist), chars_consumed)

    where parmlist is a list of (parm_name, parm_value) tuples, with
    quoted-values appropriately unquoted and unescaped.

    If 'with_parameters' is False, parsing stops immediately after the
    minor media type and no semicolon-separated parameters are consumed.

    Examples:
        image/png -> (('image','png',[]), 9)
        text/plain; charset="utf-16be"
                  -> (('text','plain',[('charset','utf-16be')]), 30)
    """
    s = media_type
    pos = start
    major, k = parse_token(s, pos)
    if k == 0:
        raise ParseError('Media type must be of the form "major/minor".', s, pos)
    pos += k
    if pos >= len(s) or s[pos] != '/':
        raise ParseError('Media type must be of the form "major/minor".', s, pos)
    pos += 1  # step over the '/'
    minor, k = parse_token(s, pos)
    if k == 0:
        raise ParseError('Media type must be of the form "major/minor".', s, pos)
    pos += k
    parameters = []
    if with_parameters:
        parameters, k = parse_parameter_list(s, pos)
        pos += k
    return ((major, minor, parameters), pos - start)
def parse_parameter_list(s, start=0):
    """Parses a semicolon-separated 'parameter=value' list.

    Returns a tuple (parmlist, chars_consumed), where parmlist
    is a list of tuples (parm_name, parm_value).

    The parameter values will be unquoted and unescaped as needed.

    Empty parameters (as in ";;") are skipped, as is insignificant
    white space.  The list returned is kept in the same order as the
    parameters appear in the string.

    Raises ParseError if a parameter name is not followed by '='.
    """
    pos = start
    parmlist = []
    while pos < len(s):
        while pos < len(s) and s[pos] in LWS:
            pos += 1 # skip whitespace
        if pos < len(s) and s[pos] == ';':
            pos += 1
            while pos < len(s) and s[pos] in LWS:
                pos += 1 # skip whitespace
        if pos >= len(s):
            break
        parmname, k = parse_token(s, pos)
        if parmname:
            pos += k
            while pos < len(s) and s[pos] in LWS:
                pos += 1 # skip whitespace
            if not (pos < len(s) and s[pos] == '='):
                raise ParseError('Expected an "=" after parameter name', s, pos)
            pos += 1
            while pos < len(s) and s[pos] in LWS:
                pos += 1 # skip whitespace
            # The value may be either a bare token or a quoted-string.
            parmval, k = parse_token_or_quoted_string( s, pos )
            pos += k
            parmlist.append( (parmname, parmval) )
        else:
            # No parameter name at this position; stop here.
            break
    return parmlist, pos - start
class content_type(object):
    """This class represents a media type (aka a MIME content type), including parameters.

    You initialize these by passing in a content-type declaration
    string, such as "text/plain; charset=ascii", to the constructor or
    to the set() method.  If you provide no string value, the object
    returned will represent the wildcard */* content type.

    Normally you will get the value back by using str(), or optionally
    you can access the components via the 'major', 'minor', 'media_type',
    or 'parmdict' members.
    """
    def __init__(self, content_type_string=None, with_parameters=True):
        """Create a new content_type object.

        See the set() method for a description of the arguments.
        """
        if content_type_string:
            self.set( content_type_string, with_parameters=with_parameters )
        else:
            # No declaration given: represent the universal wildcard.
            self.set( '*/*' )
    def set_parameters(self, parameter_list_or_dict):
        """Sets the optional parameters based upon the parameter list.

        The argument may either be a mapping of parameter names to
        values, or a semicolon-separated 'name=value' string.  Any
        parameters which already exist on this object will be deleted,
        unless they appear in the given parameter list.

        Raises ParseError if a parameter string can not be fully parsed.
        """
        if hasattr(parameter_list_or_dict, 'items'):
            # Already a dictionary (or other mapping).
            # BUGFIX: the old code tested for the Python 2-only
            # 'has_key' attribute, and its parse branch referenced the
            # undefined names 'parameter_list' / 'paramter_list'.
            pl = parameter_list_or_dict
        else:
            pl, k = parse_parameter_list(parameter_list_or_dict)
            if k < len(parameter_list_or_dict):
                raise ParseError('Invalid parameter list',parameter_list_or_dict,k)
        self.parmdict = dict(pl)
    def set(self, content_type_string, with_parameters=True):
        """Parses the content type string and sets this object to it's value.

        For a more complete description of the arguments, see the
        documentation for the parse_media_type() function in this module.
        """
        mt, k = parse_media_type( content_type_string, with_parameters=with_parameters )
        if k < len(content_type_string):
            raise ParseError('Not a valid content type',content_type_string, k)
        major, minor, pdict = mt
        self._set_major( major )
        self._set_minor( minor )
        self.parmdict = dict(pdict)
    def _get_major(self):
        return self._major
    def _set_major(self, s):
        s = s.lower()  # media types are case-insensitive
        if not is_token(s):
            raise ValueError('Major media type contains an invalid character')
        self._major = s
    def _get_minor(self):
        return self._minor
    def _set_minor(self, s):
        s = s.lower()  # media types are case-insensitive
        if not is_token(s):
            raise ValueError('Minor media type contains an invalid character')
        self._minor = s
    major = property(_get_major,_set_major,doc="Major media classification")
    minor = property(_get_minor,_set_minor,doc="Minor media sub-classification")
    def __str__(self):
        """String value, e.g. 'text/plain; charset=utf-8'."""
        s = '%s/%s' % (self.major, self.minor)
        if self.parmdict:
            extra = '; '.join([ '%s=%s' % (a[0],quote_string(a[1],False)) for a in self.parmdict.items()])
            s += '; ' + extra
        return s
    def __unicode__(self):
        """Unicode string value (Python 2 only; str() covers Python 3)."""
        if PY3 :
            return str(self.__str__())
        else :
            return unicode(self.__str__())
    def __repr__(self):
        """Python representation of this object."""
        s = '%s(%s)' % (self.__class__.__name__, repr(self.__str__()))
        return s
    def __hash__(self):
        """Hash this object; the hash is dependent only upon the value."""
        return hash(str(self))
    def __getstate__(self):
        """Pickler"""
        return str(self)
    def __setstate__(self, state):
        """Unpickler"""
        self.set(state)
    def __len__(self):
        """Logical length of this media type.
        For example:
           len('*/*') -> 0
           len('image/*') -> 1
           len('image/png') -> 2
           len('text/plain; charset=utf-8')  -> 3
           len('text/plain; charset=utf-8; filename=xyz.txt') -> 4
        """
        if self.major == '*':
            return 0
        elif self.minor == '*':
            return 1
        else:
            return 2 + len(self.parmdict)
    def __eq__(self, other):
        """Equality test.

        Note that this is an exact match, including any parameters if any.
        """
        return self.major == other.major and \
               self.minor == other.minor and \
               self.parmdict == other.parmdict
    def __ne__(self, other):
        """Inequality test."""
        return not self.__eq__(other)
    def _get_media_type(self):
        """Returns the media 'type/subtype' string, without parameters."""
        return '%s/%s' % (self.major, self.minor)
    media_type = property(_get_media_type, doc="Returns the just the media type 'type/subtype' without any paramters (read-only).")
    def is_wildcard(self):
        """Returns True if this is a 'something/*' media type.
        """
        return self.minor == '*'
    def is_universal_wildcard(self):
        """Returns True if this is the unspecified '*/*' media type.
        """
        return self.major == '*' and self.minor == '*'
    def is_composite(self):
        """Is this media type composed of multiple parts.
        """
        return self.major == 'multipart' or self.major == 'message'
    def is_xml(self):
        """Returns True if this media type is XML-based.

        Note this does not consider text/html to be XML, but
        application/xhtml+xml is.
        """
        return self.minor == 'xml' or self.minor.endswith('+xml')
# Some common media types
content_formdata = content_type('multipart/form-data')  # HTML file-upload forms
content_urlencoded = content_type('application/x-www-form-urlencoded')  # default HTML form encoding
content_byteranges = content_type('multipart/byteranges') # RFC 2616 sect 14.16
content_opaque = content_type('application/octet-stream')  # arbitrary binary data
content_html = content_type('text/html')
content_xhtml = content_type('application/xhtml+xml')
def acceptable_content_type( accept_header, content_types, ignore_wildcard=True ):
    """Determines if the given content type is acceptable to the user agent.

    The accept_header should be the value present in the HTTP "Accept:"
    header, either as a string or pre-parsed via parse_accept_header().

    The content_types argument should either be a single MIME media
    type string, or a sequence of them.  It represents the set of
    content types that the caller (server) is willing to send.
    Generally, the server content_types should not contain any
    wildcarded values.

    This function determines which content type is the most preferred
    and acceptable to both the user agent and the server.  If one is
    negotiated it returns a four-valued tuple:

        (server_content_type, ua_content_range, qvalue, accept_parms)

    where the first value is one of the server's content_types and the
    rest describe which of the client's acceptable content_types was
    matched (accept_parms is usually an empty list).

    If no content type could be negotiated, this function returns None
    (and the caller should typically respond with HTTP 406 Not
    Acceptable).

    Note that the wildcarded content type "*/*" sent by the client will
    be ignored, since it is often incorrectly sent by web browsers that
    don't really mean it.  To override this, call with
    ignore_wildcard=False.  Partial wildcards such as "image/*" will
    always be processed, but at a lower priority than a complete
    matching type.

    See also: RFC 2616 section 14.1, and
       <http://www.iana.org/assignments/media-types/>
    """
    if _is_string(accept_header):
        accept_list = parse_accept_header(accept_header)
    else:
        accept_list = accept_header
    if _is_string(content_types):
        content_types = [content_types]
    server_ctlist = [content_type(ct) for ct in content_types]
    # BUGFIX: removed "del ct" here -- on Python 3 the comprehension
    # variable does not leak into this scope, so deleting it raised
    # NameError on every call.
    best = None   # five-tuple: (server_ct, client_ct, qvalue, accept_parms, matchlen)
    for server_ct in server_ctlist:
        best_for_this = None
        for client_ct, qvalue, aargs in accept_list:
            if ignore_wildcard and client_ct.is_universal_wildcard():
                continue # */* being ignored
            matchlen = 0 # how specifically this one matches (0 is a non-match)
            if client_ct.is_universal_wildcard():
                matchlen = 1 # */* is a 1
            elif client_ct.major == server_ct.major:
                if client_ct.minor == '*': # something/* is a 2
                    matchlen = 2
                elif client_ct.minor == server_ct.minor: # something/something is a 3
                    matchlen = 3
                    # must make sure all the parms match too
                    for pname, pval in client_ct.parmdict.items():
                        sval = server_ct.parmdict.get(pname)
                        if pname == 'charset':
                            # special case for charset to match aliases
                            pval = canonical_charset(pval)
                            sval = canonical_charset(sval)
                        if sval == pval:
                            matchlen = matchlen + 1
                        else:
                            matchlen = 0
                            break
                else:
                    matchlen = 0
            if matchlen > 0:
                if not best_for_this \
                       or matchlen > best_for_this[-1] \
                       or (matchlen == best_for_this[-1] and qvalue > best_for_this[2]):
                    # This match is better
                    best_for_this = (server_ct, client_ct, qvalue, aargs, matchlen)
        if not best or \
               (best_for_this and best_for_this[2] > best[2]):
            best = best_for_this
    # BUGFIX: qvalue lives at index 2; the old test "best[1] <= 0"
    # compared the client content_type object instead of the qvalue.
    if not best or best[2] <= 0:
        return None
    return best[:-1]
# Aliases of common charsets, see <http://www.iana.org/assignments/character-sets>.
character_set_aliases = {
    # US-ASCII aliases
    'ASCII': 'US-ASCII',
    'ISO646-US': 'US-ASCII',
    'IBM367': 'US-ASCII',
    'CP367': 'US-ASCII',
    'CSASCII': 'US-ASCII',
    'ANSI_X3.4-1968': 'US-ASCII',
    'ISO_646.IRV:1991': 'US-ASCII',
    # Unicode transformation formats
    'UTF7': 'UTF-7',
    'UTF8': 'UTF-8',
    'UTF16': 'UTF-16',
    'UTF16LE': 'UTF-16LE',
    'UTF16BE': 'UTF-16BE',
    'UTF32': 'UTF-32',
    'UTF32LE': 'UTF-32LE',
    'UTF32BE': 'UTF-32BE',
    # Universal character sets (UCS-2 / UCS-4)
    'UCS2': 'ISO-10646-UCS-2',
    'UCS_2': 'ISO-10646-UCS-2',
    'UCS-2': 'ISO-10646-UCS-2',
    'CSUNICODE': 'ISO-10646-UCS-2',
    'UCS4': 'ISO-10646-UCS-4',
    'UCS_4': 'ISO-10646-UCS-4',
    'UCS-4': 'ISO-10646-UCS-4',
    'CSUCS4': 'ISO-10646-UCS-4',
    # ISO 8859 family (Latin and other regional alphabets)
    'ISO_8859-1': 'ISO-8859-1',
    'LATIN1': 'ISO-8859-1',
    'CP819': 'ISO-8859-1',
    'IBM819': 'ISO-8859-1',
    'ISO_8859-2': 'ISO-8859-2',
    'LATIN2': 'ISO-8859-2',
    'ISO_8859-3': 'ISO-8859-3',
    'LATIN3': 'ISO-8859-3',
    'ISO_8859-4': 'ISO-8859-4',
    'LATIN4': 'ISO-8859-4',
    'ISO_8859-5': 'ISO-8859-5',
    'CYRILLIC': 'ISO-8859-5',
    'ISO_8859-6': 'ISO-8859-6',
    'ARABIC': 'ISO-8859-6',
    'ECMA-114': 'ISO-8859-6',
    'ISO_8859-6-E': 'ISO-8859-6-E',
    'ISO_8859-6-I': 'ISO-8859-6-I',
    'ISO_8859-7': 'ISO-8859-7',
    'GREEK': 'ISO-8859-7',
    'GREEK8': 'ISO-8859-7',
    'ECMA-118': 'ISO-8859-7',
    'ISO_8859-8': 'ISO-8859-8',
    'HEBREW': 'ISO-8859-8',
    'ISO_8859-8-E': 'ISO-8859-8-E',
    'ISO_8859-8-I': 'ISO-8859-8-I',
    'ISO_8859-9': 'ISO-8859-9',
    'LATIN5': 'ISO-8859-9',
    'ISO_8859-10': 'ISO-8859-10',
    'LATIN6': 'ISO-8859-10',
    'ISO_8859-13': 'ISO-8859-13',
    'ISO_8859-14': 'ISO-8859-14',
    'LATIN8': 'ISO-8859-14',
    'ISO_8859-15': 'ISO-8859-15',
    'LATIN9': 'ISO-8859-15',
    'ISO_8859-16': 'ISO-8859-16',
    'LATIN10': 'ISO-8859-16',
    }
def canonical_charset( charset ):
    """Returns the canonical or preferred name of a charset.

    Additional character sets can be recognized by this function by
    altering the character_set_aliases dictionary in this module.
    Charsets which are not recognized are simply converted to
    upper-case (as charset names are always case-insensitive).

    See <http://www.iana.org/assignments/character-sets>.
    """
    # It would be nice to use Python's codecs modules for this, but
    # there is no fixed public interface to it's alias mappings.
    if not charset:
        return charset
    upper = charset.upper()
    return character_set_aliases.get(upper, upper)
def acceptable_charset( accept_charset_header, charsets, ignore_wildcard=True, default='ISO-8859-1' ):
    """
    Determines if the given charset is acceptable to the user agent.

    The accept_charset_header should be the value present in the HTTP
    "Accept-Charset:" header, either as a string or as the pre-parsed
    list returned from the parse_accept_header() function in this
    module.

    The charsets argument should either be a charset identifier string,
    or a sequence of them.

    Returns a (charset, qvalue) tuple for the charset which is the most
    preferred and acceptable to both the user agent and the caller; the
    default charset (with a qvalue of 1) if nothing else was negotiable
    but the default is among the offered charsets; or None if no
    charset is negotiable.

    Note that the wildcarded charset "*" will be ignored.  To override
    this, call with ignore_wildcard=False.

    See also: RFC 2616 section 14.2, and
       <http://www.iana.org/assignments/character-sets>
    """
    # BUGFIX: this function previously called "_canonical_charset",
    # which does not exist; the module function is canonical_charset().
    if default:
        default = canonical_charset(default)
    if _is_string(accept_charset_header):
        accept_list = parse_accept_header(accept_charset_header)
    else:
        accept_list = accept_charset_header
    if _is_string(charsets):
        charsets = [canonical_charset(charsets)]
    else:
        charsets = [canonical_charset(c) for c in charsets]
    # Note per RFC that 'ISO-8859-1' is special, and is implictly in the
    # accept list with q=1; unless it is already in the list, or '*' is in the list.
    best = None
    for c, qvalue, junk in accept_list:
        if c == '*':
            default = None
            if ignore_wildcard:
                continue
            if not best or qvalue > best[1]:
                best = (c, qvalue)
        else:
            c = canonical_charset(c)
            for test_c in charsets:
                if c == default:
                    default = None
                if c == test_c and (not best or best[0]=='*' or qvalue > best[1]):
                    best = (c, qvalue)
    if default and default in [test_c.upper() for test_c in charsets]:
        best = (default, 1)
    if best is None:
        # BUGFIX: guard added; previously "best[0]" raised TypeError
        # when nothing was negotiable.
        return None
    if best[0] == '*':
        # Resolve a bare wildcard to the caller's first offered charset.
        best = (charsets[0], best[1])
    return best
class language_tag(object):
    """This class represents an RFC 3066 language tag.

    Initialize objects of this class with a single string representing
    the language tag, such as "en-US".

    Case is insensitive.  Wildcarded subtags are ignored or stripped as
    they have no significance, so that "en-*" is the same as "en".
    However the universal wildcard "*" language tag is kept as-is.

    Note that although relational operators such as < are defined,
    they only form a partial order based upon specialization.

    Thus for example,
         "en" <= "en-US"
    but,
         not "en" <= "de", and
         not "de" <= "en".
    """

    def __init__(self, tagname):
        """Initialize objects of this class with a single string representing
        the language tag, such as "en-US".  Case is insensitive.
        """
        self.parts = tagname.lower().split('-')
        # Strip trailing wildcard subtags ("en-*" == "en"), but keep
        # the bare universal wildcard "*" intact.
        while len(self.parts) > 1 and self.parts[-1] == '*':
            del self.parts[-1]

    def __len__(self):
        """Number of subtags in this tag; the universal wildcard has none."""
        if len(self.parts) == 1 and self.parts[0] == '*':
            return 0
        return len(self.parts)

    def __str__(self):
        """The standard string form of this language tag."""
        a = []
        if len(self.parts) >= 1:
            a.append(self.parts[0])
        if len(self.parts) >= 2:
            if len(self.parts[1]) == 2:
                # Two-letter second subtags are country codes,
                # conventionally shown in upper case.
                a.append( self.parts[1].upper() )
            else:
                a.append( self.parts[1] )
        a.extend( self.parts[2:] )
        return '-'.join(a)

    def __unicode__(self):
        """The unicode string form of this language tag (Python 2 only)."""
        if PY3 :
            return str(self.__str__())
        else :
            return unicode(self.__str__())

    def __repr__(self):
        """The python representation of this language tag."""
        s = '%s("%s")' % (self.__class__.__name__, self.__str__())
        return s

    def superior(self):
        """Returns another instance of language_tag which is the superior.

        Thus en-US gives en, and en gives *.
        """
        if len(self) <= 1:
            return self.__class__('*')
        return self.__class__( '-'.join(self.parts[:-1]) )

    def all_superiors(self, include_wildcard=False):
        """Returns a list of this language and all it's superiors.

        If include_wildcard is False, then "*" will not be among the
        output list, unless this language is itself "*".
        """
        langlist = [ self ]
        l = self
        while not l.is_universal_wildcard():
            l = l.superior()
            if l.is_universal_wildcard() and not include_wildcard:
                continue
            langlist.append(l)
        return langlist

    def is_universal_wildcard(self):
        """Returns True if this language tag represents all possible
        languages, by using the reserved tag of "*".
        """
        return len(self.parts) == 1 and self.parts[0] == '*'

    def dialect_of(self, other, ignore_wildcard=True):
        """Is this language a dialect (or subset/specialization) of another.

        This method returns True if this language is the same as or a
        specialization (dialect) of the other language_tag.

        If ignore_wildcard is False, then all languages will be
        considered to be a dialect of the special language tag of "*".
        """
        if not ignore_wildcard and self.is_universal_wildcard():
            return True
        for i in range( min(len(self), len(other)) ):
            if self.parts[i] != other.parts[i]:
                return False
        if len(self) >= len(other):
            return True
        return False

    def __eq__(self, other):
        """== operator.  Are the two languages the same?"""
        return self.parts == other.parts

    def __ne__(self, other):
        """!= operator.  Are the two languages different?"""
        # BUGFIX: this method was previously misspelled "__neq__",
        # which is not a special method name Python ever invokes.
        return not self.__eq__(other)

    def __lt__(self, other):
        """< operator.  Returns True if the other language is a more
        specialized dialect of this one."""
        return other.dialect_of(self) and self != other

    def __le__(self, other):
        """<= operator.  Returns True if the other language is the same
        as or a more specialized dialect of this one."""
        return other.dialect_of(self)

    def __gt__(self, other):
        """> operator.  Returns True if this language is a more
        specialized dialect of the other one."""
        return self.dialect_of(other) and self != other

    def __ge__(self, other):
        """>= operator.  Returns True if this language is the same as
        or a more specialized dialect of the other one."""
        return self.dialect_of(other)
def parse_accept_language_header( header_value ):
    """Parses the Accept-Language header.

    Returns a list of tuples, each like:
       (language_tag, qvalue, accept_parameters)

    Raises ParseError when the header is malformed or when a language
    tag carries parameters (which the grammar does not allow).
    """
    accept_items, consumed = parse_qvalue_accept_list( header_value )
    # The generic qvalue-list parser must have consumed the whole header.
    if consumed < len(header_value):
        raise ParseError('Accept-Language header is invalid',header_value,consumed)
    result = []
    for token, langparms, q, acptparms in accept_items:
        if langparms:
            raise ParseError('Language tag may not have any parameters',header_value,0)
        result.append( (language_tag(token), q, acptparms) )
    return result
def acceptable_language( accept_header, server_languages, ignore_wildcard=True, assume_superiors=True ):
    """Determines if the given language is acceptable to the user agent.

    The accept_header should be the value present in the HTTP
    "Accept-Language:" header.  In mod_python this is typically
    obtained from the req.http_headers_in table; in WSGI it is
    environ["Accept-Language"]; other web frameworks may provide other
    methods of obtaining it.

    Optionally the accept_header parameter can be pre-parsed, as
    returned by the parse_accept_language_header() function defined in
    this module.

    The server_languages argument should either be a single language
    string, a language_tag object, or a sequence of them.  It
    represents the set of languages that the server is willing to
    send to the user agent.

    Note that the wildcarded language tag "*" will be ignored.  To
    override this, call with ignore_wildcard=False, and even then
    it will be the lowest-priority choice regardless of its
    quality factor (as per HTTP spec).

    If assume_superiors is True then the languages that the
    browser accepts will automatically include all superior languages.
    Any superior languages which must be added are done so with one
    half the qvalue of the language which is present.  For example, if
    the accept string is "en-US", then it will be treated as if it
    were "en-US, en;q=0.5".  Note that although the HTTP 1.1 spec says
    that browsers are supposed to encourage users to configure all
    acceptable languages, sometimes they don't, thus the ability
    for this function to assume this.  But setting assume_superiors
    to False will ensure strict adherence to the HTTP 1.1 spec; which
    means that if the browser accepts "en-US", then it will not
    be acceptable to send just "en" to it.

    This function returns the language which is the most preferred and
    is acceptable to both the user agent and the caller.  It will
    return None if no language is negotiable, otherwise the return
    value is always an instance of language_tag.

    See also: RFC 3066 <http://www.ietf.org/rfc/rfc3066.txt>, and
    ISO 639, links at <http://en.wikipedia.org/wiki/ISO_639>, and
    <http://www.iana.org/assignments/language-tags>.
    """
    # Note special instructions from RFC 2616 sect. 14.1:
    #  "The language quality factor assigned to a language-tag by the
    #  Accept-Language field is the quality value of the longest
    #  language- range in the field that matches the language-tag."
    if _is_string(accept_header):
        accept_list = parse_accept_language_header(accept_header)
    else:
        accept_list = accept_header

    # Possibly add in any "missing" languages that the browser may
    # have forgotten to include in the list. Insure list is sorted so
    # more general languages come before more specific ones.
    accept_list.sort()
    all_tags = [a[0] for a in accept_list]
    if assume_superiors:
        to_add = []
        for langtag, qvalue, aargs in accept_list:
            if len(langtag) >= 2:
                for suptag in langtag.all_superiors( include_wildcard=False ):
                    if suptag not in all_tags:
                        # Add in superior at half the qvalue
                        to_add.append( (suptag, qvalue / 2, '') )
                        all_tags.append( suptag )
        accept_list.extend( to_add )

    # Convert server_languages to a list of language_tags
    if _is_string(server_languages):
        server_languages = [language_tag(server_languages)]
    elif isinstance(server_languages, language_tag):
        server_languages = [server_languages]
    else:
        server_languages = [language_tag(lang) for lang in server_languages]

    # Select the best one
    best = None  # tuple (langtag, qvalue, matchlen)
    for langtag, qvalue, aargs in accept_list:
        # aargs is ignored for Accept-Language
        if qvalue <= 0:
            continue # UA doesn't accept this language
        if ignore_wildcard and langtag.is_universal_wildcard():
            continue # "*" being ignored
        for svrlang in server_languages:
            # The best match is determined first by the most specific
            # match length, and then by the quality factor.
            # matchlen: -1 means no match; 0 means matched only via the
            # universal wildcard; >= 1 counts matching subtags.
            matchlen = -1
            if svrlang.dialect_of( langtag, ignore_wildcard=ignore_wildcard ):
                matchlen = len(langtag)
            # BUG FIX: require an actual match (matchlen >= 0) before
            # considering this candidate.  Previously, while `best` was
            # still None, a *non-matching* server language would seed
            # `best` with matchlen == -1, so the function could return
            # a language the server does not actually offer instead of
            # None.
            if matchlen >= 0 and (
                    not best
                    or matchlen > best[2]
                    or (matchlen == best[2] and qvalue > best[1]) ):
                # This match is better
                best = (langtag, qvalue, matchlen)
    if not best:
        return None
    return best[0]
# Clean up global namespace
try:
    # __emulating_set is presumably assigned earlier in this module
    # (outside this chunk) when module-level set/frozenset substitutes
    # were installed for very old Pythons -- TODO confirm against the
    # full file.  If so, remove the substitutes so they do not leak as
    # public module names.
    if __emulating_set:
        del set
        del frozenset
except NameError:
    # __emulating_set was never defined (the built-in set types were
    # available), so there is nothing to clean up.
    pass
# end of file
|
DmPo/Schemaorg_CivicOS | lib/rdflib/plugins/parsers/pyRdfa/extras/__init__.py | <gh_stars>10-100
"""
Collection of external modules that are used by pyRdfa and are added for an easier
distribution
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.