bradshjg/nameko-cron
|
nameko_cron/__init__.py
|
import datetime
from enum import Enum
import time
from logging import getLogger
from croniter import croniter
from eventlet import Timeout
from eventlet.event import Event
import pytz
from nameko.extensions import Entrypoint
_log = getLogger(__name__)
class ConcurrencyPolicy(Enum):
ALLOW = 'allow'
SKIP = 'skip'
WAIT = 'wait'
class Cron(Entrypoint):
def __init__(self, schedule: str, tz: str = None, concurrency: ConcurrencyPolicy = ConcurrencyPolicy.WAIT, **kwargs):
"""
Cron entrypoint. Fires according to a (possibly timezone-aware)
cron schedule. If no timezone info is passed, the default is UTC.
Set ``concurrency`` to ``ConcurrencyPolicy.ALLOW`` to allow multiple workers
to run simultaneously. Set ``concurrency`` to ``ConcurrencyPolicy.SKIP`` to
skip lapsed scheduled runs. The default behavior (``ConcurrencyPolicy.WAIT``)
is to wait until the running worker completes and immediately spawn another
if the schedule has lapsed.
Example::
class Service(object):
name = "service"
@cron(schedule='0 12 * * *', tz='America/Chicago')
def ping(self):
# method executes every day at noon America/Chicago time
print("pong")
"""
self.schedule = schedule
self.tz = tz
self.concurrency = concurrency
self.should_stop = Event()
self.worker_complete = Event()
self.gt = None
super().__init__(**kwargs)
def start(self):
_log.debug('starting %s', self)
self.gt = self.container.spawn_managed_thread(self._run)
def stop(self):
_log.debug('stopping %s', self)
self.should_stop.send(True)
self.gt.wait()
def kill(self):
_log.debug('killing %s', self)
self.gt.kill()
def _get_next_interval(self):
now_utc = datetime.datetime.now(tz=pytz.UTC)
if self.tz:
tz = pytz.timezone(self.tz)
base = now_utc.astimezone(tz)
else:
base = now_utc
cron_schedule = croniter(self.schedule, base)
while True:
yield max(cron_schedule.get_next() - time.time(), 0)
def _run(self):
""" Runs the schedule loop. """
interval = self._get_next_interval()
sleep_time = next(interval)
while True:
# sleep for `sleep_time`, unless `should_stop` fires, in which
# case we leave the while loop and stop entirely
with Timeout(sleep_time, exception=False):
self.should_stop.wait()
break
self.handle_timer_tick()
if self.concurrency != ConcurrencyPolicy.ALLOW:
self.worker_complete.wait()
self.worker_complete.reset()
sleep_time = next(interval)
# a sleep time of zero means the next scheduled start time has already
# passed; if the policy is SKIP, advance the interval once more.
if self.concurrency == ConcurrencyPolicy.SKIP and sleep_time == 0:
sleep_time = next(interval)
def handle_timer_tick(self):
args = ()
kwargs = {}
# Note that we don't catch ContainerBeingKilled here. If that's raised,
# there is nothing for us to do anyway. The exception bubbles, and is
# caught by :meth:`Container._handle_thread_exited`, though the
# triggered `kill` is a no-op, since the container is already
# `_being_killed`.
self.container.spawn_worker(
self, args, kwargs, handle_result=self.handle_result)
def handle_result(self, worker_ctx, result, exc_info):
# we only care about the worker completion if we're going to be waiting for it.
if self.concurrency != ConcurrencyPolicy.ALLOW:
self.worker_complete.send()
return result, exc_info
cron = Cron.decorator
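# Illustrative usage (a hedged sketch, not part of this module): the service
# below assumes a nameko container runs it; schedule/tz semantics follow
# croniter and pytz as documented above.
#
# class Service:
#     name = "reporting"
#
#     @cron(schedule='*/5 * * * *', tz='UTC',
#           concurrency=ConcurrencyPolicy.SKIP)
#     def generate_report(self):
#         # lapsed runs are dropped while a previous worker is busy
#         print("report generated")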
|
joramwessels/torcs-client
|
torcs_tournament.py
|
#! /usr/bin/env python3.5
import logging
import os
import re
import pwd
import csv
import time
import shutil
import psutil
import pathlib
import datetime
import subprocess
from collections import OrderedDict, abc
import elo
import yaml
from bs4 import BeautifulSoup
DROPBOX_DEBUG = logging.DEBUG - 1
logger = logging.getLogger(None if __name__ == '__main__' else __name__)
def path_rel_to_dir(path, directory):
if not os.path.isabs(path):
path = os.path.join(directory, path)
return path
def really_running(proc):
"""Check whether a process is running _and_ isn't a zombie"""
return proc.is_running() and proc.status() != psutil.STATUS_ZOMBIE
class OrderedLoader(yaml.Loader):
def construct_mapping(self, node, deep=False):
# self.flatten_mapping(node)
return OrderedDict(self.construct_pairs(node, deep))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
OrderedLoader.construct_mapping
)
class ParseError(Exception):
pass
class Player(object):
"""
Container for player information.
Every argument of `start_command` will be formatted using
`format(port=<value>)`
`start_command` is issued with `working_dir` as working directory and
`process_owner` as user. If `process_owner` is None, `token` will be used.
The filenames `stdout` and `stderr` are relative to `output_dir`.
"""
def __init__(self, token, working_dir, rating=None,
start_command=['./start.sh', '-p', '{port}'],
output_dir='./output/',
stdout='./{timestamp}-stdout.txt',
stderr='./{timestamp}-stderr.txt',
message_file='./current_rating.txt',
rating_message="Your current rating is: {rating}",
rank_message="You are ranked {rank} out of {total}",
process_owner=None):
self.token = token
self.working_dir = working_dir
if rating is not None:
self.rating = elo.RATING_CLASS(rating)
else:
self.init_rating()
self.start_command = start_command
self.output_dir = path_rel_to_dir(output_dir, self.working_dir)
self.stdout = path_rel_to_dir(stdout, self.output_dir)
self.stderr = path_rel_to_dir(stderr, self.output_dir)
self.message_file = path_rel_to_dir(message_file, self.output_dir)
self.rating_message = rating_message
self.rank_message = rank_message
self.process_owner = process_owner \
if process_owner is not None \
else self.token
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
def __str__(self):
return self.__class__.__name__ + "({self.token!r}, " \
"{self.rating!r}" \
")".format(self=self)
def __repr__(self):
return self.__class__.__name__ + "(" \
"{self.token!r}, " \
"{self.working_dir!r}, " \
"{self.rating!r}, " \
"{self.start_command!r}, " \
"{self.output_dir!r}, " \
"{self.stdout!r}, " \
"{self.stderr!r}, " \
"{self.message_file!r}, " \
"{self.rating_message!r}, " \
"{self.process_owner!r}" \
")".format(self=self)
def init_rating(self):
self.rating = elo.RATING_CLASS(elo.INITIAL)
class Rater(object):
def __init__(self, players=(), filename=None,
ignore_unknown_players=False):
self.player_map = {}
for player in players:
self.add_player(player)
self.filename = filename
self.ignore_unknown_players = ignore_unknown_players
if self.filename is not None and os.path.exists(self.filename):
self.read_file()
def add_player(self, player):
"""Add a player to this rater."""
if player.token in self.player_map:
raise ValueError(
"A token may only be specified once. Token: {}".format(
player.token
)
)
self.player_map[player.token] = player
def filename_check(self, filename=None):
if filename is None:
if self.filename is None:
raise ValueError(
"Please specify a filename as argument or assign it to"
" `self.filename`."
)
else:
filename = self.filename
return filename
def read_file(self, filename=None):
filename = self.filename_check(filename)
with open(filename) as fd:
self.set_ratings(map(self.clean_line, csv.reader(fd)))
def set_ratings(self, iterable):
tokens = set()
for line in iterable:
token = line[0]
if token in tokens:
raise ValueError(
"A token may only be specified once. Token: {}".format(
token
)
)
tokens.add(token)
if len(line) > 2:
raise ValueError(
"A line may only contain a token and an optional "
"rating, got: {}".format(line)
)
if len(line) == 2:
if token in self.player_map:
self.player_map[token].rating = elo.RATING_CLASS(line[1])
elif not self.ignore_unknown_players:
raise ValueError(
"Rating specified for unknown player: {}".format(token)
)
@staticmethod
def clean_line(iterable):
li = list(iterable)
if len(li) != 1 and len(li) != 2:
raise ValueError(
"A ratings file should only contain lines with one or two "
"values, got {}".format(li)
)
if len(li) == 2:
try:
li[1] = elo.RATING_CLASS(li[1])
except ValueError as error:
raise ValueError(
"The second value of a rating line should be "
"interpretable as {}. I received the following error "
"while casting:\n\t{}".format(
elo.RATING_CLASS.__name__,
error
)
)
return li
def save_ratings(self, filename=None):
"""
Save the ratings of all players to a file.
If a filename is specified, that file is used, otherwise
`self.filename` is used. If neither is specified, a ValueError is
raised.
"""
filename = self.filename_check(filename)
logger.info("Saving ratings in {}".format(filename))
with open(filename, 'w') as fd:
csv.writer(fd).writerows(
sorted(
((p.token, p.rating) for p in self.player_map.values()),
key=lambda p: p[1]
)
)
@staticmethod
def adjust_all(ranking):
"""
Adjust the ratings of given Players according to the ranked results.
In a ranking, every player is considered to have beaten all players ranked after it.
"""
ranking = list(ranking)
# Calculate new ratings
new_ratings = [
elo.rate(
player.rating,
[
((pi < oi), opponent.rating)
for oi, opponent in enumerate(ranking)
if opponent is not player
]
)
for pi, player in enumerate(ranking)
]
# Save new ratings
for player, rating in zip(ranking, new_ratings):
player.rating = rating
def restart(self):
for player in self.player_map.values():
player.init_rating()
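# Illustrative use of the elo API as this module calls it (hedged: inferred
# from `adjust_all` and `init_rating` above, assuming module-level helpers):
#
# import elo
# a = elo.RATING_CLASS(elo.INITIAL)
# b = elo.RATING_CLASS(elo.INITIAL)
# # `a` beat `b`: elo.rate() takes a rating and a series of
# # (won, opponent_rating) pairs and returns the new rating.
# new_a = elo.rate(a, [(True, b)])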
class Controller(object):
def __init__(self, rater, queue, torcs_config_file,
server_stdout='{timestamp}-server_out.txt',
server_stderr='{timestamp}-server_err.txt',
separate_player_uid=False,
set_file_owner=False,
set_file_mode=False,
rater_backup_filename=None,
result_filename_format="{driver} - {base}",
timestamp_format='%Y-%m-%d-%H.%M',
result_path='~/.torcs/results/',
torcs_command=['torcs', '-r', '{config_file}'],
driver_to_port=OrderedDict([
('scr_server 1', 3001),
('scr_server 2', 3002),
('scr_server 3', 3003),
('scr_server 4', 3004),
('scr_server 5', 3005),
('scr_server 6', 3006),
('scr_server 7', 3007),
('scr_server 8', 3008),
('scr_server 9', 3009),
('scr_server 10', 3010),
]),
raise_on_too_fast_completion=True,
torcs_min_time=1,
torcs_child_wait=0.5,
shutdown_wait=1,
crash_check_wait=0.2,
file_mode=0o700):
"""
Orchestrate the races and save the ratings.
When the rating is left out of the ratings file for a token, it is
assigned the default rating, which will be saved to the same file
when running `save_ratings`.
N.B. `~` is only expanded to the user directory in `result_path` at
initialisation of the controller.
"""
self.rater = rater
self.queue = queue
self.torcs_config_file = torcs_config_file
self.server_stdout = server_stdout
self.server_stderr = server_stderr
self.separate_player_uid = separate_player_uid
self.set_file_owner = set_file_owner
self.set_file_mode = set_file_mode
self.rater_backup_filename = rater_backup_filename
self.result_filename_format = result_filename_format
self.timestamp_format = timestamp_format
self.result_path = os.path.expanduser(result_path)
self.torcs_command = torcs_command
self.driver_to_port = driver_to_port
self.raise_on_too_fast_completion = raise_on_too_fast_completion
self.torcs_min_time = torcs_min_time
self.torcs_child_wait = torcs_child_wait
self.shutdown_wait = shutdown_wait
self.crash_check_wait = crash_check_wait
self.file_mode = file_mode
logger.debug("Result path: {}".format(self.result_path))
# Read drivers from config
self.drivers = self.read_lineup(self.torcs_config_file)
def timestamp(self):
return datetime.datetime.now().strftime(self.timestamp_format)
@staticmethod
def rank_text(rank):
if rank == 0:
return '1st'
elif rank == 1:
return '2nd'
elif rank == 2:
return '3rd'
else:
return str(rank + 1) + 'th'
@staticmethod
def read_ranking(results_file):
"""
Return a ranked list of driver names read from the given results file.
NB. Driver names are _not_ tokens. One should first look up which token
corresponds with which driver name.
"""
with open(results_file) as fd:
soup = BeautifulSoup(fd, 'xml')
result_soup = soup.find('section', attrs={'name': 'Results'})
rank_soup = result_soup.find('section', attrs={'name': 'Rank'})
ranks = [
(
int(section['name']),
section.find('attstr', attrs={'name': 'name'})['val']
)
for section in rank_soup.findAll('section')
]
return list(zip(*sorted(ranks)))[1]
@staticmethod
def read_lineup(torcs_config_file):
with open(torcs_config_file) as fd:
soup = BeautifulSoup(fd, 'xml')
drivers_sec = soup.find('section', attrs={'name': 'Drivers'})
drivers = []
for sec in drivers_sec.findAll('section'):
tag, attrs = 'attstr', {'name': 'module'}
module = sec.find(tag, attrs=attrs)
if module is None:
raise ParseError(
"Error parsing {file}: expected a {tag} tag with the "
"following attributes: {attrs!r}".format(
file=torcs_config_file,
tag=tag,
attrs=attrs
)
)
expected = 'scr_server'
if module.get('val', Exception()) != expected:
raise ParseError(
"Error parsing {file}: all drivers are expected to be the "
"'{expected}' module.".format(
file=torcs_config_file,
expected=expected
)
)
tag, attrs = 'attnum', {'name': 'idx'}
idx = sec.find(tag, attrs=attrs)
if idx is None:
raise ParseError(
"Error parsing {file}: expected a {tag} tag with the "
"following attributes: {attrs!r}".format(
file=torcs_config_file,
tag=tag,
attrs=attrs
)
)
val = idx.get('val', None)
if val is None:
raise ParseError(
"Error parsing {file}: expected {tag} to have the "
"attribute {attr}.".format(
file=torcs_config_file,
tag=tag,
attr='val',
)
)
drivers.append((sec['name'], val))
# I now have a list of (rank, id) pairs
# Somehow, the number in the name of the scr_server driver is one
# larger than the `idx` of the driver.
return [
'scr_server {}'.format(int(idx) + 1)
for _, idx in sorted(drivers)
]
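# For reference, `read_lineup` expects the TORCS race config to contain a
# drivers section shaped roughly like this (a hedged reconstruction from the
# parsing logic above, not copied from an actual config):
#
#   <section name="Drivers">
#     <section name="1">
#       <attnum name="idx" val="0"/>
#       <attstr name="module" val="scr_server"/>
#     </section>
#   </section>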
def restart(self):
"""Restart the tournament, making all ratings equal."""
self.rater.restart()
def race_and_save(self, simulate=False):
"""
Run a race (see `Controller.race`) and save the ratings.
"""
self.race(simulate=simulate)
self.rater.save_ratings()
def race(self, simulate=False):
"""
Run a race
Automatically determine the number of players to be raced and ask the
queue which players are next. Race the players, save the results and
update the queue.
"""
players = self.queue.first_n(len(self.drivers))
logger.info("Racing: {}".format(', '.join(
repr(player.token) for player in players
)))
self.race_once(players, simulate=simulate)
self.queue.requeue(players)
def race_tokens(self, tokens, simulate=False):
return self.race_once(
map(self.rater.player_map.get, tokens),
simulate=simulate
)
def race_once(self, players, simulate=False):
"""
Run one race with TORCS and the given players.
Also make a backup of the ratings if `self.rater_backup_filename` is
not None.
NB. Please make sure the number of players given matches the specified
number of players in the configuration file of this Controller.
The output can be found under:
<torcs installation directory>/results
"""
players = list(players)
if len(self.drivers) != len(players):
raise ValueError(
"{nplay} players where given, but {file} specifies {ndriv} "
"drivers".format(
nplay=len(players),
ndriv=len(self.drivers),
file=self.torcs_config_file
)
)
driver_to_player = OrderedDict(zip(self.drivers, players))
open_files = []
processes = []
try:
# Start server
server_stdout = open(
self.server_stdout.format(timestamp=self.timestamp()),
'w'
)
open_files.append(server_stdout)
server_stderr = open(
self.server_stderr.format(timestamp=self.timestamp()),
'w'
)
open_files.append(server_stderr)
logger.info("Starting TORCS...")
if simulate:
logger.warning(
"This is a simulation! No child processes are started."
)
else:
logger.debug(
"TORCS config to use: {}".format(self.torcs_config_file)
)
config_file = os.path.abspath(self.torcs_config_file)
logger.debug("TORCS config to use: {}".format(config_file))
command = list(map(
lambda s: s.format(
config_file=config_file
),
self.torcs_command
))
logger.debug("TORCS command to be run: {}".format(command))
server_process = psutil.Popen(
command,
stdout=server_stdout,
stderr=server_stderr,
)
processes.append(server_process)
# TORCS starts a child process, which doesn't terminate
# automatically if `server_process` is terminated or crashes.
time.sleep(self.torcs_child_wait)
children = server_process.children()
logger.debug("TORCS server children: {}".format(children))
processes.extend(children)
# Start players
logger.info("Starting players...")
for driver, player in driver_to_player.items():
stdout = open(
player.stdout.format(timestamp=self.timestamp()),
'w'
)
open_files.append(stdout)
stderr = open(
player.stderr.format(timestamp=self.timestamp()),
'w'
)
open_files.append(stderr)
# Set the ownership of the files
if self.set_file_owner:
self.change_owner(player)
if self.set_file_mode:
self.change_mode(player)
if simulate:
# Always simulate these functions, just to be sure they
# work
self.get_change_user_fn(player)
self.get_player_env(player)
elif self.separate_player_uid:
processes.append(psutil.Popen(
map(
lambda s: s.format(
port=self.driver_to_port[driver]
),
player.start_command
),
stdout=stdout,
stderr=stderr,
preexec_fn=self.get_change_user_fn(player),
cwd=player.working_dir,
env=self.get_player_env(player)
))
else:
processes.append(psutil.Popen(
map(
lambda s: s.format(
port=self.driver_to_port[driver]
),
player.start_command
),
stdout=stdout,
stderr=stderr,
cwd=player.working_dir,
))
logger.debug("Started {}".format(player))
time.sleep(self.crash_check_wait)
# Check no one crashed in the meantime
for proc in processes:
if not really_running(proc):
name = proc.name() if hasattr(proc, 'name') else proc
raise subprocess.CalledProcessError(
proc.poll() if hasattr(proc, 'poll') else 0,
list(proc.args) or name
)
# Wait for server
logger.info("Waiting for TORCS to finish...")
start_time = time.time()
if not simulate:
server_process.wait()
end_time = time.time()
# Time TORCS ran in seconds
diff_time = end_time - start_time
if not simulate and diff_time < self.torcs_min_time:
logger.warning(
"TORCS only ran for {:.2f} seconds".format(diff_time)
)
if self.raise_on_too_fast_completion:
raise subprocess.SubprocessError(
"TORCS only took {:.2f} seconds to complete".format(
diff_time
)
)
logger.debug("Finished!")
# Check exit status of TORCS
# However, even if something goes wrong, the exit status is 0,
# so I can't know if something went wrong.
# logger.debug("really_running(server_process): {}".format(
# really_running(server_process)
# ))
# logger.debug("server_process.returncode: {}".format(
# server_process.returncode
# ))
# if server_process.returncode:
# raise subprocess.CalledProcessError(
# proc.returncode,
# proc.args
# )
except BaseException:
logger.error("An error occurred, trying to stop gracefully...")
raise
finally:
# Exit running processes
if not simulate:
# Wait a second to give the processes some time
time.sleep(self.shutdown_wait)
# First be nice
for proc in processes:
if really_running(proc):
logger.info("Terminating {}".format(proc))
proc.terminate()
# Wait a second to give the processes some time
time.sleep(self.shutdown_wait)
# Time's up
for proc in processes:
if really_running(proc):
logger.warning("Killing {}".format(proc))
proc.kill()
# Wait a second to give the processes some time
time.sleep(self.shutdown_wait)
# Double check
for proc in processes:
if really_running(proc):
logger.error(
"The following process could not be killed: {}"
.format(proc.cmdline())
)
# Close all open files
for fd in open_files:
logger.debug("Closing {}".format(fd.name))
try:
fd.close()
except Exception as e:
logger.error(e)
logger.info("Closed all files and processes!")
# Give the players the server output
for player in players:
shutil.copyfile(
server_stdout.name,
os.path.join(
player.output_dir,
os.path.basename(server_stdout.name)
)
)
shutil.copyfile(
server_stderr.name,
os.path.join(
player.output_dir,
os.path.basename(server_stderr.name)
)
)
# End of `finally` clause
# Find the correct results file
logger.debug("Result path: {}".format(self.result_path))
out_dir = os.path.join(
self.result_path,
# remove head path and extension
'.'.join(os.path.split(self.torcs_config_file)[1].split('.')[:-1])
)
out_base = sorted(os.listdir(out_dir))[-1]
out_file = os.path.join(
out_dir,
out_base
)
# Give the players the results file
for driver, player in driver_to_player.items():
shutil.copyfile(
out_file,
os.path.join(
player.output_dir,
self.result_filename_format.format(
driver=driver,
base=out_base
)
)
)
# Update ratings according to ranking
ranked_drivers = self.read_ranking(out_file)
self.rater.adjust_all(map(driver_to_player.get, ranked_drivers))
# Make a backup if self.rater_backup_filename is given
if self.rater_backup_filename is not None:
backup_filename = self.rater_backup_filename.format(
timestamp=self.timestamp()
)
# logger.info("Backing up ratings in {}".format(backup_filename))
self.rater.save_ratings(
backup_filename
)
# Tell players their own rating and rank
sorted_players = sorted(
self.rater.player_map.values(),
key=lambda p: p.rating,
reverse=True
)
total = len(sorted_players)
for rank, player in enumerate(sorted_players):
with open(player.message_file, 'w') as fd:
fd.write(player.rating_message.format(rating=player.rating))
fd.write('\n')
fd.write(
player.rank_message.format(
rank=self.rank_text(rank),
total=total
)
)
fd.write('\n')
def change_owner(self, player):
"""
Make `player.process_owner` the owner of all files in
`player.working_dir`
"""
pw_record = pwd.getpwnam(player.process_owner)
logger.debug(
"Changing file ownership for {}".format(player.token)
)
for dirpath, _, filenames in os.walk(player.working_dir):
# Change directory ownership
os.chown(dirpath, pw_record.pw_uid, pw_record.pw_gid)
# Change file ownership
for filename in filenames:
os.chown(
os.path.join(dirpath, filename),
pw_record.pw_uid,
pw_record.pw_gid
)
def change_mode(self, player, mode=None):
"""
Make `player.process_owner` the owner of all files in
`player.working_dir`
"""
if mode is None:
mode = self.file_mode
logger.debug(
"Changing file mode for {}".format(player.token)
)
for dirpath, _, filenames in os.walk(player.working_dir):
# Change directory mode
os.chmod(dirpath, mode)
# Change file mode
for filename in filenames:
os.chmod(os.path.join(dirpath, filename), mode)
@staticmethod
def get_change_user_fn(player):
pw_record = pwd.getpwnam(player.process_owner)
def change_user():
logger.debug(
"Starting demotion. UID: {uid}, GID: {gid}".format(
uid=os.getuid(),
gid=os.getgid()
)
)
try:
logger.debug("Trying to set gid...")
os.setgid(pw_record.pw_gid)
logger.debug("Trying to set uid...")
os.setuid(pw_record.pw_uid)
except Exception as e:
logger.error(e)
raise
logger.debug(
"Finished demotion. UID: {uid}, GID: {gid}".format(
uid=os.getuid(),
gid=os.getgid()
)
)
return change_user
@staticmethod
def get_player_env(player):
# Info from https://stackoverflow.com/questions/1770209/run-child-processes-as-different-user-from-a-long-running-process/6037494#6037494 # NOQA
pw_record = pwd.getpwnam(player.process_owner)
env = os.environ.copy()
env['LOGNAME'] = env['USER'] = pw_record.pw_name
env['HOME'] = pw_record.pw_dir
logger.debug("ENV PWD: {}".format(env.get('PWD', None)))
env['PWD'] = player.working_dir
logger.debug("Set PWD to: {!r}".format(env['PWD']))
logger.debug("PATH: {}".format(env['PATH']))
return env
@classmethod
def load_config(cls, config_file, extra_config={}):
"""
Load a controller from the given config file.
NB. Only the first layer of `extra_config` is merged, everything else
is overwritten, e.g.:
original_config = {
'test': {
'test-one': 'hello'
'test-two': {
'test-two-one': 'bye'
}
}
}
extra_config = {
'test': {
'test-two': {
'test-two-two': 'override'
}
'test-three': 'added'
}
}
results in:
config = {
'test': {
'test-one': 'hello'
'test-two': {
'test-two-two': 'override'
}
'test-three': 'added'
}
}
"""
error_regex = re.compile(
r"__init__\(\) got an unexpected keyword argument '(\w+)'"
)
with open(config_file) as fd:
config = yaml.load(fd, OrderedLoader)
for key, value in extra_config.items():
if isinstance(value, abc.Mapping):
cur_conf = config.setdefault(key, {})
cur_conf.update(value)
else:
config[key] = value
logger.debug("Config: {}".format(config))
try:
rater = cls.load_rater(config)
fbq = cls.load_fbq(config, rater.player_map.values())
controller = cls(rater, fbq, **config.get('controller', {}))
except TypeError as e:
match = error_regex.fullmatch(e.args[0])
if match is not None:
config_key = match.groups()[0]
logger.debug("Match: {}".format(config_key))
raise ValueError(
"Unexpected configuration key in {filename}: {key!r}"
.format(filename=config_file, key=config_key)
) from e
else:
logger.debug("No match...")
raise
return controller
@classmethod
def load_rater(cls, config_dic):
players = cls.load_players(config_dic)
rater = Rater(players, **config_dic.get('rater', {}))
return rater
@staticmethod
def load_players(config_dic):
key = 'players'
if key not in config_dic:
raise ValueError(
"No players specified! Expected a {!r} key in the"
" configuration file.".format(key)
)
players = config_dic[key]
if not isinstance(players, abc.Mapping):
# If it's not a mapping, I'm assuming I can open it.
the_exception = TypeError(
"Expected {key!r} to point to a {{token: config}} mapping or"
" a path to a .yml file containing a {{token: config}}"
" mapping. Instead I found: {players!r}".format(
key=key,
players=players
)
)
fd = None
try:
fd = open(players)
except Exception as e:
raise the_exception from e
else:
players = yaml.load(fd, OrderedLoader)
finally:
if fd is not None:
fd.close()
# logger.debug("Closed players config!")
if not isinstance(players, abc.Mapping):
raise the_exception
return [
Player(token, **player_conf)
for token, player_conf in players.items()
]
@staticmethod
def load_fbq(config_dic, players=()):
return FileBasedQueue(players, **config_dic.get('queue', {}))
class DropboxDisablingController(Controller):
def __init__(self, *args, dropbox_start_command=['dropbox', 'start'],
dropbox_stop_command=['dropbox', 'stop'], start_dropbox=False,
stop_dropbox=False, **kwargs):
self.dropbox_start_command = dropbox_start_command
self.dropbox_stop_command = dropbox_stop_command
self.start_dropbox = start_dropbox
self.stop_dropbox = stop_dropbox
super(DropboxDisablingController, self).__init__(*args, **kwargs)
def race_once(self, *args, **kwargs):
"""Disable Dropbox before racing and start it again afterwards."""
try:
# Try to disable Dropbox
# The catch is that the return status of the Dropbox control script
# is always 0, even if something went wrong...
if self.stop_dropbox:
logger.info("Stopping Dropbox...")
completed = subprocess.run(
self.dropbox_stop_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
logger.info("Dropbox says:\n{}".format(
completed.stdout.decode()
))
del completed
# Race
return super(DropboxDisablingController, self).race_once(
*args,
**kwargs
)
finally:
if self.start_dropbox:
# Enable Dropbox
logger.info("Starting Dropbox...")
# Somehow stderr captures the output of the started Dropbox
# daemon. Capturing it isn't an option because the daemon never
# exits, so reading the stream would hang. If debug output is
# wanted we leave stderr attached; otherwise we squelch the
# daemon's output.
stderr = None if logger.getEffectiveLevel() <= DROPBOX_DEBUG \
else subprocess.DEVNULL
completed = subprocess.run(
self.dropbox_start_command,
stdout=subprocess.PIPE,
stderr=stderr
)
logger.info("Dropbox says:\n{}".format(completed.stdout.decode()))
del completed
class FileBasedQueue(object):
"""
Queue players according to the last modified time of a specifically named
file in their `working_dir`.
"""
def __init__(self, players, filename='start.sh'):
self.filename = filename
self.players = list(players)
@staticmethod
def touch(filename):
"""
Touch a file.
I.e. create it if it does not exist, or update its last modified
time to the current time if it does.
"""
logger.debug("Touching: {}".format(filename))
pathlib.Path(filename).touch()
logger.debug("Touched!")
@staticmethod
def get_last_modified(filename):
modified_time = os.path.getmtime(filename)
logger.debug("Filename: {}".format(filename))
logger.debug("Modified time: {}".format(modified_time))
return modified_time
def get_filename(self, player):
"""Get the full path to the queue file of a player"""
return os.path.join(
player.working_dir,
self.filename
)
def first_n(self, n):
"""
Get the `n` players that are first in line
"""
return sorted(
self.players,
key=lambda p: self.get_last_modified(self.get_filename(p)),
# reverse=True,
)[:n]
def requeue(self, players):
"""
Put the given players at the end of the queue
In this case this is done by touching their respective queue files
in the order the players are passed.
"""
for player in players:
self.touch(self.get_filename(player))
def log_level_type(string):
try:
value = int(string)
except ValueError:
value = string
return value
if __name__ == '__main__':
# Parse command line arguments
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('config_file', help="YAML file with configuration")
parser.add_argument('-l', '--level', default='INFO', type=log_level_type,
help="Logging level to use")
parser.add_argument(
'-s',
'--simulate',
action='store_true',
help="Attempts to mimic a full run without starting child processes."
" May fail if no old TORCS output files are present in the expected"
" directory.")
parser.add_argument(
'--start-dropbox',
action='store_true',
help="Start Dropbox again after the race."
)
parser.add_argument(
'--stop-dropbox',
action='store_true',
help="Stop Dropbox before the race. Implies --start-dropbox."
)
args = parser.parse_args()
# Initialise logging
logging.basicConfig(level=args.level)
extra_config = {}
if args.stop_dropbox:
control_config = extra_config.setdefault('controller', {})
control_config['stop_dropbox'] = True
if args.start_dropbox or args.stop_dropbox:
control_config = extra_config.setdefault('controller', {})
control_config['start_dropbox'] = True
# Race
# controller = Controller.load_config(args.config_file)
controller = DropboxDisablingController.load_config(
args.config_file,
extra_config
)
controller.race_and_save(simulate=args.simulate)
logger.info("Done!")
|
joramwessels/torcs-client
|
driver_utils.py
|
# file: driver_utils.py
# author: <NAME>
# date: 11-12-2017
# dependencies: numpy
# description
# usage
#
from sys import stderr
from numpy import sign
# as stated by Torcs documentation
MAX_WHEEL_ROTATION = 21
# confusing polarities
STR_R, STR_L = -1, 1
ANG_R, ANG_L = 1, -1
DFC_R, DFC_L = -1, 1
# printing
ENABLE_DEBUG_INFO = True
PRINT_CYCLE_INTERVAL = 50 # frequency of print output in game cycles
PRINT_STATE = True
PRINT_COMMAND = False
def to_ang(ang):
""" Steers towards the road angle
Args:
ang: The angle of the car with the road
Returns:
The angle to steer in
"""
if sign(ang) == ANG_R:
return STR_L
elif sign(ang) == ANG_L:
return STR_R
else:
return 0
def away_from_ang(ang):
""" Steers away from the road angle
Args:
ang: The angle of the car with the road
Returns:
The angle to steer in
"""
return -to_ang(ang)
def debug(iter, *args):
""" prints debug info to stderr """
if iter % PRINT_CYCLE_INTERVAL == 0:
err(iter, *args)
def err(iter, *args):
if ENABLE_DEBUG_INFO:
spc = 6-len(str(iter))
ovwr = 50 - len(' '.join([str(x) for x in args]))
print(iter, ' '*spc, *args, ' '*ovwr, file=stderr)
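# Quick sanity check of the polarity helpers above (illustrative): the
# angle and steering conventions carry opposite labels but the same sign,
# so steering towards the road angle preserves the sign of the angle.
#
# assert to_ang(0.3) == STR_L        # angle to the right -> steer "left"
# assert away_from_ang(0.3) == STR_R
# assert to_ang(0.0) == 0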
|
joramwessels/torcs-client
|
mlp_driver.py
|
from pytocl.driver import Driver
from pytocl.car import State, Command
import mlp
class MyDriver(Driver):
def __init__(self, model_file="mlp_100x100.pt"):
mlp.use_cuda = False
self.model = mlp.load_model(model_file)
self.it = 0
def drive(self, carstate):
self.it += 1
x = [carstate.angle, carstate.speed_x,
carstate.speed_y, carstate.speed_z] + \
list(carstate.distances_from_edge) + \
[carstate.distance_from_center]
pred_y = list(self.model.predict(x).data)[0]
command = Command()
command.accelerator = pred_y[0]
command.brake = pred_y[1]
command.steering = pred_y[2]
gear_flt = pred_y[3] if self.it > 750 else self.it/250.0
command.gear = min(5, max(1, int(gear_flt + 0.5)))
print(self.it,"acc: %.2f, brk: %.2f, ste: %.2f, gea: %.2f"
%(command.accelerator, command.brake,
command.steering, gear_flt), end='\r')
return command
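# A hedged sketch of how this driver would typically be launched (assumes
# the pytocl starter kit exposes `pytocl.main.main`; not verified against
# this repo):
#
# from pytocl.main import main
# if __name__ == '__main__':
#     main(MyDriver())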
|
joramwessels/torcs-client
|
basic_control.py
|
# file: basic_control.py
# author: <NAME>
# date:
# dependencies:
# description:
# usage:
#
from math import radians
import numpy as np
import torch
import ffnn_speed
import ffnn_steer
ANGLES = [90, 75, 60, 45, 30, 20, 15, 10, 5, 0, -5, -10, -15, -20, -30, -45, -60, -75, -90]
class BasicControl:
def __init__(self, steering_values):
""" Short description
Complete multiline
description of class
Args:
steering_values: ???
"""
self.brake_row = 0
self.speed = ffnn_speed.Speed(10)
self.steer = ffnn_steer.Steer(10)
self.steer.load_state_dict(torch.load("./steer.data"))
self.speed.load_state_dict(torch.load("./ffnn_speed.data"))
self.steering_values = steering_values
self.alphas = [radians(x) for x in ANGLES]
def deal_with_opponents(self, steer_pred, pedal, speed_x,
distance_from_center, opponents_new, opponents_delta):
""" Description
Args:
steer_pred: ?
pedal: ?
speed_x: ?
distance_from_center: ?
opponents_new: ?
opponents_delta: ?
Returns:
???
"""
# index 18 is in front
# index 35 is behind us
adjustment = 0.1
# if there are cars in front, to the left -> move to the right
if opponents_new[17] < 10 or opponents_new[16] < 10 or opponents_new[15] < 10:
print("ADJUSTING SO NOT TO HIT")
steer_pred -= adjustment
if opponents_new[19] < 10 or opponents_new[20] < 10 or opponents_new[21] < 10:
print("ADJUSTING SO NOT TO HIT")
# move to left
steer_pred += adjustment
if opponents_new[18] < 50:
# we are on left side -> move right
if distance_from_center > 0:
steer_pred -= adjustment
# o.w. move left
else:
steer_pred += adjustment
if speed_x > 100:
# we are getting closer to the car in front (and we can't avoid it). We need to slow down a bit
if (opponents_delta[18] < 0 and opponents_new[18] < 20) or (opponents_delta[17] < 0 and opponents_new[17] < 4) or (opponents_delta[19] < 0 and opponents_new[19] < 4):
pedal -= 0.1
return steer_pred, pedal
def steer_decider(self, carstate):
""" Description
Args:
carstate: The full carstate
Returns:
Steering angle?
"""
alpha_index = np.argmax(carstate.distances_from_edge)
if is_straight_line(carstate=carstate, radians=self.alphas[alpha_index], factor=self.steering_values[4]):
return carstate.angle * 0.5
steering_function = lambda index, offset:\
(self.alphas[index-offset] * carstate.distances_from_edge[index-offset] \
+ self.alphas[index+offset] * carstate.distances_from_edge[index+offset]) \
/ (carstate.distances_from_edge[index+offset] \
+ carstate.distances_from_edge[index-offset])
steer = self.steering_values[0] * self.alphas[alpha_index]
for x in range(1, 4):
if alpha_index - x > -1 and alpha_index + x < len(self.steering_values):
steer += self.steering_values[x]*steering_function(alpha_index, x)
return steer
def speed_decider(self, carstate, max_speed=120):
""" Description
Args:
carstate: The full carstate
max_speed: ???
Returns:
???
"""
# we predict speed and map that to pedal
x_in = ffnn_speed.carstate_to_variable(carstate)
target_speed = self.speed(x_in).data[0]
# we limit the speed
if target_speed >= max_speed:
target_speed = max_speed
pedal = 2/(1 + np.exp(carstate.speed_x - target_speed))-1
return pedal
def gear_decider(self, carstate):
""" Description
Args:
carstate: The full carstate
Returns:
The gear to shift to (int)
"""
gear = carstate.gear
rpm = carstate.rpm
# we do gears by hand
# up if {9500 9500 9500 9500 9000}
# down if {4000 6300 7000 7300 7300}
if gear == -1:
return 1
elif gear == 0:
if rpm >= 5000:
gear = 1
elif gear == 1:
if rpm >= 9500:
gear = 2
elif gear == 2:
if rpm >= 9500:
gear = 3
elif rpm <= 4000:
gear = 2
elif gear == 3:
if rpm >= 9500:
gear = 4
elif rpm <= 6300:
gear = 3
elif gear == 4:
if rpm >= 9500:
gear = 5
elif rpm <= 7000:
gear = 3
elif gear == 5:
if rpm >= 9000:
gear = 6
elif rpm <= 7300:
gear = 4
elif gear == 6:
if rpm <= 7300:
gear = 5
return gear
def disambiguate_pedal(self, pedal, accel_cap=0.5, break_cap=0.75, break_max_length=5):
""" Description
Args:
???
Returns:
The break and accelerator command values
"""
if pedal >= 0.0:
accelerator = pedal*accel_cap
brake = 0
else:
# make sure we don't brake too hard or for too long
self.brake_row += 1
if self.brake_row <= break_max_length:
brake = abs(pedal)*break_cap
else:
self.brake_row = 0
brake = 0
accelerator = 0
return brake, accelerator
def is_straight_line(carstate, radians, factor):
""" Decides whether ??? is a straight line
Args:
carstate: The full carstate
radians: ???
factor: ???
Returns:
A boolean indicating whether ???
"""
if abs(carstate.distance_from_center) < 0.75:
if radians == 0:
return True
if carstate.distances_from_edge[9] > 190:
return True
if carstate.distances_from_edge[9] > factor * carstate.speed_x:
return True
return False
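# Worked example of the pedal mapping used in `speed_decider` (illustrative):
# pedal = 2 / (1 + exp(speed_x - target_speed)) - 1 squashes the speed error
# into [-1, 1]: well below target -> ~+1 (throttle), at target -> 0, well
# above target -> ~-1 (braking, later split by `disambiguate_pedal`).
#
# import numpy as np
# for speed, target in [(60, 120), (120, 120), (180, 120)]:
#     print(speed, target, round(2 / (1 + np.exp(speed - target)) - 1, 3))
# # -> 1.0, 0.0, -1.0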
|
joramwessels/torcs-client
|
combined_driver.py
|
# file: combined_driver.py
# authors: Haukur, <NAME>
# date: 07-12-2017
# dependencies: pytocl, basic_control, swarm, crisis_driver
# description
# usage
#
from pytocl.driver import Driver
from pytocl.car import State, Command
from sys import stderr
from math import radians
from operator import sub
from driver_utils import *
from basic_control import BasicControl
from swarm import FeromoneTrail
from crisis_driver import CrisisDriver
from mlp import load_model
# - NOTE crash detection for swarm only checks for off road
# - NOTE collision detection for swarm might be too sensitive
# - TODO clear up swarm global parameters
# - TODO swarm debug output is piped to stderr
ENABLE_SWARM = True
ENABLE_CRISIS_DRIVER = True
ENABLE_NETWORK = False
# Neural network parameters
STR_MODELS = ["steering_model_1.pt", "steering_model_2.pt", "steering_model_2b.pt",
"steering_model_3.pt", "steering_model_3b.pt"]
MODEL_FILENAME = STR_MODELS[4]
# swarm metaparameters
swarm_pos_int = 50
swarm_spd_int = 20
swarm_spd0 = 0
swarm_spd_n = 20
swarm_expl_int = 40
class Final_Driver(Driver):
def __init__(self, steering_values, global_max_speed):
""" Short description
Multiline description on
details and usage
Args:
steering_values: ???
global_max_speed: ???
"""
super(Final_Driver, self).__init__()
self.iter = 0
self.basic_control = BasicControl(steering_values)
self.back_up_driver = CrisisDriver(logdata=False)
self.bad_counter = 0
self.lap_counter = 0
self.last_opponents = [0 for x in range(36)]
self.global_max_speed = global_max_speed
self.max_speed = global_max_speed
self.cummulative_time = 0
if ENABLE_SWARM:
self.swarm = FeromoneTrail(
swarm_pos_int, swarm_spd_int,
swarm_spd0, swarm_spd_n,
swarm_expl_int, self.global_max_speed)
self.crashed_in_last_frame = False
self.contact_in_last_frame = False
self.previous_frame_position = 0
if ENABLE_NETWORK:
self.steering_model = load_model(MODEL_FILENAME)
def drive(self, carstate: State) -> Command:
""" Description
Args:
carstate: All parameters packed in a State object
Returns:
command: The next move packed in a Command object
"""
self.iter += 1
self.back_up_driver.update_status(carstate)
# trackers
self.update_trackers(carstate)
if PRINT_STATE:# and (self.iter % PRINT_CYCLE_INTERVAL) == 0:
self.print_trackers(carstate, r=True)
# crash and collision detection for swarm
if ENABLE_SWARM:
if self.back_up_driver.needs_help or self.back_up_driver.is_off_road:
if not self.crashed_in_last_frame:
debug(self.iter, "SWARM: crashed")
self.crashed_in_last_frame = True
for dist in carstate.opponents:
if dist == 0:
self.contact_in_last_frame = True
# crisis handling
if ENABLE_CRISIS_DRIVER:
if self.back_up_driver.is_in_control:
return self.back_up_driver.drive(carstate)
elif self.back_up_driver.needs_help:
self.back_up_driver.pass_control(carstate)
return self.back_up_driver.drive(carstate)
# the server data and python's units differ, so convert (degrees -> radians, m/s -> km/h)
try:
carstate.angle = radians(carstate.angle)
carstate.speed_x = carstate.speed_x*3.6
command = self.make_next_command(carstate)
except Exception as e:
err(self.iter, str(e))
command = self.back_up_driver.driver.drive(carstate)
return command
def make_next_command(self, carstate):
""" Description
Args:
carstate: The full carstate object as passed to Driver()
Returns:
command: The command object to pass back to the server
"""
# checking in on the swarm
position = carstate.distance_from_start
position = int(position - (position % self.swarm.pos_int))
new_frame = position > (self.previous_frame_position + self.swarm.pos_int)
new_lap = self.previous_frame_position > (position + self.swarm.pos_int)
if ENABLE_SWARM and (new_frame or new_lap):
self.max_speed = self.swarm.check_in(
position,
carstate.speed_x,
self.crashed_in_last_frame,
self.contact_in_last_frame)
self.crashed_in_last_frame = False
self.contact_in_last_frame = False
self.previous_frame_position = position
err(self.iter, "SWARM: pos=%i, max_speed=%i" %(position, self.max_speed))
# basic predictions
if ENABLE_NETWORK:
steer_pred = self.steering_model.predict([carstate.angle, carstate.speed_x]
+ list(carstate.distances_from_edge)
+ [carstate.distance_from_center])
steer_pred = steer_pred[0]
else:
steer_pred = self.basic_control.steer_decider(carstate)
gear = self.basic_control.gear_decider(carstate)
pedal = self.basic_control.speed_decider(carstate, max_speed=self.max_speed)
# making sure we don't drive at people
opponents_deltas = list(map(sub, carstate.opponents, self.last_opponents))
steer_pred, pedal = self.basic_control.deal_with_opponents(steer_pred,
pedal,
carstate.speed_x,
carstate.distance_from_center,
carstate.opponents,
opponents_deltas)
# if too fast, decelerate to max speed
if carstate.speed_x > self.max_speed:
pedal = 0.0
err(self.iter, "MAIN: capping speed")
# disambiguating pedal with smoothing
brake, accel = self.basic_control.disambiguate_pedal(pedal, accel_cap=1.0)
# debug output
if PRINT_COMMAND and self.iter % PRINT_CYCLE_INTERVAL:
print("Executing comand: gear=%.2f, acc=%.2f," %(gear, accel),
"break=%.2f, steering=%.2f" %(brake, steer_pred))
# command construction
command = Command()
command.brake = brake
command.accelerator = accel
command.steering = steer_pred
command.gear = gear
if command.steering > 0.10:
debug(self.iter, "BASIC: turning left")
elif command.steering < -0.10:
debug(self.iter, "BASIC: turning right")
return command
def update_trackers(self, carstate):
""" Updates info about the race """
self.iter += 1
if abs(carstate.current_lap_time) < 0.020:
self.lap_counter += 1
self.cummulative_time += carstate.last_lap_time
def print_trackers(self, carstate, r=False):
""" Prints info on the race """
line_end = '\r' if r else '\n'
print(" Lap=%i CurLapTime=%.2f dist=%.2f time=%.2f"
%(self.lap_counter,
carstate.current_lap_time,
carstate.distance_raced,
self.cummulative_time + carstate.current_lap_time)
, end=line_end)
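# Illustrative note on the swarm check-in logic in `make_next_command`: the
# distance from start is quantized into buckets of `pos_int` metres, and a
# check-in happens when the car enters a new bucket or wraps to a new lap.
#
# pos_int = 50
# for raw in (49.0, 99.0, 120.0):
#     print(int(raw - (raw % pos_int)))   # -> 0, 50, 100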
|
joramwessels/torcs-client
|
ffnn_steer.py
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import time
import sys
import numpy as np
import argparse
import driver_support
import math
from os import listdir
from os.path import isfile, join
class Steer(nn.Module):
def __init__(self, hidden_dimension):
super(Steer, self).__init__()
n_states = 22
n_actions = 1
self.layer_1 = nn.Linear(n_states, hidden_dimension)
self.non_lin = nn.Sigmoid()
self.layer_2 = nn.Linear(hidden_dimension, n_actions)
def forward(self, inputs):
out = self.layer_1(inputs)
out = self.non_lin(out)
out = self.layer_2(out)
return out
def carstate_to_variable(carstate):
# y=speed, x=angle, distance*19, distToMiddle
return Variable(torch.FloatTensor([math.radians(carstate.angle), carstate.speed_x] + list(carstate.distances_from_edge) + [carstate.distance_from_center]), requires_grad=True)
def create_model(out_file, training_folder, learning_rate, epochs, hidden_dimension):
# Read in the data
training = []
for file_in in [join(training_folder, f) for f in listdir(training_folder) if isfile(join(training_folder, f))]:
training += list(driver_support.read_lliaw_dataset_steer_angle_speed_dist_middle(file_in))
model = Steer(hidden_dimension)
print(model)
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
loss = nn.MSELoss()
for ITER in range(epochs):
train_loss = 0.0
start = time.time()
for y_true, state in training:
# forward pass
optimizer.zero_grad()
in_state = Variable(torch.FloatTensor(state))
y_pred = model(in_state)
y_true = Variable(torch.FloatTensor(y_true))
output = loss(y_pred, y_true)
train_loss += output.data[0]
# backward pass
output.backward()
# update weights
optimizer.step()
print("last prediction made:", y_pred, y_true)
print("iter %r: train loss/action=%.4f, time=%.2fs" %(ITER, train_loss/len(training), time.time()-start))
torch.save(model.state_dict(), out_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int)
parser.add_argument('--hidden', type=int)
parser.add_argument('--learn', type=float)
parser.add_argument('--in_file', type=str)
parser.add_argument('--out_file', type=str)
args = parser.parse_args()
create_model(args.out_file, args.in_file, args.learn, args.epochs, args.hidden)
if __name__ == "__main__":
main()
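# Example invocation (argument names from the parser above; note --in_file
# is a folder of training files; paths are illustrative):
#
#   python ffnn_steer.py --epochs 10 --hidden 10 --learn 0.01 \
#       --in_file training_data/ --out_file steer.data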
|
joramwessels/torcs-client
|
ffnn_gears.py
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import time
import sys
import numpy as np
import argparse
import driver_support
from os import listdir
from os.path import isfile, join
from collections import defaultdict
gear_number = 9 # 0 = reverse, 1 = neutral, 2=1st gear, etc. to 7th gear
class Gear_switcher(nn.Module):
def __init__(self, hidden_dimension):
super(Gear_switcher, self).__init__()
n_states = 1 + gear_number
n_actions = gear_number
self.layer_1 = nn.Linear(n_states, hidden_dimension)
self.layer_2 = nn.Linear(hidden_dimension, n_actions)
def forward(self, inputs):
out = self.layer_1(inputs)
out = nn.functional.relu(out)
out = self.layer_2(out)
return out
def gear_to_tensor(gear_value):
gear_value += 1
return torch.LongTensor([gear_value])
def to_tensor(carstate):
#gear, rpm
return torch.FloatTensor(driver_support.binerize_input(value=carstate.gear, mapping=get_gear_map()) + [carstate.rpm])
def prediction_to_action(prediction):
# the index is the gear
index = prediction.data.numpy().argmax()
index -= 1
return index
def get_gear_map():
gear_to_index_map = dict()
for x in range(-1, gear_number - 1):
gear_to_index_map[str(x)] = x + 1
return gear_to_index_map
def evaluate(model, data):
"""Evaluate a model on a data set."""
correct = 0.0
for y_true, state in data:
y_true = int(y_true[0])
lookup_tensor = Variable(torch.FloatTensor(state))
scores = model(lookup_tensor)
action = prediction_to_action(scores)
if action == y_true:
correct += 1
print("percent correct={}".format(correct/len(data)))
def split_data_set(data_set, eval_perc=0.2):
total = len(data_set)
split = int(total*eval_perc)
train = data_set[split:]
evaluate = data_set[:split]
return train, evaluate
def create_model(out_file, training_folder, learning_rate, epochs, hidden_dimension):
# Read in the data
training = []
for file_in in [join(training_folder, f) for f in listdir(training_folder) if isfile(join(training_folder, f))]:
training += list(driver_support.read_lliaw_dataset_gear_gear_rpm_spe(file_in))
model = Gear_switcher(hidden_dimension)
training = driver_support.binerize_data_input(data=training, index=0, mapping=get_gear_map())
training, evalu = split_data_set(training)
print(model)
evaluate(model, evalu)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
loss = nn.CrossEntropyLoss()
for ITER in range(epochs):
train_loss = 0.0
start = time.time()
lowest_gear = 10
highest_gear = 0
last_state = None
for y_true, state in training:
if last_state is None:
last_state = state
continue
correct_gear = int(y_true[0])
optimizer.zero_grad()
in_state = Variable(torch.FloatTensor(last_state))
y_pred = model(in_state).view(1, gear_number)
y_true = Variable(gear_to_tensor(correct_gear))
#print(y_true, prediction_to_action(y_pred))
output = loss(y_pred, y_true)
train_loss += output.data[0]
# backward pass
output.backward()
# update weights
optimizer.step()
last_state = state
print("last prediction made:pred={}, actual={}".format(prediction_to_action(y_pred), y_true))
print("iter %r: train loss/action=%.4f, time=%.2fs" %(ITER, train_loss/len(training), time.time()-start))
evaluate(model, evalu)
torch.save(model.state_dict(), out_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int)
parser.add_argument('--hidden', type=int)
parser.add_argument('--learn', type=float)
parser.add_argument('--in_file', type=str)
parser.add_argument('--out_file', type=str)
args = parser.parse_args()
create_model(args.out_file, args.in_file, args.learn, args.epochs, args.hidden)
if __name__ == "__main__":
main()
|
joramwessels/torcs-client
|
data_generator.py
|
import argparse
import xml.etree.ElementTree as ET
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('--drivers', nargs='+', type=str)
parser.add_argument('--tracks', nargs='+', type=str)
parser.add_argument('--length', type=int)
parser.add_argument('--laps', type=int)
parser.add_argument('--file', type=str)
args = parser.parse_args()
drivers = args.drivers
tracks = args.tracks
length = args.length
laps = args.laps
file_in = args.file
file_out = "quickrace_auto_gen.xml"
all_drivers = ["berniw", "berniw3", "damned", "inferno", "lliaw", "tita", "berniw2", "bt", "inferno2", "olethros", "sparkle"]
best_drivers = ["lliaw", "inferno", "olethros", "tita"]
best_driver = ["lliaw"]
all_tracks = ["aalborg", "alpine-1", "alpine-2", "brondehach", "corkscrew", "e-track-1", "e-track-2", "e-track-3", "e-track-4", "e-track-6", "eroad", "forza", "g-track-1", "g-track-2", "g-track-3", "ole-road-1", "ruudskogen", "spring", "street-1", "wheel-1", "wheel-2"]
if tracks[0] == "all":
tracks = all_tracks
if drivers[0] == "best":
drivers = best_driver
def set_drivers(root, drivers):
xml_drivers = root[4] #Drivers section
del xml_drivers[3] #a specific driver definition
for index, driver in enumerate(drivers):
ele = ET.Element("section", attrib={"name": str(index + 1)})
ele.append(ET.Element("attnum", attrib={"name": "idx", "val": str(index + 1)}))
ele.append(ET.Element("attstr", attrib={"name": "module", "val": driver}))
xml_drivers.append(ele)
def set_track(root, track):
xml_track = root[1]
xml_track[1][0].attrib["val"] = track
def write_and_run(file_out, tree, track):
# Write back to file
tree.write(file_out)
# Run TORCS
command = "torcs -r ~/ci/torcs-client/" + file_out
completed_command = subprocess.run(command, shell=True, check=True)
command = "mv /tmp/lliaw_data.csv ./lliaw_"+track+".data"
completed_command = subprocess.run(command, shell=True, check=True)
# Open original file
tree = ET.parse(file_in)
root = tree.getroot()
set_drivers(root, drivers)
for track in tracks:
set_track(root, track)
write_and_run(file_out, tree, track)
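# Example invocation (argument names from the parser above; values are
# illustrative). "best"/"all" expand to the driver/track lists defined at
# the top of this file:
#
#   python data_generator.py --drivers best --tracks all \
#       --length 1000 --laps 3 --file quickrace.xml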
|
joramwessels/torcs-client
|
combined_driver_old.py
|
from pytocl.driver import Driver
from pytocl.car import State, Command
import torch
import time
import sys
import math
import numpy as np
from os import listdir
from os.path import isfile, join
import driver_support
from torch.autograd import Variable
from operator import sub
import ffnn_steer
import ffnn_speed
class Final_Driver(Driver):
def __init__(self, steering_values, global_max_speed):
super(Final_Driver, self).__init__()
self.steer = ffnn_steer.Steer(10)
self.steer.load_state_dict(torch.load("./steer.data"))
self.speed = ffnn_speed.Speed(10)
self.speed.load_state_dict(torch.load("./ffnn_speed.data"))
self.back_up_driver = Driver(logdata=False)
self.bad_counter = 0
self.lap_counter = 0
self.brake_row = 0
self.angles = [90, 75, 60, 45, 30, 20, 15, 10, 5, 0, -5, -10, -15, -20, -30, -45, -60, -75, -90]
self.alphas = [math.radians(x) for x in self.angles]
self.last_opponents = [0 for x in range(36)]
self.steering_values = steering_values
self.global_max_speed = global_max_speed
def update_trackers(self, carstate):
if carstate.current_lap_time == 0:
self.lap_counter += 1
print("Lap={}".format(self.lap_counter))
print("distance:",carstate.distance_raced)
def drive(self, carstate: State) -> Command:
self.update_trackers(carstate)
if self.in_a_bad_place(carstate):
command = self.back_up_driver.drive(carstate)
if self.bad_counter >= 600 and is_stuck(carstate):
# we try reversing
command.gear = -command.gear
if command.gear < 0:
command.steering = -command.steering
command.gear = -1
self.bad_counter = 200
else:
# the server data and python's units differ, so convert (degrees -> radians, m/s -> km/h)
carstate.angle = math.radians(carstate.angle)
carstate.speed_x = carstate.speed_x*3.6
command = self.make_next_command(carstate)
# based on target, implement speed/steering manually
print("Executing command: gear={}, acc={}, break={}, steering={}".format(command.gear,
command.accelerator,
command.brake,
command.steering))
return command
def make_next_command(self, carstate):
command = Command()
# we switch gears manually
gear = self.gear_decider(carstate)
# we get the steering prediction
steer_pred = self.steer_decider(carstate, steering_values=self.steering_values)
# steer_pred = self.steer_decider_nn(carstate)
# pedal in [-1, 1], combining braking and accelerating into one variable
pedal = self.speed_decider(carstate, max_speed=self.global_max_speed)
# make sure we don't drive at people
opponents_deltas = list(map(sub, carstate.opponents, self.last_opponents))
steer_pred, pedal = self.deal_with_opponents(steer_pred, pedal, carstate.speed_x, carstate.distance_from_center, carstate.opponents, opponents_deltas)
# disambiguate pedal with smoothing
brake, accel = self.disambiguate_pedal(pedal, accel_cap=1.0, break_cap=0.75, break_max_length=5)
command.brake = brake
command.accelerator = accel
command.steering = steer_pred
command.gear = gear
return command
def deal_with_opponents(self, steer_pred, pedal, speed, distance_from_center, opponents_new, opponents_delta):
# index 18 is in front
# index 35 is behind us
adjustment = 0.1
# if there are cars in front, to the left -> move to the right
if opponents_new[17] < 10 or opponents_new[16] < 10 or opponents_new[15] < 10:
print("ADJUSTING SO NOT TO HIT")
steer_pred -= adjustment
if opponents_new[19] < 10 or opponents_new[20] < 10 or opponents_new[21] < 10:
print("ADJUSTING SO NOT TO HIT")
# move to left
steer_pred += adjustment
if opponents_new[18] < 50:
# we are on left side -> move right
if distance_from_center > 0:
steer_pred -= adjustment
# o.w. move left
else:
steer_pred += adjustment
if speed > 100:
# we are getting closer to the car in front (and we can't avoid it). We need to slow down a bit
if (opponents_delta[18] < 0 and opponents_new[18] < 20) or (opponents_delta[17] < 0 and opponents_new[17] < 4) or (opponents_delta[19] < 0 and opponents_new[19] < 4):
pedal -= 0.1
return steer_pred, pedal
def disambiguate_pedal(self, pedal, accel_cap=0.5, break_cap=0.75, break_max_length=5):
if pedal >= 0.0:
accelerator = pedal*accel_cap
brake = 0
else:
# make sure we don't brake too hard or for too long
self.brake_row += 1
if self.brake_row <= break_max_length:
brake = abs(pedal)*break_cap
else:
self.brake_row = 0
brake = 0
accelerator = 0
return brake, accelerator
def steer_decider(self, carstate, steering_values):
alpha_index = np.argmax(carstate.distances_from_edge)
if is_straight_line(carstate=carstate, radians=self.alphas[alpha_index], factor=steering_values[4]):
return carstate.angle*0.5
steering_function = lambda index, offset: (self.alphas[index-offset]*carstate.distances_from_edge[index-offset] + self.alphas[index+offset]*carstate.distances_from_edge[index+offset])/(carstate.distances_from_edge[index+offset]+carstate.distances_from_edge[index-offset])
steer = steering_values[0]*self.alphas[alpha_index]
for x in range(1, 4):
if alpha_index - x > -1 and alpha_index + x < len(steering_values):
steer += steering_values[x]*steering_function(alpha_index, x)
return steer
def speed_decider(self, carstate, max_speed=120):
# we predict speed and map that to pedal
x_in = ffnn_speed.carstate_to_variable(carstate)
target_speed = self.speed(x_in).data[0]
# we limit the speed
if target_speed >= max_speed:
target_speed = max_speed
pedal = 2/(1 + np.exp(carstate.speed_x - target_speed))-1
return pedal
def gear_decider(self, carstate):
gear = carstate.gear
rpm = carstate.rpm
# we do gears by hand
# up if {9500 9500 9500 9500 9000}
# down if {4000 6300 7000 7300 7300}
if gear == -1:
return 1
elif gear == 0:
if rpm >= 5000:
gear = 1
elif gear == 1:
if rpm >= 9500:
gear = 2
elif gear == 2:
if rpm >= 9500:
gear = 3
elif rpm <= 4000:
gear = 2
elif gear == 3:
if rpm >= 9500:
gear = 4
elif rpm <= 6300:
gear = 3
elif gear == 4:
if rpm >= 9500:
gear = 5
elif rpm <= 7000:
gear = 3
elif gear == 5:
if rpm >= 9000:
gear = 6
elif rpm <= 7300:
gear = 4
elif gear == 6:
if rpm <= 7300:
gear = 5
return gear
def in_a_bad_place(self, carstate):
something_wrong = False
if is_offroad(carstate):
print("I'm offroad!")
something_wrong = True
if is_reversed(carstate):
print("I'm reversed!")
something_wrong = True
if is_stuck(carstate):
print("I'm stuck!")
something_wrong = True
if (something_wrong):
self.bad_counter += 1
else:
self.bad_counter = 0
# if we have been in a bad place for 2 seconds
if self.bad_counter >= 100:
return True
return False
def is_straight_line(carstate, radians, factor):
if abs(carstate.distance_from_center) < 0.75:
if radians == 0:
return True
if carstate.distances_from_edge[9] > 190:
return True
if carstate.distances_from_edge[9] > factor * carstate.speed_x:
return True
return False
def is_offroad(carstate):
return max(carstate.distances_from_edge) == -1
def is_stuck(carstate):
return abs(carstate.speed_x) <= 5 and carstate.current_lap_time >= 10
def is_reversed(carstate):
return abs(carstate.angle) >= 90
|
joramwessels/torcs-client
|
gru.py
|
# file: gru.py
# author: <NAME>
# date: 23-11-2017
# source: https://github.com/spro/practical-pytorch
# description:
#
# Trains a GRU RNN on your training data and saves the weights.
# Run by calling "python gru.py <train_data_file> <save_file>".
#
"""
TODO not tested
TODO index out of range in GRU.forward()
"""
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import sys
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
allow_cuda = False
use_cuda = torch.cuda.is_available() and allow_cuda
teacher_forcing_ratio = 0.5
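# NOTE: timeSince and showPlot come from the practical-pytorch helpers and
# were not included in this file; the minimal stand-ins below are assumptions,
# not the original implementations, and only keep train_iters runnable as written.
def timeSince(since, percent):
    """ Formats elapsed and estimated remaining time as 'Xm Ys (- Xm Ys)' """
    s = time.time() - since
    es = s / percent if percent > 0 else 0
    rs = es - s
    return '%dm %ds (- %dm %ds)' % (s // 60, s % 60, rs // 60, rs % 60)
def showPlot(points):
    """ Saves the loss curve to disk; silently skips if matplotlib is missing """
    try:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.plot(points)
        plt.savefig('gru_loss.png')
    except ImportError:
        pass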
class GRU(nn.Module):
def __init__(self, i_dim, h_dim, o_dim, n_layers=1):
""" The encoder turns a continuous input into a continuous output
i_dim: The input dimension
h_dim: The amount of hidden units
o_dim: The output dimension
n_layers: The amount of hidden layers (default=1)
"""
super(GRU, self).__init__()
self.n_layers = n_layers
self.h_dim = h_dim
self.input_layer = nn.Linear(i_dim, h_dim)
self.hidden_layer = nn.GRU(i_dim, h_dim, n_layers, batch_first=True)
self.output_layer = nn.Linear(h_dim, o_dim)
def forward(self, input, hidden):
#input = input.view(1, 1, -1)
#output = self.input_layer(input)
#for i in range(self.n_layers):
# output, hidden = self.hidden_layer(output, hidden)
print(input.size())
print(hidden.size())
output, hidden = self.hidden_layer(input, hidden)
output = self.output_layer(output)
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(self.n_layers, 1, self.h_dim))
if use_cuda:
return result.cuda()
else:
return result
def train(input, target, encoder, optimizer, criterion):
""" Trains all timesteps for a sequence of parameters
input: Sequence of input variables
target: Sequence of target variables
encoder: The GRU encoder model
optimizer: The encoder optimizer object
criterion: The loss function
"""
hidden = encoder.initHidden()
optimizer.zero_grad()
loss = 0
#for i in range(len(input)):
# output, hidden = encoder(input[i], hidden)
# loss += criterion(output, target)
output, hidden = encoder(input, hidden)
loss = criterion(output, target)
loss.backward()
optimizer.step()
return loss.data[0] / target.size()[0]
def train_iters(data, encoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
""" Trains the model for 'n_iters' epochs and reports on the progress
data: The training sequence pairs as a list of Variables of
floats, e.g [Variable(FloatTensor(FloatTensor))]
encoder: The encoder model
n_iters: The amount of sequences to train
print_every: The interval for printing the current loss
plot_every: The interval for adding a datapoint to the plot
learning_rate: The learning rate of the model
"""
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    # the targets are continuous sensor values, so a regression loss is
    # needed here rather than the classification loss from the source tutorial
    criterion = nn.MSELoss()
for iter in range(1, n_iters + 1):
training_pair = data[iter - 1]
input_variable = training_pair[0]
target_variable = training_pair[1]
loss = train(input_variable, target_variable, encoder, encoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
showPlot(plot_losses)
return
def train_gru_on(filename, target_indices, save_as, sep=',', skip_first_line=True):
""" Trains a GRU RNN on a given data file
filename: The name of a CSV file containing the training data
target_indices: A list of integers indicating the columns with target variables
save_as: Filename of the saved model parameter file
sep: The separator used in the CSV file
skip_first_line: Boolean indicating labels on the first line of the CSV
"""
layers = 1
units = 10
iters = 500
print("Reading data file",filename,"...")
training_pairs = readTrainingData(filename, target_indices, sep, skip_first_line)
print("Creating GRU model with %i layers of %i hidden units" %(layers, units),"...")
model = GRU(len(training_pairs[0][0][0]), units, len(target_indices), n_layers=layers)
print("Training model on data...")
train_iters(training_pairs, model, iters)
print("Model trained for %i iterations" %iters)
    torch.save(model.state_dict(), save_as)
print("Model saved as",save_as)
def readTrainingData(filename, target_indices, sep, skip_first_line):
""" Reads the data files to a list of sequence pairs
filename: The name of a CSV file containing the training data
target_indices: A list of integers indicating the columns with target variables
sep: The separator used in the CSV file
skip_first_line: Boolean indicating labels on the first line of the CSV
"""
inputs = []
targets = []
with open(filename) as file:
if skip_first_line: next(file)
for line in file:
params = [float(s) for s in line.strip().split(sep)]
if not len(params) == 25: continue
inputs.append(params)
targets.append([params[i] for i in target_indices])
if use_cuda:
inputs = Variable(torch.FloatTensor([inputs])).cuda()
targets = Variable(torch.FloatTensor([targets])).cuda()
else:
inputs = Variable(torch.FloatTensor([inputs]))
targets = Variable(torch.FloatTensor([targets]))
return [(inputs, targets)]
if __name__ == "__main__":
train_gru_on(sys.argv[1], [0,1,2], sys.argv[2], sep=',', skip_first_line=True)
|
joramwessels/torcs-client
|
run.py
|
#! /usr/bin/env python3
from pytocl.main import main
from my_driver import MyDriver
from nn_driver import FFNNDriver
if __name__ == '__main__':
main(FFNNDriver())
|
joramwessels/torcs-client
|
driver_support.py
|
import typing as t
import numpy as np
def get_gear(current_rpm, current_gear):
if current_rpm >= 8500:
current_gear += 1
if current_rpm <= 2000:
current_gear -= 1
if current_gear <= 0:
current_gear = 1
if current_gear >= 6:
current_gear = 6
return current_gear
def get_steer(target_pos, actual_pos, angle, epsilon=0.1):
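    # proportional controller: correct the heading by the lateral position
    # error (scaled by epsilon), then normalize by the 180-degree steering lock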
error = target_pos - actual_pos
angle = angle + error * epsilon
steer = angle/180
return steer
def get_accel(target_speed, actual_speed):
accel = 0
if (target_speed - actual_speed) > 0:
accel = (target_speed - actual_speed)/20
if target_speed - actual_speed > 20:
accel = 1
return accel
def get_break(target_speed, actual_speed):
brake = 0
if (target_speed - actual_speed) < 0:
brake = -(target_speed - actual_speed)/20
if target_speed - actual_speed < -20:
brake = 1
return brake
def map_to_gear(prediction):
# we represent the reverse gear as index 0
# then gear=1 is just index 1 of the list, etc.
index = np.argmax(prediction)
if index == 0:
return -1
else:
return index
def binerize_input(value, mapping):
value = str(value)
index = mapping[value]
classes = [0 for x in range(len(mapping))]
classes[index] = 1
return classes
def binerize_data_input(data, index, mapping):
new_data = []
for correct, state in data:
value = int(state[index])
        new_data.append((correct, state[:index] + binerize_input(value, mapping) + state[index + 1:]))
return new_data
def read_dataset_stear_speed(filename: str) -> t.Iterable[t.Tuple[t.List[float], t.List[float]]]:
with open(filename, "r") as f:
        next(f)  # skip the CSV header line
# acc, break, steer,
# 1.0, 0.0, -1.6378514771737686E-5,
# speed, pos, angle,
# -0.0379823,-5.61714E-5,4.30409E-4,
# distances... len(distances) = 19
# 5.00028, 5.0778, 5.32202, 5.77526, 6.52976, 7.78305, 10.008, 14.6372, 28.8659, 200.0,
# 28.7221, 14.6009, 9.99199, 7.7742, 6.52431, 5.77175, 5.31976, 5.07646, 4.99972
for line in f:
# we predict based on steer + speed
# based on pos + angle + distances
yield ([float(x) for x in line.strip().split(",")[2:4]], [float(x) for x in line.strip().split(",")[4:]])
def read_lliaw_dataset_gear_acc_bre_rpm_spe(filename: str) -> t.Iterable[t.Tuple[t.List[float], t.List[float]]]:
with open(filename, "r") as f:
for line in f:
# 0 1 2 3 4 5 6 7 8 9
# accel break steer angle curLapTime distFromStartLine distRaced fuel gear racepos
            # 10    11      12      13      14-32            33            34-59
# rpm speedx speedy speedz tracksensor1_19 distToMiddle oppSenso1_36
# y=gear, x=accel, break, rpm, speedx
line_values = [float(x) for x in line.strip().split(",")[:-1]]
yield ([line_values[8]], [line_values[0], line_values[1], line_values[10], line_values[11]])
def read_lliaw_dataset_gear_gear_rpm_spe(filename: str) -> t.Iterable[t.Tuple[t.List[float], t.List[float]]]:
with open(filename, "r") as f:
for line in f:
# 0 1 2 3 4 5 6 7 8 9
# accel break steer angle curLapTime distFromStartLine distRaced fuel gear racepos
            # 10    11      12      13      14-32            33            34-59
# rpm speedx speedy speedz tracksensor1_19 distToMiddle oppSenso1_36
# y=gear, x=gear, rpm
line_values = [float(x) for x in line.strip().split(",")[:-1]]
yield ([line_values[8]], [line_values[8], line_values[10]])
def read_lliaw_dataset_speed_angle_dist_middle(filename: str) -> t.Iterable[t.Tuple[t.List[float], t.List[float]]]:
with open(filename, "r") as f:
for line in f:
# 0 1 2 3 4 5 6 7 8 9
# accel break steer angle curLapTime distFromStartLine distRaced fuel gear racepos
# 10 11 12 13 14-32 33 34-59
# rpm speedx speedy speedz tracksensor1_19 distToMiddle oppSenso1_36
# y=speed, x=angle, distance*19, distToMiddle
line_values = [float(x) for x in line.strip().split(",")[:-1]]
yield ([line_values[11]], [line_values[3],
line_values[14], line_values[15], line_values[16],
line_values[17], line_values[18], line_values[19],
line_values[20], line_values[21], line_values[22],
line_values[23], line_values[24], line_values[25],
line_values[26], line_values[27], line_values[28],
line_values[29], line_values[30], line_values[31], line_values[32],
line_values[33]])
def read_lliaw_dataset_steer_angle_speed_dist_middle(filename: str) -> t.Iterable[t.Tuple[t.List[float], t.List[float]]]:
with open(filename, "r") as f:
for line in f:
# 0 1 2 3 4 5 6 7 8 9
# accel break steer angle curLapTime distFromStartLine distRaced fuel gear racepos
# 10 11 12 13 14-32 33 34-59
# rpm speedx speedy speedz tracksensor1_19 distToMiddle oppSenso1_36
# y=steer, x=angle, speed, distance*19, distToMiddle
line_values = [float(x) for x in line.strip().split(",")[:-1]]
yield ([line_values[2]], [line_values[3], line_values[11],
line_values[14], line_values[15], line_values[16],
line_values[17], line_values[18], line_values[19],
line_values[20], line_values[21], line_values[22],
line_values[23], line_values[24], line_values[25],
line_values[26], line_values[27], line_values[28],
line_values[29], line_values[30], line_values[31], line_values[32],
line_values[33]])
def read_lliaw_dataset_acc_bre_steer_bunch(filename: str) -> t.Iterable[t.Tuple[t.List[float], t.List[float]]]:
with open(filename, "r") as f:
for line in f:
# 0 1 2 3 4 5 6 7 8 9
# accel break steer angle curLapTime distFromStartLine distRaced fuel gear racepos
# 10 11 12 13 14-32 33 34-59
# rpm speedx speedy speedz tracksensor1_19 distToMiddle oppSenso1_36
# y=accel, break, stear, x=angle, speed, distance*19, distToMiddle
line_values = [float(x) for x in line.strip().split(",")[:-1]]
yield ([line_values[0],line_values[1], line_values[2]], [line_values[3], line_values[11],
line_values[14], line_values[15], line_values[16],
line_values[17], line_values[18], line_values[19],
line_values[20], line_values[21], line_values[22],
line_values[23], line_values[24], line_values[25],
line_values[26], line_values[27], line_values[28],
line_values[29], line_values[30], line_values[31], line_values[32],
line_values[33]])
def read_lliaw_dataset_acc_br_stee_ge(filename: str) -> t.Iterable[t.Tuple[t.List[float], t.List[float]]]:
with open(filename, "r") as f:
# accel break steer angle curLapTime distFromStartLine distRaced fuel gear racepos
# rpm speedx speedy speedz tracksensor1_19 distToMiddle
for line in f:
            # y = accel, brake, steer, gear (columns 0-2 and 8)
            # x = angle through distToMiddle (columns 3-33)
yield ([float(x) for x in line.strip().split(",")[0:3] + line.strip().split(",")[8:9]], [float(x) for x in line.strip().split(",")[3:34]])
def read_dataset_acc_break_steer(filename: str) -> t.Iterable[t.Tuple[t.List[float], t.List[float]]]:
with open(filename, "r") as f:
        next(f)  # skip the CSV header line
# acc, break, steer,
# 1.0, 0.0, -1.6378514771737686E-5,
# speed, pos, angle,
# -0.0379823,-5.61714E-5,4.30409E-4,
# distances... len(distances) = 19
# 5.00028, 5.0778, 5.32202, 5.77526, 6.52976, 7.78305, 10.008, 14.6372, 28.8659, 200.0,
# 28.7221, 14.6009, 9.99199, 7.7742, 6.52431, 5.77175, 5.31976, 5.07646, 4.99972
for line in f:
            # we predict acc + break + steer
            # based on speed + pos + angle + distances
yield ([float(x) for x in line.strip().split(",")[0:3]], [float(x) for x in line.strip().split(",")[3:]])
|
joramwessels/torcs-client
|
train.py
|
#! /usr/bin/env python3
from pytocl.main import main
from combined_driver import Final_Driver
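# NOTE: the REPLACE_* tokens below are template placeholders; presumably
# run_evaluation.sh substitutes them with concrete values before each race
# (see write_and_run in run_torcs.py), so this file is not runnable as-is.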
steering = [REPLACE_STEERING]
max_speed = REPLACE_MAX_SPEED
if __name__ == '__main__':
main(Final_Driver(steering, max_speed))
|
joramwessels/torcs-client
|
ffnn_driver.py
|
from pytocl.driver import Driver
from pytocl.car import State, Command
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import time
import sys
from os import listdir
from os.path import isfile, join
import driver_support
class FFNN_Driver(Driver):
def __init__(self, hidden_dimension, in_file):
super(FFNN_Driver, self).__init__()
self.model = FFNN(hidden_dimension)
self.model.load_state_dict(torch.load(in_file))
self.last_command = Command()
self.last_command.accelerator = 1
self.last_command.brake = 0
self.last_command.steering = 0
self.last_command.gear = 1
self.sensor_data = []
def drive(self, carstate: State) -> Command:
# translate carstate to tensor for NN
x_in = Variable(carstate_to_tensor(carstate))
        # predict accelerator, brake, steering, and gear from the current state
acc_break_steer_prediction = self.model(x_in).data
print(acc_break_steer_prediction)
        # copy the raw predictions into the command
command = Command()
command.accelerator = acc_break_steer_prediction[0]
command.brake = acc_break_steer_prediction[1]
command.steering = acc_break_steer_prediction[2]
        # the network regresses gear as a float; snap it to the nearest integer
        command.gear = int(round(acc_break_steer_prediction[3]))
self.last_command = command
return command
def carstate_to_tensor(carstate: State) -> torch.FloatTensor:
# angle curLapTime distFromStartLine distRaced fuel gear racepos
# rpm speedx speedy speedz tracksensor1_19 distToMiddle
return torch.FloatTensor([carstate.angle,
carstate.current_lap_time,
carstate.distance_from_start,
carstate.distance_raced,
carstate.fuel,
carstate.gear,
carstate.race_position,
carstate.rpm,
carstate.speed_x,
carstate.speed_y,
carstate.speed_z] +
list(carstate.distances_from_edge) +
[carstate.distance_from_center]
)
class FFNN(nn.Module):
def __init__(self, hidden_dimension):
super(FFNN, self).__init__()
n_states = 31 # see above
n_actions = 4 # acc + break + steering + gear
self.layer_1 = nn.Linear(n_states, hidden_dimension)
self.non_lin = nn.Sigmoid()
self.layer_2 = nn.Linear(hidden_dimension, n_actions)
def forward(self, inputs):
out = self.layer_1(inputs)
out = self.non_lin(out)
out = self.layer_2(out)
return out
def create_model(out_file, training_folder, learning_rate, epochs, hidden_dimension):
epochs = int(epochs)
learning_rate = float(learning_rate)
hidden_dimension = int(hidden_dimension)
# Read in the data
training = []
for file_in in [join(training_folder, f) for f in listdir(training_folder) if isfile(join(training_folder, f))]:
training += list(driver_support.read_lliaw_dataset_acc_br_stee_ge(file_in))
model = FFNN(hidden_dimension)
print(model)
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# I'm not sure about this loss...
loss = nn.MSELoss()
for ITER in range(epochs):
train_loss = 0.0
start = time.time()
for y_true, state in training:
# forward pass
in_state = Variable(torch.FloatTensor(state))
y_pred = model(in_state)
y_true = Variable(torch.FloatTensor(y_true))
output = loss(y_pred, y_true)
train_loss += output.data[0]
# backward pass
optimizer.zero_grad()
output.backward()
# update weights
optimizer.step()
print("last prediction made:", y_pred, y_true)
print("iter %r: train loss/action=%.4f, time=%.2fs" %(ITER, train_loss/len(training), time.time()-start))
torch.save(model.state_dict(), out_file)
def main():
create_model(*sys.argv[1:])
if __name__ == "__main__":
main()
|
joramwessels/torcs-client
|
swarm.py
|
# file: swarm.py
# author: <NAME>
# date: 05-12-2017
# dependencies: numpy
# description:
# Handles the swarm technology behind the autonomous TORCS driver.
# usage:
#   Assuming the variables *pos* (the distance traveled in m),
#   *spd*, *crashed*, and *contact*,
#
#       if pos % trail.pos_int == 0:
# max_speed = trail.check_in(pos, spd, crashed, contact)
# crashed, contact = False, False
#
from sys import stderr
from collections import defaultdict
from numbers import Real
from os.path import isfile
from os import remove
import numpy as np
SEP = ',' # the separator used in the feromone trail files
NAME = '.feromones' # the basic filename for feromone trail files
NOMAX = 10000 # returned when an error occurs
# - TODO unit test: back_prop, report_result, check_in
# - TODO if track is known, or if feromone trail looks like a known track, switch to known trail
# - NOTE if they drive close behind each other, they'll always explore the same max_speeds
# - NOTE initializing a FeromoneTrail will not read the previous .feromone entries
# - NOTE back_prop will stop at the finish line, since the length of the track isn't known
class FeromoneTrail:
def __init__(self, pos_int, spd_int, spd0, spd_n, expl_int, glb_max,
track_id=None):
""" FeromoneTrail keeps track of-, and syncs the feromone trail
A FeromoneTrail contains a table of known feromones and syncs to
a .feromone file in storage to communicate with the swarm. The
initialization requires a position interval in meters at which
to check and update the feromone trail, a grid of possible
max_speed values to explore, an exploration interval to increase
the max_speed with when no negative experiences are known, and
a global maximum speed to default to when there are no positive
experiences. The resulting max_speeds can be lower than the
global maximum when this speed resulted in a negative experience.
Make sure to choose your speed grid big enough to harbor any speed
category you want to learn.
Args:
pos_int: The interval at which to check feromones in m (int)
spd_int: The interval between speed boxes in km/h (int)
spd0: The first speed box in km/h (int)
spd_n: The amount of speed boxes (int)
expl_int: The jump interval (dividable by spd_int) in km/h (int)
glb_max: The global max speed that ensures a finish in km/h (int)
track_id: The name of the race track if known
"""
self.pos_int = int(pos_int)
self.spd_int = int(spd_int)
self.spd0 = int(spd0)
self.spd_n = int(spd_n)
self.spd_max = (spd0 + spd_n * spd_int) - spd_int
self.expl_int = int(expl_int)
self.glb_max = int(glb_max)
self.prev_pos = 0
self.prev_spd = 0
self.filename = NAME + '_' + track_id if track_id else NAME
        self.table = defaultdict(lambda: np.zeros(spd_n))
        # start from a clean trail file before seeding the initial feromone
        if isfile(self.filename):
            remove(self.filename)
        self.leave_feromone(0, 0, 0)
    def __str__(self):
""" Casts the feromone trail table to a string representation """
return self.__repr__()
def __repr__(self):
""" Casts the feromone trail table to a string representation """
i = 0
speeds = [str(self.to_speed(i)) for i in range(self.spd_n)]
string = "\t " + ' '.join(speeds) + " km/h" + '\n'
while str(i) in self.table:
string += str(i) + ':\t' + str(self.table[str(i)]) + '\n'
i += self.pos_int
string += "m\n"
return string
def to_index(self, spd):
""" Converts absolute speed to table index """
return int((spd - self.spd0) // self.spd_int)
def to_speed(self, ind):
""" Converts table index to absolute speed """
return self.spd0 + ind * self.spd_int
def is_on_grid(self, spd):
""" Returns True if speed value is on the speed grid """
        return self.spd0 <= spd <= self.spd_max
def write_feromone(self, pos, speed, val):
""" Writes a new feromone to the .feromone file
Args:
pos: The position on the track, CurLapTime (int)
speed: The speed that has been tested (int)
val: The result of the test (-1, 0, 1)
"""
file = open(self.filename, 'a')
file.write('\n' + SEP.join([str(pos), str(speed), str(val)]))
file.close()
def read_feromone(self):
""" Reads the last feromones and updates it if they're new
Returns:
List of [pos, speed, val] lists if there are any
"""
file = open(self.filename, 'r')
contents = file.readlines()
file.close()
        i = 1
        changes = []
        while i <= len(contents):
            if contents[-i].strip() == '':
                i += 1
                continue
            feromone = [int(s) for s in contents[-i].strip().split(SEP)]
            if feromone == self.last_change: break
            changes.append(feromone)
            i += 1
if changes: self.last_change = changes[0]
return changes
def update_table(self, pos, spd, val):
""" Updates a newly received feromone in the table
Args:
pos: The position on the track, CurLapTime (int)
spd: The speed that has been tested (int)
val: The result of the test (-1, 0, 1)
"""
index = self.to_index(spd)
if val == -1:
for i in range(index, self.to_index(self.spd_max) +1):
self.table[str(pos)][i] = -1
elif val == 1:
for i in range(index, -1, -1):
self.table[str(pos)][i] = 1
def next_experiment(self, pos):
""" Checks the table for the next best max speed experiment
Returns the ideal next max speed to try out, regardless
of the current speed of the car.
Args:
pos: The position on the track, CurLapTime (int)
Returns:
The next best max speed value (int)
"""
row = self.table[str(pos)]
i1 = find_first(row, 1, rev=True)
i2 = find_first(row, -1)
i_glb_max = self.to_index(self.glb_max)
# if there are no occurences of + above glb_max
if i1 == -1 or (i1 < i_glb_max and not row[i_glb_max] == -1):
if row[i_glb_max] == -1:
i1 = i2 - 1 # last 0 before first -
else:
i1 = i_glb_max # resort to global max
# exploring, value in between known values, or safe value
if i2 == -1:
spd = min(self.spd_max, self.to_speed(i1) + self.expl_int)
index = self.to_index(spd)
else:
index = i1 + (i2 - i1) // 2
return index * self.spd_int + self.spd0
def leave_feromone(self, pos, spd, val):
""" Updates the table and writes the new feromone to the file
If an off-grid pos value is passed,
it defaults to the last on-grid value
Args:
pos: The position on the track, CurLapTime (int)
spd: The speed that has been tested (int)
val: The result of the test (-1, 0, 1)
"""
self.last_change = [pos, spd, val]
self.update_table(pos, spd, val)
self.write_feromone(pos, spd, val)
def back_prop(self, pos, max_spd):
""" Updates previous frames to anticipate this failed *max_spd*
Args:
pos: The position on the track, CurLapTime (int)
max_spd: The max speed that has failed (int)
"""
while max_spd < self.spd_max and pos > -1:
first_minus = find_first(self.table[str(pos)], -1)
if self.to_index(max_spd) >= first_minus and first_minus > -1:
break
self.leave_feromone(pos, max_spd, -1)
max_spd = int(breakable_speed(max_spd, self.pos_int))
max_spd -= int(max_spd % self.spd_int)
pos -= self.pos_int
def get_max_speed(self, pos):
""" Updates the feromone table and returns the next max speed
If an off-grid pos value is passed,
it defaults to the next on-grid value
Args:
pos: The position on the track, CurLapTime (int)
Returns:
The next best max speed value (int)
"""
if not pos % self.pos_int == 0:
err("SWARM WARNING: Invalid position:", pos)
pos += self.pos_int - (pos % self.pos_int)
err(" Defaulted to", pos)
change = self.read_feromone()
while change:
ppos, speed, val = change.pop()
self.update_table(ppos, speed, val)
max_speed = self.next_experiment(pos)
return max_speed
def report_result(self, pos, spd, val):
""" Updates the feromone trail with the new information
Args:
pos: The position on the track, CurLapTime (int)
spd: The current speed of the car (int)
val: The result of the experiment (-1, 0, 1)
"""
spd -= 1 # such that 160 falls into the category with max_spd=160
max_spd = spd - (spd % self.spd_int) + self.spd_int
spd_i = self.to_index(max_spd)
if val == -1:
self.back_prop(pos, max_spd)
elif not self.table[str(pos)][spd_i] == val:
self.leave_feromone(pos, max_spd, val)
def check_in(self, pos, spd, crashed, contact):
""" Called at the start of ever frame to check/update feromones
Args:
pos: The position on the track, distTraveled in m (num)
spd: The current speed of the car in km/h (num)
crashed: Indicates a crash or off-track in last frame (bool)
contact: Indicates contact with another car in last frame (bool)
Returns:
The maximum speed for the next frame according to the swarm
"""
# input verification
if not isinstance(pos, Real):
err("SWARM ERROR: pos isn't a real number, but:", pos)
return NOMAX
if not isinstance(spd, Real):
err("SWARM ERROR: spd isn't a real number, but:", pos)
return NOMAX
if spd > self.spd_max:
err("SWARM WARNING: Speed is beyond speed grid:", spd)
err(" Swarm can't learn from this experience")
if not pos % self.pos_int == 0:
err("SWARM WARNING: Invalid position:", pos)
pos -= pos % self.pos_int
err(" Defaulted to: ", pos)
pos, spd = int(pos), int(spd)
# update
if self.is_on_grid(self.prev_spd):
if crashed and not contact:
self.report_result(self.prev_pos, self.prev_spd, -1)
elif not crashed and not contact:
self.report_result(self.prev_pos, self.prev_spd, 1)
# predict
self.prev_pos, self.prev_spd = pos, spd
max_speed = self.get_max_speed(pos)
return max_speed
def err(*args):
""" prints to standard error """
print(*args, file=stderr)
def find_first(array, val, rev=False):
""" Finds the first (or last) occurence of val in array
Args:
array: The numpy array to evaluate
val: The value to find
rev: If True, returns the last occurence of val
Returns:
The index of the first (or last) occurence of val
in array, or -1 if the value doesn't appear in array
"""
ar = np.array(list(array)) # copies the array
if rev:
ar = np.flip(ar, 0)
i = np.argmax(ar==val)
if i == 0 and not ar[0] == val:
return -1
if rev:
i = abs(i - len(ar) + 1)
return i
def breakable_speed(end_speed, trajectory):
""" Computes the max speed that can break to reach *end_speed*
Args:
end_speed: The speed at the end of the trajectory in km/h (num)
trajectory: The distance over which to descelerate in m (num)
Returns:
The maximum absolute speed at the beginning of the
trajectory that ensures a desceleration to *end_speed*
"""
# The car is about 5m long, it descelerated from 280 to 0
# in about 12-14 times its length, which would be 60-70m.
# Assuming a linear decrease in speed, the maximum rate
# of desceleration is therefore -280/65 = -4.31 km/h/m.
# To be safe, we use half that: -2.15
return trajectory * 2.15 + end_speed
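# A minimal smoke-test sketch (not part of the original interface): the grid
# and interval values below are illustrative assumptions, not tuned settings.
if __name__ == "__main__":
    trail = FeromoneTrail(pos_int=10, spd_int=10, spd0=10, spd_n=16,
                          expl_int=10, glb_max=120)
    print(trail.check_in(0, 0, False, False))    # first advice, pure exploration
    print(trail.check_in(10, 80, False, False))  # clean frame, grid still empty
    print(trail.check_in(20, 120, True, False))  # crash: 80 km/h at pos 10 gets a -1
    print(trail)                                 # dump the learned feromone table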
|
joramwessels/torcs-client
|
rnn.py
|
import torch.nn as nn
from torch.autograd import Variable
import torch
import torch.optim as optim
import argparse
import driver_support
import time
import sys
from os import listdir
from os.path import isfile, join
class RNNMove(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, input_dimension, hidden_dimension, output_dimension, nlayers, dropout=0.5, tie_weights=False):
super(RNNMove, self).__init__()
self.drop = nn.Dropout(dropout)
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
self.encoder = nn.Linear(input_dimension, hidden_dimension)
self.rnn = nn.RNN(hidden_dimension, hidden_dimension, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(hidden_dimension, output_dimension)
self.init_weights()
self.rnn_type = rnn_type
self.hidden_dimension = hidden_dimension
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
        # shape to (seq_len=1, batch=1, features) so the RNN accepts the input
        output = self.encoder(input).view(1, 1, -1)
output, hidden = self.rnn(output, hidden)
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
        return Variable(torch.zeros(self.nlayers, bsz, self.hidden_dimension))
def split_data_set(data_set, eval_perc=0.2):
total = len(data_set)
split = int(total*eval_perc)
    evaluate = data_set[:split]
    train = data_set[split:]
return train, evaluate
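# NOTE: prediction_to_action is used in the training loop below but was not
# defined in this file; this stand-in (an assumption) flattens the prediction
# Variable into a plain list for printing.
def prediction_to_action(y_pred):
    return [float(v) for v in y_pred.data.view(-1)]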
def create_model(out_file, training_folder, learning_rate, epochs, hidden_dimension):
# Read in the data
training = []
for file_in in [join(training_folder, f) for f in listdir(training_folder) if isfile(join(training_folder, f))]:
training += list(driver_support.read_lliaw_dataset_acc_bre_steer_bunch(file_in))
n_states = 22
n_actions = 3
n_layers = 1
model = RNNMove("RNN_RELU", n_states, hidden_dimension, n_actions, n_layers, dropout=0.5, tie_weights=False)
training, evalu = split_data_set(training)
print(model)
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
loss = nn.MSELoss()
for ITER in range(epochs):
train_loss = 0.0
start = time.time()
hidden = model.init_hidden(1)
for y_true, state in training:
optimizer.zero_grad()
in_state = Variable(torch.FloatTensor(state))
print(in_state, hidden)
y_pred, hidden = model(in_state, hidden)
y_true = Variable(torch.FloatTensor(y_true))
#print(y_true, prediction_to_action(y_pred))
output = loss(y_pred, y_true)
train_loss += output.data[0]
# backward pass
output.backward()
            # update weights
            optimizer.step()
            # detach the hidden state so the next sample doesn't backprop
            # through the entire history
            hidden = Variable(hidden.data)
print("last prediction made:pred={}, actual={}".format(prediction_to_action(y_pred), y_true))
print("iter %r: train loss/action=%.4f, time=%.2fs" %(ITER, train_loss/len(training), time.time()-start))
#evaluate(model, evalu)
torch.save(model.state_dict(), out_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int)
parser.add_argument('--hidden', type=int)
parser.add_argument('--learn', type=float)
parser.add_argument('--in_file', type=str)
parser.add_argument('--out_file', type=str)
args = parser.parse_args()
create_model(args.out_file, args.in_file, args.learn, args.epochs, args.hidden)
if __name__ == "__main__":
main()
|
joramwessels/torcs-client
|
crisis_driver.py
|
# file: crisis_driver.py
# author: <NAME>
# date: 07-12-2017
# dependencies: numpy, pytocl
# description
# usage
#
from pytocl.driver import Driver
from pytocl.car import State, Command
from sys import stderr
from numpy import sign
from driver_utils import *
# margins of error when trying the navigate_to_middle approach
CTR_ANG_MARGIN = 10
PRP_ANG_MARGIN = 10
# careful acceleration values during crisis handling
ACC = 0.1
REV_ACC = 0.2
OFF_ROAD_ACC = 0.1
OFF_ROAD_REV_ACC = 0.2
# timers (100 = 2 sec)
BAD_COUNTER_THRESHOLD = 100
BAD_COUNTER_MANUAL_THRESHOLD = 600
APPROACH_TIMEOUT = 1000
# cross-cycle trackers
MAX_ANGLES = 64 # amount of angles to keep track of
MAX_ACCELS = 16 # amount of cycles in which you should have moved
MAX_GEARS = MAX_ACCELS
GEAR_SHIFTING_TRANSITION_CYCLES = 5
# NOTE
# steer left = positive
# angle left = negative
# left of the road = positive
class CrisisDriver(Driver):
def __init__(self, logdata=True):
""" Handles difficult situations in which the car gets stuck
"""
super(CrisisDriver, self).__init__(logdata=logdata)
self.iter = 0
self.driver = Driver(logdata=False)
self.is_in_control = False
self.approaches = [self.navigate_to_middle, self.original_implementation]
self.previous_angles = []
self.previous_accels = []
self.previous_gears = []
        self.bad_counter = 0
        self.approach = 0
        self.appr_counter = 0
        self.needs_help = False
def drive(self, carstate):
""" Gets the car out of a difficult situation
Tries different approaches and gives each one a time out.
A class level variable keeps track of the current approach
across game cycles. If the timer runs out, the approach
is terminated and the next approach gets initiated.
Args:
carstate: All parameters packed in a State object
Returns:
command: The next move packed in a Command object
"""
command = Command()
command.accelerator = 0
self.appr_counter += 1
if self.appr_counter > APPROACH_TIMEOUT:
self.next_approach()
try:
command = self.approaches[self.approach](carstate, command)
self.previous_accels.append(command.accelerator)
self.previous_gears.append(command.gear)
except Exception as e:
err(self.iter, "ERROR:", str(e))
return command
def pass_control(self, carstate):
""" Initializes a new crisis that has not been handled yet
Args:
carstate: The original carstate
"""
err(self.iter,"CRISIS: control received")
self.is_in_control = True
self.approach = 0
self.appr_counter = 0
# check if car in front
# check if car behind
# check track angle and side of the road
# determine reverse or straight ahead
def return_control(self):
""" Passes control back to the main driver """
err(self.iter,"CRISIS: control returned")
self.is_in_control = False
self.needs_help = False
def next_approach(self):
""" Adjusts state to next approach """
self.approach += 1
self.appr_counter = 0
if self.approach >= len(self.approaches):
self.approach -= 1
self.return_control()
else:
err(self.iter,"CRISIS: next approach:",
self.approaches[self.approach].__name__)
def approach_succesful(self):
""" Called when a technique finished executing
"""
err(self.iter,"CRISIS: approach succesful:",
self.approaches[self.approach].__name__)
if self.has_problem:
self.next_approach()
else:
self.return_control()
def update_status(self, carstate):
""" Updates the status of the car regarding its problems
Args:
carstate: The full carstate
"""
self.iter += 1
if len(self.previous_angles) >= MAX_ANGLES:
self.previous_angles.pop(0)
self.previous_angles.append(carstate.angle)
if len(self.previous_accels) >= MAX_ACCELS:
self.previous_accels.pop(0)
self.previous_gears.append(sign(carstate.gear))
if len(self.previous_gears) >= MAX_GEARS:
self.previous_gears.pop(0)
self.is_on_left_side = sign(carstate.distance_from_center) == DFC_L
self.is_on_right_side = not self.is_on_left_side
self.faces_left = sign(carstate.angle) == ANG_L
self.faces_right = not self.faces_left
self.faces_front = abs(carstate.angle) < 90
self.faces_back = not self.faces_front
self.faces_middle = self.is_on_left_side == self.faces_right
self.is_standing_still = abs(carstate.speed_x) < 0.1
self.has_car_in_front = car_in_front(carstate.opponents)
self.has_car_behind = car_behind(carstate.opponents)
self.is_blocked = blocked(self.previous_accels,
self.previous_gears,
carstate.speed_x)
self.is_off_road = max(carstate.distances_from_edge) == -1
self.is_reversed = self.faces_back
self.is_going_in_circles = going_in_circles(self.previous_angles)
self.is_traveling_sideways = abs(carstate.speed_y) > 15
self.has_problem = self.is_off_road or \
self.is_going_in_circles or \
self.is_blocked or \
self.is_reversed or \
self.is_traveling_sideways
if self.has_problem:
self.bad_counter += 1
if self.bad_counter >= BAD_COUNTER_THRESHOLD:
if self.is_off_road:
debug(self.iter, " off road")
if self.is_going_in_circles:
debug(self.iter, " going in circles")
if self.is_blocked:
debug(self.iter, " blocked")
if self.is_reversed:
debug(self.iter, " reversed")
self.needs_help = True
else:
self.bad_counter = 0
self.needs_help = False
#
# Approach Implementations
#
def original_implementation(self, carstate, command):
"""
approach 0)
Args:
carstate: The full carstate as passed down by the server
command: The command to adjust
"""
if not self.is_off_road and abs(carstate.angle) < CTR_ANG_MARGIN:
self.approach_succesful()
command.gear, carstate.gear = 1, 1
command = self.driver.drive(carstate) # TODO is this legal?
is_stuck = abs(carstate.speed_x) <= 5 and carstate.current_lap_time >= 10
#if self.bad_counter >= BAD_COUNTER_MANUAL_THRESHOLD and is_stuck:
# we try reversing
# command.gear = -command.gear
# if command.gear < 0:
# command.steering = -command.steering
# command.gear = -1
# self.bad_counter = 200
command.accelerator /= 2.0
return command
def navigate_to_middle(self, carstate, command):
""" Finds it way to the middle of the road by driving in reverse
approach 1) reverse towards the road, then once on the road,
reverse towards the the middle until facing foward
with an angle that's within the margin
Args:
carstate: The full carstate as passed down by the server
command: The command to adjust
"""
debug(self.iter,"CRISIS: navigate_to_middle")
dfc = carstate.distance_from_center
if self.is_off_road:
dist_spd = 2 if abs(dfc) > 10 else 1
perp_angle = 90 * sign(dfc)
if self.faces_middle:
debug(self.iter," off road and facing road")
diff_with_perp_angle = perp_angle - carstate.angle
if not abs(diff_with_perp_angle) < PRP_ANG_MARGIN:
command.steering = sign(diff_with_perp_angle) * STR_R
command.gear = 1
command.accelerator = OFF_ROAD_ACC * dist_spd
else:
debug(self.iter," off road and not facing road")
diff_with_perp_angle = perp_angle + carstate.angle
if not abs(diff_with_perp_angle) < PRP_ANG_MARGIN:
command.steering = sign(diff_with_perp_angle) * STR_R
command.gear = -1
command.accelerator = OFF_ROAD_REV_ACC * dist_spd
else:
if abs(carstate.angle) < CTR_ANG_MARGIN:
self.approach_succesful()
else:
if self.faces_middle or abs(carstate.angle) < 50: # TODO globalize
debug(self.iter," on road facing middle")
command.steering = to_ang(carstate.angle)
command.gear = 1
command.accelerator = ACC
elif abs(dfc) > 0.5:
debug(self.iter," on road not facing middle")
command.steering = away_from_ang(carstate.angle)
command.gear = -1
command.accelerator = REV_ACC
if self.is_blocked:
command.gear = 1
command.accelerator *= 2
debug(self.iter, "ang=%.2f, spd=%.2f, dfc=%.2f"
%(carstate.angle, carstate.speed_x, carstate.distance_from_center))
debug(self.iter, "ste=%.2f, acc=%.2f, gea=%.2f"
%(command.steering, command.accelerator, command.gear))
# when spinning out of control, don't do anything
if self.is_traveling_sideways or self.is_going_in_circles:
            command.accelerator = 0
gstc = GEAR_SHIFTING_TRANSITION_CYCLES
# braking when shifting gear so it doesn't continue its course
if any(not g == command.gear for g in self.previous_gears[-gstc:]):
command.brake = 1
            command.accelerator = 0
# faster acceleration after having shifted gears
elif any(not g == command.gear for g in self.previous_gears[-gstc*2:-gstc]):
command.brake = 0
            command.accelerator = 1.0
return command
#
# Problem Detectors
#
def car_in_front(opp):
""" Returns True if there's a car in front of ours """
    return any([o < 1 for o in opp[17:19]]) # TODO are these the 20deg slices in front?
def car_behind(opp):
""" Returns True if there's a car behind ours """
    return any([o < 1 for o in [opp[0], opp[35]]]) # TODO and these in the back?
def going_in_circles(angles):
""" Returns True if the car is rapidly going round in circles """
# TODO checks if there is a pattern in the angles that
# constitutes rotation. Take care of skip from 180 to -180.
return False
def blocked(accels, gears, speed):
""" Returns True if the car is blocked by something """
same_gear = all([g > 0 for g in gears]) or all([g < 0 for g in gears])
been_accel = all([a > 0 for a in accels])
return been_accel and same_gear and abs(speed) < 5
|
joramwessels/torcs-client
|
empty_driver.py
|
from pytocl.driver import Driver
from pytocl.car import State, Command
class EmptyDriver(Driver):
...
|
joramwessels/torcs-client
|
genetic_approach.py
|
import numpy as np
import random
import run_torcs
import matplotlib.pyplot as plt
from multiprocessing import TimeoutError
DATA_FILE = "gen.data"
PLOT_NAME = "evolutionary.png"
ranges = [(-5,5,float),(-5,5,float),(-5,5,float),(-5,5,float),(-5,5,float),(10,360,int)]
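# gene layout: five steering coefficients, each in [-5, 5], followed by an
# integer max speed in [10, 360] (cf. evaluate() and print_gene() below)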
def mutate_small(gene, ranges, mutation_probability=0.1, increment=0.01):
new_gene = []
for index, feature in enumerate(gene):
new_feature = feature
# with probability we mutate
if np.random.random_sample() >= (1 - mutation_probability):
total_range = ranges[index][1] - ranges[index][0]
change = increment * total_range
sign = 1
if np.random.random_sample() >= 0.5:
sign = -1
change *= sign
new_feature += change
# if we are at the maximum, we don't go over it
if new_feature > ranges[index][1]:
new_feature = ranges[index][1]
elif new_feature < ranges[index][0]:
new_feature = ranges[index][0]
new_gene.append(ranges[index][2](new_feature))
return new_gene
def mutate_random(gene, ranges, mutation_probability=0.05):
new_gene = []
for index, feature in enumerate(gene):
new_feature = feature
# with probability we mutate
if np.random.random_sample() >= (1 - mutation_probability):
new_feature = np.random.uniform(ranges[index][0], ranges[index][1])
new_gene.append(ranges[index][2](new_feature))
return new_gene
def evaluate(gene):
evaluation = -99999999
client, server = run_torcs.run_on_ea_tracks('scr_server', steering_values=gene[:5], max_speed=int(gene[5]), timeout=5)
times = []
distances = []
for track_index, track in enumerate(client):
distance = run_torcs.get_distance_covered(track)
if distance == -1:
print("error, server=",server[track_index])
print("error, client=",client[track_index])
else:
distances.append(distance)
time_run = run_torcs.get_total_time_covered(track)
if time_run != -1:
times.append(time_run)
# if all runs failed, we just try again.
if len(times) == 0:
return evaluate(gene)
print(distances)
print(times)
evaluation = sum(list(map((lambda x, y: x/y), distances, times)))/len(client)
return evaluation
def select(population, evaluations, count):
surviving_parents = []
surv_fitness = []
for x in range(count):
index = np.argmax(np.array(evaluations))
# we make a copy of the gene
surviving_parents.append(population[index][:])
surv_fitness.append(evaluations[index])
del population[index]
del evaluations[index]
return surviving_parents, surv_fitness
def get_random_gene(ranges):
gene = []
for low, high,_ in ranges:
gene.append(np.random.uniform(low, high))
return gene
def terminate(max_generations, generation):
if generation > max_generations:
print("Maximum generation reached")
return True
# if maximum - average <= 1:
# print("Maximum and average are close")
# return True
# else:
return False
def print_generation_values(maximum, average, minimum):
print("maximum:", maximum)
print("average:", average)
print("minimum:", minimum)
def print_generation(number):
print("Generation={}".format(number))
def print_gene(gene, gene_index, fitness):
floats_adjusted = ", ".join(["%.2f"%x for x in gene[:5]])
print("gene_{}: {}, speed={}, fitness={}".format(gene_index, floats_adjusted, int(gene[5]), fitness))
def print_survivors(survivors, evaluations):
print("selecting survivors:")
for survivor_index, survivor in enumerate(survivors):
print_gene(survivor, survivor_index, evaluations[survivor_index])
def run_eval_only(population):
evaluation = [evaluate(gene) for gene in population]
for gene_index, gene in enumerate(population):
print_gene(gene, gene_index, evaluation[gene_index])
def main(population_size, ranges, max_generations=100, survivor_count=5, file_name=DATA_FILE):
population = []
# randomly initialized population
for index in range(population_size):
population.append(get_random_gene(ranges))
generation = 1
print("max_generations={}, population_size={}, survivor_count={}".format(max_generations, population_size, survivor_count))
with open(file_name, "+w") as f:
f.writelines("generation,max_fitness\n")
while not terminate(max_generations, generation):
print_generation(generation)
# evaluate population
evaluation = [evaluate(gene) for gene in population]
# write best gene value to file
f.writelines("{},{} \n".format(generation, max(evaluation)))
for gene_index, gene in enumerate(population):
print_gene(gene, gene_index, evaluation[gene_index])
# select changes the population and evaluation object
survivors, surv_fitness = select(population=population, evaluations=evaluation, count=survivor_count)
print_survivors(survivors=survivors, evaluations=surv_fitness)
population = []
for survivor in survivors:
population.append(survivor)
for child_i in range(0, int(population_size/survivor_count) - 1):
changed = False
while not changed:
new_gene = mutate_small(gene=survivor, ranges=ranges, mutation_probability=0.1, increment=0.01)
new_gene = mutate_random(gene=new_gene, ranges=ranges, mutation_probability=0.05)
if new_gene != survivor:
changed = True
population.append(new_gene)
generation += 1
def plot(filename, plot_name):
with open(filename) as fl:
xs = []
ys = []
next(fl)
for line in fl:
x, y = line.strip().split(",")
xs.append(int(x))
ys.append(float(y))
plt.plot(xs, ys)
#plt.show()
plt.savefig(plot_name)
if __name__ == "__main__":
# run_eval_only([[0.21, 1.56, 0.68, 0.53, 1.25, 120], \
# [0.75, 0.75, 0, 0, 1.5, 120], \
# [0.19, 1.56, 0.68, 0.53, 1.25, 120], \
# [0.23, 1.56, 0.68, 0.53, 1.25, 120], \
# [0.23, 1.56, 0.68, 0.53, 1.25, 110], \
# [0.60, 0.80, 0.1, 0.2, 1.5, 120]])
plot(DATA_FILE, PLOT_NAME)
#main(population_size=20, ranges=ranges, max_generations=100, survivor_count=4)
|
joramwessels/torcs-client
|
mlp.py
|
# file: mlp.py
# author: <NAME>
# source: http://pytorch.org/tutorials/beginner/
# date: 15-11-2017
# description:
# Trains a fully connected feed forward neural network with
# a variable amount of units and layers. The model can be
# trained using CUDA, saved, loaded, and used without CUDA
# by calling model.predict([v1, v2, ..., vn])
# TODO Every training batch shows the same sequence of losses
# There's no improvement among batches
import sys, os
import torch
from torch.autograd import Variable
learning_rate = 5e-7
epochs = 10
layers = 10
units = 10
allow_cuda = False
use_cuda = torch.cuda.is_available() and allow_cuda
class MLP(torch.nn.Module):
def __init__(self, D_inp, D_hid, D_out, layers, x_scale, y_scale):
""" Multilayer Perceptron with a variable amount of layers
Args:
D_inp: Dimension of the input layer
D_hid: Dimension of all hidden layers
D_out: Dimension of the output layer
layers: The total amount of layers (2 means 1 hidden layer)
x_scale: The maximum values for all input variables
y_scale: The maximum values for all target variables
"""
super(MLP, self).__init__()
self.input_layer = torch.nn.Linear(D_inp, D_hid)
self.hidden_layer = torch.nn.Linear(D_hid, D_hid)
self.output_layer = torch.nn.Linear(D_hid, D_out)
self.layers = layers
self.sigmoid = torch.nn.Sigmoid()
self.x_scale = x_scale.cuda() if use_cuda else x_scale
self.y_scale = y_scale.cuda() if use_cuda else y_scale
def forward(self, x):
""" Forward propagation: call to predict using current weights
Args:
x: Input tensor as a 'D_inp'-dimensional torch.autograd.Variable
Returns:
The predicted target value given this input
"""
h = self.input_layer(x)
#h = self.sigmoid(h)
for _ in range(self.layers-1):
h = self.hidden_layer(h)
#h = self.sigmoid(h)
y_pred = self.output_layer(h)
return y_pred.cuda() if use_cuda else y_pred
def predict(self, x):
""" Predicts a single variable, given as a list
Args:
x: The input variables, given as a normal list
Returns:
The prediction as a list
"""
x_var = Variable(torch.FloatTensor([x]), requires_grad=False)
if use_cuda: x_norm = x_var.cuda() / self.x_scale
else: x_norm = x_var / self.x_scale
pred_var = self.forward(x_norm)[0]
pred_var = pred_var * self.y_scale
return [p.data[0] for p in pred_var]
def train_model(x, y, metaparams):
""" Trains a fully connected feed forward network
Args:
x: An autograd Variable with inputs (batch, n_samples, n_var)
y: An autograd Variable with targets (batch, n_samples, n_var)
metaparams: A dictionary including the fields
d_inp: Dimension of the input layer
d_hid: Dimension of all hidden layers
d_out: Dimension of the output layer
layers: The total amount of layers (2 means 1 hidden layer)
x_max: The maximum values for all input variables
y_max: The maximum values for all target variables
Returns:
A torch model object
"""
model = MLP(metaparams['d_inp'], metaparams['d_hid'],
metaparams['d_out'], metaparams['layers'],
metaparams['x_max'], metaparams['y_max'])
if use_cuda: model = model.cuda()
criterion = torch.nn.MSELoss(size_average=False)
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
for t in range(epochs):
print("Batch",t)
for b in range(len(x)):
# Forward
y_pred = model(x[b])
loss = criterion(y_pred, y[b])
#print(t, loss.data[0])
print("Training loss:",loss.data[0])
# Backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
return model
def read_file(filename, tgt_ind, inp_ind, sep=',', skip_first_line=False):
""" Reads the CSV training data
Args:
filename: The name of the CSV data file
tgt_ind: A list of indices indicating the targets
inp_ind: A list of indices indicating the inputs
sep: The separator used in the CSV
skip_first_line: Boolean indicating labels on the first line
Returns:
(x, y) tuple with 2D torch autograd Variables
"""
x = []
y = []
with open(filename) as file:
if skip_first_line: next(file)
for line in file:
clean_line = line.strip().split(sep)
if len(clean_line) > 1:
params = [float(s) for s in clean_line if not s == '']
x.append([params[i] for i in inp_ind])
y.append([params[i] for i in tgt_ind])
x = Variable(torch.FloatTensor(x))
y = Variable(torch.FloatTensor(y), requires_grad=False)
return (x.cuda(), y.cuda()) if use_cuda else (x, y)
def read_all_files(folder, tgt_ind, inp_ind):
""" Reads every file into a batch of the data
Args:
folder: The path to the folder with the data files
tgt_ind: A list of indices indicating the targets
inp_ind: A list of indices indicating the inputs
Returns:
Two lists of autograd Variables (batch, n_samples, n_var)
"""
xs, ys = [], []
for f in os.listdir(folder):
file = os.path.join(folder, f)
if os.path.isfile(file):
x, y = read_file(file, tgt_ind, inp_ind)
xs.append(x)
ys.append(y)
return xs, ys
def load_model(filename, cuda=False):
""" Loads a model from just the filename
Args:
filename: The name of the save file
cuda: Set to True to make predictions use CUDA
Returns:
model
"""
global use_cuda
use_cuda = cuda
metaparams = torch.load(filename + ".meta")
model = MLP(metaparams['d_inp'], metaparams['d_hid'],
metaparams['d_out'], metaparams['layers'],
metaparams['x_max'], metaparams['y_max'])
model.load_state_dict(torch.load(filename, map_location=lambda storage, loc: storage))
return model.cuda() if use_cuda else model
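# Typical usage sketch (the filename here is illustrative; the feature order
# follows the __main__ block below: angle, speed_x, the 19 track sensors,
# and the distance to the middle of the road):
#   model = load_model("steering.model")
#   steer = model.predict([angle, speed_x] + track_sensors + [dist_to_middle])[0]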
def normalize(x, y, metaparams):
""" Normalizes the data by dividing all variables by their max
Args:
x: The x data (batch, n_samples, n_vars)
y: The y data (batch, n_samples, n_vars)
metaparams: The dictionary of metaparameters containing the max
Returns:
A tuple with the normalized x, y Variables
"""
x_max, y_max = metaparams['x_max'], metaparams['y_max']
if use_cuda: x_max, y_max = x_max.cuda(), y_max.cuda()
y_max[y_max==0] = 1 # to prevent
x_max[x_max==0] = 1 # division by zero
for i in range(len(x)):
for j in range(len(x[i])):
x[i][j] = torch.div(x[i][j], x_max)
y[i][j] = torch.div(y[i][j], y_max)
return x, y
def find_max(x):
""" Finds the maximum value for each of the variables
Args:
x: The list of batches as autograd Variables
Returns:
An autograd Variable the size of a single datapoint
"""
x_max = Variable(torch.ones(len(x), len(x[0][0])))
for b in range(len(x)):
x_max[b] = torch.max(x[b], 0)[0].data
max_v = torch.max(x_max, 0)[0]
return max_v
def test_on_train_set(model, x, max_pred=500):
"""
"""
for i in range(min(len(x), max_pred)):
pred_y = list(model.predict(list(x[i].data)).data)
print(i,"ang:%.2f, ste: %.2f"
%(x[0].data, pred_y[0]), end='\n')
def main(folder, save_as, targets, inputs):
x, y = read_all_files(folder, targets, inputs)
print("Read %i batches" %len(x))
metaparams = {'d_inp':len(x[0][0]), 'd_hid':units,
'd_out':len(y[0][0]), 'layers':layers,
'x_max':find_max(x), 'y_max':find_max(y)}
print("Normalizing data...")
x, y = normalize(x, y, metaparams)
model = train_model(x[:12], y[:12], metaparams)
print("Trained model for %i epochs" %epochs)
# Always save as CPU model, cast to CUDA while loading if required
torch.save(metaparams, save_as + ".meta")
torch.save(model.float().state_dict(), save_as)
print("Model saved as",save_as)
#test_on_train_set(model, x[0])
return model
if __name__ == "__main__":
# targets: steerCmd
targets = [2]
# inputs: angle, speed_x, trackSens(0-18), distToMiddle
inputs = [3] + [11] + list(range(14, 34))
model = main(sys.argv[1], sys.argv[2], targets, inputs)
data_folder = "C:/Users/Joram/Documents/Studie/torcs-client/train_single/"
|
joramwessels/torcs-client
|
run_torcs.py
|
import argparse
import xml.etree.ElementTree as ET
import multiprocessing
import subprocess
from pytocl.main import main
from combined_driver import Final_Driver
import os
import signal
import time
parser = argparse.ArgumentParser()
parser.add_argument('--drivers', nargs='+', type=str)
parser.add_argument('--tracks', nargs='+', type=str)
parser.add_argument('--length', type=int)
parser.add_argument('--laps', type=int)
args = parser.parse_args()
drivers = args.drivers
tracks = args.tracks
length = args.length
laps = args.laps
file_in = "quickrace.xml"
our_driver = ["src_server 1"]
all_drivers = ["berniw", "berniw3", "damned", "inferno", "lliaw", "tita", "berniw2", "bt", "inferno2", "olethros", "sparkle"]
best_drivers = ["lliaw", "inferno", "olethros", "tita"]
best_driver = ["lliaw"]
all_tracks = ["aalborg", "alpine-1", "alpine-2", "brondehach", "corkscrew", "e-track-1", "e-track-2", "e-track-3", "e-track-4", "e-track-6", "eroad", "forza", "g-track-1", "g-track-2", "g-track-3", "ole-road-1", "ruudskogen", "spring", "street-1", "wheel-1", "wheel-2"]
# if not tracks or tracks[0] == "all":
# tracks = all_tracks
#
# if drivers[0] == "best":
# drivers = best_driver
# elif drivers[0] == "our":
# drivers = our_driver
def set_drivers(root, drivers, port_offset):
xml_drivers = root[4] #Drivers section
del xml_drivers[3] #a specific driver definition
for index, driver in enumerate(drivers):
ele = ET.Element("section", attrib={"name": str(index + 1)})
ele.append(ET.Element("attnum", attrib={"name": "idx", "val": str(port_offset)}))
ele.append(ET.Element("attstr", attrib={"name": "module", "val": driver}))
xml_drivers.append(ele)
def set_track(root, track):
xml_track = root[1]
xml_track[1][0].attrib["val"] = track
def write_and_run(tree, track, steering_values, max_speed, timeout, port):
file_out = "tmp.quickrace.xml"
# Write back to file
tree.write(file_out)
# Run TORCS
# translate results
# fitness = -running time*100 + distance done
# print out result fitness
steering_values = ", ".join([str(x) for x in steering_values])
command = "./run_evaluation.sh '{}' {} {} {}".format(steering_values, max_speed, timeout, port)
#["./run_evaluation.sh", steering_values, str(max_speed), str(timeout)]
completed_command = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE)
pids = list(completed_command.stdout.decode("utf-8").strip().split())
    try:
        os.kill(int(pids[0]), signal.SIGTERM)
    except (IndexError, ValueError, ProcessLookupError):
        pass
    try:
        os.kill(int(pids[1]), signal.SIGTERM)
    except (IndexError, ValueError, ProcessLookupError):
        pass
return completed_command
def run_on_ea_tracks(driver, steering_values, max_speed, timeout):
return run_on_tracks(driver, ["ole-road-1", "alpine-1", "forza"], steering_values, max_speed, timeout)
def run_on_all_tracks(driver, steering_values, max_speed, timeout):
return run_on_tracks(driver, all_tracks, steering_values, max_speed, timeout)
def run_on_tracks(driver, tracks, steering_values, max_speed, timeout):
# Open original file
tree = ET.parse(file_in)
root = tree.getroot()
client = []
server = []
server_temp_file = "server.out"
client_temp_file = "client.out"
base_port = 3001
port_offset = 0
for track in tracks:
port = base_port + port_offset
set_drivers(root, [driver], port_offset)
if os.path.isfile(server_temp_file):
os.remove(server_temp_file)
if os.path.isfile(client_temp_file):
os.remove(client_temp_file)
time.sleep(1)
set_track(root, track)
completed_command = write_and_run(tree, track, steering_values, max_speed, timeout, port)
with open("server.out") as f:
server.append(f.readlines())
with open("client.out") as f:
client.append(list(f.readlines()))
time.sleep(1)
# we cycle through the ports
port_offset += 1
if base_port + port_offset > 3009:
port_offset = 0
return client, server
def get_distance_covered(client_out):
distance = -1
for index in range(len(client_out) - 1, 0, -1):
if "dist" in client_out[index]:
distance = int(float(client_out[index].strip().split()[2].split("=")[1]))
break
return distance
def get_total_time_covered(client_out):
time = -1
for index in range(len(client_out) - 1, 0, -1):
if "time" in client_out[index]:
time = int(float(client_out[index].strip().split()[3].split("=")[1]))
break
if time == -1:
print(client_out)
return time
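# Illustrative sketch (not in the original file): combining the two parsers above
# into the fitness that write_and_run's comment describes
# ("fitness = -running time*100 + distance done").
def get_fitness(client_out):
    """Fitness of a run: distance covered minus 100x the total running time."""
    return get_distance_covered(client_out) - 100 * get_total_time_covered(client_out)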
|
joramwessels/torcs-client
|
my_driver.py
|
from pytocl.driver import Driver
from pytocl.car import State, Command
class MyDriver(Driver):
def drive(self, carstate):
command = Command()
command.steering = carstate.angle / 180
if carstate.angle > 30 or carstate.angle < -30:
command.brake = 0.5
command.accelerator = 0
else:
command.brake = 0
command.accelerator = 1
command.gear = 1
return command
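# Usage sketch (an assumption, mirroring run_torcs.py's import of pytocl.main.main):
# launch this driver against a TORCS server listening on the default port.
if __name__ == '__main__':
    from pytocl.main import main
    main(MyDriver())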
|
lankunyao/txt_to_json
|
main.py
|
# -*- coding: utf-8 -*-
import argparse
import os
import json
from pathlib import Path
parser = argparse.ArgumentParser(description="parameters passed via the command line")
parser.add_argument('-t', '--txt', default="SciKG_min_1.0")
args = parser.parse_args()
txt = args.txt
root_dir = os.path.dirname(__file__)
def main():
path = os.path.normcase(os.path.join(root_dir, "txt", txt))
print(path)
    for root, dirs, files in os.walk(path):
        for name in files:
            file_dir = os.path.join(path, name)
            with open(file_dir, 'r', encoding='utf-8') as f:
                data_dict = json.loads(f.read())
            data_json = json.dumps(data_dict, ensure_ascii=False)
            json_path = os.path.normcase(os.path.join(root_dir, "json", txt))
            if not Path(json_path).is_dir():
                os.makedirs(json_path)
            json_path = os.path.normcase(os.path.join(json_path, name[:-3] + "json"))
            with open(json_path, 'w', encoding='utf-8') as w:
                w.write(data_json)
if __name__ == '__main__':
main()
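# Usage sketch (assumed layout): place UTF-8 text files under ./txt/<dataset>/ and run
#   python main.py -t <dataset>
# The converted files are written to ./json/<dataset>/.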
|
bactopia/bactopia-parser
|
bactopia/__init__.py
|
"""Top-level package for Bactopia."""
__author__ = """<NAME> III"""
__email__ = '<EMAIL>'
__version__ = '0.1.1'
from .parse import parse, get_bactopia_files, parse_bactopia_files
|
bactopia/bactopia-parser
|
bactopia/parse.py
|
"""
Bactopia's parser entry-point.
Example: bactopia.parse(result_type, filename)
"""
import errno
import os
from collections import OrderedDict
from typing import Union
from . import parsers
from .const import RESULT_TYPES, IGNORE_LIST
def parse(result_type: str, *files: str) -> Union[list, dict]:
"""
Use the result type to automatically select the appropriate parsing method for an input.
Args:
result_type (str): the type of results (e.g. assembly, mlst, qc, etc...)
*files (str): one or more input files to be parsed
Raises:
FileNotFoundError: the input file could not be found
ValueError: the result type is not an accepted type
Returns:
Union[list, dict]: The results parsed for a given input.
"""
if result_type in RESULT_TYPES:
for f in files:
if not os.path.exists(f):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), f)
return getattr(parsers, result_type).parse(*files)
else:
raise ValueError(f"'{result_type}' is not an accepted result type. Accepted types: {', '.join(RESULT_TYPES)}")
def parse_genome_size(gs_file: str) -> int:
"""
Parse genome size from input file
Args:
gs_file (str): File containing the genome size of the sample
Returns:
int: genome size
"""
with open(gs_file, 'rt') as gs_fh:
return int(gs_fh.readline().rstrip())
def _is_bactopia_dir(path: str, name: str) -> list:
"""
Check if a directory contains Bactopia output and any errors.
Args:
path (str): a path to expected Bactopia results
name (str): the name of sample to test
Returns:
list: 0 (bool): path looks like Bactopia, 1 (list): any errors found
"""
from .parsers.error import ERROR_TYPES
errors = []
is_bactopia = os.path.exists(f"{path}/{name}/{name}-genome-size.txt")
for error_type in ERROR_TYPES:
filename = f"{path}/{name}/{name}-{error_type}-error.txt"
if os.path.exists(filename):
is_bactopia = True
errors.append(parsers.error.parse(filename))
return [is_bactopia, errors]
def get_bactopia_files(path: str, name: str) -> dict:
"""
Build a list of all parsable Bactopia files.
Args:
path (str): a path to expected Bactopia results
name (str): the name of sample to test
Returns:
dict: path and info on all parsable Bactopia files
"""
path = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
is_bactopia, errors = _is_bactopia_dir(path, name)
bactopia_files = OrderedDict()
    bactopia_files['has_errors'] = bool(errors)
bactopia_files['errors'] = errors
bactopia_files['ignored'] = False
bactopia_files['message'] = ""
bactopia_files['files'] = OrderedDict()
if is_bactopia:
if not errors:
bactopia_files['genome_size'] = parse_genome_size(f"{path}/{name}/{name}-genome-size.txt")
for result_type in RESULT_TYPES:
result_key = result_type
if result_type == "amr":
result_key = "antimicrobial-resistance"
elif result_type == "qc":
result_key = "quality-control"
if result_type not in ['error', 'generic', 'kmers']:
bactopia_files['files'][result_key] = getattr(parsers, result_type).get_parsable_list(path, name)
else:
bactopia_files['ignored'] = True
if name not in IGNORE_LIST:
bactopia_files['message'] = f"'{path}/{name}' is does not look like Bactopia directory."
else:
bactopia_files['message'] = f"'{path}/{name}' is on the Bactopia ignore list."
return bactopia_files
def parse_bactopia_files(path: str, name: str) -> dict:
"""
Parse all results associated with an input sample.
Args:
path (str): a path to expected Bactopia results
name (str): the name of sample to test
Returns:
dict: The parsed set of results associated with the input sample
"""
from bactopia.parsers.qc import is_paired
bactopia_files = get_bactopia_files(path, name)
bactopia_results = OrderedDict((
('sample', name),
('genome_size', None),
('is_paired', None),
('has_errors', bactopia_files['has_errors']),
('errors', bactopia_files['errors']),
('has_missing', False),
('missing', []),
('ignored', bactopia_files['ignored']),
('message', bactopia_files['message']),
('results', OrderedDict())
))
if not bactopia_results['has_errors'] and not bactopia_results['ignored']:
bactopia_results['genome_size'] = bactopia_files['genome_size']
for result_type, results in bactopia_files['files'].items():
bactopia_results['is_paired'] = is_paired(path, name)
bactopia_results['results'][result_type] = OrderedDict()
result_key = result_type
if result_type == "-_resistance":
result_key = "amr"
elif result_type == "quality-control":
result_key = "qc"
for result in results:
if result['missing']:
if not result['optional']:
bactopia_results['has_missing'] = True
bactopia_results['missing'].append([result_type, result["files"]])
else:
bactopia_results['results'][result_type][result['result_name']] = {}
else:
bactopia_results['results'][result_type][result['result_name']] = parse(result_key, *result['files'])
return bactopia_results
def parse_bactopia_directory(path: str) -> list:
"""
Scan a Bactopia directory and return parsed results.
Args:
path (str): a path to expected Bactopia results
Returns:
list: Parsed results for all samples in a Bactopia directory
"""
results = []
with os.scandir(path) as dirs:
for directory in dirs:
if directory.name not in IGNORE_LIST:
sample = directory.name
results.append(parse_bactopia_files(path, sample))
return results
|
bactopia/bactopia-parser
|
bactopia/parsers/__init__.py
|
import os
import glob
modules = glob.glob(os.path.join(os.path.dirname(__file__), "*.py"))
__all__ = [os.path.basename(f)[:-3] for f in modules if os.path.isfile(f) and not f.endswith('__init__.py')]
del modules
del glob
del os
from . import *
|
bactopia/bactopia-parser
|
tests/__init__.py
|
"""Unit test package for bactopia."""
|
bactopia/bactopia-parser
|
bactopia/parsers/qc.py
|
"""
Parsers for QC (FASTQ) related results.
"""
import os
from .generic import get_file_type, parse_json
RESULT_TYPE = 'quality-control'
ACCEPTED_FILES = ["final.json", "original.json"]
def parse(r1: str, r2: str = None) -> dict:
"""
Check input file is an accepted file, then select the appropriate parsing method.
Args:
r1 (str): input file associated with R1 or SE FASTQ
r2 (str, optional): input file associated with R2 FASTQ. Defaults to None.
Raises:
        ValueError: summary results do not have a matching origin (e.g. original vs final FASTQ)
Returns:
dict: parsed results
"""
filetype = get_file_type(ACCEPTED_FILES, r1)
filetype2 = filetype
if r2:
filetype2 = get_file_type(ACCEPTED_FILES, r2)
if r1.endswith(".json"):
if r2 and filetype != filetype2:
raise ValueError(f"Original and Final QC files were mixed. R1: {filetype}, R2: {filetype2}")
return _merge_qc_stats(parse_json(r1), parse_json(r2)) if r2 else parse_json(r1)
def _merge_qc_stats(r1: dict, r2: dict) -> dict:
"""
Merge appropriate metrics (e.g. coverage) for R1 and R2 FASTQs.
Args:
r1 (dict): parsed metrics associated with R1 FASTQ
r2 (dict): parsed metrics associated with R2 FASTQ
Returns:
dict: the merged FASTQ metrics
"""
from statistics import mean
merged = {
'qc_stats': {},
'r1_per_base_quality': r1['per_base_quality'],
'r2_per_base_quality': r2['per_base_quality'],
'r1_read_lengths': r1['read_lengths'],
'r2_read_lengths': r2['read_lengths']
}
for key in r1['qc_stats']:
if key in ['total_bp', 'coverage', 'read_total']:
merged['qc_stats'][key] = r1['qc_stats'][key] + r2['qc_stats'][key] if r2 else r1['qc_stats'][key]
else:
val = mean([r1['qc_stats'][key], r2['qc_stats'][key]]) if r2 else r1['qc_stats'][key]
merged['qc_stats'][key] = f'{val:.4f}' if isinstance(val, float) else val
return merged
def is_paired(path: str, name: str) -> bool:
"""
    Check whether an input sample had paired-end or single-end reads
Args:
path (str): a path to expected Bactopia results
name (str): the name of sample to test
Raises:
ValueError: Processed FASTQ(s) could not be found.
Returns:
bool: True: reads are paired, False: reads are single-end
"""
r1 = f"{path}/{name}/quality-control/{name}_R1.fastq.gz"
r2 = f"{path}/{name}/quality-control/{name}_R2.fastq.gz"
se = f"{path}/{name}/quality-control/{name}.fastq.gz"
if os.path.exists(r1) and os.path.exists(r2):
return True
elif os.path.exists(se):
return False
else:
raise ValueError(f"Processed FASTQs not found in {path}/{name}/quality-control/")
def get_parsable_list(path: str, name: str) -> list:
"""
Generate a list of parsable files.
Args:
path (str): a path to expected Bactopia results
name (str): the name of sample to test
Returns:
list: information about the status of parsable files
"""
parsable_results = []
for result in ACCEPTED_FILES:
result_name = None
filename = None
r1 = None
r2 = None
se = None
if result.endswith('original.json'):
result_name = 'original'
r1 = f"{path}/{name}/{RESULT_TYPE}/summary-original/{name}_R1-{result}"
r2 = f"{path}/{name}/{RESULT_TYPE}/summary-original/{name}_R2-{result}"
se = f"{path}/{name}/{RESULT_TYPE}/summary-original/{name}-{result}"
elif result.endswith('final.json'):
result_name = 'final'
r1 = f"{path}/{name}/{RESULT_TYPE}/summary-final/{name}_R1-{result}"
r2 = f"{path}/{name}/{RESULT_TYPE}/summary-final/{name}_R2-{result}"
se = f"{path}/{name}/{RESULT_TYPE}/summary-final/{name}-{result}"
if (se):
if os.path.exists(se):
parsable_results.append({
'result_name': result_name,
'files': [se],
'optional': False,
'missing': False
})
else:
missing = True
if os.path.exists(r1) and os.path.exists(r2):
missing = False
parsable_results.append({
'result_name': result_name,
'files': [r1, r2],
'optional': False,
'missing': missing
})
return parsable_results
|
bactopia/bactopia-parser
|
tests/test_bactopia.py
|
#!/usr/bin/env python
"""Tests for `bactopia` package."""
import unittest
import bactopia
class TestBactopia_parser(unittest.TestCase):
"""Tests for `bactopia` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
|
bactopia/bactopia-parser
|
bactopia/const.py
|
"""
Constant values used throughout the Bactopia package.
"""
RESULT_TYPES = [
"amr",
"annotation",
"ariba",
"assembly",
"blast",
"error",
"generic",
"kmers",
"mapping",
"minmers",
"mlst",
"qc",
"variants"
]
IGNORE_LIST = ['.nextflow', '.nextflow.log', 'bactopia-info', 'work', 'bactopia-tools']
|
vivatoviva/shell
|
first.py
|
import math
def quadratic(a, b, c):
    # First compute the discriminant
    temp = b**2 - 4*a*c
    if temp < 0:
        return 0, 0
    # The two roots are
    x1 = (-b + math.sqrt(temp)) / (2*a)
    x2 = (-b - math.sqrt(temp)) / (2*a)
    return x1, x2
# Tests:
print('quadratic(2, 3, 1) =', quadratic(2, 3, 1))
print('quadratic(1, 3, -4) =', quadratic(1, 3, -4))
if quadratic(2, 3, 1) != (-0.5, -1.0):
    print('test failed')
elif quadratic(1, 3, -4) != (1.0, -4.0):
    print('test failed')
else:
    print('test passed')
|
Lornatang/TensorFlow-AE
|
main.py
|
# Copyright 2019 ChangyuLiu Authors. All Rights Reserved.
#
# Licensed under the MIT License.
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://opensource.org/licenses/MIT
# ==============================================================================
"""The training loop begins with generator receiving a random seed as input.
That seed is used to produce an image.
The discriminator is then used to classify real images (drawn from the training set)
and fakes images (produced by the generator).
The loss is calculated for each of these models,
and the gradients are used to update the generator and discriminator.
"""
from data.datasets import load_data
from model.decoder import make_decoder_model
from model.encoder import make_encoder_model
from util.loss_and_optim import ae_loss, encoder_optimizer, decoder_optimizer
from util.save_checkpoints import save_checkpoints
from util.generate_and_save_images import generate_and_save_images
import tensorflow as tf
import time
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', default=50, type=int,
help='Epochs for training.')
args = parser.parse_args()
print(args)
# define model save path
save_path = './training_checkpoints'
BUFFER_SIZE = 60000
BATCH_SIZE = 128
num_examples_to_generate = 16
noise_dim = 64
# create dir
if not os.path.exists(save_path):
os.makedirs(save_path)
# define random noise
seed = tf.random.normal([num_examples_to_generate, noise_dim])
# load dataset
train_dataset = load_data(BUFFER_SIZE, BATCH_SIZE)
# load network and optim paras
decoder = make_decoder_model()
decoder_optimizer = decoder_optimizer()
encoder = make_encoder_model()
encoder_optimizer = encoder_optimizer()
checkpoint_dir, checkpoint, checkpoint_prefix = save_checkpoints(decoder,
encoder,
decoder_optimizer,
encoder_optimizer,
save_path)
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images):
""" break it down into training steps.
Args:
images: input images.
"""
with tf.GradientTape() as de_tape, tf.GradientTape() as en_tape:
z = encoder(images)
_x = decoder(z)
ae_of_loss = ae_loss(images, _x)
gradients_of_decoder = de_tape.gradient(ae_of_loss,
decoder.trainable_variables)
gradients_of_encoder = en_tape.gradient(ae_of_loss,
encoder.trainable_variables)
decoder_optimizer.apply_gradients(
zip(gradients_of_decoder, decoder.trainable_variables))
encoder_optimizer.apply_gradients(
zip(gradients_of_encoder, encoder.trainable_variables))
def train(dataset, epochs):
""" train op
Args:
dataset: mnist dataset or cifar10 dataset.
epochs: number of iterative training.
"""
for epoch in range(epochs):
start = time.time()
for image_batch in dataset:
train_step(image_batch)
# Produce images for the GIF as we go
generate_and_save_images(decoder,
epoch + 1,
seed,
save_path)
# Save the model every 15 epochs
if (epoch + 1) % 15 == 0:
checkpoint.save(file_prefix=checkpoint_prefix)
print(f'Time for epoch {epoch + 1} is {time.time() - start:.3f} sec.')
# Generate after the final epoch
generate_and_save_images(decoder,
epochs,
seed,
save_path)
if __name__ == '__main__':
train(train_dataset, args.epochs)
|
Lornatang/TensorFlow-AE
|
util/loss_and_optim.py
|
# Copyright 2019 ChangyuLiu Authors. All Rights Reserved.
#
# Licensed under the MIT License.
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://opensource.org/licenses/MIT
# ==============================================================================
"""Generate optim loss and Discriminate optim loss"""
import tensorflow as tf
def ae_loss(noise, fake_output):
""" Automatic coding loss function
Args:
noise: .
fake_output: generate pic for use encoder model
Returns:
tf.reduce_mean.
"""
return tf.reduce_mean(tf.square(noise - fake_output))
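# For example, ae_loss(images, decoder(encoder(images))) is the mean squared
# reconstruction error of a batch, which is exactly how main.py uses it.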
def decoder_optimizer():
""" The training generator optimizes the network.
Returns:
tf.optimizers.Adam.
"""
return tf.optimizers.Adam()
def encoder_optimizer():
""" The training discriminator optimizes the network.
Returns:
tf.optimizers.Adam.
"""
return tf.optimizers.Adam()
|
Lornatang/TensorFlow-AE
|
util/save_checkpoints.py
|
# Copyright 2019 ChangyuLiu Authors. All Rights Reserved.
#
# Licensed under the MIT License.
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://opensource.org/licenses/MIT
# ==============================================================================
"""save model function"""
import os
import tensorflow as tf
def save_checkpoints(generator, discriminator, generator_optimizer, discriminator_optimizer, save_path):
""" save gan model
Args:
generator: generate model.
discriminator: discriminate model.
generator_optimizer: generate optimizer func.
discriminator_optimizer: discriminator optimizer func.
save_path: save gan model dir path.
Returns:
checkpoint path
"""
checkpoint_dir = save_path
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
return checkpoint_dir, checkpoint, checkpoint_prefix
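# Restoring later (a sketch using the standard TensorFlow API, not part of this file):
#   checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))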
|
danya02/booru-mirror-unified
|
danbooru_dump/danbooru_dump_bulkdata_download_missing.py
|
import subprocess
import os
import sys
sys.path.append('..')
from database import *
import time
import random
import danbooru_dump_transfer_queue_into_redis_from_find
os.chdir('/hugedata/booru/danbooru_temp')
def download_content(id):
print (['rsync', '--recursive', '--verbose', f'rsync://172.16.31.10:873/danbooru2020/original/0{str(id)[-3:].zfill(3)}/{id}.*', './danbooru2020/original/'])
subprocess.run(['rsync', '--recursive', '--verbose', f'rsync://172.16.31.10:873/danbooru2020/original/0{str(id)[-3:].zfill(3)}/{id}.*', './danbooru2020/original/'])
while 1:
#time.sleep(3)
query = Post.select().where(~fn.EXISTS(Content.select().where(Content.post_id == Post.id))).where(Post.board == Imageboard.get(Imageboard.name == 'danbooru')).order_by(random.choice([Post.id, Post.local_id])).limit(200)
for post in query:
print(post.__data__)
download_content(post.local_id)
danbooru_dump_transfer_queue_into_redis_from_find.find_transfer()
|
danya02/booru-mirror-unified
|
danbooru_dump/danbooru_dump_redis_load_json.py
|
import os
import json
import redis
import time
info = redis.Redis(db=12)
time_start = time.time()
count = 0
os.chdir('/hugedata/booru_old/danbooru2020/danbooru2020/metadata/split')
for i in sorted(os.listdir('.')):
while info.dbsize() > 100000:
print('too many files', info.dbsize())
time.sleep(60)
i = i.strip()
print('read', i)
with open(i) as handle:
for row in handle:
row = row.strip()
id = json.loads(row)['id']
info.set(id, row)
print('delete', i)
os.unlink(i)
|
danya02/booru-mirror-unified
|
queue_work.py
|
from database import *
import functools
import traceback
ENTITY_IMPORT_METHODS = dict()
def to_import_entity(imageboard, ent_type):
def wrapper(func):
@functools.wraps(func)
def wrapped(queued_ent):
queued_ent.row_locked = True
queued_ent.save()
try:
props = func(queued_ent.entity_local_id, additional_data=queued_ent.additional_data)
queued_ent.report_success(and_mark_as_final=props.get('MARK_FINAL', False))
                return True, props
except:
                queued_ent.report_error(traceback.format_exc())
traceback.print_exc()
return False, None
ENTITY_IMPORT_METHODS[imageboard] = ENTITY_IMPORT_METHODS.get(imageboard, dict())
ENTITY_IMPORT_METHODS[imageboard].update({ent_type: wrapped})
return wrapped
return wrapper
def import_single_entity():
with db.atomic():
task = QueuedImportEntity.tasks_query().get()
print('Taken task: ' + str(task))
result, props = ENTITY_IMPORT_METHODS[task.board.name][task.entity_type.name](task)
        print('Succeeded' if result else 'Failed')
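# Illustrative sketch (hypothetical board/entity names): registering an import
# method with the decorator above. The wrapped function receives the entity's
# board-local id plus any additional data and may return {'MARK_FINAL': True}
# to mark the imported entity as final.
#
# @to_import_entity('danbooru', 'post')
# def import_post(local_id, additional_data=None):
#     ...  # fetch the post and write it to the database
#     return {'MARK_FINAL': True}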
|
danya02/booru-mirror-unified
|
danbooru_dump/danbooru_dump_import.py
|
import sys
sys.path.append('..')
from booru_db import BooruDatabase
from database import *
import datetime
import json
import traceback
import random
import time
import logging
import redis
import collections
r = redis.Redis(db=12)
bdb = BooruDatabase('danbooru')
def parse_date(val):
try:
v = datetime.datetime.strptime(val, '%Y-%m-%d %H:%M:%S %Z')
except:
val = val.split(' ')
val[1] = val[1] + '0'
val = ' '.join(val)
v = datetime.datetime.strptime(val, '%Y-%m-%d %H:%M:%S.%f %Z')
if v == datetime.datetime(1970, 1, 1, 0, 0):
return None
return v
def insert_row(struct, skip_if_exists=False):
print(struct['id'])
post = Post()
post_existing = bdb.post[ int(struct['id']) ]
if skip_if_exists and (post_existing is not None):
return None
if post_existing:
post = post_existing
#print(post, post.__data__, flush=True)
post.local_id = struct['id']
#print(post.post_created_at, parse_date(struct['created_at']))
#print(post.post_updated_at, parse_date(struct['updated_at']))
#input()
post.post_created_at = parse_date(struct['created_at'])
post.post_updated_at = parse_date(struct['updated_at'])
post.row_updated_at = datetime.datetime.now()
post.rating = struct['rating']
post.source = struct['source'] or None
post.score = struct['score']
post.parent_local_id = int(struct['parent_id']) or None
post.uploaded_by = User.get_or_create(board=bdb.booru, local_id=int(struct['uploader_id']))[0]
bdb.post[ int(struct['id']) ] = post
post = bdb.post[ int(struct['id']) ]
#print(post, post.__data__, flush=True)
old_tags = sorted(bdb.tag[post])
new_tags = sorted([tag['name'] for tag in struct['tags']])
if old_tags != new_tags:
#print('updating tag set', old_tags, new_tags)
bdb.tag[post] = new_tags
else:
pass
#print('skipped tag set')
postfavs_existing = set( [i[0] for i in PostFavs.select(User.local_id).join(User).where(PostFavs.post==post).tuples()] )
postfavs_new = set([int(i) for i in struct['favs']])
diff_set = postfavs_existing.symmetric_difference(postfavs_new)
if diff_set:
#print('fav set difference so updating', postfavs_existing, '->', postfavs_new)
fav_users = dict()
for i in User.select().where(User.board == bdb.booru).where(User.local_id.in_([int(i) for i in struct['favs']])):
fav_users[i.local_id] = i
PostFavs.delete().where(PostFavs.post == post).execute()
postfavs_list = []
for fav in struct['favs']:
fav = int(fav)
if fav not in fav_users:
fav_users[fav] = User.get_or_create(board=bdb.booru, local_id=int(fav))[0]
postfavs_list.append(PostFavs(post=post, user=fav_users[fav]))
PostFavs.bulk_create(postfavs_list)
danpost = DanbooruPostMetadata.get_or_none(post=post) or DanbooruPostMetadata(post=post)
#print(danpost.__data__)
danpost.up_score = int(struct['up_score'])
    danpost.down_score = int(struct['down_score'])
danpost.pixiv_id = int(struct['pixiv_id']) or None
danpost.approved_by = User.get_or_create(board=bdb.booru, local_id=int(struct['approver_id']))[0]
danpost.is_pending = struct['is_pending']
danpost.is_flagged = struct['is_flagged']
danpost.is_deleted = struct['is_deleted']
danpost.is_banned = struct['is_banned']
danpost.is_status_locked = struct['is_status_locked']
danpost.is_note_locked = struct['is_note_locked']
try:
danpost.save(force_insert=True)
except IntegrityError:
danpost.save()
imgpost = ImageMetadata.get_or_none(post=post) or ImageMetadata(post=post)
#print(imgpost.__data__)
imgpost.image_width = int(struct['image_width'])
imgpost.image_height = int(struct['image_height'])
imgpost.file_size = int(struct['file_size'])
imgpost.md5 = struct['md5']
try:
imgpost.save(force_insert=True)
except IntegrityError:
imgpost.save()
def insert_row_atomic(*args, **kwargs):
with db.atomic():
return insert_row(*args, **kwargs)
#insert_row_atomic(json.loads(input()))
class CountHandler(logging.Handler):
def __init__(self):
self.count = 0
super().__init__()
def emit(self, record):
self.count += 1
def handle(self, record):
self.count += 1
def reset(self):
self.count = 0
counter = CountHandler()
logger = logging.getLogger('peewee')
logger.addHandler(counter)
logger.setLevel(logging.DEBUG)
class DanbooruDumpRow(MyModel):
post_id = IntegerField()
content = TextField()
DANBOORU = Imageboard.get(name='danbooru')
POST_ENTITY, _ = EntityType.get_or_create(name='post')
when = time.time()
#for row in DanbooruDumpRow.select().order_by(fn.Rand()):
def infinite_counter():
i = 0
while 1:
i += 1
yield i
start = time.time()
try:
start_times = collections.deque(maxlen=50)
to_delete = collections.deque()
for ind in db.batch_commit(infinite_counter(), 100):
# count = DanbooruDumpRow.select(fn.COUNT(DanbooruDumpRow.id)).scalar()
# if not count:
# print('All done!!!')
# break
# row = DanbooruDumpRow.select().where(DanbooruDumpRow.id >= random.randint(0, count)).get()
start_times.append(time.time())
rid = r.randomkey()
row = r.get(rid)
# print('selecting row', rid, 'took', time.time()-when, 'seconds and', counter.count, 'queries')
when = time.time()
insert_row_atomic(json.loads(row))
print('inserting took', time.time()-when, 'seconds and', counter.count, 'queries')
when = time.time()
        ImportedEntity.get_or_create(entity_type=POST_ENTITY, board=DANBOORU, entity_local_id=int(rid), final=False)
to_delete.append(rid)
while len(to_delete) > 500:
v = to_delete.popleft()
print('deleting from redis', v)
r.delete(v)
# print('removing took', time.time()-when, 'seconds and', counter.count, 'queries')
counter.reset()
when = time.time()
print('Current rate:', len(start_times) / (when - start_times[0]))
print()
except KeyboardInterrupt:
input('Stopped, enter to continue or another ^C to quit')
|
danya02/booru-mirror-unified
|
danbooru_dump/danbooru_dump_bulkdata_transfer.py
|
import sys
sys.path.append('..')
from database import *
import traceback
import os
import shutil
#import threading
import queue
import time
import magic
import redis
import functools
import concurrent.futures
r = redis.Redis()
rc = redis.Redis(db=2)
#rhashes_mimes = redis.Redis(db=2)
BOARD, _ = Imageboard.get_or_create(name='danbooru', base_url='https://danbooru.donmai.us')
started_work_at = time.time()
jobs_executed = 0
find_files = False
executor = concurrent.futures.ThreadPoolExecutor()
@functools.lru_cache(maxsize=8192)
def get_post(id):
return Post.get_or_none(Post.local_id == id, Post.board == BOARD)
#@create_table
class FileToImportTMP(MyModel):
path = CharField(unique=True)
locked = BooleanField(default=False)
@functools.lru_cache(maxsize=None)
def get_mimetype(name, ext):
mimetype_row = MimeType.get_or_none(MimeType.name == name)
if mimetype_row is None:
mimetype_row = MimeType.create(name=name, ext=ext)
return mimetype_row
def time_it(fn):
def exec(*args, **kwargs):
started = time.time()
res = fn(*args, **kwargs)
return time.time()-started, res
return exec
def data_import(path, thread=None, select_row_time=None):
started_at = time.time()
n=6
ext = path.split('.')[-1]
read_start = time.time()
id = int( path.split('/')[-1].split('.')[0] )
post = get_post(id)
if post is None:
raise Post.DoesNotExist
res = rc.get(path)
if res is not None:
res = str(res, 'utf-8')
sha256, mt_str = res.split(' ', 1)
read_time = time.time() - read_start
compute_time = 0
else:
@time_it
def read_data(path=path):
data = rc.get(path)
if data is None:
handle = open(path, 'rb')
data = handle.read()
handle.close()
if data == b'':
print('!!!! file at', path, 'is empty!!')
return None, None
return id, data
read_time, a = read_data()
id, data = a
if id is None: return
@time_it
def get_mimetype_from_file(data=data):
mimetype = magic.from_buffer(data, mime=True)
return mimetype
mimetype_get_time, mt_str = get_mimetype_from_file()
compute_time = mimetype_get_time
@time_it
def hashit(data=data):
return sha256_hash(data)
hash_time, sha256 = hashit()
compute_time += hash_time
    if os.path.exists(IMAGE_DIR + get_path(sha256)):
raise FileExistsError()
print(f'id={id}\tread={round(read_time, n)}', end='\t')
print(f'compute={round(compute_time, n)}', end='\t')
@time_it
def db_ops(id=id, mt_str=mt_str, sha256=sha256):
mt_row = get_mimetype(mt_str, ext)
try:
c = Content.get(post=post)
c.sha256_current=sha256
c.mimetype_id=mt_row.id
c.file_size_current=os.path.getsize(path)
c.save()
except Content.DoesNotExist:
c = Content.create(post=post, sha256_current=sha256, mimetype_id=mt_row.id, file_size_current=os.path.getsize(path))
return mt_row
db_ops_time, mt_row = db_ops()
@time_it
def save(mt_row=mt_row):
ensure_dir(sha256)
#print(path, '->', IMAGE_DIR+get_path(sha256))
shutil.move(path, IMAGE_DIR+get_path(sha256)+'.'+mt_row.ext)
return None
save_file_time, _ = save()
print(f'db_ops={round(db_ops_time, n)}', f'save={round(save_file_time, n)}', sep='\t', end='\t')
ended_at = time.time()
global jobs_executed
global time_running
time_on_job = ended_at - started_at
jobs_executed += 1
avg_time = (time.time() - started_work_at) / jobs_executed
print(f'time={round(time_on_job, n)}', f'avg={round(avg_time,n)}', f'jobs={jobs_executed}', sep='\t')
path = '/hugedata/booru/danbooru2020/danbooru2020/original'
count = 0
if __name__ == '__main__':
while True:
count += 1
if count > 10000:
#get_path.cache_clear()
count = 0
started_at = time.time()
path = r.randomkey()
if path is None:
print('out of jobs! waiting...')
time.sleep(1)
continue
path = str(path, 'utf8')
try:
data_import(path, select_row_time=time.time()-started_at)
r.delete(path)
except FileNotFoundError:
traceback.print_exc()
r.delete(path)
except Post.DoesNotExist:
r.delete(path)
except:
traceback.print_exc()
continue
|
danya02/booru-mirror-unified
|
danbooru_dump/danbooru_dump_transfer_queue_into_redis_from_find.py
|
import sys
sys.path.append('..')
from database import *
import sys
import os
import redis
import time
r = redis.Redis()
def find_transfer():
os.system('find /hugedata/booru/danbooru_temp -type f -empty -print -delete')
f = os.popen('find /hugedata/booru/danbooru_temp -type f')
for row in f:
row = row.strip()
if row.split('/')[-1].startswith('.'): continue
print(row, file=sys.stderr)
r.set(row, '0')
if __name__ == '__main__':
while 1:
find_transfer()
time.sleep(20)
|
danya02/booru-mirror-unified
|
mv_hash_tree_files.py
|
from database import *
import os
import shutil
import time
import humanize
import datetime
import redis
import tqdm
import traceback
queue = redis.Redis(db=5)
os.chdir('/hugedata/booru_old/unified/files') # copy from
cwd = os.getcwd()
TARGET = '/hugedata/booru/' # copy to
if input('input Q to enqueue paths, anything else to move them').lower() == 'q':
search = os.popen('find -type f')
for path in search:
path = path.strip()
path = cwd + path[1:]
print(path)
queue.set(path, 0)
else:
dbsize = queue.dbsize()
dbsize_first = dbsize
time_started = time.time()
while dbsize:
current_time = time.time()
time_running = current_time - time_started
changed_dbsize = dbsize - dbsize_first
speed = changed_dbsize / time_running
if speed > 0:
eta = 'inf'
human_eta = 'inf'
else:
eta = dbsize / (speed or 0.1)
eta = -eta
eta_td = datetime.timedelta(seconds=eta)
human_eta = humanize.precisedelta(eta_td, minimum_unit='seconds')
eta = round(eta)
print('cursize:', dbsize, 'change:', changed_dbsize, 'speed:', speed, 'it/s', 'eta:', eta, 'seconds or', human_eta)
path = queue.randomkey()
path = str(path, 'utf8')
filename = path.split('/')[-1]
ensure_dir(filename)
target_path = TARGET + get_path(filename)
try:
shutil.move(path, target_path)
queue.delete(path)
except:
traceback.print_exc()
dbsize = queue.dbsize()
|
danya02/booru-mirror-unified
|
booru_db.py
|
from database import *
import functools
class BooruDatabase:
def __init__(self, booru):
if not isinstance(booru, Imageboard):
if isinstance(booru, str):
booru = Imageboard.get(Imageboard.name == booru)
else:
booru = Imageboard.get_by_id(booru)
else:
pass
self.booru = booru
@property
def post(self):
return PostDatabase(self.booru)
@property
def tag(self):
return TagDatabase(self.booru)
# @property
# def comment(self):
# return CommentDatabase(self.booru)
@property
def user(self):
return UserDatabase(self.booru)
class PostDatabase:
def __init__(self, booru):
self.booru = booru
@functools.lru_cache(maxsize=32)
def __getitem__(self, id):
return Post.get_or_none(Post.local_id==id, Post.board==self.booru)
def __setitem__(self, id, model):
if not isinstance(model, Post):
raise TypeError
model.board = self.booru
model.local_id = id
try:
model.save(force_insert=True)
except IntegrityError:
model.save()
class TagDatabase:
    def __init__(self, booru):
        self.booru = booru
    def __getitem__(self, item):
if isinstance(item, int) or isinstance(item, Post):
if isinstance(item, int):
item = Post.select(Post.id).where(Post.id == item).where(Post.board == self.booru).get()
return set(
[i.name for i in Tag.select(Tag.name).join(PostTag).where(PostTag.post == item)]
)
elif isinstance(item, str):
return Tag.get_or_none(Tag.name == item)
else:
raise TypeError
@staticmethod
@functools.lru_cache(maxsize=1024)
def get_tag(name):
try:
return Tag.get(Tag.name == name)
except Tag.DoesNotExist:
return Tag.create(name=name)
    def __setitem__(self, item, value):
        if isinstance(item, (int, Post)):
            if isinstance(item, int):
                item = Post.select(Post.id).where(Post.id == item).where(Post.board == self.booru).get()
if not isinstance(value, list): raise TypeError
with db.atomic():
subquery_tag_ids = PostTag.select(PostTag.tag_id).where(PostTag.post == item)
#TagPostCount.update(post_count = TagPostCount.post_count - 1, changed_at=fn.Now()).where(TagPostCount.board == item.board).where(TagPostCount.tag.in_(subquery_tag_ids)).execute()
PostTag.delete().where(PostTag.post == item).execute()
post_tags = []
for tagname in value:
post_tags.append(PostTag(post=item, tag=TagDatabase.get_tag(tagname)))
# TagPostCount.get_or_create(board=item.board, tag=TagDatabase.get_tag(tagname))
try:
PostTag.bulk_create(post_tags)
except IntegrityError:
for i in post_tags:
try:
i.save(force_insert=True)
except IntegrityError:
pass
#TagPostCount.update(post_count = TagPostCount.post_count + 1, changed_at=fn.Now()).where(TagPostCount.board == item.board).where(TagPostCount.tag.in_(subquery_tag_ids)).execute()
else:
raise TypeError
class UserDatabase:
def __init__(self, booru):
self.booru = booru
def __getitem__(self, item):
if isinstance(item, int):
return User.get_or_none(User.local_id==item, User.board==self.booru)
else:
raise TypeError
def __setitem__(self, item, value):
if not isinstance(item, int): raise TypeError
if not isinstance(value, User): raise TypeError
value.local_id = item
value.board = self.booru
try:
value.save(force_insert=True)
except IntegrityError:
value.save()
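# Usage sketch (mirrors how danbooru_dump_import.py drives these classes):
#   bdb = BooruDatabase('danbooru')
#   post = bdb.post[12345]                # look up a post by its board-local id
#   bdb.tag[post] = ['tag_a', 'tag_b']    # replace that post's tag set
#   user = bdb.user[42]                   # look up a user by board-local id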
|
danya02/booru-mirror-unified
|
create_thumbnails.py
|
from database import *
import subprocess
import os
import datetime
import random
mimetype_jpg = MimeType.get(MimeType.ext == 'jpg')
mimetype_png = MimeType.get(MimeType.ext == 'png')
mts = [mimetype_jpg, mimetype_png]
mtids = [mt.id for mt in mts]
def create_thumbnail(content, skip_existing=True):
print(content)
if content.mimetype_id not in mtids:
return
if len(content.thumbnail) and skip_existing:
return
original_path = IMAGE_DIR + get_path(content.sha256_current) + '.' + content.mimetype.ext
thumb_path = IMAGE_DIR + get_thumbnail_path(content.sha256_current) + '.' + content.mimetype.ext
old_path = IMAGE_DIR + 'old/' + get_path(content.sha256_current)
old_ext_path = IMAGE_DIR + 'old_with_exts/' + get_path(content.sha256_current) + '.' + content.mimetype.ext
if not os.path.isfile(thumb_path) or not skip_existing:
ensure_dir(content.sha256_current)
cmd = ['convert', original_path, '-thumbnail', '256x256', thumb_path]
print(cmd)
subprocess.run(cmd)
size = os.path.getsize(thumb_path)
ct = ContentThumbnail.get_or_none(content=content, mimetype=content.mimetype)
if ct is None:
ct = ContentThumbnail.create(content=content, mimetype=content.mimetype, size=size)
else:
ct.size = size
ct.generated_at = datetime.datetime.now()
ct.save()
else: print('skipping', content)
# try:
# open(old_path).close()
if os.path.exists(old_path):
print('deleted', old_path)
os.unlink(old_path)
if os.path.exists(old_ext_path):
print('deleted', old_ext_path)
os.unlink(old_ext_path)
# except FileNotFoundError: pass
page = 0
iterated = True
while iterated:
print('============= PAGE', page, '===============')
#iterated = False
try:
for c in Content.select().where(~fn.EXISTS(ContentThumbnail.select().where(ContentThumbnail.content_id==Content.post_id))).join(Post).order_by(Post.local_id).iterator():
try:
with db.atomic(): create_thumbnail(c, skip_existing=False)
except: continue
iterated = True
except IntegrityError: continue
page += 1
|
danya02/booru-mirror-unified
|
database.py
|
from peewee import *
import peewee
import datetime
from PIL import Image
import hashlib
import io
import os
import random
IMAGE_DIR = '/hugedata/booru/'
DIR_TREE_DEPTH = 2
DIR_TREE_SEGMENT_LEN = 2
#db = SqliteDatabase('test.db', timeout=600)
db = MySQLDatabase('unifiedbooru', user='booru', password='<PASSWORD>', host='10.0.0.128')
class MediumBlobField(BlobField):
field_type = 'MEDIUMBLOB'
content_databases = dict()
def sha256_hash(data):
hasher = hashlib.sha256()
hasher.update(data)
return hasher.hexdigest()
def get_dir(name):
path = ''
cursor = DIR_TREE_SEGMENT_LEN
for ind in range(DIR_TREE_DEPTH):
path += name[:cursor] + '/'
cursor += DIR_TREE_SEGMENT_LEN
return path
def get_path(name):
return get_dir(name) + name
def get_thumbnail_path(name):
return 'thumbnails/' + get_dir(name) + name
def ensure_dir(name):
target_dir = get_dir(name)
os.makedirs(IMAGE_DIR + target_dir, exist_ok=True)
os.makedirs(IMAGE_DIR + 'thumbnails/' + target_dir, exist_ok=True)
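# Illustrative mapping with the settings above (DIR_TREE_DEPTH=2,
# DIR_TREE_SEGMENT_LEN=2), for a hash-named file 'abcdef...':
#   get_dir('abcdef...')  -> 'ab/abcd/'
#   get_path('abcdef...') -> 'ab/abcd/abcdef...'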
class File:
@staticmethod
def save_file(data, ext, raise_if_exists=True, but_check_for_same=False):
name = sha256_hash(data)
ensure_dir(name)
path_no_ext = IMAGE_DIR + get_path(name)
path = path_no_ext + '.' + ext
return_value = None
if (os.path.isfile(path) or os.path.isfile(path_no_ext)) and raise_if_exists:
if not but_check_for_same:
raise FileExistsError('file with name', path, '(maybe omitting extension) already exists')
else:
if os.path.isfile(path):
with open(path, 'rb') as handle:
exist_data = handle.read()
if data == exist_data:
return_value = name
else:
raise FileExistsError('file with name', path, 'already exists and is different from new data')
if os.path.isfile(path_no_ext):
with open(path_no_ext, 'rb') as handle:
exist_data = handle.read()
if data == exist_data:
print(path_no_ext, 'renamed to', path)
os.rename(path_no_ext, path)
return_value = name
else:
raise FileExistsError('file with name', path_no_ext, 'already exists and is different from new data')
with open(path, 'wb') as handle:
handle.write(data)
return name
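# For example (as transfer_rule34.py does when migrating content):
#   name = File.save_file(data, ext, raise_if_exists=True, but_check_for_same=True)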
import logging
logger = logging.getLogger('peewee')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
class ForeignKeyField(peewee.ForeignKeyField):
@property
def field_type(self):
return self.rel_field.field_type.replace('BIGAUTO', 'BIGINT').replace('AUTO_INCREMENT', '').replace('AUTO', 'INTEGER')
class MyModel(Model):
class Meta:
database = db
legacy_table_names = False
def create_table(cls):
db.create_tables([cls])
return cls
class TinyIntegerField(SmallIntegerField):
field_type = 'TINYINT UNSIGNED'
class TinyIntegerAutoField(BigAutoField):
field_type = 'TINYINT UNSIGNED AUTO_INCREMENT'
auto_increment = True
class UnsignedSmallIntegerField(IntegerField):
field_type = 'SMALLINT UNSIGNED'
class SmallIntegerAutoField(BigAutoField):
field_type = 'SMALLINT UNSIGNED AUTO_INCREMENT'
auto_increment = True
class UnsignedIntegerField(IntegerField):
field_type = 'INTEGER UNSIGNED'
class UnsignedAutoField(AutoField):
field_type = 'INTEGER UNSIGNED AUTO_INCREMENT'
auto_increment = True
@create_table
class Imageboard(MyModel):
id = TinyIntegerAutoField(primary_key=True)
name = CharField(unique=True)
base_url = CharField(unique=True)
def Board(backref=None, null=None):
return ForeignKeyField(Imageboard, index=True, backref=backref, null=null)
@create_table
class User(MyModel):
id = UnsignedAutoField(primary_key=True)
board = Board('users')
local_id = UnsignedIntegerField(index=True, null=True)
username = CharField(null=True)
row_created_at = DateTimeField(index=True, default=datetime.datetime.now)
row_updated_at = DateTimeField(index=True, default=datetime.datetime.now)
class Meta:
indexes = (
(('board', 'local_id'), True),
)
@create_table
class UserJoinDate(MyModel):
user = ForeignKeyField(User, primary_key=True)
join_date = DateTimeField()
@create_table
class AccessLevel(MyModel):
id = SmallIntegerAutoField(primary_key=True)
name = CharField(unique=True)
@create_table
class UserAccessLevel(MyModel):
user = ForeignKeyField(User, primary_key=True)
level = ForeignKeyField(AccessLevel)
@create_table
class Tag(MyModel):
id = UnsignedAutoField(primary_key=True)
name = CharField(unique=True)
created_at = DateTimeField(index=True, default=datetime.datetime.now)
@create_table
class TagPostCount(MyModel):
tag = ForeignKeyField(Tag, index=True)
board = Board('tag_counts', null=True)
post_count = UnsignedIntegerField(index=True, default=0)
changed_at = DateTimeField(index=True, default=datetime.datetime.now)
@classmethod
def set(cls, tag=None, board=None, post_count=None):
if tag is None or post_count is None: raise TypeError('tag and post_count cannot be None')
with db.atomic() as tx:
board_expr = cls.board == board
if board is None: board_expr = cls.board.is_null(True)
changed = cls.update(post_count = post_count, changed_at=datetime.datetime.now()).where(board_expr).where(cls.tag == tag).execute()
#print(tag, board, post_count, changed)
if changed > 1:
tx.rollback()
raise ValueError('Too many rows changed by query -- wtf?? tag is ', tag, ' board is ', board, ' post_count is ', post_count)
elif changed == 1: return
else:
cls.create(board=board, tag=tag, post_count=post_count, changed_at=datetime.datetime.now())
class Meta:
indexes = (
(('board', 'tag'), True),
)
@create_table
class Type(MyModel):
id = TinyIntegerAutoField(primary_key=True)
name = CharField(unique=True)
created_at = DateTimeField(index=True, default=datetime.datetime.now)
@create_table
class TagType(MyModel):
board = Board('tag_types')
tag = ForeignKeyField(Tag, backref='types')
type = ForeignKeyField(Type, backref='tags')
class Meta:
primary_key = CompositeKey('board', 'tag', 'type')
@create_table
class Post(MyModel):
id = UnsignedAutoField(primary_key=True)
board = Board('posts')
local_id = UnsignedIntegerField(index=True)
row_created_at = DateTimeField(default=datetime.datetime.now)
row_updated_at = DateTimeField(index=True, default=datetime.datetime.now)
post_created_at = DateTimeField(index=True)
post_updated_at = DateTimeField(index=True, null=True)
uploaded_by = ForeignKeyField(User, null=True)
source = TextField(null=True)
rating = CharField(max_length=1)
score = IntegerField(index=True)
parent_local_id = UnsignedIntegerField(null=True)
parent = ForeignKeyField('self', null=True)
class Meta:
indexes = (
(('board', 'local_id'), True),
)
@create_table
class PostFavs(MyModel):
user = ForeignKeyField(User)
post = ForeignKeyField(Post)
class Meta:
indexes = (
(('post', 'user'), True),
)
primary_key = CompositeKey('user', 'post')
@create_table
class ImageMetadata(MyModel):
post = ForeignKeyField(Post, primary_key=True)
image_width = UnsignedIntegerField(index=True)
image_height = UnsignedIntegerField(index=True)
file_size = IntegerField(index=True, null=True)
md5 = CharField(index=True, max_length=32)
@create_table
class PreviewSizeInfo(MyModel):
post = ForeignKeyField(Post, primary_key=True)
sample_width = UnsignedIntegerField()
sample_height = UnsignedIntegerField()
preview_width = UnsignedSmallIntegerField()
preview_height = UnsignedSmallIntegerField()
@create_table
class ImageURL(MyModel):
post = ForeignKeyField(Post, primary_key=True)
img_url = CharField()
sample_url = CharField()
preview_url = CharField()
@create_table
class Status(MyModel):
id = TinyIntegerAutoField(primary_key=True)
value = CharField(unique=True)
@create_table
class PostStatus(MyModel):
post = ForeignKeyField(Post, primary_key=True)
status = ForeignKeyField(Status)
@create_table
class DanbooruPostMetadata(MyModel):
post = ForeignKeyField(Post, primary_key=True)
up_score = IntegerField(null=True)
down_score = IntegerField(null=True)
props = BitField()
is_rating_locked = props.flag()
is_status_locked = props.flag()
is_pending = props.flag()
is_flagged = props.flag()
is_deleted = props.flag()
    is_banned = props.flag()
    # danbooru_dump_import.py also assigns is_note_locked, so the flag is defined here.
    is_note_locked = props.flag()
pixiv_id = IntegerField(null=True)
last_commented_at = DateTimeField(null=True)
last_noted_at = DateTimeField(null=True)
approved_by = ForeignKeyField(User)
@create_table
class MimeType(MyModel):
id = TinyIntegerAutoField(primary_key=True)
name = CharField(unique=True)
ext = CharField(index=True, max_length=16)
@create_table
class Content(MyModel):
post = ForeignKeyField(Post, backref='content', primary_key=True)
sha256_current = FixedCharField(unique=True, max_length=64)
sha256_when_acquired = FixedCharField(unique=True, null=True, max_length=64)
mimetype = ForeignKeyField(MimeType, backref='contents')
file_size_current = IntegerField(index=True)
file_size_when_acquired = IntegerField(index=True, null=True)
we_modified_it = BooleanField(index=True, default=False)
@create_table
class ContentThumbnail(MyModel):
content = ForeignKeyField(Content, backref='thumbnail', primary_key=True)
generated_at = DateTimeField(default=datetime.datetime.now, index=True)
mimetype = ForeignKeyField(MimeType)
size = IntegerField(index=True)
class ContentOld(MyModel):
post = ForeignKeyField(Post, backref='content')
ext = CharField(index=True, null=True, max_length=16)
sha256_current = FixedCharField(unique=True, max_length=64)
sha256_when_acquired = FixedCharField(unique=True, null=True, max_length=64)
mimetype = ForeignKeyField(MimeType, backref='contents')
file_size_current = IntegerField(index=True)
file_size_when_acquired = IntegerField(index=True, null=True)
we_modified_it = BooleanField(index=True, default=False)
@create_table
class Comment(MyModel):
id = UnsignedAutoField(primary_key=True)
post = ForeignKeyField(Post, backref='comments')
local_id = UnsignedIntegerField(index=True)
body = TextField()
creator = ForeignKeyField(User, backref='comments')
score = IntegerField(index=True, null=True)
comment_created_at = DateTimeField()
row_created_at = DateTimeField(default=datetime.datetime.now)
row_updated_at = DateTimeField(default=datetime.datetime.now)
class Meta:
indexes = (
(('post', 'local_id'), True),
)
@create_table
class PostTag(MyModel):
tag = ForeignKeyField(Tag, backref='posts')
post = ForeignKeyField(Post, backref='tags')
class Meta:
primary_key = CompositeKey('tag', 'post')
@create_table
class EntityType(MyModel):
id = SmallIntegerAutoField(primary_key=True)
name = CharField(unique=True)
@create_table
class QueuedImportEntity(MyModel):
id = UnsignedAutoField(primary_key=True)
board = Board('queued_import_entities')
entity_type = ForeignKeyField(EntityType)
entity_local_id = UnsignedIntegerField()
additional_data = TextField(null=True)
enqueued_at = DateTimeField(default=datetime.datetime.now, index=True)
errors_encountered = UnsignedIntegerField(null=True, index=True)
priority = FloatField(default=random.random, index=True)
    row_locked = BooleanField(index=True, default=False)
@staticmethod
def tasks_query(board=None, type=None, with_less_than_errors=10):
query = QueuedImportEntity.select()
if board:
            if isinstance(board, str):
board = Imageboard.get(Imageboard.name==board)
query = query.where(QueuedImportEntity.board == board)
if type:
if isinstance(type, str):
type = EntityType.get(EntityType.name == type)
            query = query.where(QueuedImportEntity.entity_type == type)
query = query.where(QueuedImportEntity.row_locked == False).where(QueuedImportEntity.errors_encountered <= with_less_than_errors)
query = query.order_by(-QueuedImportEntity.priority)
return query
def report_error(self, error):
with db.atomic():
ImportEntityError.create(queued_entity=self, error=error)
self.errors_encountered = (self.errors_encountered or 0) + 1
self.priority = random.random()
self.row_locked = False
self.save()
def report_success(self, and_delete_self=True, and_mark_as_final=False):
with db.atomic():
            imported, did_create = ImportedEntity.get_or_create(board=self.board, entity_type=self.entity_type, entity_local_id=self.entity_local_id, additional_data=self.additional_data, defaults={'final': and_mark_as_final})
            if not did_create:
                imported.latest_update_at = datetime.datetime.now()
                imported.final = imported.final or and_mark_as_final
                imported.save()
if and_delete_self:
self.delete_instance()
class Meta:
indexes = (
(('board', 'entity_type', 'entity_local_id'), True),
)
@create_table
class ImportEntityError(MyModel):
id = UnsignedAutoField(primary_key=True)
queued_entity = ForeignKeyField(QueuedImportEntity, index=True, on_delete='CASCADE')
when = DateTimeField(default=datetime.datetime.now)
error = TextField()
@create_table
class ImportedEntity(MyModel):
id = UnsignedAutoField(primary_key=True)
board = Board('imported_entities')
entity_type = ForeignKeyField(EntityType)
entity_local_id = UnsignedIntegerField(index=True)
additional_data = TextField(null=True)
latest_update_at = DateTimeField(default=datetime.datetime.now, index=True)
final = BooleanField()
def enqueue_again(self, ignore_final=False):
if (self.final and ignore_final) or (not self.final):
QueuedImportEntity.create(board=self.board, entity_type=self.entity_type, entity_local_id=self.entity_local_id, additional_data=self.additional_data)
else:
raise ValueError('not allowed to enqueue a final entity')
class Meta:
indexes = (
(('board', 'entity_type', 'entity_local_id'), True),
)
@create_table
class Note(MyModel):
id = UnsignedAutoField(primary_key=True)
board = Board('notes')
local_id = UnsignedIntegerField(index=True)
author = ForeignKeyField(User, backref='notes')
post = ForeignKeyField(Post, backref='notes')
body = TextField()
version = SmallIntegerField()
note_created_at = DateTimeField()
note_updated_at = DateTimeField()
row_created_at = DateTimeField(default=datetime.datetime.now)
is_active = BooleanField()
x = UnsignedIntegerField()
y = UnsignedIntegerField()
width = UnsignedIntegerField()
height = UnsignedIntegerField()
class Meta:
indexes = (
(('board', 'local_id', 'version'), True),
(('board', 'local_id'), False),
)
|
danya02/booru-mirror-unified
|
transfer_rule34.py
|
import database
from peewee import *
import datetime
import functools
import magic
import hashlib
import os
import shutil
BOARD, _ = database.Imageboard.get_or_create(name='rule34', base_url='https://rule34.xxx')
olddb = MySQLDatabase('rule34_bk', user='booru', password='<PASSWORD>')
class OldModel(Model):
deleted = BooleanField(index=True, default=False) # this is used so as not to remove rows from the database but still keep track of those which have been processed. This column must be manually added to all tables.
class Meta:
database = olddb
def confirm_delete(row):
print('OK to delete', repr(row), str(row), row.__dict__)
return True or input('y/n> ').lower() == 'y'
IMAGE_DIR = '/hugedata/booru_old/rule34.xxx-files/'
#db = SqliteDatabase(SITE+'.db', timeout=600)
db = MySQLDatabase('rule34', user='booru', password='<PASSWORD>', host='10.0.0.2')
class File:
@staticmethod
def get_file_content(name):
with open(IMAGE_DIR + name[:2] + '/' + name, 'rb') as handle:
return handle.read()
@staticmethod
def delete_file(name):
os.makedirs(IMAGE_DIR + 'deleted/' + name[:2] + '/', exist_ok=True)
try:
shutil.move(IMAGE_DIR + name[:2] + '/' + name, IMAGE_DIR + 'deleted/' + name[:2] + '/' + name)
except FileNotFoundError as e:
if not os.path.isfile(IMAGE_DIR + 'deleted/' + name[:2] + '/' + name):
raise e
class AccessLevel(OldModel):
name = CharField(unique=True)
def migrate(self):
row, _ = database.AccessLevel.get_or_create(name=self.name)
return row
class User(OldModel):
id = IntegerField(primary_key=True, unique=True)
username = CharField(null=True)
level = ForeignKeyField(AccessLevel, null=True)
join_date = DateField(null=True)
def migrate(self):
row, _ = database.User.get_or_create(board=BOARD, local_id=self.id, username=self.username)
if self.join_date:
database.UserJoinDate.get_or_create(user=row, join_date=self.join_date)
if self.level:
database.UserAccessLevel.get_or_create(user=row, level=self.level.migrate())
#count = Post.select(fn.COUNT(1)).where(Post.creator == self).scalar() + Comment.select(fn.COUNT(1)).where(Comment.author == self).scalar() + Note.select(fn.COUNT(1)).where(Note.author == self).scalar()
#if count == 0:
# if confirm_delete(self):
# self.delete_instance()
return row
class Rating(OldModel):
value = CharField(unique=True)
class Status(OldModel):
value = CharField(unique=True)
def migrate(self):
row, _ = database.Status.get_or_create(value=self.value)
return row
class Tag(OldModel):
name = CharField(unique=True)
def migrate(self):
row, _ = database.Tag.get_or_create(name=self.name)
return row
class Type(OldModel):
name = CharField(unique=True)
def migrate(self):
row, _ = database.Type.get_or_create(name=self.name)
return row
class TagPostCount(OldModel):
tag = ForeignKeyField(Tag, primary_key=True)
value = IntegerField()
class TagType(OldModel):
tag = ForeignKeyField(Tag)
type = ForeignKeyField(Type)
def migrate(self):
type = self.type.migrate()
tag = self.tag.migrate()
database.TagType.get_or_create(board=BOARD, type=type, tag=tag)
if confirm_delete(self):
            TagType.update(deleted=True).where(TagType.tag == self.tag).where(TagType.type == self.type).execute()
class Meta:
primary_key = CompositeKey('tag', 'type')
class UnavailablePost(OldModel):
id = IntegerField(primary_key=True, unique=True)
reason = TextField(null=True)
first_detected_at = DateTimeField(default=datetime.datetime.now)
class Post(OldModel):
id = IntegerField(primary_key=True, unique=True)
width = IntegerField()
height = IntegerField()
url = CharField(unique=True)
sample_width = IntegerField()
sample_height = IntegerField()
sample = CharField()
preview_width = IntegerField()
preview_height = IntegerField()
preview = CharField()
md5 = CharField(unique=True)
created_at = DateTimeField()
changed_at = DateTimeField()
score = IntegerField()
creator = ForeignKeyField(User)
rating = ForeignKeyField(Rating)
status = ForeignKeyField(Status)
source = CharField()
parent = ForeignKeyField('self', backref='children', null=True)
def migrate(self):
post, _ = database.Post.get_or_create(board=BOARD,
local_id=self.id,
post_created_at=self.created_at,
post_updated_at=self.changed_at,
uploaded_by=self.creator.migrate(),
source=(self.source or None),
rating=self.rating.value,
            score=self.score, parent_local_id=None if self.parent is None else self.parent.id)
database.ImageMetadata.get_or_create(post=post, image_width=self.width, image_height=self.height, md5=self.md5)
database.PreviewSizeInfo.get_or_create(post=post, sample_width=self.sample_width, sample_height=self.sample_height, preview_width=self.preview_width, preview_height=self.preview_height)
database.ImageURL.get_or_create(post=post, img_url=self.url, sample_url=self.sample, preview_url=self.preview)
database.PostStatus.get_or_create(post=post, status=database.Status.get_or_create(value=self.status.value)[0])
for i in self.comments:
i.migrate(post)
for i in self.notes:
i.migrate(post)
tags = []
for i in self.tags:
tags.append(i.tag.migrate())
data = []
for i in tags:
data.append({'post': post, 'tag': i})
try:
database.PostTag.insert_many(data).execute()
except:
with db.atomic():
for i in self.tags:
i.migrate(post_row=post)
PostTag.update(deleted=True).where(PostTag.post==self).where(PostTag.tag.in_(tags)).execute()
if len(self.content):
self.content.get().migrate(post)
self.deleted = True
self.save()
return post
class Content(OldModel):
post = ForeignKeyField(Post, backref='content')
path = CharField(unique=True)
original_length = IntegerField()
current_length = IntegerField()
def migrate(self, post_row=None):
data = File.get_file_content(self.path)
sha256 = hashlib.sha256(data).hexdigest()
mt = magic.from_buffer(data, mime=True)
mt_row, _ = database.MimeType.get_or_create(name=mt)
ext = self.path.split('.')[-1]
row, _ = database.Content.get_or_create(post=post_row or self.post.migrate(),
sha256_current=sha256,
mimetype=mt_row,
file_size_current=len(data),
we_modified_it=False)
database.File.save_file(data, ext, raise_if_exists=True, but_check_for_same=True)
self.deleted = True
self.save()
File.delete_file(self.path)
return row
class PostTag(OldModel):
tag = ForeignKeyField(Tag, backref='posts')
post = ForeignKeyField(Post, backref='tags')
class Meta:
primary_key = CompositeKey('tag', 'post')
def migrate(self, post_row=None):
database.PostTag.get_or_create(post=post_row or self.post.migrate(), tag=self.tag.migrate())
# if confirm_delete(self):
# PostTag.update(deleted=True).where(PostTag.tag==self.tag).where(PostTag.post==self.post).execute()
class Comment(OldModel):
author = ForeignKeyField(User, backref='comments')
post = ForeignKeyField(Post, backref='comments')
id = IntegerField(primary_key=True, unique=True)
body = TextField()
score = IntegerField()
created_at = DateTimeField()
def migrate(self, post_row=None):
row, _ = database.Comment.get_or_create(post=post_row or self.post.migrate(),
local_id=self.id,
creator=self.author.migrate(),
body=self.body,
score=self.score,
comment_created_at=self.created_at)
self.deleted = True
self.save()
return row
class Note(OldModel):
id = IntegerField(primary_key=True, unique=True)
author = ForeignKeyField(User, backref='notes')
post = ForeignKeyField(Post, backref='notes')
body = TextField()
created_at = DateTimeField()
updated_at = DateTimeField()
is_active = BooleanField()
version = IntegerField()
x = IntegerField()
y = IntegerField()
width = IntegerField()
height = IntegerField()
def migrate(self, post_row=None):
row, _ = database.Note.get_or_create(
board = BOARD,
local_id = self.id,
author = self.author.migrate(),
post = post_row or self.post.migrate(),
body = self.body,
note_created_at = self.created_at,
note_updated_at = self.updated_at,
is_active = self.is_active,
version = self.version,
x = self.x, y = self.y, width = self.width, height = self.height)
self.deleted = True
self.save()
return row
page = 1
iterated = True
while iterated:
iterated = False
#for i in Post.select().where(Post.deleted == False).where(Post.status_id.in_([3, 16])).limit(1).iterator():
for i in Content.select().where(Content.deleted == False).limit(1).iterator():
i.migrate()
iterated = True
page += 1
|
danya02/booru-mirror-unified
|
count_post_tags.py
|
from database import *
#boards = list(Imageboard.select())
#last_tag_id = TagPostCount.select(fn.MAX(TagPostCount.tag_id)).scalar() or 1
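# Recount any tag that has no TagPostCount row yet, or whose count is more
# than five days stale; per-board counts are written first, then a board=None
# row carrying the total across all boards.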
for tag in Tag.select().join(TagPostCount, JOIN.LEFT_OUTER).where(TagPostCount.tag.is_null(True)).orwhere(TagPostCount.changed_at < (datetime.datetime.now() - datetime.timedelta(days=5))).order_by(Tag.id).iterator():
counts = PostTag.select(Post.board_id, fn.COUNT(1)).join(Post).where(PostTag.tag == tag).group_by(Post.board).tuples()
with db.atomic():
sumcount = 0
for board, count in counts:
board = Imageboard.get_by_id(board)
print(board.name, tag.name, count)
sumcount += count
#if count != 0:
TagPostCount.set(tag=tag, post_count=count, board=board)
TagPostCount.set(tag=tag, post_count=sumcount, board=None)
|
shreyashsrivastava/wagtail
|
wagtail/admin/tests/test_reports_views.py
|
import datetime
from io import BytesIO
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from openpyxl import load_workbook
from wagtail.core.models import Page, PageLogEntry
from wagtail.tests.utils import WagtailTestUtils
class TestLockedPagesView(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
def get(self, params={}):
return self.client.get(reverse('wagtailadmin_reports:locked_pages'), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/reports/locked_pages.html')
# Initially there should be no locked pages
self.assertContains(response, "No locked pages found.")
self.page = Page.objects.first()
self.page.locked = True
self.page.locked_by = self.user
self.page.locked_at = timezone.now()
self.page.save()
# Now the listing should contain our locked page
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/reports/locked_pages.html')
self.assertNotContains(response, "No locked pages found.")
self.assertContains(response, self.page.title)
def test_csv_export(self):
self.page = Page.objects.first()
self.page.locked = True
self.page.locked_by = self.user
if settings.USE_TZ:
# 12:00 UTC
self.page.locked_at = '2013-02-01T12:00:00.000Z'
self.page.latest_revision_created_at = '2013-01-01T12:00:00.000Z'
else:
# 12:00 in no specific timezone
self.page.locked_at = '2013-02-01T12:00:00'
self.page.latest_revision_created_at = '2013-01-01T12:00:00'
self.page.save()
response = self.get(params={'export': 'csv'})
# Check response
self.assertEqual(response.status_code, 200)
data_lines = response.getvalue().decode().split("\n")
self.assertEqual(data_lines[0], 'Title,Updated,Status,Type,Locked At,Locked By\r')
if settings.USE_TZ:
self.assertEqual(data_lines[1], 'Root,2013-01-01 12:00:00+00:00,live,Page,2013-02-01 12:00:00+00:00,test@email.com\r')
else:
self.assertEqual(data_lines[1], 'Root,2013-01-01 12:00:00,live,Page,2013-02-01 12:00:00,<EMAIL>\r')
def test_xlsx_export(self):
self.page = Page.objects.first()
self.page.locked = True
self.page.locked_by = self.user
if settings.USE_TZ:
# 12:00 UTC
self.page.locked_at = '2013-02-01T12:00:00.000Z'
self.page.latest_revision_created_at = '2013-01-01T12:00:00.000Z'
else:
# 12:00 in no specific timezone
self.page.locked_at = '2013-02-01T12:00:00'
self.page.latest_revision_created_at = '2013-01-01T12:00:00'
self.page.save()
response = self.get(params={'export': 'xlsx'})
# Check response - the locked page info should be in it
self.assertEqual(response.status_code, 200)
workbook_data = response.getvalue()
worksheet = load_workbook(filename=BytesIO(workbook_data))['Sheet1']
cell_array = [[cell.value for cell in row] for row in worksheet.rows]
self.assertEqual(cell_array[0], ['Title', 'Updated', 'Status', 'Type', 'Locked At', 'Locked By'])
self.assertEqual(cell_array[1], ['Root', datetime.datetime(2013, 1, 1, 12, 0), 'live', 'Page', datetime.datetime(2013, 2, 1, 12, 0), '<EMAIL>'])
self.assertEqual(len(cell_array), 2)
class TestFilteredLockedPagesView(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.user = self.login()
self.unpublished_page = Page.objects.get(url_path='/home/events/tentative-unpublished-event/')
self.unpublished_page.locked = True
self.unpublished_page.locked_by = self.user
self.unpublished_page.locked_at = timezone.now()
self.unpublished_page.save()
def get(self, params={}):
return self.client.get(reverse('wagtailadmin_reports:locked_pages'), params)
def test_filter_by_live(self):
response = self.get(params={'live': 'true'})
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Tentative Unpublished Event")
self.assertContains(response, "My locked page")
response = self.get(params={'live': 'false'})
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Tentative Unpublished Event")
self.assertNotContains(response, "My locked page")
def test_filter_by_user(self):
response = self.get(params={'locked_by': self.user.pk})
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Tentative Unpublished Event")
self.assertNotContains(response, "My locked page")
class TestFilteredLogEntriesView(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.user = self.login()
self.home_page = Page.objects.get(url_path='/home/')
self.create_log = PageLogEntry.objects.log_action(self.home_page, 'wagtail.create')
self.edit_log_1 = PageLogEntry.objects.log_action(self.home_page, 'wagtail.edit')
self.edit_log_2 = PageLogEntry.objects.log_action(self.home_page, 'wagtail.edit')
self.edit_log_3 = PageLogEntry.objects.log_action(self.home_page, 'wagtail.edit')
self.create_comment_log = PageLogEntry.objects.log_action(self.home_page, 'wagtail.comments.create', data={
'comment': {
'contentpath': 'title',
'text': 'Foo',
}
})
self.edit_comment_log = PageLogEntry.objects.log_action(self.home_page, 'wagtail.comments.edit', data={
'comment': {
'contentpath': 'title',
'text': 'Edited',
}
})
self.create_reply_log = PageLogEntry.objects.log_action(self.home_page, 'wagtail.comments.create_reply', data={
'comment': {
'contentpath': 'title',
'text': 'Foo',
}
})
def get(self, params={}):
return self.client.get(reverse('wagtailadmin_reports:site_history'), params)
def assert_log_entries(self, response, expected):
actual = set(response.context['object_list'])
self.assertSetEqual(actual, set(expected))
def test_unfiltered(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assert_log_entries(response, [
self.create_log,
self.edit_log_1,
self.edit_log_2,
self.edit_log_3,
self.create_comment_log,
self.edit_comment_log,
self.create_reply_log,
])
def test_filter_by_action(self):
response = self.get(params={'action': 'wagtail.edit'})
self.assertEqual(response.status_code, 200)
self.assert_log_entries(response, [
self.edit_log_1,
self.edit_log_2,
self.edit_log_3,
])
def test_hide_commenting_actions(self):
response = self.get(params={'hide_commenting_actions': 'on'})
self.assertEqual(response.status_code, 200)
self.assert_log_entries(response, [
self.create_log,
self.edit_log_1,
self.edit_log_2,
self.edit_log_3,
])
|
shreyashsrivastava/wagtail
|
wagtail/admin/tests/pages/test_copy_page.py
|
from django.contrib.auth.models import Group, Permission
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from django.urls import reverse
from wagtail.core.models import GroupPagePermission, Page
from wagtail.tests.testapp.models import SimplePage
from wagtail.tests.utils import WagtailTestUtils
class TestPageCopy(TestCase, WagtailTestUtils):
def setUp(self):
# Find root page
self.root_page = Page.objects.get(id=2)
# Create a page
self.test_page = self.root_page.add_child(instance=SimplePage(
title="Hello world!",
slug='hello-world',
content="hello",
live=True,
has_unpublished_changes=False,
))
# Create a couple of child pages
self.test_child_page = self.test_page.add_child(instance=SimplePage(
title="Child page",
slug='child-page',
content="hello",
live=True,
has_unpublished_changes=True,
))
self.test_unpublished_child_page = self.test_page.add_child(instance=SimplePage(
title="Unpublished Child page",
slug='unpublished-child-page',
content="hello",
live=False,
has_unpublished_changes=True,
))
# Login
self.user = self.login()
def test_page_copy(self):
response = self.client.get(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )))
# Check response
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/copy.html')
# Make sure all fields are in the form
self.assertContains(response, "New title")
self.assertContains(response, "New slug")
self.assertContains(response, "New parent page")
self.assertContains(response, "Copy subpages")
self.assertContains(response, "Publish copies")
self.assertContains(response, "Alias")
def test_page_copy_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
# Get copy page
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.test_page.id),
'copy_subpages': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# A user with no page permissions at all should be redirected to the admin home
self.assertRedirects(response, reverse('wagtailadmin_home'))
# A user with page permissions, but not add permission at the destination,
# should receive a form validation error
publishers = Group.objects.create(name='Publishers')
GroupPagePermission.objects.create(
group=publishers, page=self.root_page, permission_type='publish'
)
self.user.groups.add(publishers)
self.user.save()
# Get copy page
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.test_page.id),
'copy_subpages': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
form = response.context['form']
self.assertFalse(form.is_valid())
self.assertTrue('new_parent_page' in form.errors)
def test_page_copy_post(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
'publish_copies': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is not live
self.assertFalse(page_copy.live)
self.assertTrue(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were not copied
self.assertEqual(page_copy.get_children().count(), 0)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_copy_subpages(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is not live
self.assertFalse(page_copy.live)
self.assertTrue(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# Neither of them should be live
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertFalse(child_copy.live)
self.assertTrue(child_copy.has_unpublished_changes)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertFalse(unpublished_child_copy.live)
self.assertTrue(unpublished_child_copy.has_unpublished_changes)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_copy_subpages_publish_copies(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': True,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is live
self.assertTrue(page_copy.live)
self.assertFalse(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# The child_copy should be live but the unpublished_child_copy shouldn't
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertTrue(child_copy.live)
self.assertTrue(child_copy.has_unpublished_changes)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertFalse(unpublished_child_copy.live)
self.assertTrue(unpublished_child_copy.has_unpublished_changes)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_new_parent(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.test_child_page.id),
'copy_subpages': False,
'publish_copies': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the new parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.test_child_page.id, )))
# Check that the page was copied to the correct place
self.assertTrue(Page.objects.filter(slug='hello-world-2').first().get_parent(), self.test_child_page)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_post_existing_slug_within_same_parent_page(self):
# This tests the existing slug checking on page copy when not changing the parent page
# Attempt to copy the page but forget to change the slug
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(
response,
'form',
'new_slug',
"This slug is already in use within the context of its parent page \"Welcome to your new Wagtail site!\""
)
def test_page_copy_post_and_subpages_to_same_tree_branch(self):
# This tests that a page cannot be copied into itself when copying subpages
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.test_child_page.id),
'copy_subpages': True,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id,)), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(
response, 'form', 'new_parent_page', "You cannot copy a page into itself when copying subpages"
)
def test_page_copy_post_existing_slug_to_another_parent_page(self):
# This tests the existing slug checking on page copy when changing the parent page
# Attempt to copy the page and change the parent page
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world',
'new_parent_page': str(self.test_child_page.id),
'copy_subpages': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.test_child_page.id, )))
def test_page_copy_post_invalid_slug(self):
# Attempt to copy the page but set an invalid slug string
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello world!',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Should not be redirected (as the save should fail)
self.assertEqual(response.status_code, 200)
# Check that a form error was raised
self.assertFormError(
response, 'form', 'new_slug', "Enter a valid “slug” consisting of Unicode letters, numbers, underscores, or hyphens."
)
def test_page_copy_post_valid_unicode_slug(self):
post_data = {
'new_title': "Hello wɜːld",
'new_slug': 'hello-wɜːld',
'new_parent_page': str(self.test_page.id),
'copy_subpages': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Check response
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.test_page.id, )))
# Get copy
page_copy = self.test_page.get_children().filter(slug=post_data['new_slug']).first()
# Check that the copy exists with the good slug
self.assertNotEqual(page_copy, None)
self.assertEqual(page_copy.slug, post_data['new_slug'])
def test_page_copy_no_publish_permission(self):
# Turn user into an editor who can add pages but not publish them
self.user.is_superuser = False
self.user.groups.add(
Group.objects.get(name="Editors"),
)
self.user.save()
# Get copy page
response = self.client.get(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )))
# The user should have access to the copy page
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/copy.html')
# Make sure the "publish copies" field is hidden
self.assertNotContains(response, "Publish copies")
def test_page_copy_no_publish_permission_post_copy_subpages_publish_copies(self):
# This tests that unprivileged users cannot publish copied pages even if they hack their browser
# Turn user into an editor who can add pages but not publish them
self.user.is_superuser = False
self.user.groups.add(
Group.objects.get(name="Editors"),
)
self.user.save()
# Post
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': True,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().filter(slug='hello-world-2').first()
# Check that the copy exists
self.assertNotEqual(page_copy, None)
# Check that the copy is not live
self.assertFalse(page_copy.live)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# Neither of them should be live
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertFalse(child_copy.live)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertFalse(unpublished_child_copy.live)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_before_copy_page_hook(self):
def hook_func(request, page):
self.assertIsInstance(request, HttpRequest)
self.assertIsInstance(page.specific, SimplePage)
return HttpResponse("Overridden!")
with self.register_hook('before_copy_page', hook_func):
response = self.client.get(reverse('wagtailadmin_pages:copy', args=(self.test_page.id,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
def test_before_copy_page_hook_post(self):
def hook_func(request, page):
self.assertIsInstance(request, HttpRequest)
self.assertIsInstance(page.specific, SimplePage)
return HttpResponse("Overridden!")
with self.register_hook('before_copy_page', hook_func):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
'publish_copies': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id,)), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
# page should not be copied
self.assertFalse(Page.objects.filter(title="Hello world 2").exists())
def test_after_copy_page_hook(self):
def hook_func(request, page, new_page):
self.assertIsInstance(request, HttpRequest)
self.assertIsInstance(page.specific, SimplePage)
self.assertIsInstance(new_page.specific, SimplePage)
return HttpResponse("Overridden!")
with self.register_hook('after_copy_page', hook_func):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
'publish_copies': False,
'alias': False,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id,)), post_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"Overridden!")
# page should be copied
self.assertTrue(Page.objects.filter(title="Hello world 2").exists())
def test_page_copy_alias_post(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': False,
'publish_copies': False,
'alias': True,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().get(slug='hello-world-2')
# Check the copy is an alias of the original
self.assertEqual(page_copy.alias_of, self.test_page.page_ptr)
# Check that the copy is live
# Note: publish_copies is ignored. Alias pages always keep the same state as their original
self.assertTrue(page_copy.live)
self.assertFalse(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were not copied
self.assertEqual(page_copy.get_children().count(), 0)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
def test_page_copy_alias_post_copy_subpages(self):
post_data = {
'new_title': "Hello world 2",
'new_slug': 'hello-world-2',
'new_parent_page': str(self.root_page.id),
'copy_subpages': True,
'publish_copies': False,
'alias': True,
}
response = self.client.post(reverse('wagtailadmin_pages:copy', args=(self.test_page.id, )), post_data)
# Check that the user was redirected to the parent's explore page
self.assertRedirects(response, reverse('wagtailadmin_explore', args=(self.root_page.id, )))
# Get copy
page_copy = self.root_page.get_children().get(slug='hello-world-2')
# Check the copy is an alias of the original
self.assertEqual(page_copy.alias_of, self.test_page.page_ptr)
# Check that the copy is live
# Note: publish_copies is ignored. Alias pages always keep the same state as their original
self.assertTrue(page_copy.live)
self.assertFalse(page_copy.has_unpublished_changes)
# Check that the owner of the page is set correctly
self.assertEqual(page_copy.owner, self.user)
# Check that the children were copied
self.assertEqual(page_copy.get_children().count(), 2)
# Check the child pages
# Aliases keep their originals' state: the published child should be live,
# the unpublished child should not
child_copy = page_copy.get_children().filter(slug='child-page').first()
self.assertNotEqual(child_copy, None)
self.assertEqual(child_copy.alias_of, self.test_child_page.page_ptr)
self.assertTrue(child_copy.live)
self.assertFalse(child_copy.has_unpublished_changes)
unpublished_child_copy = page_copy.get_children().filter(slug='unpublished-child-page').first()
self.assertNotEqual(unpublished_child_copy, None)
self.assertEqual(unpublished_child_copy.alias_of, self.test_unpublished_child_page.page_ptr)
self.assertFalse(unpublished_child_copy.live)
self.assertTrue(unpublished_child_copy.has_unpublished_changes)
# treebeard should report no consistency problems with the tree
self.assertFalse(any(Page.find_problems()), 'treebeard found consistency problems')
|
shreyashsrivastava/wagtail
|
wagtail/admin/urls/reports.py
|
from django.urls import path
from wagtail.admin.views.reports.audit_logging import LogEntriesView
from wagtail.admin.views.reports.locked_pages import LockedPagesView
from wagtail.admin.views.reports.workflows import WorkflowTasksView, WorkflowView
app_name = 'wagtailadmin_reports'
urlpatterns = [
path('locked/', LockedPagesView.as_view(), name='locked_pages'),
path('workflow/', WorkflowView.as_view(), name='workflow'),
path('workflow_tasks/', WorkflowTasksView.as_view(), name='workflow_tasks'),
path('site-history/', LogEntriesView.as_view(), name='site_history'),
]
|
shreyashsrivastava/wagtail
|
wagtail/admin/views/reports/base.py
|
from django.utils.translation import gettext_lazy as _
from django.views.generic.base import TemplateResponseMixin
from django.views.generic.list import BaseListView
from wagtail.admin.views.mixins import SpreadsheetExportMixin
class ReportView(SpreadsheetExportMixin, TemplateResponseMixin, BaseListView):
header_icon = ""
page_kwarg = "p"
template_name = "wagtailadmin/reports/base_report.html"
title = ""
paginate_by = 50
filterset_class = None
def filter_queryset(self, queryset):
filters = None
if self.filterset_class:
filters = self.filterset_class(self.request.GET, queryset=queryset, request=self.request)
queryset = filters.qs
return filters, queryset
def dispatch(self, request, *args, **kwargs):
self.is_export = self.request.GET.get("export") in self.FORMATS
if self.is_export:
self.paginate_by = None
return self.as_spreadsheet(self.filter_queryset(self.get_queryset())[1], self.request.GET.get("export"))
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, *args, object_list=None, **kwargs):
queryset = object_list if object_list is not None else self.object_list
filters, queryset = self.filter_queryset(queryset)
context = super().get_context_data(*args, object_list=queryset, **kwargs)
context["title"] = self.title
context["header_icon"] = self.header_icon
context["filters"] = filters
return context
class PageReportView(ReportView):
template_name = "wagtailadmin/reports/base_page_report.html"
export_headings = {
"latest_revision_created_at": _("Updated"),
"status_string": _("Status"),
"content_type.model_class._meta.verbose_name.title": _("Type"),
}
list_export = [
"title",
"latest_revision_created_at",
"status_string",
"content_type.model_class._meta.verbose_name.title",
]
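# A minimal sketch (hypothetical report, not part of this module) of wiring a
# concrete report onto these base classes: a subclass only needs a title, an
# icon and a queryset; pagination, filtering and spreadsheet export come from
# ReportView. "AgingPagesReportView" and the ordering below are illustrative.
#
#   from wagtail.core.models import Page
#
#   class AgingPagesReportView(PageReportView):
#       title = "Aging pages"
#       header_icon = "time"
#
#       def get_queryset(self):
#           return Page.objects.order_by('latest_revision_created_at')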
|
shreyashsrivastava/wagtail
|
wagtail/admin/tests/pages/test_workflow_history.py
|
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from wagtail.core.models import Page, PageLogEntry
from wagtail.tests.utils import WagtailTestUtils
class TestWorkflowHistoryDetail(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.user = self.create_test_user()
self.login(self.user)
self.christmas_event = Page.objects.get(url_path='/home/events/christmas/')
self.christmas_event.save_revision()
workflow = self.christmas_event.get_workflow()
self.workflow_state = workflow.start(self.christmas_event, self.user)
def test_get_index(self):
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history', args=[self.christmas_event.id])
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('wagtailadmin_pages:edit', args=[self.christmas_event.id]))
self.assertContains(response, reverse('wagtailadmin_pages:workflow_history_detail', args=[self.christmas_event.id, self.workflow_state.id]))
def test_get_index_with_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history', args=[self.christmas_event.id])
)
self.assertEqual(response.status_code, 302)
def test_get_detail(self):
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history_detail', args=[self.christmas_event.id, self.workflow_state.id])
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, reverse('wagtailadmin_pages:edit', args=[self.christmas_event.id]))
self.assertContains(response, reverse('wagtailadmin_pages:workflow_history', args=[self.christmas_event.id]))
def test_get_detail_with_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label='wagtailadmin', codename='access_admin')
)
self.user.save()
response = self.client.get(
reverse('wagtailadmin_pages:workflow_history_detail', args=[self.christmas_event.id, self.workflow_state.id])
)
self.assertEqual(response.status_code, 302)
class TestFiltering(TestCase, WagtailTestUtils):
fixtures = ['test.json']
def setUp(self):
self.user = self.login()
self.home_page = Page.objects.get(url_path='/home/')
self.create_log = PageLogEntry.objects.log_action(self.home_page, 'wagtail.create')
self.edit_log_1 = PageLogEntry.objects.log_action(self.home_page, 'wagtail.edit')
self.edit_log_2 = PageLogEntry.objects.log_action(self.home_page, 'wagtail.edit')
self.edit_log_3 = PageLogEntry.objects.log_action(self.home_page, 'wagtail.edit')
self.create_comment_log = PageLogEntry.objects.log_action(self.home_page, 'wagtail.comments.create', data={
'comment': {
'contentpath': 'title',
'text': 'Foo',
}
})
self.edit_comment_log = PageLogEntry.objects.log_action(self.home_page, 'wagtail.comments.edit', data={
'comment': {
'contentpath': 'title',
'text': 'Edited',
}
})
self.create_reply_log = PageLogEntry.objects.log_action(self.home_page, 'wagtail.comments.create_reply', data={
'comment': {
'contentpath': 'title',
'text': 'Foo',
}
})
def get(self, params={}):
return self.client.get(reverse('wagtailadmin_reports:site_history'), params)
def assert_log_entries(self, response, expected):
actual = set(response.context['object_list'])
self.assertSetEqual(actual, set(expected))
def test_unfiltered(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assert_log_entries(response, [
self.create_log,
self.edit_log_1,
self.edit_log_2,
self.edit_log_3,
self.create_comment_log,
self.edit_comment_log,
self.create_reply_log,
])
def test_filter_by_action(self):
response = self.get(params={'action': 'wagtail.edit'})
self.assertEqual(response.status_code, 200)
self.assert_log_entries(response, [
self.edit_log_1,
self.edit_log_2,
self.edit_log_3,
])
def test_hide_commenting_actions(self):
response = self.get(params={'hide_commenting_actions': 'on'})
self.assertEqual(response.status_code, 200)
self.assert_log_entries(response, [
self.create_log,
self.edit_log_1,
self.edit_log_2,
self.edit_log_3,
])
|
shreyashsrivastava/wagtail
|
wagtail/admin/views/reports/__init__.py
|
from .base import PageReportView, ReportView # noqa
|
shreyashsrivastava/wagtail
|
wagtail/admin/tests/test_buttons_hooks.py
|
from django.test import TestCase
from django.urls import reverse
from django.utils.http import urlencode
from wagtail.admin import widgets as wagtailadmin_widgets
from wagtail.admin.wagtail_hooks import page_listing_more_buttons
from wagtail.core import hooks
from wagtail.core.models import Page
from wagtail.tests.utils import WagtailTestUtils
class PagePerms:
def can_move(self):
return False
def can_copy(self):
return False
def can_delete(self):
return True
def can_unpublish(self):
return False
def can_view_revisions(self):
return False
class TestButtonsHooks(TestCase, WagtailTestUtils):
def setUp(self):
self.root_page = Page.objects.get(id=2)
self.login()
def test_register_page_listing_buttons(self):
def page_listing_buttons(page, page_perms, is_parent=False, next_url=None):
yield wagtailadmin_widgets.PageListingButton(
'Another useless page listing button',
'/custom-url',
priority=10
)
with hooks.register_temporarily('register_page_listing_buttons', page_listing_buttons):
response = self.client.get(
reverse('wagtailadmin_explore', args=(self.root_page.id, ))
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/listing/_button_with_dropdown.html')
self.assertTemplateUsed(response, 'wagtailadmin/pages/listing/_buttons.html')
self.assertContains(response, 'Another useless page listing button')
def test_register_page_listing_more_buttons(self):
def page_listing_more_buttons(page, page_perms, is_parent=False, next_url=None):
yield wagtailadmin_widgets.Button(
'Another useless button in default "More" dropdown',
'/custom-url',
priority=10
)
with hooks.register_temporarily('register_page_listing_more_buttons', page_listing_more_buttons):
response = self.client.get(
reverse('wagtailadmin_explore', args=(self.root_page.id, ))
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/listing/_button_with_dropdown.html')
self.assertTemplateUsed(response, 'wagtailadmin/pages/listing/_buttons.html')
self.assertContains(response, 'Another useless button in default "More" dropdown')
def test_custom_button_with_dropdown(self):
def page_custom_listing_buttons(page, page_perms, is_parent=False, next_url=None):
yield wagtailadmin_widgets.ButtonWithDropdownFromHook(
'One more more button',
hook_name='register_page_listing_one_more_more_buttons',
page=page,
page_perms=page_perms,
is_parent=is_parent,
next_url=next_url,
attrs={'target': '_blank', 'rel': 'noopener noreferrer'},
priority=50
)
def page_custom_listing_more_buttons(page, page_perms, is_parent=False, next_url=None):
yield wagtailadmin_widgets.Button(
'Another useless dropdown button in "One more more button" dropdown',
'/custom-url',
priority=10
)
with hooks.register_temporarily('register_page_listing_buttons', page_custom_listing_buttons), hooks.register_temporarily('register_page_listing_one_more_more_buttons', page_custom_listing_more_buttons):
response = self.client.get(
reverse('wagtailadmin_explore', args=(self.root_page.id, ))
)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'wagtailadmin/pages/listing/_button_with_dropdown.html')
self.assertTemplateUsed(response, 'wagtailadmin/pages/listing/_buttons.html')
self.assertContains(response, 'One more more button')
self.assertContains(response, 'Another useless dropdown button in "One more more button" dropdown')
def test_delete_button_next_url(self):
page_perms = PagePerms()
page = self.root_page
base_url = reverse('wagtailadmin_pages:delete', args=[page.id])
next_url = "a/random/url/"
full_url = base_url + '?' + urlencode({'next': next_url})
# the page_listing_more_buttons generator yields only the `Delete` button here
delete_button = next(page_listing_more_buttons(
page,
page_perms,
is_parent=False,
next_url=next_url
))
self.assertEqual(delete_button.url, full_url)
next_url = reverse('wagtailadmin_explore', args=[page.id])
delete_button = next(page_listing_more_buttons(
page,
page_perms,
is_parent=False,
next_url=next_url
))
self.assertEqual(delete_button.url, base_url)
|
shreyashsrivastava/wagtail
|
wagtail/admin/tests/test_templatetags.py
|
from datetime import timedelta
from unittest import mock
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from freezegun import freeze_time
from wagtail.admin.staticfiles import versioned_static
from wagtail.admin.templatetags.wagtailadmin_tags import (
avatar_url, notification_static, timesince_last_update, timesince_simple)
from wagtail.images.tests.utils import get_test_image_file
from wagtail.tests.utils import WagtailTestUtils
from wagtail.users.models import UserProfile
class TestAvatarTemplateTag(TestCase, WagtailTestUtils):
def setUp(self):
# Create a user
self.test_user = self.create_user(
username='testuser',
email='<EMAIL>',
password='password',
)
def test_use_gravatar_by_default(self):
url = avatar_url(self.test_user)
self.assertIn('www.gravatar.com', url)
def test_skip_gravatar_if_no_email(self):
self.test_user.email = ''
url = avatar_url(self.test_user)
self.assertIn('default-user-avatar', url)
@override_settings(WAGTAIL_GRAVATAR_PROVIDER_URL='https://robohash.org')
def test_custom_gravatar_provider(self):
url = avatar_url(self.test_user)
self.assertIn('robohash.org', url)
@override_settings(WAGTAIL_GRAVATAR_PROVIDER_URL=None)
def test_disable_gravatar(self):
url = avatar_url(self.test_user)
self.assertIn('default-user-avatar', url)
def test_uploaded_avatar(self):
user_profile = UserProfile.get_for_user(self.test_user)
user_profile.avatar = get_test_image_file(filename='custom-avatar.png')
user_profile.save()
url = avatar_url(self.test_user)
self.assertIn('custom-avatar', url)
class TestNotificationStaticTemplateTag(TestCase):
@override_settings(STATIC_URL='/static/')
def test_local_notification_static(self):
url = notification_static('wagtailadmin/images/email-header.jpg')
self.assertEqual('{}/static/wagtailadmin/images/email-header.jpg'.format(settings.BASE_URL), url)
@override_settings(STATIC_URL='/static/', BASE_URL='http://localhost:8000')
def test_local_notification_static_baseurl(self):
url = notification_static('wagtailadmin/images/email-header.jpg')
self.assertEqual('http://localhost:8000/static/wagtailadmin/images/email-header.jpg', url)
@override_settings(STATIC_URL='https://s3.amazonaws.com/somebucket/static/', BASE_URL='http://localhost:8000')
def test_remote_notification_static(self):
url = notification_static('wagtailadmin/images/email-header.jpg')
self.assertEqual('https://s3.amazonaws.com/somebucket/static/wagtailadmin/images/email-header.jpg', url)
class TestVersionedStatic(TestCase):
def test_versioned_static(self):
result = versioned_static('wagtailadmin/js/core.js')
self.assertRegex(result, r'^/static/wagtailadmin/js/core.js\?v=(\w+)$')
@mock.patch('wagtail.admin.staticfiles.static')
def test_versioned_static_version_string(self, mock_static):
mock_static.return_value = '/static/wagtailadmin/js/core.js?v=123'
result = versioned_static('wagtailadmin/js/core.js')
self.assertEqual(result, '/static/wagtailadmin/js/core.js?v=123')
mock_static.assert_called_once_with('wagtailadmin/js/core.js')
def test_versioned_static_absolute_path(self):
result = versioned_static('/static/wagtailadmin/js/core.js')
self.assertEqual(result, '/static/wagtailadmin/js/core.js')
def test_versioned_static_url(self):
result = versioned_static('http://example.org/static/wagtailadmin/js/core.js')
self.assertEqual(result, 'http://example.org/static/wagtailadmin/js/core.js')
@freeze_time("2020-07-01 12:00:00")
class TestTimesinceTags(TestCase):
def test_timesince_simple(self):
now = timezone.now()
ts = timesince_simple(now)
self.assertEqual(ts, "Just now")
ts = timesince_simple(now - timedelta(hours=1, minutes=10))
self.assertEqual(ts, "1\xa0hour ago")
ts = timesince_simple(now - timedelta(weeks=2, hours=1, minutes=10))
self.assertEqual(ts, "2\xa0weeks ago")
def test_timesince_last_update_today_shows_time(self):
dt = timezone.now() - timedelta(hours=1)
formatted_time = dt.astimezone(timezone.get_current_timezone()).strftime('%H:%M')
timesince = timesince_last_update(dt)
self.assertEqual(timesince, formatted_time)
# Check prefix output
timesince = timesince_last_update(dt, time_prefix='my prefix')
self.assertEqual(timesince, 'my prefix {}'.format(formatted_time))
def test_timesince_last_update_before_today_shows_timeago(self):
dt = timezone.now() - timedelta(weeks=1, days=2)
timesince = timesince_last_update(dt, use_shorthand=False)
self.assertEqual(timesince, '1\xa0week, 2\xa0days ago')
timesince = timesince_last_update(dt)
self.assertEqual(timesince, '1\xa0week ago')
|
shreyashsrivastava/wagtail
|
wagtail/admin/filters.py
|
import django_filters
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_filters.widgets import SuffixedMultiWidget
from wagtail.admin.widgets import AdminDateInput, BooleanButtonSelect, ButtonSelect, FilteredSelect
class DateRangePickerWidget(SuffixedMultiWidget):
"""
A widget allowing a start and end date to be picked.
"""
template_name = 'wagtailadmin/widgets/daterange_input.html'
suffixes = ['after', 'before']
def __init__(self, attrs=None):
widgets = (AdminDateInput(attrs={'placeholder': _("Date from")}), AdminDateInput(attrs={'placeholder': _("Date to")}))
super().__init__(widgets, attrs)
def decompress(self, value):
if value:
return [value.start, value.stop]
return [None, None]
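# Note: django-filters' RangeField compresses the two date inputs into a
# slice(start, stop) value, which is what decompress above unpacks, e.g.
# decompress(slice(date(2020, 1, 1), date(2020, 2, 1))) returns those two
# dates as [start, stop].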
class FilteredModelChoiceIterator(django_filters.fields.ModelChoiceIterator):
"""
A variant of Django's ModelChoiceIterator that, instead of yielding (value, label) tuples,
returns (value, label, filter_value) so that FilteredSelect can drop filter_value into
the data-filter-value attribute.
"""
def choice(self, obj):
return (
self.field.prepare_value(obj),
self.field.label_from_instance(obj),
self.field.get_filter_value(obj)
)
class FilteredModelChoiceField(django_filters.fields.ModelChoiceField):
"""
A ModelChoiceField that uses FilteredSelect to dynamically show/hide options based on another
ModelChoiceField of related objects; an option will be shown whenever the selected related
object is present in the result of filter_accessor for that option.
filter_field - the HTML `id` of the related ModelChoiceField
filter_accessor - either the name of a relation, property or method on the model instance which
returns a queryset of related objects, or a function which accepts the model instance and
returns such a queryset.
"""
widget = FilteredSelect
iterator = FilteredModelChoiceIterator
def __init__(self, *args, **kwargs):
self.filter_accessor = kwargs.pop('filter_accessor')
filter_field = kwargs.pop('filter_field')
super().__init__(*args, **kwargs)
self.widget.filter_field = filter_field
def get_filter_value(self, obj):
# Use filter_accessor to obtain a queryset of related objects
if callable(self.filter_accessor):
queryset = self.filter_accessor(obj)
else:
# treat filter_accessor as a method/property name of obj
queryset = getattr(obj, self.filter_accessor)
if isinstance(queryset, models.Manager):
queryset = queryset.all()
elif callable(queryset):
queryset = queryset()
# Turn this queryset into a list of IDs that will become the 'data-filter-value' used to
# filter this listing
return queryset.values_list('pk', flat=True)
class FilteredModelChoiceFilter(django_filters.ModelChoiceFilter):
field_class = FilteredModelChoiceField
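# A minimal usage sketch (hypothetical Task/Workflow models, not part of this
# module): a task filter that only offers tasks belonging to the workflow
# currently selected in the field with HTML id 'id_workflow' might look like
#
#   task = FilteredModelChoiceFilter(
#       queryset=Task.objects.all(),
#       filter_field='id_workflow',
#       filter_accessor='workflows',
#   )
#
# where Task.workflows yields the workflows each task appears in; the extra
# kwargs flow through django-filters into FilteredModelChoiceField.__init__.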
class WagtailFilterSet(django_filters.FilterSet):
@classmethod
def filter_for_lookup(cls, field, lookup_type):
filter_class, params = super().filter_for_lookup(field, lookup_type)
if filter_class == django_filters.ChoiceFilter:
params.setdefault('widget', ButtonSelect)
params.setdefault('empty_label', _("All"))
elif filter_class in [django_filters.DateFilter, django_filters.DateTimeFilter]:
params.setdefault('widget', AdminDateInput)
elif filter_class == django_filters.DateFromToRangeFilter:
params.setdefault('widget', DateRangePickerWidget)
elif filter_class == django_filters.BooleanFilter:
params.setdefault('widget', BooleanButtonSelect)
return filter_class, params
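# A minimal sketch of leaning on these defaults (hypothetical filterset name;
# Page is wagtail.core.models.Page): declaring the fields is enough, since
# filter_for_lookup above swaps in the admin widgets.
#
#   class LockedPagesReportFilterSet(WagtailFilterSet):
#       class Meta:
#           model = Page
#           fields = ['live', 'locked_at']  # BooleanButtonSelect / AdminDateInput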
|
DapperDino/Learning-TensorFlow
|
BreastCancerDataSVM/breast_cancer_data_svm.py
|
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Load cancer data from dataset.
cancer = load_breast_cancer()
# Define which data will be on the x and y axis.
x = cancer.data
y = cancer.target
# Use 80% of the data to train with and then test on the remaining 20%.
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2)
# Fit the classifier to the data using a linear kernel.
clf = SVC(kernel="linear", C=2, gamma="auto")
clf.fit(x_train, y_train)
# Predict the y test values using our x test values.
y_prediction = clf.predict(x_test)
# Store and display the accuracy of this model.
accuracy = accuracy_score(y_test, y_prediction)
print(accuracy)
|
DapperDino/Learning-TensorFlow
|
CarDataKNN/car_data_knn.py
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsClassifier
# Load car data from file.
data = pd.read_csv("CarDataKNN/Car.data")
# Convert attribute names to int.
le = preprocessing.LabelEncoder()
buying = le.fit_transform(list(data["buying"]))
maint = le.fit_transform(list(data["maint"]))
door = le.fit_transform(list(data["door"]))
persons = le.fit_transform(list(data["persons"]))
lug_boot = le.fit_transform(list(data["lug_boot"]))
safety = le.fit_transform(list(data["safety"]))
cls = le.fit_transform(list(data["class"]))
# Define the label we wish to predict.
predict = "class"
# Define which data will be on the x and y axis.
x = list(zip(buying, maint, door, persons, lug_boot, safety))
y = list(cls)
# Use 90% of the data to train with and then test on the remaining 10%.
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.1)
# Create and fit our model.
model = KNeighborsClassifier(n_neighbors=9)
model.fit(x_train, y_train)
# Store and display the accuracy of this model.
accuracy = model.score(x_test, y_test)
print(accuracy)
# Predict the y test values using our x test values.
predicted = model.predict(x_test)
names = ["unacc", "acc", "good", "vgood"]
# Display how accurate each prediction was.
for i in range(len(predicted)):
print("Predicted: {}\nData: {}\nActual: {}".format(
names[predicted[i]], x_test[i], names[y_test[i]]))
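# kneighbors returns a (distances, indices) pair for the 9 training
# points nearest to this test sample.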
n = model.kneighbors([x_test[i]], 9)
print("N: {}".format(n))
|
DapperDino/Learning-TensorFlow
|
StudentDataRegression/student_data_regression.py
|
import pickle
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import linear_model
import matplotlib.pyplot as pyplot
from matplotlib import style
# Load student data from file.
data = pd.read_csv("StudentDataRegression/StudentData.csv", sep=";")
# Filter only the data we wish to model.
data = data[["G1", "G2", "G3", "studytime", "failures", "absences"]]
# Define the label we wish to predict.
predict = "G3"
# Drop the label column from the input data, since the value we predict must not be used as a feature.
x = np.array(data.drop(columns=[predict]))
y = np.array(data[predict])
# Use 90% of the data to train with and then test on the remaining 10%.
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.1)
"""
best_accuracy = 0
while(True):
#Use 90% of the data to train with and then test on the remaining 10%.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)
#Create an instance of the Linear Regression class.
linear = linear_model.LinearRegression()
#Fit the training data to the model.
linear.fit(x_train, y_train)
accuracy = linear.score(x_test, y_test)
#Check to see if this model is more accurate than our previous best.
if accuracy > best_accuracy:
#Store the new best accuracy to compare future models to.
best_accuracy = accuracy
print(best_accuracy)
#Save the model to a file.
with open("StudentDataRegression/student_model.pickle", "wb") as f:
pickle.dump(linear, f)
"""
# Load model from file.
pickle_in = open("StudentDataRegression/student_model.pickle", "rb")
linear = pickle.load(pickle_in)
# Output statistics from the testing.
print("Co: {}".format(linear.coef_))
print("Intercept: {}".format(linear.intercept_))
# Output data from the testing.
predictions = linear.predict(x_test)
for i in range(len(predictions)):
print(predictions[i], x_test[i], y_test[i])
# Display data as scatter graph.
p = "failures"
style.use("ggplot")
pyplot.scatter(data[p], data[predict])
pyplot.xlabel(p)
pyplot.ylabel("Final Grade")
pyplot.show()
|
OrlandoCano/JavaDevDay2019
|
data/sessionize_data/transform_data.py
|
import requests
import json
import re
API_URL = "https://sessionize.com/api/v2/ezuter2b/view/all"
res = requests.get(API_URL)
json_data = json.loads(res.text)
speakers = {}
def clean_name(name):
regex = re.compile('[^a-zA-Z]')
res = regex.sub('', name)
return res.lower()
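# e.g. clean_name("Jane O'Doe") -> 'janeodoe'; note that accented and other
# non-ASCII letters are stripped as well, since the regex only keeps a-z/A-Z.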
# Speakers
for i, s in enumerate(json_data['speakers']):
new_key = clean_name(s['fullName'])
speakers[new_key] = {}
speakers[new_key]['bio'] = s['bio']
speakers[new_key]['id'] = new_key
speakers[new_key]['title'] = s['tagLine']
speakers[new_key]['company'] = s['tagLine']
speakers[new_key]['order'] = i
speakers[new_key]['photoUrl'] = s['profilePicture']
speakers[new_key]['name'] = s['fullName']
speakers[new_key]['featured'] = True
speakers[new_key]['socials'] = []
for l in s['links']:
to_add = {}
to_add['name'] = l['title']
to_add['link'] = l['url']
to_add['icon'] = l['title'].lower()
speakers[new_key]['socials'].append(to_add)
with open('sessionize_speakers.json', 'w') as f:
f.write(json.dumps(speakers))
|
TeamMacLean/cgr_view
|
cgr/__init__.py
|
"""
cgr
A module for creating, saving and drawing k-mer matrices and Chaos Game Representations (CGRs) of nucleotide sequences
Prerequisites
-------------
- Jellyfish
An external program for counting k-mers. Must be accessible on the path. You can install from conda as follows:
.. highlight:: bash
.. code-block:: bash
conda install -c bioconda jellyfish
Quickstart
----------
+ Input fasta file, get cgr
* one cgr for each entry in the fasta file
.. highlight:: python
.. code-block:: python
cgr.from_fasta("my_seqs.fa", outfile = "my_cgrs", k = 7)
* just one cgr with all entries in the fasta file (eg for genomes and contigs)
.. highlight:: python
.. code-block:: python
cgr.from_fasta("my_genome.fa", outfile = "genome_cgr", k = 7, as_single = True)
Workflow:
---------
1. make kmer count db in Jellyfish from fasta -> generate cgr from db.
2. optionally merge cgrs into single cgr as separate channels
3. stack all composed cgrs into an array of cgrs
4. save as numpy binary (.npy) files
Usage:
------
1. Import module
.. highlight:: python
.. code-block:: python
import cgr
2. Make kmer count db
.. highlight:: python
.. code-block:: python
cgr.run_jellyfish("test_data/NC_012920.fasta", 11, "11mer.jf")
cgr.run_jellyfish("test_data/NC_012920.fasta", 10, "10_mer.jf")
2. Load CGRs from kmer count db
.. highlight:: python
.. code-block:: python
cgr1 = cgr.cgr_matrix("/Users/macleand/Desktop/athal-5-mers.jf")
cgr2 = cgr.cgr_matrix("test_data/five_mer.jf")
3. Draw a cgr and save to file
* just one cgr, can choose colour (value of 'h') and which channel to put cgr in
.. highlight:: python
.. code-block:: python
cgr.draw_cgr(cgr1, h = 0.64, v = 1.0, out = "my_cgr.png", resize = 1000, main = "s" )
* two cgrs, first in tuple goes in 'h', second goes in 's'. Can set 'v'
.. highlight:: python
.. code-block:: python
cgr.draw_cgr( (cgr1, cgr1), v = 1.0, out = "two_cgrs.png")
* three cgrs 'h','s' and 'v' are assigned as order in tuple
.. highlight:: python
.. code-block:: python
cgr.draw_cgr( (cgr1, cgr1, cgr1) )
4. Save a single cgr into a text file
.. highlight:: python
.. code-block:: python
cgr.save_as_csv(cgr1, file = "out.csv")
5. Join n cgrs into one, extending the number of channels ...
.. highlight:: python
.. code-block:: python
merged_cgr = cgr.join_cgr( (cgr1, cgr2, ... ) )
6. Write to file (numpy binary)
.. highlight:: python
.. code-block:: python
cgr.save_cgr("my_cgr, merged_cgr )
7. Input fasta file, get cgr
* one cgr for each entry in the fasta file
.. highlight:: python
.. code-block:: python
cgr.from_fasta("my_seqs.fa", outfile = "my_cgrs", k = 7)
* just one cgr with all entries in the fasta file (eg for genomes and contigs)
.. highlight:: python
.. code-block:: python
cgr.from_fasta("my_genome.fa", outfile = "genome_cgr", k = 7, as_single = True)
"""
import os
import subprocess
import math
import numpy
import scipy.sparse
import re
import matplotlib.pyplot as plt
import skimage.color
import skimage.io
import skimage.transform
import tempfile
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from typing import Generator, List, Tuple
def estimate_genome_size(fasta: str) -> int:
"""
Guesses genome size from fasta file size, assumes 1 byte ~= 1 base
:param: fasta str -- a fasta file
:return: int -- approximate genome size in nucleotides
"""
return (os.path.getsize(fasta))
def run_jellyfish(fasta: str, k: int, out: str) -> int:
"""
runs Jellyfish on fasta file using k kmer size, produces Jellyfish db file as side effect.
:param: fasta str -- a fasta file
:param: k int -- size of kmers to use
:param: out str -- file in which to save kmer db
:return: int -- return code of Jellyfish subprocess
"""
genome_size = estimate_genome_size(fasta)
cmd = ["jellyfish", "count", "-m", str(k), "-s", str(genome_size), fasta, "-o", out]
result = subprocess.run(cmd)
return result.returncode
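# Illustrative invocation (filenames here are placeholders):
# run_jellyfish("my_seqs.fa", 7, "7mer.jf") shells out to roughly:
#   jellyfish count -m 7 -s <fasta file size in bytes> my_seqs.fa -o 7mer.jf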
def get_kmer_list(jellyfish: str) -> Generator[List, str, None]:
"""
runs jellyfish dump on a Jellyfish DB. Captures output as a generator stream.
Each item returned is a list [kmer: str, count: str]
:param: jellyfish str -- a Jellyfish DB file
:return: Generator -- a list of [kmer string, times_kmer_seen]
"""
cmd = ["jellyfish", "dump", "-c", jellyfish]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
for line in proc.stdout:
yield line.decode("utf-8").rstrip().split(" ")
proc.wait()
proc.stdout.close()
def get_grid_size(k: int) -> int:
"""
returns the grid size (the width and height of the square grid) for a
cgr of length-k kmers, i.e. 2 ** k per side
:param: k int -- the value of k to be used
:return: int -- the width (and height) of the square cgr grid
"""
return int(math.sqrt(4 ** k))
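# e.g. get_grid_size(5) == 32, since a 32 x 32 grid holds all 4 ** 5 == 1024 possible 5-mers.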
def get_coord(kmer: str) -> List[int]:
"""
given a kmer gets the coordinates of the box position in the cgr grid,
returns as list [x,y] of coordinates
:param: kmer str -- a string of nucleotides
:return: coords [x,y] -- the x,y positions of the nucleotides in the cgr
"""
grid_size = get_grid_size(len(kmer))
maxx = grid_size
maxy = grid_size
posx = 1
posy = 1
for char in kmer:
if char == "C":
posx += (maxx / 2)
elif char == "T":
posy += (maxy / 2)
elif char == "G":
posx += (maxx / 2)
posy += (maxy / 2)
maxx = (maxx / 2)
maxy /= 2
return [int(posx) - 1, int(posy) - 1]
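# Worked examples (cf. test_cgr.py): single bases map to the four corners of a
# 2 x 2 grid and longer kmers recurse into ever smaller sub-quadrants:
#   get_coord("A") -> [0, 0]    get_coord("C") -> [1, 0]
#   get_coord("T") -> [0, 1]    get_coord("G") -> [1, 1]
#   get_coord("CTGA") -> [10, 6]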
def get_k(jellyfish: str) -> int:
"""
asks the jellyfish file what value was used for k
:param: jellyfish str -- jellyfish DB file
:return: int -- length of k used
"""
cmd = ["jellyfish", "info", jellyfish]
result = subprocess.run(cmd, capture_output=True)
r = re.match(r".*count\s-m\s(\d+)", result.stdout.decode("utf-8"))
return int(r.group(1))
def get_max_count(jellyfish) -> int:
"""
estimates the count of the most represented kmer in the jellyfish file by reading the last bucket of the jellyfish histogram
:param: jellyfish str -- jellyfish DB file
:return: int estimated count of the most represented kmer
"""
cmd = ["jellyfish", "histo", jellyfish ]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
counts = []
for line in proc.stdout:
counts.append( line.decode("utf-8").rstrip().split(" ") )
proc.wait()
proc.stdout.close()
return int(counts[-1][0])
def cgr_matrix(jellyfish: str) -> scipy.sparse.dok_matrix:
"""
Main function, creates the cgr matrix, a sparse matrix of type scipy.sparse.dok_matrix
Runs the cgr process on a jellyfish file and returns a scipy.sparse.dok_matrix object of the CGR with dtype int32
Only observed kmers are represented, absent coordinates mean 0 counts for the kmer at that coordinate.
:param: jellyfish str -- jellyfish DB file
:return: scipy.sparse.dok_matrix -- sparse matrix of kmer counts
"""
k = get_k(jellyfish)
max_c = get_max_count(jellyfish)
dtype_to_use = numpy.uint8
if max_c > 65535:  # guard against counts overflowing 16 bits
dtype_to_use = numpy.uint32
elif max_c > 255:
dtype_to_use = numpy.uint16
grid_size = get_grid_size(k)
cgr_mat = scipy.sparse.dok_matrix((grid_size, grid_size), dtype=dtype_to_use)
for kmer, count in get_kmer_list(jellyfish):
x, y = get_coord(kmer)
cgr_mat[x, y] = count
return cgr_mat
def join_cgr(cgrs: tuple) -> numpy.ndarray:
"""
Takes tuple of cgrs of shape (n,n) and returns one stacked array of size (n,n, len(cgrs) )
:param: cgrs tuple -- tuple of cgrs to be joined
:return: numpy.ndarray
"""
return numpy.dstack(cgrs)
def save_as_csv(cgr_matrix: scipy.sparse.dok_matrix, file: str = "cgr_matrix.csv", delimiter: str = ",", fmt: str = '%d'):
"""
Writes simple 1 channel cgr matrix to CSV file.
See also numpy.savetxt
:param: cgr_matrix scipy.sparse.dok_matrix -- cgr_matrix to save
:param: file str -- filename to write to
:param: delimiter str -- column separator character
:param: fmt str -- text format string
:return: None
"""
numpy.savetxt(file, cgr_matrix.toarray(), delimiter=delimiter, fmt=fmt)
def make_blanks_like(a: scipy.sparse.dok_matrix , h: float=1.0, s: float=1.0, v: float=1.0) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]:
"""
returns tuple of numpy.ndarrays with default values of h,s and v of shape of a
:param: a scipy.sparse.dok_matrix -- a cgr matrix to make blanks like
:param: h float -- the values with which to fill the first numpy.ndarray
:param: s float -- the values with which to fill the second numpy.ndarray
:param: v float -- the values with which to fill the third numpy.ndarray
:return: Tuple of numpy.ndarray
"""
return numpy.full_like(a, h), numpy.full_like(a, s), numpy.full_like(a, v)
def scale_cgr(cgr_matrix: scipy.sparse.dok_matrix) -> numpy.ndarray:
"""
returns a dense, scaled version of cgr_matrix with values in range 0..1
:param: cgr_matrix scipy.sparse.dok_matrix -- matrix to scale
:return: numpy.ndarray -- dense array of counts scaled to 0..1
"""
return (cgr_matrix / max(cgr_matrix.values())).toarray()
def blocky_scale(im: numpy.ndarray, nR: int, nC: int) -> numpy.ndarray:
"""
Upscales an array in preparation for drawing. By default the array is a square sqrt(4 ** k) wide and high.
For many values of k this will be too small to view well on a monitor. This function does a scale operation that
increases the size of the image by simply repeating the pixels in each square (nearest-neighbour upscaling).
:param: im numpy.ndarray -- the image to be scaled
:param: nR int -- the number of height pixels to be in the final image
:param: nC int -- the number of width pixels to be in the final image
:return: numpy.ndarray -- upscaled image
"""
nR0 = len(im) # source number of rows
nC0 = len(im[0]) # source number of columns
return numpy.asarray([[im[int(nR0 * r / nR)][int(nC0 * c / nC)]
for c in range(nC)] for r in range(nR)])
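# A minimal sketch of the nearest-neighbour behaviour, assuming a 2 x 2 input:
#   blocky_scale([[1, 2], [3, 4]], 4, 4)
#   -> [[1, 1, 2, 2],
#       [1, 1, 2, 2],
#       [3, 3, 4, 4],
#       [3, 3, 4, 4]]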
def resize_rgb_out(rgb: numpy.ndarray,resize: int) -> numpy.ndarray:
"""
given an RGB image at one pixel per kmer, upscales it so that the resulting image is resize * resize pixels
:param: rgb numpy.ndarray -- an RGB image array
:param: resize -- pixel width (and therefore height) of resulting image
:return: numpy.ndarray -- resized image with shape (resize, resize)
"""
r = blocky_scale(rgb[:, :, 0], resize, resize)
g = blocky_scale(rgb[:, :, 1], resize, resize)
b = blocky_scale(rgb[:, :, 2], resize, resize)
return numpy.dstack((r, g, b))
def is_cgr_matrix(obj) -> bool:
"""returns True if obj is a scipy.sparse.dok_matrix object """
return isinstance(obj, scipy.sparse.dok_matrix)
def draw_cgr(cgr_matrices: scipy.sparse.dok_matrix,
h: float = 0.8,
s: float = 0.5,
v: float = 1.0,
main: str = "s",
show: bool = True,
write: bool = True,
out: str = "cgr.png",
resize: bool = False) -> None:
"""Draws cgrs to a file. Allows user to set which of up to 3 provided cgr matrices goes in at which of the H, S or V image channels.
Typically for one channel, select h to specify the image colour and set cgr as s to change that colour according to counts in cgr.
Set v to 1.0 for maximum brightness.
:param: cgr_matrices scipy.sparse.dok_matrix or tuple of scipy.sparse.dok_matrix elements, cgrs to be drawn. Tuple provides order for HSV channels of image.
:param: h float -- (0..1) value for h channel if not used for cgr data
:param: s float -- (0..1) value for s channel if not used for cgr data
:param: v float -- (0..1) value for v channel if not used for cgr data
:param: main str -- the channel to place the cgr matrix in if a single cgr matrix is passed
:param: show bool -- render CGR picture to screen
:param: write -- write CGR picture to file
:param: out str -- filename to write to
:param: resize bool or int -- if False no image resizing is done, if an int image is rescaled to resize pixels width and height
:return: None
"""
if is_cgr_matrix(cgr_matrices): #one channel
draw_single_cgr(cgr_matrices, h=h, s=s, v=v, main=main, show = show, write = write, out = out, resize = resize)
elif all( [is_cgr_matrix(o) for o in cgr_matrices] ) and len(cgr_matrices) ==2: #all cgr matrices
draw_two_cgrs(cgr_matrices, v = v, show = show, write = write, out = out, resize = resize)
elif all( [is_cgr_matrix(o) for o in cgr_matrices] ) and len(cgr_matrices) == 3 :
draw_three_cgrs(cgr_matrices, show = show, write = write, out = out, resize = resize)
else:
raise Exception("don't know what to do, cgr_matrices must be one cgr_matrix or a tuple of 2 or 3 cgr_matrices.")
def draw_single_cgr(cgr_matrix, h=0.8, s=0.5, v=1.0, main="s", show = True, write = True, out = "cgr.png", resize = False):
"""
draws a single cgr image, selecting channels and resizing as appropriate
:param: cgr_matrix scipy.sparse.dok_matrix to be drawn.
:param: h float -- (0..1) value for h channel if not used for cgr data
:param: s float -- (0..1) value for s channel if not used for cgr data
:param: v float -- (0..1) value for v channel if not used for cgr data
:param: main str -- the channel to place the cgr matrix in
:param: show bool -- render CGR picture to screen
:param: write -- write CGR picture to file
:param: out str -- filename to write to
:param: resize bool or int -- if False no image resizing is done, if an int image is rescaled to resize pixels width and height
:return: None
"""
scaled = scale_cgr( cgr_matrix )
h_blank, s_blank, v_blank = make_blanks_like(scaled, h,s,v)
hsv = None
if main == "h":
hsv = numpy.dstack((scaled, s_blank, v_blank))
elif main == "s":
hsv = numpy.dstack((h_blank, scaled, v_blank))
elif main == "v":
hsv = numpy.dstack((h_blank, s_blank, scaled))
rgb = skimage.color.hsv2rgb(hsv)
if show:
plt.imshow(rgb)
plt.show()
if write:
if resize:
rgb = resize_rgb_out(rgb, resize)
skimage.io.imsave(out, rgb)
def draw_two_cgrs(cgr_matrices, v = 1.0, show = True, write = True, out = "cgr.png", resize = False ):
"""draws two cgr matrices into a single image. first matrix of tuple becomes h channel, second of tuple becomes v channel
:param: cgr_matrices tuple of scipy.sparse.dok_matrix elements, cgrs to be drawn.
:param: v float -- (0..1) value for v channel
:param: show bool -- render CGR picture to screen
:param: write -- write CGR picture to file
:param: out str -- filename to write to
:param: resize bool or int -- if False no image resizing is done, if an int image is rescaled to resize pixels width and height
:return: None
"""
scaled_l = [scale_cgr(cgrm) for cgrm in cgr_matrices]
v_blank = make_blanks_like(scaled_l[0], v=v)[2]
hsv_stack = numpy.dstack((scaled_l[0], scaled_l[1], v_blank))
rgb = skimage.color.hsv2rgb(hsv_stack)
if show:
draw(rgb)
if write:
write_out(rgb, out, resize)
def draw_three_cgrs(cgr_matrices,show = True, write = True, out = "cgr.png", resize = False):
"""Draws a tuple of 3 cgr matrices as an image
:param: cgr_matrices tuple of scipy.sparse.dok_matrix elements, cgrs to be drawn. Tuple provides order for HSV channels of image
:param: show bool -- render CGR picture to screen
:param: write -- write CGR picture to file
:param: out str -- filename to write to
:param: resize bool or int -- if False no image resizing is done, if an int image is rescaled to resize pixels width and height
:return: None
"""
scaled_t = (scale_cgr(cgrm) for cgrm in cgr_matrices)
hsv_stack = numpy.dstack(scaled_t)
rgb = skimage.color.hsv2rgb(hsv_stack)
if show:
draw(rgb)
if write:
write_out(rgb, out, resize)
def draw(rgb: numpy.ndarray) -> None:
"""
renders RGB array on the screen.
:param: rgb numpy.ndarray -- RGB channel image
"""
plt.imshow(rgb)
plt.show()
def write_out(rgb: numpy.ndarray, out: str, resize: int) -> None:
"""
writes RGB array as image
:param rgb: numpy.ndarray -- RGB channel image
:param out: str file to write to
:param resize: bool or int. If False will not resize, if int will resize image up to that size
:return: None
"""
if resize:
rgb = resize_rgb_out(rgb, resize)
skimage.io.imsave(out, rgb)
def stack_cgrs(cgr_matrices: Tuple) -> numpy.ndarray:
"""
stacks a tuple of N cgr matrices (scipy.sparse.dok_matrix) of shape (w,h)
into one numpy.ndarray of shape (w,h,N)
:param cgr_matrices: tuple of cgr_matrices
:return: numpy.ndarray
"""
cgr_t = tuple(c.toarray() for c in cgr_matrices)
return numpy.stack(cgr_t, axis=-1)
def save_cgr(cgr_obj: numpy.ndarray, outfile: str = "cgr") -> None:
"""
Saves cgr_obj as numpy .npy file.
cgr_obj one or more dimensional numpy.ndarray.
saves as ndarray not dokmatrix, so can be loaded in regular numpy as collections of cgrs
:param cgr_obj: numpy.ndarray constructed cgr_object to save
:param outfile: str file
:return: None
"""
numpy.save(outfile, cgr_obj, allow_pickle=True)
def load_npy(file: str) -> numpy.ndarray:
"""
loads numpy .npy file as ndarray.
Useful for restoring collections of cgrs but resulting array is not compatible directly with
drawing methods here.
:param file str -- numpy .npy file to load
:return: numpy.ndarray
"""
return numpy.load(file, allow_pickle=True)
def many_seq_record_to_one_cgr(fa_file: str, k: int) -> scipy.sparse.dok_matrix:
"""
Reads many sequence records in a FASTA file into a single CGR matrix, treating all sequence records as if they were one sequence, e.g. a genome sequence split into chromosomes.
:param fa_file: str FASTA FILE name
:param k: int length of k to use
:return: scipy.sparse.dok_matrix
"""
temp_jf = tempfile.NamedTemporaryFile()
run_jellyfish(fa_file, k, temp_jf.name)
cgr1 = cgr_matrix(temp_jf.name)
temp_jf.close()
return cgr1
def many_seq_record_to_many_cgr(seq_record: SeqRecord, k: int) -> scipy.sparse.dok_matrix:
"""
Builds a CGR matrix for a single sequence record.
:param seq_record: Bio.SeqRecord.SeqRecord -- a single FASTA record
:param k: int size of k to use
:return: scipy.sparse.dok_matrix
"""
temp_fa = tempfile.NamedTemporaryFile()
temp_jf = tempfile.NamedTemporaryFile()
SeqIO.write(seq_record, temp_fa.name, "fasta")
run_jellyfish(temp_fa.name, k, temp_jf.name)
cgr1 = cgr_matrix(temp_jf.name)
temp_fa.close()
temp_jf.close()
return cgr1
def from_fasta(fasta_file: str, outfile: str = "my_cgrs", as_single: bool=False, k: int = 7) -> None:
"""
Factory function to load in a FASTA file and generate a binary .npy of CGRs
:param fasta_file: str FASTA file to load
:param outfile: str outfile to save
:param as_single: bool If True treats all entries as single sequence and return one CGR. If False, treats all entries individually and returns many CGR
:param k: int length of kmer to use
:return: None
"""
if as_single:
cgr1 = many_seq_record_to_one_cgr(fasta_file, k)
save_cgr(cgr1.toarray(), outfile=outfile )
else:
cgr_t = tuple( many_seq_record_to_many_cgr(seq_record, k) for seq_record in SeqIO.parse(fasta_file, "fasta") )
cgr1 = stack_cgrs(cgr_t)
save_cgr(cgr1, outfile = outfile )
# TODO
# test new dtype switching cgr matrix function - try using from_fasta
|
TeamMacLean/cgr_view
|
test_cgr.py
|
<reponame>TeamMacLean/cgr_view
import unittest
import cgr
import os
from scipy.sparse import dok_matrix
import numpy
class CGRTest(unittest.TestCase):
def test_guess_genome_size(self):
self.assertEqual(cgr.estimate_genome_size("test_data/10kfile"),10240)
def test_run_jellyfish(self):
result = cgr.run_jellyfish("test_data/NC_012920.fasta", 11, "test_data/tmp.jf")
os.remove("test_data/tmp.jf")
self.assertEqual(result, 0)  # run_jellyfish returns the bare return code
def test_get_kmer_list(self):
cgr.run_jellyfish("test_data/NC_012920.fasta", 11, "test_data/tmp.jf")
counts = []
for i in cgr.get_kmer_list("test_data/tmp.jf"):
counts.append(i)
self.assertEqual(len(counts), 16438)
self.assertEqual(len(counts[0]), 2)
os.remove("test_data/tmp.jf")
def test_get_coord(self):
self.assertEqual(cgr.get_coord("A"), [0,0])
self.assertEqual(cgr.get_coord("G"), [1,1])
self.assertEqual(cgr.get_coord("C"), [1,0])
self.assertEqual(cgr.get_coord("T"), [0,1])
self.assertEqual(cgr.get_coord("CTGA"), [10,6])
def test_get_k(self):
self.assertEqual(cgr.get_k("test_data/five_mer.jf"), 5)
def test_cgr_matrix(self):
m = cgr.cgr_matrix("test_data/five_mer.jf")
row1_col1 = m.toarray()[0][0]
self.assertEqual(row1_col1, 70)
def test_join_cgr(self):
a = [ [1,1,1], [1,1,1] ]
a = dok_matrix(a)
b = [ [2,2,2], [2,2,2] ]
b = dok_matrix(b)
c = numpy.array( [ [ [1,2], [1,2], [1,2] ], [ [1,2], [1,2], [1,2] ] ] )
d = cgr.join_cgr((a, b))
self.assertEqual(d.tolist(), c.tolist() )
def test_pad_two_channel_to_three(self):
a = numpy.array( [ [ [1,2], [1,2], [1,2] ], [ [1,2], [1,2], [1,2] ] ] )
b = numpy.array( [ [ [1,2,0], [1,2,0], [1,2,0] ], [ [1,2,0], [1,2,0], [1,2,0] ] ])
c = cgr.pad_two_channel_to_three(a)
self.assertEqual(c.tolist(), b.tolist() )
if __name__ == '__main__':
unittest.main()
|
TeamMacLean/cgr_view
|
setup.py
|
<gh_stars>0
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='cgr_view',
version='0.0.1dev2',
url='https://github.com/TeamMacLean/cgr_view',
packages=setuptools.find_packages(),
long_description=long_description,
long_description_content_type="text/markdown",
license='LICENSE.txt',
author='<NAME>',
author_email='<EMAIL>',
description='A package for rendering Chaos Game Representations of DNA sequences',
python_requires='>=3.7',
install_requires = [
"numpy >= 1.17",
"matplotlib >= 3.1.0",
"scipy >= 1.3.1",
"pillow >= 6.2.1",
"scikit-image >= 0.15.0",
"biopython >= 1.7"
]
)
|
cazalaa/connectedhomeip
|
src/controller/python/chip-repl.py
|
<reponame>cazalaa/connectedhomeip
#!/usr/bin/env python
#
# Copyright (c) 2021 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from IPython import embed
import chip
def main():
# The chip import at the top level will be visible in the ipython REPL.
embed(header = '''
Welcome to the CHIP python REPL utility.
Usage examples:
######## List available BLE adapters #########
import chip.ble
print(chip.ble.GetAdapters())
'''.strip())
if __name__ == "__main__":
main()
|
cazalaa/connectedhomeip
|
src/controller/python/chip/ble/__init__.py
|
<filename>src/controller/python/chip/ble/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BLE-related functionality within CHIP"""
import sys
import platform
import chip.native
import ctypes
from typing import List
from ctypes import c_bool, c_void_p, c_char_p, c_uint
from dataclasses import dataclass
@dataclass
class AdapterInfo:
index: int
address: str
name: str
alias: str
powered_on: bool
def _GetBleLibraryHandle() -> ctypes.CDLL:
""" Get the native library handle with BLE method initialization.
Retrieves the CHIP native library handle and attaches signatures to
native methods.
"""
handle = chip.native.GetLibraryHandle()
# Uses one of the type decorators as an indicator for everything being
# initialized. Native methods default to c_int return types
if handle.pychip_ble_adapter_list_new.restype != c_void_p:
setter = chip.native.NativeLibraryHandleMethodArguments(handle)
setter.Set('pychip_ble_adapter_list_new', c_void_p, [])
setter.Set('pychip_ble_adapter_list_next', c_bool, [c_void_p])
setter.Set('pychip_ble_adapter_list_get_index', c_uint, [c_void_p])
setter.Set('pychip_ble_adapter_list_get_address', c_char_p, [c_void_p])
setter.Set('pychip_ble_adapter_list_get_alias', c_char_p, [c_void_p])
setter.Set('pychip_ble_adapter_list_get_name', c_char_p, [c_void_p])
setter.Set('pychip_ble_adapter_list_is_powered', c_bool, [c_void_p])
setter.Set('pychip_ble_adapter_list_delete', None, [c_void_p])
return handle
def GetAdapters() -> List[AdapterInfo]:
"""Get a list of BLE adapters available on the system. """
handle = _GetBleLibraryHandle()
result = []
nativeList = handle.pychip_ble_adapter_list_new()
if nativeList == 0:
raise Exception('Failed to get BLE adapter list')
try:
while handle.pychip_ble_adapter_list_next(nativeList):
result.append(
AdapterInfo(
index=handle.pychip_ble_adapter_list_get_index(nativeList),
address=handle.pychip_ble_adapter_list_get_address(nativeList).decode('utf8'),
name=handle.pychip_ble_adapter_list_get_name(nativeList).decode('utf8'),
alias=handle.pychip_ble_adapter_list_get_alias(nativeList).decode('utf8'),
powered_on=handle.pychip_ble_adapter_list_is_powered(nativeList),
))
finally:
handle.pychip_ble_adapter_list_delete(nativeList)
return result
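# A minimal usage sketch (field names come from AdapterInfo above; the
# output formatting is illustrative only):
#
#   for adapter in GetAdapters():
#       print('%d: %s (%s) powered_on=%s' % (
#           adapter.index, adapter.name, adapter.address, adapter.powered_on))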
__all__ = [
'GetAdapters',
]
|
xyb/python-xxhash
|
tests/test_xxh3_64.py
|
<filename>tests/test_xxh3_64.py
from __future__ import print_function
import os
import sys
import unittest
import random
import xxhash
class TestXXH(unittest.TestCase):
def test_xxh3_64(self):
self.assertEqual(xxhash.xxh3_64('a').intdigest(), 16629034431890738719)
self.assertEqual(xxhash.xxh3_64('a', 0).intdigest(), 16629034431890738719)
self.assertEqual(xxhash.xxh3_64('a', 1).intdigest(), 15201566949650179872)
self.assertEqual(xxhash.xxh3_64('a', 2**64-1).intdigest(), 4875116479388997462)
def test_xxh3_64_intdigest(self):
self.assertEqual(xxhash.xxh3_64_intdigest('a'), 16629034431890738719)
self.assertEqual(xxhash.xxh3_64_intdigest('a', 0), 16629034431890738719)
self.assertEqual(xxhash.xxh3_64_intdigest('a', 1), 15201566949650179872)
self.assertEqual(xxhash.xxh3_64_intdigest('a', 2**64-1), 4875116479388997462)
def test_xxh3_64_update(self):
x = xxhash.xxh3_64()
x.update('a')
self.assertEqual(xxhash.xxh3_64('a').digest(), x.digest())
self.assertEqual(xxhash.xxh3_64_digest('a'), x.digest())
x.update('b')
self.assertEqual(xxhash.xxh3_64('ab').digest(), x.digest())
self.assertEqual(xxhash.xxh3_64_digest('ab'), x.digest())
x.update('c')
self.assertEqual(xxhash.xxh3_64('abc').digest(), x.digest())
self.assertEqual(xxhash.xxh3_64_digest('abc'), x.digest())
seed = random.randint(0, 2**64)
x = xxhash.xxh3_64(seed=seed)
x.update('a')
self.assertEqual(xxhash.xxh3_64('a', seed).digest(), x.digest())
self.assertEqual(xxhash.xxh3_64_digest('a', seed), x.digest())
x.update('b')
self.assertEqual(xxhash.xxh3_64('ab', seed).digest(), x.digest())
self.assertEqual(xxhash.xxh3_64_digest('ab', seed), x.digest())
x.update('c')
self.assertEqual(xxhash.xxh3_64('abc', seed).digest(), x.digest())
self.assertEqual(xxhash.xxh3_64_digest('abc', seed), x.digest())
def test_xxh3_64_reset(self):
x = xxhash.xxh3_64()
h = x.intdigest()
x.update('x' * 10240)
x.reset()
self.assertEqual(h, x.intdigest())
def test_xxh3_64_seed_reset(self):
seed = random.randint(0, 2**64-1)
x = xxhash.xxh3_64(seed=seed)
h = x.intdigest()
x.update('x' * 10240)
x.reset()
self.assertEqual(h, x.intdigest())
def test_xxh3_64_reset_more(self):
x = xxhash.xxh3_64()
h = x.intdigest()
for i in range(random.randint(100, 200)):
x.reset()
self.assertEqual(h, x.intdigest())
for i in range(10, 1000):
x.update(os.urandom(i))
x.reset()
self.assertEqual(h, x.intdigest())
for i in range(10, 1000):
x.update(os.urandom(100))
x.reset()
self.assertEqual(h, x.intdigest())
def test_xxh3_64_seed_reset_more(self):
seed = random.randint(0, 2**64-1)
x = xxhash.xxh3_64(seed=seed)
h = x.intdigest()
for i in range(random.randint(100, 200)):
x.reset()
self.assertEqual(h, x.intdigest())
for i in range(10, 1000):
x.update(os.urandom(i))
x.reset()
self.assertEqual(h, x.intdigest())
for i in range(10, 1000):
x.update(os.urandom(100))
x.reset()
self.assertEqual(h, x.intdigest())
def test_xxh3_64_copy(self):
a = xxhash.xxh3_64()
a.update('xxhash')
b = a.copy()
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
b.update('xxhash')
self.assertNotEqual(a.digest(), b.digest())
self.assertNotEqual(a.intdigest(), b.intdigest())
self.assertNotEqual(a.hexdigest(), b.hexdigest())
a.update('xxhash')
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
def test_xxh3_64_overflow(self):
s = 'I want an unsigned 64-bit seed!'
a = xxhash.xxh3_64(s, seed=0)
b = xxhash.xxh3_64(s, seed=2**64)
self.assertEqual(a.seed, b.seed)
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), xxhash.xxh3_64_intdigest(s, seed=0))
self.assertEqual(a.intdigest(), xxhash.xxh3_64_intdigest(s, seed=2**64))
self.assertEqual(a.digest(), xxhash.xxh3_64_digest(s, seed=0))
self.assertEqual(a.digest(), xxhash.xxh3_64_digest(s, seed=2**64))
self.assertEqual(a.hexdigest(), xxhash.xxh3_64_hexdigest(s, seed=0))
self.assertEqual(a.hexdigest(), xxhash.xxh3_64_hexdigest(s, seed=2**64))
a = xxhash.xxh3_64(s, seed=1)
b = xxhash.xxh3_64(s, seed=2**64+1)
self.assertEqual(a.seed, b.seed)
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), xxhash.xxh3_64_intdigest(s, seed=1))
self.assertEqual(a.intdigest(), xxhash.xxh3_64_intdigest(s, seed=2**64+1))
self.assertEqual(a.digest(), xxhash.xxh3_64_digest(s, seed=1))
self.assertEqual(a.digest(), xxhash.xxh3_64_digest(s, seed=2**64+1))
self.assertEqual(a.hexdigest(), xxhash.xxh3_64_hexdigest(s, seed=1))
self.assertEqual(a.hexdigest(), xxhash.xxh3_64_hexdigest(s, seed=2**64+1))
a = xxhash.xxh3_64(s, seed=2**65-1)
b = xxhash.xxh3_64(s, seed=2**66-1)
self.assertEqual(a.seed, b.seed)
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), xxhash.xxh3_64_intdigest(s, seed=2**65-1))
self.assertEqual(a.intdigest(), xxhash.xxh3_64_intdigest(s, seed=2**66-1))
self.assertEqual(a.digest(), xxhash.xxh3_64_digest(s, seed=2**65-1))
self.assertEqual(a.digest(), xxhash.xxh3_64_digest(s, seed=2**66-1))
self.assertEqual(a.hexdigest(), xxhash.xxh3_64_hexdigest(s, seed=2**65-1))
self.assertEqual(a.hexdigest(), xxhash.xxh3_64_hexdigest(s, seed=2**66-1))
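# The assertions above demonstrate that seeds are reduced modulo 2**64, so
# seed=2**64 behaves like seed=0 and seed=2**64+1 behaves like seed=1.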
if __name__ == '__main__':
unittest.main()
|
xyb/python-xxhash
|
tests/test_xxh3_128.py
|
import os
import unittest
import random
import xxhash
class TestXXH(unittest.TestCase):
def test_xxh3_128(self):
self.assertEqual(xxhash.xxh3_128('a').intdigest(), 225219434562328483135862406050043285023)
self.assertEqual(xxhash.xxh3_128('a', 0).intdigest(), 225219434562328483135862406050043285023)
self.assertEqual(xxhash.xxh3_128('a', 1).intdigest(), 337425133163118381928709500770786453280)
self.assertEqual(xxhash.xxh3_128('a', 2**64-1).intdigest(), 198297796855923085494266857744987477846)
def test_xxh3_128_intdigest(self):
self.assertEqual(xxhash.xxh3_128_intdigest('a'), 225219434562328483135862406050043285023)
self.assertEqual(xxhash.xxh3_128_intdigest('a', 0), 225219434562328483135862406050043285023)
self.assertEqual(xxhash.xxh3_128_intdigest('a', 1), 337425133163118381928709500770786453280)
self.assertEqual(xxhash.xxh3_128_intdigest('a', 2**64-1), 198297796855923085494266857744987477846)
def test_xxh3_128_update(self):
x = xxhash.xxh3_128()
x.update('a')
self.assertEqual(xxhash.xxh3_128('a').digest(), x.digest())
self.assertEqual(xxhash.xxh3_128_digest('a'), x.digest())
x.update('b')
self.assertEqual(xxhash.xxh3_128('ab').digest(), x.digest())
self.assertEqual(xxhash.xxh3_128_digest('ab'), x.digest())
x.update('c')
self.assertEqual(xxhash.xxh3_128('abc').digest(), x.digest())
self.assertEqual(xxhash.xxh3_128_digest('abc'), x.digest())
seed = random.randint(0, 2**64)
x = xxhash.xxh3_128(seed=seed)
x.update('a')
self.assertEqual(xxhash.xxh3_128('a', seed).digest(), x.digest())
self.assertEqual(xxhash.xxh3_128_digest('a', seed), x.digest())
x.update('b')
self.assertEqual(xxhash.xxh3_128('ab', seed).digest(), x.digest())
self.assertEqual(xxhash.xxh3_128_digest('ab', seed), x.digest())
x.update('c')
self.assertEqual(xxhash.xxh3_128('abc', seed).digest(), x.digest())
self.assertEqual(xxhash.xxh3_128_digest('abc', seed), x.digest())
def test_xxh3_128_reset(self):
x = xxhash.xxh3_128()
h = x.intdigest()
x.update('x' * 10240)
x.reset()
self.assertEqual(h, x.intdigest())
def test_xxh3_128_seed_reset(self):
seed = random.randint(0, 2**64-1)
x = xxhash.xxh3_128(seed=seed)
h = x.intdigest()
x.update('x' * 10240)
x.reset()
self.assertEqual(h, x.intdigest())
def test_xxh3_128_reset_more(self):
x = xxhash.xxh3_128()
h = x.intdigest()
for i in range(random.randint(100, 200)):
x.reset()
for i in range(10, 1000):
x.update(os.urandom(i))
x.reset()
self.assertEqual(h, x.intdigest())
for i in range(10, 1000):
x.update(os.urandom(100))
x.reset()
self.assertEqual(h, x.intdigest())
def test_xxh3_128_seed_reset_more(self):
seed = random.randint(0, 2**64-1)
x = xxhash.xxh3_128(seed=seed)
h = x.intdigest()
for i in range(random.randint(100, 200)):
x.reset()
for i in range(10, 1000):
x.update(os.urandom(i))
x.reset()
self.assertEqual(h, x.intdigest())
for i in range(10, 1000):
x.update(os.urandom(100))
x.reset()
self.assertEqual(h, x.intdigest())
def test_xxh3_128_copy(self):
a = xxhash.xxh3_128()
a.update('xxhash')
b = a.copy()
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
b.update('xxhash')
self.assertNotEqual(a.digest(), b.digest())
self.assertNotEqual(a.intdigest(), b.intdigest())
self.assertNotEqual(a.hexdigest(), b.hexdigest())
a.update('xxhash')
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
def test_xxh3_128_overflow(self):
s = 'I want an unsigned 64-bit seed!'
a = xxhash.xxh3_128(s, seed=0)
b = xxhash.xxh3_128(s, seed=2**64)
self.assertEqual(a.seed, b.seed)
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), xxhash.xxh3_128_intdigest(s, seed=0))
self.assertEqual(a.intdigest(), xxhash.xxh3_128_intdigest(s, seed=2**64))
self.assertEqual(a.digest(), xxhash.xxh3_128_digest(s, seed=0))
self.assertEqual(a.digest(), xxhash.xxh3_128_digest(s, seed=2**64))
self.assertEqual(a.hexdigest(), xxhash.xxh3_128_hexdigest(s, seed=0))
self.assertEqual(a.hexdigest(), xxhash.xxh3_128_hexdigest(s, seed=2**64))
a = xxhash.xxh3_128(s, seed=1)
b = xxhash.xxh3_128(s, seed=2**64+1)
self.assertEqual(a.seed, b.seed)
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), xxhash.xxh3_128_intdigest(s, seed=1))
self.assertEqual(a.intdigest(), xxhash.xxh3_128_intdigest(s, seed=2**64+1))
self.assertEqual(a.digest(), xxhash.xxh3_128_digest(s, seed=1))
self.assertEqual(a.digest(), xxhash.xxh3_128_digest(s, seed=2**64+1))
self.assertEqual(a.hexdigest(), xxhash.xxh3_128_hexdigest(s, seed=1))
self.assertEqual(a.hexdigest(), xxhash.xxh3_128_hexdigest(s, seed=2**64+1))
a = xxhash.xxh3_128(s, seed=2**65-1)
b = xxhash.xxh3_128(s, seed=2**66-1)
self.assertEqual(a.seed, b.seed)
self.assertEqual(a.intdigest(), b.intdigest())
self.assertEqual(a.hexdigest(), b.hexdigest())
self.assertEqual(a.digest(), b.digest())
self.assertEqual(a.intdigest(), xxhash.xxh3_128_intdigest(s, seed=2**65-1))
self.assertEqual(a.intdigest(), xxhash.xxh3_128_intdigest(s, seed=2**66-1))
self.assertEqual(a.digest(), xxhash.xxh3_128_digest(s, seed=2**65-1))
self.assertEqual(a.digest(), xxhash.xxh3_128_digest(s, seed=2**66-1))
self.assertEqual(a.hexdigest(), xxhash.xxh3_128_hexdigest(s, seed=2**65-1))
self.assertEqual(a.hexdigest(), xxhash.xxh3_128_hexdigest(s, seed=2**66-1))
if __name__ == '__main__':
unittest.main()
|
taipahuchu/language-Identification-
|
code/batcher.py
|
"""This file holds the Dataset class, which helps with the loading
and organizing of the training data."""
import argparse
import collections
import gzip
import itertools
import numpy as np
import re
import sys
np.random.seed(666)
def LoadData(filename, mode='train', model='tweet'):
"""Load data stored in tweetlid format.
(i.e. tab-separated tweetid, language, tweet)
Partitioning between train/dev/eval is done by the last digit of the
id number for each training example. Digits 2 through 9 are used for
training, 1 is used as the dev set, and 0 is held out as a final test
set. Currently the held-out set is not loaded during training; note that,
confusingly, to load the dev set you have to pass 'eval' as the mode argument.
This function splits the tweet into a list of units. The level of
splitting is controlled by the model argument.
Args:
filename: where to get the data
mode: 'train', 'eval', 'eval+final', 'final' or 'all'
model: 'word', 'tweet' or 'char' -- the level of splitting to apply
Returns:
tuple of sentences, labels and ids
"""
ids, labels, sentences = [], [], []
with gzip.open(filename, 'r') as f:
for line in f:
tweetid, lang, tweet = line.split('\t')
idx = int(tweetid) % 10 # use this to partition data
if mode == 'train' and idx < 2:
continue
if mode == 'eval+final' and idx > 2:
continue
if mode == 'eval' and idx != 1:
continue
if mode == 'final' and idx != 0:
continue
ids.append(tweetid)
# split used to handle code switching tweets
labels.append(re.split(r'\+|/', lang))
# The { and } brackets are used for start/end symbols
if model in ['word', 'tweet']:
#split on whitespace to get words
sentences.append(['{'] + [unicode(x_.decode('utf8'))
for x_ in tweet.split()] + ['}'])
elif model=='char':
#include full tweet as single unicode string (list of length 3)
sentences.append([u'{'] + [unicode(tweet.decode('utf8'))] + [u'}'])
else:
msg = 'Invalid unit type <{0}> for tokenizing tweet'.format(model)
raise ValueError(msg)
print '{0} examples loaded'.format(len(sentences))
return sentences, labels, ids
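# Partitioning sketch: the last digit of the tweet id picks the split, e.g.
#   int(tweetid) % 10 in 2..9 -> training set (mode='train')
#   int(tweetid) % 10 == 1    -> dev set (mode='eval')
#   int(tweetid) % 10 == 0    -> held-out final set (mode='final')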
class Dataset(object):
def __init__(self, batch_size, preshuffle=True, name='unnamed'):
"""Init the dataset object.
Args:
batch_size: size of mini-batch
preshuffle: should the order be scrambled before the first epoch
name: optional name for the dataset
"""
self._sentences = []
self._labels = []
self._ids = []
self.dataset_weights = []
self._lines = []
self.name = name
self.batch_size = batch_size
self.preshuffle = preshuffle
def ReadData(self, filename, mode, modeltype, weight=1.0):
d = LoadData(filename, mode, modeltype)
self.AddDataSource(d, weight=weight)
def AddDataSource(self, data, weight=1.0):
sentences, labels, ids = data
self._sentences.append(sentences)
self._labels.append(labels)
self.dataset_weights.append(weight)
self._ids.append(ids)
def GetSentences(self):
return itertools.chain(*self._sentences)
def GetIds(self):
return [x for x in itertools.chain(*self._ids)]
def GetLabelSet(self):
label_set = set()
for d in self._labels:
label_set.update([x for x in itertools.chain(*d)])
return label_set
def Prepare(self,in_vocab,out_vocab,und_label='und',ignore_categories=[]):
# Add a dummy dataset to make the batch size evenly divide the number
# of sentences.
batch_size = self.batch_size
total_sentences = sum([len(x) for x in self._sentences])
r = total_sentences % batch_size
if r > 0:
n = batch_size - r
self.AddDataSource(([list('{dummy}')] * n, [[und_label]] * n, [0] * n),
weight=0.0)
sentences = list(itertools.chain(*self._sentences))
labels = list(itertools.chain(*self._labels))
self.example_weights = []
for i in xrange(len(self.dataset_weights)):
w = self.dataset_weights[i]
for _ in xrange(len(self._sentences[i])):
self.example_weights.append(w)
self.example_weights = np.array(self.example_weights)
self.seq_lens = np.array([len(x) for x in sentences])
self.max_sequence_len = self.seq_lens.max()
self.batch_size = batch_size
self.current_idx = 0
self.sentences = self.GetNumberLines(sentences, in_vocab,
self.max_sequence_len)
self.labels = np.zeros((len(labels), len(out_vocab)))
for i, w in enumerate(labels):
for w_ in w:
self.labels[i, out_vocab[w_]] = 1.0
self.labels[i, :] /= self.labels[i, :].sum()
# class weights
# There is a hack to only use the examples with weight 1 as a way
# to prevent wikipedia from dominating the weights.
counts = self.labels[self.example_weights == 1, :].sum(axis=0)
self.w = 1.0/(1.0 + counts)
self.w /= self.w.mean() # scale the class weights to reasonable values
self.N = len(sentences)
if self.preshuffle:
self._Permute()
@staticmethod
def GetNumberLines(lines, vocab, pad_length):
"""Convert list of words to matrix of word ids."""
out = []
for line in lines:
if len(line) < pad_length:
line += ['}'] * (pad_length - len(line))
out.append([vocab[w] for w in line])
return np.array(out)
def GetNumBatches(self):
"""Returns num batches per epoch."""
return self.N / self.batch_size
def _Permute(self):
"""Shuffle the training data."""
s = np.arange(self.N)
np.random.shuffle(s)
self.sentences = self.sentences[s, :]
self.seq_lens = self.seq_lens[s]
self.labels = self.labels[s, :]
self.example_weights = self.example_weights[s]
def GetNextBatch(self):
if self.current_idx + self.batch_size > self.N:
self.current_idx = 0
self._Permute()
idx = range(self.current_idx, self.current_idx + self.batch_size)
self.current_idx += self.batch_size
return (self.sentences[idx, :], self.seq_lens[idx],
self.labels[idx, :], self.example_weights[idx])
# print some dataset statistics
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('data')
args = parser.parse_args()
_, labels, _ = LoadData(args.data, 'all')
total = len(labels)
print 'total sentences: {0}'.format(total)
unique_labels = len(set([tuple(x) for x in labels]))
print 'unique labels: {0}'.format(unique_labels)
counts = collections.Counter([tuple(s) for s in labels])
for lang in sorted(counts.keys()):
print '{0}\t{1}\t{2:.1f}'.format(' '.join(lang), counts[lang],
100 * counts[lang] / float(total))
|
taipahuchu/language-Identification-
|
code/models.py
|
<reponame>taipahuchu/language-Identification-
import tensorflow as tf
import numpy as np
class BaseModel(object):
"""Holds code shared between all the different model variants."""
def __init__(self, batch_size, max_sequence_len, out_vocab_size, c2v,
dropout_keep_prob=0.0):
self._batch_size = batch_size
self._dropout_keep_prob = dropout_keep_prob
self._out_vocab_size = out_vocab_size
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size],
name='y')
# The bidirectional rnn code requires seq_lens as int64
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
embeddings = c2v.GetEmbeddings(self.x)
self._inputs = [tf.squeeze(input_, [1]) for input_ in
tf.split(1, max_sequence_len, embeddings)]
# Need to prepare a mask to zero out the padding symbols.
# Make a batch_size x max_sequence_len matrix where each
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])
# Make a matrix where each row contains [0, 1, ..., max_sequence_len]
r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])
# Use the logical operations to create a mask
indicator = tf.less(range_tiled, lengths_tiled)
sz = [batch_size, max_sequence_len]
self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))
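# Worked example of the mask construction above, assuming batch_size=2,
# max_sequence_len=4 and seq_lens=[2, 3]:
#   lengths_tiled = [[2, 2, 2, 2],    range_tiled = [[0, 1, 2, 3],
#                    [3, 3, 3, 3]]                   [0, 1, 2, 3]]
#   indicator = (range_tiled < lengths_tiled) = [[T, T, F, F],
#                                                [T, T, T, F]]
#   so _mask = [[1, 1, 0, 0], [1, 1, 1, 0]] zeroes out the padding positions.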
def _DoPredictions(self, in_size, mats, class_weights=None):
"""Takes in an array of states and calculates predictions.
Get the cross-entropy for each example in the vector self._xent.
Args:
in_size: size of the hidden state vectors
mats: list of hidden state vectors
"""
pred_mat = tf.get_variable('pred_mat',
[in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction on every word.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.cs = self._mask / tf.reduce_sum(self._mask, 1, keep_dims=True)
# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
preds_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
tf.reshape(self.preds_by_word,
[-1, self._out_vocab_size]))
preds_weighted_reshaped = tf.reshape(preds_weighted,
self.preds_by_word.get_shape())
self.probs = tf.reduce_sum(preds_weighted_reshaped, 0)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights)
class WordAvgModel(BaseModel): #formerly SimpleModel
"""A bag of word /predictions/."""
def __init__(self, out_vocab_size=None,
batch_size=10,
model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
super(WordAvgModel, self).__init__(batch_size, max_sequence_len,
out_vocab_size, c2v)
super(WordAvgModel, self)._DoPredictions(c2v.embedding_dims,
self._inputs)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class WordSeqModel(BaseModel):
"""A bag of word embeddings."""
def __init__(self, out_vocab_size=None,
batch_size=10,
model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
super(WordSeqModel, self).__init__(batch_size, max_sequence_len,
out_vocab_size, c2v)
in_size = self._inputs[0].get_shape()[1].value
# Also, output confidence scores at every word.
confidence_mat = tf.get_variable('confidence_mat', [in_size, 1])
confidence_scores = tf.concat(1, [tf.matmul(o_, confidence_mat)
for o_ in self._inputs])
# dropout on confidence_scores
random_tensor = (1.0 - self._dropout_keep_prob +
tf.random_uniform(tf.shape(confidence_scores)))
binary_tensor = -50.0 * tf.floor(random_tensor)
csshape = confidence_scores.get_shape()
self.cs = tf.nn.softmax(tf.constant(1.0, shape=csshape))
# The final prediction is the average of the predictions for each word
# weighted by the individual confidence/utility scores.
wvs = tf.pack(self._inputs)
wvs_weighted = tf.mul(tf.reshape(tf.transpose(self.cs), [-1, 1]),
tf.reshape(wvs, [-1, in_size]))
wvs_weighted_reshaped = tf.reshape(wvs_weighted, wvs.get_shape())
wvsum = tf.reduce_sum(wvs_weighted_reshaped,0)
pred_mat = tf.get_variable('pred_mat', [in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction for each tweet.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
preds = GetWordPred(wvsum)
z = tf.tile(tf.reshape(tf.reduce_sum(preds,1),[-1,1]), [1, out_vocab_size])
self.preds, self.z = preds, z
self.probs = tf.div(preds, z) #normalize
self.unweighted_xent = _SafeXEnt(self.y, self.probs)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class TweetSeqModel(BaseModel): #formerly SeqModel
"""Single layer LSTM on top of the word embeddings.
Lang id predictions are done on each word and then combined via
a weighted average.
"""
def __init__(self, out_vocab_size=None,
batch_size=10, model_params=None,
c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
"""Initialize the TweetSeqModel
Args:
out_vocab_size: how many languages we are predicting
batch_size: minibatch size
model_params: dictionary of other model parameters
c2v: char2vec class instance
max_sequence_len: length of all the input sequences
dropout_keep_prob: dropout probability indicator
weights: class weights
"""
hidden_size = model_params['model_hidden_size']
proj_size = model_params['model_proj_size'] # optional, can be None
super(TweetSeqModel, self).__init__(batch_size, max_sequence_len,
out_vocab_size, c2v,
dropout_keep_prob)
weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
def GetCell():
"""Creates an LSTM cell with dropout."""
c = tf.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=model_params['peepholes'],
num_proj=proj_size)
if dropout_keep_prob is not None:
c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
sequence_length=self.seq_lens)
if proj_size:
out_size = 2 * proj_size
else:
out_size = 2 * hidden_size
super(TweetSeqModel, self)._DoPredictions(out_size, rnnout, class_weights=weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class CharSeqModel(object): #formerly TweetSeqModel
"""
Treats each document (tweet) as a single "word," which is fed through c2v,
and the output "embedding" sized to be a vector of language predictions.
"""
def __init__(self, out_vocab_size=None,
batch_size=10, model_params=None, c2v=None,
max_sequence_len=None,
dropout_keep_prob=None,
weights=None):
self.params = model_params
self._out_vocab_size = out_vocab_size # num. of languages
self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
with tf.variable_scope("tweetff"):
hidden = tf.get_variable("ff_hidden",
[c2v.embedding_dims, out_vocab_size])
bias = tf.get_variable('ff_bias', [out_vocab_size])
#probably useless. at least I don't want to use it
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32, [batch_size, out_vocab_size],
name='y')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
# get one 'word' embedding for the full tweet
tweet_embedding = c2v.GetEmbeddings(self.x)[:,1,:]
logits = tf.nn.xw_plus_b(tweet_embedding, hidden, bias)
self.probs = tf.nn.softmax(logits)
self._xent = tf.nn.softmax_cross_entropy_with_logits(logits, self.y)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
class WordLevelModel(object):
"""
Model to evaluate on word-level predictions
Args:
batch_size: minibatch size
model_params: dictionary of other model parameters
c2v: char2vec class instance
max_sequence_len: length of all the input/output sequences
out_vocab_size: how many languages we are predicting
dropout_keep_prob: dropout probability indicator
weights: class weights
"""
def __init__(self, batch_size, model_params, c2v, max_sequence_len,
out_vocab_size, dropout_keep_prob=0.0, weights=None):
self._batch_size = batch_size
self._dropout_keep_prob = dropout_keep_prob
self._out_vocab_size = out_vocab_size
self.x = tf.placeholder(tf.int32, [batch_size, max_sequence_len],
name='x')
self.y = tf.placeholder(tf.float32,
[batch_size, max_sequence_len, out_vocab_size],
name='y')
# The bidirectional rnn code requires seq_lens as int64
self.seq_lens = tf.placeholder(tf.int64, [batch_size], name='seq_lens')
self.example_weights = tf.placeholder(tf.float32, [batch_size],
name='example_weights')
embeddings = c2v.GetEmbeddings(self.x)
self._inputs = [tf.squeeze(input_, [1]) for input_ in
tf.split(1, max_sequence_len, embeddings)]
# Need to prepare a mask to zero out the padding symbols.
# Make a batch_size x max_sequence_len matrix where each
# row contains the length repeated max_sequence_len times.
lengths_transposed = tf.expand_dims(tf.to_int32(self.seq_lens), 1)
lengths_tiled = tf.tile(lengths_transposed, [1, max_sequence_len])
# Make a matrix where each row contains [0, 1, ..., max_sequence_len]
r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])
self.lengths_transposed = lengths_transposed
self.lengths_tiled = lengths_tiled
self.range_row = range_row
self.range_tiled = range_tiled
# Use the logical operations to create a mask
indicator = tf.less(range_tiled, lengths_tiled+1) #i.e. True where the position index <= the sequence length
trim = np.ones(indicator.get_shape())
trim[:,0] = 0 #ignore start symbol
indicator = tf.logical_and(indicator, trim.astype(bool))
self.indicator = indicator
sz = [batch_size, max_sequence_len]
self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz))
#-------------------------------#
self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
hidden_size = model_params['model_hidden_size']
proj_size = model_params['model_proj_size'] # optional, can be None
def GetCell():
"""Creates an LSTM cell with dropout."""
c = tf.nn.rnn_cell.LSTMCell(hidden_size,
use_peepholes=model_params['peepholes'],
num_proj=proj_size)
if dropout_keep_prob is not None:
c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
return c
# Create the bi-directional LSTM
with tf.variable_scope('wordrnn'):
with tf.variable_scope('fw'):
cell_fw = GetCell()
with tf.variable_scope('bw'):
cell_bw = GetCell()
rnnout, _, _ = tf.nn.bidirectional_rnn(cell_fw, cell_bw, self._inputs,
dtype=tf.float32,
sequence_length=self.seq_lens)
if proj_size:
out_size = 2 * proj_size
else:
out_size = 2 * hidden_size
self._DoPredictions(out_size, rnnout, self.weights)
self.cost = tf.reduce_mean(self.example_weights * self._xent)
def _DoPredictions(self, in_size, mats, class_weights=None):
"""Takes in an array of states and calculates predictions.
Get the cross-entropy for each example in the vector self._xent.
Args:
in_size: size of the hidden state vectors
mats: list of hidden state vectors
"""
pred_mat = tf.get_variable('pred_mat',
[in_size, self._out_vocab_size])
pred_bias = tf.get_variable('pred_bias', [self._out_vocab_size])
# Make a prediction on every word.
def GetWordPred(o_):
logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
return tf.nn.softmax(logits)
#self.preds_by_word1 = tf.pack([GetWordPred(o_) for o_ in mats])
#self.preds_by_word = tf.reshape(self.preds_by_word1, self.y.get_shape())
#self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_word)
self.preds_by_word = tf.pack([GetWordPred(o_) for o_ in mats])
self.preds_by_instance = tf.pack([self.preds_by_word[:,i,:] for i in range(self.preds_by_word.get_shape()[1])])
self.probs = tf.mul(tf.expand_dims(self._mask,2), self.preds_by_instance)
self._xent = _SafeXEnt(self.y, self.probs, class_weights=class_weights, sumd=[1,2])
def _SafeXEnt(y, probs, eps=0.0001, class_weights=None, sumd=[1]):
"""Version of cross entropy loss that should not produce NaNs.
If the predicted probability for the true class is near zero then taking
the log can produce a NaN, which ruins everything. This function clips
each probability to be at least eps and at most 1 - eps
before taking the log.
Args:
y: matrix of true probabilities same size as probs
probs: matrix of probabilities for the minibatch
eps: value to clip the probabilities at
class_weights: vector of relative weights to be assigned to each class
sumd: dimensions along which to sum the x-ent matrix
Returns:
cross entropy loss for each example in the minibatch
"""
adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
xent_mat = -y * tf.log(adjusted_probs)
if class_weights is not None:
xent_mat *= class_weights
return tf.reduce_sum(xent_mat, sumd)
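# e.g. with eps=0.0001, a predicted probability of 0.0 for the true class is
# clipped to 0.0001, so the per-class term -1.0 * log(0.0001) ~= 9.21 stays
# finite instead of becoming NaN.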
def _SafeNegEntropy(probs, batch_size, eps=0.0001):
"""Computes negative entropy in a way that will not overflow."""
adjusted_probs = tf.clip_by_value(probs, eps, 1.0 - eps)
entropy = tf.mul(probs, tf.log(adjusted_probs))
return tf.reduce_sum(entropy) / batch_size
|
taipahuchu/language-Identification-
|
code/char2vec.py
|
"""Models to create word embeddings from character sequences.
Use BasicEmbedding for traditional word embeddings or use the CharLSTM or
CharCNN implementations of char2vec.
"""
import numpy as np
import tensorflow as tf
import util
initializer = tf.random_uniform_initializer(-0.1, 0.1)
class BasicEmbedding(object):
""" Baseline (traditional) word embeddings to compare against c2v.
If building a model that uses a concatenation of traditional word embeddings
with those generated by char2vec then it can be useful to use dropout on
the traditional embeddings to avoid favoring them too much during early
training.
"""
def __init__(self, model_params, vocab_size=None,
dropout_keep_prob=None):
self.dropout_keep_prob = dropout_keep_prob
self.embedding_dims = model_params['word_embed_dims']
self.word_embeddings = tf.get_variable("word_embeddings",
[vocab_size, self.embedding_dims],
initializer=initializer)
def GetEmbeddings(self, x):
"""Looks up some embeddings from the embedding table.
Args:
x: matrix of word ids to look up
Returns:
word embedding vectors for the given ids.
"""
e = tf.nn.embedding_lookup(self.word_embeddings, x)
if self.dropout_keep_prob:
e = tf.nn.dropout(e, self.dropout_keep_prob)
return e
def SaveVariables(self, *nargs):
pass
class Char2Vec(object):
"""Maps character sequences to word embeddings."""
def __init__(self, char_vocab, max_sequence_len=15):
"""Initialize the Char2Vec model.
Args:
char_vocab: vocab class instance for the character vocabulary
max_sequence_len: length of the longest word
"""
self.max_sequence_len = max_sequence_len
self.char_vocab = char_vocab
self._vocab_size = char_vocab.vocab_size
# Placeholder for the character sequences in the form of an
# n x k array where n is the number of words and k is the
# length of the longest word. Characters are encoded as ints.
self.words_as_chars = tf.placeholder(tf.int32, [None, max_sequence_len],
name='words_as_chars')
def GetEmbeddings(self, x):
return tf.nn.embedding_lookup(self.word_embeddings, x)
def MakeMat(self, word_list, pad_len=None):
"""Make a matrix to hold the character sequences in.
Special start and end tokens are added to the beginning and end of
each word.
Args:
word_list: A list of strings
pad_len: Pad all character sequences to this length. If a word is
longer than the pad_len it will be truncated.
Returns:
Array containing character sequences and a vector of sequence lengths.
"""
if not pad_len:
pad_len = self.max_sequence_len
# make the padded char mat
the_words = []
word_lengths = []
for word in word_list:
word_idx = [self.char_vocab[c] for c in util.Graphemes(word)]
word_idx = ([self.char_vocab['<S>']] + word_idx[:pad_len-2] +
[self.char_vocab['</S>']])
if len(word_idx) < pad_len:
word_idx += [self.char_vocab['</S>']] * (pad_len - len(word_idx))
the_words.append(word_idx)
word_lengths.append(min(pad_len, len(word)+2))
the_words = np.array(the_words)
word_lengths = np.array(word_lengths)
return the_words, word_lengths
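# Worked sketch, assuming pad_len=6 and the word "cat": the row becomes the
# ids for [<S>, c, a, t, </S>, </S>] (padded with </S>) and the recorded
# length is min(6, len("cat") + 2) == 5.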
@staticmethod
def GetBatchVocab(words):
batch_vocab = np.unique(words)
words_remapped = np.copy(words)
for i in xrange(len(batch_vocab)):
np.place(words_remapped, words==batch_vocab[i], i)
return batch_vocab, words_remapped
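# e.g. words = [[5, 9], [9, 2]] gives batch_vocab = [2, 5, 9] and
# words_remapped = [[1, 2], [2, 0]], so embeddings need only be gathered
# for the (typically much smaller) per-batch vocabulary.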
class CharLSTM(Char2Vec):
""" LSTM implementation of char2vec.
The model is a two layer deep bi-LSTM. Dropout is used
as a regularizer on the 2nd layer. The output word embeddings
are available in the c2v.word_embeddings variable.
The required placeholders are words_as_chars and seq_lens, which
contain the words in the form of padded character sequences and
the length of each word respectively. The batch_dim placeholder
must be supplied.
"""
def __init__(self, char_vocab, model_params,
max_sequence_len=15, dropout_keep_prob=None):
super(CharLSTM, self).__init__(char_vocab, max_sequence_len)
char_embed_dims = int(np.log(len(char_vocab))) + 1  # must be an int for the embedding shape (cf. CharCNN below)
word_embed_dims = model_params['word_embed_dims']
self.embedding_dims = word_embed_dims
layer1_hidden_size = model_params['c2v_layer1_hidden_size']
layer1_out_size = model_params['c2v_layer1_out_size']
self.hidden_size = model_params['c2v_layer2_hidden_size']
# Placeholder for the word length vector.
self.seq_lens = tf.placeholder(tf.int64, [None], name='seq_lens')
# Placeholder for the number of words (n) in the minibatch.
self.batch_dim = tf.placeholder(tf.int32, name='char_batch_dim')
# The following variables define the model.
with tf.variable_scope('c2v'):
# continuous space character embeddings
self.embedding = tf.get_variable("embedding",
[self._vocab_size, char_embed_dims],
initializer=initializer)
def GetCell(hidden_size, num_proj=None, use_peepholes=False):
"""Helper function to make LSTM cells."""
layer = tf.nn.rnn_cell.LSTMCell(hidden_size, num_proj=num_proj,
use_peepholes=use_peepholes)
layer = tf.nn.rnn_cell.DropoutWrapper(layer,
output_keep_prob=dropout_keep_prob,
input_keep_prob=dropout_keep_prob)
return layer
# This is the 1st bi-LSTM layer.
layer1_fw = GetCell(layer1_hidden_size, layer1_out_size, model_params['peepholes'])
layer1_bw = GetCell(layer1_hidden_size, layer1_out_size, model_params['peepholes'])
# This is the 2nd layer, also a bi-LSTM. The input size is twice
# the size of the output size from layer one because the concatenation
# of the layer one outputs is the layer 2 input.
if layer1_out_size: # Check if proj layer is enabled
layer2_input_size = 2 * layer1_out_size
else:
layer2_input_size = 2 * layer1_hidden_size
layer2_fw = LSTMCell(self.hidden_size, use_peepholes=model_params['peepholes'])
layer2_bw = LSTMCell(self.hidden_size, use_peepholes=model_params['peepholes'])
      # The final embedding is the output from the layer2 LSTM multiplied
      # by this matrix. One purpose of this matrix is to scale the layer2
      # output to match the specified number of dimensions for the word
      # embedding.
out_mat = tf.get_variable('out_mat',
[2 * self.hidden_size, word_embed_dims],
initializer=initializer)
# z should be a tensor of dimensions batch_sz x word_len x embed_dims.
z = tf.nn.embedding_lookup(self.embedding, self.words_as_chars)
# Each entry in this list is a matrix of dim batch_sz x embed_dims.
# There is one entry per timestep and one character is processed per
# timestep.
inputs = [tf.squeeze(input_) for input_ in
tf.split(1, max_sequence_len, z)]
for _i in inputs: # newest version of tf needs help with shape inference
_i.set_shape((None, char_embed_dims))
# Feed the inputs through a bidirectional LSTM. Output is a list of
# word_len tensors with dim batch_sz x 2 * hidden_sz.
      out1, _, _ = tf.nn.bidirectional_rnn(layer1_fw, layer1_bw, inputs,
                                           dtype=tf.float32,
                                           sequence_length=self.seq_lens)
# For the 2nd bi-LSTM layer we won't use the bidirectional_rnn
# wrapper. This is because we only want to save the last output
# from the forward direction and the first output from the backward
# direction. It is a little tricky to grab these states because they
# are in different positions for each word in the minibatch.
batch_range = tf.range(self.batch_dim)
# The slices variables keeps track of which position the appropriate
# outputs are for each word in the minibatch.
slices = self.batch_dim * tf.to_int32(self.seq_lens-1) + batch_range
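      # Worked example (added for illustration): with batch_dim = 2 and
      # seq_lens = [3, 5], slices = [2*(3-1) + 0, 2*(5-1) + 1] = [4, 9].
      # After the per-timestep outputs are packed and reshaped into a
      # (time * batch, hidden) matrix, row t*batch + b holds timestep t of
      # word b, so rows 4 and 9 are the last valid outputs of the two words.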
with tf.variable_scope('fw'):
        outputs_forward, _ = tf.nn.rnn(layer2_fw, out1, dtype=tf.float32)
out_forward = self._GetLastOutput(outputs_forward, slices)
with tf.variable_scope('bw'):
# Reverse the sequences before processing with the backwards LSTM.
out1_bw = reverse_seq(out1, self.seq_lens)
        outputs_backward, _ = tf.nn.rnn(layer2_bw, out1_bw, dtype=tf.float32)
out_backward = self._GetLastOutput(outputs_backward, slices)
# This is the concatenation of the output from the two directions.
out = tf.concat(1, [out_forward, out_backward])
# Project to the proper output dimension. This is a dimensionality
# reduction step.
self.word_embeddings = tf.matmul(out, out_mat)
def _GetLastOutput(self, outputs, slices):
"""Helper function to pull out the last output for each word."""
reshaped = tf.reshape(tf.pack(outputs), [-1, self.hidden_size])
return tf.gather(reshaped, slices)
class CharCNN(Char2Vec):
"""CNN implementation of char2vec.
The model uses two layers of convolution. The second one is followed by a
max pooling operation. After that there is a resnet layer.
"""
def __init__(self, char_vocab, model_params,
max_sequence_len=15, dropout_keep_prob=None):
super(CharCNN, self).__init__(char_vocab, max_sequence_len)
char_embed_dims = int(np.log(len(char_vocab))) + 1
layer1_out_size = model_params['c2v_layer1_out_size']
hidden_size = model_params['c2v_layer2_hidden_size']
word_embed_dims = model_params['word_embed_dims']
# The following variables define the model.
with tf.variable_scope('c2v'):
# continuous space character embeddings
self.embedding = tf.get_variable("embedding",
[self._vocab_size, char_embed_dims],
initializer=initializer)
the_filter, filter_b = MakeFilter(3, char_embed_dims, layer1_out_size,
'filt')
# z is a tensor of dimensions batch_sz x word_len x embed_dims.
z = tf.nn.embedding_lookup(self.embedding, self.words_as_chars)
z_expanded = tf.expand_dims(z, -1)
conv = tf.nn.conv2d(z_expanded, the_filter, strides=[1, 1, 1, 1],
padding='VALID' )
h = tf.nn.relu(tf.nn.bias_add(tf.squeeze(conv), filter_b))
h.set_shape((None, max_sequence_len - 2, layer1_out_size))
if dropout_keep_prob is not None:
h_expanded = tf.nn.dropout(tf.expand_dims(h, -1), dropout_keep_prob)
else:
h_expanded = tf.expand_dims(h, -1)
pools = []
filter_sizes = range(3,6)
for width in filter_sizes:
f, f_bias = MakeFilter(width, layer1_out_size, hidden_size,
'filter_w{0}'.format(width))
conv2 = tf.nn.conv2d(h_expanded, f, strides=[1, 1, 1, 1],
padding='VALID')
h2 = tf.nn.relu(tf.nn.bias_add(conv2, f_bias))
pooled = tf.nn.max_pool(h2, ksize=[1, max_sequence_len-1-width, 1, 1],
strides=[1, 1, 1, 1], padding='VALID')
pools.append(pooled)
if width == 3: # debugging
self.hh = tf.squeeze(pooled)
self.hidx = tf.argmax(h2, 1)
pooled = tf.squeeze(tf.concat(3, pools), [1,2])
# resnet layer https://arxiv.org/abs/1512.03385
sz = len(filter_sizes) * hidden_size
t_mat = tf.get_variable('t_mat', [sz, sz])
t_bias = tf.Variable(tf.constant(0.1, shape=[sz]), name='t_bias')
t = tf.nn.relu(tf.matmul(pooled, t_mat) + t_bias)
self.word_embeddings = t + pooled
self.embedding_dims = sz
def MakeFilter(width, in_size, num_filters, name):
filter_sz = [width, in_size, 1, num_filters]
filter_b = tf.Variable(tf.constant(0.1, shape=[num_filters]),
name='{0}_bias'.format(name))
the_filter = tf.get_variable(name, filter_sz)
return the_filter, filter_b
def reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = tf.pack(input_seq)
# Reverse along dimension 0
s_reversed = tf.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = tf.unpack(s_reversed)
return result
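# --- Added illustration (not part of the original file) ---
# GetBatchVocab is pure numpy, so its behavior can be shown directly: it
# collapses a batch of word ids down to the unique ids that actually occur
# and remaps the batch to index into that smaller per-batch vocabulary.
if __name__ == '__main__':
  demo = np.array([[7, 2, 7], [2, 9, 7]])
  batch_vocab, remapped = Char2Vec.GetBatchVocab(demo)
  print(batch_vocab)  # [2 7 9]
  print(remapped)     # [[1 0 1]
                      #  [0 2 1]]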
|
taipahuchu/language-Identification-
|
code/util.py
|
# Misc. helper functions go in this file.
import collections
import numpy as np
import os
import random
import sys
# Need to use these three lines to import pyplot with a good font
import matplotlib
matplotlib.rc('font', family='DejaVu Sans')
from matplotlib import pyplot
def Metrics(preds, labs, show=True):
"""Print precision, recall and F1 for each language.
Assumes a single language per example, i.e. no code switching.
Args:
preds: list of predictions
labs: list of labels
    show: flag to toggle printing of the per-language score table
      (overall accuracy is always printed)
"""
all_langs = set(preds + labs)
preds = np.array(preds)
labs = np.array(labs)
label_totals = collections.Counter(labs)
pred_totals = collections.Counter(preds)
confusion_matrix = collections.Counter(zip(preds, labs))
num_correct = 0
for lang in all_langs:
num_correct += confusion_matrix[(lang, lang)]
acc = num_correct / float(len(preds))
print 'accuracy = {0:.3f}'.format(acc)
if show:
print ' Lang Prec. Rec. F1'
print '------------------------------'
scores = []
fmt_str = ' {0:6} {1:6.2f} {2:6.2f} {3:6.2f}'
for lang in sorted(all_langs):
idx = preds == lang
total = max(1.0, pred_totals[lang])
precision = 100.0 * confusion_matrix[(lang, lang)] / total
idx = labs == lang
total = max(1.0, label_totals[lang])
recall = 100.0 * confusion_matrix[(lang, lang)] / total
if precision + recall == 0.0:
f1 = 0.0
else:
f1 = 2.0 * precision * recall / (precision + recall)
scores.append([precision, recall, f1])
if show:
print fmt_str.format(lang, precision, recall, f1)
totals = np.array(scores).mean(axis=0)
if show:
print '------------------------------'
print fmt_str.format('Total:', totals[0], totals[1], totals[2])
return totals[2]
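# Hedged demo (added; runs only when this file is executed directly). With
# the toy predictions below, accuracy is 0.750; 'en' gets precision 66.67,
# recall 100.00, F1 80.00 and 'fr' gets precision 100.00, recall 50.00,
# F1 66.67.
if __name__ == '__main__':
  Metrics(['en', 'en', 'en', 'fr'], ['en', 'en', 'fr', 'fr'])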
def ConfusionMat(preds, labs):
"""Plot and show a confusion matrix.
Args:
preds: list of predicted labels
labs: list of true labels
"""
all_langs = set(preds + labs) # this is the set of all possible labels
num_langs = len(all_langs)
# create a mapping from labels to id numbers
lookup = dict(zip(sorted(all_langs), range(num_langs)))
# make the counts for the confusion matrix
counts = np.zeros((num_langs, num_langs))
for p, l in zip(preds, labs):
counts[lookup[p], lookup[l]] += 1
# plot a colormap using log scale
pyplot.imshow(np.log(counts+1.0), interpolation='none')
# plot the text labels
for i in xrange(num_langs):
for j in xrange(num_langs):
pyplot.text(i, j, str(int(counts[i, j])), color='white',
horizontalalignment='center')
# take care of the axes
pyplot.xticks(range(num_langs), sorted(all_langs))
pyplot.yticks(range(num_langs), sorted(all_langs))
pyplot.xlabel('Prediction')
pyplot.ylabel('True Label')
pyplot.show()
def GetColor(percent):
"""Returns an RGB color scale to represent a given percentile."""
z = int(percent * 512)
z = 255 - min(255, z)
hexz = hex(z)[2:]
if len(hexz) == 1:
hexz = '0' + hexz
elif len(hexz) == 0:
hexz = '00'
color = '#ff{0}{0}'.format(hexz)
return color
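# Examples (added for illustration): GetColor(0.0) -> '#ffffff' (white),
# GetColor(0.25) -> '#ff7f7f', GetColor(0.5) -> '#ff0000'; the scale
# saturates at full red for anything at or above the 50th percentile.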
def PrintParams(param_list, handle=sys.stdout.write):
"""Print the names of the parameters and their sizes.
Args:
param_list: list of tensors
handle: where to write the param sizes to
"""
handle('NETWORK SIZE REPORT\n')
param_count = 0
fmt_str = '{0: <25}\t{1: >12}\t{2: >12,}\n'
for p in param_list:
shape = p.get_shape()
shape_str = 'x'.join([str(x.value) for x in shape])
handle(fmt_str.format(p.name, shape_str, np.prod(shape).value))
param_count += np.prod(shape).value
handle(''.join(['-'] * 60))
handle('\n')
handle(fmt_str.format('total', '', param_count))
if handle==sys.stdout.write:
sys.stdout.flush()
def GetProj(feat_mat):
"""Projects a feature matrix into 2 dimensions using PCA."""
m = feat_mat.mean(axis=0)
feat_mat -= m
covmat = feat_mat.T.dot(feat_mat)
m, v = np.linalg.eig(covmat)
m /= m.sum()
v = v[:, 0:2]
proj = feat_mat.dot(v)
return proj
def PlotText(pts, labels):
pyplot.plot(pts[:, 0], pts[:, 1], 'x')
for i in xrange(pts.shape[0]):
pyplot.text(pts[i, 0], pts[i, 1], labels[i])
pyplot.show()
def Graphemes(s):
""" Given a string return a list of graphemes.
Args:
    s: the input string
Returns:
A list of graphemes.
"""
graphemes = []
current = []
if type(s) == unicode:
s = s.encode('utf8')
for c in s:
val = ord(c) & 0xC0
if val == 128:
# this is a continuation
current.append(c)
else:
# this is a new grapheme
if len(current) > 0:
graphemes.append(''.join(current))
current = []
if val < 128:
graphemes.append(c) # single byte grapheme
else:
current.append(c) # multi-byte grapheme
if len(current) > 0:
graphemes.append(''.join(current))
return graphemes
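# Example (added for illustration): in a UTF-8 byte string, every byte of a
# multi-byte character after the first matches 0b10xxxxxx, so
# Graphemes('a\xc3\xb1b') returns ['a', '\xc3\xb1', 'b'] -- the two bytes
# encoding 'ñ' stay together as a single grapheme.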
def Bytes(s):
if type(s) == unicode:
s = s.encode('utf8')
z = s.encode('hex')
return [z[2*i:2*i+2] for i in range(len(z)/2)]
def GetLangName(code):
"""Convert ISO lang codes to full language names.
Args:
code: an ISO language code as a string
Returns:
name of language or ISO code if language name not available
"""
names = {
'am': 'amharic',
'ar': 'arabic',
'bg': 'bulgarian',
'bn': 'bengali',
'bo': 'tibetan',
'bs': 'bosnian',
'ca': 'catalan',
'ckb': 'kurdish',
'cs': 'czech',
'cy': 'welsh',
'da': 'danish',
'de': 'german',
'dv': 'divehi',
'el': 'greek',
'en': 'english',
'es': 'spanish',
'et': 'estonian',
'eu': 'basque',
'fa': 'persian',
'fi': 'finnish',
'fr': 'french',
'gl': 'galician',
'gu': 'gujarati',
'he': 'hebrew',
'hi': 'hindi',
'hi-Latn': 'hindi-latin',
'hr': 'croatian',
'ht': 'haitian',
'hu': 'hungarian',
'hy': 'armenian',
'id': 'indonesian',
'is': 'icelandic',
'it': 'italian',
'ja': 'japanese',
'ka': 'georgian',
'km': 'cambodian',
'kn': 'kannada',
'ko': 'korean',
'lo': 'lao',
'lt': 'lithuanian',
'lv': 'latvian',
'ml': 'malayalam',
'mr': 'marathi',
'my': 'burmese',
'ne': 'nepali',
'nl': 'dutch',
'no': 'norwegian',
'or': 'oriya',
'pa': 'punjabi',
'pl': 'polish',
'ps': 'pashto',
'pt': 'portuguese',
'ro': 'romanian',
'ru': 'russian',
'sd': 'sindhi',
'si': 'sinhala',
'sk': 'slovak',
'sl': 'slovene',
'sr': 'serbian',
'sv': 'swedish',
'ta': 'tamil',
'te': 'telugu',
'th': 'thai',
'tl': 'tagalog',
'tr': 'turkish',
'ug': 'uighur',
    'uk': 'ukrainian',
'ur': 'urdu',
'vi': 'vietnamese',
'zh-CN': 'chinese',
'zh-TW': 'taiwanese'
}
if code in names:
return names[code]
return code
|
taipahuchu/language-Identification-
|
code/vocab.py
|
import argparse
import collections
import numpy as np
import pickle
import re
class Vocab(object):
def _normalize(self, word):
if re.match(ur'^<.*>$', word):
return word
if self.specialcase:
newword = []
prev_is_lower = True
for letter in word:
if letter.isupper():
if prev_is_lower:
newword.append('~')
prev_is_lower = False
else:
prev_is_lower = True
newword.append(letter.lower())
word = ''.join(newword)
if self.lowercase:
word = word.lower()
if self.numreplace:
word = re.sub(ur'\d', '#', word)
return word
def __init__(self, tokenset, unk_symbol='<UNK>',
lowercase=False, numreplace=False):
self.lowercase = lowercase
self.numreplace = numreplace
self.specialcase = False
tokenset = set([self._normalize(w) for w in tokenset])
self.vocab_size = len(tokenset)
self.unk_symbol = unk_symbol
self.word_to_idx = dict(zip(sorted(tokenset), range(self.vocab_size)))
self.idx_to_word = dict(zip(self.word_to_idx.values(),
self.word_to_idx.keys()))
@staticmethod
def Load(filename):
with open(filename, 'rb') as f:
v = pickle.load(f)
return v
@classmethod
def MakeFromData(cls, lines, min_count, unk_symbol='<UNK>',
max_length=None, no_special_syms=False, normalize=False):
lowercase=False
numreplace=False
if normalize:
lowercase=True
numreplace=True
token_counts = collections.Counter()
for line in lines:
token_counts.update(line)
tokenset = set()
for word in token_counts:
if max_length and len(word) > max_length:
continue
if token_counts[word] >= min_count:
tokenset.add(word)
if not no_special_syms:
tokenset.add(unk_symbol)
tokenset.add('<S>')
tokenset.add('</S>')
return cls(tokenset, unk_symbol=unk_symbol, lowercase=lowercase,
numreplace=numreplace)
@classmethod
def ByteVocab(cls):
"""Creates a vocab that has a token for each possible byte.
It's useful to have a fixed byte vocab so that the subset of bytes
that form the vocab is not dependent on the dataset being used. Thus,
the learned byte embeddings can be reused on different datasets.
"""
c = '0123456789abcdef'
tokens = ['<S>', '</S>']
for i in c:
for j in c:
tokens.append(i + j)
return cls(tokens)
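  # Illustration (added): the nested loops above yield all 256 two-hex-digit
  # byte tokens, so len(Vocab.ByteVocab()) == 258 once '<S>' and '</S>' are
  # included.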
@classmethod
def LoadFromTextFile(cls, filename, unk_symbol='<UNK>'):
tokens = []
with open(filename, 'r') as f:
for line in f:
line = line.strip()
tokens.append(line)
return cls(tokens, unk_symbol=unk_symbol)
def GetWords(self):
"""Get a list of words in the vocabulary."""
return self.word_to_idx.keys()
def LookupIdx(self, token):
token = self._normalize(token)
if token in self.word_to_idx:
return self.word_to_idx[token]
return self.word_to_idx.get(self.unk_symbol, None)
def __contains__(self, key):
key = self._normalize(key)
return key in self.word_to_idx
def __getitem__(self, key):
"""If key is an int lookup word by id, if key is a word then lookup id."""
if type(key) == int or type(key) == np.int64:
return self.idx_to_word[key]
return self.LookupIdx(key)
def __iter__(self):
word_list = [self.idx_to_word[x] for x in xrange(self.vocab_size)]
return word_list.__iter__()
def __len__(self):
return self.vocab_size
def Save(self, filename):
if filename.endswith('.pickle'):
with open(filename, 'wb') as f:
pickle.dump(self, f)
elif filename.endswith('.txt'):
with open(filename, 'w') as f:
for i in range(self.vocab_size):
f.write('{0}\n'.format(self.idx_to_word[i]))
if __name__ == '__main__':
"""Print the contents of the vocabulary."""
parser = argparse.ArgumentParser()
parser.add_argument('filename')
args = parser.parse_args()
v = Vocab.Load(args.filename)
for i in v.GetWords():
print i
|
taipahuchu/language-Identification-
|
code/getembeddings.py
|
"""
Utility for printing nearest neighbors to a given word in embedding space.
Uses the char2vec portion of a trained model and the tweetlid vocabulary.
Nearest neighbors are given based on cosine similarity.
"""
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import argparse
import json
import numpy as np
import os
import tensorflow as tf
from batcher import Dataset
from char2vec import CharCNN as Char2Vec
from vocab import Vocab
parser = argparse.ArgumentParser()
parser.add_argument('expdir')
args = parser.parse_args()
config = tf.ConfigProto(inter_op_parallelism_threads=10,
intra_op_parallelism_threads=10)
dataset = Dataset(10, preshuffle=False)
dataset.ReadData('../data/tweetlid/training.tsv.gz', 'all', 'tweet')
input_vocab = Vocab.MakeFromData(dataset.GetSentences(), min_count=1)
char_vocab = Vocab.Load(os.path.join(args.expdir, 'char_vocab.pickle'))
max_word_len = max([len(x) for x in input_vocab.GetWords()]) + 2
print 'max word len {0}'.format(max_word_len)
with open(os.path.join(args.expdir, 'model_params.json'), 'r') as f:
model_params = json.load(f)
c2v = Char2Vec(char_vocab, model_params,
max_sequence_len=max_word_len)
the_words, word_lengths = c2v.MakeMat(input_vocab, pad_len=max_word_len)
saver = tf.train.Saver(tf.all_variables())
session = tf.Session(config=config)
saver.restore(session, os.path.join(args.expdir, 'model.bin'))
embeds = tf.nn.l2_normalize(c2v.word_embeddings, 1)
out = session.run([embeds], {c2v.words_as_chars: the_words})[0]
while True:
print 'please input a word:'
user_word = raw_input()
user_chars, _ = c2v.MakeMat([user_word, 'DUMMY_WORD'],
pad_len=max_word_len)
user_embed = session.run([embeds], {c2v.words_as_chars: user_chars})[0][0, :]
sims = np.matmul(out, user_embed)
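  # Note (added): both embedding sets were L2-normalized above, so these dot
  # products are cosine similarities; argsort below ranks nearest neighbors.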
idx = np.argsort(sims)
for i in range(1, 21):
index = idx[-i]
score = sims[index]
print '{0} {1:.3f} {2}'.format(i, score, input_vocab[index])
|
taipahuchu/language-Identification-
|
code/langid.py
|
import sys
reload(sys) # need to reload to set default encoding
sys.setdefaultencoding('utf8')
import argparse
import collections
import json
import logging
import numpy as np
import os
import shutil
import tensorflow as tf
import util
from char2vec import CharCNN as Char2Vec
from batcher import Dataset
from vocab import Vocab
from models import WordSeqModel, CharSeqModel, TweetSeqModel, WordLevelModel
parser = argparse.ArgumentParser()
parser.add_argument('expdir')
parser.add_argument('--start', help='init parameters from saved model')
parser.add_argument('--mode', choices=['train', 'debug', 'eval', 'final',
'apply'], default='train')
parser.add_argument('--data', default='../data/smallwiki.tsv.gz')
parser.add_argument('--params', default='default_params.json',
help='load hyperparams from json file')
parser.add_argument('--model', choices=['word', 'char', 'tweet'],
help='pass "word", "char", or "tweet" to use '\
'WordSeqModel, CharSeqModel or TweetSeqModel',
default='tweet') #default is hierarchical model
args = parser.parse_args()
if not os.path.exists(args.expdir):
os.mkdir(args.expdir)
mode = args.mode
if mode == 'train':
logging.basicConfig(filename=os.path.join(args.expdir, 'logfile.txt'),
level=logging.INFO)
config = tf.ConfigProto(inter_op_parallelism_threads=10,
intra_op_parallelism_threads=10)
tf.set_random_seed(666)
baseline = False
batch_size = 25
dataset = Dataset(batch_size, preshuffle=mode=='train')
und_symbol='und'
dataset.ReadData(args.data, mode, args.model)
# Make the input vocabulary (words that appear in data)
if baseline:
# The baseline is to use fixed word embeddings.
if mode == 'train':
# The input vocab is fixed during training.
input_vocab = Vocab.MakeFromData(dataset.GetSentences(), min_count=2)
input_vocab.Save(os.path.join(args.expdir, 'input_vocab.pickle'))
else:
# During testing we need to load the saved input vocab.
input_vocab = Vocab.Load(os.path.join(args.expdir, 'input_vocab.pickle'))
else:
# The open vocabulary can be regenerated with each run.
min_count = 1
if mode == 'debug':
min_count = 10 # When visualizing word embeddings hide rare words
maxlens = {'word':40, 'char':150, 'tweet':40}
input_vocab = Vocab.MakeFromData(dataset.GetSentences(),
min_count=min_count,
max_length=maxlens[args.model])
if mode == 'train':
# Make the character vocabulary
if args.start:
shutil.copyfile(os.path.join(args.start, 'char_vocab.pickle'),
os.path.join(args.expdir, 'char_vocab.pickle'))
char_vocab = Vocab.Load(os.path.join(args.expdir, 'char_vocab.pickle'))
with open(os.path.join(args.start, 'model_params.json'), 'r') as f:
model_params = json.load(f)
else:
x = [util.Graphemes(w) for w in input_vocab.GetWords()]
char_vocab = Vocab.MakeFromData(x, min_count=2)
char_vocab.Save(os.path.join(args.expdir, 'char_vocab.pickle'))
with open(args.params, 'r') as f:
model_params = json.load(f)
with open(os.path.join(args.expdir, 'model_params.json'), 'w') as f:
json.dump(model_params, f)
else: # eval or debug mode
char_vocab = Vocab.Load(os.path.join(args.expdir, 'char_vocab.pickle'))
with open(os.path.join(args.expdir, 'model_params.json'), 'r') as f:
model_params = json.load(f)
# Make the output vocab (the set of possible languages to predict)
output_vocab_filename = os.path.join(args.expdir, 'out_vocab.pickle')
if mode == 'train':
labels = [[x] for x in dataset.GetLabelSet()]
if [und_symbol] not in labels: labels += [[und_symbol]]
output_vocab = Vocab.MakeFromData(labels, min_count=1,
no_special_syms=True)
output_vocab.Save(output_vocab_filename)
else:
output_vocab = Vocab.Load(output_vocab_filename)
ignore_categories = ['mixed', 'ambiguous', 'fw', 'unk', '{', '}']
dataset.Prepare(input_vocab, output_vocab, und_symbol, ignore_categories)
#Set other hyperparameters and create model
max_word_len = max([len(x) for x in input_vocab.GetWords()]) + 2
print 'max word len {0}'.format(max_word_len)
dropout_keep_prob = tf.placeholder_with_default(1.0, (), name='keep_prob')
if baseline:
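  # NOTE (added): BasicEmbedding is never imported in this file, and this
  # baseline path is dead code while baseline = False above. To enable it,
  # import BasicEmbedding from wherever it is defined (presumably alongside
  # CharCNN in char2vec.py).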
c2v = BasicEmbedding(model_params, vocab_size=len(input_vocab))
else:
c2v = Char2Vec(char_vocab, model_params,
max_sequence_len=max_word_len,
dropout_keep_prob=dropout_keep_prob)
the_words, word_lengths = c2v.MakeMat(input_vocab, pad_len=max_word_len)
models = {'word': WordSeqModel, 'char': CharSeqModel, 'tweet': TweetSeqModel}
if args.data == 'codeswitch':
models['tweet'] = WordLevelModel
model = models[args.model](batch_size=batch_size, model_params=model_params,
max_sequence_len=dataset.max_sequence_len,
dropout_keep_prob=dropout_keep_prob,
out_vocab_size=len(output_vocab),
weights=dataset.w, c2v=c2v)
saver = tf.train.Saver(tf.all_variables())
session = tf.Session(config=config)
def Apply(expdir):
saver.restore(session, os.path.join(expdir, 'model.bin'))
results = collections.defaultdict(list)
for _ in xrange(dataset.GetNumBatches()):
words, seqlen, labs, _ = dataset.GetNextBatch()
batch_data = MakeFeedDict(words, seqlen, labs)
cvocab = char_vocab
batch_vocab, words_remapped = Char2Vec.GetBatchVocab(words)
charseqs = the_words[batch_vocab]
hh, hidx = session.run([c2v.hh, c2v.hidx], batch_data)
for word_i in range(len(batch_vocab)):
for filter_i in range(hh.shape[-1]):
activation = hh[word_i, filter_i]
if activation == 0.0:
continue
loc = hidx[word_i, 0, filter_i]
char_seq = charseqs[word_i, loc:loc+4]
unit = ''.join([cvocab[i] for i in char_seq])
results[filter_i].append(('{0:.1f}'.format(activation), unit))
for filtnum in results:
dedup = collections.Counter(results[filtnum])
z = sorted(dedup.keys(), key=lambda x: -float(x[0]))
with open('filters/{0}.txt'.format(filtnum), 'w') as f:
for i in range(len(z)):
first = '{0} {1} {2}\n'.format(z[i][0], z[i][1], dedup[z[i]])
f.write(first)
def Eval(expdir):
"""Evaluates on dev data.
Writes results to a results.tsv file in the expdir for use in the
scoring script.
Args:
expdir: path to experiment directory
"""
if args.data == 'codeswitch':
return EvalPerWord(expdir)
saver.restore(session, os.path.join(expdir, 'model.bin'))
all_preds, all_labs = [], []
for _ in xrange(dataset.GetNumBatches()):
words, seqlen, labs, weights = dataset.GetNextBatch()
batch_data = MakeFeedDict(words, seqlen, labs)
if args.model in ["word", "tweet"]:
model_vars = [model.probs, model.preds_by_word]
probs, pp = session.run(model_vars, batch_data)
elif args.model in ["char"]:
probs, pp = session.run([model.probs, model.preds],
batch_data)
idx = weights != 0
all_preds += [output_vocab[p] for p in np.argmax(probs[idx, :], axis=1)]
all_labs += [output_vocab[p] for p in np.argmax(labs[idx, :], axis=1)]
util.Metrics(all_preds, all_labs)
# This output file is in the format needed to score for TweetLID
ids = dataset.GetIds()
with open(os.path.join(expdir, 'results.tsv'), 'w') as f:
for idnum, p in zip(ids, all_preds):
f.write('{0}\t{1}\n'.format(idnum, p))
def Train(expdir):
logging.info('Input Vocab Size: {0}'.format(len(input_vocab)))
logging.info('Char Vocab Size: {0}'.format(len(char_vocab)))
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(model.cost, tvars), 5.0)
optimizer = tf.train.AdamOptimizer(0.001)
train_op = optimizer.apply_gradients(zip(grads, tvars))
session.run(tf.initialize_all_variables())
if args.start:
saver.restore(session, os.path.join(args.start, 'model.bin'))
util.PrintParams(tf.trainable_variables(), handle=logging.info)
maxitr = model_params.get('num_training_iters', 80001)
print "Training for {} iterations...".format(maxitr)
for idx in xrange(maxitr):
if args.data == 'codeswitch':
words, seqlen, labs, ws, lines = dataset.GetNextBatch()
else:
words, seqlen, labs, ws = dataset.GetNextBatch()
batch_data = MakeFeedDict(words, seqlen, labs, ws)
if idx % 25 == 0:
probs = session.run([model.probs], batch_data)[0]
s = [input_vocab[i] for i in words[0, :seqlen[0]]]
print ' '.join(s)
if args.data != 'codeswitch':
print "Predicted:", GetTopPreds(probs[0, :])
print "Actual:", GetTopPreds(labs[0,:])
else:
print "Predict:", GetTopWordLevelPreds(probs[0,:,:], seqlen[0])
print "Actual: ", GetTopWordLevelPreds(labs[0,:,:], seqlen[0])
cost, _ = session.run([model.cost, train_op], batch_data)
logging.info({'iter': idx, 'cost': float(cost)})
if idx == maxitr-1 or (idx % 2000 == 0 and idx > 0):
saver.save(session, os.path.join(expdir, 'model.bin'))
def Debug(expdir):
"""Plots language and word embeddings from saved model."""
saver.restore(session, os.path.join(expdir, 'model.bin'))
# Plot the language embeddings
z = [x for x in tf.trainable_variables() if 'pred_mat' in x.name][0]
zz = z.eval(session)
c = util.GetProj(zz.T)
lang_names = [util.GetLangName(output_vocab[i])
for i in xrange(len(output_vocab))]
util.PlotText(c, lang_names)
# plot some word embeddings
batch_data = {c2v.words_as_chars: the_words}
word_embeddings = session.run([c2v.word_embeddings], batch_data)[0]
c = util.GetProj(word_embeddings)
util.PlotText(c, input_vocab)
def GetTopPreds(probs):
top_preds = []
for ii in reversed(np.argsort(probs)):
if probs[ii] > 0.05:
top_preds.append('{0}: {1:.1f}'.format(output_vocab[ii],
100.0 * probs[ii]))
return top_preds
def MakeFeedDict(words, seqlen, labs, ws=None):
"""Create the feed dict to process each batch.
All the inputs should be from the GetNextBatch command.
Args:
words: matrix of word ids
seqlen: vector of sequence lengths
labs: target matrix
ws: per-example weights
Returns:
dictionary to be used as feed dict.
"""
batch_data = {
model.seq_lens: seqlen,
model.x: words,
}
if mode == 'train':
batch_data[model.y] = labs
batch_data[model.example_weights] = ws
batch_data[dropout_keep_prob] = model_params['dropout_keep_prob']
if not baseline:
batch_vocab, words_remapped = Char2Vec.GetBatchVocab(words)
batch_data.update({
c2v.words_as_chars: the_words[batch_vocab, :],
model.x: words_remapped
})
if hasattr(c2v, 'seq_lens'):
batch_data.update({
c2v.seq_lens: word_lengths[batch_vocab],
c2v.batch_dim: len(batch_vocab)
})
return batch_data
funcs = {'train': Train, # Train the model.
'debug': Debug, # Plot some graphs.
'apply': Apply, # Apply model to new data.
'eval': Eval, # Evaluate on dev set.
'final': Eval} # Evaluate on eval set.
funcs[mode](args.expdir)
|
nickromano/django-daily-digest
|
leather/ticks/score.py
|
#!/usr/bin/env python
from decimal import Decimal, ROUND_CEILING, ROUND_FLOOR
import math
import sys
from leather.ticks.base import Ticker
from leather.utils import isclose
# Shorthand
ZERO = Decimal("0")
TEN = Decimal("10")
#: Normalized intervals to be tested for ticks
INTERVALS = [
Decimal("0.1"),
Decimal("0.15"),
Decimal("0.2"),
Decimal("0.25"),
Decimal("0.5"),
Decimal("1.0"),
]
#: The default number of ticks to produce
DEFAULT_TICKS = 5
#: The minimum length of a viable tick sequence
MIN_TICK_COUNT = 4
#: The maximum length of a viable tick sequence
MAX_TICK_COUNT = 10
#: Most preferred tick intervals
BEST_INTERVALS = [Decimal("0.1"), Decimal("1.0")]
#: Least preferred tick intervals
WORST_INTERVALS = [Decimal("0.15")]
class ScoreTicker(Ticker):
"""
Attempt to find an optimal series of ticks by generating many possible
sequences and scoring them based on several criteria. Only the best
tick sequence is returned.
    Based on an algorithm described by <NAME>:
    http://austinclemens.com/blog/2016/01/09/an-algorithm-for-creating-a-graphs-axes/
    See :meth:`.ScoreTicker._score` for the scoring implementation.
:param domain_min:
Minimum value of the data series.
:param domain_max:
Maximum value of the data series.
"""
def __init__(self, domain_min, domain_max):
self._domain_min = domain_min
self._domain_max = domain_max
self._ticks = self._find_ticks()
self._min = self._ticks[0]
self._max = self._ticks[-1]
@property
def ticks(self):
return self._ticks
@property
def min(self):
return self._min
@property
def max(self):
return self._max
def _find_ticks(self):
"""
Implements the tick-finding algorithm.
"""
force_zero = self._domain_min < ZERO and self._domain_max > ZERO
interval_guess = abs(self._domain_max - self._domain_min) / (DEFAULT_TICKS - 1)
magnitude = interval_guess.log10().to_integral_exact(rounding=ROUND_CEILING)
candidate_intervals = []
for interval in INTERVALS:
candidate_intervals.append((interval, interval * pow(TEN, magnitude)))
candidate_intervals.append((interval, interval * pow(TEN, magnitude - 1)))
candidate_intervals.append((interval, interval * pow(TEN, magnitude + 1)))
candidate_ticks = []
for base_interval, interval in candidate_intervals:
ticks = []
if force_zero:
min_steps = (abs(self._domain_min) / interval).to_integral_exact(
rounding=ROUND_CEILING
)
ticks.append(self._round_tick(-min_steps * interval))
else:
ticks.append(
self._round_tick(
(self._domain_min / interval).to_integral_exact(
rounding=ROUND_FLOOR
)
* interval
)
)
tick_num = 1
while ticks[tick_num - 1] < self._domain_max:
t = self._round_tick(ticks[0] + (interval * tick_num))
ticks.append(t)
tick_num += 1
# Throw out sequences that are too short or too long
if len(ticks) < MIN_TICK_COUNT or len(ticks) > MAX_TICK_COUNT:
continue
candidate_ticks.append(
{
"base_interval": base_interval,
"interval": interval,
"ticks": ticks,
"score": self._score(base_interval, interval, ticks),
}
)
# Order by best score, using number of ticks as a tie-breaker
best = sorted(
candidate_ticks, key=lambda c: (c["score"]["total"], len(c["ticks"]))
)
return best[0]["ticks"]
def _score(self, base_interval, interval, ticks):
"""
Score a given tick sequence based on several criteria. This method returns
discrete scoring components for easier debugging.
"""
s = {"pct_waste": 0, "interval_penalty": 0, "len_penalty": 0, "total": 0}
# Penalty for wasted scale space
waste = (self._domain_min - ticks[0]) + (ticks[-1] - self._domain_max)
pct_waste = waste / (self._domain_max - self._domain_min)
s["pct_waste"] = pow(10, pct_waste)
# Penalty for choosing less optimal tick intervals
if base_interval in BEST_INTERVALS:
pass
elif base_interval in WORST_INTERVALS:
s["interval_penalty"] = 2
else:
s["interval_penalty"] = 1
# Penalty for too many ticks
if len(ticks) > 5:
s["len_penalty"] = len(ticks) - 5
s["total"] = s["pct_waste"] + s["interval_penalty"] + s["len_penalty"]
return s
def _round_tick(self, t):
"""
Round a tick to 0-3 decimal places, if the remaining digits do not
appear to be significant.
"""
for r in range(0, 4):
exp = pow(Decimal(10), Decimal(-r))
quantized = t.quantize(exp)
if isclose(t, quantized):
return quantized
return t
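# Hedged demo (added; not part of the original module). For the domain
# [0, 100] the 25-unit interval scores best: no wasted scale space and an
# ideal five-tick sequence.
if __name__ == "__main__":
    ticks = ScoreTicker(Decimal("0"), Decimal("100")).ticks
    print([str(t) for t in ticks])  # ['0', '25', '50', '75', '100']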
|
nickromano/django-daily-digest
|
daily_digest/tests.py
|
import random
from datetime import date, datetime, timedelta
import mock
import pytz
from django.contrib.auth.models import User
from django.core.management import call_command
from django.test import TestCase
from django.test.client import Client
from project.photos.models import PhotoUpload
from .config import DailyDigestRequiredFieldException, daily_digest_config, load_config
from .utils import (
EmailMultiAlternatives,
send_daily_digest,
series_data_for_model,
series_labels,
)
class MockEmailMultiAlternatives(mock.Mock):
subject = ""
def __init__(self, subject, text_content, from_email, to):
self.subject = subject
self.text_content = text_content
self.from_email = from_email
self.to = to
class DailyDigestTestCase(TestCase):
@mock.patch(
"daily_digest.utils.current_time_naive",
return_value=datetime(2018, 1, 10, 8, 0),
)
@mock.patch.object(EmailMultiAlternatives, "send")
@mock.patch.object(EmailMultiAlternatives, "attach")
@mock.patch.object(EmailMultiAlternatives, "attach_alternative")
@mock.patch.object(EmailMultiAlternatives, "__init__", return_value=None)
def test_send_daily_digest(
self, mock_init_email, mock_attach_html, mock_add_attachment, *_
):
send_daily_digest()
subject, text_content, from_email, to_emails = mock_init_email.call_args_list[
0
][0]
self.assertEqual(subject, "Daily Digest - 01/10/18")
self.assertEqual(text_content, "Daily Digest")
self.assertEqual(from_email, "<EMAIL>")
self.assertEqual(list(to_emails), [])
self.assertEqual(
mock_attach_html.call_args_list[0],
mock.call(
'<!DOCTYPE html>\n<html>\n<head>\n\t<title></title>\n\t<meta name="viewport" content="width=device-width, initial-scale=1.0">\n\n\t</head>\n<body style="font-family:-apple-system, BlinkMacSystemFont, sans-serif">\n\t\n\n\n<h3 style="font-weight:500; margin-bottom:0">New Users</h3>\n\n\n\t<img class="chart" width="100%" src="cid:new-users" style="max-width:480px">\n\n<br>\n\n<h3 style="font-weight:500; margin-bottom:0">Photo Uploads</h3>\n\n\n\t<img class="chart" width="100%" src="cid:photo-uploads" style="max-width:480px">\n\n<br>\n\n\n\n</body>\n</html>',
"text/html",
),
)
self.assertEqual(
mock_add_attachment.call_args_list[0][0][0].get("Content-ID"), "<new-users>"
)
@mock.patch("daily_digest.management.commands.send_daily_digest.send_daily_digest")
def test_send_daily_digest_command(self, mock_send_digest):
call_command("send_daily_digest")
self.assertEqual(mock_send_digest.called, True)
def test_daily_digest_preview(self):
user = User.objects.create(username="test", is_superuser=True, is_staff=True)
user.set_password("<PASSWORD>")
user.save()
client = Client()
client.login(username="test", password="<PASSWORD>")
response = client.get("/admin/daily-digest-preview/")
self.assertEqual(response.status_code, 200)
def test_daily_digest_preview_requires_staff(self):
user = User.objects.create(username="test")
user.set_password("<PASSWORD>")
user.save()
client = Client()
client.login(username="test", password="<PASSWORD>")
response = client.get("/admin/daily-digest-preview/")
self.assertEqual(response.status_code, 302)
@mock.patch(
"daily_digest.utils.current_time_naive",
return_value=datetime(2018, 1, 10, 16, 0),
) # 8am LA
def test_series_data_generated_for_current_period(self, *_):
timezone = pytz.timezone("America/Los_Angeles")
User.objects.filter(username__startswith="test").delete()
PhotoUpload.objects.all().delete()
los_angeles_timezone = pytz.timezone("America/Los_Angeles")
today = los_angeles_timezone.localize(datetime(2018, 1, 10, 16, 0))
for days in [-1, 0, 1, 2, 3, 4, 5, 6, 7]:
for _ in range(0, days + 3):
User.objects.create(
username="test-{}".format(random.randint(0, 100000000)),
date_joined=today - timedelta(days=days),
)
queryset = User.objects.all()
data, total_count = series_data_for_model(queryset, "date_joined", timezone)
self.assertEqual(total_count, 42)
self.maxDiff = None
self.assertEqual(
data,
[
[date(2018, 1, 4), 9],
[date(2018, 1, 5), 8],
[date(2018, 1, 6), 7],
[date(2018, 1, 7), 6],
[date(2018, 1, 8), 5],
[date(2018, 1, 9), 4],
[date(2018, 1, 10), 3],
],
)
@mock.patch(
"daily_digest.utils.current_time_naive",
return_value=datetime(2018, 1, 10, 16, 0),
) # 8am LA
def test_series_data_generated_for_previous_period(self, *_):
timezone = pytz.timezone("America/Los_Angeles")
User.objects.filter(username__startswith="test").delete()
PhotoUpload.objects.all().delete()
los_angeles_timezone = pytz.timezone("America/Los_Angeles")
today = los_angeles_timezone.localize(datetime(2018, 1, 10, 16, 0))
for days in [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]:
for _ in range(0, days):
User.objects.create(
username="test-{}".format(random.randint(0, 100000000)),
date_joined=today - timedelta(days=days),
)
queryset = User.objects.all()
data, total_count = series_data_for_model(
queryset, "date_joined", timezone, prev_period=True
)
self.assertEqual(total_count, 70)
self.maxDiff = None
self.assertEqual(
data,
[
[date(2018, 1, 4), 13],
[date(2018, 1, 5), 12],
[date(2018, 1, 6), 11],
[date(2018, 1, 7), 10],
[date(2018, 1, 8), 9],
[date(2018, 1, 9), 8],
[date(2018, 1, 10), 7],
],
)
def test_series_labels(self):
current_label, prev_period_label = series_labels(55, 100)
self.assertEqual(current_label, "Last 7 Days (55) -45%")
self.assertEqual(prev_period_label, "Previous Period (100)")
def test_series_labels_with_no_prev_period_data(self):
current_label, prev_period_label = series_labels(55, 0)
self.assertEqual(current_label, "Last 7 Days (55)")
self.assertEqual(prev_period_label, "Previous Period (0)")
def test_config_missing_required_field(self):
config_missing_fields = {
"charts": [
{
# 'title': 'New Users' Missing field
"model": "django.contrib.auth.models.User",
"date_field": "date_joined",
}
]
}
with self.settings(DAILY_DIGEST_CONFIG=config_missing_fields):
with self.assertRaises(DailyDigestRequiredFieldException):
load_config()
def test_config_passing_model_path(self):
config_missing_fields = {
"charts": [
{
"title": "New Users",
"model": "django.contrib.auth.models.User",
"date_field": "date_joined",
}
]
}
with self.settings(DAILY_DIGEST_CONFIG=config_missing_fields):
load_config()
self.assertEqual(daily_digest_config.chart_configs[0].model, User)
def test_config_passing_app_label(self):
config_missing_fields = {
"charts": [
{
"title": "New Users",
"app_label": "auth",
"model": "user",
"date_field": "date_joined",
}
]
}
with self.settings(DAILY_DIGEST_CONFIG=config_missing_fields):
load_config()
self.assertEqual(daily_digest_config.chart_configs[0].model, User)
|
nickromano/django-daily-digest
|
leather/svg.py
|
#!/usr/bin/env python
"""
Helpers for working with SVG.
"""
import xml.etree.ElementTree as ET
import six
HEADER = (
'<?xml version="1.0" standalone="no"?>\n'
+ '<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"\n'
+ '"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
)
def stringify(root):
"""
Convert an SVG XML tree to a unicode string.
"""
if six.PY3:
return ET.tostring(root, encoding="unicode")
else:
return ET.tostring(root, encoding="utf-8")
def save(f, root):
"""
Save an SVG XML tree to a file.
"""
f.write(HEADER)
f.write(stringify(root))
def translate(x, y):
"""
Generate an SVG transform statement representing a simple translation.
"""
return "translate(%i %i)" % (x, y)
def rotate(deg, x, y):
"""
Generate an SVG transform statement representing rotation around a given
point.
"""
return "rotate(%i %i %i)" % (deg, x, y)
|
nickromano/django-daily-digest
|
daily_digest/apps.py
|
# -*- coding: utf-8 -*-
from django.apps import AppConfig
class DailyDigestConfig(AppConfig):
name = "daily_digest"
|
nickromano/django-daily-digest
|
leather/ticks/score_time.py
|
#!/usr/bin/env python
from datetime import date, datetime, timedelta
from decimal import Decimal
from functools import partial
import math
import sys
from leather.ticks.score import ScoreTicker
from leather import utils
#: The default number of ticks to produce
DEFAULT_TICKS = 5
#: The minimum length of a viable tick sequence
MIN_TICK_COUNT = 4
#: The maximum length of a viable tick sequence
MAX_TICK_COUNT = 10
#: The minimum units of the interval needed to use that interval ("4 years")
MIN_UNITS = 4
#: The possible intervals as (to_function, from_function, overlap_tick_formatter, simple_tick_formatter)
INTERVALS = [
(utils.to_year_count, utils.from_year_count, None, "%Y"),
(utils.to_month_count, utils.from_month_count, "%Y-%m", "%m"),
(utils.to_day_count, utils.from_day_count, "%m-%d", "%d"),
(utils.to_hour_count, utils.from_hour_count, "%d-%H", "%H"),
(utils.to_minute_count, utils.from_minute_count, "%H:%M", "%M"),
(utils.to_second_count, utils.from_second_count, "%H:%M:%S", "%S"),
(utils.to_microsecond_count, utils.from_microsecond_count, "%S-%f", "%f"),
]
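# Note (added): the ticker walks this list from coarse (years) to fine
# (microseconds) and picks the first unit that spans at least MIN_UNITS
# steps. A nine-day domain therefore ticks in days, formatted with '%d',
# or with the overlap format '%m-%d' when the domain also crosses a month.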
class ScoreTimeTicker(ScoreTicker):
"""
A variation on :class:`.ScoreTicker` that generates sequences of dates
or datetimes.
:param domain_min:
Minimum value of the data series.
:param domain_max:
Maximum value of the data series.
"""
def __init__(self, domain_min, domain_max):
self._domain_min = domain_min
self._domain_max = domain_max
if isinstance(self._domain_min, datetime):
self._type = datetime
else:
self._type = date
# Identify appropriate interval unit
self._to_unit = None
self._from_unit = None
self._fmt = None
previous_delta = 0
for to_func, from_func, overlap_fmt, simple_fmt in INTERVALS:
delta = to_func(self._domain_max) - to_func(self._domain_min)
if delta >= MIN_UNITS or to_func is utils.to_microsecond_count:
self._to_unit = to_func
self._from_unit = partial(from_func, t=self._type)
if previous_delta >= 1:
self._fmt = overlap_fmt
else:
self._fmt = simple_fmt
break
previous_delta = delta
# Compute unit min and max
self._unit_min = self._to_unit(self._domain_min)
self._unit_max = self._to_unit(self._domain_max)
if (self._domain_max - self._from_unit(self._unit_max)).total_seconds() > 0:
self._unit_max += 1
self._ticks = self._find_ticks()
self._min = self._ticks[0]
self._max = self._ticks[-1]
def _find_ticks(self):
"""
Implements the tick-finding algorithm.
"""
delta = self._unit_max - self._unit_min
interval_guess = int(math.ceil(delta / (DEFAULT_TICKS - 1)))
candidate_intervals = []
candidate_intervals.append(interval_guess)
candidate_intervals.append(interval_guess - 1)
candidate_intervals.append(interval_guess + 1)
if 0 in candidate_intervals:
candidate_intervals.remove(0)
candidate_ticks = []
for interval in candidate_intervals:
ticks = []
ticks.append(int(math.floor((self._unit_min / interval))) * interval)
tick_num = 1
while ticks[tick_num - 1] < self._unit_max:
t = ticks[0] + (interval * tick_num)
ticks.append(t)
tick_num += 1
# Throw out sequences that are too short or too long
if len(ticks) < MIN_TICK_COUNT or len(ticks) > MAX_TICK_COUNT:
continue
candidate_ticks.append(
{
"interval": interval,
"ticks": ticks,
"score": self._score(interval, ticks),
}
)
# Order by best score, using number of ticks as a tie-breaker
best = sorted(
candidate_ticks, key=lambda c: (c["score"]["total"], len(c["ticks"]))
)
ticks = best[0]["ticks"]
return [self._from_unit(t) for t in ticks]
def _score(self, interval, ticks):
"""
Score a given tick sequence based on several criteria. This method returns
discrete scoring components for easier debugging.
"""
s = {"pct_waste": 0, "interval_penalty": 0, "len_penalty": 0, "total": 0}
# Penalty for wasted scale space
waste = (self._unit_min - ticks[0]) + (ticks[-1] - self._unit_max)
pct_waste = waste / (self._unit_max - self._unit_min)
s["pct_waste"] = pow(10, pct_waste)
# Penalty for too many ticks
if len(ticks) > 5:
s["len_penalty"] = len(ticks) - 5
s["total"] = s["pct_waste"] + s["interval_penalty"] + s["len_penalty"]
return s
def format_tick(self, tick):
"""
Format a tick using the inferred time formatting.
"""
return tick.strftime(self._fmt)
|
nickromano/django-daily-digest
|
daily_digest/urls.py
|
from django.conf.urls import url
from .views import preview_daily_digest
urlpatterns = [
url(
r"admin/daily-digest-preview/$",
preview_daily_digest,
name="preview-daily-digest",
),
]
|
nickromano/django-daily-digest
|
project/photos/models.py
|
from django.contrib.auth.models import User
from django.db import models
class PhotoUpload(models.Model):
user = models.ForeignKey(
User, related_name="photo_uploads", on_delete=models.CASCADE
)
created = models.DateTimeField(auto_now_add=True)
|
nickromano/django-daily-digest
|
leather/scales/ordinal.py
|
#!/usr/bin/env python
from decimal import Decimal
from leather.scales.base import Scale
class Ordinal(Scale):
"""
A scale that maps individual values (e.g. strings) to a range.
"""
def __init__(self, domain):
self._domain = domain
def contains(self, v):
"""
Return :code:`True` if a given value is contained within this scale's
displayed domain.
"""
return v in self._domain
def project(self, value, range_min, range_max):
"""
Project a value in this scale's domain to a target range.
"""
range_min = Decimal(range_min)
range_max = Decimal(range_max)
segments = len(self._domain)
segment_size = (range_max - range_min) / segments
try:
pos = (
range_min
+ (self._domain.index(value) * segment_size)
+ (segment_size / 2)
)
except ValueError:
raise ValueError(
'Value "%s" is not present in Ordinal scale domain' % value
)
return pos
def project_interval(self, value, range_min, range_max):
"""
Project a value in this scale's domain to an interval in the target
        range. This is used for placing :class:`.Bars` and :class:`.Columns`.
"""
range_min = Decimal(range_min)
range_max = Decimal(range_max)
segments = len(self._domain)
segment_size = (range_max - range_min) / segments
gap = segment_size / Decimal(20)
try:
a = range_min + (self._domain.index(value) * segment_size) + gap
b = range_min + ((self._domain.index(value) + 1) * segment_size) - gap
except ValueError:
raise ValueError(
'Value "%s" is not present in Ordinal scale domain' % value
)
return (a, b)
def ticks(self):
"""
Generate a series of ticks for this scale.
"""
return self._domain
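# Hedged demo (added; not part of the original module): each value projects
# to the midpoint of its segment, so 'b' in a three-value domain projected
# onto [0, 300] lands at 150.
if __name__ == "__main__":
    print(Ordinal(["a", "b", "c"]).project("b", 0, 300))  # -> 150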
|
nickromano/django-daily-digest
|
daily_digest/config.py
|
from collections import namedtuple
import importlib
import pytz
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.text import slugify
class DailyDigestConfigurationException(Exception):
pass
class DailyDigestRequiredFieldException(DailyDigestConfigurationException):
pass
def import_class_at_path(path):
module_name, class_name = path.rsplit(".", 1)
parser_module = importlib.import_module(module_name)
parser_class = getattr(parser_module, class_name)
return parser_class
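# Example (added for illustration):
#   import_class_at_path("django.contrib.auth.models.User") returns the
#   User model class itself, not an instance.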
DailyDigestConfig = namedtuple(
"DailyDigestConfig",
["title", "from_email", "to", "timezone", "exclude_today", "chart_configs"],
)
def load_config():
global daily_digest_config
_config = getattr(settings, "DAILY_DIGEST_CONFIG", {})
DailyDigestChart = namedtuple(
"DailyDigestChart",
["title", "slug", "model", "date_field", "filter_kwargs", "distinct_by"],
)
_chart_configs = []
for chart in _config.get("charts", []):
for key in ["title", "model", "date_field"]:
if not chart.get(key):
raise DailyDigestRequiredFieldException(
"Missing required field {} for chart config.".format(key)
)
if chart.get("app_label"):
# Allow the user to pass an app_label and model
content_type = ContentType.objects.get(
app_label=chart["app_label"], model=chart["model"]
)
model = content_type.model_class()
else:
model = import_class_at_path(chart["model"])
chart_config = DailyDigestChart(
title=chart["title"],
slug=slugify(chart["title"]),
model=model,
date_field=chart["date_field"],
filter_kwargs=chart.get("filter_kwargs", {}),
distinct_by=chart.get("distinct_by"),
)
_chart_configs.append(chart_config)
daily_digest_config = DailyDigestConfig(
title=_config.get("title", "Daily Digest"),
from_email=_config.get("from_email", settings.DEFAULT_FROM_EMAIL),
to=_config.get("to", settings.ADMINS),
timezone=pytz.timezone(_config["timezone"])
if _config.get("timezone")
else pytz.UTC,
exclude_today=_config.get("exclude_today", False),
chart_configs=_chart_configs,
)
load_config()
|
nickromano/django-daily-digest
|
daily_digest/utils.py
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import leather
import pytz
from cairosvg import svg2png
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from email.mime.image import MIMEImage
from premailer import transform
from .config import daily_digest_config
leather.theme.background_color = "#ffffff"
leather.theme.title_font_family = "Helvetica"
leather.theme.legend_font_family = "Helvetica"
leather.theme.tick_font_family = "Helvetica"
PRIMARY_STROKE_COLOR = "#4383CC"
PREV_PERIOD_STROKE_COLOR = "#B4CDEB"
EMAIL_TIME_PERIOD = 7
def current_time_naive():
return datetime.now()
def series_labels(series_1_count, series_2_count):
if series_2_count > 0:
percent_change = int(
round((float(series_1_count - series_2_count) / series_2_count) * 100, 0)
)
if percent_change > 0:
percent_change = "+{}%".format(percent_change)
else:
percent_change = "{}%".format(percent_change)
else:
percent_change = ""
current_label = "Last {} Days ({}) {}".format(
EMAIL_TIME_PERIOD, series_1_count, percent_change
)
prev_period_label = "Previous Period ({})".format(series_2_count)
return current_label.strip(), prev_period_label.strip()
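# Worked example (added; mirrors the expectations in daily_digest/tests.py):
#   series_labels(55, 100) -> ('Last 7 Days (55) -45%', 'Previous Period (100)')
#   series_labels(55, 0)   -> ('Last 7 Days (55)', 'Previous Period (0)')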
def series_data_for_model(
queryset, field, timezone, prev_period=False, exclude_today=False
):
start_of_today = timezone.localize(
current_time_naive().replace(hour=0, minute=0, second=0)
)
start_of_today = start_of_today.astimezone(pytz.UTC)
if exclude_today:
start_of_today = start_of_today - timedelta(days=1)
start_of_today += timedelta(days=1) # End of today
if prev_period:
        period_end = start_of_today - timedelta(days=EMAIL_TIME_PERIOD)  # end of the previous period
period_start = period_end - timedelta(days=EMAIL_TIME_PERIOD)
else:
period_end = start_of_today
period_start = period_end - timedelta(days=EMAIL_TIME_PERIOD)
filters = {
"{}__gte".format(field): period_start,
"{}__lt".format(field): period_end,
}
series_data = queryset.filter(**filters).values_list(field, flat=True)
series_data = list(map(lambda x: x.astimezone(timezone).date(), series_data))
grouped_by_date = []
period_start_local = period_start.astimezone(timezone)
for i in range(0, EMAIL_TIME_PERIOD):
day = (period_start_local + timedelta(days=i)).date()
count = 0
for user_date in series_data:
if user_date == day:
count += 1
if prev_period:
day = day + timedelta(
days=EMAIL_TIME_PERIOD
) # offset date to show both series on one chart
grouped_by_date.append([day, count])
return grouped_by_date, len(series_data)
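# Note (added): with prev_period=True the query window covers the seven days
# before the current window, and each bucket date is shifted forward by
# EMAIL_TIME_PERIOD days so both series share one x axis; this is why the
# tests expect the 2018-01-04..2018-01-10 labels for both periods.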
def svg_data_for_query(queryset, field, chart_name, timezone, exclude_today=False):
data, total_count = series_data_for_model(
queryset, field, timezone, exclude_today=exclude_today
)
data_prev_period, total_count_prev_period = series_data_for_model(
queryset, field, timezone, prev_period=True, exclude_today=exclude_today
)
# Show every date on the x axis
x_axis_ticks = []
for item in data:
x_axis_ticks.append(item[0])
chart = leather.Chart()
chart.add_x_axis(ticks=x_axis_ticks)
# Start at 0 - Turn this into an option
y_max = max([item[1] for item in data])
y_max_prev_period = max([item[1] for item in data_prev_period])
chart.add_y_scale(0, max([y_max, y_max_prev_period]))
current_label, prev_period_label = series_labels(
total_count, total_count_prev_period
)
chart.add_line(data, name=current_label, stroke_color=PRIMARY_STROKE_COLOR)
chart.add_line(
data_prev_period,
name=prev_period_label,
stroke_color=PREV_PERIOD_STROKE_COLOR,
stroke_dasharray="5",
)
chart.to_svg("/tmp/{}.svg".format(chart_name), width=480, height=240)
with open("/tmp/{}.svg".format(chart_name)) as svgfile:
svg_data = svgfile.read()
svg2png(
url="/tmp/{}.svg".format(chart_name),
write_to="/tmp/{}.png".format(chart_name),
scale=2,
)
return svg_data
def charts_data_for_config(chart_format="svg"):
charts_data = []
for chart_config in daily_digest_config.chart_configs:
title = chart_config.title
date_field = chart_config.date_field
filter_kwargs = chart_config.filter_kwargs
queryset = chart_config.model.objects.filter(**filter_kwargs)
if chart_config.distinct_by:
queryset = queryset.distinct(chart_config.distinct_by).order_by(
chart_config.distinct_by
)
result = {
"title": title,
"svg_data": svg_data_for_query(
queryset,
date_field,
chart_config.slug,
timezone=daily_digest_config.timezone,
exclude_today=daily_digest_config.exclude_today,
),
}
if chart_format == "png":
del result["svg_data"]
result["slug"] = chart_config.slug
charts_data.append(result)
return charts_data
def send_daily_digest():
title = daily_digest_config.title
today = daily_digest_config.timezone.localize(current_time_naive())
subject = "{} - {}".format(title, today.strftime("%x"))
text_content = title
context = {"charts": charts_data_for_config(chart_format="png")}
html_template = get_template("daily_digest/email.html")
html_content = html_template.render(context)
# Inline all css
html_content = transform(html_content)
msg = EmailMultiAlternatives(
subject, text_content, daily_digest_config.from_email, daily_digest_config.to
)
msg.attach_alternative(html_content, "text/html")
for chart_config in daily_digest_config.chart_configs:
with open("/tmp/{}.png".format(chart_config.slug), "rb") as image_file:
msg_image = MIMEImage(image_file.read())
msg_image.add_header("Content-ID", "<{}>".format(chart_config.slug))
msg.attach(msg_image)
msg.send(fail_silently=False)
|
nickromano/django-daily-digest
|
testrunner.py
|
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
os.environ["DJANGO_SETTINGS_MODULE"] = "project.settings"
test_dir = os.path.dirname(__file__)
sys.path.insert(0, test_dir)
def runtests():
TestRunner = get_runner(settings) # noqa
test_runner = TestRunner(verbosity=1, interactive=True)
if hasattr(django, "setup"):
django.setup()
failures = test_runner.run_tests(["daily_digest"])
sys.exit(bool(failures))
if __name__ == "__main__":
runtests()
|
nickromano/django-daily-digest
|
project/settings.py
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
UNIT_TESTING = "test" in sys.argv
DEBUG = True
ALLOWED_HOSTS = []
SECRET_KEY = "test"
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "db.sqlite3" if UNIT_TESTING else "db.sqlite3",
}
}
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
MIDDLEWARE_CLASSES = MIDDLEWARE # Django 1.8 support
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.sessions",
"django.contrib.contenttypes",
"django.contrib.staticfiles",
"django.contrib.messages",
# Used for examples
"project.photos",
"daily_digest",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
},
]
LOGIN_URL = "/admin/"
STATIC_URL = "/static/"
DAILY_DIGEST_CONFIG = {
"title": "Daily Digest",
"from_email": "<EMAIL>",
"timezone": "America/Los_Angeles",
"exclude_today": False,
"charts": [
{
"title": "New Users",
"model": "django.contrib.auth.models.User",
"date_field": "date_joined",
"filter_kwargs": {"is_active": True},
},
{
"title": "Photo Uploads",
"model": "project.photos.models.PhotoUpload",
"date_field": "created",
},
],
}
|
nickromano/django-daily-digest
|
daily_digest/__init__.py
|
__version__ = "0.0.3"
default_app_config = "daily_digest.apps.DailyDigestConfig" # noqa
|
nickromano/django-daily-digest
|
leather/scales/linear.py
|
#!/usr/bin/env python
from decimal import Decimal
from leather.scales.base import Scale
from leather.ticks.score import ScoreTicker
class Linear(Scale):
"""
A scale that linearly maps values from a domain to a range.
:param domain_min:
The minimum value of the input domain.
:param domain_max:
The maximum value of the input domain.
"""
def __init__(self, domain_min, domain_max):
if domain_min > domain_max:
raise ValueError("Inverted domains are not currently supported.")
elif domain_min == domain_max:
# Default to unit scale
self._data_min = Decimal(0)
self._data_max = Decimal(1)
else:
self._data_min = Decimal(domain_min)
self._data_max = Decimal(domain_max)
self._ticker = ScoreTicker(self._data_min, self._data_max)
def contains(self, v):
"""
Return :code:`True` if a given value is contained within this scale's
domain.
"""
return self._data_min <= v <= self._data_max
def project(self, value, range_min, range_max):
"""
Project a value in this scale's domain to a target range.
"""
value = Decimal(value)
range_min = Decimal(range_min)
range_max = Decimal(range_max)
pos = (value - self._ticker.min) / (self._ticker.max - self._ticker.min)
return ((range_max - range_min) * pos) + range_min
def project_interval(self, value, range_min, range_max):
"""
Project a value in this scale's domain to an interval in the target
range. This is used by shapes such as :class:`.Bars` and :class:`.Columns`.
"""
raise NotImplementedError
def ticks(self):
"""
Generate a series of ticks for this scale.
"""
return self._ticker.ticks
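# Minimal usage sketch. Assuming ScoreTicker keeps 0 and 10 as the tick bounds
# for this domain, a value of 5 projects to the midpoint of the target range.
if __name__ == "__main__":
    scale = Linear(0, 10)
    assert scale.contains(5)
    print(scale.project(5, 0, 100))  # Decimal('50.0')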
|
nickromano/django-daily-digest
|
leather/chart.py
|
#!/usr/bin/env python
from copy import copy
import os
import xml.etree.ElementTree as ET
import six
from leather.axis import Axis
from leather.data_types import Date, DateTime
from leather.scales import Scale, Linear, Temporal
from leather.series import Series, CategorySeries
from leather.shapes import Bars, Columns, Dots, Line
import leather.svg as svg
from leather import theme
from leather.utils import X, Y, DIMENSION_NAMES, Box, IPythonSVG, warn
class Chart(object):
"""
Container for all chart types.
:param title:
An optional title that will be rendered at the top of the chart.
"""
def __init__(self, title=None):
self._title = title
self._series_colors = theme.default_series_colors
self._layers = []
self._types = [None, None]
self._scales = [None, None]
self._axes = [None, None]
def _palette(self):
"""
Return a generator for series colors.
"""
return (color for color in self._series_colors)
def set_x_scale(self, scale):
"""
Set the X :class:`.Scale` for this chart.
"""
self._scales[X] = scale
def set_y_scale(self, scale):
"""
See :meth:`.Chart.set_x_scale`.
"""
self._scales[Y] = scale
def add_x_scale(self, domain_min, domain_max):
"""
Create and add a :class:`.Scale`.
If the provided domain values are :class:`date` or :class:`datetime`
then a :class:`.Temporal` scale will be created; otherwise it will be
:class:`.Linear`.
If you want to set a custom scale class use :meth:`.Chart.set_x_scale`
instead.
"""
scale_type = Linear
if isinstance(domain_min, Date.types) or isinstance(domain_min, DateTime.types):
scale_type = Temporal
self.set_x_scale(scale_type(domain_min, domain_max))
def add_y_scale(self, domain_min, domain_max):
"""
See :meth:`.Chart.add_x_scale`.
"""
scale_type = Linear
if isinstance(domain_min, Date.types) or isinstance(domain_min, DateTime.types):
scale_type = Temporal
self.set_y_scale(scale_type(domain_min, domain_max))
def set_x_axis(self, axis):
"""
Set an :class:`.Axis` class for this chart.
"""
self._axes[X] = axis
def set_y_axis(self, axis):
"""
See :meth:`.Chart.set_x_axis`.
"""
self._axes[Y] = axis
def add_x_axis(self, ticks=None, tick_formatter=None, name=None):
"""
Create and add an X :class:`.Axis`.
If you want to set a custom axis class use :meth:`.Chart.set_x_axis`
instead.
"""
self._axes[X] = Axis(ticks, tick_formatter, name)
def add_y_axis(self, ticks=None, tick_formatter=None, name=None):
"""
See :meth:`.Chart.add_x_axis`.
"""
self._axes[Y] = Axis(ticks, tick_formatter, name)
def add_series(self, series, shape):
"""
Add a data :class:`.Series` to the chart. The data types of the new
series must be consistent with any series that have already been added.
There are several shortcuts for adding different types of data series.
See :meth:`.Chart.add_bars`, :meth:`.Chart.add_columns`,
:meth:`.Chart.add_dots`, and :meth:`.Chart.add_line`.
"""
if self._layers and isinstance(self._layers[0][0], CategorySeries):
raise RuntimeError(
"Additional series can not be added to a chart with a CategorySeries."
)
if isinstance(series, CategorySeries):
self._types = series._types
else:
for dim in [X, Y]:
if not self._types[dim]:
self._types[dim] = series._types[dim]
elif series._types[dim] is not self._types[dim]:
raise TypeError(
"Can't mix axis-data types: %s and %s"
% (series._types[dim], self._types[dim])
)
shape.validate_series(series)
self._layers.append((series, shape))
def add_bars(self, data, x=None, y=None, name=None, fill_color=None):
"""
Create and add a :class:`.Series` rendered with :class:`.Bars`.
Note that when creating bars in this way the order of the series data
will be reversed so that the first item in the series is displayed
as the top-most bar in the graphic. If you don't want this to happen
use :meth:`.Chart.add_series` instead.
"""
self.add_series(
Series(list(reversed(data)), x=x, y=y, name=name), Bars(fill_color)
)
def add_columns(self, data, x=None, y=None, name=None, fill_color=None):
"""
Create and add a :class:`.Series` rendered with :class:`.Columns`.
"""
self.add_series(Series(data, x=x, y=y, name=name), Columns(fill_color))
def add_dots(self, data, x=None, y=None, name=None, fill_color=None, radius=None):
"""
Create and add a :class:`.Series` rendered with :class:`.Dots`.
"""
self.add_series(Series(data, x=x, y=y, name=name), Dots(fill_color, radius))
def add_line(
self,
data,
x=None,
y=None,
name=None,
stroke_color=None,
width=None,
stroke_dasharray=None,
):
"""
Create and add a :class:`.Series` rendered with :class:`.Line`.
"""
self.add_series(
Series(data, x=x, y=y, name=name),
Line(stroke_color, width, stroke_dasharray),
)
def _validate_dimension(self, dimension):
"""
Validates that the given scale and axis are valid for the data that
has been added to this chart. If a scale or axis has not been set,
generates automated ones.
"""
scale = self._scales[dimension]
axis = self._axes[dimension]
if not scale:
scale = Scale.infer(self._layers, dimension, self._types[dimension])
else:
for series, shape in self._layers:
if not scale.contains(series.min(dimension)) or not scale.contains(
series.max(dimension)
):
d = DIMENSION_NAMES[dimension]
warn(
"Data contains values outside %s scale domain. All data points may not be visible on the chart."
% d
)
# Only display once per axis
break
if not axis:
axis = Axis()
return (scale, axis)
def to_svg_group(self, width=None, height=None):
"""
Render this chart to an SVG group element.
This can then be placed inside an :code:`<svg>` tag to make a complete
SVG graphic.
See :meth:`.Chart.to_svg` for arguments.
"""
width = width or theme.default_width
height = height or theme.default_height
if not self._layers:
raise ValueError(
"You must add at least one series to the chart before rendering."
)
if isinstance(theme.margin, float):
default_margin = width * theme.margin
margin = Box(
top=default_margin,
right=default_margin,
bottom=default_margin,
left=default_margin,
)
elif isinstance(theme.margin, int):
margin = Box(theme.margin, theme.margin, theme.margin, theme.margin)
elif not isinstance(theme.margin, Box):
margin = Box(*theme.margin)
else:
margin = theme.margin
# Root / background
root_group = ET.Element("g")
root_group.append(
ET.Element(
"rect",
x=six.text_type(0),
y=six.text_type(0),
width=six.text_type(width),
height=six.text_type(height),
fill=theme.background_color,
)
)
# Margins
margin_group = ET.Element("g")
margin_group.set("transform", svg.translate(margin.left, margin.top))
margin_width = width - (margin.left + margin.right)
margin_height = height - (margin.top + margin.bottom)
root_group.append(margin_group)
# Header
header_group = ET.Element("g")
header_margin = 0
if self._title:
label = ET.Element(
"text", x=six.text_type(0), y=six.text_type(0), fill=theme.title_color
)
label.set("font-family", theme.title_font_family)
label.set("font-size", six.text_type(theme.title_font_size))
label.text = six.text_type(self._title)
header_group.append(label)
header_margin += theme.title_font_char_height + theme.title_gap
# Legend
if len(self._layers) > 1 or isinstance(self._layers[0][0], CategorySeries):
legend_group = ET.Element("g")
legend_group.set("transform", svg.translate(0, header_margin))
indent = 0
rows = 1
palette = self._palette()
for series, shape in self._layers:
for item_group, item_width in shape.legend_to_svg(series, palette):
if indent + item_width > width:
indent = 0
rows += 1
y = (rows - 1) * (theme.legend_font_char_height + theme.legend_gap)
item_group.set("transform", svg.translate(indent, y))
indent += item_width
legend_group.append(item_group)
legend_height = rows * (theme.legend_font_char_height + theme.legend_gap)
header_margin += legend_height
header_group.append(legend_group)
margin_group.append(header_group)
# Body
body_group = ET.Element("g")
body_group.set("transform", svg.translate(0, header_margin))
body_width = margin_width
body_height = margin_height - header_margin
margin_group.append(body_group)
# Axes
x_scale, x_axis = self._validate_dimension(X)
y_scale, y_axis = self._validate_dimension(Y)
bottom_margin = x_axis.estimate_label_margin(x_scale, "bottom")
left_margin = y_axis.estimate_label_margin(y_scale, "left")
canvas_width = body_width - left_margin
canvas_height = body_height - bottom_margin
axes_group = ET.Element("g")
axes_group.set("transform", svg.translate(left_margin, 0))
axes_group.append(x_axis.to_svg(canvas_width, canvas_height, x_scale, "bottom"))
axes_group.append(y_axis.to_svg(canvas_width, canvas_height, y_scale, "left"))
header_group.set("transform", svg.translate(left_margin, 0))
body_group.append(axes_group)
# Series
series_group = ET.Element("g")
palette = self._palette()
for series, shape in self._layers:
series_group.append(
shape.to_svg(
canvas_width, canvas_height, x_scale, y_scale, series, palette
)
)
axes_group.append(series_group)
return root_group
def to_svg(self, path=None, width=None, height=None):
"""
Render this chart to an SVG document.
The :code:`width` and :code:`height` are specified in SVG's
"unitless" units, however, it is usually convenient to specify them
as though they were pixels.
:param path:
Filepath or file-like object to write to. If omitted then the SVG
will be returned as a string. If running within IPython, then this
will return a SVG object to be displayed.
:param width:
The output width, in SVG user units. Defaults to
:data:`.theme.default_chart_width`.
:param height:
The output height, in SVG user units. Defaults to
:data:`.theme.default_chart_height`.
"""
width = width or theme.default_chart_width
height = height or theme.default_chart_height
root = ET.Element(
"svg",
width=six.text_type(width),
height=six.text_type(height),
version="1.1",
xmlns="http://www.w3.org/2000/svg",
)
group = self.to_svg_group(width, height)
root.append(group)
svg_text = svg.stringify(root)
close = True
if path:
f = None
try:
if hasattr(path, "write"):
f = path
close = False
else:
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
f = open(path, "w")
f.write(svg.HEADER)
f.write(svg_text)
finally:
if close and f is not None:
f.close()
else:
return IPythonSVG(svg_text)
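# Minimal usage sketch (the data values are made up for illustration):
if __name__ == "__main__":
    chart = Chart(title="Example")
    chart.add_line([(0, 1), (5, 5), (10, 4)])
    chart.to_svg("/tmp/example.svg")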
|
nickromano/django-daily-digest
|
leather/axis.py
|
#!/usr/bin/env python
import xml.etree.ElementTree as ET
import six
from leather import svg
from leather import theme
class Axis(object):
"""
A horizontal or vertical chart axis.
:param ticks:
Instead of inferring tick values from the data, use exactly this
sequence of ticks values. These will still be passed to the
:code:`tick_formatter`.
:param tick_formatter:
An optional :func:`.tick_format_function`.
"""
def __init__(self, ticks=None, tick_formatter=None, name=None):
self._ticks = ticks
self._tick_formatter = tick_formatter
self._name = six.text_type(name) if name is not None else None
def _estimate_left_tick_width(self, scale):
"""
Estimate the y axis space used by tick labels.
"""
tick_values = self._ticks or scale.ticks()
tick_count = len(tick_values)
tick_formatter = self._tick_formatter or scale.format_tick
max_len = 0
for i, value in enumerate(tick_values):
max_len = max(max_len, len(tick_formatter(value, i, tick_count)))
return max_len * theme.tick_font_char_width
def estimate_label_margin(self, scale, orient):
"""
Estimate the space needed for the tick labels.
"""
margin = 0
if orient == "left":
margin += self._estimate_left_tick_width(scale) + (theme.tick_size * 2)
elif orient == "bottom":
margin += theme.tick_font_char_height + (theme.tick_size * 2)
if self._name:
margin += theme.axis_title_font_char_height + theme.axis_title_gap
return margin
def to_svg(self, width, height, scale, orient):
"""
Render this axis to SVG elements.
"""
group = ET.Element("g")
group.set("class", "axis " + orient)
# Axis title
if self._name is not None:
if orient == "left":
title_x = -(
self._estimate_left_tick_width(scale) + theme.axis_title_gap
)
title_y = height / 2
dy = ""
transform = svg.rotate(270, title_x, title_y)
elif orient == "bottom":
title_x = width / 2
title_y = (
height
+ theme.tick_font_char_height
+ (theme.tick_size * 2)
+ theme.axis_title_gap
)
dy = "1em"
transform = ""
title = ET.Element(
"text",
x=six.text_type(title_x),
y=six.text_type(title_y),
dy=dy,
fill=theme.axis_title_color,
transform=transform,
)
title.set("text-anchor", "middle")
title.set("font-family", theme.axis_title_font_family)
title.text = self._name
group.append(title)
# Ticks
if orient == "left":
label_x = -(theme.tick_size * 2)
x1 = -theme.tick_size
x2 = width
range_min = height
range_max = 0
elif orient == "bottom":
label_y = height + (theme.tick_size * 2)
y1 = 0
y2 = height + theme.tick_size
range_min = 0
range_max = width
tick_values = self._ticks or scale.ticks()
tick_count = len(tick_values)
tick_formatter = self._tick_formatter or scale.format_tick
zero_tick_group = None
for i, value in enumerate(tick_values):
# Tick group
tick_group = ET.Element("g")
tick_group.set("class", "tick")
if value == 0:
zero_tick_group = tick_group
else:
group.append(tick_group)
# Tick line
projected_value = scale.project(value, range_min, range_max)
if value == 0:
tick_color = theme.zero_color
else:
tick_color = theme.tick_color
if orient == "left":
y1 = projected_value
y2 = projected_value
elif orient == "bottom":
x1 = projected_value
x2 = projected_value
tick = ET.Element(
"line",
x1=six.text_type(x1),
y1=six.text_type(y1),
x2=six.text_type(x2),
y2=six.text_type(y2),
stroke=tick_color,
)
tick.set("stroke-width", six.text_type(theme.tick_width))
tick_group.append(tick)
# Tick label
if orient == "left":
x = label_x
y = projected_value
dy = "0.32em"
text_anchor = "end"
elif orient == "bottom":
x = projected_value
y = label_y
dy = "1em"
text_anchor = "middle"
label = ET.Element(
"text",
x=six.text_type(x),
y=six.text_type(y),
dy=dy,
fill=theme.label_color,
)
label.set("text-anchor", text_anchor)
label.set("font-family", theme.tick_font_family)
value = tick_formatter(value, i, tick_count)
label.text = six.text_type(value)
tick_group.append(label)
if zero_tick_group is not None:
group.append(zero_tick_group)
return group
def tick_format_function(value, index, tick_count):
"""
This example shows how to define a function to format tick values for
display.
:param value:
The value to be formatted.
:param index:
The index of the tick.
:param tick_count:
The total number of ticks being displayed.
:returns:
A stringified tick value for display.
"""
return six.text_type(value)
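# Example of a custom formatter with the signature documented above; wiring it
# to a chart is shown commented out because `chart` is assumed to exist:
def percent_tick_formatter(value, index, tick_count):
    return "{:.0%}".format(value)
# chart.add_x_axis(tick_formatter=percent_tick_formatter)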
|
nickromano/django-daily-digest
|
daily_digest/management/commands/send_daily_digest.py
|
from django.core.management.base import BaseCommand
from daily_digest.utils import send_daily_digest
class Command(BaseCommand):
help = "Send daily digest email."
def success_message(self, message):
if hasattr(self.style, "SUCCESS"):
self.stdout.write(self.style.SUCCESS(message))
else:
# Django 1.8
self.stdout.write(self.style.MIGRATE_SUCCESS(message))
def handle(self, *args, **options):
send_daily_digest()
self.success_message("Sent daily digest")
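# Typical scheduling (an assumption, not part of this repo): run the command
# once a day from cron, for example:
#
#     0 8 * * * /path/to/venv/bin/python /path/to/manage.py send_daily_digest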
|
nickromano/django-daily-digest
|
leather/data_types.py
|
#!/usr/bin/env python
from datetime import date, datetime
from decimal import Decimal
import six
class DataType(object):
"""
Base class for :class:`.Series` data types.
"""
@classmethod
def infer(cls, v):
for t in [DateTime, Date, Number, Text]:
if isinstance(v, t.types):
return t
raise TypeError("No data type available for %s" % type(v))
class Date(DataType):
"""
Data representing dates.
"""
types = (date,)
class DateTime(DataType):
"""
Data representing dates with times.
"""
types = (datetime,)
class Number(DataType):
"""
Data representing numbers.
"""
types = (int, float, Decimal)
class Text(DataType):
"""
Data representing text/strings.
"""
types = six.string_types
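# Example: infer() returns the first matching DataType class; the DateTime
# check runs before Date because datetime instances are also date instances.
if __name__ == "__main__":
    assert DataType.infer(3.14) is Number
    assert DataType.infer(date(2020, 1, 1)) is Date
    assert DataType.infer(datetime(2020, 1, 1, 12)) is DateTime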
|
nickromano/django-daily-digest
|
daily_digest/views.py
|
# -*- coding: utf-8 -*-
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from .config import daily_digest_config
from .utils import charts_data_for_config
@staff_member_required
def preview_daily_digest(request):
context = {"title": daily_digest_config.title, "charts": charts_data_for_config()}
return render(request, "daily_digest/email.html", context)
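# Hypothetical URL wiring for the preview view (the pattern is an assumption):
#
#     from django.conf.urls import url
#     from daily_digest.views import preview_daily_digest
#
#     urlpatterns = [url(r"^preview/$", preview_daily_digest)]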
|
nickromano/django-daily-digest
|
leather/ticks/base.py
|
#!/usr/bin/env python
class Ticker(object):
"""
Base class for ticker implementations.
"""
@property
def ticks(self):
raise NotImplementedError
@property
def min(self):
raise NotImplementedError
@property
def max(self):
raise NotImplementedError
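# Minimal sketch of a concrete Ticker, for illustration only (the real
# implementations live elsewhere in leather, e.g. ScoreTicker):
class FixedTicker(Ticker):
    """
    A ticker that yields a fixed, pre-sorted sequence of tick values.
    """
    def __init__(self, values):
        self._values = list(values)

    @property
    def ticks(self):
        return self._values

    @property
    def min(self):
        return self._values[0]

    @property
    def max(self):
        return self._values[-1]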
|
nickromano/django-daily-digest
|
leather/lattice.py
|
#!/usr/bin/env python
from leather.axis import Axis
from leather.chart import Chart
from leather.data_types import Date, DateTime
from leather.grid import Grid
from leather.scales import Scale, Linear, Temporal
from leather.series import Series
from leather.shapes import Line
from leather import theme
from leather.utils import X, Y
class Lattice(object):
"""
A grid of charts with synchronized shapes, scales, and axes.
Lattice only supports graphing a single series of data.
:param shape:
An instance of :class:`.Shape` to use to render all series. Defaults
to :class:`.Line` if not specified.
"""
def __init__(self, shape=None):
self._shape = shape or Line()
self._series = []
self._types = [None, None]
self._scales = [None, None]
self._axes = [None, None]
def set_x_scale(self, scale):
"""
Set the X :class:`.Scale` for this lattice.
"""
self._scales[X] = scale
def set_y_scale(self, scale):
"""
See :meth:`.Lattice.set_x_scale`.
"""
self._scales[Y] = scale
def add_x_scale(self, domain_min, domain_max):
"""
Create and add a :class:`.Scale`.
If the provided domain values are :class:`date` or :class:`datetime`
then a :class:`.Temporal` scale will be created; otherwise it will be
:class:`.Linear`.
If you want to set a custom scale class use :meth:`.Lattice.set_x_scale`
instead.
"""
scale_type = Linear
if isinstance(domain_min, Date.types) or isinstance(domain_min, DateTime.types):
scale_type = Temporal
self.set_x_scale(scale_type(domain_min, domain_max))
def add_y_scale(self, domain_min, domain_max):
"""
See :meth:`.Lattice.add_x_scale`.
"""
scale_type = Linear
if isinstance(domain_min, Date.types) or isinstance(domain_min, DateTime.types):
scale_type = Temporal
self.set_y_scale(scale_type(domain_min, domain_max))
def set_x_axis(self, axis):
"""
Set an :class:`.Axis` class for this lattice.
"""
self._axes[X] = axis
def set_y_axis(self, axis):
"""
See :meth:`.Lattice.set_x_axis`.
"""
self._axes[Y] = axis
def add_x_axis(self, ticks=None, tick_formatter=None, name=None):
"""
Create and add an X :class:`.Axis`.
If you want to set a custom axis class use :meth:`.Lattice.set_x_axis`
instead.
"""
self._axes[X] = Axis(ticks=ticks, tick_formatter=tick_formatter, name=name)
def add_y_axis(self, ticks=None, tick_formatter=None, name=None):
"""
See :meth:`.Lattice.add_x_axis`.
"""
self._axes[Y] = Axis(ticks=ticks, tick_formatter=tick_formatter, name=name)
def add_one(self, data, x=None, y=None, title=None):
"""
Add a data series to this lattice.
:param data:
A sequence of data suitable for constructing a :class:`.Series`,
or a sequence of such objects.
:param x:
See :class:`.Series`.
:param y:
See :class:`.Series`.
:param title:
A title to render above this chart.
"""
series = Series(data, x=x, y=y, name=title)
for dimension in [X, Y]:
if self._types[dimension]:
if series._types[dimension] is not self._types[dimension]:
raise TypeError("All data series must have the same data types.")
else:
self._types[dimension] = series._types[dimension]
self._shape.validate_series(series)
self._series.append(series)
def add_many(self, data, x=None, y=None, titles=None):
"""
Same as :meth:`.Lattice.add_one` except :code:`data` is a list of data
series to be added simultaneously.
See :meth:`.Lattice.add_one` for other arguments.
Note that :code:`titles` is a sequence of titles that must be the same
length as :code:`data`.
"""
for i, d in enumerate(data):
title = titles[i] if titles else None
self.add_one(d, x=x, y=y, title=title)
def to_svg(self, path=None, width=None, height=None):
"""
Render the lattice to an SVG.
See :class:`.Grid` for additional documentation.
"""
layers = [(s, self._shape) for s in self._series]
if not self._scales[X]:
self._scales[X] = Scale.infer(layers, X, self._types[X])
if not self._scales[Y]:
self._scales[Y] = Scale.infer(layers, Y, self._types[Y])
if not self._axes[X]:
self._axes[X] = Axis()
if not self._axes[Y]:
self._axes[Y] = Axis()
grid = Grid()
for i, series in enumerate(self._series):
chart = Chart(title=series.name)
chart.set_x_scale(self._scales[X])
chart.set_y_scale(self._scales[Y])
chart.set_x_axis(self._axes[X])
chart.set_y_axis(self._axes[Y])
chart.add_series(series, self._shape)
grid.add_one(chart)
return grid.to_svg(path, width, height)
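# Minimal usage sketch (the data values are made up for illustration):
if __name__ == "__main__":
    lattice = Lattice()
    lattice.add_many(
        [[(0, 1), (1, 2)], [(0, 3), (1, 1)]],
        titles=["first", "second"],
    )
    lattice.to_svg("/tmp/lattice.svg")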
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
|
ExamplesPython_3.6/Chapter9/CamShift.py
|
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 9
CamShift: Tracks a region by implementing the CamShift technique. Similar to
MeanShift, but it uses back projection to update the region position and size
'''
# Set module functions
from ImageUtilities import imageReadRGB, showImageRGB, createImageF
from ImageRegionsUtilities import densityHistogram, colourFeature, meanShift, backProjection, backProjectionImage, regionSize
# Math and iteration
from math import exp
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageNames = Input image names
initialPos = position of the region [column, row]
size = Size of the region [column, row]
sigma = weight control
'''
pathToDir = "../../Images/Chapter9/Input/"
imageNames = ["frame1.bmp", "frame2.bmp", "frame3.bmp", "frame4.bmp", "frame5.bmp", "frame6.bmp"]
histoSize = 64
initialPos = [100, 60]
sizeReg = [12, 18]
sigma = 6.0
# Region position and sizes in each frame
positions = [ ]
positions.append(initialPos)
sizes = [ ]
sizes.append(sizeReg)
# Read image
inputImage, width, height = imageReadRGB(pathToDir + imageNames[0])
#showImageRGB(inputImage)
# Density and back projection of the region to track
q = densityHistogram(inputImage, positions[0], sizeReg, sigma, histoSize)
backProjImage = backProjectionImage(inputImage, q, histoSize)
#plot3DHistogram(q)
# For each frame
numImages = len(imageNames)
for frameNum in range(1, numImages):
# Read next frame and estimate the position by using meanshift
currentImage, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
newPos = meanShift(currentImage, q, sizeReg, sigma, histoSize, positions[frameNum-1])
# Back project and use the projections to determine the new position and size
newBackProjImage = backProjectionImage(currentImage, q, histoSize)
pos,newSize = regionSize(backProjImage, newBackProjImage, \
positions[frameNum-1], newPos, sizeReg)
positions.append(pos)
sizes.append(newSize)
# Update density and image
inputImage = currentImage
sizeReg = newSize
backProjImage = newBackProjImage
#print(positions)
#print(sizes)
# Show results
for frameNum in range(0, numImages):
image, _, _ = imageReadRGB(pathToDir + imageNames[frameNum])
p = positions[frameNum]
s = sizes[frameNum]
borderDistance = [s[0] -5, s[1] -5]
for x, y in itertools.product(range(p[0]-s[0], p[0]+s[0]), \
range(p[1]-s[1], p[1]+s[1])):
if abs(x-p[0]) > borderDistance[0] or abs(y-p[1]) > borderDistance[1]:
image[y,x] = [20, 20, 80]
showImageRGB(image)
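# For comparison only (OpenCV is not used by these book examples): the same
# technique ships as cv2.CamShift(probImage, window, criteria), which returns
# the rotated rectangle and updated search window for the tracked region.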
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
|
ExamplesPython_3.6/Chapter10/Reprojection.py
|
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 10
Reprojection: Compute a projection from seven corresponding image and 3D points and re-project
the image to create a new view of the scene
'''
# Set module functions
from ImageUtilities import imageReadRGB, imageReadL, showImageRGB, createImageRGB
from GeometricUtilities import projectionCubePoints, computeProjection, getPointColours, fillImageColours
# Math
from math import sin, cos
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
maskName = Mask image name
'''
pathToDir = "../../Images/Chapter10/Input/"
imageName = "cube1.png"
maskName = "mask1.png"
# Read image data
inputImage, width, height = imageReadRGB(pathToDir + imageName)
maskImage, width, height = imageReadL(pathToDir + maskName)
showImageRGB(inputImage)
centreX, centreY = width/2, height/2
# Corresponding points in the cube image and 3D world
pts = [[131-centreX,378-centreY],[110-centreX,188-centreY],
[200-centreX,70-centreY],[412-centreX,100-centreY],
[410-centreX,285-centreY],[349-centreX,418-centreY],
[345-centreX,220-centreY]]
q = [[0,0,1],[0,1,1],
[0,1,0],[1,1,0],
[1,0,0],[1,0,1],
[1,1,1]]
# Obtain the projection
p = computeProjection(pts,q)
# Get the image position of the 3D cube points
npts = 100
xy = projectionCubePoints(npts, p, centreX, centreY)
# Get the colour of the points
colours = getPointColours(xy, maskImage, inputImage)
# Transform the q points and store in qt
qT = [ ]
angY = .3
angX = -.2
for pointNum in range(0,len(q)):
s = [q[pointNum][0]-.5, q[pointNum][1]-.5, q[pointNum][2]-.5]
rx = .5 + cos(angY)*s[0] + sin(angY)*s[2]
ry = .5 + sin(angX)*sin(angY)*s[0] + cos(angX)*s[1] - sin(angX)*cos(angY)*s[2]
rz = .5 - cos(angX)*sin(angY)*s[0] + sin(angX)*s[1] + cos(angX)*cos(angY)*s[2]
qT.append([rx,ry,rz])
# Get the projection of the transformed points
p = computeProjection(pts,qT)
# The position of the cube points according to the projection of the transformed data
xy = projectionCubePoints(npts, p, centreX, centreY)
# Use the colours of the original image and the points of the transformed projection to generate an image
tImage = createImageRGB(width, height)
fillImageColours(colours, xy, tImage)
showImageRGB(tImage)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
|
ExamplesPython_3.6/Chapter5/InvariantGeneralizedHoughTransform.py
|
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 5
InvariantGeneralizedHoughTransform: Shape detection by the invariant generalized Hough transform
'''
# Set module functions
from ImageUtilities import imageReadL, showImageF, showImageL, createImageF
from ImageOperatorsUtilities import applyCannyEdgeDetector
from ImageRegionsUtilities import computeReferencePoint
from PlotUtilities import plot3DHistogram
# Math and iteration
from math import pi, tan, sqrt, atan
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
gaussianKernelSize = Gaussian kernel size. Filter noise
sobelKernelSize = Sobel kernel size. Edge detection
upperT = Upper threshold
lowerT = Lower threshold
numEntries = Size of the R table
minimaDistPoints = To avoid using pairs of points that are too close together
maxDistPoints = To gather evidence in local regions
'''
pathToDir = "../../Images/Chapter5/Input/"
imageName = "Works.png"
templateName = "TemplateWorks.png"
gaussianKernelSize = 5
sobelKernelSize = 3
upperT = 0.4
lowerT = 0.3
numEntries = 90
minimaDistPoints = 20
maxDistPoints = 200
# Defines the direction of the line used to find pairs of points
alpha = pi/2.0
# Read image into array and show
templateImage, widthTemplate, heightTemplate = imageReadL(pathToDir + templateName)
inputImage, width, height = imageReadL(pathToDir + imageName)
showImageL(templateImage)
showImageL(inputImage)
# Compute edges
magnitudeTemplate, angleTemplate = applyCannyEdgeDetector(templateImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
magnitude, angle = applyCannyEdgeDetector(inputImage, gaussianKernelSize, sobelKernelSize, upperT, lowerT)
showImageF(magnitudeTemplate)
showImageF(magnitude)
# Compute reference point in the template. Template centre
refPoint, edgePoints = computeReferencePoint(magnitudeTemplate)
# Find the pairs of points in the template according to alpha angle
pairPoints = []
numPts = len(edgePoints)
for p in range(0, numPts):
y1, x1 = (edgePoints[p])[0], (edgePoints[p])[1]
# We are looking for two points along the line with slope m
m = tan(angleTemplate[y1,x1] - alpha)
if m>-1 and m<1:
if x1 < refPoint[1]:
xi, xf, step = x1 + minimaDistPoints,widthTemplate, 1
else:
xi, xf, step = x1 - minimaDistPoints, 0, -1
for x2 in range(xi, xf, step):
y2 = int(y1 - m * (x2 - x1))
if y2 > 0 and y2 < heightTemplate and magnitudeTemplate[y2,x2] != 0:
pairPoints.append((y2,x2,y1,x1))
break
else:
m = 1.0/m
if y1 < refPoint[0]:
yi, yf, step = y1 + minimaDistPoints,heightTemplate, 1
else:
yi, yf, step = y1 - minimaDistPoints, 0, -1
for y2 in range(yi, yf, step):
x2 = int(x1 - m * (y2 - y1))
if x2 > 0 and x2 < widthTemplate and magnitudeTemplate[y2,x2] != 0:
pairPoints.append((y2,x2,y1,x1))
break
# Build table (k,c) from each pair of points
rTable = [[] for entryIndex in range(numEntries)]
deltaAngle = pi / (numEntries - 1.0)
numPairs = len(pairPoints)
for pair in range(0, numPairs):
y2, x2 = (pairPoints[pair])[0], (pairPoints[pair])[1]
y1, x1 = (pairPoints[pair])[2], (pairPoints[pair])[3]
# Compute beta
phi1, phi2 = tan(-angleTemplate[y1,x1]), tan(-angleTemplate[y2,x2])
if 1.0+phi1*phi2 != 0:
beta = atan((phi1-phi2)/(1.0+phi1*phi2))
else:
beta=1.57
# Compute k
if x1- refPoint[1] !=0:
m = atan(-float(y1-refPoint[0])/(x1-refPoint[1]))
else:
m =1.57
k = angleTemplate[y1,x1] - m
# Scale
distCentre = sqrt((y1-refPoint[0])*(y1-refPoint[0]) + (x1-refPoint[1])*(x1-refPoint[1]))
distPoints = sqrt((y2-y1)*(y2-y1) + (x2-x1)*(x2-x1))
c = distCentre/distPoints
# Insert in the table. The angle is in the interval -pi/2 to pi/2
entryIndex = int((beta+(pi/2.0))/deltaAngle)
entry = rTable[entryIndex]
entry.append((k,c))
# Gather evidence for the location
accumulator = createImageF(width, height)
for x1,y1 in itertools.product(range(0, width), range(0, height)):
if magnitude[y1,x1] != 0:
# Look for potential second points along the line
secondPoints = [ ]
m = tan(angle[y1,x1] - alpha)
if m>-1 and m<1:
for delta in range(minimaDistPoints, maxDistPoints):
x2 = min(x1 + delta, width-1)
y2 = int(y1 - m * (x2 - x1))
if y2 > 0 and y2 < height and magnitude[y2,x2] != 0:
secondPoints.append((y2,x2))
break
for delta in range(minimaDistPoints, maxDistPoints):
x2 = max(0, x1 - delta)
y2 = int(y1 - m * (x2 - x1))
if y2 > 0 and y2 < height and magnitude[y2,x2] != 0:
secondPoints.append((y2,x2))
break
else:
m = 1.0/m
for delta in range(minimaDistPoints, maxDistPoints):
y2 = min(y1 + delta, height-1)
x2 = int(x1 - m * (y2 - y1))
if x2 > 0 and x2 < width and magnitude[y2,x2] != 0:
secondPoints.append((y2,x2))
break
for delta in range(minimaDistPoints, maxDistPoints):
y2 = max(0, y1 - delta)
x2 = int(x1 - m * (y2 - y1))
if x2 > 0 and x2 < width and magnitude[y2,x2] != 0:
secondPoints.append((y2,x2))
break
# Gather evidence
numPts = len(secondPoints)
for ptIndex in range(0, numPts):
secondPoint = secondPoints[ptIndex]
y2, x2 = secondPoint[0], secondPoint[1]
distPoints = sqrt((y2-y1)*(y2-y1) + (x2-x1)*(x2-x1))
# Compute beta
phi1, phi2 = tan(-angle[y1,x1]), tan(-angle[y2,x2])
if 1.0+phi1*phi2 != 0:
beta = atan((phi1-phi2)/(1.0+phi1*phi2))
else:
beta=1.57
# Find entry in table
entryIndex = int((beta+(pi/2.0))/deltaAngle)
row = rTable[entryIndex]
numEntriesinRow = len(row)
for kIndex in range(0, numEntriesinRow):
k, c = (row[kIndex])[0], (row[kIndex])[1]
distCentre = c * distPoints
m = tan(angle[y1,x1] - k)
if m>-1 and m<1:
for x in range(0, width):
y = int(y1 - m * (x - x1))
d = sqrt((x-x1)*(x-x1)+(y-y1)*(y-y1))
if y > 0 and y < height and abs(d-distCentre) < 3:
accumulator[y,x] += 3.0 - abs(d-distCentre)
else:
m = 1.0/m
for y in range(0, height):
x = int(x1 - m * (y - y1))
d = sqrt((x-x1)*(x-x1)+(y-y1)*(y-y1))
if x > 0 and x < width and abs(d-distCentre) < 3:
accumulator[y,x] += 3.0 - abs(d-distCentre)
# Plot accumulator
plot3DHistogram(accumulator)
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
|
ExamplesPython_3.6/Chapter4/HarrisOperator.py
|
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 4
HarrisOperator: Compute corners by using the Harris operator or minimal direction
'''
# Set module functions
from ImageUtilities import imageReadL, createImageF, showImageF, showImageL
from ImageOperatorsUtilities import applyCannyEdgeDetector
# Iteration
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
GaussianKernelSize = Gaussian kernel size. Filter noise
sobelKernelSize = Sobel kernel size. Edge detection
upperT = Upper threshold
lowerT = Lower threshold
kernelSize = Size of the kernel
k = Second term constant
op = H for Harris operator
M for minimum direction
'''
pathToDir = "../../Images/Chapter4/Input/"
imageName = "Shapes.png"
GaussianKernelSize = 7
sobelKernelSize = 3
upperT = 0.4
lowerT = 0.2
kernelSize = 9
k = .02
op = "H"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# We apply Canny to obtain the edges from the image
# but also need the results of the Sobel operator (Gradient)
magnitude, angle, mX, mY = applyCannyEdgeDetector(inputImage,
GaussianKernelSize, sobelKernelSize, upperT, lowerT, True)
# The center of the kernel
kernelCentre = int((kernelSize - 1) / 2)
# Compute curvature
curvature = createImageF(width, height)
for x,y in itertools.product(range(0, width), range(0, height)):
# If it is an edge
if magnitude[y,x] > 0:
A, B, C = 0.0, 0.0, 0.0
for wx,wy in itertools.product(range(0, kernelSize), range(0, kernelSize)):
posY = y + wy - kernelCentre
posX = x + wx - kernelCentre
if posY > -1 and posY < height and posX > -1 and posX < width:
A += mX[posY,posX] * mX[posY,posX]
B += mY[posY,posX] * mY[posY,posX]
C += mX[posY,posX] * mY[posY,posX]
if op == "H":
curvature[y,x] = (A * B) - (C * C) - (k * ((A+B) * (A+B)))
if op == "M":
d = mX[y,x] * mX[y,x] + mY[y,x] * mY[y,x]
if d != 0.0:
curvature[y,x] = (A * mY[y,x] * mY[y,x] - \
2.0 * C * mX[y,x] * mY[y,x] + \
B * mX[y,x] * mX[y,x]) / d
showImageF(curvature)
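# The "H" branch above computes the Harris response R = det(M) - k * trace(M)^2
# for the structure tensor M = [[A, C], [C, B]], where A = sum(mX^2),
# B = sum(mY^2) and C = sum(mX * mY) over the kernel window; large positive
# values of R mark corners.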
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
|
ExamplesPython_3.6/Chapter3/Dilation.py
|
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 3
Dilation: Dilation morphological filter
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageL
# Iteration
from timeit import itertools
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
kernelSize = Size of the kernel
'''
pathToDir = "../../Images/Chapter3/Input/"
imageName = "Logs.png"
kernelSize = 5
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Create Kernel
kernelCentre = int((kernelSize - 1) / 2)
kernelImage = createImageL(kernelSize, kernelSize)
# Set the pixels of a flat kernel
for x in range(0, kernelSize):
for y in range(0, kernelSize):
kernelImage[y,x] = 1
# Create images to store the result
outputImage = createImageL(width, height)
# Apply kernel
kernelCentre = int((kernelSize - 1) / 2)
for x,y in itertools.product(range(0, width), range(0, height)):
maxValue = 0
for wx,wy in itertools.product(range(0, kernelSize), range(0, kernelSize)):
posY = y + wy - kernelCentre
posX = x + wx - kernelCentre
if posY > -1 and posY < height and posX > -1 and posX < width:
sub = float(inputImage[posY,posX]) - kernelImage[wy, wx]
if sub > 0 and sub > maxValue:
maxValue = sub
outputImage[y,x] = maxValue
# Show output image
showImageL(outputImage)
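# For comparison only (assumes SciPy, which these book examples do not use):
# flat grayscale dilation is a neighbourhood maximum, e.g.
#
#     from scipy.ndimage import grey_dilation
#     output = grey_dilation(inputImage, size=(kernelSize, kernelSize))
#
# Note that the loop above also subtracts the kernel value (1) from each pixel,
# so its output is offset slightly from a plain neighbourhood maximum.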
|