max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
antlir/nspawn_in_subvol/plugins/launch_repo_servers.py | baioc/antlir | 28 | 12762351 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import signal
import socket
import subprocess
import textwrap
import time
from contextlib import ExitStack, contextmanager
from typing import List, NamedTuple, Optional
from antlir.common import (
FD_UNIX_SOCK_TIMEOUT,
check_popen_returncode,
get_logger,
listen_temporary_unix_socket,
recv_fds_from_unix_sock,
)
from antlir.fs_utils import Path
log = get_logger()
_mockable_popen_for_repo_server = subprocess.Popen
def _make_debug_print(logger_name, fstring):
    """Return Python source for a `print` that mimics a DEBUG log line.

    The returned statement is spliced into the `python3 -c` script built by
    `_make_sockets_and_send_via` and writes to stderr (stdout must stay clean
    there, since fd 1 may be the Unix socket).

    NOTE(review): `logger_name` and `fstring` are currently unused -- the
    logger name and message are hard-coded below to match the only call site;
    confirm before relying on these parameters.
    """
    t = time.time()
    # Timestamp of "now" -- the generated print is static text, so it carries
    # the time this helper ran, not the time the child script executes.
    ymdhms = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
    msecs = int((t - int(t)) * 1000)
    return (
        "print("
        # Emulate the format of `init_logging(debug=True)`
        + repr(f"DEBUG _make_sockets_and_send_via {ymdhms},{msecs:03} ")
        + " + f'Sending {num_socks} FDs to parent', file=sys.stderr)"
    )
def _make_sockets_and_send_via(*, num_socks: int, unix_sock_fd: int):
    """
    Creates a TCP stream socket and sends it elsewhere via the provided Unix
    domain socket file descriptor. This is useful for obtaining a socket
    that belongs to a different network namespace (i.e. creating a socket
    inside a container, but binding it from outside the container).

    Returns an argv list (`python3 -c <script>`) to be run in the target
    namespace; the script creates `num_socks` AF_INET stream sockets and
    passes their FDs back over `unix_sock_fd` via SCM_RIGHTS.

    IMPORTANT: This code must not write anything to stdout, the fd can be 1.
    """
    # NB: Some code here is (sort of) copy-pasta'd in `send_fds_and_run.py`,
    # but it's not obviously worthwhile to reuse it here.
    return [
        "python3",
        "-c",
        # The script below is assembled from string pieces with the runtime
        # values (`num_socks`, `unix_sock_fd`, timeout) interpolated between
        # them; `textwrap.dedent` strips the common indentation afterwards.
        textwrap.dedent(
            """
            import array, contextlib, socket, sys
            def send_fds(sock, msg: bytes, fds: 'List[int]'):
                num_sent = sock.sendmsg([msg], [(
                    socket.SOL_SOCKET, socket.SCM_RIGHTS,
                    array.array('i', fds).tobytes(),
                    # Future: is `flags=socket.MSG_NOSIGNAL` a good idea?
                )])
                assert len(msg) == num_sent, (msg, num_sent)
            num_socks = """
            + str(num_socks)
            + """
            """  # indentation for the debug print
            + (
                _make_debug_print(
                    "_make_sockets_and_send_via",
                    "f'Sending {num_socks} FDs to parent'",
                )
                if log.isEnabledFor(logging.DEBUG)
                else ""
            )
            + """
            with contextlib.ExitStack() as stack:
                # Make a socket in this netns, and send it to the parent.
                lsock = stack.enter_context(
                    socket.socket(fileno="""
            + str(unix_sock_fd)
            + """)
                )
                lsock.settimeout("""
            + str(FD_UNIX_SOCK_TIMEOUT)
            + """)
                csock = stack.enter_context(lsock.accept()[0])
                csock.settimeout("""
            + str(FD_UNIX_SOCK_TIMEOUT)
            + """)
                send_fds(csock, b'ohai', [
                    stack.enter_context(socket.socket(
                        socket.AF_INET, socket.SOCK_STREAM
                    )).fileno()
                    for _ in range(num_socks)
                ])
            """
        ),
    ]
def _create_sockets_inside_netns(
    target_pid: int, num_socks: int
) -> List[socket.socket]:
    """
    Creates `num_socks` TCP stream sockets inside the network namespace of
    the process `target_pid` (via `sudo nsenter --net`) and returns them as
    `socket.socket` objects owned by the calling process.
    """
    with listen_temporary_unix_socket() as (
        unix_sock_path,
        list_sock,
    ), subprocess.Popen(
        [
            # NB: /usr/local/fbcode/bin must come first because /bin/python3
            # may be very outdated
            "sudo",
            "env",
            "PATH=/usr/local/fbcode/bin:/bin",
            "nsenter",
            "--net",
            "--target",
            str(target_pid),
            # NB: We pass our listening socket as FD 1 to avoid dealing with
            # the `sudo` option of `-C`. Nothing here writes to `stdout`:
            *_make_sockets_and_send_via(unix_sock_fd=1, num_socks=num_socks),
        ],
        stdout=list_sock.fileno(),
    ) as sock_proc:
        # Receive the FDs the child sent over the Unix socket and wrap them.
        repo_server_socks = [
            socket.socket(fileno=fd)
            for fd in recv_fds_from_unix_sock(unix_sock_path, num_socks)
        ]
        assert len(repo_server_socks) == num_socks, len(repo_server_socks)
    # The `with Popen` exit waited for the child, so `returncode` is set.
    check_popen_returncode(sock_proc)
    return repo_server_socks
class RepoServer(NamedTuple):
    """One `repo-server` instance: the snapshot it serves, the localhost
    port it should listen on, the TCP socket bound for it, and (once
    launched) its process handle."""

    rpm_repo_snapshot: Path
    port: int
    # The socket & server are invalid after the `_launch_repo_server` context
    sock: socket.socket
    proc: Optional[subprocess.Popen] = None

    def __format__(self, _spec):
        # Compact one-line description for log messages; the format spec is
        # deliberately ignored.
        return f"RepoServer({self.rpm_repo_snapshot}, port={self.port})"
@contextmanager
def _launch_repo_server(repo_server_bin: Path, rs: RepoServer) -> RepoServer:
    """
    Invokes `repo-server` with the given snapshot; passes it ownership of
    the bound TCP socket -- it listens & accepts connections.

    Yields a copy of the `RepoServer` with `proc` populated; the socket and
    the server process are only valid inside this context.
    """
    assert rs.proc is None
    rs.sock.bind(("127.0.0.1", rs.port))
    # Socket activation: allow requests to queue up, which means that
    # we don't have to explicitly wait for the repo servers to start --
    # any in-container clients will do so if/when needed. This reduces
    # interactive `=container` boot time by hundreds of ms.
    rs.sock.listen()  # leave the request queue size at default
    with rs.sock, _mockable_popen_for_repo_server(
        [
            repo_server_bin,
            # The server inherits the listening socket via `pass_fds` below.
            f"--socket-fd={rs.sock.fileno()}",
            # TODO: Once the committed BAs all have a `repo-server` that
            # knows to append `/snapshot` to the path, remove it here, and
            # tidy up the snapshot resolution code in `repo_server.py`.
            f"--snapshot-dir={rs.rpm_repo_snapshot / 'snapshot'}",
            *(["--debug"] if log.isEnabledFor(logging.DEBUG) else []),
        ],
        pass_fds=[rs.sock.fileno()],
    ) as server_proc:
        try:
            # pyre-fixme[7]: Expected `RepoServer` but got
            # `Generator[RepoServer, None, None]`.
            yield rs._replace(proc=server_proc)
        finally:
            # Uh-oh, the server already exited. Did it crash?
            if server_proc.poll() is not None:  # pragma: no cover
                check_popen_returncode(server_proc)
            else:
                # Although `repo-server` is a read-only proxy, give it the
                # chance to do graceful cleanup.
                log.debug("Trying to gracefully terminate `repo-server`")
                # `atexit` (used in an FB-specific `repo-server` plugin) only
                # works on graceful termination. In `repo_server_main.py`, we
                # set up graceful handling of `SIGTERM`. We signal once, and
                # need to wait for it to clean up the resources it must to free.
                # Signaling twice would interrupt cleanup (because this is
                # Python, lol).
                server_proc.send_signal(signal.SIGTERM)
                try:
                    server_proc.wait(60.0)
                except subprocess.TimeoutExpired:  # pragma: no cover
                    log.warning(
                        f"Killing unresponsive `repo-server` {server_proc.pid}"
                    )
                    server_proc.kill()
@contextmanager
def launch_repo_servers_for_netns(
    *, target_pid: int, snapshot_dir: Path, repo_server_bin: Path
) -> List[RepoServer]:
    """
    Creates sockets inside the supplied netns, and binds them to the
    supplied ports on localhost.

    Yields a list of `RepoServer`s -- one per port listed in the snapshot's
    `ports-for-repo-server` file -- each with a running server process.
    (The original docstring said "(host, port) pairs"; the actual yielded
    items are `RepoServer` tuples.)
    """
    # The snapshot ships the set of ports its in-container clients expect.
    with open(snapshot_dir / "ports-for-repo-server") as infile:
        repo_server_ports = {int(v) for v in infile.read().split() if v}
    with ExitStack() as stack:
        # Start a repo-server instance per port. Give each one a socket
        # bound to the loopback inside the supplied netns. We don't
        # `__enter__` the sockets since the servers take ownership of them.
        servers = []
        for sock, port in zip(
            _create_sockets_inside_netns(target_pid, len(repo_server_ports)),
            repo_server_ports,
        ):
            rs = stack.enter_context(
                # pyre-fixme[6]: Expected `ContextManager[Variable[
                # contextlib._T]]` for 1st param but got `RepoServer`.
                _launch_repo_server(
                    repo_server_bin,
                    RepoServer(
                        rpm_repo_snapshot=snapshot_dir,
                        port=port,
                        sock=sock,
                    ),
                )
            )
            log.debug(f"Launched {rs} in {target_pid}'s netns")
            servers.append(rs)
        # pyre-fixme[7]: Expected `List[RepoServer]` but got
        # `Generator[List[typing.Any], None, None]`.
        yield servers
| 2.1875 | 2 |
yapu/meta/meta.py | pestrela/yapu | 0 | 12762352 | <filename>yapu/meta/meta.py<gh_stars>0
from ..imports.internal import *
from ..meta.listify import *
###
### very generic wrappers go here
###
def percentage(a, b=None, digits=1):
    """Return ``a`` as a percentage of ``b``.

    With ``b`` omitted, ``a`` is treated as a fraction in [0, 1].
    ``digits`` is accepted for API symmetry with ``st_percent`` but does not
    affect the returned float.
    """
    denominator = 1.0 if b is None else float(b)
    return float(a) * 100.0 / denominator
def st_percent(a, b=None, digits=1):
    """Format a percentage as a string with ``digits`` decimals and a '%' sign.

    form1: a = 0..1 (fraction)
    form2: a, b given -> a as a percentage of b
    """
    value = percentage(a, b, digits=digits)
    fmt = "%%0.%df" % digits
    return (fmt % value) + "%"
def st_percentage(a, b, digits=0):
    """Format ``a`` (padded to width 7) followed by its percentage of ``b``."""
    pct = st_percent(a, b, digits)
    return "%7s (%s of %s)" % (a, pct, b)
def st_collapse(st, sep=","):
    """Collapse ``st`` into one string.

    ``None`` collapses to the empty string; a list of strings is joined with
    ``sep``. The normalization of ``st`` is delegated to ``listify``.
    """
    return sep.join(listify(st))
# http://stackoverflow.com/questions/31683959/the-zip-function-in-python-3
def zip_python3(*args):
    """Python-2-style zip(): eagerly materialize the pairs into a list."""
    return [*zip(*args)]
def tuple_swap(t):
    """Return the elements of ``t`` in reverse order, as a tuple."""
    return tuple(t[::-1])
def char_range(c1, c2):
    """Generates the characters from `c1` to `c2`, inclusive.

    Fixed: the original used ``xrange``, which does not exist in Python 3
    (this module is otherwise Python 3 code), so every call raised NameError.
    """
    for code in range(ord(c1), ord(c2) + 1):
        yield chr(code)
def irange(a, b=None, c=None):
    """
    Same signature as range(), but starts at 1 by default and is always
    inclusive of the last element.

    irange(n)          -> 1, 2, ..., n
    irange(a, b)       -> a, a+1, ..., b
    irange(a, b, step) -> a, a+step, ..., b (b included if reached exactly)
    """
    step = 1 if c is None else c
    if b is None:
        # One-argument form: step is ignored, mirroring the original.
        for value in range(a):
            yield value + 1
    else:
        stop = b + 1 if step > 0 else b - 1
        yield from range(a, stop, step)
def irange_count(a, n=0, step=None):
    """
    From a given number, count ``n`` elements around it (up for n >= 0,
    down for n < 0). ``step`` is optional and always given positive, even
    when counting down.
    """
    effective_step = 1 if step is None else step
    if n < 0:
        effective_step = -effective_step
    yield from irange(a, a + n, effective_step)
def print_nls(how_many=1):
    """Print ``how_many`` blank lines."""
    for _ in range(how_many):
        print_nl()
def print_nl(what=None):
    """Print ``what`` followed by a newline; with no argument, print a blank line."""
    text = "" if what is None else what
    print(text)
def print_nonl(*args, **kwargs):
    """Like the builtin print(), but suppresses the trailing newline.

    All other keyword arguments (sep, file, flush, ...) are forwarded
    unchanged; ``end`` is forced to the empty string.
    """
    print(*args, end="", **kwargs)
def print_as_list(what):
    """Materialize ``what`` and print each element on its own line."""
    for element in list(what):
        print(element)
| 3.375 | 3 |
venv/Lib/site-packages/pyproj/__main__.py | AndrCarvalho/DroneOpenTool | 1 | 12762353 | """
This is the main entry point for pyproj
e.g. python -m pyproj
"""
import argparse
from pyproj import __proj_version__, __version__, _show_versions
# Command-line interface for `python -m pyproj`: with -v/--verbose print a
# full dependency/version report, otherwise a one-line summary plus usage.
parser = argparse.ArgumentParser()
parser.add_argument(
    "-v",
    "--verbose",
    help="Show verbose debugging version information.",
    action="store_true",
)
args = parser.parse_args()
if args.verbose:
    # Detailed report of pyproj, PROJ, and dependency versions.
    _show_versions.show_versions()
else:
    # Default: short version line, then the argparse help text.
    print("pyproj version: {} [PROJ version: {}]".format(__version__, __proj_version__))
    parser.print_help()
| 2.46875 | 2 |
nemo/collections/asr/parts/numba/rnnt_loss/utils/global_constants.py | madhukarkm/NeMo | 4,145 | 12762354 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import numpy as np
from numba import float32
# Internal globals
_THREADS_PER_BLOCK = 32  # threads per block; exposed via threads_per_block()
_WARP_SIZE = 32  # warp size; exposed via warp_size()
_DTYPE = float32  # numba dtype used by the kernels; exposed via dtype()
# Constants
FP32_INF = np.inf
FP32_NEG_INF = -np.inf
THRESHOLD = 1e-1  # numeric tolerance -- presumably for comparisons; confirm at call sites
"""
Getters
"""
def threads_per_block():
    """Return the module-level threads-per-block setting.

    (The ``global`` statement was dropped: it is a no-op for reads.)
    """
    return _THREADS_PER_BLOCK
def warp_size():
    """Return the module-level warp-size setting.

    (The ``global`` statement was dropped: it is a no-op for reads.)
    """
    return _WARP_SIZE
def dtype():
    """Return the numba dtype used by the kernels.

    (The ``global`` statement was dropped: it is a no-op for reads.)
    """
    return _DTYPE
# RNNT STATUS
class RNNTStatus(enum.Enum):
    """Status codes returned by the RNNT loss computation."""

    RNNT_STATUS_SUCCESS = 0
    RNNT_STATUS_INVALID_VALUE = 1
| 1.882813 | 2 |
nlu_analysers/rasa_analyser.py | kanbehmw/NLU-Evaluation-Scripts | 0 | 12762355 | <filename>nlu_analysers/rasa_analyser.py
from luis_analyser import *
class RasaAnalyser(LuisAnalyser):
    """Analyser that sends NLU queries to a Rasa server, reusing the
    LUIS-style response handling inherited from `LuisAnalyser`."""

    def __init__(self, rasa_url, project):
        # NOTE(review): `super(LuisAnalyser, self)` deliberately(?) skips
        # LuisAnalyser.__init__ and calls its base class instead -- possibly
        # because LuisAnalyser's __init__ needs LUIS-specific arguments.
        # Confirm this is intended and not a typo for super(RasaAnalyser, ...).
        super(LuisAnalyser, self).__init__()
        self.project = project
        # Query template: first %s is the text, second %s the project name.
        self.url = rasa_url + "?q=%s&project=%s"
| 1.984375 | 2 |
package_storage/pkg_stack/__init__.py | ds-suyog/stack-ops | 0 | 12762356 | <reponame>ds-suyog/stack-ops
name = "mystack" | 0.875 | 1 |
places.py | WZBSocialScienceCenter/covid19-placesapi | 1 | 12762357 | import math
import json
import os
import sys
from datetime import datetime
import pandas as pd
import googlemaps
import populartimes
from apikeys import API_KEY
#%%
# Each search is a triple: (query text, Google Places "type" filter or None,
# open_now restriction or None). Commented-out entries are searches from
# earlier runs that are currently disabled.
PLACE_SEARCHES = [
    # ('restaurant', 'restaurant', True),
    # ('bar', 'bar', True),
    ('fast food', None, True),
    # ('club', None, True),
    ('train station', None, None),
    # ('tourist information', None, True),
    # ('sights', 'tourist_attraction', None),
    # ('park', 'park', None),
    # ('mall', 'shopping_mall', True),
    # ('shopping', 'shopping_mall', True),
    # ('supermarket', 'supermarket', True),
    # ('street market', None, None),
    # ('hardware store', 'hardware_store', True)
]
PLACE_SEARCH_RADIUS = 20000  # search radius around the city center, in meters
LIMIT_NUM_PLACES = 20  # max. number of result places kept per (city, query)
RESULT_FILE = 'data/pois/%s.csv'  # POI table; %s is the run timestamp
RESULT_POP_FILE = 'data/pois/%s_pop.json'  # popularity scores; %s is the run timestamp
#%%
def haversine(a_lat, a_lng, b_lat, b_lng):
    """Great-circle distance in km between two (lat, lng) points in degrees,
    using the haversine formula with a spherical Earth of radius 6371 km."""
    EARTH_RADIUS_KM = 6371
    lat1, lng1, lat2, lng2 = (
        math.radians(v) for v in (a_lat, a_lng, b_lat, b_lng)
    )
    sin_half_dlat = math.sin((lat2 - lat1) / 2)
    sin_half_dlng = math.sin((lng2 - lng1) / 2)
    h = sin_half_dlat ** 2 + math.cos(lat1) * math.cos(lat2) * sin_half_dlng ** 2
    central_angle = 2 * math.atan2(math.sqrt(h), math.sqrt(1 - h))
    return EARTH_RADIUS_KM * central_angle
#%%
# Command line: an optional run timestamp ('%Y-%m-%d_h%H' format, 14 chars)
# selects which result files to continue; otherwise the current hour is used.
if len(sys.argv) >= 2:
    t_start_ymdh = sys.argv[1]
    assert len(t_start_ymdh) == 14
    skip_already_queried_cities = len(sys.argv) == 3 and sys.argv[2] == 'skip_queried_cities'
    if skip_already_queried_cities:
        print('will skip already queried cities')
else:
    t_start_ymdh = datetime.now().strftime('%Y-%m-%d_h%H')
    skip_already_queried_cities = False
result_file = RESULT_FILE % t_start_ymdh
result_pop_file = RESULT_POP_FILE % t_start_ymdh
gmaps = googlemaps.Client(key=API_KEY)
cities = pd.read_csv('data/cities_edited.csv')
# Resume support: load previously collected POIs so they are not re-queried.
if os.path.exists(result_file):
    print('loading existing POI CSV file', result_file)
    existing_pois = pd.read_csv(result_file)
    existing_place_ids = set(existing_pois.place_id)
    print('> %d existing places' % len(existing_place_ids))
    existing_queried_cities = set(existing_pois.city)
    print('> %d existing cities' % len(existing_queried_cities))
else:
    existing_pois = None
    existing_place_ids = set()
    existing_queried_cities = set()
if os.path.exists(result_pop_file):
    # BUGFIX: this message previously printed `result_file` (the CSV path)
    # instead of the JSON file that is actually being loaded here.
    print('loading existing POI initial popularity score JSON file', result_pop_file)
    with open(result_pop_file) as f:
        resultrows_pop = json.load(f)
    print('> %d existing place popularity score entries' % len(resultrows_pop))
else:
    resultrows_pop = []
queried_cities = []
#%%
print('querying places in cities ...')
resultrows = []
# For each city, run every configured place search, filter the results, and
# collect both a POI row and an initial popularity-score row per kept place.
for city_i, cityrow in cities.iterrows():
    print('> city %d/%d: %s' % (city_i+1, len(cities), cityrow.city))
    if skip_already_queried_cities and cityrow.city in existing_queried_cities:
        print('> skipping (already queried this city)')
        continue
    for place_query, place_type, open_now in PLACE_SEARCHES:
        utcnow = datetime.utcnow()
        # Dedup key so a (run, city, country, query) combination runs once.
        query_id = t_start_ymdh + cityrow.city + cityrow.country + place_query
        if query_id in queried_cities:
            print('>> skipping (already queried this city for this kind of places)')
            continue
        # Optional Places API filters from the search triple.
        kwargs = {}
        if place_type is not None:
            kwargs['type'] = place_type
        if open_now is not None:
            kwargs['open_now'] = open_now
        if open_now is not None:
            open_now_info = '(open now restriction: ' + str(open_now) + ')'
        else:
            open_now_info = ''
        full_query = place_query + ' in ' + cityrow.city + ', ' + cityrow.country
        print('>> query: "%s" %s in lat=%.4f, lng=%.4f' % (full_query, open_now_info, cityrow.lat, cityrow.lng))
        places = gmaps.places(query=full_query, location=(cityrow.lat, cityrow.lng), radius=PLACE_SEARCH_RADIUS,
                              **kwargs)
        if places['status'] != 'OK':
            print('>> skipping (bad status: %s)' % places['status'])
            continue
        print('>> got %d results' % len(places['results']))
        queried_cities.append(query_id)
        n_pois = 0
        for i_place, place in enumerate(places['results']):
            if i_place >= LIMIT_NUM_PLACES:
                break
            print('>>> place: %s' % place['name'])
            place_lat, place_lng = place['geometry']['location']['lat'], place['geometry']['location']['lng']
            # Distance check: the API radius is advisory, so re-check here.
            dist = haversine(cityrow.lat, cityrow.lng, place_lat, place_lng)
            if dist > (PLACE_SEARCH_RADIUS / 1000) * 2:  # accept larger radius here
                print('>> found place is out of search radius (distance is %.2f)' % dist)
                continue
            if place['place_id'] in existing_place_ids:
                print('>> skipping (already queried place with ID %s)' % place['place_id'])
                continue
            # Only keep places for which popularity data is available.
            poptimes = populartimes.get_id(api_key=API_KEY, place_id=place['place_id'])
            if 'current_popularity' in poptimes and 'populartimes' in poptimes:
                print('>>>> adding this place as place of interest')
                resultrows.append(cityrow.to_list() + [
                    place_query,
                    place['place_id'],
                    place['name'],
                    place.get('formatted_address', ''),
                    place['geometry']['location']['lat'],
                    place['geometry']['location']['lng']
                ])
                resultrows_pop.append([
                    place['place_id'],
                    utcnow.strftime('%Y-%m-%d'),
                    utcnow.hour,
                    poptimes['current_popularity'],
                    poptimes['populartimes']
                ])
                existing_place_ids.add(place['place_id'])
                n_pois += 1
        print('>> got %d places of interest for this city and query' % n_pois)
print('preparing and storing dataset')
places_of_interest = pd.DataFrame(resultrows, columns=cities.columns.to_list() + [
    'query', 'place_id', 'name', 'addr', 'place_lat', 'place_lng'
])
# Merge with previously stored POIs, dedupe, and store the combined table.
if existing_pois is not None:
    places_of_interest = pd.concat((existing_pois, places_of_interest), ignore_index=True)
places_of_interest = places_of_interest \
    .drop_duplicates(['city', 'country', 'iso2', 'query', 'place_id'])\
    .sort_values(by=['country', 'city', 'query', 'name'])\
    .reset_index(drop=True)
print('got %d places of interest so far' % len(places_of_interest))
places_of_interest.to_csv(result_file, index=False)
with open(result_pop_file, 'w') as f:
    json.dump(resultrows_pop, f, indent=2)
print('\n')
print('done.')
| 2.859375 | 3 |
factor_model.py | FWNietzsche/Time-Series-Deconfounder2 | 0 | 12762358 | '''
Title: Time Series Deconfounder: Estimating Treatment Effects over Time in the Presence of Hidden Confounders
Authors: <NAME>, <NAME>, <NAME>
International Conference on Machine Learning (ICML) 2020
Last Updated Date: July 20th 2020
Code Author: <NAME> (<EMAIL>)
'''
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
logging.getLogger().setLevel(logging.INFO)
import numpy as np
import keras
from tqdm import tqdm
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, DropoutWrapper
from tensorflow.python.ops import rnn
from utils.predictive_checks_utils import compute_test_statistic_all_timesteps
from utils.rnn_utils import AutoregressiveLSTMCell, compute_sequence_length
class FactorModel:
    """RNN-based factor model (TF1 graph mode) that infers hidden confounders
    over time and predicts per-timestep treatment-assignment probabilities
    from current covariates plus the inferred confounders."""

    def __init__(self, params, hyperparams):
        """Store dimensions/hyperparameters and create the input placeholders."""
        # Dataset / task dimensions.
        self.num_treatments = params['num_treatments']
        self.num_covariates = params['num_covariates']
        self.num_confounders = params['num_confounders']
        self.max_sequence_length = params['max_sequence_length']
        self.num_epochs = params['num_epochs']
        # Model hyperparameters.
        self.rnn_hidden_units = hyperparams['rnn_hidden_units']
        self.fc_hidden_units = hyperparams['fc_hidden_units']
        self.learning_rate = hyperparams['learning_rate']
        self.batch_size = hyperparams['batch_size']
        self.rnn_keep_prob = hyperparams['rnn_keep_prob']
        tf.compat.v1.reset_default_graph()
        # History inputs are one step shorter than the full sequence.
        self.previous_covariates = tf.compat.v1.placeholder(
            tf.float32, [None, self.max_sequence_length - 1, self.num_covariates])
        self.previous_treatments = tf.compat.v1.placeholder(
            tf.float32, [None, self.max_sequence_length - 1, self.num_treatments])
        # Learned "fake" input for the first RNN step (there is no history yet).
        self.trainable_init_input = tf.compat.v1.get_variable(
            name='trainable_init_input',
            shape=[self.batch_size, 1, self.num_covariates + self.num_treatments],
            trainable=True)
        # NOTE: these two use the TF1 alias `tf.placeholder` (not compat.v1),
        # matching the original code.
        self.current_covariates = tf.placeholder(
            tf.float32, [None, self.max_sequence_length, self.num_covariates])
        self.target_treatments = tf.placeholder(
            tf.float32, [None, self.max_sequence_length, self.num_treatments])

    def build_confounders(self, trainable_state=True):
        """Build the autoregressive, dropout-regularized RNN that emits one
        hidden-confounder vector per time step."""
        previous_covariates_and_treatments = tf.concat(
            [self.previous_covariates, self.previous_treatments], axis=-1)
        # Prepend the trainable init input so the RNN has a full-length input.
        self.rnn_input = tf.concat(
            [self.trainable_init_input, previous_covariates_and_treatments], axis=1)
        self.sequence_length = compute_sequence_length(self.rnn_input)
        rnn_cell = DropoutWrapper(
            LSTMCell(self.rnn_hidden_units, state_is_tuple=False),
            output_keep_prob=self.rnn_keep_prob,
            state_keep_prob=self.rnn_keep_prob, variational_recurrent=True,
            dtype=tf.float32)
        autoregressive_cell = AutoregressiveLSTMCell(rnn_cell, self.num_confounders)
        if trainable_state:
            init_state = tf.get_variable(
                name='init_cell',
                shape=[self.batch_size, autoregressive_cell.state_size],
                trainable=True)
        else:
            init_state = autoregressive_cell.zero_state(self.batch_size, dtype=tf.float32)
        rnn_output, _ = rnn.dynamic_rnn(
            autoregressive_cell,
            self.rnn_input,
            initial_state=init_state,
            dtype=tf.float32,
            sequence_length=self.sequence_length)
        # Flatten to apply same weights to all time steps.
        rnn_output = tf.reshape(rnn_output, [-1, self.num_confounders])
        hidden_confounders = rnn_output
        covariates = tf.reshape(self.current_covariates, [-1, self.num_covariates])
        # Input for the per-treatment heads: covariates + inferred confounders.
        self.multitask_input = tf.concat([covariates, hidden_confounders], axis=-1)
        self.hidden_confounders = tf.reshape(
            hidden_confounders, [-1, self.max_sequence_length, self.num_confounders])

    def build_treatment_assignments(self):
        """One small sigmoid head per treatment, concatenated along the last axis."""
        self.treatment_prob_predictions = dict()
        for treatment in range(self.num_treatments):
            treatment_network_layer = tf.layers.dense(
                self.multitask_input, self.fc_hidden_units,
                name='treatment_network_%s' % str(treatment),
                activation=tf.nn.leaky_relu)
            treatment_output = tf.layers.dense(
                treatment_network_layer, 1, activation=tf.nn.sigmoid,
                name='treatment_output_%s' % str(treatment))
            self.treatment_prob_predictions[treatment] = treatment_output
        self.treatment_prob_predictions = tf.concat(
            list(self.treatment_prob_predictions.values()), axis=-1)
        return self.treatment_prob_predictions

    def build_network(self):
        """Assemble confounder RNN + treatment heads; return the probabilities."""
        self.build_confounders()
        self.treatment_prob_predictions = self.build_treatment_assignments()
        return self.treatment_prob_predictions

    def gen_epoch(self, dataset):
        """Yield fixed-size batches; the last batch is the final `batch_size`
        rows, so trailing rows may overlap with the previous batch."""
        dataset_size = dataset['previous_covariates'].shape[0]
        num_batches = int(dataset_size / self.batch_size) + 1
        for i in range(num_batches):
            if (i == num_batches - 1):
                batch_samples = range(dataset_size - self.batch_size, dataset_size)
            else:
                batch_samples = range(i * self.batch_size, (i + 1) * self.batch_size)
            batch_previous_covariates = dataset['previous_covariates'][batch_samples, :, :]
            batch_previous_treatments = dataset['previous_treatments'][batch_samples, :, :]
            batch_current_covariates = dataset['covariates'][batch_samples, :, :]
            batch_target_treatments = dataset['treatments'][batch_samples, :, :].astype(np.int32)
            yield (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
                   batch_target_treatments)

    def eval_network(self, dataset):
        """Return the mean loss over all batches of `dataset`."""
        validation_losses = []
        for (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
             batch_target_treatments) in self.gen_epoch(dataset):
            feed_dict = self.build_feed_dictionary(batch_previous_covariates, batch_previous_treatments,
                                                   batch_current_covariates, batch_target_treatments)
            validation_loss = self.sess.run([self.loss], feed_dict=feed_dict)
            validation_losses.append(validation_loss)
        validation_loss = np.mean(np.array(validation_losses))
        return validation_loss

    def compute_test_statistic(self, num_samples, target_treatments, feed_dict, predicted_mask):
        """Monte-Carlo average (over dropout samples) of the per-timestep test
        statistic comparing `target_treatments` to predicted probabilities."""
        test_statistic = np.zeros(shape=(self.max_sequence_length,))
        for sample_idx in range(num_samples):
            # Each sess.run re-samples the variational dropout masks.
            [treatment_probability] = self.sess.run(
                [self.treatment_prob_predictions], feed_dict=feed_dict)
            treatment_probability = np.reshape(treatment_probability, newshape=(
                self.batch_size, self.max_sequence_length, self.num_treatments))
            test_statistic_sequence = compute_test_statistic_all_timesteps(
                target_treatments, treatment_probability,
                self.max_sequence_length, predicted_mask)
            test_statistic += test_statistic_sequence
        test_statistic = test_statistic / num_samples
        return test_statistic

    def eval_predictive_checks(self, dataset):
        """Posterior predictive checks: per-timestep p-values comparing the
        test statistic on replicated treatments vs. the observed treatments,
        averaged over batches."""
        num_replications = 50
        num_samples = 50
        p_values_over_time = np.zeros(shape=(self.max_sequence_length,))
        steps = 0
        for (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
             batch_target_treatments) in self.gen_epoch(dataset):
            feed_dict = self.build_feed_dictionary(batch_previous_covariates, batch_previous_treatments,
                                                   batch_current_covariates, batch_target_treatments)
            # Mask of valid (non-padding) time steps for this batch.
            mask = tf.sign(tf.reduce_max(tf.abs(self.rnn_input), axis=2))
            [seq_lenghts, predicted_mask] = self.sess.run([self.sequence_length, mask], feed_dict=feed_dict)
            steps = steps + 1
            """ Compute test statistics for replicas """
            test_statistic_replicas = np.zeros(shape=(num_replications, self.max_sequence_length))
            for replication_idx in range(num_replications):
                # Sample replicated treatment assignments from the model.
                [treatment_replica, treatment_prob_pred] = self.sess.run(
                    [self.treatment_realizations, self.treatment_prob_predictions], feed_dict=feed_dict)
                treatment_replica = np.reshape(treatment_replica, newshape=(
                    self.batch_size, self.max_sequence_length, self.num_treatments))
                test_statistic_replicas[replication_idx] = self.compute_test_statistic(
                    num_samples, treatment_replica, feed_dict, predicted_mask)
            """ Compute test statistic for target """
            test_statistic_target = self.compute_test_statistic(
                num_samples, batch_target_treatments, feed_dict, predicted_mask)
            # Fraction of replicas whose statistic is below the observed one.
            probability = np.mean(np.less(test_statistic_replicas, test_statistic_target).astype(np.int32), axis=0)
            p_values_over_time += probability
        p_values_over_time = p_values_over_time / steps
        return p_values_over_time

    def train(self, dataset_train, dataset_val, verbose=False):
        """Build the graph, create the session, and run SGD for `num_epochs`
        epochs; validation loss is logged every 100 epochs."""
        self.treatment_prob_predictions = self.build_network()
        # Bernoulli samples used by the predictive checks.
        self.treatment_realizations = tf.distributions.Bernoulli(probs=self.treatment_prob_predictions).sample()
        self.loss = self.compute_loss(self.target_treatments, self.treatment_prob_predictions)
        optimizer = self.get_optimizer()
        # Setup tensorflow
        tf_device = 'gpu'
        if tf_device == "cpu":
            tf_config = tf.compat.v1.ConfigProto(log_device_placement=False, device_count={'GPU': 0})
        else:
            tf_config = tf.compat.v1.ConfigProto(log_device_placement=False, device_count={'GPU': 1})
            tf_config.gpu_options.allow_growth = True
        self.sess = tf.compat.v1.Session(config=tf_config)
        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.sess.run(tf.compat.v1.local_variables_initializer())
        for epoch in tqdm(range(self.num_epochs)):
            for (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
                 batch_target_treatments) in self.gen_epoch(dataset_train):
                feed_dict = self.build_feed_dictionary(batch_previous_covariates, batch_previous_treatments,
                                                       batch_current_covariates, batch_target_treatments)
                _, training_loss = self.sess.run([optimizer, self.loss], feed_dict=feed_dict)
            if (verbose):
                # Logs the loss of the last batch in the epoch.
                logging.info(
                    "Epoch {} out of {}: Summary| Training loss = {}".format(
                        (epoch + 1), self.num_epochs, training_loss))
            if ((epoch + 1) % 100 == 0):
                validation_loss = self.eval_network(dataset_val)
                logging.info(
                    "Epoch {} out of {}: Summary| Validation loss = {}".format(epoch, self.num_epochs, validation_loss))

    def build_feed_dictionary(self, batch_previous_covariates, batch_previous_treatments,
                              batch_current_covariates, batch_target_treatments):
        """Map one batch of numpy arrays onto the graph's input placeholders."""
        feed_dict = {self.previous_covariates: batch_previous_covariates,
                     self.previous_treatments: batch_previous_treatments,
                     self.current_covariates: batch_current_covariates,
                     self.target_treatments: batch_target_treatments}
        return feed_dict

    def compute_loss(self, target_treatments, treatment_predictions):
        """Masked binary cross-entropy over all treatments and valid time
        steps, normalized by the total number of valid steps in the batch."""
        target_treatments_reshape = tf.reshape(target_treatments, [-1, self.num_treatments])
        # 1 for real (non-padding) time steps, 0 for padding.
        mask = tf.sign(tf.reduce_max(tf.abs(self.rnn_input), axis=2))
        flat_mask = tf.reshape(mask, [-1, 1])
        # Probabilities are clipped to avoid log(0).
        cross_entropy = - tf.reduce_sum((target_treatments_reshape * tf.math.log(
            tf.clip_by_value(treatment_predictions, 1e-10, 1.0)) + (1 - target_treatments_reshape) * (tf.math.log(
            tf.clip_by_value(1 - treatment_predictions, 1e-10, 1.0)))) * flat_mask, axis=0)
        self.mask = mask
        cross_entropy /= tf.reduce_sum(tf.cast(self.sequence_length, tf.float32), axis=0)
        return tf.reduce_mean(cross_entropy)

    def get_optimizer(self):
        """Return the Adam minimization op for `self.loss`."""
        optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        return optimizer

    def compute_hidden_confounders(self, dataset):
        """Return the inferred hidden confounders for every row of `dataset`,
        averaged over `num_samples` dropout samples per batch."""
        dataset_size = dataset['covariates'].shape[0]
        hidden_confounders = np.zeros(
            shape=(dataset_size, self.max_sequence_length, self.num_confounders))
        num_batches = int(dataset_size / self.batch_size) + 1
        batch_id = 0
        num_samples = 50
        for (batch_previous_covariates, batch_previous_treatments, batch_current_covariates,
             batch_target_treatments) in self.gen_epoch(dataset):
            feed_dict = self.build_feed_dictionary(batch_previous_covariates, batch_previous_treatments,
                                                   batch_current_covariates, batch_target_treatments)
            total_predicted_hidden_confounders = np.zeros(
                shape=(self.batch_size, self.max_sequence_length, self.num_confounders))
            for sample in range(num_samples):
                predicted_hidden_confounders, predicted_treatment_probs = self.sess.run(
                    [self.hidden_confounders, self.treatment_prob_predictions], feed_dict=feed_dict)
                total_predicted_hidden_confounders += predicted_hidden_confounders
            total_predicted_hidden_confounders /= num_samples
            # Mirror gen_epoch's indexing so results land on the right rows.
            if (batch_id == num_batches - 1):
                batch_samples = range(dataset_size - self.batch_size, dataset_size)
            else:
                batch_samples = range(batch_id * self.batch_size, (batch_id + 1) * self.batch_size)
            batch_id += 1
            hidden_confounders[batch_samples] = total_predicted_hidden_confounders
        return hidden_confounders
| 2.6875 | 3 |
packages/gtmcore/gtmcore/exceptions/exceptions.py | jjwatts/gigantum-client | 60 | 12762359 | <reponame>jjwatts/gigantum-client<gh_stars>10-100
# TODO: Finish exception implementation, with single exception used to manage hiding error details from user in UI
class GigantumException(Exception):
    """Base class for all exceptions raised from within gigantum core code.

    Any exception arising inside the core library should be raised as (or
    wrapped in) a GigantumException. This avoids "except Exception" clauses
    in client code, and means callers need not know every sub-library used
    internally and the exceptions those raise: a single catch for all
    gigantum-related errors suffices. The origin of the problem remains
    observable in the stack trace.

    (Docstring updated: it previously still referred to the old
    Labbook/LabbookException naming.)"""
    pass
class GigantumLockedException(GigantumException):
    """Raised when trying to acquire a LabBook lock that is already held
    by another process and the failfast flag is set to True."""
    pass
| 2.171875 | 2 |
plotter.py | Anny-Moon/PlotterPyPCA | 0 | 12762360 | <filename>plotter.py
"""
Run me like this:
python ./plotter.py <path/pcaFile> <"show" or the name of the file to save> <configurations>
python ./movieMaker.py data/1abs 1abs.pdf 30 44 58
If you pass only the first argument then I will
plot the 0-th configuration.
"""
#============ parameters ===============
# Plot
dotSize = None; #if None then optimal size will be found
lineSize = None; # = '#ee0000'; if None then optimal size will be found
dotColor = None; #if None then will be random
lineColor = None; #if None then will be random
dotHueDispersion = 0.05; #[0,1];
dotSaturationDispersion = 0.1; #[0,1];
dotVolumeDispersion = 0.1; #[0,1];
# Axes
elevation = None;
azimut = None;
axisOnOff ='off';
#=======================================
import sys
sys.path.append('Plotter_lib/');
import matplotlib.pyplot as plt
import Polymer
import EqualAxes
import Color
if(len(sys.argv)<2):
print(__doc__);
exit();
fileName = sys.argv[1];
polymer = Polymer.Polymer(fileName+".pca");
fig = plt.figure()
ax = fig.gca(projection='3d');
ax.set_aspect('equal');
eqAx = EqualAxes.EqualAxes(ax);
if(len(sys.argv)<4):
confNum = 0;
eqAx.push(polymer.getX(confNum),polymer.getY(confNum),polymer.getZ(confNum));
dotSmartColors = Color.arrayWithSmartColors(polymer.getChainLenght(0),
dotHueDispersion, dotSaturationDispersion, dotVolumeDispersion, dotColor);
polymer.plot(confNum, eqAx, dotSize, lineSize, dotSmartColors, lineColor);
else:
for i in range(3,len(sys.argv)):
confNum = int(sys.argv[i]);
print('Chain %s has %i atoms.' % (sys.argv[i], polymer.getN(confNum)));
eqAx.push(polymer.getX(confNum),polymer.getY(confNum),polymer.getZ(confNum));
for i in range(3,len(sys.argv)):
confNum = int(sys.argv[i]);
dotSmartColors = Color.arrayWithSmartColors(polymer.getChainLenght(0),
dotHueDispersion, dotSaturationDispersion, dotVolumeDispersion, dotColor);
polymer.plot(confNum, eqAx, dotSize, lineSize, dotSmartColors, lineColor);
# polymer.smartColorPlot(confNum,ax, axMaxRange, "#002255");
# polymer.happyPlot(confNum,ax, axMaxRange);
# polymer.plotOld(confNum, ax);
eqAx.set();
plt.axis(axisOnOff);
if(len(sys.argv)<3 or sys.argv[2] == 'show'):
plt.show();
else:
fig.savefig(sys.argv[2]); | 2.609375 | 3 |
usl_score/normalize.py | vitouphy/usl_dialogue_metric | 5 | 12762361 | from collections import namedtuple
from datasets import VUPDataset, NUPDataset, MLMDataset
import numpy as np
from data_utils import read_dataset
from models.VUPScorer import VUPScorer
from models.NUPScorer import NUPScorer
from models.MLMScorer import MLMScorer
import argparse
import json
from tqdm.auto import tqdm
import torch
from torch.utils.data import DataLoader
import pytorch_lightning as pl
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def calc_minmax(model, X_data):
    """Compute per-metric normalization bounds over a dataset.

    Runs ``model.predict`` on every example and, for each metric key in the
    returned score dicts, records the 25th/75th percentiles as the
    'min'/'max' bounds used later for score normalization.

    Args:
        model: scorer exposing ``predict(x) -> dict`` of metric name -> value.
        X_data: iterable of examples accepted by ``model.predict``.

    Returns:
        dict mapping each metric name to ``{'min': float, 'max': float}``.
        Returns an empty dict when X_data is empty.
    """
    with torch.no_grad():
        scores = [model.predict(x) for x in tqdm(X_data)]

    if not scores:
        # No examples -> no bounds to estimate.
        return {}

    score_dict = {}
    # Metric keys are taken from the first prediction; all predictions are
    # assumed to share the same keys -- TODO confirm for every scorer.
    for key in scores[0].keys():
        values = [score[key] for score in scores]
        # Use the inter-quartile range instead of the raw min/max so that
        # outlier predictions do not stretch the normalization bounds.
        score_dict[key] = {
            'min': np.quantile(values, 0.25).item(),
            'max': np.quantile(values, 0.75).item(),
        }
    return score_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculating min and max of MLM for normalizatiion')
parser.add_argument('--weight-path', type=str, default='./checkpoints', help='Path to directory that stores the weight')
parser.add_argument('--data-path', type=str, required=True, help='Path to the directory of training set')
parser.add_argument('--output-path', type=str, default='mlm_minmax_score.json', help='Output path for the min max values')
args = parser.parse_args()
xdata = read_dataset(args.data_path)
model = MLMScorer.load_from_checkpoint(checkpoint_path=args.weight_path).to(device)
model.eval()
print ('[!] loading model complete')
scores = calc_minmax(model, xdata)
print ('[!] normalizing complete')
with open(args.output_path, 'w') as f:
f.write(json.dumps(scores, indent=4))
f.close()
print ('[!] complete')
| 2.328125 | 2 |
alphad3m_containers/setup.py | VIDA-NYU/alphad3m | 0 | 12762362 | <filename>alphad3m_containers/setup.py
import os
import setuptools
package_name = 'alphad3m-containers'
def read_readme():
    """Return the full text of the README.md that sits next to this file."""
    readme_path = os.path.join(os.path.dirname(__file__), 'README.md')
    with open(readme_path, encoding='utf8') as readme_file:
        return readme_file.read()
def read_version():
    """Extract the package version from alphad3m_containers/__init__.py.

    Scans the module for a line of the form ``__version__ = '<x>'`` and
    returns the quoted value. Raises KeyError if no such line exists.
    """
    module_path = os.path.join('alphad3m_containers', '__init__.py')
    with open(module_path) as init_file:
        for raw_line in init_file:
            tokens = raw_line.strip().split(' ')
            if tokens and tokens[0] == '__version__':
                return tokens[-1].strip("'")
    raise KeyError('Version not found in {0}'.format(module_path))
# Build the package metadata from the files that live next to setup.py.
long_description = read_readme()
version = read_version()

# Runtime dependencies: one requirement per line. Lines are stripped so that
# trailing newlines (and whitespace-only lines, which are truthy as raw
# strings) do not end up inside install_requires.
with open('requirements.txt') as fp:
    req = [line.strip() for line in fp if line.strip() and not line.startswith('#')]

setuptools.setup(
    name=package_name,
    version=version,
    packages=setuptools.find_packages(),
    install_requires=req,
    description="AlphaD3M: NYU's AutoML System",
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://gitlab.com/ViDA-NYU/d3m/alphad3m',
    include_package_data=True,
    author='<NAME>, <NAME>',
    author_email='<EMAIL>, <EMAIL>',
    maintainer='<NAME>, <NAME>',
    maintainer_email='<EMAIL>, <EMAIL>',
    keywords=['datadrivendiscovery', 'automl', 'd3m', 'ta2', 'nyu'],
    license='Apache-2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Scientific/Engineering',
    ])
| 1.882813 | 2 |
src/tests/test_utils_batchprocessors.py | reddcoin-project/ReddConnect | 5 | 12762363 | import unittest
class TestReadBatchfile(unittest.TestCase):
    """Placeholder test case for the read_batchfile helper (not yet implemented)."""

    def test_read_batchfile(self):
        # self.assertEqual(expected, read_batchfile(pythonpath, file_ending))
        assert True # TODO: implement your test here
class TestBatchCommandProcessor(unittest.TestCase):
    """Placeholder test case for BatchCommandProcessor (not yet implemented)."""

    def test_parse_file(self):
        # batch_command_processor = BatchCommandProcessor()
        # self.assertEqual(expected, batch_command_processor.parse_file(pythonpath))
        assert True # TODO: implement your test here
class TestTbFilename(unittest.TestCase):
    """Placeholder test case for the tb_filename helper (not yet implemented)."""

    def test_tb_filename(self):
        # self.assertEqual(expected, tb_filename(tb))
        assert True # TODO: implement your test here
class TestTbIter(unittest.TestCase):
    """Placeholder test case for the tb_iter helper (not yet implemented)."""

    def test_tb_iter(self):
        # self.assertEqual(expected, tb_iter(tb))
        assert True # TODO: implement your test here
class TestBatchCodeProcessor(unittest.TestCase):
    """Placeholder test case for BatchCodeProcessor (not yet implemented)."""

    def test_code_exec(self):
        # batch_code_processor = BatchCodeProcessor()
        # self.assertEqual(expected, batch_code_processor.code_exec(codedict, extra_environ, debug))
        assert True # TODO: implement your test here

    def test_parse_file(self):
        # batch_code_processor = BatchCodeProcessor()
        # self.assertEqual(expected, batch_code_processor.parse_file(pythonpath))
        assert True # TODO: implement your test here
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 2.984375 | 3 |
2017/395T-greg/HW3/hw3-release/models.py | xxks-kkk/Code-for-blog | 8 | 12762364 | <filename>2017/395T-greg/HW3/hw3-release/models.py<gh_stars>1-10
# models.py
import tensorflow as tf
import numpy as np
import random
import datetime
from sentiment_data import *
import os
# We suppress the warning messages raised by the tensorflow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Returns a new numpy array with the data from np_arr padded to be of length length. If length is less than the
# length of the base array, truncates instead.
def pad_to_length(np_arr, length):
    """Return a new float array of size `length` with np_arr's data zero-padded.

    If `length` is less than the length of the base array, the data is
    truncated instead. (The original assignment `result[0:np_arr.shape[0]]`
    raised a ValueError in that case rather than truncating as documented.)
    """
    result = np.zeros(length)
    n = min(np_arr.shape[0], length)
    result[:n] = np_arr[:n]
    return result
# Train a feedforward neural network on the given training examples, using dev_exs for development and returning
# predictions on the *blind* test_exs (all test_exs have label 0 as a dummy placeholder value). Returned predictions
# should be SentimentExample objects with predicted labels and the same sentences as input (but these won't be
# read for evaluation anyway)
def train_ffnn(train_exs, dev_exs, test_exs, word_vectors):
train_xs = np.array([np.mean(word_vectors.vectors[ex.indexed_words,:],axis=0) for ex in train_exs])
train_ys = np.array([ex.label for ex in train_exs])
X_dev = np.array([np.mean(word_vectors.vectors[ex.indexed_words,:],axis=0) for ex in dev_exs])
y_dev = np.array([ex.label for ex in dev_exs])
X_test = np.array([np.mean(word_vectors.vectors[ex.indexed_words,:],axis=0) for ex in test_exs])
y_test = np.array([ex.label for ex in test_exs])
batch_size = 10
feat_vec_size = train_xs.shape[1]
embedding_size = 150
num_classes = 2
num_epochs = 50
initial_learning_rate = 0.1
decay_steps = 10
learning_rate_decay_factor = 0.99
graph = tf.Graph()
with graph.as_default():
tf_y_train = tf.placeholder(tf.int32, batch_size) # Input for the gold label so we can compute the loss
label_onehot = tf.one_hot(tf_y_train, num_classes)
# We have no hidden layer at all
if os.environ.get('FC_LAYER', 1) == '0':
print "NUM HIDDEN LAYER = 0"
with tf.name_scope('softmax'):
tf_X_train = tf.placeholder(tf.float32, [batch_size, feat_vec_size])
V_h1 = tf.get_variable("V_h1", [feat_vec_size, num_classes], initializer=tf.contrib.layers.xavier_initializer(seed=0))
probs = tf.nn.softmax(tf.tensordot(tf_X_train, V_h1, 1))
one_best = tf.argmax(probs, axis=1)
# We have 2 hidden layers
elif os.environ.get('FC_LAYER', 1) == '2':
print "NUM HIDDEN LAYER = 2"
with tf.name_scope('h1') as scope:
tf_X_train = tf.placeholder(tf.float32, [None, feat_vec_size])
V_h1 = tf.get_variable("V_h1", [feat_vec_size, embedding_size], initializer=tf.contrib.layers.xavier_initializer(seed=0))
z_h1 = tf.sigmoid(tf.tensordot(tf_X_train, V_h1, 1))# Can use other nonlinearities: tf.nn.relu, tf.tanh, etc.
W_h1 = tf.get_variable("W_h1", [embedding_size, embedding_size], initializer=tf.contrib.layers.xavier_initializer(seed=0))
h1 = tf.tensordot(z_h1, W_h1, 1)
with tf.name_scope('h2') as scope:
V_h2 = tf.get_variable("V_h2", [embedding_size, embedding_size], initializer=tf.contrib.layers.xavier_initializer(seed=0))
W_h2 = tf.get_variable("W_h2", [embedding_size, num_classes])
z_h2 = tf.sigmoid(tf.tensordot(h1, V_h2, 1))
with tf.name_scope('softmax'):
probs = tf.nn.softmax(tf.tensordot(z_h2, W_h2, 1))
one_best = tf.argmax(probs, axis=1)
# We have 1 hidden layer
else:
print "NUM HIDDEN LAYER = 1"
with tf.name_scope('h1') as scope:
tf_X_train = tf.placeholder(tf.float32, [None, feat_vec_size])
V_h1 = tf.get_variable("V_h1", [feat_vec_size, embedding_size], initializer=tf.contrib.layers.xavier_initializer(seed=0))
z_h1 = tf.sigmoid(tf.tensordot(tf_X_train, V_h1, 1))# Can use other nonlinearities: tf.nn.relu, tf.tanh, etc.
W_h1 = tf.get_variable("W_h1", [embedding_size, num_classes], initializer=tf.contrib.layers.xavier_initializer(seed=0))
with tf.name_scope('softmax'):
probs = tf.nn.softmax(tf.tensordot(z_h1, W_h1, 1))
one_best = tf.argmax(probs, axis=1)
loss = tf.negative(tf.reduce_sum(tf.multiply(tf.log(probs), label_onehot)))
global_step = tf.contrib.framework.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(initial_learning_rate,global_step,decay_steps,learning_rate_decay_factor, staircase=True)
# Plug in any first-order method here! We'll use SGD with momentum
if os.environ.get('OPT', 'SGD') == 'SGD':
print "OPT = SGD"
opt = tf.train.GradientDescentOptimizer(learning_rate)
grads = opt.compute_gradients(loss)
train_op = opt.apply_gradients(grads)
# The above three lines can be replaced by the following line. Use the above three lines for experiment purpose.
# train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
elif os.environ.get('OPT', 'SGD') == 'ADAM':
print "OPT = ADAM"
opt = tf.train.AdamOptimizer(learning_rate)
grads = opt.compute_gradients(loss)
train_op = opt.apply_gradients(grads, global_step=global_step)
with tf.Session(graph=graph) as sess:
init = tf.global_variables_initializer() # run this first to initialize variables
arr_train = np.arange(len(train_xs))
# Generally want to determinize training as much as possible
tf.set_random_seed(0)
# Initialize variables
sess.run(init)
step_idx = 0
for i in range(num_epochs):
np.random.shuffle(arr_train)
loss_this_iter = 0
for batch_idx in xrange(0, len(train_xs)/batch_size):
ex_idx = arr_train[batch_idx*batch_size:(batch_idx+1)*batch_size]
feed_dict = {
tf_X_train: train_xs[ex_idx],
tf_y_train: train_ys[ex_idx]
}
[_, loss_this_batch] = sess.run([train_op, loss], feed_dict = feed_dict)
step_idx += 1
loss_this_iter += loss_this_batch
print "Loss for iteration " + repr(i) + ": " + repr(loss_this_iter)
# Evaluate on the train set
train_correct = 0
for batch_idx in xrange(0, len(train_xs)/batch_size):
ex_idx = arr_train[batch_idx*batch_size:(batch_idx+1)*batch_size]
# Note that we only feed in the x, not the y, since we're not training. We're also extracting different
# quantities from the running of the computation graph, namely the probabilities, prediction, and z
feed_dict = {
tf_X_train: train_xs[ex_idx]
}
[pred_this_batch] = sess.run([one_best],feed_dict=feed_dict)
train_correct += np.sum(np.equal(train_ys[ex_idx],pred_this_batch))
print str(float(train_correct)/len(train_ys))[:7] + " correct on the training set"
# Evaluate on the dev set
train_correct = 0
arr_dev = np.arange(len(X_dev))
for batch_idx in xrange(0, len(X_dev)/batch_size):
ex_idx = arr_dev[batch_idx*batch_size:(batch_idx+1)*batch_size]
# Note that we only feed in the x, not the y, since we're not training. We're also extracting different
# quantities from the running of the computation graph, namely the probabilities, prediction, and z
feed_dict = {
tf_X_train: X_dev[ex_idx]
}
[pred_this_batch] = sess.run([one_best],feed_dict=feed_dict)
train_correct += np.sum(np.equal(y_dev[ex_idx],pred_this_batch))
print str(float(train_correct)/len(y_dev))[:7] + " correct on the dev set"
# Evaluate on the test set
sentimentExamples = []
train_correct = 0
arr_test = np.arange(len(X_test))
for batch_idx in xrange(0, len(X_test) / batch_size):
ex_idx = arr_test[batch_idx * batch_size:(batch_idx + 1) * batch_size]
# Note that we only feed in the x, not the y, since we're not training. We're also extracting different
# quantities from the running of the computation graph, namely the probabilities, prediction, and z
feed_dict = {
tf_X_train: X_test[ex_idx]
}
[pred_this_batch] = sess.run([one_best], feed_dict=feed_dict)
for i in y_test[ex_idx]:
sentimentExamples.append(SentimentExample(test_exs[i].indexed_words, pred_this_batch[i]))
return sentimentExamples
# Train a feedforward neural network on the given training examples, using dev_exs for development and returning
# predictions on the *blind* test_exs (all test_exs have label 0 as a dummy placeholder value). Returned predictions
# should be SentimentExample objects with predicted labels and the same sentences as input (but these won't be
# read for evaluation anyway)
# NOTE: Two hidden layer FFNN that achieves good result
def train_ffnn2(train_exs, dev_exs, test_exs, word_vectors):
    """Two-hidden-layer feedforward sentiment classifier (alternative variant).

    Sentences are padded/truncated to 60 token ids, then each sentence is
    represented as the mean of its word embeddings (looked up inside the
    session, padding excluded). Trains with SGD and prints train/dev accuracy
    after every epoch.

    NOTE(review): this variant only prints accuracies and returns None -- it
    never builds or returns test-set predictions.
    """
    # 59 is the max sentence length in the corpus, so let's set this to 60
    seq_max_len = 60
    # To get you started off, we'll pad the training input to 60 words to make it a square matrix.
    train_mat = np.asarray([pad_to_length(np.array(ex.indexed_words), seq_max_len) for ex in train_exs])
    # Also store the sequence lengths -- this could be useful for training LSTMs
    train_seq_lens = np.array([len(ex.indexed_words) for ex in train_exs])
    # Labels
    train_labels_arr = np.array([ex.label for ex in train_exs])
    dev_mat = np.asarray([pad_to_length(np.array(ex.indexed_words), seq_max_len) for ex in dev_exs])
    # Also store the sequence lengths -- this could be useful for training LSTMs
    dev_seq_lens = np.array([len(ex.indexed_words) for ex in dev_exs])
    # Labels
    dev_labels = np.array([ex.label for ex in dev_exs])
    dev_xs = dev_mat
    dev_ys = dev_labels.reshape(-1, 1)
    train_xs = train_mat
    train_ys = train_labels_arr.reshape(-1, 1)
    # Define some constants
    embedding_size = 10
    num_classes = 2
    batch_size = 100
    vec_size = 300
    # DEFINING THE COMPUTATION GRAPH
    # Define the core neural network
    fx = tf.placeholder(tf.float32, [None, vec_size])
    # Other initializers like tf.random_normal_initializer are possible too
    W1 = tf.get_variable("W1", [vec_size, embedding_size], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    b = tf.get_variable("b1", [embedding_size])
    # Can use other nonlinearities: tf.nn.relu, tf.tanh, etc.
    z = tf.sigmoid(tf.matmul(fx, W1) + b)
    W2 = tf.get_variable("W2", [embedding_size, embedding_size], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    b2 = tf.get_variable("b2", [embedding_size])
    z = tf.nn.softmax(tf.matmul(z, W2) + b2)
    W3 = tf.get_variable("W3", [embedding_size, num_classes], initializer=tf.contrib.layers.xavier_initializer(seed=0))
    b3 = tf.get_variable("b3", [num_classes])
    probs = tf.nn.softmax(tf.matmul(z, W3) + b3)
    # This is the actual prediction -- not used for training but used for inference
    one_best = tf.reshape(tf.argmax(probs, axis=1), shape=[-1, 1])
    # Input for the gold label so we can compute the loss
    label = tf.placeholder(tf.int32, [None, 1])
    y_ = tf.reshape(tf.one_hot(label, num_classes), shape=[-1, num_classes])
    # Cross-entropy of the predicted distribution against the one-hot gold label.
    loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(probs), reduction_indices=[1]))
    decay_steps = 10
    learning_rate_decay_factor = 1.0
    global_step = tf.contrib.framework.get_or_create_global_step()
    # Smaller learning rates are sometimes necessary for larger networks
    initial_learning_rate = 1.0
    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(initial_learning_rate,
                                    global_step,
                                    decay_steps,
                                    learning_rate_decay_factor,
                                    staircase=True)
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)
    # Logging with Tensorboard
    tf.summary.scalar('learning_rate', lr)
    tf.summary.scalar('loss', loss)
    # RUN TRAINING AND TEST
    # Initializer; we need to run this first to initialize variables
    init = tf.global_variables_initializer()
    num_epochs = 1000
    merged = tf.summary.merge_all() # merge all the tensorboard variables
    # The computation graph must be run in a particular Tensorflow "session". Parameters, etc. are localized to the
    # session (unless you pass them around outside it). All runs of a computation graph with certain values are relative
    # to a particular session
    with tf.Session() as sess:
        # Write a logfile to the logs/ directory, can use Tensorboard to view this
        train_writer = tf.summary.FileWriter('logs/', sess.graph)
        # Generally want to determinize training as much as possible
        tf.set_random_seed(0)
        # Initialize variables
        sess.run(init)
        step_idx = 0
        iters = int(len(train_xs) / batch_size)
        # Look up the embedding of every (padded) token id in every train sentence.
        embedded_train = []
        for i in range(len(train_xs)):
            tmp = []
            for word in train_xs[i]:
                tmp.append(word_vectors.get_train_embedding(int(word), add=False))
            embedded_train.append(tmp)
        train_xs = []
        print('data shape ', np.shape(embedded_train))
        print('reading word embeddings ...')
        # Average only over the real tokens (exclude the zero padding).
        for i in range(len(embedded_train)):
            train_xs.append(np.mean(embedded_train[i][:len(train_exs[i].indexed_words)], 0))
        print('input shape: ', np.shape(train_xs))
        # Same featurization for the dev set.
        embedded_dev = []
        for i in range(len(dev_xs)):
            tmp = []
            for word in dev_xs[i]:
                tmp.append(word_vectors.get_train_embedding(int(word), add=False))
            embedded_dev.append(tmp)
        dev_xs = []
        print('data shape ', np.shape(embedded_dev))
        print('reading word embeddings ...')
        for i in range(len(embedded_dev)):
            dev_xs.append(np.mean(embedded_dev[i][:len(dev_exs[i].indexed_words)], 0))
        print('input shape: ', np.shape(dev_xs))
        for i in range(num_epochs):
            loss_this_epoch = 0
            print('number of epoch: ', i)
            for iter in range(iters):
                [_, loss_this_batch, summary] = sess.run([train_step, loss, merged],
                                                         feed_dict={fx: train_xs[batch_size*iter:batch_size*(iter+1)],
                                                                    label: train_ys[batch_size*iter:batch_size*(iter+1)]})
                train_writer.add_summary(summary, step_idx)
                step_idx += 1
                loss_this_epoch += loss_this_batch
            print("Loss for epoch " + repr(i) + ": " + "{0:.2f}".format(loss_this_epoch))
            # Accuracy on the full training and dev sets after each epoch.
            preds, _ = sess.run([one_best, probs], feed_dict={fx: train_xs})
            print('training accuracy: ', "{0:.2f}".format(np.mean(np.equal(preds, train_ys))))
            preds, _ = sess.run([one_best, probs], feed_dict={fx: dev_xs})
            print('dev accuracy: ', "{0:.2f}".format(np.mean(np.equal(preds, dev_ys))))
            print()
# Analogous to train_ffnn, but trains CNN.
# Here, I implement CNN for sentiment analysis
# Reference:
# 1. [Implementing a CNN for Text Classification in TensorFlow](http://www.wildml.com/2015/12/implementing-a-cnn-for-text-classification-in-tensorflow/)
def train_cnn(train_exs, dev_exs, test_exs, word_vectors):
    """Train the TextCNN sentiment classifier and return blind-test predictions.

    Pads every sentence to 60 token ids, trains the CNN with Adam and dropout
    for num_epochs epochs, prints dev accuracy after each epoch, and returns
    the test-set predictions as a list of SentimentExample objects.
    """
    # 59 is the max sentence length in the corpus, so let's set this to 60
    seq_max_len = 60
    # To get you started off, we'll pad the training input to 60 words to make it a square matrix.
    train_mat = np.asarray([pad_to_length(np.array(ex.indexed_words), seq_max_len) for ex in train_exs])
    # Labels
    train_labels_arr = np.array([ex.label for ex in train_exs])
    train_xs = train_mat
    # Two-column one-hot labels: column 0 = negative, column 1 = positive.
    train_ys_2cols = np.array(list(zip(1-train_labels_arr, train_labels_arr)))
    X_dev = np.asarray([pad_to_length(np.array(ex.indexed_words), seq_max_len) for ex in dev_exs])
    y_dev = np.array([ex.label for ex in dev_exs])
    y_dev_2cols = np.array(list(zip(1-y_dev, y_dev)))
    X_test = np.asarray([pad_to_length(np.array(ex.indexed_words), seq_max_len) for ex in test_exs])
    y_test = np.array([ex.label for ex in test_exs])
    y_test_2cols = np.array(list(zip(1-y_test, y_test)))
    # Model hyperparameters.
    vocab_size = word_vectors.vectors.shape[0]
    embedding_size = word_vectors.vectors.shape[1]
    filter_sizes = [3,4,5]
    num_filters = 128
    l2_reg_lambda = 0.0
    dropout_keep_prob_train = 0.5
    dropout_keep_prob_dev = 1.0
    # number steps = number of sentences / batch_size
    batch_size = 64
    num_epochs = 20
    learning_rate = 1e-3
    with tf.Graph().as_default():
        sess = tf.Session()
        with sess.as_default():
            cnn = TextCNN(
                sequence_length=train_xs.shape[1],
                num_classes=train_ys_2cols.shape[1],
                vocab_size=vocab_size,
                embedding_size=embedding_size,
                filter_sizes=filter_sizes,
                num_filters=num_filters,
                activation_func='relu',
                l2_reg_lambda=l2_reg_lambda)

            # Define Training procedure
            global_step = tf.Variable(0.0, name="global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            grads_and_vars = optimizer.compute_gradients(cnn.loss)
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
            #train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cnn.loss, global_step=global_step)

            # Initialize all variables
            sess.run(tf.global_variables_initializer())
            # Use the pre-trained word embeddings
            sess.run(cnn.word_embeddings.assign(word_vectors.vectors))

            def train_step(x_batch, y_batch):
                """
                A single training step
                """
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: dropout_keep_prob_train
                }
                _, step, loss, accuracy = sess.run(
                    [train_op, global_step, cnn.loss, cnn.accuracy],
                    feed_dict)
                time_str = datetime.datetime.now().isoformat()
                #print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))

            def dev_step(x_batch, y_batch):
                """
                Evaluates model on a dev set
                """
                # Dropout disabled (keep prob 1.0) for evaluation.
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: dropout_keep_prob_dev,
                }
                step, loss, accuracy, predictions = sess.run(
                    [global_step, cnn.loss, cnn.accuracy, cnn.predictions],
                    feed_dict)
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))

            def test_step(x_batch, y_batch):
                """
                Evaluates model on a test set and save the predictions
                as a list of sentimentExamples as a return
                """
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: dropout_keep_prob_dev,
                }
                predictions = sess.run(cnn.predictions, feed_dict)
                # Save the prediction result as a list of SentimentExample
                sentimentExamples = []
                for i in range(len(x_batch)):
                    sentimentExamples.append(SentimentExample(x_batch[i], predictions[i]))
                return sentimentExamples

            # Generate batches
            # NOTE(review): batch_iter yields a single pass over the data, so
            # the outer loop drives the epochs.
            for epoch in range(num_epochs):
                batches = batch_iter(
                    list(zip(train_xs, train_ys_2cols)), batch_size=batch_size, num_epochs=num_epochs)
                # Training loop. For each batch...
                # num batches = (num training examples / batch_size)
                for batch in batches:
                    x_batch, y_batch = zip(*batch)
                    train_step(x_batch, y_batch)
                print("\nEvaluation:")
                dev_step(X_dev, y_dev_2cols)
                print("")
            return test_step(X_test, y_test_2cols)
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """Yield successive mini-batches covering one pass over `data`.

    The data is converted to a numpy array and (optionally) shuffled once;
    the final batch may be smaller than `batch_size`.

    Note: `num_epochs` is accepted for API compatibility but is not used
    here -- callers drive the epoch loop themselves.
    """
    examples = np.array(data)
    n_examples = len(examples)
    n_batches = int((n_examples - 1) / batch_size) + 1
    if shuffle:
        order = np.random.permutation(np.arange(n_examples))
        examples = examples[order]
    for batch_num in range(n_batches):
        lo = batch_num * batch_size
        hi = min(lo + batch_size, n_examples)
        yield examples[lo:hi]
class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    """
    def __init__(self,
                 sequence_length,
                 num_classes,
                 vocab_size,
                 embedding_size,
                 filter_sizes,
                 num_filters,
                 activation_func='relu',
                 l2_reg_lambda=0.0):
        # sequence_length : The length of sentence.
        # num_classes : Number of classes in the output layer, two in our case (positive and negative).
        # vocab_size : The size of our vocabulary. This is needed to define the size of our embedding layer,
        #              which will have shape [vocabulary_size, embedding_size].
        # embedding_size : The dimensionality of our embeddings.
        # filter_sizes : The number of words we want our convolutional filters to cover. We will have num_filters for each size specified here.
        #                For example, [3, 4, 5] means that we will have filters that slide over 3, 4 and 5 words respectively,
        #                for a total of 3 * num_filters filters.
        # num_filters : The number of filters per filter size
        # activation_func : The activation function we use: "tanh", "relu"

        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.word_embeddings = tf.get_variable(name="word_embeddings", shape=[vocab_size, embedding_size])
            self.embedded_chars = tf.nn.embedding_lookup(self.word_embeddings, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Implement convolution layer and 1-max pooling strategy
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="conv")
                # Apply nonlinearity
                if activation_func == 'tanh':
                    h = tf.nn.tanh(tf.nn.bias_add(conv, b), name = "tanh")
                elif activation_func == 'relu':
                    h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                else:
                    # Fail fast with a clear message instead of hitting a
                    # confusing NameError on `h` below.
                    raise ValueError("unsupported activation_func: %s" % activation_func)
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
# Same task but using LSTM instead
def train_lstm(train_exs, dev_exs, test_exs, word_vectors):
    """Train a dynamic-length LSTM sentiment classifier on the training set.

    Sentences are padded to 60 token ids and embedded; a BasicLSTMCell is
    unrolled with per-example sequence lengths, and only the last relevant
    timestep's output feeds the softmax. Prints loss/accuracy per epoch.
    (This variant only trains and reports; it returns None.)

    Note: the only code change relative to the original is removal of the
    dataset-dump residue that was fused onto the final print line.
    """
    seq_max_len = 60
    train_mat = np.asarray([pad_to_length(np.array(ex.indexed_words), seq_max_len) for ex in train_exs])
    train_seq_lens = np.array([len(ex.indexed_words) for ex in train_exs])
    train_labels = np.array([ex.label for ex in train_exs])
    train_xs = train_mat
    train_ys = train_labels.reshape(-1, 1)
    # Convert the 0/1 labels to one-hot rows of shape (n, 2).
    b = np.zeros((len(train_ys), 2), dtype=np.float32)
    train_ys = np.concatenate(train_ys, axis=0)
    b[np.arange(len(train_ys)), train_ys] = 1.0
    train_ys = b
    # Look up the embedding of every (padded) token id in every sentence.
    embedded_train = []
    for i in range(len(train_xs)):
        tmp = []
        for word in train_xs[i]:
            tmp.append(word_vectors.get_train_embedding(int(word), add=False))
        embedded_train.append(tmp)
    train_xs = embedded_train
    # Parameters
    learning_rate = 0.1
    batch_size = 128
    epochs = 100
    iters = int(len(train_xs)/batch_size)
    # Network Parameters
    seq_max_len = 60 # Sequence max length
    n_hidden = 128 # hidden layer num of features
    n_classes = 2 # linear sequence or not
    # tf Graph input
    x = tf.placeholder("float", [None, seq_max_len, 300])
    y = tf.placeholder("float", [None, n_classes])
    # A placeholder for indicating each sequence length
    seqlen = tf.placeholder(tf.int32, [None])
    # Define weights
    weights = {
        'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
    }
    biases = {
        'out': tf.Variable(tf.random_normal([n_classes]))
    }
    def dynamicRNN(x, seqlen, weights, biases):
        # Prepare data shape to match `rnn` function requirements
        # Current data input shape: (batch_size, n_steps, n_input)
        # Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
        # Unstack to get a list of 'n_steps' tensors of shape (batch_size, n_input)
        x = tf.unstack(x, axis=1)
        # Define a lstm cell with tensorflow
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(n_hidden)
        # Get lstm cell output, providing 'sequence_length' will perform dynamic
        # calculation.
        outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x, dtype=tf.float32,
                                                    sequence_length=seqlen)
        # When performing dynamic calculation, we must retrieve the last
        # dynamically computed output, i.e., if a sequence length is 10, we need
        # to retrieve the 10th output.
        # However TensorFlow doesn't support advanced indexing yet, so we build
        # a custom op that for each sample in batch size, get its length and
        # get the corresponding relevant output.
        # 'outputs' is a list of output at every timestep, we pack them in a Tensor
        # and change back dimension to [batch_size, n_step, n_input]
        print(np.shape(outputs))
        outputs = tf.stack(outputs)
        print(np.shape(outputs))
        outputs = tf.transpose(outputs, [1, 0, 2])
        print(np.shape(outputs))
        # Hack to build the indexing and retrieve the right output.
        batch_size = tf.shape(outputs)[0]
        # Start indices for each sample
        index = tf.range(0, batch_size) * seq_max_len + (seqlen - 1)
        # Indexing
        outputs = tf.gather(tf.reshape(outputs, [-1, n_hidden]), index)
        # Linear activation, using outputs computed above
        return tf.matmul(outputs, weights['out']) + biases['out']
    pred = dynamicRNN(x, seqlen, weights, biases)
    # Define loss and optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
    # Evaluate model
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    # Start training
    with tf.Session() as sess:
        # Run the initializer
        sess.run(init)
        for epoch in range(epochs):
            for iter in range(iters-1):
                batch_x, batch_y, batch_seqlen = [train_xs[iter*batch_size:min((iter+1)*batch_size, len(train_xs))],
                                                  train_ys[iter * batch_size:min((iter + 1) * batch_size, len(train_xs))],
                                                  train_seq_lens[iter * batch_size:min((iter + 1) * batch_size, len(train_xs))]]
                # Run optimization op (backprop)
                sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, seqlen: batch_seqlen})
            # Calculate batch accuracy & loss
            acc, loss = sess.run([accuracy, cost], feed_dict={x: train_xs, y: train_ys, seqlen: train_seq_lens})
            print("Epoch " + str(epoch) + ", Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc))
        print("Optimization Finished!")
life_line_chart/ReadGedcom.py | mustaqimM/life_line_chart | 0 | 12762365 | <reponame>mustaqimM/life_line_chart
from collections import OrderedDict
def read_data(filename):
    """
    Read a gedcom file and create structured data dicts.

    Args:
        filename (str): path to a gedcom file

    Returns:
        tuple: (individuals, families) where each is an OrderedDict mapping
            record pointers (e.g. '@I1@') to nested OrderedDicts of tags;
            every nested dict carries the raw value under 'tag_data'.
    """
    import re

    # A record runs from a level-0 line up to (but excluding) the next level-0 line.
    indi = re.compile(r'0 @I\d+@ INDI.*?(?=\n0)',
                      flags=re.DOTALL | re.MULTILINE)
    fam = re.compile(r'0 @F\d+@ FAM.*?(?=\n0)', flags=re.DOTALL | re.MULTILINE)
    # Close the file deterministically (the original leaked the handle).
    with open(filename, 'r', encoding='utf8') as file_handle:
        content = file_handle.read()
    indi_database = _parse_records(indi, content, max_records=999000)
    fam_database = _parse_records(fam, content, max_records=992000)
    return indi_database, fam_database


def _parse_records(record_regex, content, max_records):
    """Parse every regex match into a nested OrderedDict keyed by gedcom tags."""
    database = OrderedDict()
    # stack[level] holds the dict that lines of that gedcom level attach to.
    # It grows on demand, so records deeper than the original's fixed 8 slots
    # no longer raise IndexError.
    stack = [database]
    for match in record_regex.finditer(content):
        for line in match.group(0).split('\n'):
            level = int(line[0])
            parts = line.split(' ')
            tag_name = parts[1]
            tag_data = " ".join(parts[2:])
            if tag_name not in stack[level]:
                stack[level][tag_name] = OrderedDict({'tag_data': tag_data})
            else:
                # Repeated tag at the same level: concatenate the values.
                stack[level][tag_name]['tag_data'] += '\n' + tag_data
            while len(stack) <= level + 1:
                stack.append(None)
            stack[level + 1] = stack[level][tag_name]
        # Safety limit on record count (debug leftover, kept for parity).
        if len(database) > max_records:
            break
    return database
| 2.78125 | 3 |
animation.py | Manik2000/drinker_fate_analysis | 0 | 12762366 | <reponame>Manik2000/drinker_fate_analysis<filename>animation.py
import plotly.graph_objects as go
def draw_figure(d, drinker, cars, add_frames=False):
    """
    Return a plotly figure containing an animation.
    :param d: half-width of the road (the road surface spans y = 25-d .. 25+d)
    :param drinker: drinker positions, one (x, y) per timestep
    :param cars: cars' positions, one list of car tuples per timestep
    :param add_frames: boolean value deciding if to add frames to an animation
    """
    fig = go.Figure()
    # World coordinates are fixed: 1000 units wide, 50 units tall.
    fig.update_xaxes(range=[0, 1000])
    fig.update_yaxes(range=[0, 50])
    # create a road
    fig.add_shape(
        type="rect",
        x0=0, y0=25-d, x1=1000, y1=25+d,
        fillcolor='black',
        layer="below"
    )
    # grey area above the road
    fig.add_shape(
        type="rect",
        x0=0, y0=25+d, x1=1000, y1=50,
        fillcolor='#696969',
        layer="below"
    )
    # grey area below the road
    fig.add_shape(
        type="rect",
        x0=0, y0=0, x1=1000, y1=25-d,
        fillcolor='#696969',
        layer="below"
    )
    # dashed white center line of the road
    fig.add_shape(
        type="line",
        x0=0, y0=25, x1=1000, y1=25,
        line=dict(
            color="white",
            width=3,
            dash="dash"
        )
    )
    # Ensure every shape is drawn underneath the data traces.
    fig.update_shapes(layer="below")
    # Two empty placeholder traces; animation frames below replace their data
    # (frame trace 0 = cars, frame trace 1 = drinker).
    fig.add_trace(go.Scatter())
    fig.add_trace(go.Scatter())
    fig.update_xaxes(fixedrange=True, showgrid=False)
    fig.update_yaxes(fixedrange=True, showgrid=False)
    # animation setup
    fig.update_layout(
        autosize=False,
        width=1000,
        height=600,
        updatemenus=[
            dict(
                type="buttons",
                buttons=[dict(
                    # Padding spaces widen the button hit area.
                    label=15 * " " + "Play" + 15 * " ",
                    method="animate",
                    args=[
                        None,
                        {"frame": {"duration": 120, "redraw": False},
                         "fromcurrent": True,
                         "transition": {"duration": 1}}
                    ]
                ),
                    {
                        "args": [[None], {"frame": {"duration": 0, "redraw": False},
                                          "mode": "immediate",
                                          "transition": {"duration": 0}}],
                        "label": 15 * " " + "Pause" + 15 * " ",
                        "method": "animate"
                    },
                ],
                showactive=True,
                direction='left',
                x=0.1,
                xanchor="left",
                y=-0.2,
                yanchor="bottom",
                font={'size': 20}
            ),
        ],
        title={
            'text': "Drinker fate simulation",
            'y': 0.95,
            'x': 0.5,
            'font': {'size': 40},
            'xanchor': 'center',
            'yanchor': 'top'}
    )
    # adding frames
    if add_frames:
        # One frame per timestep: first Scatter redraws every car, second the drinker.
        fig.frames = [go.Frame(
            data=[
                go.Scatter(
                    # NOTE(review): each car tuple k appears to be (x, y, flag);
                    # the offset k[2]*26 presumably shifts the marker by the car
                    # length depending on travel direction -- confirm against
                    # the simulation code that produces `cars`.
                    x=[k[0]-k[2]*26 for k in y],
                    y=[k[1] for k in y],
                    mode="markers",
                    marker=dict(color="blue",
                                size=42, symbol="square")
                ),
                go.Scatter(
                    x=[x[0]],
                    y=[x[1]],
                    mode="markers",
                    marker=dict(color="red", size=10))
            ]) for x, y in zip(drinker, cars)]
    return fig
| 3.140625 | 3 |
tests/view/core/public/test_shop.py | katomaso/django-market | 0 | 12762367 | <reponame>katomaso/django-market<gh_stars>0
# coding: utf-8
import re
import logging
import random
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from django.core import mail
from market.core import models
from tests.factories import core as factory
logger = logging.getLogger(__name__)
# Matches http(s) URLs -- presumably used to pull links (e.g. verification
# links) out of outgoing email bodies; confirm against the tests that use it.
re_link = re.compile(r'(https?\://[^\s<>\(\)\'",\|]+)')
class TestUser(WebTest):
    """Basic test of availability of registration."""
    def test_registration_without_user(self):
        """Fresh registration should lead to unverified user with a vendor."""
        self.fail("Unimplemented")
    def test_registration_unverified_user(self):
        """Unverified user should end up with the same vendor as new user."""
        # NOTE(review): unfinished test -- it only creates a user, performs no
        # registration flow and makes no assertions, so it passes vacuously.
        email = "<EMAIL>"
        user = models.User.objects.create_user(email=email, password="password")
    def test_registration_verified_user_no_ID(self):
        """Test basic vendor registration flow.
        A verified user registers a vendor WITHOUT a business ID.
        """
        email = "<EMAIL>"
        user = models.User.objects.create_verified_user(email=email, password="password")
        # Pick any top-level category for the new vendor.
        category = random.choice(models.Category.objects.filter(parent__isnull=True))
        webform = self.app.get(reverse('admin-vendor'), user=email).form
        # Drop any mail produced while setting up, so the count asserted at the
        # end reflects only the registration itself.
        mail.outbox.clear()
        vendor_template = {
            'name': "My Vendor",
            'motto': "We sell responsible products",
            'category': category.id,
            'description': "Locally produced without harmful chemicals.",
        }
        address_template = {
            'street': "", # villager address: house number only, no street name
            'city': "Louňovice 15",
            'country': "Praha-východ",
            'business_id': "",
            'tax_id': "",
        }
        webform['vendor-name'] = vendor_template['name']
        webform['vendor-motto'] = vendor_template['motto']
        webform['vendor-category_1'] = vendor_template['category']
        webform['vendor-description'] = vendor_template['description']
        for key, value in address_template.items():
            webform["address-" + key] = value
        form_response = webform.submit()
        self.assertRedirects(form_response, reverse("admin-home"))
        # now we should have a vendor without business ID
        # that means they will not pay VAT but cannot provide goods for more
        # than 20 000CZK a year according to the law
        # Should redirect to newly created vendor
        self.assertTrue(models.Vendor.objects.filter(user=user, active=True).exists())
        # the vendor owner should receive email about the rules of non-legal vendor
        self.assertEqual(len(mail.outbox), 1)
    def test_registration_verified_user_with_ID(self):
        """Test basic vendor registration flow.
        A verified user WITH a business ID registers a vendor; the address form
        is auto-filled from the user's shipping address.
        """
        email = "<EMAIL>"
        user = models.User.objects.create_verified_user(email=email, password="password")
        address = factory.Address.create(  # give the person a business ID and tax ID
            user_shipping=user, business_id="91283464", tax_id="CZ91283464")
        category = random.choice(models.Category.objects.filter(parent__isnull=True))
        webform = self.app.get(reverse('admin-vendor'), user=email).form
        mail.outbox.clear()
        vendor_template = {
            'name': "My Vendor",
            'motto': "We sell responsible products",
            'category': category.id,
            'description': "Locally produced without harmful chemicals.",
        }
        webform['vendor-name'] = vendor_template['name']
        webform['vendor-motto'] = vendor_template['motto']
        webform['vendor-category_1'] = vendor_template['category']
        webform['vendor-description'] = vendor_template['description']
        # address form will be auto-filled from user's address
        form_response = webform.submit()
        self.assertRedirects(form_response, reverse("admin-home"))
        self.assertTrue(models.Vendor.objects.filter(user=user, active=True).exists())
        # no email necessary (user is verified)
        self.assertEqual(len(mail.outbox), 0)
| 2.46875 | 2 |
summariser/reward_feature/js_rewarder.py | UKPLab/ijcai2019-relis | 5 | 12762368 | <filename>summariser/reward_feature/js_rewarder.py
from summariser.utils.misc import jsd,normaliseList
from nltk.stem.porter import PorterStemmer
from collections import OrderedDict
from nltk.corpus import stopwords
from summariser.utils.data_helpers import sent2stokens_wostop,extract_ngrams2
from resources import LANGUAGE,PROCESSED_PATH,FEATURE_DIR
from summariser.utils.corpus_reader import CorpusReader
from summariser.utils.reader import readSummaries
import numpy as np
import os
class JSRewardGenerator:
    """Scores summaries by Jensen-Shannon divergence against the source documents.

    The reward for a summary is the JS divergence between the summary's n-gram
    distribution and the n-gram distribution over all document sentences
    (stemmed, stop words removed for unigrams).
    """
    def __init__(self, docs, nlist=None):
        """
        :param docs: iterable of (doc_name, sentence_list) pairs
        :param nlist: n-gram sizes to count; defaults to [1] (unigrams only)
        """
        self.stopwords = set(stopwords.words(LANGUAGE))
        self.stemmer = PorterStemmer()
        self.docs = docs
        self.sentences = []
        for doc in docs:
            self.sentences.extend(doc[1])
        # Passing vocab=None below makes getWordDistribution BUILD the
        # vocabulary from the documents; the result is reused for summaries.
        self.vocab = None
        if nlist is None:
            self.nlist = [1]
        else:
            self.nlist = nlist
        self.vocab, self.doc_word_distribution = self.getWordDistribution(
            self.sentences, self.vocab, self.nlist)

    def __call__(self, summary_list):
        """Return one JS-divergence value per summary (a list of sentence indices)."""
        js_list = []
        for summary_idx in summary_list:
            summary = [self.sentences[idx] for idx in summary_idx]
            _, sum_word_distribution = self.getWordDistribution(
                summary, self.vocab, self.nlist)
            js = jsd(sum_word_distribution, self.doc_word_distribution)
            # BUG FIX: the original tested `js is float('nan')`, which is always
            # False (identity against a freshly created float object), so NaN
            # values slipped through silently.  NaN is the only value for which
            # `js != js` holds, so this check now catches it.
            if js != js or js < 0. or js > 1.:
                print('Invalid JS Divergence!')
            js_list.append(js)
        return js_list

    def getWordDistribution(self, sent_list, vocab, nlist):
        """Count stemmed n-grams over sent_list.

        :param vocab: if None, build the vocabulary from sent_list; otherwise
            only count n-grams that are already present in vocab.
        :return: (vocabulary word list, parallel list of counts)
        """
        if vocab is None:
            vocab_list = []
            build_vocab = True
        else:
            vocab_list = vocab
            build_vocab = False
        dic = OrderedDict((el, 0) for el in vocab_list)
        for sent in sent_list:
            ngrams = []
            for n in nlist:
                if n == 1:
                    # Unigrams: stop words removed and tokens stemmed.
                    ngrams.extend(sent2stokens_wostop(sent, self.stemmer, self.stopwords, LANGUAGE))
                else:
                    ngrams.extend(extract_ngrams2([sent], self.stemmer, LANGUAGE, n))
            for ww in ngrams:
                if ww in dic:
                    dic[ww] += 1
                elif build_vocab:
                    dic[ww] = 1
        return list(dic.keys()), list(dic.values())
if __name__ == '__main__':
    # Compute JS(1+2-gram) rewards for every sampled summary of every topic and
    # write one "<sentence-indices>\t<js-value>" line per summary to
    # FEATURE_DIR/<dataset>/<topic>/js_12gram.
    dataset = 'DUC2004' ## DUC2001, DUC2002, DUC2004
    sample_num = 9999
    out_base = os.path.join(FEATURE_DIR, dataset)
    if not os.path.exists(out_base):
        os.makedirs(out_base)
    ### read documents and ref. summaries
    reader = CorpusReader(PROCESSED_PATH)
    data = reader.get_data(dataset)
    # (unused accumulator variables from the original were removed)
    for topic, docs, models in data:
        print('read DATA {}, TOPIC {}'.format(dataset, topic))
        summs, ref_values_dic = readSummaries(dataset, topic, 'rouge', sample_num)
        js = JSRewardGenerator(docs, [1, 2])
        js_values = js(summs)
        print('topic {}, js value num {}, mean {}, max {}, min {}'.format(topic, len(js_values), np.mean(js_values), np.max(js_values), np.min(js_values)))
        # Build the whole output in memory, then write it in one shot.
        out_str = ''.join('{}\t{}\n'.format(summs[ii], vv) for ii, vv in enumerate(js_values))
        topic_dir = os.path.join(out_base, topic)
        if not os.path.exists(topic_dir):
            os.makedirs(topic_dir)
        fpath = os.path.join(topic_dir, 'js_12gram')
        # BUG FIX: use a context manager so the handle is closed even on error
        # (the original relied on explicit close with no exception safety).
        with open(fpath, 'w') as ff:
            ff.write(out_str)
| 2.328125 | 2 |
test/messenger_test.py | AustinHellerRepo/SocketQueuedMessageFramework | 1 | 12762369 | from __future__ import annotations
import unittest
from src.austin_heller_repo.socket_queued_message_framework import ClientMessenger, ServerMessenger, ClientServerMessage, ClientServerMessageTypeEnum, Structure, StructureStateEnum, StructureFactory, StructureTransitionException, StructureInfluence, SourceTypeEnum, ClientMessengerFactory, ServerMessengerFactory
from austin_heller_repo.socket import ClientSocketFactory, ServerSocketFactory, ReadWriteSocketClosedException
from austin_heller_repo.common import HostPointer
from austin_heller_repo.kafka_manager import KafkaSequentialQueueFactory, KafkaManager, KafkaWrapper, KafkaManagerFactory
from austin_heller_repo.threading import start_thread, Semaphore, SingletonMemorySequentialQueueFactory
from typing import List, Tuple, Dict, Callable, Type
import uuid
import time
from datetime import datetime
from abc import ABC, abstractmethod
import multiprocessing as mp
import matplotlib.pyplot as plt
import math
# Module-wide debug/configuration switches for the components under test.
is_socket_debug_active = False  # verbose output from the socket factories
is_client_messenger_debug_active = False  # verbose output from ClientMessenger
is_server_messenger_debug_active = False  # verbose output from ServerMessenger
is_kafka_debug_active = False  # verbose output from the Kafka manager
# When True the server uses a Kafka-backed sequential queue (requires a local
# broker on port 9092); otherwise an in-memory singleton queue is used.
is_kafka_sequential_queue = False
# Presumably gates plotting of timing results via matplotlib -- confirm in the
# tests that read it.
is_plotted = False
def get_default_local_host_pointer() -> HostPointer:
    """Host pointer for the local test server: all interfaces, port 36429."""
    local_address = "0.0.0.0"
    local_port = 36429
    return HostPointer(
        host_address=local_address,
        host_port=local_port
    )
def get_default_kafka_host_pointer() -> HostPointer:
    """Host pointer for the local Kafka broker on its default port (9092)."""
    broker_address = "0.0.0.0"
    broker_port = 9092
    return HostPointer(
        host_address=broker_address,
        host_port=broker_port
    )
def get_default_kafka_manager_factory() -> KafkaManagerFactory:
    """Kafka manager factory wired to the local broker with the test defaults:
    single-partition, non-replicated topics and tight polling intervals."""
    broker_wrapper = KafkaWrapper(
        host_pointer=get_default_kafka_host_pointer()
    )
    return KafkaManagerFactory(
        kafka_wrapper=broker_wrapper,
        read_polling_seconds=0,
        is_cancelled_polling_seconds=0.01,
        new_topic_partitions_total=1,
        new_topic_replication_factor=1,
        remove_topic_cluster_propagation_blocking_timeout_seconds=30,
        is_debug=is_kafka_debug_active
    )
def get_default_client_messenger_factory() -> ClientMessengerFactory:
    """Client messenger factory that connects to the local test server."""
    socket_factory = ClientSocketFactory(
        to_server_packet_bytes_length=4096,
        is_debug=is_socket_debug_active
    )
    return ClientMessengerFactory(
        client_socket_factory=socket_factory,
        server_host_pointer=get_default_local_host_pointer(),
        client_server_message_class=BaseClientServerMessage,
        is_debug=is_client_messenger_debug_active
    )
def get_default_server_messenger_factory() -> ServerMessengerFactory:
    """Build the ServerMessengerFactory used by the tests.

    Side effect: when ``is_kafka_sequential_queue`` is set, a fresh uniquely
    named Kafka topic is created and the call blocks until creation completes.
    NOTE(review): ``sequential_queue_factory`` is constructed below but never
    passed to ``ServerMessengerFactory`` -- this looks like dead code or a
    missing constructor argument; confirm against the framework's signature.
    """
    if is_kafka_sequential_queue:
        kafka_topic_name = str(uuid.uuid4())
        kafka_manager = get_default_kafka_manager_factory().get_kafka_manager()
        # Blocks until the topic exists on the cluster.
        kafka_manager.add_topic(
            topic_name=kafka_topic_name
        ).get_result()
        sequential_queue_factory = KafkaSequentialQueueFactory(
            kafka_manager=kafka_manager,
            kafka_topic_name=kafka_topic_name
        )
    else:
        sequential_queue_factory = SingletonMemorySequentialQueueFactory()
    return ServerMessengerFactory(
        server_socket_factory_and_local_host_pointer_per_source_type={
            BaseSourceTypeEnum.Main: (
                ServerSocketFactory(
                    to_client_packet_bytes_length=4096,
                    listening_limit_total=10,
                    accept_timeout_seconds=10.0,
                    is_debug=is_socket_debug_active
                ),
                get_default_local_host_pointer()
            )
        },
        client_server_message_class=BaseClientServerMessage,
        source_type_enum_class=BaseSourceTypeEnum,
        server_messenger_source_type=BaseSourceTypeEnum.ServerMessenger,
        structure_factory=ButtonStructureFactory(),
        is_debug=is_server_messenger_debug_active
    )
class BaseClientServerMessageTypeEnum(ClientServerMessageTypeEnum):
    """Every wire-level message type exchanged by the test clients and server."""
    HelloWorld = "hello_world" # basic test
    Announce = "announce" # announces name to structure
    AnnounceFailed = "announce_failed" # announce failed to apply to structure
    PressButton = "press_button" # structural influence, three presses cause broadcast of transmission to users
    ResetButton = "reset_button" # structural_influence, resets number of presses and informs button pressers that it was reset
    ResetTransmission = "reset_transmission" # directed to specific users that pressed the button
    ThreePressesTransmission = "three_presses_transmission" # broadcasts to all users that the button was pressed three times and then resets the button
    PingRequest = "ping_request" # pings the server and gets a response
    PingResponse = "ping_response" # the response from the ping request
    EchoRequest = "echo_request" # records the messages that should be echoed back
    EchoResponse = "echo_response" # the response containing the echo message
    ErrorOnGetClientServerMessageType = "error_on_get_client_server_message_type"
    ErrorRequest = "error_request" # a request that throws an exception as defined in the constructor
    ErrorResponse = "error_response" # the response that will throw a predefined exception
    PowerButton = "power_button" # increments a child structure by the number of presses processed by the parent structure
    PowerOverloadTransmission = "power_overload_transmission" # if the power button is pressed three times at any stage of normal button presses an overload transmission is sent out to all clients involved
    PowerButtonFailed = "power_button_failed" # power was already overloaded when attempted
    TimerRequest = "timer_request" # set a timer for a later response
    TimerResponse = "timer_response" # a response scheduled by the timer_request
class BaseSourceTypeEnum(SourceTypeEnum):
    """Identifies where a message originated: a client socket or the server itself."""
    Main = "main"
    ServerMessenger = "server_messenger"
class BaseClientServerMessage(ClientServerMessage, ABC):
    """Abstract base for all test messages; binds them to the shared type enum."""
    @classmethod
    def get_client_server_message_type_class(cls) -> Type[ClientServerMessageTypeEnum]:
        return BaseClientServerMessageTypeEnum
class HelloWorldBaseClientServerMessage(BaseClientServerMessage):
    """Minimal message with no payload and no destination; used by the basic test."""
    def __init__(self):
        super().__init__(
            destination_uuid=None
        )
        pass
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.HelloWorld
    def to_json(self) -> Dict:
        json_object = super().to_json()
        # Not directed at a specific client, so the destination field is dropped.
        del json_object["destination_uuid"]
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        # No error response is sent for this message type.
        return None
class AnnounceBaseClientServerMessage(BaseClientServerMessage):
    """Announces the sender's name to the structure."""
    def __init__(self, *, name: str):
        super().__init__(
            destination_uuid=None
        )
        self.__name = name
    def get_name(self) -> str:
        return self.__name
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.Announce
    def to_json(self) -> Dict:
        json_object = super().to_json()
        del json_object["destination_uuid"]
        json_object["name"] = self.__name
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        # If the structure rejected the announce, tell the announcing client.
        print(f"{datetime.utcnow()}: AnnounceBaseClientServerMessage: get_structural_error_client_server_message_response: structure state: {structure_transition_exception.get_structure_state()}")
        return AnnounceFailedBaseClientServerMessage(
            destination_uuid=destination_uuid
        )
class AnnounceFailedBaseClientServerMessage(BaseClientServerMessage):
    """Directed reply telling a client that its announce was rejected."""
    def __init__(self, *, destination_uuid: str):
        super().__init__(
            destination_uuid=destination_uuid
        )
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.AnnounceFailed
    def to_json(self) -> Dict:
        json_object = super().to_json()
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class PressButtonBaseClientServerMessage(BaseClientServerMessage):
    """Structural influence: a single button press (no payload, no destination)."""
    def __init__(self):
        super().__init__(
            destination_uuid=None
        )
        pass
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.PressButton
    def to_json(self) -> Dict:
        json_object = super().to_json()
        del json_object["destination_uuid"]
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class ResetButtonBaseClientServerMessage(BaseClientServerMessage):
    """Structural influence: resets the press count on the button structure."""
    def __init__(self):
        super().__init__(
            destination_uuid=None
        )
        pass
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.ResetButton
    def to_json(self) -> Dict:
        json_object = super().to_json()
        del json_object["destination_uuid"]
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class ResetTransmissionBaseClientServerMessage(BaseClientServerMessage):
    """Directed notification sent to button pressers when the button is reset."""
    def __init__(self, *, destination_uuid: str):
        super().__init__(
            destination_uuid=destination_uuid
        )
        pass
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.ResetTransmission
    def to_json(self) -> Dict:
        json_object = super().to_json()
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class ThreePressesTransmissionBaseClientServerMessage(BaseClientServerMessage):
    """Directed broadcast that the button reached three presses; carries the
    current power label of the child power structure."""
    def __init__(self, *, power: str, destination_uuid: str):
        super().__init__(
            destination_uuid=destination_uuid
        )
        self.__power = power
    def get_power(self) -> str:
        return self.__power
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.ThreePressesTransmission
    def to_json(self) -> Dict:
        json_object = super().to_json()
        json_object["power"] = self.__power
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class PingRequestBaseClientServerMessage(BaseClientServerMessage):
    """Client-to-server ping; answered with a PingResponse."""
    def __init__(self):
        super().__init__(
            destination_uuid=None
        )
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.PingRequest
    def to_json(self) -> Dict:
        json_object = super().to_json()
        del json_object["destination_uuid"]
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class PingResponseBaseClientServerMessage(BaseClientServerMessage):
    """Directed reply to a PingRequest, carrying the server-side ping counter."""
    def __init__(self, *, ping_index: int, destination_uuid: str):
        super().__init__(
            destination_uuid=destination_uuid
        )
        self.__ping_index = ping_index
    def get_ping_index(self) -> int:
        return self.__ping_index
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.PingResponse
    def to_json(self) -> Dict:
        json_object = super().to_json()
        json_object["ping_index"] = self.__ping_index
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class EchoRequestBaseClientServerMessage(BaseClientServerMessage):
    """Asks the server to echo ``message`` back; ``is_ordered`` is serialized
    for the server side (no getter is exposed here)."""
    def __init__(self, *, message: str, is_ordered: bool):
        super().__init__(
            destination_uuid=None
        )
        self.__message = message
        self.__is_ordered = is_ordered
    def get_message(self) -> str:
        return self.__message
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.EchoRequest
    def to_json(self) -> Dict:
        json_object = super().to_json()
        del json_object["destination_uuid"]
        json_object["message"] = self.__message
        json_object["is_ordered"] = self.__is_ordered
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class EchoResponseBaseClientServerMessage(BaseClientServerMessage):
    """Directed reply carrying the echoed message back to the requester."""
    def __init__(self, *, message: str, destination_uuid: str):
        super().__init__(
            destination_uuid=destination_uuid
        )
        self.__message = message
    def get_message(self) -> str:
        return self.__message
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.EchoResponse
    def to_json(self) -> Dict:
        json_object = super().to_json()
        json_object["message"] = self.__message
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class ErrorRequestBaseClientServerMessage(BaseClientServerMessage):
    """Message that can be configured to raise at specific lifecycle points.

    ``constructor_exception`` raises immediately in the constructor.
    ``is_constructor_exception_to_set`` is copied into ``constructor_exception``
    by the constructor, so the exception only fires when the message is
    re-constructed after a to_json/parse round trip (i.e. on the receiving
    side).  ``to_json_exception`` and
    ``get_structural_error_client_server_message_response_exception`` raise in
    the correspondingly named methods.
    """
    def __init__(self, *, is_constructor_exception_to_set: str = None, constructor_exception: str = None, to_json_exception: str = None, get_structural_error_client_server_message_response_exception: str = None, response_constructor_arguments: Dict = None):
        super().__init__(
            destination_uuid=None
        )
        self.__is_constructor_exception_to_set = is_constructor_exception_to_set
        self.__constructor_exception = constructor_exception
        self.__to_json_exception = to_json_exception
        self.__get_structural_error_client_server_message_response_exception = get_structural_error_client_server_message_response_exception
        self.__response_constructor_arguments = response_constructor_arguments
        if self.__constructor_exception is not None:
            raise Exception(self.__constructor_exception)
        # Arm the deferred exception: it will serialize as constructor_exception
        # and therefore raise when the receiving side reconstructs the message.
        if self.__is_constructor_exception_to_set is not None:
            self.__constructor_exception = self.__is_constructor_exception_to_set
            self.__is_constructor_exception_to_set = None
    def get_response_constructor_arguments(self) -> Dict:
        return self.__response_constructor_arguments
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.ErrorRequest
    def to_json(self) -> Dict:
        if self.__to_json_exception is not None:
            raise Exception(self.__to_json_exception)
        json_object = super().to_json()
        del json_object["destination_uuid"]
        json_object["is_constructor_exception_to_set"] = self.__is_constructor_exception_to_set
        json_object["constructor_exception"] = self.__constructor_exception
        json_object["to_json_exception"] = self.__to_json_exception
        json_object["get_structural_error_client_server_message_response_exception"] = self.__get_structural_error_client_server_message_response_exception
        json_object["response_constructor_arguments"] = self.__response_constructor_arguments
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        if self.__get_structural_error_client_server_message_response_exception is not None:
            raise Exception(self.__get_structural_error_client_server_message_response_exception)
        return None
class ErrorResponseBaseClientServerMessage(BaseClientServerMessage):
    """Directed response variant of ErrorRequest with the same deferred-exception
    mechanics (see ErrorRequestBaseClientServerMessage)."""
    def __init__(self, *, destination_uuid: str, is_constructor_exception_to_set: str = None, constructor_exception: str = None, to_json_exception: str = None, get_structural_error_client_server_message_response_exception: str = None):
        super().__init__(
            destination_uuid=destination_uuid
        )
        self.__is_constructor_exception_to_set = is_constructor_exception_to_set
        self.__constructor_exception = constructor_exception
        self.__to_json_exception = to_json_exception
        self.__get_structural_error_client_server_message_response_exception = get_structural_error_client_server_message_response_exception
        if self.__constructor_exception is not None:
            raise Exception(self.__constructor_exception)
        if self.__is_constructor_exception_to_set is not None:
            self.__constructor_exception = self.__is_constructor_exception_to_set
            self.__is_constructor_exception_to_set = None
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.ErrorResponse
    def to_json(self) -> Dict:
        if self.__to_json_exception is not None:
            raise Exception(self.__to_json_exception)
        json_object = super().to_json()
        json_object["is_constructor_exception_to_set"] = self.__is_constructor_exception_to_set
        json_object["constructor_exception"] = self.__constructor_exception
        json_object["to_json_exception"] = self.__to_json_exception
        json_object["get_structural_error_client_server_message_response_exception"] = self.__get_structural_error_client_server_message_response_exception
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        if self.__get_structural_error_client_server_message_response_exception is not None:
            raise Exception(self.__get_structural_error_client_server_message_response_exception)
        return None
class PowerButtonBaseClientServerMessage(BaseClientServerMessage):
    """Structural influence for the power structure; anonymous pressers are
    excluded from overload notifications."""
    def __init__(self, *, is_anonymous: bool):
        super().__init__(
            destination_uuid=None
        )
        self.__is_anonymous = is_anonymous # if an overload should not be sent back to them due to this message
    def is_anonymous(self) -> bool:
        return self.__is_anonymous
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.PowerButton
    def to_json(self) -> Dict:
        json_object = super().to_json()
        del json_object["destination_uuid"]
        json_object["is_anonymous"] = self.__is_anonymous
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        # A press that is not permitted by the structure's current state (e.g.
        # already overloaded) is answered with a failure message.
        return PowerButtonFailedBaseClientServerMessage(
            destination_uuid=destination_uuid
        )
class PowerOverloadTransmissionBaseClientServerMessage(BaseClientServerMessage):
    """Directed notification that the power structure became overloaded."""
    def __init__(self, *, destination_uuid: str):
        super().__init__(
            destination_uuid=destination_uuid
        )
        pass
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.PowerOverloadTransmission
    def to_json(self) -> Dict:
        json_object = super().to_json()
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class PowerButtonFailedBaseClientServerMessage(BaseClientServerMessage):
    """Directed reply: the power button press was rejected (already overloaded)."""
    def __init__(self, *, destination_uuid: str):
        super().__init__(
            destination_uuid=destination_uuid
        )
        pass
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.PowerButtonFailed
    def to_json(self) -> Dict:
        json_object = super().to_json()
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class TimerRequestBaseClientServerMessage(BaseClientServerMessage):
    """Asks the server to send back ``message`` after ``seconds`` have elapsed."""
    def __init__(self, *, message: str, seconds: float):
        super().__init__(
            destination_uuid=None
        )
        self.__message = message
        self.__seconds = seconds
    def get_message(self) -> str:
        return self.__message
    def get_seconds(self) -> float:
        return self.__seconds
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.TimerRequest
    def to_json(self) -> Dict:
        json_object = super().to_json()
        del json_object["destination_uuid"]
        json_object["message"] = self.__message
        json_object["seconds"] = self.__seconds
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class TimerResponseBaseClientServerMessage(BaseClientServerMessage):
    """Directed, delayed response produced by a TimerRequest."""
    def __init__(self, *, message: str, destination_uuid: str):
        super().__init__(
            destination_uuid=destination_uuid
        )
        self.__message = message
    def get_message(self) -> str:
        return self.__message
    @classmethod
    def get_client_server_message_type(cls) -> ClientServerMessageTypeEnum:
        return BaseClientServerMessageTypeEnum.TimerResponse
    def to_json(self) -> Dict:
        json_object = super().to_json()
        json_object["message"] = self.__message
        return json_object
    def get_structural_error_client_server_message_response(self, structure_transition_exception: StructureTransitionException, destination_uuid: str) -> ClientServerMessage:
        return None
class PowerStructureStateEnum(StructureStateEnum):
    """Power level of the PowerStructure, driven by power-button presses."""
    Underpowered = "underpower"
    Powered = "powered"
    Overpowered = "overpowered"
class PowerStructure(Structure):
    """Child structure that counts power-button presses.

    States: Underpowered for presses 1-2, Powered at press 3, Overpowered at
    press 4 (which also broadcasts PowerOverloadTransmission to every
    non-anonymous presser recorded so far).  No transition is registered out
    of Overpowered, so a fifth press is rejected by the framework and the
    sender receives PowerButtonFailed via the message's error response.
    """
    def __init__(self):
        super().__init__(
            states=PowerStructureStateEnum,
            initial_state=PowerStructureStateEnum.Underpowered
        )
        # Number of power-button presses processed so far.
        self.__power_total = 0
        # Non-anonymous pressers awaiting an overload notification.
        self.__source_uuids_to_inform_on_power_overload = []  # type: List[str]
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.PowerButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=PowerStructureStateEnum.Underpowered,
            end_structure_state=PowerStructureStateEnum.Underpowered,
            on_transition=self.__power_button_pressed
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.PowerButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=PowerStructureStateEnum.Powered,
            end_structure_state=PowerStructureStateEnum.Powered,
            on_transition=self.__power_button_pressed
        )
    def on_client_connected(self, *, source_uuid: str, source_type: SourceTypeEnum, tag_json: Dict):
        # Clients never connect directly to this child structure.
        raise Exception(f"Unexpected client connected: {source_uuid}: {source_type}: {tag_json}")
    def add_source_uuid_for_power_overload_transmission(self, *, source_uuid: str):
        """Record a presser to notify when the structure overloads (deduplicated)."""
        if source_uuid not in self.__source_uuids_to_inform_on_power_overload:
            self.__source_uuids_to_inform_on_power_overload.append(source_uuid)
    def get_power(self) -> str:
        """Map the press count to a label: <3 under, ==3 powered, >3 overpowered."""
        if self.__power_total < 3:
            return "underpowered"
        elif self.__power_total == 3:
            return "powered"
        else:
            return "overpowered"
    def __power_button_pressed(self, structure_influence: StructureInfluence):
        """Transition handler for PowerButton messages from Main sources."""
        print(f"{datetime.utcnow()}: PowerStructure: __power_button_pressed: start")
        # NOTE(review): debug print left in; remove once the tests stabilize.
        print(f"get state: {self.get_state()}")
        if structure_influence.get_source_type() != BaseSourceTypeEnum.Main:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        power_button = structure_influence.get_client_server_message()  # type: PowerButtonBaseClientServerMessage
        source_uuid = structure_influence.get_source_uuid()
        if not power_button.is_anonymous():
            self.add_source_uuid_for_power_overload_transmission(
                source_uuid=source_uuid
            )
        self.__power_total += 1
        if self.__power_total == 3:
            # set the state to "powered"
            self.set_state(
                structure_state=PowerStructureStateEnum.Powered
            )
        elif self.__power_total == 4:
            # set the state to "overpowered"
            # NOTE this will also permit an impossible state change if another power button message is sent
            self.set_state(
                structure_state=PowerStructureStateEnum.Overpowered
            )
            # NOTE(review): the loop variable below shadows the presser's
            # `source_uuid` above; harmless here, but confusing.
            for source_uuid in self.__source_uuids_to_inform_on_power_overload:
                self.send_client_server_message(
                    client_server_message=PowerOverloadTransmissionBaseClientServerMessage(
                        destination_uuid=source_uuid
                    )
                )
            self.__source_uuids_to_inform_on_power_overload.clear()
        print(f"{datetime.utcnow()}: PowerStructure: __power_button_pressed: end")
    def dispose(self):
        """No resources to release."""
        pass
class ButtonStructureStateEnum(StructureStateEnum):
    """States for ButtonStructure: number of presses since the last reset."""
    ZeroPresses = "zero_presses"
    OnePress = "one_press"
    TwoPresses = "two_presses"
    ThreePresses = "three_presses"
class ButtonStructure(Structure):
    """Root structure driving the button test scenarios.

    Counts button presses toward a ThreePressesTransmission, handles resets,
    ping/echo/error/timer requests, and forwards PowerButton messages to a
    child PowerStructure.
    """
    def __init__(self):
        super().__init__(
            states=ButtonStructureStateEnum,
            initial_state=ButtonStructureStateEnum.ZeroPresses
        )
        # uuids of clients connected with the Main source type
        self.__main_source_uuids = [] # type: List[str]
        # uuids that pressed the button since the last reset / three-press cycle
        self.__pressed_button_source_uuids = [] # type: List[str]
        # announced display names keyed by client uuid
        self.__name_per_client_uuid = {} # type: Dict[str, str]
        self.__presses_total = 0
        self.__pings_total = 0
        # Child structure that receives forwarded PowerButton messages.
        self.__power_structure = PowerStructure()
        self.register_child_structure(
            structure=self.__power_structure
        )
        # Announce is only accepted before any press.
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.Announce,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.ZeroPresses,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__name_announced
        )
        # PressButton advances Zero -> One -> Two -> Three.
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.PressButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.ZeroPresses,
            end_structure_state=ButtonStructureStateEnum.OnePress,
            on_transition=self.__button_pressed
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.PressButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.OnePress,
            end_structure_state=ButtonStructureStateEnum.TwoPresses,
            on_transition=self.__button_pressed
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.PressButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.TwoPresses,
            end_structure_state=ButtonStructureStateEnum.ThreePresses,
            on_transition=self.__button_pressed
        )
        # ResetButton returns any pre-ThreePresses state to ZeroPresses.
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.ResetButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.ZeroPresses,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__button_reset
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.ResetButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.OnePress,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__button_reset
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.ResetButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.TwoPresses,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__button_reset
        )
        # The server messenger's own transmission event closes the cycle.
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.ThreePressesTransmission,
            from_source_type=BaseSourceTypeEnum.ServerMessenger,
            start_structure_state=ButtonStructureStateEnum.ThreePresses,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__three_presses_transmission_sent
        )
        # Request/response style messages are only accepted at ZeroPresses.
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.PingRequest,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.ZeroPresses,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__ping_requested
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.EchoRequest,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.ZeroPresses,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__echo_requested
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.ErrorRequest,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.ZeroPresses,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__error_requested
        )
        # PowerButton is a self-loop in every pre-ThreePresses state; it is
        # forwarded to the child PowerStructure rather than handled here.
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.PowerButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.ZeroPresses,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__power_button_pressed
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.PowerButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.OnePress,
            end_structure_state=ButtonStructureStateEnum.OnePress,
            on_transition=self.__power_button_pressed
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.PowerButton,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.TwoPresses,
            end_structure_state=ButtonStructureStateEnum.TwoPresses,
            on_transition=self.__power_button_pressed
        )
        self.add_transition(
            client_server_message_type=BaseClientServerMessageTypeEnum.TimerRequest,
            from_source_type=BaseSourceTypeEnum.Main,
            start_structure_state=ButtonStructureStateEnum.ZeroPresses,
            end_structure_state=ButtonStructureStateEnum.ZeroPresses,
            on_transition=self.__timer_requested
        )
    def on_client_connected(self, *, source_uuid: str, source_type: SourceTypeEnum, tag_json: Dict):
        """Track Main clients; any other source type is unexpected."""
        if source_type == BaseSourceTypeEnum.Main:
            self.__main_source_uuids.append(source_uuid)
        else:
            raise Exception(f"Unexpected client connected: {source_uuid}: {source_type}: {tag_json}")
    def __name_announced(self, structure_influence: StructureInfluence):
        """Record the display name announced by a client."""
        if structure_influence.get_source_type() != BaseSourceTypeEnum.Main:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        announce = structure_influence.get_client_server_message() # type: AnnounceBaseClientServerMessage
        source_uuid = structure_influence.get_source_uuid()
        self.__name_per_client_uuid[source_uuid] = announce.get_name()
    def __button_pressed(self, structure_influence: StructureInfluence):
        """Count a press; on the third press send a ThreePressesTransmission
        to the presser (the message also carries the child's power level)."""
        if structure_influence.get_source_type() != BaseSourceTypeEnum.Main:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        source_uuid = structure_influence.get_source_uuid()
        if source_uuid not in self.__pressed_button_source_uuids:
            self.__pressed_button_source_uuids.append(source_uuid)
        if source_uuid in self.__name_per_client_uuid:
            print(f"button pressed by {self.__name_per_client_uuid[source_uuid]}")
        else:
            print(f"button pressed by {source_uuid}")
        self.__presses_total += 1
        # NOTE(review): __presses_total is never reset anywhere in this class,
        # so this branch can only fire for the first three presses overall —
        # confirm that repeated press cycles are not expected.
        if self.__presses_total == 3:
            self.send_client_server_message(
                client_server_message=ThreePressesTransmissionBaseClientServerMessage(
                    destination_uuid=source_uuid,
                    power=self.__power_structure.get_power()
                )
            )
    def __button_reset(self, structure_influence: StructureInfluence):
        """Notify every presser of the reset, then forget them."""
        if structure_influence.get_source_type() != BaseSourceTypeEnum.Main:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        for source_uuid in self.__pressed_button_source_uuids:
            client_server_message = ResetTransmissionBaseClientServerMessage(
                destination_uuid=source_uuid
            )
            self.send_client_server_message(
                client_server_message=client_server_message
            )
        self.__pressed_button_source_uuids.clear()
    def __three_presses_transmission_sent(self, structure_influence: StructureInfluence):
        """After the transmission is delivered, forget the pressers."""
        if structure_influence.get_source_type() != BaseSourceTypeEnum.ServerMessenger:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        self.__pressed_button_source_uuids.clear()
    def __ping_requested(self, structure_influence: StructureInfluence):
        """Reply with the current ping index, then increment it."""
        if structure_influence.get_source_type() != BaseSourceTypeEnum.Main:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        source_uuid = structure_influence.get_source_uuid()
        self.send_client_server_message(
            client_server_message=PingResponseBaseClientServerMessage(
                destination_uuid=source_uuid,
                ping_index=self.__pings_total
            )
        )
        self.__pings_total += 1
    def __echo_requested(self, structure_influence: StructureInfluence):
        """Echo the request's message back to the sender."""
        if structure_influence.get_source_type() != BaseSourceTypeEnum.Main:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        echo_request = structure_influence.get_client_server_message() # type: EchoRequestBaseClientServerMessage
        source_uuid = structure_influence.get_source_uuid()
        message = echo_request.get_message()
        self.send_client_server_message(
            client_server_message=EchoResponseBaseClientServerMessage(
                message=message,
                destination_uuid=source_uuid
            )
        )
    def __error_requested(self, structure_influence: StructureInfluence):
        """Build an ErrorResponse from the request's constructor arguments.

        The request may carry deliberately bad arguments; the destination is
        always overridden to the sender.
        """
        if structure_influence.get_source_type() != BaseSourceTypeEnum.Main:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        error_request = structure_influence.get_client_server_message() # type: ErrorRequestBaseClientServerMessage
        source_uuid = structure_influence.get_source_uuid()
        constructor_arguments = error_request.get_response_constructor_arguments()
        if constructor_arguments is None:
            constructor_arguments = {}
        constructor_arguments["destination_uuid"] = source_uuid
        self.send_client_server_message(
            client_server_message=ErrorResponseBaseClientServerMessage(
                **constructor_arguments
            )
        )
    def __power_button_pressed(self, structure_influence: StructureInfluence):
        """Forward the PowerButton message to the child PowerStructure."""
        if structure_influence.get_source_type() != BaseSourceTypeEnum.Main:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        self.__power_structure.update_structure(
            structure_influence=structure_influence
        )
    def __timer_requested(self, structure_influence: StructureInfluence):
        """Start a background thread that replies after the requested delay."""
        if structure_influence.get_source_type() != BaseSourceTypeEnum.Main:
            raise Exception(f"Unexpected source type: {structure_influence.get_source_type()}.")
        timer_request = structure_influence.get_client_server_message() # type: TimerRequestBaseClientServerMessage
        source_uuid = structure_influence.get_source_uuid()
        def timer_thread_method():
            nonlocal timer_request
            nonlocal source_uuid
            time.sleep(timer_request.get_seconds())
            self.send_client_server_message(
                client_server_message=TimerResponseBaseClientServerMessage(
                    destination_uuid=source_uuid,
                    message=timer_request.get_message()
                )
            )
        start_thread(timer_thread_method)
    def dispose(self):
        """Release the child structure's resources."""
        self.__power_structure.dispose()
class ButtonStructureFactory(StructureFactory):
    """Factory that produces a fresh ButtonStructure per server messenger."""

    def __init__(self):
        # Removed a dead `pass` that followed this delegation.
        super().__init__()

    def get_structure(self) -> Structure:
        """Create and return a new ButtonStructure instance."""
        return ButtonStructure()
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
##############################################################################################
class MessengerTest(unittest.TestCase):
    def setUp(self) -> None:
        """Reset shared Kafka state (when configured) so each test starts from empty topics."""
        print(f"{datetime.utcnow()}: setUp: start")
        if is_kafka_sequential_queue:
            # Remove every existing topic so no messages leak between tests.
            kafka_manager = get_default_kafka_manager_factory().get_kafka_manager()
            print(f"setUp: initialized: {datetime.utcnow()}")
            topics = kafka_manager.get_topics().get_result() # type: List[str]
            print(f"setUp: get_topics: {datetime.utcnow()}")
            for topic in topics:
                print(f"setUp: topic: {topic}: {datetime.utcnow()}")
                async_handle = kafka_manager.remove_topic(
                    topic_name=topic
                )
                print(f"setUp: async: {topic}: {datetime.utcnow()}")
                async_handle.get_result()
                print(f"setUp: result: {topic}: {datetime.utcnow()}")
            # Give the broker a moment to settle after the topic removals.
            time.sleep(1)
        print(f"{datetime.utcnow()}: setUp: end")
def test_initialize_client_messenger(self):
client_messenger = get_default_client_messenger_factory().get_client_messenger()
self.assertIsNotNone(client_messenger)
client_messenger.dispose()
def test_initialize_server_messenger(self):
server_messenger = get_default_server_messenger_factory().get_server_messenger()
self.assertIsNotNone(server_messenger)
def test_server_messenger_start_and_stop(self):
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(3)
print(f"{datetime.utcnow()}: stopping")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: stopped")
time.sleep(5)
def test_connect_client_to_server_and_client_disposes_first(self):
client_messenger = get_default_client_messenger_factory().get_client_messenger()
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger.connect_to_server()
client_messenger.send_to_server(
client_server_message=HelloWorldBaseClientServerMessage()
)
time.sleep(1)
client_messenger.dispose()
time.sleep(1)
server_messenger.stop_receiving_from_clients()
time.sleep(1)
def test_connect_client_to_server_and_server_stops_first(self):
client_messenger = get_default_client_messenger_factory().get_client_messenger()
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger.connect_to_server()
client_messenger.send_to_server(
client_server_message=HelloWorldBaseClientServerMessage()
)
time.sleep(1)
server_messenger.stop_receiving_from_clients()
time.sleep(1)
client_messenger.dispose()
time.sleep(1)
    def test_connect_client_to_server_client_receives_and_client_disposes_first(self):
        """Client listens for replies, sends a message that triggers none, then
        disposes before the server stops; no callback or exception is expected."""
        client_messenger = get_default_client_messenger_factory().get_client_messenger()
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        client_messenger.connect_to_server()
        time.sleep(1)
        def callback(client_server_message: ClientServerMessage):
            # HelloWorld produces no response, so any delivery here is a failure.
            raise Exception("Unexpected response")
        found_exception = None # type: Optional[Exception]
        def on_exception(exception: Exception):
            nonlocal found_exception
            found_exception = exception
        client_messenger.receive_from_server(
            callback=callback,
            on_exception=on_exception
        )
        time.sleep(1)
        client_messenger.send_to_server(
            client_server_message=HelloWorldBaseClientServerMessage()
        )
        time.sleep(1)
        client_messenger.dispose()
        time.sleep(1)
        server_messenger.stop_receiving_from_clients()
        time.sleep(1)
        # Surface any exception captured on the background receive thread.
        if found_exception is not None:
            raise found_exception
    def test_connect_client_to_server_client_receives_and_server_stops_first(self):
        """Stopping the server while the client is still listening should surface
        a ReadWriteSocketClosedException through the client's on_exception hook."""
        client_messenger = get_default_client_messenger_factory().get_client_messenger()
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        client_messenger.connect_to_server()
        time.sleep(1)
        def callback(client_server_message: ClientServerMessage):
            # HelloWorld produces no response, so any delivery here is a failure.
            raise Exception("Unexpected response")
        found_exception = None # type: Optional[Exception]
        def on_exception(exception: Exception):
            nonlocal found_exception
            found_exception = exception
        client_messenger.receive_from_server(
            callback=callback,
            on_exception=on_exception
        )
        time.sleep(1)
        client_messenger.send_to_server(
            client_server_message=HelloWorldBaseClientServerMessage()
        )
        time.sleep(1)
        server_messenger.stop_receiving_from_clients()
        time.sleep(1)
        client_messenger.dispose()
        time.sleep(1)
        self.assertIsInstance(found_exception, ReadWriteSocketClosedException)
    def test_press_button_three_times(self):
        """Three presses from one announced client yield exactly one
        ThreePressesTransmission reply and no receive-thread exception."""
        # send three presses and wait for a reply
        client_messenger = get_default_client_messenger_factory().get_client_messenger()
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        client_messenger.connect_to_server()
        callback_total = 0
        def callback(client_server_message: ClientServerMessage):
            nonlocal callback_total
            print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
            callback_total += 1
            self.assertIsInstance(client_server_message, ThreePressesTransmissionBaseClientServerMessage)
        found_exception = None # type: Optional[Exception]
        def on_exception(exception: Exception):
            nonlocal found_exception
            found_exception = exception
        client_messenger.receive_from_server(
            callback=callback,
            on_exception=on_exception
        )
        print(f"{datetime.utcnow()}: sending announcement")
        client_messenger.send_to_server(
            client_server_message=AnnounceBaseClientServerMessage(
                name="<NAME>"
            )
        )
        print(f"{datetime.utcnow()}: sending first press")
        client_messenger.send_to_server(
            client_server_message=PressButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: sending second press")
        client_messenger.send_to_server(
            client_server_message=PressButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: sending third press")
        client_messenger.send_to_server(
            client_server_message=PressButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: waiting for messages")
        time.sleep(1)
        print(f"{datetime.utcnow()}: disposing")
        client_messenger.dispose()
        print(f"{datetime.utcnow()}: disposed")
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        self.assertEqual(1, callback_total)
        self.assertIsNone(found_exception)
    def test_one_client_sends_two_presses_then_reset(self):
        """Two presses followed by a reset yield exactly one ResetTransmission
        back to the presser."""
        # send two presses of the button, then send a reset, and finally wait for a reply
        client_messenger = get_default_client_messenger_factory().get_client_messenger()
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        client_messenger.connect_to_server()
        callback_total = 0
        def callback(client_server_message: ClientServerMessage):
            nonlocal callback_total
            print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
            callback_total += 1
            self.assertIsInstance(client_server_message, ResetTransmissionBaseClientServerMessage)
        found_exception = None # type: Optional[Exception]
        def on_exception(exception: Exception):
            nonlocal found_exception
            found_exception = exception
        client_messenger.receive_from_server(
            callback=callback,
            on_exception=on_exception
        )
        print(f"{datetime.utcnow()}: sending announcement")
        client_messenger.send_to_server(
            client_server_message=AnnounceBaseClientServerMessage(
                name="<NAME>"
            )
        )
        print(f"{datetime.utcnow()}: sending first press")
        client_messenger.send_to_server(
            client_server_message=PressButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: sending second press")
        client_messenger.send_to_server(
            client_server_message=PressButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: sending reset")
        client_messenger.send_to_server(
            client_server_message=ResetButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: waiting for messages")
        time.sleep(1)
        print(f"{datetime.utcnow()}: disposing")
        client_messenger.dispose()
        print(f"{datetime.utcnow()}: disposed")
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        self.assertEqual(1, callback_total)
        self.assertIsNone(found_exception)
    def test_two_clients_each_send_one_press_then_reset(self):
        """Two clients press once each; a reset from the first notifies both
        pressers, so the shared callback counter ends at two."""
        first_client_messenger = get_default_client_messenger_factory().get_client_messenger()
        second_client_messenger = get_default_client_messenger_factory().get_client_messenger()
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        first_client_messenger.connect_to_server()
        second_client_messenger.connect_to_server()
        # Shared across both clients' callbacks.
        callback_total = 0
        def first_callback(client_server_message: ClientServerMessage):
            nonlocal callback_total
            print(f"{datetime.utcnow()}: first_callback: client_server_message: {client_server_message.to_json()}")
            callback_total += 1
            self.assertIsInstance(client_server_message, ResetTransmissionBaseClientServerMessage)
        first_found_exception = None # type: Optional[Exception]
        def first_on_exception(exception: Exception):
            nonlocal first_found_exception
            first_found_exception = exception
        first_client_messenger.receive_from_server(
            callback=first_callback,
            on_exception=first_on_exception
        )
        def second_callback(client_server_message: ClientServerMessage):
            nonlocal callback_total
            print(f"{datetime.utcnow()}: second_callback: client_server_message: {client_server_message.to_json()}")
            callback_total += 1
            self.assertIsInstance(client_server_message, ResetTransmissionBaseClientServerMessage)
        second_found_exception = None # type: Optional[Exception]
        def second_on_exception(exception: Exception):
            nonlocal second_found_exception
            second_found_exception = exception
        second_client_messenger.receive_from_server(
            callback=second_callback,
            on_exception=second_on_exception
        )
        print(f"{datetime.utcnow()}: sending first announcement")
        first_client_messenger.send_to_server(
            client_server_message=AnnounceBaseClientServerMessage(
                name="First"
            )
        )
        print(f"{datetime.utcnow()}: sending second announcement")
        second_client_messenger.send_to_server(
            client_server_message=AnnounceBaseClientServerMessage(
                name="Second"
            )
        )
        time.sleep(1)
        print(f"{datetime.utcnow()}: sending first press")
        first_client_messenger.send_to_server(
            client_server_message=PressButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: sending second press")
        second_client_messenger.send_to_server(
            client_server_message=PressButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: sending reset")
        first_client_messenger.send_to_server(
            client_server_message=ResetButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: waiting for messages")
        time.sleep(1)
        print(f"{datetime.utcnow()}: disposing")
        first_client_messenger.dispose()
        second_client_messenger.dispose()
        print(f"{datetime.utcnow()}: disposed")
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        self.assertEqual(2, callback_total)
        self.assertIsNone(first_found_exception)
        self.assertIsNone(second_found_exception)
def test_two_clients_each_send_one_press_then_third_client_reset(self):
first_client_messenger = get_default_client_messenger_factory().get_client_messenger()
second_client_messenger = get_default_client_messenger_factory().get_client_messenger()
third_client_messenger = get_default_client_messenger_factory().get_client_messenger()
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
first_client_messenger.connect_to_server()
second_client_messenger.connect_to_server()
third_client_messenger.connect_to_server()
callback_total = 0
def first_callback(client_server_message: ClientServerMessage):
nonlocal callback_total
print(f"{datetime.utcnow()}: first_callback: client_server_message: {client_server_message.to_json()}")
callback_total += 1
self.assertIsInstance(client_server_message, ResetTransmissionBaseClientServerMessage)
first_found_exception = None # type: Exception
def first_on_exception(exception: Exception):
nonlocal first_found_exception
first_found_exception = exception
first_client_messenger.receive_from_server(
callback=first_callback,
on_exception=first_on_exception
)
def second_callback(client_server_message: ClientServerMessage):
nonlocal callback_total
print(f"{datetime.utcnow()}: second_callback: client_server_message: {client_server_message.to_json()}")
callback_total += 1
self.assertIsInstance(client_server_message, ResetTransmissionBaseClientServerMessage)
second_found_exception = None # type: Exception
def second_on_exception(exception: Exception):
nonlocal second_found_exception
second_found_exception = exception
second_client_messenger.receive_from_server(
callback=second_callback,
on_exception=second_on_exception
)
def third_callback(client_server_message: ClientServerMessage):
nonlocal callback_total
print(f"{datetime.utcnow()}: third_callback: client_server_message: {client_server_message.to_json()}")
callback_total += 1
raise Exception(f"Third client should not receive a message.")
third_found_exception = None # type: Exception
def third_on_exception(exception: Exception):
nonlocal third_found_exception
third_found_exception = exception
third_client_messenger.receive_from_server(
callback=third_callback,
on_exception=third_on_exception
)
print(f"{datetime.utcnow()}: sending first announcement")
first_client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="First"
)
)
print(f"{datetime.utcnow()}: sending second announcement")
second_client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="Second"
)
)
print(f"{datetime.utcnow()}: sending third announcement")
third_client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="Third"
)
)
time.sleep(0.1)
print(f"{datetime.utcnow()}: sending first press")
first_client_messenger.send_to_server(
client_server_message=PressButtonBaseClientServerMessage()
)
time.sleep(0.1)
print(f"{datetime.utcnow()}: sending second press")
second_client_messenger.send_to_server(
client_server_message=PressButtonBaseClientServerMessage()
)
time.sleep(0.1)
print(f"{datetime.utcnow()}: sending reset")
third_client_messenger.send_to_server(
client_server_message=ResetButtonBaseClientServerMessage()
)
time.sleep(0.1)
print(f"{datetime.utcnow()}: waiting for messages")
time.sleep(1)
print(f"{datetime.utcnow()}: disposing")
first_client_messenger.dispose()
second_client_messenger.dispose()
third_client_messenger.dispose()
print(f"{datetime.utcnow()}: disposed")
print(f"{datetime.utcnow()}: stopping")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: stopped")
time.sleep(1)
self.assertIsNone(first_found_exception)
self.assertIsNone(second_found_exception)
self.assertIsNone(third_found_exception)
self.assertEqual(2, callback_total)
    def test_client_disconnects_before_receiving_intended_message(self):
        """A presser disposes before the reset that would notify it; the server
        must release the dead socket gracefully and no callback ever fires."""
        # the first client sends a press, disconnects, then the second client resets
        # the server messenger should detect that the client disconnected and release the socket gracefully
        first_client_messenger = get_default_client_messenger_factory().get_client_messenger()
        second_client_messenger = get_default_client_messenger_factory().get_client_messenger()
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        first_client_messenger.connect_to_server()
        second_client_messenger.connect_to_server()
        callback_total = 0
        def first_callback(client_server_message: ClientServerMessage):
            nonlocal callback_total
            print(f"{datetime.utcnow()}: first_callback: client_server_message: {client_server_message.to_json()}")
            callback_total += 1
            raise Exception("This client should have been disposed of already.")
        first_found_exception = None # type: Optional[Exception]
        def first_on_exception(exception: Exception):
            nonlocal first_found_exception
            first_found_exception = exception
        first_client_messenger.receive_from_server(
            callback=first_callback,
            on_exception=first_on_exception
        )
        def second_callback(client_server_message: ClientServerMessage):
            nonlocal callback_total
            print(f"{datetime.utcnow()}: second_callback: client_server_message: {client_server_message.to_json()}")
            callback_total += 1
            raise Exception("This client should not be receiving a message.")
        second_found_exception = None # type: Optional[Exception]
        def second_on_exception(exception: Exception):
            nonlocal second_found_exception
            second_found_exception = exception
        second_client_messenger.receive_from_server(
            callback=second_callback,
            on_exception=second_on_exception
        )
        print(f"{datetime.utcnow()}: sending first announcement")
        first_client_messenger.send_to_server(
            client_server_message=AnnounceBaseClientServerMessage(
                name="First"
            )
        )
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: sending second announcement")
        second_client_messenger.send_to_server(
            client_server_message=AnnounceBaseClientServerMessage(
                name="Second"
            )
        )
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: sending first press")
        first_client_messenger.send_to_server(
            client_server_message=PressButtonBaseClientServerMessage()
        )
        time.sleep(1)
        # The presser disconnects before the reset it would have been told about.
        print(f"{datetime.utcnow()}: disposing first client")
        first_client_messenger.dispose()
        time.sleep(1)
        print(f"{datetime.utcnow()}: sending reset")
        second_client_messenger.send_to_server(
            client_server_message=ResetButtonBaseClientServerMessage()
        )
        print(f"{datetime.utcnow()}: waiting for messages")
        time.sleep(1)
        print(f"{datetime.utcnow()}: disposing")
        second_client_messenger.dispose()
        time.sleep(1)
        print(f"{datetime.utcnow()}: disposed")
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        time.sleep(1)
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        self.assertEqual(0, callback_total)
        self.assertIsNone(first_found_exception)
        self.assertIsNone(second_found_exception)
def test_ping(self):
client_messenger = get_default_client_messenger_factory().get_client_messenger()
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger.connect_to_server()
callback_total = 0
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
callback_total += 1
self.assertIsInstance(client_server_message, PingResponseBaseClientServerMessage)
ping_response_base_client_server_message = client_server_message # type: PingResponseBaseClientServerMessage
self.assertEqual(0, ping_response_base_client_server_message.get_ping_index())
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending first announcement")
client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="First"
)
)
time.sleep(0.1)
print(f"{datetime.utcnow()}: sending ping")
client_messenger.send_to_server(
client_server_message=PingRequestBaseClientServerMessage()
)
time.sleep(0.1)
print(f"{datetime.utcnow()}: waiting for messages")
time.sleep(1)
print(f"{datetime.utcnow()}: disposing")
client_messenger.dispose()
print(f"{datetime.utcnow()}: disposed")
print(f"{datetime.utcnow()}: stopping")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: stopped")
time.sleep(1)
self.assertEqual(1, callback_total)
self.assertIsNone(found_exception)
def test_single_client_quickly_pings_using_threading(self):
# spam pings and detect timing differences between sends and receives
client_messenger = get_default_client_messenger_factory().get_client_messenger()
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger.connect_to_server()
expected_pings_total = 1000
callback_total = 0
expected_ping_index = 0
first_message_datetime = None # type: datetime
last_message_datetime = None # type: datetime
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
nonlocal expected_ping_index
nonlocal first_message_datetime
nonlocal last_message_datetime
nonlocal expected_pings_total
#print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
callback_total += 1
self.assertIsInstance(client_server_message, PingResponseBaseClientServerMessage)
ping_response_base_client_server_message = client_server_message # type: PingResponseBaseClientServerMessage
self.assertEqual(expected_ping_index, ping_response_base_client_server_message.get_ping_index())
expected_ping_index += 1
if expected_ping_index == 1:
first_message_datetime = datetime.utcnow()
if expected_ping_index == expected_pings_total:
last_message_datetime = datetime.utcnow()
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending first announcement")
client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="First"
)
)
time.sleep(0.1)
print(f"{datetime.utcnow()}: sending first press")
sent_first_ping_datetime = None # type: datetime
sent_last_ping_datetime = None # type: datetime
def ping_thread_method():
nonlocal client_messenger
nonlocal expected_pings_total
nonlocal sent_first_ping_datetime
nonlocal sent_last_ping_datetime
sent_first_ping_datetime = datetime.utcnow()
client_messenger.send_to_server(
client_server_message=PingRequestBaseClientServerMessage()
)
for index in range(expected_pings_total - 2):
client_messenger.send_to_server(
client_server_message=PingRequestBaseClientServerMessage()
)
sent_last_ping_datetime = datetime.utcnow()
client_messenger.send_to_server(
client_server_message=PingRequestBaseClientServerMessage()
)
ping_thread = start_thread(ping_thread_method)
ping_thread.join()
time.sleep(0.1)
print(f"{datetime.utcnow()}: waiting for messages")
while last_message_datetime is None:
time.sleep(1)
time.sleep(1)
print(f"{datetime.utcnow()}: disposing")
client_messenger.dispose()
print(f"{datetime.utcnow()}: disposed")
print(f"{datetime.utcnow()}: stopping")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: stopped")
time.sleep(1)
self.assertEqual(expected_pings_total, callback_total)
print(f"Sent first message datetime: {sent_first_ping_datetime}")
print(f"Received first message datetime: {first_message_datetime}")
print(f"Diff: {(first_message_datetime - sent_first_ping_datetime).total_seconds()} seconds")
print(f"Sent last message datetime: {sent_last_ping_datetime}")
print(f"Received last message datetime: {last_message_datetime}")
print(f"Diff: {(last_message_datetime - sent_last_ping_datetime).total_seconds()} seconds")
seconds_total = (last_message_datetime - first_message_datetime).total_seconds()
messages_per_second = expected_pings_total / seconds_total
print(f"Messages per seconds: {messages_per_second}")
print(f"Seconds per message: {1.0 / messages_per_second}")
self.assertIsNone(found_exception)
def test_single_client_quickly_pings_burst(self):
# spam pings and detect timing differences between sends and receives
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
expected_pings_total = 1000
print(f"{datetime.utcnow()}: sending first press")
found_exception = None # type: Exception
def ping_thread_method():
nonlocal expected_pings_total
nonlocal found_exception
client_messenger = get_default_client_messenger_factory().get_client_messenger()
client_messenger.connect_to_server()
expected_ping_index = 0
received_first_message_datetime = None # type: datetime
received_last_message_datetime = None # type: datetime
callback_semaphore = Semaphore()
def callback(client_server_message: ClientServerMessage):
nonlocal expected_pings_total
nonlocal expected_ping_index
nonlocal received_first_message_datetime
nonlocal received_last_message_datetime
nonlocal callback_semaphore
#print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
self.assertIsInstance(client_server_message, PingResponseBaseClientServerMessage)
ping_response_base_client_server_message = client_server_message # type: PingResponseBaseClientServerMessage
self.assertEqual(expected_ping_index, ping_response_base_client_server_message.get_ping_index())
callback_semaphore.acquire()
expected_ping_index += 1
if expected_ping_index == 1:
received_first_message_datetime = datetime.utcnow()
if expected_ping_index == expected_pings_total:
received_last_message_datetime = datetime.utcnow()
callback_semaphore.release()
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending first announcement")
client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="First"
)
)
sent_first_ping_datetime = datetime.utcnow()
client_messenger.send_to_server(
client_server_message=PingRequestBaseClientServerMessage()
)
for index in range(expected_pings_total - 2):
client_messenger.send_to_server(
client_server_message=PingRequestBaseClientServerMessage()
)
sent_last_ping_datetime = datetime.utcnow()
client_messenger.send_to_server(
client_server_message=PingRequestBaseClientServerMessage()
)
print(f"{datetime.utcnow()}: waiting for messages")
while received_last_message_datetime is None:
time.sleep(1)
time.sleep(1)
print(f"Sent first message datetime: {sent_first_ping_datetime}")
print(f"Received first message datetime: {received_first_message_datetime}")
print(f"Diff: {(received_first_message_datetime - sent_first_ping_datetime).total_seconds()} seconds")
print(f"Sent last message datetime: {sent_last_ping_datetime}")
print(f"Received last message datetime: {received_last_message_datetime}")
print(f"Diff: {(received_last_message_datetime - sent_last_ping_datetime).total_seconds()} seconds")
seconds_total = (sent_last_ping_datetime - sent_first_ping_datetime).total_seconds()
messages_per_second = expected_pings_total / seconds_total
print(f"Seconds to send all messages: {seconds_total}")
print(f"Sent messages per seconds: {messages_per_second}")
print(f"Seconds per sent message: {1.0 / messages_per_second}")
seconds_total = (received_last_message_datetime - received_first_message_datetime).total_seconds()
messages_per_second = expected_pings_total / seconds_total
print(f"Seconds to receive all messages: {seconds_total}")
print(f"Received messages per seconds: {messages_per_second}")
print(f"Seconds per received message: {1.0 / messages_per_second}")
print(f"{datetime.utcnow()}: disposing")
client_messenger.dispose()
print(f"{datetime.utcnow()}: disposed")
ping_thread = start_thread(ping_thread_method)
ping_thread.join()
time.sleep(0.1)
print(f"{datetime.utcnow()}: stopping")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: stopped")
time.sleep(1)
self.assertIsNone(found_exception)
    def test_single_client_quickly_pings_delayed(self):
        """Paced-throughput test: send ~500 pings/second for ~10 seconds with a
        fixed inter-send delay, record per-message send and receive timestamps,
        and print min/max/average send-to-receive latency (optionally plotted
        when ``is_plotted`` is enabled)."""
        # spam pings and detect timing differences between sends and receives
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        test_seconds = 10
        test_messages_per_second = 500
        expected_pings_total = test_seconds * test_messages_per_second
        # sleep slightly less than the nominal period (x0.6) so per-send
        # overhead does not drag the effective rate below the target
        delay_between_sending_message_seconds = (1.0 / test_messages_per_second) * 0.6
        #expected_pings_total = 1000
        #delay_between_sending_message_seconds = 0.0025
        print(f"{datetime.utcnow()}: sending first press")
        found_exception = None # type: Exception
        def ping_thread_method():
            nonlocal expected_pings_total
            nonlocal delay_between_sending_message_seconds
            nonlocal found_exception
            client_messenger = get_default_client_messenger_factory().get_client_messenger()
            client_messenger.connect_to_server()
            expected_ping_index = 0
            # guards expected_ping_index / received_datetimes, which are
            # mutated from the messenger's receive thread
            callback_semaphore = Semaphore()
            received_datetimes = [] # type: List[datetime]
            sent_datetimes = [] # type: List[datetime]
            def callback(client_server_message: ClientServerMessage):
                nonlocal expected_pings_total
                nonlocal expected_ping_index
                nonlocal received_datetimes
                nonlocal callback_semaphore
                #print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
                self.assertIsInstance(client_server_message, PingResponseBaseClientServerMessage)
                ping_response_base_client_server_message = client_server_message # type: PingResponseBaseClientServerMessage
                #self.assertEqual(expected_ping_index, ping_response_base_client_server_message.get_ping_index())
                callback_semaphore.acquire()
                expected_ping_index += 1
                received_datetimes.append(datetime.utcnow())
                callback_semaphore.release()
            def on_exception(exception: Exception):
                nonlocal found_exception
                found_exception = exception
            client_messenger.receive_from_server(
                callback=callback,
                on_exception=on_exception
            )
            print(f"{datetime.utcnow()}: sending first announcement")
            client_messenger.send_to_server(
                client_server_message=AnnounceBaseClientServerMessage(
                    name="First"
                )
            )
            print(f"{datetime.utcnow()}: starting to send messages")
            sent_datetimes.append(datetime.utcnow())
            client_messenger.send_to_server(
                client_server_message=PingRequestBaseClientServerMessage()
            )
            time.sleep(delay_between_sending_message_seconds)
            for index in range(expected_pings_total - 2):
                sent_datetimes.append(datetime.utcnow())
                client_messenger.send_to_server(
                    client_server_message=PingRequestBaseClientServerMessage()
                )
                time.sleep(delay_between_sending_message_seconds)
            sent_datetimes.append(datetime.utcnow())
            client_messenger.send_to_server(
                client_server_message=PingRequestBaseClientServerMessage()
            )
            print(f"{datetime.utcnow()}: waiting for messages")
            # NOTE(review): this wait has no timeout — the test will spin
            # forever if any response is lost; consider bounding it
            while len(received_datetimes) != expected_pings_total:
                time.sleep(1)
                print(f"len(received_datetimes): {len(received_datetimes)}")
            time.sleep(1)
            self.assertEqual(expected_pings_total, len(sent_datetimes))
            self.assertEqual(expected_pings_total, len(received_datetimes))
            # pair each send timestamp with the same-index receive timestamp
            # to compute per-message latency
            diff_seconds_totals = [] # type: List[float]
            for sent_datetime, received_datetime in zip(sent_datetimes, received_datetimes):
                seconds_total = (received_datetime - sent_datetime).total_seconds()
                diff_seconds_totals.append(seconds_total)
            print(f"Time to send {(sent_datetimes[-1] - sent_datetimes[0]).total_seconds()} seconds")
            print(f"Messages per second to send: {expected_pings_total / (sent_datetimes[-1] - sent_datetimes[0]).total_seconds()}")
            print(f"Time to receive {(received_datetimes[-1] - received_datetimes[0]).total_seconds()} seconds")
            print(f"Messages per second to receive: {expected_pings_total / (received_datetimes[-1] - received_datetimes[0]).total_seconds()}")
            print(f"Min diff seconds {min(diff_seconds_totals)} at {diff_seconds_totals.index(min(diff_seconds_totals))}")
            print(f"Max diff seconds {max(diff_seconds_totals)} at {diff_seconds_totals.index(max(diff_seconds_totals))}")
            print(f"Ave diff seconds {sum(diff_seconds_totals)/expected_pings_total}")
            if is_plotted:
                plt.scatter(sent_datetimes, range(len(sent_datetimes)), s=1, c="red")
                plt.scatter(received_datetimes, range(len(received_datetimes)), s=1, c="blue")
                plt.show()
            # recompute the stats ignoring the first `cutoff` messages,
            # presumably to exclude warm-up jitter — TODO confirm
            cutoff = 150
            print(f"Min diff seconds {min(diff_seconds_totals[cutoff:])} at {diff_seconds_totals.index(min(diff_seconds_totals[cutoff:]))}")
            print(f"Max diff seconds {max(diff_seconds_totals[cutoff:])} at {diff_seconds_totals.index(max(diff_seconds_totals[cutoff:]))}")
            print(f"Ave diff seconds {sum(diff_seconds_totals[cutoff:]) / (expected_pings_total - cutoff)}")
            print(f"{datetime.utcnow()}: disposing")
            client_messenger.dispose()
            print(f"{datetime.utcnow()}: disposed")
        ping_thread = start_thread(ping_thread_method)
        ping_thread.join()
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        self.assertIsNone(found_exception)
    def test_single_client_quickly_echos_burst_0B(self):
        """Echo-burst benchmark with an empty (0-byte) payload: a worker thread
        sends 1000 ordered echo requests back-to-back and prints send/receive
        rate statistics. One of four near-identical tests (0B/1KB/5KB/10KB)
        differing only in ``message_contents`` size."""
        # spam pings and detect timing differences between sends and receives
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        expected_pings_total = 1000
        message_contents = ""
        print(f"{datetime.utcnow()}: sending first press")
        found_exception = None # type: Exception
        def ping_thread_method():
            nonlocal expected_pings_total
            nonlocal found_exception
            nonlocal message_contents
            client_messenger = get_default_client_messenger_factory().get_client_messenger()
            client_messenger.connect_to_server()
            expected_ping_index = 0
            received_first_message_datetime = None # type: datetime
            received_last_message_datetime = None # type: datetime
            # guards the response counter and first/last receive timestamps
            callback_semaphore = Semaphore()
            def callback(client_server_message: ClientServerMessage):
                nonlocal expected_pings_total
                nonlocal expected_ping_index
                nonlocal received_first_message_datetime
                nonlocal received_last_message_datetime
                nonlocal callback_semaphore
                # print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
                self.assertIsInstance(client_server_message, EchoResponseBaseClientServerMessage)
                callback_semaphore.acquire()
                expected_ping_index += 1
                if expected_ping_index == 1:
                    received_first_message_datetime = datetime.utcnow()
                if expected_ping_index == expected_pings_total:
                    received_last_message_datetime = datetime.utcnow()
                callback_semaphore.release()
            def on_exception(exception: Exception):
                nonlocal found_exception
                found_exception = exception
            client_messenger.receive_from_server(
                callback=callback,
                on_exception=on_exception
            )
            print(f"{datetime.utcnow()}: sending first announcement")
            client_messenger.send_to_server(
                client_server_message=AnnounceBaseClientServerMessage(
                    name="First"
                )
            )
            # first and last sends are timestamped separately for latency stats
            sent_first_ping_datetime = datetime.utcnow()
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=message_contents,
                    is_ordered=True
                )
            )
            for index in range(expected_pings_total - 2):
                client_messenger.send_to_server(
                    client_server_message=EchoRequestBaseClientServerMessage(
                        message=message_contents,
                        is_ordered=True
                    )
                )
            sent_last_ping_datetime = datetime.utcnow()
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=message_contents,
                    is_ordered=True
                )
            )
            print(f"{datetime.utcnow()}: waiting for messages")
            # NOTE(review): no timeout — spins forever if a response is lost
            while received_last_message_datetime is None:
                time.sleep(1)
            time.sleep(1)
            print(f"Sent first message datetime: {sent_first_ping_datetime}")
            print(f"Received first message datetime: {received_first_message_datetime}")
            print(f"Diff: {(received_first_message_datetime - sent_first_ping_datetime).total_seconds()} seconds")
            print(f"Sent last message datetime: {sent_last_ping_datetime}")
            print(f"Received last message datetime: {received_last_message_datetime}")
            print(f"Diff: {(received_last_message_datetime - sent_last_ping_datetime).total_seconds()} seconds")
            seconds_total = (sent_last_ping_datetime - sent_first_ping_datetime).total_seconds()
            messages_per_second = expected_pings_total / seconds_total
            print(f"Seconds to send all messages: {seconds_total}")
            print(f"Sent messages per seconds: {messages_per_second}")
            print(f"Seconds per sent message: {1.0 / messages_per_second}")
            seconds_total = (received_last_message_datetime - received_first_message_datetime).total_seconds()
            messages_per_second = expected_pings_total / seconds_total
            print(f"Seconds to receive all messages: {seconds_total}")
            print(f"Received messages per seconds: {messages_per_second}")
            print(f"Seconds per received message: {1.0 / messages_per_second}")
            print(f"{datetime.utcnow()}: disposing")
            client_messenger.dispose()
            print(f"{datetime.utcnow()}: disposed")
        ping_thread = start_thread(ping_thread_method)
        ping_thread.join()
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        self.assertIsNone(found_exception)
    def test_single_client_quickly_echos_burst_1KB(self):
        """Echo-burst benchmark with a 1 KiB payload (8 chars x 128 = 1024): a
        worker thread sends 1000 ordered echo requests back-to-back and prints
        send/receive rate statistics. One of four near-identical tests
        (0B/1KB/5KB/10KB) differing only in ``message_contents`` size."""
        # spam pings and detect timing differences between sends and receives
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        expected_pings_total = 1000
        message_contents = "12345678" * 128
        print(f"{datetime.utcnow()}: sending first press")
        found_exception = None # type: Exception
        def ping_thread_method():
            nonlocal expected_pings_total
            nonlocal found_exception
            nonlocal message_contents
            client_messenger = get_default_client_messenger_factory().get_client_messenger()
            client_messenger.connect_to_server()
            expected_ping_index = 0
            received_first_message_datetime = None # type: datetime
            received_last_message_datetime = None # type: datetime
            # guards the response counter and first/last receive timestamps
            callback_semaphore = Semaphore()
            def callback(client_server_message: ClientServerMessage):
                nonlocal expected_pings_total
                nonlocal expected_ping_index
                nonlocal received_first_message_datetime
                nonlocal received_last_message_datetime
                nonlocal callback_semaphore
                # print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
                self.assertIsInstance(client_server_message, EchoResponseBaseClientServerMessage)
                callback_semaphore.acquire()
                expected_ping_index += 1
                if expected_ping_index == 1:
                    received_first_message_datetime = datetime.utcnow()
                if expected_ping_index == expected_pings_total:
                    received_last_message_datetime = datetime.utcnow()
                callback_semaphore.release()
            def on_exception(exception: Exception):
                nonlocal found_exception
                found_exception = exception
            client_messenger.receive_from_server(
                callback=callback,
                on_exception=on_exception
            )
            print(f"{datetime.utcnow()}: sending first announcement")
            client_messenger.send_to_server(
                client_server_message=AnnounceBaseClientServerMessage(
                    name="First"
                )
            )
            # first and last sends are timestamped separately for latency stats
            sent_first_ping_datetime = datetime.utcnow()
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=message_contents,
                    is_ordered=True
                )
            )
            for index in range(expected_pings_total - 2):
                client_messenger.send_to_server(
                    client_server_message=EchoRequestBaseClientServerMessage(
                        message=message_contents,
                        is_ordered=True
                    )
                )
            sent_last_ping_datetime = datetime.utcnow()
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=message_contents,
                    is_ordered=True
                )
            )
            print(f"{datetime.utcnow()}: waiting for messages")
            # NOTE(review): no timeout — spins forever if a response is lost
            while received_last_message_datetime is None:
                time.sleep(1)
            time.sleep(1)
            print(f"Sent first message datetime: {sent_first_ping_datetime}")
            print(f"Received first message datetime: {received_first_message_datetime}")
            print(f"Diff: {(received_first_message_datetime - sent_first_ping_datetime).total_seconds()} seconds")
            print(f"Sent last message datetime: {sent_last_ping_datetime}")
            print(f"Received last message datetime: {received_last_message_datetime}")
            print(f"Diff: {(received_last_message_datetime - sent_last_ping_datetime).total_seconds()} seconds")
            seconds_total = (sent_last_ping_datetime - sent_first_ping_datetime).total_seconds()
            messages_per_second = expected_pings_total / seconds_total
            print(f"Seconds to send all messages: {seconds_total}")
            print(f"Sent messages per seconds: {messages_per_second}")
            print(f"Seconds per sent message: {1.0 / messages_per_second}")
            seconds_total = (received_last_message_datetime - received_first_message_datetime).total_seconds()
            messages_per_second = expected_pings_total / seconds_total
            print(f"Seconds to receive all messages: {seconds_total}")
            print(f"Received messages per seconds: {messages_per_second}")
            print(f"Seconds per received message: {1.0 / messages_per_second}")
            print(f"{datetime.utcnow()}: disposing")
            client_messenger.dispose()
            print(f"{datetime.utcnow()}: disposed")
        ping_thread = start_thread(ping_thread_method)
        ping_thread.join()
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        self.assertIsNone(found_exception)
    def test_single_client_quickly_echos_burst_5KB(self):
        """Echo-burst benchmark with a 5 KiB payload (8 chars x 128 x 5): a
        worker thread sends 1000 ordered echo requests back-to-back and prints
        send/receive rate statistics. One of four near-identical tests
        (0B/1KB/5KB/10KB) differing only in ``message_contents`` size."""
        # spam pings and detect timing differences between sends and receives
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        expected_pings_total = 1000
        message_contents = "12345678" * 128 * 5
        print(f"{datetime.utcnow()}: sending first press")
        found_exception = None # type: Exception
        def ping_thread_method():
            nonlocal expected_pings_total
            nonlocal found_exception
            nonlocal message_contents
            client_messenger = get_default_client_messenger_factory().get_client_messenger()
            client_messenger.connect_to_server()
            expected_ping_index = 0
            received_first_message_datetime = None # type: datetime
            received_last_message_datetime = None # type: datetime
            # guards the response counter and first/last receive timestamps
            callback_semaphore = Semaphore()
            def callback(client_server_message: ClientServerMessage):
                nonlocal expected_pings_total
                nonlocal expected_ping_index
                nonlocal received_first_message_datetime
                nonlocal received_last_message_datetime
                nonlocal callback_semaphore
                # print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
                self.assertIsInstance(client_server_message, EchoResponseBaseClientServerMessage)
                callback_semaphore.acquire()
                expected_ping_index += 1
                if expected_ping_index == 1:
                    received_first_message_datetime = datetime.utcnow()
                if expected_ping_index == expected_pings_total:
                    received_last_message_datetime = datetime.utcnow()
                callback_semaphore.release()
            def on_exception(exception: Exception):
                nonlocal found_exception
                found_exception = exception
            client_messenger.receive_from_server(
                callback=callback,
                on_exception=on_exception
            )
            print(f"{datetime.utcnow()}: sending first announcement")
            client_messenger.send_to_server(
                client_server_message=AnnounceBaseClientServerMessage(
                    name="First"
                )
            )
            # first and last sends are timestamped separately for latency stats
            sent_first_ping_datetime = datetime.utcnow()
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=message_contents,
                    is_ordered=True
                )
            )
            for index in range(expected_pings_total - 2):
                client_messenger.send_to_server(
                    client_server_message=EchoRequestBaseClientServerMessage(
                        message=message_contents,
                        is_ordered=True
                    )
                )
            sent_last_ping_datetime = datetime.utcnow()
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=message_contents,
                    is_ordered=True
                )
            )
            print(f"{datetime.utcnow()}: waiting for messages")
            # NOTE(review): no timeout — spins forever if a response is lost
            while received_last_message_datetime is None:
                time.sleep(1)
            time.sleep(1)
            print(f"Sent first message datetime: {sent_first_ping_datetime}")
            print(f"Received first message datetime: {received_first_message_datetime}")
            print(f"Diff: {(received_first_message_datetime - sent_first_ping_datetime).total_seconds()} seconds")
            print(f"Sent last message datetime: {sent_last_ping_datetime}")
            print(f"Received last message datetime: {received_last_message_datetime}")
            print(f"Diff: {(received_last_message_datetime - sent_last_ping_datetime).total_seconds()} seconds")
            seconds_total = (sent_last_ping_datetime - sent_first_ping_datetime).total_seconds()
            messages_per_second = expected_pings_total / seconds_total
            print(f"Seconds to send all messages: {seconds_total}")
            print(f"Sent messages per seconds: {messages_per_second}")
            print(f"Seconds per sent message: {1.0 / messages_per_second}")
            seconds_total = (received_last_message_datetime - received_first_message_datetime).total_seconds()
            messages_per_second = expected_pings_total / seconds_total
            print(f"Seconds to receive all messages: {seconds_total}")
            print(f"Received messages per seconds: {messages_per_second}")
            print(f"Seconds per received message: {1.0 / messages_per_second}")
            print(f"{datetime.utcnow()}: disposing")
            client_messenger.dispose()
            print(f"{datetime.utcnow()}: disposed")
        ping_thread = start_thread(ping_thread_method)
        ping_thread.join()
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        self.assertIsNone(found_exception)
    def test_single_client_quickly_echos_burst_10KB(self):
        """Echo-burst benchmark with a 10 KiB payload (8 chars x 128 x 10): a
        worker thread sends 1000 ordered echo requests back-to-back and prints
        send/receive rate statistics. One of four near-identical tests
        (0B/1KB/5KB/10KB) differing only in ``message_contents`` size."""
        # spam pings and detect timing differences between sends and receives
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)
        expected_pings_total = 1000
        message_contents = "12345678" * 128 * 10
        print(f"{datetime.utcnow()}: sending first press")
        found_exception = None # type: Exception
        def ping_thread_method():
            nonlocal expected_pings_total
            nonlocal found_exception
            nonlocal message_contents
            client_messenger = get_default_client_messenger_factory().get_client_messenger()
            client_messenger.connect_to_server()
            expected_ping_index = 0
            received_first_message_datetime = None # type: datetime
            received_last_message_datetime = None # type: datetime
            # guards the response counter and first/last receive timestamps
            callback_semaphore = Semaphore()
            def callback(client_server_message: ClientServerMessage):
                nonlocal expected_pings_total
                nonlocal expected_ping_index
                nonlocal received_first_message_datetime
                nonlocal received_last_message_datetime
                nonlocal callback_semaphore
                # print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
                self.assertIsInstance(client_server_message, EchoResponseBaseClientServerMessage)
                callback_semaphore.acquire()
                expected_ping_index += 1
                if expected_ping_index == 1:
                    received_first_message_datetime = datetime.utcnow()
                if expected_ping_index == expected_pings_total:
                    received_last_message_datetime = datetime.utcnow()
                callback_semaphore.release()
            def on_exception(exception: Exception):
                nonlocal found_exception
                found_exception = exception
            client_messenger.receive_from_server(
                callback=callback,
                on_exception=on_exception
            )
            print(f"{datetime.utcnow()}: sending first announcement")
            client_messenger.send_to_server(
                client_server_message=AnnounceBaseClientServerMessage(
                    name="First"
                )
            )
            # first and last sends are timestamped separately for latency stats
            sent_first_ping_datetime = datetime.utcnow()
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=message_contents,
                    is_ordered=True
                )
            )
            for index in range(expected_pings_total - 2):
                client_messenger.send_to_server(
                    client_server_message=EchoRequestBaseClientServerMessage(
                        message=message_contents,
                        is_ordered=True
                    )
                )
            sent_last_ping_datetime = datetime.utcnow()
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=message_contents,
                    is_ordered=True
                )
            )
            print(f"{datetime.utcnow()}: waiting for messages")
            # NOTE(review): no timeout — spins forever if a response is lost
            while received_last_message_datetime is None:
                time.sleep(1)
            time.sleep(1)
            print(f"Sent first message datetime: {sent_first_ping_datetime}")
            print(f"Received first message datetime: {received_first_message_datetime}")
            print(f"Diff: {(received_first_message_datetime - sent_first_ping_datetime).total_seconds()} seconds")
            print(f"Sent last message datetime: {sent_last_ping_datetime}")
            print(f"Received last message datetime: {received_last_message_datetime}")
            print(f"Diff: {(received_last_message_datetime - sent_last_ping_datetime).total_seconds()} seconds")
            seconds_total = (sent_last_ping_datetime - sent_first_ping_datetime).total_seconds()
            messages_per_second = expected_pings_total / seconds_total
            print(f"Seconds to send all messages: {seconds_total}")
            print(f"Sent messages per seconds: {messages_per_second}")
            print(f"Seconds per sent message: {1.0 / messages_per_second}")
            seconds_total = (received_last_message_datetime - received_first_message_datetime).total_seconds()
            messages_per_second = expected_pings_total / seconds_total
            print(f"Seconds to receive all messages: {seconds_total}")
            print(f"Received messages per seconds: {messages_per_second}")
            print(f"Seconds per received message: {1.0 / messages_per_second}")
            print(f"{datetime.utcnow()}: disposing")
            client_messenger.dispose()
            print(f"{datetime.utcnow()}: disposed")
        ping_thread = start_thread(ping_thread_method)
        ping_thread.join()
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        self.assertIsNone(found_exception)
def test_client_attempts_message_impossible_for_structure_state_but_exception_in_callback(self):
# attempt to reset the presses without first pressing the button
client_messenger = get_default_client_messenger_factory().get_client_messenger()
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger.connect_to_server()
callback_total = 0
expected_exception = Exception(f"Client should not receive any messages as part of this test.")
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
nonlocal expected_exception
print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
callback_total += 1
raise expected_exception
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending press")
client_messenger.send_to_server(
client_server_message=PressButtonBaseClientServerMessage()
)
time.sleep(1)
print(f"{datetime.utcnow()}: sending announcement")
client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="<NAME>"
)
)
print(f"{datetime.utcnow()}: waiting for messages")
time.sleep(1)
print(f"{datetime.utcnow()}: disposing")
client_messenger.dispose()
print(f"{datetime.utcnow()}: disposed")
print(f"{datetime.utcnow()}: stopping")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: stopped")
time.sleep(1)
self.assertEqual(1, callback_total)
self.assertIsNotNone(found_exception)
self.assertEqual(expected_exception, found_exception)
def test_client_attempts_message_impossible_for_structure_state(self):
# attempt to reset the presses without first pressing the button
client_messenger = get_default_client_messenger_factory().get_client_messenger()
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger.connect_to_server()
callback_total = 0
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
callback_total += 1
self.assertIsInstance(client_server_message, AnnounceFailedBaseClientServerMessage)
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending press")
client_messenger.send_to_server(
client_server_message=PressButtonBaseClientServerMessage()
)
time.sleep(1)
print(f"{datetime.utcnow()}: sending announcement")
client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="<NAME>"
)
)
print(f"{datetime.utcnow()}: waiting for messages")
time.sleep(1)
print(f"{datetime.utcnow()}: disposing")
client_messenger.dispose()
print(f"{datetime.utcnow()}: disposed")
print(f"{datetime.utcnow()}: stopping")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: stopped")
time.sleep(1)
self.assertEqual(1, callback_total)
self.assertIsNone(found_exception)
def test_client_attempts_message_impossible_for_child_structure_state(self):
# call power 4 times
client_messenger = get_default_client_messenger_factory().get_client_messenger()
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger.connect_to_server()
callback_total = 0
def callback(power_overload_transmission: PowerOverloadTransmissionBaseClientServerMessage):
nonlocal callback_total
callback_total += 1
self.assertIsInstance(power_overload_transmission, PowerOverloadTransmissionBaseClientServerMessage)
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending first announcement")
client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="First"
)
)
time.sleep(0.1)
print(f"{datetime.utcnow()}: first power: start")
client_messenger.send_to_server(
client_server_message=PowerButtonBaseClientServerMessage(
is_anonymous=False
)
)
print(f"{datetime.utcnow()}: first power: end")
time.sleep(0.1)
print(f"{datetime.utcnow()}: second power: start")
client_messenger.send_to_server(
client_server_message=PowerButtonBaseClientServerMessage(
is_anonymous=False
)
)
print(f"{datetime.utcnow()}: second power: end")
time.sleep(0.1)
print(f"{datetime.utcnow()}: third power: start")
client_messenger.send_to_server(
client_server_message=PowerButtonBaseClientServerMessage(
is_anonymous=False
)
)
print(f"{datetime.utcnow()}: third power: end")
time.sleep(0.1)
print(f"{datetime.utcnow()}: fourth power: start")
client_messenger.send_to_server(
client_server_message=PowerButtonBaseClientServerMessage(
is_anonymous=False
)
)
print(f"{datetime.utcnow()}: fourth power: end")
time.sleep(0.1)
print(f"{datetime.utcnow()}: waiting for messages")
time.sleep(1)
print(f"{datetime.utcnow()}: disposing")
client_messenger.dispose()
print(f"{datetime.utcnow()}: disposed")
print(f"{datetime.utcnow()}: stopping")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: stopped")
time.sleep(1)
self.assertEqual(1, callback_total)
self.assertIsNone(found_exception)
def test_order_of_messages(self):
# send multiple messages from the same client to the server, expecting the response order to be the same
messages_total = 1000
print(f"{datetime.utcnow()}: setting up server")
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
print(f"{datetime.utcnow()}: setting up client")
time.sleep(1)
client_messenger = get_default_client_messenger_factory().get_client_messenger()
callback_total = 0
last_message_index = -1
failed_at_message_index = None # type: int
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
nonlocal last_message_index
nonlocal failed_at_message_index
# print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
self.assertIsInstance(client_server_message, EchoResponseBaseClientServerMessage)
echo_response_client_server_message = client_server_message # type: EchoResponseBaseClientServerMessage
if int(echo_response_client_server_message.get_message()) == last_message_index + 1:
# correct message received
last_message_index += 1
else:
if failed_at_message_index is None:
failed_at_message_index = last_message_index
callback_total += 1
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
if found_exception is None:
found_exception = exception
# TODO determine why the first thread to spawn as a part of the connect_to_server process does not die
client_messenger.connect_to_server()
time.sleep(1)
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
time.sleep(1)
for message_index in range(messages_total):
client_messenger.send_to_server(
client_server_message=EchoRequestBaseClientServerMessage(
message=str(message_index),
is_ordered=True
)
)
print(f"{datetime.utcnow()}: waiting for messages")
time.sleep(1)
print(f"{datetime.utcnow()}: disposing client messenger: start")
client_messenger.dispose()
print(f"{datetime.utcnow()}: disposing client messenger: end")
time.sleep(1)
print(f"{datetime.utcnow()}: server_messenger.stop_receiving_from_clients(): start")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: server_messenger.stop_receiving_from_clients(): end")
if found_exception is not None:
raise found_exception
self.assertIsNone(failed_at_message_index)
print(f"end")
    def test_two_clients_becoming_out_of_sync(self):
        """Alternate sends between two clients while geometrically shrinking the
        inter-send delay, and assert the delay reaches the accepted threshold
        before the server ever observes messages out of order.
        """
        # as the delay between two different clients send messages shrinks, how often are the messages received in the wrong order
        current_delay_between_messages_seconds = 1
        delay_percentage_decrease_delta = 0.1  # shrink the delay by 10% each iteration
        minimum_delay_between_messages_seconds = 0.0001  # loop floor
        accepted_delay_between_messages_that_could_result_in_disorder = 0.001
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        print(f"{datetime.utcnow()}: setting up clients")
        time.sleep(1)
        client_messengers = []  # type: List[ClientMessenger]
        client_messengers.append(get_default_client_messenger_factory().get_client_messenger())
        client_messengers.append(get_default_client_messenger_factory().get_client_messenger())
        callback_total = 0
        last_message_index = -1  # highest index received in send order so far
        failed_at_message_index = None  # type: Optional[int]
        def callback(client_server_message: ClientServerMessage):
            # Echo responses carry the stringified send index; both clients
            # share this callback, so ordering is checked globally.
            nonlocal callback_total
            nonlocal last_message_index
            nonlocal failed_at_message_index
            #print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
            self.assertIsInstance(client_server_message, EchoResponseBaseClientServerMessage)
            echo_response_client_server_message = client_server_message  # type: EchoResponseBaseClientServerMessage
            if int(echo_response_client_server_message.get_message()) == last_message_index + 1:
                # correct message received
                last_message_index += 1
            else:
                if failed_at_message_index is None:
                    failed_at_message_index = last_message_index
            callback_total += 1
        found_exception = None  # type: Optional[Exception]
        def on_exception(exception: Exception):
            # Keep only the first exception observed by either client.
            nonlocal found_exception
            if found_exception is None:
                found_exception = exception
        for client_messenger in client_messengers:
            client_messenger.connect_to_server()
            client_messenger.receive_from_server(
                callback=callback,
                on_exception=on_exception
            )
        print(f"{datetime.utcnow()}: sending messages")
        client_messengers_index = 0
        message_index = 0
        # Prime the sequence with message 0 from the first client.
        client_messengers[client_messengers_index].send_to_server(
            client_server_message=EchoRequestBaseClientServerMessage(
                message=str(message_index),
                is_ordered=True
            )
        )
        message_index += 1
        # Round-robin between the clients, shrinking the delay each pass, until
        # the floor is reached or an out-of-order response has been seen.
        while minimum_delay_between_messages_seconds < current_delay_between_messages_seconds and failed_at_message_index is None:
            time.sleep(current_delay_between_messages_seconds)
            client_messengers_index += 1
            if client_messengers_index == len(client_messengers):
                client_messengers_index = 0
            client_messengers[client_messengers_index].send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=str(message_index),
                    is_ordered=True
                )
            )
            message_index += 1
            current_delay_between_messages_seconds -= current_delay_between_messages_seconds * delay_percentage_decrease_delta
        print(f"{datetime.utcnow()}: waiting for messages")
        time.sleep(5)
        print(f"{datetime.utcnow()}: disposing")
        for client_messenger in client_messengers:
            client_messenger.dispose()
        print(f"{datetime.utcnow()}: disposed")
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
        time.sleep(1)
        if found_exception is not None:
            raise found_exception
        # NOTE(review): this prints failed_at_message_index, which is None when
        # ordering never broke — "last successful index" reads ambiguously.
        print(f"{datetime.utcnow()}: last successful index {failed_at_message_index} with delay of {current_delay_between_messages_seconds} seconds")
        self.assertLess(current_delay_between_messages_seconds, accepted_delay_between_messages_that_could_result_in_disorder)
def test_dispose_client_too_quickly_before_receiving_all_messages(self):
# a thread seems to remain alive when this happens
# NOTE: the client_socket read only gets to 988 before it stops reading
messages_total = 1000
print(f"{datetime.utcnow()}: setting up server")
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
print(f"{datetime.utcnow()}: setting up client")
time.sleep(1)
client_messenger = get_default_client_messenger_factory().get_client_messenger()
callback_total = 0
last_message_index = -1
failed_at_message_index = None # type: int
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
nonlocal last_message_index
nonlocal failed_at_message_index
# print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
self.assertIsInstance(client_server_message, EchoResponseBaseClientServerMessage)
echo_response_client_server_message = client_server_message # type: EchoResponseBaseClientServerMessage
if int(echo_response_client_server_message.get_message()) == last_message_index + 1:
# correct message received
last_message_index += 1
else:
if failed_at_message_index is None:
failed_at_message_index = last_message_index
callback_total += 1
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
if found_exception is None:
found_exception = exception
print(f"{datetime.utcnow()}: connecting to server")
client_messenger.connect_to_server()
print(f"{datetime.utcnow()}: receiving from server")
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending messages")
for message_index in range(messages_total):
client_messenger.send_to_server(
client_server_message=EchoRequestBaseClientServerMessage(
message=str(message_index),
is_ordered=True
)
)
print(f"{datetime.utcnow()}: immediately disposing")
client_messenger.dispose()
server_messenger.stop_receiving_from_clients()
if found_exception is not None:
raise found_exception
self.assertIsNone(failed_at_message_index)
def test_parse_client_server_message_raises_exception_when_receiving_in_server_messenger(self):
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger = get_default_client_messenger_factory().get_client_messenger()
client_messenger.connect_to_server()
callback_total = 0
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
callback_total += 1
self.assertIsInstance(client_server_message, ErrorResponseBaseClientServerMessage)
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending error messages")
expected_exception = f"test exception: {uuid.uuid4()}"
client_messenger.send_to_server(
client_server_message=ErrorRequestBaseClientServerMessage(
is_constructor_exception_to_set=expected_exception
)
)
print(f"{datetime.utcnow()}: wait for messages")
time.sleep(1)
client_messenger.dispose()
time.sleep(1)
with self.assertRaises(Exception) as assertedException:
server_messenger.stop_receiving_from_clients()
self.assertEqual(expected_exception, str(assertedException.exception))
# the server encountered an exception, closing the connection
self.assertIsInstance(found_exception, ReadWriteSocketClosedException)
def test_getting_json_of_client_server_message_raises_exception_when_sending_to_server_messenger(self):
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger = get_default_client_messenger_factory().get_client_messenger()
client_messenger.connect_to_server()
callback_total = 0
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
callback_total += 1
self.assertIsInstance(client_server_message, ErrorResponseBaseClientServerMessage)
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending error messages")
expected_exception = f"test exception: {uuid.uuid4()}"
with self.assertRaises(Exception) as assertedException:
client_messenger.send_to_server(
client_server_message=ErrorRequestBaseClientServerMessage(
to_json_exception=expected_exception
)
)
self.assertEqual(expected_exception, str(assertedException.exception))
time.sleep(1)
client_messenger.dispose()
time.sleep(1)
server_messenger.stop_receiving_from_clients()
if found_exception is not None:
raise found_exception
    def test_getting_structural_error_client_server_message_response_from_client_server_message_raises_exception_when_processing_in_server_messenger_but_succeeds(self):
        """A server-side exception while building the structural-error response
        must not close the connection: a later ping still gets a response and
        stop_receiving_from_clients does not raise.
        """
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)  # presumably lets the server start listening — TODO confirm
        client_messenger = get_default_client_messenger_factory().get_client_messenger()
        client_messenger.connect_to_server()
        callback_total = 0
        def callback(client_server_message: ClientServerMessage):
            # First delivery should be the error response, the second the ping
            # response — order matters here.
            nonlocal callback_total
            print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
            if callback_total == 0:
                self.assertIsInstance(client_server_message, ErrorResponseBaseClientServerMessage)
            else:
                self.assertIsInstance(client_server_message, PingResponseBaseClientServerMessage)
            callback_total += 1
        found_exception = None  # type: Optional[Exception]
        def on_exception(exception: Exception):
            nonlocal found_exception
            found_exception = exception
        client_messenger.receive_from_server(
            callback=callback,
            on_exception=on_exception
        )
        print(f"{datetime.utcnow()}: sending error messages")
        # Unique message text so any surfaced exception is unambiguous.
        expected_exception = f"test exception: {uuid.uuid4()}"
        client_messenger.send_to_server(
            client_server_message=ErrorRequestBaseClientServerMessage(
                get_structural_error_client_server_message_response_exception=expected_exception
            )
        )
        time.sleep(1)
        # A follow-up ping proves the server is still serving this connection.
        client_messenger.send_to_server(
            client_server_message=PingRequestBaseClientServerMessage()
        )
        time.sleep(1)
        client_messenger.dispose()
        time.sleep(1)
        server_messenger.stop_receiving_from_clients()
        # the server encountered an exception but did not close the connect due to it and is still receiving requests
        if found_exception is not None:
            raise found_exception
    def test_getting_structural_error_client_server_message_response_from_client_server_message_raises_exception_when_processing_in_server_messenger_and_causes_exception(self):
        """When the structural-error response fails after a state-changing press,
        the server closes the connection: the next send raises
        ReadWriteSocketClosedException and stop_receiving_from_clients
        re-raises the server-side exception.
        """
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)  # presumably lets the server start listening — TODO confirm
        client_messenger = get_default_client_messenger_factory().get_client_messenger()
        client_messenger.connect_to_server()
        callback_total = 0
        def callback(client_server_message: ClientServerMessage):
            # Expected order: error response first, ping response after (the
            # ping is never answered in this test — the socket closes first).
            nonlocal callback_total
            print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
            if callback_total == 0:
                self.assertIsInstance(client_server_message, ErrorResponseBaseClientServerMessage)
            else:
                self.assertIsInstance(client_server_message, PingResponseBaseClientServerMessage)
            callback_total += 1
        found_exception = None  # type: Optional[Exception]
        def on_exception(exception: Exception):
            nonlocal found_exception
            found_exception = exception
        client_messenger.receive_from_server(
            callback=callback,
            on_exception=on_exception
        )
        print(f"{datetime.utcnow()}: sending error messages")
        # Unique message text so the final assertion pins this exact exception.
        expected_exception = f"test exception: {uuid.uuid4()}"
        # The press changes structure state before the failing error request.
        client_messenger.send_to_server(
            client_server_message=PressButtonBaseClientServerMessage()
        )
        time.sleep(1)
        print(f"{datetime.utcnow()}: sending ErrorRequestBaseClientServerMessage")
        client_messenger.send_to_server(
            client_server_message=ErrorRequestBaseClientServerMessage(
                get_structural_error_client_server_message_response_exception=expected_exception
            )
        )
        time.sleep(1)
        print(f"{datetime.utcnow()}: sending PingRequestBaseClientServerMessage")
        # The server closed the socket, so this send must fail.
        with self.assertRaises(ReadWriteSocketClosedException):
            client_messenger.send_to_server(
                client_server_message=PingRequestBaseClientServerMessage()
            )
        print(f"{datetime.utcnow()}: waiting")
        time.sleep(1)
        print(f"{datetime.utcnow()}: disposing client_messenger")
        client_messenger.dispose()
        time.sleep(1)
        print(f"{datetime.utcnow()}: server_messenger.stop_receiving_from_clients()")
        # The server-side exception is surfaced when the server is stopped.
        with self.assertRaises(Exception) as assertedException:
            server_messenger.stop_receiving_from_clients()
        self.assertEqual(expected_exception, str(assertedException.exception))
        self.assertIsInstance(found_exception, ReadWriteSocketClosedException)
def test_parse_client_server_message_in_response_raises_exception_when_parsing_in_server_messenger(self):
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger = get_default_client_messenger_factory().get_client_messenger()
client_messenger.connect_to_server()
callback_total = 0
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
print(f"{datetime.utcnow()}: callback: client_server_message: {client_server_message.to_json()}")
if callback_total == 0:
self.assertIsInstance(client_server_message, ErrorResponseBaseClientServerMessage)
else:
self.assertIsInstance(client_server_message, PingResponseBaseClientServerMessage)
callback_total += 1
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending error messages")
expected_exception = f"test exception: {uuid.uuid4()}"
client_messenger.send_to_server(
client_server_message=ErrorRequestBaseClientServerMessage(
response_constructor_arguments={
"is_constructor_exception_to_set": expected_exception
}
)
)
time.sleep(1)
client_messenger.send_to_server(
client_server_message=PingRequestBaseClientServerMessage()
)
print(f"{datetime.utcnow()}: waiting")
time.sleep(1)
client_messenger.dispose()
time.sleep(1)
server_messenger.stop_receiving_from_clients()
self.assertEqual(expected_exception, str(found_exception))
# TODO create more server-side ErrorResponse tests
def test_unordered_client_server_messages_100m_10s(self):
messages_total = 100
message_subset_length = 10
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger = get_default_client_messenger_factory().get_client_messenger()
client_messenger.connect_to_server()
callback_total = 0
previous_ordered_index = -1 - message_subset_length
previous_unordered_index = -1
is_printing = False
def callback(echo_response: EchoResponseBaseClientServerMessage):
nonlocal callback_total
nonlocal previous_ordered_index
nonlocal previous_unordered_index
nonlocal is_printing
nonlocal message_subset_length
#print(f"{datetime.utcnow()}: callback: echo_response: {echo_response.to_json()}")
self.assertIsInstance(echo_response, EchoResponseBaseClientServerMessage)
callback_total += 1
index = int(echo_response.get_message())
print(f"index: {index}")
subset_index = int(index / message_subset_length) % 2
print(f"subset_index: {subset_index}")
previous_subset_index = math.floor((index - 1) / message_subset_length) % 2
print(f"previous_subset_index: {previous_subset_index}")
if subset_index == 0:
if previous_subset_index != subset_index:
if previous_ordered_index + message_subset_length + 1 != index:
raise Exception(f"Failed to jump to next index at index: {index}")
else:
if is_printing:
print(f"{datetime.utcnow()}: found ordered index: {index}")
else:
if previous_ordered_index + 1 != index:
raise Exception(f"Failed to find next index at index: {index}")
else:
if is_printing:
print(f"{datetime.utcnow()}: found ordered index: {index}")
previous_ordered_index = index
else:
if previous_subset_index != subset_index:
if previous_unordered_index + message_subset_length + 1 != index:
raise Exception(f"Failed to jump to next index at index: {index}")
else:
if is_printing:
print(f"{datetime.utcnow()}: found unordered index: {index}")
else:
if previous_unordered_index + 1 != index:
raise Exception(f"Failed to find next index at index: {index}")
else:
if is_printing:
print(f"{datetime.utcnow()}: found unordered index: {index}")
previous_unordered_index = index
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending messages")
for index in range(messages_total):
subset_index = int(index / message_subset_length) % 2
client_messenger.send_to_server(
client_server_message=EchoRequestBaseClientServerMessage(
message=str(index),
is_ordered=(subset_index == 0)
)
)
print(f"{datetime.utcnow()}: waiting for messages")
time.sleep(5)
print(f"{datetime.utcnow()}: disposing client")
client_messenger.dispose()
time.sleep(1)
server_messenger.stop_receiving_from_clients()
# the server encountered an exception but did not close the connect due to it and is still receiving requests
if found_exception is not None:
raise found_exception
    def test_unordered_client_server_messages_100m_1s(self):
        """Send 100 echo requests alternating ordered/unordered every single
        message (subset length 1) and verify each stream (ordered vs unordered)
        advances contiguously on its own track.
        """
        messages_total = 100
        message_subset_length = 1
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)  # presumably lets the server start listening — TODO confirm
        client_messenger = get_default_client_messenger_factory().get_client_messenger()
        client_messenger.connect_to_server()
        callback_total = 0
        # Ordered track starts one subset "behind" so the first cross-subset
        # jump check (index == previous + subset length + 1) lines up at 0.
        previous_ordered_index = -1 - message_subset_length
        previous_unordered_index = -1
        is_printing = False  # flip to True for per-index debug output
        def callback(echo_response: EchoResponseBaseClientServerMessage):
            nonlocal callback_total
            nonlocal previous_ordered_index
            nonlocal previous_unordered_index
            nonlocal is_printing
            nonlocal message_subset_length
            #print(f"{datetime.utcnow()}: callback: echo_response: {echo_response.to_json()}")
            self.assertIsInstance(echo_response, EchoResponseBaseClientServerMessage)
            callback_total += 1
            index = int(echo_response.get_message())
            #print(f"index: {index}")
            # subset_index: 0 for the ordered subset, 1 for the unordered one.
            subset_index = int(index / message_subset_length) % 2
            #print(f"subset_index: {subset_index}")
            previous_subset_index = math.floor((index - 1) / message_subset_length) % 2
            #print(f"previous_subset_index: {previous_subset_index}")
            if subset_index == 0:
                # Ordered track: either a cross-subset jump of subset length + 1
                # or an advance of exactly one within the subset.
                if previous_subset_index != subset_index:
                    if previous_ordered_index + message_subset_length + 1 != index:
                        raise Exception(f"Failed to jump to next index at index: {index}")
                    else:
                        if is_printing:
                            print(f"{datetime.utcnow()}: found ordered index: {index}")
                else:
                    if previous_ordered_index + 1 != index:
                        raise Exception(f"Failed to find next index at index: {index}")
                    else:
                        if is_printing:
                            print(f"{datetime.utcnow()}: found ordered index: {index}")
                previous_ordered_index = index
            else:
                # Unordered track: same contiguity/jump rules on its own track.
                if previous_subset_index != subset_index:
                    if previous_unordered_index + message_subset_length + 1 != index:
                        raise Exception(f"Failed to jump to next index at index: {index}")
                    else:
                        if is_printing:
                            print(f"{datetime.utcnow()}: found unordered index: {index}")
                else:
                    if previous_unordered_index + 1 != index:
                        raise Exception(f"Failed to find next index at index: {index}")
                    else:
                        if is_printing:
                            print(f"{datetime.utcnow()}: found unordered index: {index}")
                previous_unordered_index = index
        found_exception = None  # type: Optional[Exception]
        def on_exception(exception: Exception):
            nonlocal found_exception
            found_exception = exception
        client_messenger.receive_from_server(
            callback=callback,
            on_exception=on_exception
        )
        print(f"{datetime.utcnow()}: sending messages")
        for index in range(messages_total):
            # Even subsets are sent ordered, odd subsets unordered.
            subset_index = int(index / message_subset_length) % 2
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=str(index),
                    is_ordered=(subset_index == 0)
                )
            )
        print(f"{datetime.utcnow()}: waiting for messages")
        time.sleep(5)
        print(f"{datetime.utcnow()}: disposing client")
        client_messenger.dispose()
        time.sleep(1)
        server_messenger.stop_receiving_from_clients()
        # the server encountered an exception but did not close the connect due to it and is still receiving requests
        if found_exception is not None:
            raise found_exception
    def test_unordered_client_server_messages_1000m_1s(self):
        """Same as the 100-message variant but with 1000 messages: alternate
        ordered/unordered per message and verify each stream advances
        contiguously on its own track.
        """
        messages_total = 1000
        message_subset_length = 1
        server_messenger = get_default_server_messenger_factory().get_server_messenger()
        server_messenger.start_receiving_from_clients()
        time.sleep(1)  # presumably lets the server start listening — TODO confirm
        client_messenger = get_default_client_messenger_factory().get_client_messenger()
        client_messenger.connect_to_server()
        callback_total = 0
        # Ordered track starts one subset "behind" so the first cross-subset
        # jump check (index == previous + subset length + 1) lines up at 0.
        previous_ordered_index = -1 - message_subset_length
        previous_unordered_index = -1
        is_printing = False  # flip to True for per-index debug output
        def callback(echo_response: EchoResponseBaseClientServerMessage):
            nonlocal callback_total
            nonlocal previous_ordered_index
            nonlocal previous_unordered_index
            nonlocal is_printing
            nonlocal message_subset_length
            #print(f"{datetime.utcnow()}: callback: echo_response: {echo_response.to_json()}")
            self.assertIsInstance(echo_response, EchoResponseBaseClientServerMessage)
            callback_total += 1
            index = int(echo_response.get_message())
            #print(f"index: {index}")
            # subset_index: 0 for the ordered subset, 1 for the unordered one.
            subset_index = int(index / message_subset_length) % 2
            #print(f"subset_index: {subset_index}")
            previous_subset_index = math.floor((index - 1) / message_subset_length) % 2
            #print(f"previous_subset_index: {previous_subset_index}")
            if subset_index == 0:
                # Ordered track: either a cross-subset jump of subset length + 1
                # or an advance of exactly one within the subset.
                if previous_subset_index != subset_index:
                    if previous_ordered_index + message_subset_length + 1 != index:
                        raise Exception(f"Failed to jump to next index at index: {index}")
                    else:
                        if is_printing:
                            print(f"{datetime.utcnow()}: found ordered index: {index}")
                else:
                    if previous_ordered_index + 1 != index:
                        raise Exception(f"Failed to find next index at index: {index}")
                    else:
                        if is_printing:
                            print(f"{datetime.utcnow()}: found ordered index: {index}")
                previous_ordered_index = index
            else:
                # Unordered track: same contiguity/jump rules on its own track.
                if previous_subset_index != subset_index:
                    if previous_unordered_index + message_subset_length + 1 != index:
                        raise Exception(f"Failed to jump to next index at index: {index}")
                    else:
                        if is_printing:
                            print(f"{datetime.utcnow()}: found unordered index: {index}")
                else:
                    if previous_unordered_index + 1 != index:
                        raise Exception(f"Failed to find next index at index: {index}")
                    else:
                        if is_printing:
                            print(f"{datetime.utcnow()}: found unordered index: {index}")
                previous_unordered_index = index
        found_exception = None  # type: Optional[Exception]
        def on_exception(exception: Exception):
            nonlocal found_exception
            found_exception = exception
        client_messenger.receive_from_server(
            callback=callback,
            on_exception=on_exception
        )
        print(f"{datetime.utcnow()}: sending messages")
        for index in range(messages_total):
            # Even subsets are sent ordered, odd subsets unordered.
            subset_index = int(index / message_subset_length) % 2
            client_messenger.send_to_server(
                client_server_message=EchoRequestBaseClientServerMessage(
                    message=str(index),
                    is_ordered=(subset_index == 0)
                )
            )
        print(f"{datetime.utcnow()}: waiting for messages")
        time.sleep(5)
        print(f"{datetime.utcnow()}: disposing client")
        client_messenger.dispose()
        time.sleep(1)
        server_messenger.stop_receiving_from_clients()
        # the server encountered an exception but did not close the connect due to it and is still receiving requests
        if found_exception is not None:
            raise found_exception
def test_child_structure_power_once_then_reset(self):
client_messenger = get_default_client_messenger_factory().get_client_messenger()
server_messenger = get_default_server_messenger_factory().get_server_messenger()
server_messenger.start_receiving_from_clients()
time.sleep(1)
client_messenger.connect_to_server()
callback_total = 0
def callback(client_server_message: ClientServerMessage):
nonlocal callback_total
callback_total += 1
print(f"received callback: {client_server_message}")
found_exception = None # type: Exception
def on_exception(exception: Exception):
nonlocal found_exception
found_exception = exception
client_messenger.receive_from_server(
callback=callback,
on_exception=on_exception
)
print(f"{datetime.utcnow()}: sending first announcement")
client_messenger.send_to_server(
client_server_message=AnnounceBaseClientServerMessage(
name="First"
)
)
print(f"{datetime.utcnow()}: sending first power")
client_messenger.send_to_server(
client_server_message=PowerButtonBaseClientServerMessage(
is_anonymous=False
)
)
print(f"{datetime.utcnow()}: sending reset")
client_messenger.send_to_server(
client_server_message=ResetButtonBaseClientServerMessage()
)
print(f"{datetime.utcnow()}: waiting for messages")
time.sleep(1)
print(f"{datetime.utcnow()}: disposing")
client_messenger.dispose()
print(f"{datetime.utcnow()}: disposed")
print(f"{datetime.utcnow()}: stopping")
server_messenger.stop_receiving_from_clients()
print(f"{datetime.utcnow()}: stopped")
time.sleep(1)
self.assertEqual(0, callback_total)
self.assertIsNone(found_exception)
def test_child_structure_power_four_times(self):
    """Four non-anonymous power presses: the server should send exactly one
    message (a PowerOverloadTransmission) back to the sending client.
    """
    client_messenger = get_default_client_messenger_factory().get_client_messenger()
    server_messenger = get_default_server_messenger_factory().get_server_messenger()
    server_messenger.start_receiving_from_clients()
    time.sleep(1)  # give the server's accept loop time to start
    client_messenger.connect_to_server()
    callback_total = 0
    def callback(power_overload_transmission: PowerOverloadTransmissionBaseClientServerMessage):
        # Counts every server->client message; exactly one is expected here.
        nonlocal callback_total
        callback_total += 1
        self.assertIsInstance(power_overload_transmission, PowerOverloadTransmissionBaseClientServerMessage)
    found_exception = None  # type: Optional[Exception]
    def on_exception(exception: Exception):
        # Capture receive-thread exceptions so the main thread can assert on them.
        nonlocal found_exception
        found_exception = exception
    client_messenger.receive_from_server(
        callback=callback,
        on_exception=on_exception
    )
    print(f"{datetime.utcnow()}: sending first announcement")
    client_messenger.send_to_server(
        client_server_message=AnnounceBaseClientServerMessage(
            name="First"
        )
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: first power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=False
        )
    )
    print(f"{datetime.utcnow()}: first power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: second power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=False
        )
    )
    print(f"{datetime.utcnow()}: second power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: third power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=False
        )
    )
    print(f"{datetime.utcnow()}: third power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: fourth power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=False
        )
    )
    print(f"{datetime.utcnow()}: fourth power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: waiting for messages")
    time.sleep(1)
    print(f"{datetime.utcnow()}: disposing")
    client_messenger.dispose()
    print(f"{datetime.utcnow()}: disposed")
    print(f"{datetime.utcnow()}: stopping")
    server_messenger.stop_receiving_from_clients()
    print(f"{datetime.utcnow()}: stopped")
    time.sleep(1)
    self.assertEqual(1, callback_total)
    self.assertIsNone(found_exception)
def test_child_structure_power_two_times_anonymous_underpowered_and_power_attempt_with_failure(self):
    """Two anonymous power presses (underpowered), then three presses from a
    second client (expecting an 'underpowered' transmission), then a third
    power press that the server should reject with PowerButtonFailed.
    Two callbacks total are expected across both clients.
    """
    client_messenger = get_default_client_messenger_factory().get_client_messenger()
    server_messenger = get_default_server_messenger_factory().get_server_messenger()
    server_messenger.start_receiving_from_clients()
    time.sleep(1)  # give the server's accept loop time to start
    client_messenger.connect_to_server()
    callback_total = 0
    def callback(client_server_message: ClientServerMessage):
        # Shared counter across both clients' callbacks.
        nonlocal callback_total
        callback_total += 1
        self.assertIsInstance(client_server_message, PowerButtonFailedBaseClientServerMessage)
    found_exception = None  # type: Optional[Exception]
    def on_exception(exception: Exception):
        # Capture receive-thread exceptions for the final assertion.
        nonlocal found_exception
        found_exception = exception
    client_messenger.receive_from_server(
        callback=callback,
        on_exception=on_exception
    )
    print(f"{datetime.utcnow()}: sending first announcement")
    client_messenger.send_to_server(
        client_server_message=AnnounceBaseClientServerMessage(
            name="First"
        )
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: first power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: first power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: second power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: second power: end")
    time.sleep(0.1)
    # Second client drives the "press" side of the scenario.
    press_client_messenger = get_default_client_messenger_factory().get_client_messenger()
    press_client_messenger.connect_to_server()
    def press_callback(three_presses_transmission: ThreePressesTransmissionBaseClientServerMessage):
        nonlocal callback_total
        callback_total += 1
        self.assertIsInstance(three_presses_transmission, ThreePressesTransmissionBaseClientServerMessage)
        self.assertEqual("underpowered", three_presses_transmission.get_power())
    press_client_messenger.receive_from_server(
        callback=press_callback,
        on_exception=on_exception
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: first press: start")
    press_client_messenger.send_to_server(
        client_server_message=PressButtonBaseClientServerMessage()
    )
    print(f"{datetime.utcnow()}: first press: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: second press: start")
    press_client_messenger.send_to_server(
        client_server_message=PressButtonBaseClientServerMessage()
    )
    print(f"{datetime.utcnow()}: second press: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: third press: start")
    press_client_messenger.send_to_server(
        client_server_message=PressButtonBaseClientServerMessage()
    )
    print(f"{datetime.utcnow()}: third press: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: third power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: third power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: waiting for messages")
    time.sleep(1)
    print(f"{datetime.utcnow()}: dispose client_messenger: start")
    client_messenger.dispose()
    print(f"{datetime.utcnow()}: dispose client_messenger: end")
    print(f"{datetime.utcnow()}: dispose press_client_messenger: start")
    press_client_messenger.dispose()
    print(f"{datetime.utcnow()}: dispose press_client_messenger: end")
    print(f"{datetime.utcnow()}: stopping")
    server_messenger.stop_receiving_from_clients()
    print(f"{datetime.utcnow()}: stopped")
    time.sleep(1)
    self.assertEqual(2, callback_total)
    if found_exception is not None:
        raise found_exception
def test_child_structure_power_three_times_anonymous_powered(self):
    """Three anonymous power presses, then three presses from a second
    client: the press client should receive one 'powered' transmission,
    and the power client should receive nothing at all.
    """
    client_messenger = get_default_client_messenger_factory().get_client_messenger()
    server_messenger = get_default_server_messenger_factory().get_server_messenger()
    server_messenger.start_receiving_from_clients()
    time.sleep(1)  # give the server's accept loop time to start
    client_messenger.connect_to_server()
    callback_total = 0
    def callback(client_server_message: ClientServerMessage):
        # Anonymous presses must not be echoed back to this client.
        nonlocal callback_total
        callback_total += 1
        raise Exception("This client should not be receiving messages.")
    found_exception = None  # type: Optional[Exception]
    def on_exception(exception: Exception):
        # Capture receive-thread exceptions for the final assertion.
        nonlocal found_exception
        found_exception = exception
    client_messenger.receive_from_server(
        callback=callback,
        on_exception=on_exception
    )
    print(f"{datetime.utcnow()}: sending first announcement")
    client_messenger.send_to_server(
        client_server_message=AnnounceBaseClientServerMessage(
            name="First"
        )
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: first power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: first power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: second power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: second power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: third power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: third power: end")
    time.sleep(0.1)
    # Second client drives the "press" side of the scenario.
    press_client_messenger = get_default_client_messenger_factory().get_client_messenger()
    press_client_messenger.connect_to_server()
    def press_callback(three_presses_transmission: ThreePressesTransmissionBaseClientServerMessage):
        nonlocal callback_total
        callback_total += 1
        self.assertIsInstance(three_presses_transmission, ThreePressesTransmissionBaseClientServerMessage)
        self.assertEqual("powered", three_presses_transmission.get_power())
    press_client_messenger.receive_from_server(
        callback=press_callback,
        on_exception=on_exception
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: first press: start")
    press_client_messenger.send_to_server(
        client_server_message=PressButtonBaseClientServerMessage()
    )
    print(f"{datetime.utcnow()}: first press: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: second press: start")
    press_client_messenger.send_to_server(
        client_server_message=PressButtonBaseClientServerMessage()
    )
    print(f"{datetime.utcnow()}: second press: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: third press: start")
    press_client_messenger.send_to_server(
        client_server_message=PressButtonBaseClientServerMessage()
    )
    print(f"{datetime.utcnow()}: third press: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: waiting for messages")
    time.sleep(1)
    print(f"{datetime.utcnow()}: dispose client_messenger: start")
    client_messenger.dispose()
    print(f"{datetime.utcnow()}: dispose client_messenger: end")
    print(f"{datetime.utcnow()}: dispose press_client_messenger: start")
    press_client_messenger.dispose()
    print(f"{datetime.utcnow()}: dispose press_client_messenger: end")
    print(f"{datetime.utcnow()}: stopping")
    server_messenger.stop_receiving_from_clients()
    print(f"{datetime.utcnow()}: stopped")
    time.sleep(1)
    self.assertEqual(1, callback_total)
    if found_exception is not None:
        raise found_exception
def test_child_structure_power_four_times_anonymous_overpowered(self):
    """Four anonymous power presses, then three presses from a second
    client: the press client should receive one 'overpowered'
    transmission, and the power client should receive nothing.
    """
    client_messenger = get_default_client_messenger_factory().get_client_messenger()
    server_messenger = get_default_server_messenger_factory().get_server_messenger()
    server_messenger.start_receiving_from_clients()
    time.sleep(1)  # give the server's accept loop time to start
    client_messenger.connect_to_server()
    callback_total = 0
    def callback(client_server_message: ClientServerMessage):
        # Anonymous presses must not be echoed back to this client.
        nonlocal callback_total
        callback_total += 1
        raise Exception("This client should not be receiving messages.")
    found_exception = None  # type: Optional[Exception]
    def on_exception(exception: Exception):
        # Capture receive-thread exceptions for the final assertion.
        nonlocal found_exception
        found_exception = exception
    client_messenger.receive_from_server(
        callback=callback,
        on_exception=on_exception
    )
    print(f"{datetime.utcnow()}: sending first announcement")
    client_messenger.send_to_server(
        client_server_message=AnnounceBaseClientServerMessage(
            name="First"
        )
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: first power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: first power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: second power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: second power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: third power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: third power: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: fourth power: start")
    client_messenger.send_to_server(
        client_server_message=PowerButtonBaseClientServerMessage(
            is_anonymous=True
        )
    )
    print(f"{datetime.utcnow()}: fourth power: end")
    time.sleep(0.1)
    # Second client drives the "press" side of the scenario.
    press_client_messenger = get_default_client_messenger_factory().get_client_messenger()
    press_client_messenger.connect_to_server()
    def press_callback(three_presses_transmission: ThreePressesTransmissionBaseClientServerMessage):
        nonlocal callback_total
        callback_total += 1
        self.assertIsInstance(three_presses_transmission, ThreePressesTransmissionBaseClientServerMessage)
        self.assertEqual("overpowered", three_presses_transmission.get_power())
    press_client_messenger.receive_from_server(
        callback=press_callback,
        on_exception=on_exception
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: first press: start")
    press_client_messenger.send_to_server(
        client_server_message=PressButtonBaseClientServerMessage()
    )
    print(f"{datetime.utcnow()}: first press: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: second press: start")
    press_client_messenger.send_to_server(
        client_server_message=PressButtonBaseClientServerMessage()
    )
    print(f"{datetime.utcnow()}: second press: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: third press: start")
    press_client_messenger.send_to_server(
        client_server_message=PressButtonBaseClientServerMessage()
    )
    print(f"{datetime.utcnow()}: third press: end")
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: waiting for messages")
    time.sleep(1)
    print(f"{datetime.utcnow()}: dispose client_messenger: start")
    client_messenger.dispose()
    print(f"{datetime.utcnow()}: dispose client_messenger: end")
    print(f"{datetime.utcnow()}: dispose press_client_messenger: start")
    press_client_messenger.dispose()
    print(f"{datetime.utcnow()}: dispose press_client_messenger: end")
    print(f"{datetime.utcnow()}: stopping")
    server_messenger.stop_receiving_from_clients()
    print(f"{datetime.utcnow()}: stopped")
    time.sleep(1)
    self.assertEqual(1, callback_total)
    if found_exception is not None:
        raise found_exception
def test_child_structure_power_five_times_anonymous_impossible_state(self):
    """Five anonymous power presses would drive the structure into an
    impossible state: the fifth press should be rejected with exactly one
    PowerButtonFailed message back to the client.
    """
    client_messenger = get_default_client_messenger_factory().get_client_messenger()
    server_messenger = get_default_server_messenger_factory().get_server_messenger()
    server_messenger.start_receiving_from_clients()
    time.sleep(1)  # give the server's accept loop time to start
    client_messenger.connect_to_server()
    callback_total = 0
    def callback(power_button_failed: PowerButtonFailedBaseClientServerMessage):
        nonlocal callback_total
        callback_total += 1
        self.assertIsInstance(power_button_failed, PowerButtonFailedBaseClientServerMessage)
    found_exception = None  # type: Optional[Exception]
    def on_exception(exception: Exception):
        # Capture receive-thread exceptions for the final assertion.
        nonlocal found_exception
        found_exception = exception
    client_messenger.receive_from_server(
        callback=callback,
        on_exception=on_exception
    )
    # try/finally guarantees teardown even if a send raises mid-scenario.
    try:
        print(f"{datetime.utcnow()}: sending first announcement")
        client_messenger.send_to_server(
            client_server_message=AnnounceBaseClientServerMessage(
                name="First"
            )
        )
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: first power: start")
        client_messenger.send_to_server(
            client_server_message=PowerButtonBaseClientServerMessage(
                is_anonymous=True
            )
        )
        print(f"{datetime.utcnow()}: first power: end")
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: second power: start")
        client_messenger.send_to_server(
            client_server_message=PowerButtonBaseClientServerMessage(
                is_anonymous=True
            )
        )
        print(f"{datetime.utcnow()}: second power: end")
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: third power: start")
        client_messenger.send_to_server(
            client_server_message=PowerButtonBaseClientServerMessage(
                is_anonymous=True
            )
        )
        print(f"{datetime.utcnow()}: third power: end")
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: fourth power: start")
        client_messenger.send_to_server(
            client_server_message=PowerButtonBaseClientServerMessage(
                is_anonymous=True
            )
        )
        print(f"{datetime.utcnow()}: fourth power: end")
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: fifth power: start")
        client_messenger.send_to_server(
            client_server_message=PowerButtonBaseClientServerMessage(
                is_anonymous=True
            )
        )
        print(f"{datetime.utcnow()}: fifth power: end")
        time.sleep(0.1)
        print(f"{datetime.utcnow()}: waiting for messages")
        time.sleep(1)
    finally:
        print(f"{datetime.utcnow()}: dispose client_messenger: start")
        client_messenger.dispose()
        print(f"{datetime.utcnow()}: dispose client_messenger: end")
        print(f"{datetime.utcnow()}: stopping")
        server_messenger.stop_receiving_from_clients()
        print(f"{datetime.utcnow()}: stopped")
    time.sleep(1)
    self.assertEqual(1, callback_total)
    if found_exception is not None:
        raise found_exception
def test_timer_request_1s(self):
    """A 1-second timer request should fire exactly once and echo the
    unique message back to the requesting client.
    """
    client_messenger = get_default_client_messenger_factory().get_client_messenger()
    server_messenger = get_default_server_messenger_factory().get_server_messenger()
    server_messenger.start_receiving_from_clients()
    time.sleep(1)  # give the server's accept loop time to start
    client_messenger.connect_to_server()
    callback_total = 0
    expected_message = str(uuid.uuid4())  # unique payload ties response to this test
    def callback(timer_response: TimerResponseBaseClientServerMessage):
        nonlocal callback_total
        nonlocal expected_message
        callback_total += 1
        print(f"{datetime.utcnow()}: received message")
        self.assertIsInstance(timer_response, TimerResponseBaseClientServerMessage)
        self.assertEqual(expected_message, timer_response.get_message())
    found_exception = None  # type: Optional[Exception]
    def on_exception(exception: Exception):
        # Capture receive-thread exceptions for the final assertion.
        nonlocal found_exception
        found_exception = exception
    client_messenger.receive_from_server(
        callback=callback,
        on_exception=on_exception
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: sending message")
    client_messenger.send_to_server(
        client_server_message=TimerRequestBaseClientServerMessage(
            message=expected_message,
            seconds=1.0
        )
    )
    print(f"{datetime.utcnow()}: waiting for messages")
    time.sleep(5)
    print(f"{datetime.utcnow()}: dispose client_messenger: start")
    client_messenger.dispose()
    print(f"{datetime.utcnow()}: dispose client_messenger: end")
    print(f"{datetime.utcnow()}: stopping")
    server_messenger.stop_receiving_from_clients()
    print(f"{datetime.utcnow()}: stopped")
    time.sleep(5)
    self.assertEqual(1, callback_total)
    if found_exception is not None:
        raise found_exception
def test_timer_request_after_client_disposed(self):
    """If the client is disposed before a 3-second timer fires, the timer
    response must not be delivered: zero callbacks expected.
    """
    client_messenger = get_default_client_messenger_factory().get_client_messenger()
    server_messenger = get_default_server_messenger_factory().get_server_messenger()
    server_messenger.start_receiving_from_clients()
    time.sleep(1)  # give the server's accept loop time to start
    client_messenger.connect_to_server()
    callback_total = 0
    expected_message = str(uuid.uuid4())  # unique payload ties response to this test
    def callback(timer_response: TimerResponseBaseClientServerMessage):
        nonlocal callback_total
        nonlocal expected_message
        callback_total += 1
        print(f"{datetime.utcnow()}: received message")
        self.assertIsInstance(timer_response, TimerResponseBaseClientServerMessage)
        self.assertEqual(expected_message, timer_response.get_message())
    found_exception = None  # type: Optional[Exception]
    def on_exception(exception: Exception):
        # Capture receive-thread exceptions for the final assertion.
        nonlocal found_exception
        found_exception = exception
    client_messenger.receive_from_server(
        callback=callback,
        on_exception=on_exception
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: sending message")
    client_messenger.send_to_server(
        client_server_message=TimerRequestBaseClientServerMessage(
            message=expected_message,
            seconds=3.0
        )
    )
    print(f"{datetime.utcnow()}: waiting for messages")
    time.sleep(0.5)
    # Dispose well before the 3s timer can fire.
    print(f"{datetime.utcnow()}: dispose client_messenger: start")
    client_messenger.dispose()
    print(f"{datetime.utcnow()}: dispose client_messenger: end")
    time.sleep(4.0)
    print(f"{datetime.utcnow()}: stopping")
    server_messenger.stop_receiving_from_clients()
    print(f"{datetime.utcnow()}: stopped")
    time.sleep(5)
    self.assertEqual(0, callback_total)
    if found_exception is not None:
        raise found_exception
def test_timer_request_after_server_stopped(self):
    """If the server stops before a 10-second timer fires, the client must
    receive no timer response; the receive loop should instead surface a
    ReadWriteSocketClosedException via on_exception.
    """
    client_messenger = get_default_client_messenger_factory().get_client_messenger()
    server_messenger = get_default_server_messenger_factory().get_server_messenger()
    server_messenger.start_receiving_from_clients()
    time.sleep(1)  # give the server's accept loop time to start
    client_messenger.connect_to_server()
    callback_total = 0
    expected_message = str(uuid.uuid4())  # unique payload ties response to this test
    def callback(timer_response: TimerResponseBaseClientServerMessage):
        nonlocal callback_total
        nonlocal expected_message
        callback_total += 1
        print(f"{datetime.utcnow()}: received message")
        self.assertIsInstance(timer_response, TimerResponseBaseClientServerMessage)
        self.assertEqual(expected_message, timer_response.get_message())
    found_exception = None  # type: Optional[Exception]
    def on_exception(exception: Exception):
        # The socket-closed error is expected here and asserted at the end.
        nonlocal found_exception
        found_exception = exception
    client_messenger.receive_from_server(
        callback=callback,
        on_exception=on_exception
    )
    time.sleep(0.1)
    print(f"{datetime.utcnow()}: sending message")
    client_messenger.send_to_server(
        client_server_message=TimerRequestBaseClientServerMessage(
            message=expected_message,
            seconds=10.0
        )
    )
    print(f"{datetime.utcnow()}: waiting for messages")
    time.sleep(0.5)
    # Stop the server long before the 10s timer can fire.
    print(f"{datetime.utcnow()}: stopping server")
    server_messenger.stop_receiving_from_clients()
    print(f"{datetime.utcnow()}: stopped server")
    time.sleep(12.0)
    print(f"{datetime.utcnow()}: dispose client_messenger: start")
    client_messenger.dispose()
    print(f"{datetime.utcnow()}: dispose client_messenger: end")
    time.sleep(5)
    self.assertEqual(0, callback_total)
    self.assertIsInstance(found_exception, ReadWriteSocketClosedException)
# TODO determine where the lingering thread is (2021-12-09)
| 1.960938 | 2 |
connections/DBConnection.py | eng-aomar/content_aggergator | 1 | 12762370 | from pymongo import MongoClient
import os
class Mongodb:
    """Data-access helpers for the content-aggregator MongoDB database.

    All methods are classmethods; a connection is opened on demand from the
    ``DB_URI`` environment variable.
    """

    @classmethod
    def db_connect(cls):
        """Connect using ``$DB_URI`` and return the contentagregatordb handle."""
        db_uri = os.environ.get('DB_URI')
        client = MongoClient(db_uri)
        return client.contentagregatordb

    @classmethod
    def get_urls(cls):
        """Return a cursor over every document in the websites collection."""
        db = cls.db_connect()
        return db['websites'].find()

    @classmethod
    def get_url_by(cls, url_category):
        """Return websites matching *url_category* (a pymongo filter dict)."""
        db = cls.db_connect()
        return db['websites'].find(url_category)

    @classmethod
    def get_articels_collection(cls):
        """Return the articles collection.

        Name kept (typo and all) for backward compatibility with callers.
        """
        return cls.db_connect()['articles']

    @classmethod
    def is_saved_to(cls, articles_collection, article_url):
        """Return the stored article whose 'url' equals *article_url*, or None."""
        return articles_collection.find_one({'url': article_url})

    @classmethod
    def insert_articles(cls, articles):
        """Insert each article in *articles* whose URL is not already stored.

        Articles are deduplicated by their 'url' field; duplicates are
        silently skipped (the previous ``else: pass`` branch was dead code).
        """
        articles_collection = cls.get_articels_collection()
        for article in articles:
            if cls.is_saved_to(articles_collection, article['url']) is None:
                articles_collection.insert_one(article)

    @classmethod
    def find_by(cls, baseurl, articles_collection):
        """Return up to the 10 newest articles (descending _id) for *baseurl*.

        Each result is projected down to the category/baseurl/webname/title/url
        fields the callers consume.
        """
        cursor = articles_collection.find(
            {'baseurl': baseurl}, sort=[('_id', -1)]
        ).limit(10)
        wanted = ('category', 'baseurl', 'webname', 'title', 'url')
        return [{key: doc[key] for key in wanted} for doc in cursor]
# db = Mongodb.db_connect()
# latest_articles = Mongodb.find_by('https://www.bbc.com/arabic', db['articles'])
# print(latest_articles)
| 2.703125 | 3 |
tracehess.py | BrettLeroux/GRIPS-MCMC | 0 | 12762371 | <reponame>BrettLeroux/GRIPS-MCMC<filename>tracehess.py
import numpy as np
# Resolved an unresolved git merge conflict (<<<<<<< HEAD / >>>>>>> master)
# that made this module unparseable: kept the finite-difference gradient with
# master's fixed step size, HEAD's Hessian helper (master had it commented
# out), and both branches' sample functions.  Also removed a stray bare
# ``hess`` expression (NameError) and the call to np.asscalar, which was
# removed from NumPy.

# Sample input and scalar test functions used when experimenting by hand.
x = np.array([[2.0], [2.0], [2.0]])


def f(x):
    """Sample scalar field: f(x) = x0^2 * x1^2 * x2^2 (from the master branch)."""
    return x[0] ** 2 * x[1] ** 2 * x[2] ** 2


def the_func(y):
    """Sample scalar field: f(y) = y0^2 + y1^2 (from the HEAD branch)."""
    return y[0] ** 2 + y[1] ** 2


def gradient_f(x, f):
    """Forward-difference gradient of scalar function *f* at column vector *x*.

    Args:
        x: numpy column vector (shape (n, 1)); the caller's array is not mutated.
        f: callable mapping such a vector to a scalar.

    Returns:
        numpy array with the same shape as *x* holding df/dx_i.
    """
    assert (x.shape[0] >= x.shape[1]), "the vector should be a column vector"
    x = x.astype(float)  # float copy: safe for integer inputs, caller untouched
    n = x.shape[0]
    # Fixed step size: a step relative to |x[i]| (as HEAD had) collapses to
    # zero when x[i] == 0 and breaks the quotient.
    eps = 1e-5
    gradient = []
    for i in range(n):
        xx0 = 1.0 * x[i]
        f0 = f(x)
        x[i] = x[i] + eps
        f1 = f(x)
        diff = np.asarray(f1 - f0).ravel()
        gradient.append(float(diff[0]) / eps)
        x[i] = xx0
    return np.array(gradient).reshape(x.shape)


def hessian_f(x, f):
    """Forward-difference Hessian of *f* at *x*, built column-wise from gradient_f.

    Returns an (n, n) numpy array.
    """
    x = x.astype(float)
    n = x.shape[0]
    hessian = np.zeros((n, n))
    gd_0 = gradient_f(x, f)
    # Larger step than the gradient's so the difference quotient of two
    # noisy gradients stays stable (HEAD's norm-based eps could underflow).
    eps = 1e-3
    for i in range(n):
        xx0 = 1.0 * x[i]
        x[i] = xx0 + eps
        gd_1 = gradient_f(x, f)
        hessian[:, i] = ((gd_1 - gd_0) / eps).reshape(n)
        x[i] = xx0
    return hessian
| 2.890625 | 3 |
tests/roots/test-ext-autodoc/target/wrappedfunction.py | samdoran/sphinx | 4,973 | 12762372 | from contextlib import contextmanager
from functools import lru_cache
from typing import Generator
# Fixture (path suggests Sphinx autodoc tests): a function whose lru_cache
# wrapper must not obscure the wrapped signature/docstring in generated docs.
# NOTE(review): the docstring below is likely asserted verbatim by the test
# suite -- do not reword it.
@lru_cache(maxsize=None)
def slow_function(message, timeout):
    """This function is slow."""
    print(message)
# Fixture: a @contextmanager-wrapped generator with annotations; exercises
# unwrapping of contextlib wrappers when documenting signatures.
# NOTE(review): the docstring below is likely asserted verbatim by the test
# suite -- do not reword it.
@contextmanager
def feeling_good(x: int, y: int) -> Generator:
    """You'll feel better in this context!"""
    yield
| 2.71875 | 3 |
email_pre_processing_service.py | airazabal/email-pre-processing | 0 | 12762373 | <filename>email_pre_processing_service.py<gh_stars>0
# Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, json
from flask import Flask, request, jsonify
from email_thread import EmailThread
app = Flask(__name__)
app.debug = True
@app.route('/')
def Welcome():
    """Serve the bundled static landing page."""
    return app.send_static_file('index.html')
@app.route('/api/pre_process_email', methods=['POST'])
def Pre_Process_Email():
    """Pre-process a raw e-mail posted as JSON.

    Expects a JSON body with 'source_email' and 'source_id'.  Returns those
    fields plus the thread's subject and its trimmed and cleansed renderings.

    Fixes: the request body was parsed with json.loads twice (once per
    field); raw user data was printed to stdout; the throwaway
    ``request_data_object`` indirection added nothing.
    """
    payload = json.loads(request.data)
    # NOTE(review): .encode() yields bytes, and jsonify()-ing bytes only
    # works on Python 2 -- confirm the target runtime before porting.
    source_email = payload['source_email'].encode('utf-8')
    source_id = payload['source_id'].encode('utf-8')
    email_thread = EmailThread(source_id, source_email)
    response = {
        'source_id': source_id,
        'body': source_email,
        'subject': email_thread.subject,
        'trimmed': email_thread.to_trimmed_string(),
        'cleansed': email_thread.to_cleansed_string(),
    }
    return jsonify(response)
@app.route('/myapp')
def WelcomeToMyapp():
    """Return a plain-text greeting."""
    return 'Welcome again to my app running on Bluemix!'
@app.route('/api/people')
def GetPeople():
    """Return a demo list of people as JSON.

    Fix: the second record used the key 'val' where the first uses 'age'
    for the same quantity; normalized to 'age'.  Also renamed the local
    that shadowed the builtin ``list``.
    """
    people = [
        {'name': 'John', 'age': 28},
        {'name': 'Bill', 'age': 26},
    ]
    return jsonify(results=people)
@app.route('/api/people/<name>')
def SayHello(name):
    """Return a JSON greeting for *name*."""
    greeting = 'Hello ' + name
    return jsonify(results={'message': greeting})
# Cloud Foundry/Bluemix injects the listen port via $PORT; default to 7000
# for local runs.
port = os.getenv('PORT', '7000')
if __name__ == "__main__":
    # Bind on all interfaces so the platform router can reach the app.
    app.run(host='0.0.0.0', port=int(port))
| 2.296875 | 2 |
client/sdk/python/registry/registry_pb2_grpc.py | gofortwos/micro | 37 | 12762374 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from registry import registry_pb2 as registry_dot_registry__pb2
class RegistryStub(object):
    """Client-side stub for the ``registry.Registry`` gRPC service.

    Generated code (see module header) -- do not edit by hand.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.GetService = channel.unary_unary(
                '/registry.Registry/GetService',
                request_serializer=registry_dot_registry__pb2.GetRequest.SerializeToString,
                response_deserializer=registry_dot_registry__pb2.GetResponse.FromString,
                )
        self.Register = channel.unary_unary(
                '/registry.Registry/Register',
                request_serializer=registry_dot_registry__pb2.Service.SerializeToString,
                response_deserializer=registry_dot_registry__pb2.EmptyResponse.FromString,
                )
        self.Deregister = channel.unary_unary(
                '/registry.Registry/Deregister',
                request_serializer=registry_dot_registry__pb2.Service.SerializeToString,
                response_deserializer=registry_dot_registry__pb2.EmptyResponse.FromString,
                )
        self.ListServices = channel.unary_unary(
                '/registry.Registry/ListServices',
                request_serializer=registry_dot_registry__pb2.ListRequest.SerializeToString,
                response_deserializer=registry_dot_registry__pb2.ListResponse.FromString,
                )
        # Watch is the only server-streaming RPC on this service.
        self.Watch = channel.unary_stream(
                '/registry.Registry/Watch',
                request_serializer=registry_dot_registry__pb2.WatchRequest.SerializeToString,
                response_deserializer=registry_dot_registry__pb2.Result.FromString,
                )
class RegistryServicer(object):
    """Server-side handler interface for the ``registry.Registry`` service.

    Subclass and override the methods below; each default implementation
    aborts the RPC with UNIMPLEMENTED.  Generated code -- do not edit by hand.
    """

    def GetService(self, request, context):
        """Handle the GetService RPC (unimplemented by default)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Register(self, request, context):
        """Handle the Register RPC (unimplemented by default)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Deregister(self, request, context):
        """Handle the Deregister RPC (unimplemented by default)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListServices(self, request, context):
        """Handle the ListServices RPC (unimplemented by default)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Watch(self, request, context):
        """Handle the server-streaming Watch RPC (unimplemented by default)."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_RegistryServicer_to_server(servicer, server):
    """Register *servicer*'s RPC method handlers on *server*.

    Generated code -- do not edit by hand.
    """
    rpc_method_handlers = {
            'GetService': grpc.unary_unary_rpc_method_handler(
                    servicer.GetService,
                    request_deserializer=registry_dot_registry__pb2.GetRequest.FromString,
                    response_serializer=registry_dot_registry__pb2.GetResponse.SerializeToString,
            ),
            'Register': grpc.unary_unary_rpc_method_handler(
                    servicer.Register,
                    request_deserializer=registry_dot_registry__pb2.Service.FromString,
                    response_serializer=registry_dot_registry__pb2.EmptyResponse.SerializeToString,
            ),
            'Deregister': grpc.unary_unary_rpc_method_handler(
                    servicer.Deregister,
                    request_deserializer=registry_dot_registry__pb2.Service.FromString,
                    response_serializer=registry_dot_registry__pb2.EmptyResponse.SerializeToString,
            ),
            'ListServices': grpc.unary_unary_rpc_method_handler(
                    servicer.ListServices,
                    request_deserializer=registry_dot_registry__pb2.ListRequest.FromString,
                    response_serializer=registry_dot_registry__pb2.ListResponse.SerializeToString,
            ),
            'Watch': grpc.unary_stream_rpc_method_handler(
                    servicer.Watch,
                    request_deserializer=registry_dot_registry__pb2.WatchRequest.FromString,
                    response_serializer=registry_dot_registry__pb2.Result.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'registry.Registry', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Registry(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def GetService(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/registry.Registry/GetService',
registry_dot_registry__pb2.GetRequest.SerializeToString,
registry_dot_registry__pb2.GetResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Register(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/registry.Registry/Register',
registry_dot_registry__pb2.Service.SerializeToString,
registry_dot_registry__pb2.EmptyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Deregister(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/registry.Registry/Deregister',
registry_dot_registry__pb2.Service.SerializeToString,
registry_dot_registry__pb2.EmptyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListServices(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/registry.Registry/ListServices',
registry_dot_registry__pb2.ListRequest.SerializeToString,
registry_dot_registry__pb2.ListResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    # Generated one-shot helper: server-streaming call (unary_stream) to
    # /registry.Registry/Watch — yields Result messages as they arrive.
    @staticmethod
    def Watch(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        return grpc.experimental.unary_stream(request, target, '/registry.Registry/Watch',
            registry_dot_registry__pb2.WatchRequest.SerializeToString,
            registry_dot_registry__pb2.Result.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 1.84375 | 2 |
muddery/worldeditor/dao/image_resources_mapper.py | dongwudanci/muddery | 127 | 12762375 | """
Query and deal common tables.
"""
from evennia.utils import logger
from django.apps import apps
from django.conf import settings
class ImageResourcesMapper(object):
    """
    Data-access helper for the ``image_resources`` world-data table.
    """
    def __init__(self):
        self.model_name = "image_resources"
        self.model = apps.get_model(settings.WORLD_DATA_APP, self.model_name)
        self.objects = self.model.objects
    def get(self, resource):
        """
        Fetch the image record whose resource path matches exactly.
        Args:
            resource: (string) resource's path.
        """
        return self.objects.get(resource=resource)
    def add(self, path, type, width, height):
        """
        Validate and persist a new image record.
        Args:
            path: image's path
            type: image's type
            width: image's width
            height: image's height
        Return:
            none
        """
        new_record = self.model(
            resource=path,
            type=type,
            image_width=width,
            image_height=height,
        )
        # full_clean() enforces model validators before the row is written.
        new_record.full_clean()
        new_record.save()
IMAGE_RESOURCES = ImageResourcesMapper()
| 2.375 | 2 |
project/main.py | ivanbaug/dissapearing-text | 0 | 12762376 | <reponame>ivanbaug/dissapearing-text
import tkinter as tk
import tkinter.font as tkFont
from tkinter import ttk, messagebox
from tkinter.constants import CENTER, END, WORD
from datetime import datetime as dt
from pathlib import Path
import time
import threading
import json, random
FONT_NAME = "Helvetica"
filepath = "project\\assets\\quotes.json"
class Window(tk.Frame):
    """Main application frame: shows a writing prompt, a text box, and a
    countdown that wipes the text if the user stops typing."""
    def __init__(self, parent, *args, **kwargs):
        """Build all widgets and initialize timer state.

        parent: the Tk root window this frame is packed into.
        """
        tk.Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        self.init_parent()
        # Add Style-Theme
        self.style = ttk.Style(self.parent)
        ## Import the tcl file
        self.parent.tk.call("source", "project\\themes\\forest-light.tcl")
        ## Set the theme with the theme_use method
        self.style.theme_use("forest-light")
        # Define fonts
        self.font_practice_box = tkFont.Font(family=FONT_NAME, size=16)
        self.font_title = tkFont.Font(
            family=FONT_NAME, size=14, weight="bold", slant="italic"
        )
        self.font_info_box = tkFont.Font(family=FONT_NAME, size=12)
        self.font_countdown = tkFont.Font(family=FONT_NAME, size=40, weight="bold")
        # Vars
        self.time_var = 5  # countdown length in seconds (user-adjustable)
        self._start_time = time.perf_counter()
        # Start "current time" far in the past so the countdown loop exits
        # immediately if it were ever entered before the first keystroke.
        self._current_time = time.perf_counter() - 3600
        self.count_var = tk.StringVar()
        self.count_var.set(f"{self.time_var}")
        self.is_writing = False
        self.is_counter_running = False
        self.quotes = self.get_quotes()
        # Title
        self.title_label = ttk.Label(
            self,
            text="",
            foreground="#008a25",
            wraplength=520,
            justify=CENTER,
        )
        self.title_label.grid(
            row=0, column=0, padx=10, pady=5, sticky="nsew", columnspan=2
        )
        self.title_label.configure(font=self.font_title, anchor=CENTER)
        self.new_quote()
        # Create a frame for practice text
        self.entry_frame = ttk.LabelFrame(self, text="Your text", padding=(10, 10))
        self.entry_frame.grid(
            row=2, column=0, padx=10, pady=(10, 10), sticky="nsew", columnspan=2
        )
        # Practice Text
        self.entry = tk.Text(
            self.entry_frame,
            width=50,
            height=8,
            padx=20,
            pady=15,
            wrap=WORD,
            highlightthickness=0,
            borderwidth=0,
        )
        self.entry.configure(font=self.font_practice_box)
        self.entry.config(spacing1=10, spacing2=10)
        # Every keypress resets the countdown via keystroke().
        self.entry.bind("<Key>", self.keystroke)
        self.entry.grid(row=0, column=0)
        # Frame for time customization
        self.info_frame = ttk.LabelFrame(
            self,
            text="Set your countdown (in seconds)",
            padding=(10, 10),
            height=100,
            width=300,
        )
        self.info_frame.grid(
            row=3, column=0, padx=10, pady=(10, 10), sticky="ew", rowspan=3
        )
        # Spinbox
        self.spinbox = ttk.Spinbox(
            self.info_frame,
            from_=5,
            to=120,
            wrap=True,
            width=10,
            textvariable=self.count_var,
        )
        # self.spinbox.insert(0, "5")
        self.spinbox.pack(side="top", padx=10, pady=5)
        # NOTE(review): StringVar.trace("w", ...) is the deprecated API;
        # trace_add("write", ...) is the modern equivalent — confirm before changing.
        self.count_var.trace("w", self.update_count_data)
        # Other information
        self.info_label = ttk.Label(
            self.info_frame,
            text="",
            wraplength=240,
            justify=CENTER,
        )
        self.info_label.pack(side="bottom", padx=10, pady=5)
        # self.info_label.config(anchor=CENTER)
        # Countdown Frame
        self.countdown_frame = ttk.LabelFrame(
            self, text="Countdown", padding=(10, 10), height=100, width=200
        )
        self.countdown_frame.grid(
            row=3, column=1, padx=10, pady=(10, 10), sticky="nsew", rowspan=3
        )
        # Countdown label
        self.cd_label = ttk.Label(
            self.countdown_frame, text=f"{self.time_var}", foreground="#4d4c5c"
        )
        self.cd_label.place(relx=0.5, rely=0.5, anchor=CENTER)
        self.cd_label.configure(font=self.font_countdown)
    def init_parent(self):
        """Configure the root window: title, padding, and a responsive grid."""
        self.parent.title("Write your prompt")
        self.parent.config(padx=20, pady=20)
        self.parent.option_add("*tearOff", False)  # This is always a good idea
        # Make the app responsive
        self.parent.columnconfigure(index=0, weight=1)
        self.parent.columnconfigure(index=1, weight=1)
        self.parent.columnconfigure(index=2, weight=1)
        self.parent.rowconfigure(index=0, weight=1)
        self.parent.rowconfigure(index=1, weight=1)
        self.parent.rowconfigure(index=2, weight=1)
    def keystroke(self, key):
        """<Key> handler: reset the countdown and start the timer thread
        on the first keypress of a writing session."""
        self.is_writing = True
        self._start_time = time.perf_counter()
        self.info_label.config(text="")
        if not self.is_counter_running:
            self.is_counter_running = True
            # Background thread so the ticking countdown does not block the UI.
            x = threading.Thread(target=self.run_countdown)
            x.start()
    def run_countdown(self):
        """Worker-thread loop: tick the countdown label until time runs out,
        then clear the text box and offer to save what was typed."""
        while self._start_time + self.time_var > self._current_time:
            self._current_time = time.perf_counter()
            remaining_time = self.time_var - (self._current_time - self._start_time)
            if remaining_time < 0:
                remaining_time = 0
            self.cd_label.config(text=f"{remaining_time:.1f}")
            time.sleep(0.2)
        # once the counter stops
        self.is_counter_running = False
        self.is_writing = False
        # NOTE(review): `input` shadows the builtin; consider renaming.
        input = self.entry.get("1.0", END)
        self.entry.delete("1.0", END)
        answer = messagebox.askyesno(
            "Your time is up", "Do you want to save what you typed?"
        )
        if answer:
            # Timestamped filename in the current working directory.
            saving_time = dt.now().strftime("%Y%m%d-%H%M%S")
            new_name = f"MyText-{saving_time}.txt"
            new_path = Path.cwd().joinpath(new_name)
            with open(new_path, "w") as f:
                f.write(input)
            messagebox.showinfo("Done!", f"Your text was saved in '{new_path}'")
        self.new_quote()
    def update_count_data(self, var, indx, mode):
        """StringVar trace callback: sync the spinbox value into time_var,
        unless a countdown is already in progress."""
        if self.is_writing:
            self.info_label.config(
                text="The countdown already started, change the timer once it ends."
            )
        else:
            try:
                tvar = self.count_var.get()
                self.time_var = int(tvar)
            except ValueError:
                # Do nothing if gibberish is written on the spinner
                pass
            self.cd_label.config(text=f"{self.time_var}")
    def get_quotes(self):
        """Load the list of prompt quotes from the bundled JSON file."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        return data["quotes"]
    def new_quote(self):
        """Pick a random quote and show it in the title label."""
        temp_quote = random.choice(self.quotes)
        text = f"{temp_quote['quote']}\n-{temp_quote['author']}"
        self.title_label.config(text=text)
# Script entry point: create the root window, mount the app frame, run the loop.
if __name__ == "__main__":
    root = tk.Tk()
    Window(root).pack(fill="both", expand=True)
    root.mainloop()
| 2.984375 | 3 |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ipv4_io_cfg.py | tkamata-test/ydk-py | 0 | 12762377 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
# Auto-generated YANG metadata: maps enum class names to their _MetaInfoEnum
# descriptors (literal -> Python identifier) for the Cisco-IOS-XR-ipv4-io-cfg module.
_meta_table = {
    'Ipv4DefaultPingEnum' : _MetaInfoEnum('Ipv4DefaultPingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_io_cfg',
        {
            'disabled':'disabled',
            'enabled':'enabled',
        }, 'Cisco-IOS-XR-ipv4-io-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-io-cfg']),
    'Ipv4SelfPingEnum' : _MetaInfoEnum('Ipv4SelfPingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_io_cfg',
        {
            'disabled':'disabled',
            'enabled':'enabled',
        }, 'Cisco-IOS-XR-ipv4-io-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-io-cfg']),
    'Ipv4ReachableEnum' : _MetaInfoEnum('Ipv4ReachableEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_io_cfg',
        {
            'any':'any',
            'received':'received',
        }, 'Cisco-IOS-XR-ipv4-io-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-io-cfg']),
    'Ipv4InterfaceQppbEnum' : _MetaInfoEnum('Ipv4InterfaceQppbEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_io_cfg',
        {
            'ip-precedence':'ip_precedence',
            'qos-group':'qos_group',
            'both':'both',
        }, 'Cisco-IOS-XR-ipv4-io-cfg', _yang_ns._namespaces['Cisco-IOS-XR-ipv4-io-cfg']),
}
| 1.507813 | 2 |
flaskeddit/communities/routes.py | aqche/flaskedd | 1 | 12762378 | <filename>flaskeddit/communities/routes.py
from flask import render_template, request
from flaskeddit.communities import communities_blueprint, communities_service
@communities_blueprint.route("/communities")
def communities():
    """
    Route for page displaying list of all communities sorted by date created.
    """
    # type=int makes werkzeug fall back to the default on a malformed
    # ?page= value instead of int() raising ValueError (HTTP 500).
    page = request.args.get("page", 1, type=int)
    communities = communities_service.get_communities(page)
    return render_template("communities.html", tab="recent", communities=communities)
@communities_blueprint.route("/communities/top")
def top_communities():
    """
    Route for page displaying list of all communities sorted by most members.
    """
    # type=int makes werkzeug fall back to the default on a malformed
    # ?page= value instead of int() raising ValueError (HTTP 500).
    page = request.args.get("page", 1, type=int)
    communities = communities_service.get_communities_by_membership(page)
    return render_template("communities.html", tab="top", communities=communities)
| 2.890625 | 3 |
pywizlight/bulblibrary.py | UH-60/pywizlight | 221 | 12762379 | <reponame>UH-60/pywizlight<filename>pywizlight/bulblibrary.py<gh_stars>100-1000
"""Library with compatible bulb types.
Bulb Type detection:
ESP01_SHDW1C_31
ESP01 -- defines the module family (WiFi only bulb in this case)
SH -- Single Head light (most bulbs are single heads) / LED Strip
TW -- Tunable White - can only control CCT and dimming; no color
DW -- Dimmable White (most filament bulbs)
RGB -- Fullstack bulb
1C -- Specific to the hardware - defines PWM frequency + way of controlling CCT temperature
31 -- Related to the hardware revision
"""
import dataclasses
from enum import Enum
from typing import Optional, List
from pywizlight.exceptions import WizLightNotKnownBulb
@dataclasses.dataclass(frozen=True)
class Features:
    """Defines the supported features."""
    color: bool  # can set RGB color
    color_tmp: bool  # can set color temperature (CCT)
    effect: bool  # supports built-in light effects
    brightness: bool  # supports dimming
# RGB (fullstack) bulbs support effects and tunable white in addition to color
RGB_FEATURES = Features(brightness=True, color=True, effect=True, color_tmp=True)
# TODO: TW supports effects but only "some"; improve the mapping to supported effects
TW_FEATURES = Features(brightness=True, color=False, effect=True, color_tmp=True)
# Dimmable white only supports brightness
DW_FEATURES = Features(brightness=True, color=False, effect=False, color_tmp=False)
@dataclasses.dataclass(frozen=True)
class KelvinRange:
    """Defines the kelvin range."""
    max: int  # highest supported color temperature, in kelvin
    min: int  # lowest supported color temperature, in kelvin
class BulbClass(Enum):
    """Bulb Types."""
    # Tunable white: has both cool-white and warm-white LEDs.
    TW = "Tunable White"
    # Dimmable white: only dimmable white LEDs.
    DW = "Dimmable White"
    # Fullstack bulb with RGB LEDs.
    RGB = "RGB Bulb"
@dataclasses.dataclass(frozen=True)
class BulbType:
    """BulbType object to define functions and features of the bulb."""
    features: Features
    name: str
    kelvin_range: Optional[KelvinRange]
    bulb_type: BulbClass
    @staticmethod
    def from_data(module_name: str, kelvin_list: Optional[List[float]]) -> "BulbType":
        """Derive the bulb's capabilities from its module name and kelvin list."""
        # An empty/absent kelvin list means the bulb reports no CCT range.
        kelvin_range: Optional[KelvinRange] = (
            KelvinRange(min=int(min(kelvin_list)), max=int(max(kelvin_list)))
            if kelvin_list
            else None
        )
        try:
            # The feature descriptor is the second "_"-separated token,
            # e.g. "SHRGB1C" in "ESP01_SHRGB1C_31".
            descriptor = module_name.split("_")[1]
        except IndexError:
            raise WizLightNotKnownBulb("The bulb type can not be determined!")
        if "RGB" in descriptor:  # full RGB bulb
            bulb_class, capabilities = BulbClass.RGB, RGB_FEATURES
        elif "TW" in descriptor:  # non-RGB but tunable white bulb
            bulb_class, capabilities = BulbClass.TW, TW_FEATURES
        else:  # plain brightness-only bulb
            bulb_class, capabilities = BulbClass.DW, DW_FEATURES
        return BulbType(
            bulb_type=bulb_class,
            name=module_name,
            features=capabilities,
            kelvin_range=kelvin_range,
        )
| 2.859375 | 3 |
apps/local_apps/django_openidauth/admin.py | google-code-export/django-hotclub | 1 | 12762380 | <reponame>google-code-export/django-hotclub<filename>apps/local_apps/django_openidauth/admin.py
from django.contrib import admin
from django_openidauth.models import UserOpenID
class UserOpenIDAdmin(admin.ModelAdmin):
    """Admin options for UserOpenID records."""
    # raw-id widget shows the user FK as an ID box instead of loading
    # every user into a select dropdown.
    raw_id_fields = ('user',)
# Expose the model in the Django admin site.
admin.site.register(UserOpenID, UserOpenIDAdmin)
| 1.921875 | 2 |
scripts/calcdeadtime.py | juliadeneva/NICERsoft | 2 | 12762381 | #!/usr/bin/env python
from __future__ import print_function, division
from glob import glob
import astropy.io.fits as pyfits
import sys, os
from os import path, remove
from astropy import log
from astropy.table import Table
from subprocess import check_call
import argparse
import re
import numpy as np
# from nicer.values import *
# Array of DET_IDs that are used.  NICER has 7 MPUs (0-6), each carrying
# FPMs 0-7, so DET_ID = mpu*10 + fpm.  Generating the 56 values beats
# maintaining an error-prone 56-entry literal.
IDS = np.array([mpu * 10 + fpm for mpu in range(7) for fpm in range(8)])
import matplotlib.pyplot as plt
import argparse
# Command-line interface: obsdir + GTI file, optional FPM mask and plot.
parser = argparse.ArgumentParser(
    description="Compute deadtime correction to an EXPOSURE defined by a GTI extension, for a single OBSID."
)
parser.add_argument("obsdir", help="Directory containing the raw data for this OBSID")
parser.add_argument(
    "gtifile",
    help="FITS file containing a GTI extension to be used. Can be an event file, PHA file or any FITS file with a 'GTI' extension.",
)
parser.add_argument(
    "--mask", help="Mask particular FPMs", nargs="+", type=int, default=[]
)
parser.add_argument("--plot", help="Plot deadtime per FPM", action="store_true")
args = parser.parse_args()
# The GTI file is assumed to apply to all FPMs. This is normally the case since the user
# is operating on a merged event file whose GTI is the AND of all the individual MPU GTIs
# then they may make additional GTI selections that are more restrictive than that.
# So, we can go over each MPU file and apply the GTI before counting up the deadtime.
# Get the names of all the individual MPU files
gstr = path.join(args.obsdir, "xti/event_uf/ni*mpu?_uf.evt*")
log.debug("Glob string {}".format(gstr))
ufiles = glob(gstr)
ufiles.sort()
log.info(
    "Reading unfiltered events from these files :\n\t{}".format("\n\t".join(ufiles))
)
if len(ufiles) != 7:
    log.error("Did not find 7 MPU files!")
# Per-FPM accumulated deadtime, indexed parallel to IDS.
fpm_deadtime = np.zeros(len(IDS))
t_mpu = -1
log.info("Mask {}".format(args.mask))
for i, det_id in enumerate(IDS):
    if det_id in args.mask:
        continue
    # DET_ID encodes the MPU in its tens digit.
    mpu = det_id // 10
    log.debug("{} DET_ID {} MPU {} File {}".format(i, det_id, mpu, ufiles[mpu]))
    # Only read the raw MPU file once per MPU since all the FPMs for this MPU are in this file
    if mpu != t_mpu:
        # niextract-events applies the GTI selection, writing tmp.evt.
        cmd = "niextract-events {0} eventsout={1} timefile='{2}[GTI]' clobber=yes".format(
            ufiles[mpu], "tmp.evt", args.gtifile
        )
        st = check_call(cmd, shell=True)
        if st != 0:
            log.error("niextract-events failed!")
        t = Table.read("tmp.evt", hdu=1)
        t_mpu = mpu
    # NOTE(review): `t` is unbound if every FPM of the first MPU is masked
    # until an unmasked one is reached — confirm masks are always partial.
    dets = t["DET_ID"]
    if not np.any(dets == det_id):
        fpm_deadtime[i] = 0.0
    else:
        # Sum the per-event DEADTIME column for this detector only.
        fpm_deadtime[i] = (t["DEADTIME"][dets == det_id]).sum()
# Total exposure is the summed length of all GTI intervals.
gtitable = Table.read("{}".format(args.gtifile), hdu="GTI")
exp = (gtitable["STOP"] - gtitable["START"]).sum()
log.debug("exp {}".format(exp))
# Statistics are computed over "active" FPMs only (nonzero deadtime).
active = np.where(fpm_deadtime > 0)[0]
if not np.any(fpm_deadtime > 0):
    deadtime = 0.0
    mindead = 0.0
    maxdead = 0.0
    stddead = 0.0
else:
    deadtime = fpm_deadtime[active].mean()
    mindead = fpm_deadtime[active].min()
    maxdead = fpm_deadtime[active].max()
    stddead = fpm_deadtime[active].std()
if args.plot:
    if exp > 0:
        plt.plot(IDS, 100 * fpm_deadtime / exp, "s")
    plt.xlabel("DET_ID")
    plt.ylabel("Deadtime %")
    plt.title(t.meta["OBS_ID"])
    # plt.savefig("deadtimeplots/{0}_deadtimes.png".format(t.meta["OBS_ID"]))
    plt.show()
# Guard against dividing by a zero-length exposure.
if exp == 0.0:
    percent_frac = 0.0
else:
    percent_frac = 100.0 * deadtime / exp
print(
    "\nFile {} Exposure {:12.5f}, Mean Deadtime {:12.5f} ({:.3f} %) -> Livetime {:12.5f}".format(
        args.gtifile, exp, deadtime, percent_frac, exp - deadtime
    )
)
print(
    "Deadtime Statistics for {} FPM: Min {:12.5f} Max {:12.5f} Std {:12.5f}".format(
        len(active), mindead, maxdead, stddead
    )
)
pprs/merge_time.py | huxh10/SGDX | 2 | 12762382 | <gh_stars>1-10
#! /usr/bin/python
import argparse
def merge_time(file1, file2):
    """Merge two per-announcement timing logs and emit latencies.

    Each input line looks like ``... id:<N> <k>:<start> <k>:<end>`` (fields 2-4
    carry colon-separated values).  For every announcement id the earliest
    start and latest end across both files are kept, and the difference is
    written to ``<file1>_<file2>`` in microseconds, one ``latency: N`` line
    per announcement.
    """
    def parse(line):
        # Strip the trailing newline, then pull id/start/end from fields 2-4.
        fields = line[:-1].split(' ')
        ann_id = int(fields[2].split(':')[1])
        start = float(fields[3].split(':')[1])
        end = float(fields[4].split(':')[1])
        return ann_id, start, end

    spans = {}
    with open(file1, 'r') as f:
        for line in f:
            ann_id, start, end = parse(line)
            spans[ann_id] = [start, end]
    with open(file2, 'r') as f:
        for line in f:
            ann_id, start, end = parse(line)
            # Widen the span: earliest start, latest end across both files.
            spans[ann_id][0] = min(spans[ann_id][0], start)
            spans[ann_id][1] = max(spans[ann_id][1], end)
    with open(file1 + '_' + file2, 'w+') as f:
        f.writelines(
            "latency: %d\n" % int((end - start) * 1000000)
            for start, end in spans.values()
        )
# CLI entry point: takes the two result files to merge.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('file1', type=str, help='specify the result file 1')
    parser.add_argument('file2', type=str, help='specify the result file 2')
    args = parser.parse_args()
    merge_time(args.file1, args.file2)
| 2.890625 | 3 |
bugtests/test019.py | doom38/jython_v2.2.1 | 0 | 12762383 | <reponame>doom38/jython_v2.2.1
"""
Check strange indexes/slices.
"""
import support
bits = ['1','1','0','0']
try:
bits[2:1] = '2'
except TypeError:
pass
else:
pass
# BW says this will be allowed in python1.6
#raise support.TestError("Assignment to slice should fail, but didn't " + `bits`)
| 2.5 | 2 |
tangerine/migrations/0017_config_show_future.py | shacker/tangerine | 1 | 12762384 | # Generated by Django 2.0 on 2018-01-06 08:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Config.show_future: opt-in display of future-dated posts."""
    dependencies = [
        ('tangerine', '0016_auto_20180104_2324'),
    ]
    operations = [
        migrations.AddField(
            model_name='config',
            name='show_future',
            field=models.BooleanField(default=False, help_text='If enabled, posts dated in the future appear immediately. Default is False (drip-date behavior).'),
        ),
    ]
| 1.742188 | 2 |
libs/lsusb.py | hpagseddy/ZPUI | 0 | 12762385 | #!/usr/bin/env python
from subprocess import check_output
"""
Bus 001 Device 008: ID 239a:d1ed
Bus 001 Device 015: ID 045e:00db Microsoft Corp. Natural Ergonomic Keyboard 4000 V1.0
Bus 001 Device 014: ID 046d:c52f Logitech, Inc. Unifying Receiver
Bus 001 Device 013: ID 0b95:772a ASIX Electronics Corp. AX88772A Fast Ethernet
Bus 001 Device 012: ID 0d8c:0105 C-Media Electronics, Inc. CM108 Audio Controller
Bus 001 Device 011: ID 17e9:0117 DisplayLink
Bus 001 Device 010: ID 1a40:0201 Terminus Technology Inc. FE 2.1 7-port Hub
Bus 001 Device 016: ID 04d9:1603 Holtek Semiconductor, Inc. Keyboard
Bus 001 Device 003: ID 0424:ec00 Standard Microsystems Corp. SMSC9512/9514 Fast Ethernet Adapter
Bus 001 Device 002: ID 0424:9514 Standard Microsystems Corp.
Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
"""
def lsusb():
    """Run ``lsusb`` and parse its output.

    Returns a list of ``[bus, device, "vid:pid", name_or_None]`` entries,
    with bus/device numbers normalized (leading zeros stripped, as strings).
    """
    lsusb_entries = []
    # check_output returns bytes on Python 3; decode so the str-based
    # split/strip parsing below works (it raised TypeError otherwise).
    output = check_output(["lsusb"]).decode("utf-8")
    for line in [line.strip(' ') for line in output.split('\n') if line.strip(' ')]:
        # "Bus 001 Device 008: ID 239a:d1ed Some Vendor Some Product"
        location, description = line.split(':', 1)
        id_str, vid_pid_name = description.strip(' ').split(' ', 1)
        # Split once: first token is vid:pid, the (optional) rest is the name.
        parts = vid_pid_name.split(' ', 1)
        vid_pid = parts[0]
        name = parts[1] if len(parts) > 1 else None
        bus_str, bus, device_str, device = location.split(' ', 3)
        bus = str(int(bus, 10))
        device = str(int(device, 10))
        lsusb_entries.append([bus, device, vid_pid, name])
    return lsusb_entries
# Standalone use: dump the parsed lsusb entries.
if __name__ == "__main__":
    print(lsusb())
| 2.59375 | 3 |
examples/Python/unet.py | xrz000/PlotNeuralNet | 1 | 12762386 | import sys
from plotnn import plotnn
import plotnn.tikzeng as tk
def main():
    """Build the U-Net architecture description and render it to LaTeX/TikZ."""
    # Output file is named after this script (extension stripped).
    out_stem = str(sys.argv[0]).split('.')[0]
    layers = [
        # ---- input image ----
        tk.Image("input", "./images/dogcat.jpg"),
        # ---- contracting path, level 1 (64 channels) ----
        tk.Conv2D(name='conv_0', out_width=570, out_channel=64, activation="relu",
                  offset=(3, 0, 0), location="input", width=2, height=40, depth=40),
        tk.Connection("input", "conv_0", origin_loc=None),
        tk.Conv2D(name='conv_1', out_width=568, out_channel=64, activation="relu",
                  offset=(0, 0, 0), location="conv_0-east", width=2, height=40, depth=40),
        tk.Pool(name="pool_b1", offset=(1, -6, 0), location="conv_1-south",
                width=2, height=20, depth=20, opacity=0.5),
        tk.Connection("conv_1", "pool_b1", origin_loc="east", target_loc="north", path="-|"),
        # ---- contracting path, level 2 (128 channels) ----
        tk.Conv2D(name='conv_2', out_width=282, out_channel=128, activation="relu",
                  offset=(0, 0, 0), location="pool_b1-east", width=4, height=20, depth=20),
        tk.Conv2D(name='conv_3', out_width=280, out_channel=128, activation="relu",
                  offset=(0, 0, 0), location="conv_2-east", width=4, height=20, depth=20),
        tk.Pool(name="pool_b2", offset=(1, -5, 0), location="conv_3-south",
                width=4, height=10, depth=10, opacity=0.5),
        tk.Connection("conv_3", "pool_b2", origin_loc="east", target_loc="north", path="-|"),
        # ---- contracting path, level 3 (256 channels) ----
        tk.Conv2D(name='conv_4', out_width=138, out_channel=256, activation="relu",
                  offset=(0, 0, 0), location="pool_b2-east", width=6, height=10, depth=10),
        tk.Conv2D(name='conv_5', out_width=136, out_channel=256, activation="relu",
                  offset=(0, 0, 0), location="conv_4-east", width=6, height=10, depth=10),
        tk.Pool(name="pool_b3", offset=(1, -4, 0), location="conv_5-south",
                width=6, height=5, depth=5, opacity=0.5),
        tk.Connection("conv_5", "pool_b3", origin_loc="east", target_loc="north", path="-|"),
        # ---- contracting path, level 4 (512 channels) ----
        tk.Conv2D(name='conv_6', out_width=66, out_channel=512, activation="relu",
                  offset=(0, 0, 0), location="pool_b3-east", width=8, height=5, depth=5),
        tk.Conv2D(name='conv_7', out_width=64, out_channel=512, activation="relu",
                  offset=(0, 0, 0), location="conv_6-east", width=8, height=5, depth=5),
        tk.Pool(name="pool_b4", offset=(1, -3, 0), location="conv_7-south",
                width=8, height=4, depth=4, opacity=0.5),
        tk.Connection("conv_7", "pool_b4", origin_loc="east", target_loc="north", path="-|"),
        # ---- bottleneck (1024 channels) ----
        tk.Conv2D(name='conv_8', out_width=30, out_channel=1024, activation="relu",
                  offset=(0, 0, 0), location="pool_b4-east", width=10, height=4, depth=4),
        tk.Conv2D(name='conv_9', out_width=28, out_channel=1024, activation="relu",
                  offset=(0, 0, 0), location="conv_8-east", width=10, height=4, depth=4),
        # ---- expanding path, level 4: upsample + skip from conv_7 ----
        tk.ConvTranspose2D(name='unpool_b1', out_channel=512, offset=(1, 3, 0),
                           location="conv_9-northeast", width=8, height=5, depth=5),
        tk.Connection("conv_9", "unpool_b1", path='|-'),
        tk.Concat("concat_b1", location="unpool_b1-east", offset=(1, 0, 0), color="white"),
        tk.Connection("unpool_b1", "concat_b1"),
        tk.Box("concat_1", location="concat_b1-east", offset=(1, 0, 0), xlabel=1024,
               width=10, height=5, depth=5),
        tk.Connection("concat_b1", "concat_1"),
        tk.Connection('conv_7', 'concat_b1', origin_loc="north", target_loc="north",
                      origin_pos=1.5, target_pos=2,
                      color="blue", linestyle="double", path="|-|"),
        tk.Conv2D(name='conv_10', out_width=54, out_channel=512, activation="relu",
                  offset=(1, 0, 0), location="concat_1-east", width=8, height=5, depth=5),
        tk.Connection("concat_1", "conv_10"),
        tk.Conv2D(name='conv_11', out_width=52, out_channel=512, activation="relu",
                  offset=(0, 0, 0), location="conv_10-east", width=8, height=5, depth=5),
        # ---- expanding path, level 3: upsample + skip from conv_5 ----
        tk.ConvTranspose2D(name='unpool_b2', out_channel=256, offset=(1, 5, 0),
                           location="conv_11-northeast", width=6, height=10, depth=10),
        tk.Connection("conv_11", "unpool_b2", path='|-'),
        tk.Concat("concat_b2", location="unpool_b2-east", offset=(1, 0, 0), color="white"),
        tk.Connection("unpool_b2", "concat_b2"),
        tk.Box("concat_2", location="concat_b2-east", offset=(1, 0, 0), xlabel=512,
               width=8, height=10, depth=10),
        tk.Connection("concat_b2", "concat_2"),
        tk.Connection('conv_5', 'concat_b2', origin_loc="north", target_loc="north",
                      origin_pos=1, target_pos=4,
                      color="blue", linestyle="double", path="|-|"),
        tk.Conv2D(name='conv_12', out_width=102, out_channel=256, activation="relu",
                  offset=(1, 0, 0), location="concat_2-east", width=6, height=10, depth=10),
        tk.Connection("concat_2", "conv_12"),
        tk.Conv2D(name='conv_13', out_width=100, out_channel=256, activation="relu",
                  offset=(0, 0, 0), location="conv_12-east", width=6, height=10, depth=10),
        # ---- expanding path, level 2: upsample + skip from conv_3 ----
        tk.ConvTranspose2D(name='unpool_b3', out_channel=128, offset=(2, 6, 0),
                           location="conv_13-northeast", width=4, height=20, depth=20),
        tk.Connection("conv_13", "unpool_b3", path='|-'),
        tk.Concat("concat_b3", location="unpool_b3-east", offset=(1, 0, 0), color="white"),
        tk.Connection("unpool_b3", "concat_b3"),
        tk.Box("concat_3", location="concat_b3-east", offset=(1, 0, 0), xlabel=256,
               width=6, height=20, depth=20),
        tk.Connection("concat_b3", "concat_3"),
        tk.Connection('conv_3', 'concat_b3', origin_loc="north", target_loc="north",
                      origin_pos=1, target_pos=6,
                      color="blue", linestyle="double", path="|-|"),
        tk.Conv2D(name='conv_14', out_width=198, out_channel=128, activation="relu",
                  offset=(1, 0, 0), location="concat_3-east", width=4, height=20, depth=20),
        tk.Connection("concat_3", "conv_14"),
        tk.Conv2D(name='conv_15', out_width=196, out_channel=128, activation="relu",
                  offset=(0, 0, 0), location="conv_14-east", width=4, height=20, depth=20),
        # ---- expanding path, level 1: upsample + skip from conv_1 ----
        tk.ConvTranspose2D(name='unpool_b4', out_channel=64, offset=(2, 8, 0),
                           location="conv_15-northeast", width=2, height=40, depth=40),
        tk.Connection("conv_15", "unpool_b4", path='|-'),
        tk.Concat("concat_b4", location="unpool_b4-east", offset=(2, 0, 0), color="white"),
        tk.Connection("unpool_b4", "concat_b4"),
        tk.Box("concat_4", location="concat_b4-east", offset=(2, 0, 0), xlabel=128,
               width=4, height=40, depth=40),
        tk.Connection("concat_b4", "concat_4"),
        tk.Connection('conv_1', 'concat_b4', origin_loc="north", target_loc="north",
                      origin_pos=1, target_pos=8,
                      color="blue", linestyle="double", path="|-|"),
        tk.Conv2D(name='conv_16', out_width=390, out_channel=64, activation="relu",
                  offset=(2, 0, 0), location="concat_4-east", width=2, height=40, depth=40),
        tk.Connection("concat_4", "conv_16"),
        tk.Conv2D(name='conv_17', out_width=388, out_channel=64, activation="relu",
                  offset=(0, 0, 0), location="conv_16-east", width=2, height=40, depth=40),
        # ---- output head: 1x1 conv to 2 classes + softmax ----
        tk.Conv2D(name='conv_18', out_width=388, out_channel=2,
                  offset=(2, 0, 0), location="conv_17-east", width=1, height=40, depth=40),
        tk.Connection("conv_17", "conv_18"),
        tk.Softmax(name="softmax", out_channel=2, offset=(1, 0, 0), location="conv_18-east",
                   width=1, height=40, depth=40, caption="softmax"),
        tk.Connection("conv_18", "softmax"),
        # ---- legend ----
        tk.Legend(
            items=[
                (tk.Conv2D("conv"), "Conv2D"),
                (tk.Conv2D("conva", activation="relu"), "Conv2D+ReLU"),
                (tk.ConvTranspose2D("deconv"), "Upsample+Conv"),
                (tk.Pool("maxpool"), "MaxPooling"),
                (tk.Softmax("softmax"), "Softmax"),
                (tk.Concat("concat", color="white", radius=0.7), "Concat"),
                (tk.Connection((0, 0, 0), (1, 0, 0), color="blue", linestyle="double"), "Copy and Crop"),
            ],
            scale=3.0,
            location="south east",
            offset=(0, 0, 0)
        ),
    ]
    plotnn.generate([layers], out_stem + '.tex')
# Render the diagram when invoked as a script.
if __name__ == '__main__':
    main()
| 2.3125 | 2 |
applications/graph/GNN/OGB_LBANN_Trainer.py | aj-prime/lbann | 0 | 12762387 | <filename>applications/graph/GNN/OGB_LBANN_Trainer.py
import lbann
import lbann.contrib.launcher
import lbann.contrib.args
import argparse
import os
import configparser
import math
import data.LSC_PPQM4M
from lbann.util import str_list
from lbann.modules.graph import NNConv
from lbann.modules import ChannelwiseFullyConnectedModule
import numpy as np
from NNConvModel import make_model
# Command-line interface.  NOTE(review): several help strings misspell
# "default" as "deafult" (user-visible text; fix deliberately, not here).
desc = ("Training Edge-conditioned Graph Convolutional Model Using LBANN ")
parser = argparse.ArgumentParser(description=desc)
lbann.contrib.args.add_scheduler_arguments(parser)
lbann.contrib.args.add_optimizer_arguments(parser)
parser.add_argument(
    '--num-epochs', action='store', default=3, type=int,
    help='number of epochs (deafult: 3)', metavar='NUM')
parser.add_argument(
    '--mini-batch-size', action='store', default=2048, type=int,
    help="mini-batch size (default: 2048)", metavar='NUM')
parser.add_argument(
    '--num-edges', action='store', default=118, type=int,
    help='number of edges (deafult: 118)', metavar='NUM')
parser.add_argument(
    '--num-nodes', action='store', default=51, type=int,
    help='number of nodes (deafult: 51)', metavar='NUM')
parser.add_argument(
    '--num-node-features', action='store', default=9, type=int,
    help='number of node features (deafult: 9)', metavar='NUM')
parser.add_argument(
    '--num-edge-features', action='store', default=3, type=int,
    help='number of edge features (deafult: 3)', metavar='NUM')
parser.add_argument(
    '--num-out-features', action='store', default=32, type=int,
    help='number of node features for NNConv (deafult: 32)', metavar='NUM')
parser.add_argument(
    '--num-samples', action='store', default=3045360, type=int,
    help='number of Samples (deafult: 3045360)', metavar='NUM')
parser.add_argument(
    '--node-embeddings', action='store', default=100, type=int,
    help='dimensionality of node feature embedding (deafult: 100)', metavar='NUM')
parser.add_argument(
    '--edge-embeddings', action='store', default=16, type=int,
    help='dimensionality of edge feature embedding (deafult: 16)', metavar='NUM')
parser.add_argument(
    '--job-name', action='store', default="NN_Conv", type=str,
    help="Job name for scheduler", metavar='NAME')
args = parser.parse_args()
# Scheduler settings (nodes, time limit, ...) extracted from the parsed args.
kwargs = lbann.contrib.args.get_scheduler_kwargs(args)
# Hyper-parameters taken from the command line.  The graph-shape values were
# previously hard-coded (51 / 118 / 9 / 3), silently ignoring the --num-nodes,
# --num-edges, --num-node-features and --num-edge-features options defined
# above; use the parsed arguments instead (their defaults are identical, so
# behavior is unchanged when the flags are not given).
MINI_BATCH_SIZE = args.mini_batch_size
NUM_EPOCHS = args.num_epochs
JOB_NAME = args.job_name
NUM_NODES = args.num_nodes
NUM_EDGES = args.num_edges
NUM_NODES_FEATURES = args.num_node_features
NUM_EDGE_FEATURES = args.num_edge_features
NUM_OUT_FEATURES = args.num_out_features
NUM_SAMPLES = args.num_samples
EMBEDDING_DIM = args.node_embeddings
EDGE_EMBEDDING_DIM = args.edge_embeddings
# ----------------------------------------
# Generating configuration for dataset
# ----------------------------------------
config = configparser.ConfigParser()
config['Graph'] = {}
config['Graph']['num_nodes'] = str(NUM_NODES)
config['Graph']['num_edges'] = str(NUM_EDGES)
config['Graph']['num_node_features'] = str(NUM_NODES_FEATURES)
config['Graph']['num_edge_features'] = str(NUM_EDGE_FEATURES)
config['Graph']['num_samples'] = str(NUM_SAMPLES)
# Write config.ini next to this script; the data reader locates it through
# the LBANN_LSC_CONFIG_FILE environment variable set below.
current_file = os.path.realpath(__file__)
app_dir = os.path.dirname(current_file)
_file_name = os.path.join(app_dir, 'config.ini')
with open(_file_name, 'w') as configfile:
    config.write(configfile)
os.environ['LBANN_LSC_CONFIG_FILE'] = _file_name
# Build the NNConv model, optimizer, data reader and trainer, then launch.
model = make_model(NUM_NODES,
                   NUM_EDGES,
                   NUM_NODES_FEATURES,
                   NUM_EDGE_FEATURES,
                   EMBEDDING_DIM,
                   EDGE_EMBEDDING_DIM,
                   NUM_OUT_FEATURES,
                   NUM_EPOCHS)
optimizer = lbann.SGD(learn_rate=1e-4)
data_reader = data.LSC_PPQM4M.make_data_reader("LSC_FULL_DATA")
trainer = lbann.Trainer(mini_batch_size=MINI_BATCH_SIZE)
lbann.contrib.launcher.run(trainer,
                           model,
                           data_reader,
                           optimizer,
                           job_name=JOB_NAME,
                           **kwargs)
jupyterlab2pymolpysnips/MolecularRepresentation/ellipcol.py | MooersLab/pymolpysnips | 0 | 12762388 | <filename>jupyterlab2pymolpysnips/MolecularRepresentation/ellipcol.py
"""
cmd.do('set ellipsoid_color, ${1:color};')
cmd.do('${0}')
"""
cmd.do('set ellipsoid_color, color;')
# Description: Set ellipsoid color.
# Source: placeHolder
| 1.484375 | 1 |
fusus/lakhnawi.py | dirkroorda/fusus | 0 | 12762389 | <filename>fusus/lakhnawi.py
"""Lakhnawi pdf reverse engineering.
This is an effort to make the Lakhnawi PDF readable.
It is a text-based PDF, no images are used to represent text.
Yet the text is not easily extracted, due to:
* the use of private-use unicode characters that refer to heavily customised fonts;
* some fonts have some glyphs with dual unicode points;
* the drawing order of characters does not reflect the reading order;
* horizontal whitespace is hard to detect due to oversized bounding boxes of many
private-use characters.
We used the top-notch Python PDF library
[PyMUPDF](https://pymupdf.readthedocs.io/en/latest/index.html), also know as *fitz*.
```
pip3 install PyMuPDF
```
But even this library could not solve the above issues.
Here is how we solved the issues
# Private use characters
We used font analysis software from PdfLib:
[FontReporter](https://www.pdflib.com/download/free-software/fontreporter/)
to generate a
[report of character and font usage in the Lakhnawi PDF](https://github.com/among/fusus/blob/master/ur/Lakhnawi/FontReport-Lakhnawi.pdf).
Based on visual inspection of this font report and the occurrences
of the private use tables we compiled a translation table mapping dirty strings
(with private use characters) to clean strings (without private use characters).
# Dual code points
In case of dual code points, we ignore the highest code points.
Often the two code points refer to a normal Arabic code point and to a ligature or
special form of the character.
The unicode algorithm is very good nowadays to generate the special forms
from the ordinary forms based on immediate context.
# Reading order
We ordered the characters ourselves, based on the coordinates.
This required considerable subtlety, because we had to deal
with diacritics above and below the lines.
See `clusterVert`.
# Horizontal whitespace
This is the most tricky point, because the information we retain from the PDF is,
strictly speaking, insufficient to determine word boundaries.
Word boundaries are partly in the eyes of the beholder, if the beholder knows Arabic.
The objective part is in the amount of whitespace between characters
and the form of the characters (initial, final, isolated).
But the rules of Arabic orthography allow initial characters inside words,
and there are the enclitic words.
So we only reached an approximate solution for this problem.
!!! caution "Footnotes"
    We have stripped footnotes and footnote references from the text.
# Output format
The most important output are tab separated files with text and positions of
individual words.
See `Lakhnawi.tsvPages`.
This data is used to feed the conversion to Text-Fabric.
See also:
* `fusus.tfFromTsv`.
* [Text-Fabric](https://annotation.github.io/text-fabric/tf/index.html)
"""
import sys
import os
import collections
import re
from itertools import chain
from IPython.display import display, HTML, Image
import fitz
from tf.core.helpers import setFromSpec, unexpanduser
from .parameters import SOURCE_DIR, UR_DIR, ALL_PAGES, LINE_CLUSTER_FACTOR
from .lib import DEFAULT_EXTENSION, pprint, parseNums
from .char import (
UChar,
EMSPACE,
getSetFromDef,
isAlefFinal,
isArDigit,
isEuDigit,
isMeemOrYeh,
isWaw,
normalizeC,
normalizeD,
uName,
)
NAME = "Lakhnawi"
SOURCE = f"{SOURCE_DIR}/{NAME}/{NAME.lower()}.pdf"
FONT = f"{UR_DIR}/{NAME}/FontReport-{NAME}.pdf"
DEST = f"{SOURCE_DIR}/{NAME}/{NAME.lower()}.txt"
# CSS used both in notebook display and in standalone HTML exports.
# FIX: td.cols2 width was "500%" (typo for 50%, matching its max/min-width);
# FIX: the second "td.cols4" block (20%) duplicated the selector and is renamed
# to td.cols5, completing the 50/33/25/20% column-width progression.
CSS = """
<style>
*,
*:before,
*:after {
    box-sizing: border-box;
}
@page {
    size: A4;
    margin: 2cm;
}
div.window {
    display: flex;
    flex-flow: row nowrap;
    justify-content: flex-start;
    align-items: flex-start;
    min-width: 1000pt;
    height: 99vh;
}
div.sidebar {
    flex: 1 1 300pt;
    display: flex;
    flex-flow: row nowrap;
    border-right: 1pt solid var(--fog-rim);
    padding-left: 8px;
    padding-right: 12px;
    height: 99vh;
    overflow: auto;
    -webkit-overflow-scrolling: touch;
}
div.toc {
    flex: 1 1 50pt;
}
div.pages {
    flex: 0 0 700pt;
}
div.pages.bypage {
    height: 99vh;
    overflow: auto;
    -webkit-overflow-scrolling: touch;
}
div.page {
    margin-right: 1cm;
    padding-left: 0.5cm;
    max-width: 600pt;
    min-width: 600pt;
    width: 600pt;
}
div.pagec {
    margin-right: 1cm;
    padding-right: 10%;
    width: 90%;
    text-align: center;
}
div.phead {
    color: #777777;
    font-size: small;
    text-align: right;
    width: 1cm;
    margin-right: -1cm;
    float: right;
}
.box {
    border: 1pt solid #888888;
    border-radius: 2pt;
}
.r {
    font-family: normal, sans-serif;
    font-size: 22pt;
    direction: rtl;
    unicode-bidi: isolate-override;
}
.rc, .lc {
    font-family: normal, sans-serif;
    font-size: 22pt;
    background-color: white;
    border: 2pt solid #ffcccc;
}
.rc {
    direction: rtl;
    unicode-bidi: isolate-override;
}
.lc {
    direction: ltr;
    unicode-bidi: isolate-override;
}
p {
    text-align: left;
    direction: ltr;
    unicode-bidi: isolate-override;
}
p.r {
    text-align: right;
    direction: rtl;
    unicode-bidi: isolate-override;
}
.l {
    font-family: normal, sans-serif;
    font-size: x-large;
    direction: ltr;
    unicode-bidi: isolate-override;
}
.c {
    font-family: monospace;
    font-size: x-small;
    direction: ltr;
    unicode-bidi: isolate-override;
}
.p {
    font-family: monospace;
    font-size: medium;
    font-weight: bold;
    background-color: yellow;
    direction: ltr;
    unicode-bidi: isolate-override;
}
.lrg {
    font-size: 22pt;
    font-weight: bold;
}
span.sp {
    background-color: rgba(0, 255, 0, 0.5);
}
td.al {
    text-align: left ! important;
}
div.cn {
    text-align: center
}
div.ch.p {
    background-color: #ffeedd;
    text-align: center
}
span.cni {
    background-color: #eeeeee;
    padding-top: 4pt;
    padding-bottom: 4pt;
    padding-left: 8pt;
    padding-right: 8pt;
    border: 2pt solid #66aaaa;
    display: inline-block;
}
div.ch,div.cht,div.chs {
    border: 2pt solid #cccccc;
    display: inline-flex;
    flex-flow: column nowrap;
    max-width: 10em;
}
div.ch {
    background-color: #ddffff;
}
div.chs {
    background-color: #ccffcc;
}
div.chm {
    background-color: #44ff44;
}
div.sr {
    display: flex;
    flex-flow: row wrap;
    direction: rtl;
    unicode-bidi: isolate-override;
}
table.linecols {
    max-width: 100%;
    min-width: 100%;
    width: 100%;
    direction: rtl;
    unicode-bidi: isolate-override;
}
td.cols {
    padding-left: 0.5em;
    padding-right: 0.5em;
    text-align: right;
}
td.cols2 {
    max-width: 50%;
    min-width: 50%;
    width: 50%;
}
td.cols3 {
    max-width: 33%;
    min-width: 33%;
    width: 33%;
}
td.cols4 {
    max-width: 25%;
    min-width: 25%;
    width: 25%;
}
td.cols5 {
    max-width: 20%;
    min-width: 20%;
    width: 20%;
}
:root {
    --fog-rim: hsla( 0, 0%, 60%, 0.5 );
}
</style>
"""
"""Styles to render extracted text.

The styles are chosen such that the extracted text looks as similar as possible to
the PDF display.
"""
POST_HTML = """
</body>
</html>
"""
"""HTML code postfixed to the HTML representation of a page.
"""
def preHtml(pageNum):
    """Return the HTML header that precedes the rendered content of a page.

    Parameters
    ----------
    pageNum: string
        The page number of the page for which HTML is generated;
        it ends up in the document title.
    """
    headerLines = (
        "<html>",
        "<head>",
        '<meta name="viewport" content="width=device-width, initial-scale=1.0"/>',
        '<meta charset="utf-8"/>',
        f"<title>Lakhnawi {pageNum}</title>",
        CSS,
        "</head>",
        "<body>",
        "",
    )
    return "\n".join(headerLines)
def getToc(pageNums):
    """Generate a Table Of Contents for multiple HTML pages.

    The links are distributed over successive `div.toc` elements,
    at most `limit` entries per div, so long page lists flow into
    multiple columns.

    Parameters
    ----------
    pageNums: iterable of int
        The page numbers of the pages in the HTML file.

    Returns
    -------
    string
        The HTML for the table of contents.
    """
    limit = 60
    html = []
    html.append("""<div class="toc">""")
    j = 0
    # idiom fix: the original enumerated the pages but never used the index
    for pageNum in pageNums:
        if j == limit:
            # start a fresh toc column after `limit` entries
            j = 0
            html.append("""</div>\n<div class="toc">""")
        html.append(f"""<a href="#p{pageNum:>03}">p {pageNum}</a><br>""")
        j += 1
    html.append("""</div>""")
    return "\n".join(html)
# Private-use code point that the Lakhnawi fonts render as a space.
PRIVATE_SPACE = "\uea75"
# Private-use code points (hex, one per line) that correspond to full letters;
# parsed into a set by `getSetFromDef`.
PRIVATE_LETTERS_DEF = """
e800
e806
e807
e808
e809
e80a
e80e
e898
e8d4
e915
e917
ea79
"""
# Private-use code points (hex, one per line) that correspond to diacritics;
# parsed into a set by `getSetFromDef` and merged into the diacritic categories
# in `Lakhnawi.privateInfo`.
PRIVATE_DIAS_DEF = """
e812
e814
e815
e816
e817
e818
e81d
e823
e824
e825
e826
e827
e828
e829
e82b
e82e
e82f
e830
e831
e832
e833
e834
e835
e837
e838
e839
e83a
e83f
e840
e845
e846
e849
e84d
e85b
e85c
e863
e864
e86d
e87f
e880
e887
e888
e8de
e8df
e8e6
e8e7
e8e8
e8e9
e8ea
e8eb
e8ee
e8f4
e8f5
e8f6
e8f8
e8fb
e8fe
"""
# Private-use code point for a (short) tatweel; the replace rules ignore it.
PRIVATE_TATWEEL = "\ue821"
# Private-use codes of final letter forms; see also `REPLACE_DEF`,
# where e898, e915, e917 map to final presentation forms.
PRIVATE_FINAL_SPACE_CODES = """
e898
e915
e917
""".strip().split()
REPLACE_DEF = """
# see https://www.unicode.org/versions/Unicode13.0.0/ch09.pdf
# see https://www.compart.com/en/unicode/U+FE8E
# see https://r12a.github.io/scripts/arabic/block
e821 => : (ignore short tatweel)
e825 => 064e : FATHA
e849 => 064e : FATHA
e86d => 064e : FATHA
e87f => 064e : FATHA
e8e8 => 064e : FATHA
e823 => 064b : FATHATAN
e8e6 => 064b : FATHATAN
e826 => 064f : DAMMA
e8e9 => 064f : DAMMA
e824 => 064c : DAMMATAN
e8e7 => 064c : DAMMATAN
e840 => 0650 : KASRA
e864 => 0650 : KASRA
e888 => 0650 : KASRA
e8df => 0650 : KASRA
e83f => 064d : KASRATAN
e863 => 064d : KASRATAN
e887 => 064d : KASRATAN
e8de => 064d : KASRATAN
e827 => 0651 : SHADDA
e8ea => 0651 : SHADDA
e828 => 0652 : SUKUN
e8eb => 0652 : SUKUN
e829 => 0653 : MADDA
e84d => 0653 : MADDA
e82b => 0670 : ALEF(super)
e8ee => 0670 : ALEF(super)
e830 => 064e+0651 : SHADDA+FATHA
e8f4 => 064e+0651 : SHADDA+FATHA
e8f6 => 064e+0651 : SHADDA+FATHA
e831 => 064f+0651 : SHADDA+DAMMA
e8f5 => 064f+0651 : SHADDA+DAMMA
e82e => 064c+0651 : SHADDA+DAMMATAN
e832 => 0650+0651 : SHADDA+KASRA
e82f => 064d+0651 : SHADDA+KASRATAN [2]
e834 => 064d+0651 : SHADDA+KASRATAN [2]
e812 => 064d+0651 : SHADDA+KASRATAN [2]
e8f8 => 064d+0651 : SHADDA+KASRATAN [2]
e818 => 0653+0670 : MADDA+ALEF(super) [4]
e83a => 0653+0670 : MADDA+ALEF(super) [4]
e8fe => 0653+0670 : MADDA+ALEF(super) [4]
e81d => 0640+0650+0651 : TATWEEL+KASRA+SHADDA
# e898 => 0647 : HEH
e898 => feea : HEH final
e806 => 0627 : ALEF
e807 => 0627 : ALEF
e808 => 0671 : ALEF(wasla)
e809 => 0671 : ALEF(wasla)
e800 => 0622 : ALEF/MADDA
0627+e815 => 0623+064e : ALEF/HAMZA+FATHA
# 0627+e85b => 0623+064e : ALEF/HAMZA+FATHA
fe8e+e815 => 0623+064e : ALEF/HAMZA+FATHA
fe8e+e821+e815 => 0623+064e : ALEF/HAMZA+FATHA
e806+e85b => 0623+064e : ALEF/HAMZA+FATHA
# 0627+e85c => 0623+064f : ALEF/HAMZA+DAMMA
0627+e816 => 0623+064f : ALEF/HAMZA+DAMMA
e806+e85c => 0623+064f : ALEF/HAMZA+DAMMA
fe8e+e816 => 0623+064f : ALEF/HAMZA+DAMMA
fe8e+e821+e816 => 0623+064f : ALEF/HAMZA+DAMMA
# 0627+e814 => 0623+064c : ALEF/HAMZA+DAMMATAN
0627+e846 => 0625+064d : ALEF/HAMZA(low)+KASHRATAN
fe8e+e821+e846 => 0625+064d : ALEF/HAMZA(low)+KASRATAN
fe8e+e817 => 0623+0652 : ALEF/HAMZA+SUKUN [7]
e835 => 0654+064b : HAMZA+FATHATAN [3]
e837 => 0654+064e : HAMZA+FATHA [3]
e8fb => 0654+064e : HAMZA+FATHA [3]
e838 => 0654+064f : HAMZA+DAMMA [3]
e880 => 0654+064f : HAMZA+DAMMA [3]
e839 => 0654+0652 : HAMZA+SUKUN [3]
e845 => 0655+0650 : HAMZA(low)+KASRA
0648+e838 => 0624+064f : WAW/HAMZA+DAMMA
e80a => 0644 : LAM
e80e => 0644 : LAM
# e821+e8d4+e821+e830 => 0644 : LAM [10]
# e821+e8d4+e82b+e821 => 0644 : LAM [10]
# e8d4+e830 => 0644 : LAM [10]
e8d4+e833 => 0644 : LAM [10]
# e8d4+e821 => 0644 : LAM [10]
e8d4+e821+e827 => 0644 : LAM [10]
e8d4+e821+e833 => 0644 : LAM [10]
e8d4+e821+e821+e833 => 0644 : LAM [10]
e8d4+fc63 => 0644 : LAM [10]
e8d4+e827 => 0644 : LAM [10]
# e8d4 => 0644 : LAM [10]
# e8d4+064e+e82b => 0644 : LAM [11]
fefb+e85b => 0644+623+064e : LAM+ALEF/HAMZA+FATHA
fefb+e85c => 0644+0623+064f : LAM/ALEF/HAMZA+DAMMA
fefc+e87f => 0644+0623+064e : LAM/ALEF/HAMZA+FATHA
# fef4+e917 => 064a+0649+0670 : YEH+ALEF(super)
fef4+e917 => 064a+fef0+0670 : YEH+ALEF(super)
ea75+e828+ea79 => 062d+0652+0645 : HAH+SUKUN+MEEM
# fe92+0650+e915 => 0628+0650+064a : BEH+KASRA+YEH
fe92+0650+e915 => 0628+0650+fef2 : BEH+KASRA+YEH
fec3+0652+e821+e80e+064e+e807 => 0637+0652+e821+e80e+064e+e807 : [9]
fffd => : replacement character
# [1] it should be a LAM/ALEF ligature with wasla, but there is no such unicode char
# See https://savannah.gnu.org/bugs/?52454
# [2] it looks like shadda+fathatan, but there is no shadda+fathatan.
# Instead, it is shadda+kashratan, where the kashratan is placed high.
# [3] not a perfect solution. After fbe9 (alef maksura) the high hamza is not
# a recommended combination.
# [4] the result combination of madda and alef superscript does not render nicely
# [5] the hamza ends up on the left part of the ligature and combines
# there with the fatha/damma, the d should be positioned on the rightmost part
# of the ligature, but this does not happen
# [6] The shadda/kasra should render low, but it does render high.
# On page 185 line 4 is a yeh that has both this one and the shadda/fatha,
# where in the original the one is rendered below, and the other above the letter.
# In Unicode they end up both in a high position.
# [7] In the original, the sukun tops the alef and the hamza tops the sukun.
# In Unicode, it's the otherway round: the hamza tops the alif and the sukun is
# at the top.
# [9] Singular case on page 45 line 9 char 90 : a final tah inside a word
# [10] as in Allah. The shadda and alef superscript are filled in by the unicode
# algorithm.
# [11] as in Allah, but with fatha instead of shadda. Probaly a typo in a note,
# page 12 second last line.
"""
"""Character replace rules
There are two parts: (1) character replace rules (2) notes.
Each rule consists of a left hand side, then `=>`, then a right hand side,
then `:` and then a short description.
The short description may contain references to notes in the notes section,
which is a list of commented lines at the end of the whole string.
The left and right hand sides consist of one or more hexadecimal character codes,
joined by the `+` sign.
The meaning is that when the left hand side matches a portion of the input text,
the output text, which is otherwise a copy of the input text, will have that portion
replaced by the right hand side.
The exact application of rules has some subtleties which will be dealt with
in `Lakhnawi.trimLine`.
"""
def ptRepD(p):
    """Represent a float as an integer with enhanced precision.

    Parameters
    ----------
    p: float
        We multiply it by 10, then round it to the nearest integer.
        A none value is converted to `?`.
    """
    if p is None:
        return "?"
    return int(round(10 * p))
def ptRep(p):
    """Represent a float as an integer.

    Parameters
    ----------
    p: float
        We round it to the nearest integer.
        A none value is converted to `?`.
    """
    if p is None:
        return "?"
    return int(round(p))
# Matches one replace rule: "<codes> => <codes> : <comment>", case-insensitive.
REPLACE_RE = re.compile(r"""^([0-9a-z+]+)\s*=>\s*([0-9a-z+]*)\s*:\s*(.*)$""", re.I)
LETTER_CODE_DEF = dict(
    d=(1, "diacritic"),
)
"""Defines place holder `d` in rule definitions.
"""
# Derived lookups: place-holder letter -> numeric code, numeric code -> letter,
# numeric code -> human-readable kind.
LETTER_CODE = {cd: info[0] for (cd, info) in LETTER_CODE_DEF.items()}
CODE_LETTER = {info[0]: cd for (cd, info) in LETTER_CODE_DEF.items()}
LETTER_KIND = {info[0]: info[1] for info in LETTER_CODE_DEF.values()}
def getDictFromDef(defs):
    """Interpret a string as a set of character replace rules.

    Parameters
    ----------
    defs: string
        A string containing definitions of character replace rules,
        one rule per line, in the format matched by `REPLACE_RE`
        (`codes => codes : comment`). Text after `#` is comment;
        blank lines are skipped. Codes are hexadecimal code points or
        the place holder `d` (see `LETTER_CODE_DEF`), joined by `+`.

    Returns
    -------
    (dict, dict) | (None, dict)
        On success: a dict mapping the first character of each rule's match
        to the rules starting with that character (longest matches first),
        and an index of rules by rule number.
        On any malformed rule: `(None, {})`, so that callers can unpack the
        pair and test the first member for None.
        (The original returned a bare `None` here, which crashed callers
        that unpack the result into two names.)

    !!! note "Only for rules"
        We only use this function for the rules in `REPLACE_DEF`.
    """
    rules = []
    rn = 0
    good = True
    # `ln` is the 0-based line number within `defs`, used in diagnostics;
    # the original reused `i` for the inner loops, which shadowed it.
    for (ln, line) in enumerate(defs.strip().split("\n")):
        parts = line.split("#", maxsplit=1)
        if len(parts) > 1:
            line = parts[0]
        line = line.strip()
        if not line:
            continue
        match = REPLACE_RE.match(line)
        if not match:
            print(f"MALFORMED REPLACE DEF @{ln}: {line}")
            good = False
            continue
        rn += 1
        (valStr, replStr, comment) = match.group(1, 2, 3)
        vals = []
        d = None
        for (pos, val) in enumerate(valStr.split("+")):
            if val in {"d"}:
                if d is not None:
                    print(f"MULTIPLE d in RULE @{ln}: rule {rn}: {line}")
                    good = False
                d = pos
                vals.append(LETTER_CODE[val])
            else:
                vals.append(chr(int(val, base=16)))
        repls = []
        e = None
        if replStr:
            for (pos, repl) in enumerate(replStr.split("+")):
                if repl in {"d"}:
                    if e is not None:
                        print(f"MULTIPLE d in RULE @{ln}: rule {rn}: {line}")
                        good = False
                    e = pos
                    repls.append(LETTER_CODE[repl])
                else:
                    repls.append(chr(int(repl, base=16)))
        if d is None and e is not None:
            # FIX: the original printed the literal text "@[i]" here
            print(f"d in REPLACEMENT but not in MATCH @{ln}: rule {rn}: {line}")
            good = False
        rules.append((rn, tuple(vals), d, tuple(repls), e))
    if not good:
        return (None, {})
    result = {}
    ruleIndex = {}
    # longer matches first, so that greedy application prefers them
    for (rn, vals, d, repls, e) in sorted(rules, key=lambda x: (-len(x[1]), str(x[1]))):
        result.setdefault(vals[0], []).append((rn, vals, d, repls, e))
        ruleIndex[rn] = (vals, d, repls, e)
    return (result, ruleIndex)
U_LINE_RE = re.compile(r"""^U\+([0-9a-f]{4})([0-9a-f ]*)$""", re.I)
HEX_RE = re.compile(r"""^[0-9a-f]{4}$""", re.I)
PUA_RE = re.compile(r"""⌊([^⌋]*)⌋""")
RECT = "rect"
COLOR = "color"
FNRULE_WIDTH = 60
"""Width of the rule that separates body text from footnote text.
"""
# SPACE_THRESHOLD = 25
SPACE_THRESHOLD = 10
"""Amount of separation between words.
Character boxes this far apart imply that there is a white space between them.
The unit is 0.1 pixel.
"""
class Lakhnawi(UChar):
    def __init__(self):
        """Text extraction from the Lakhnawi PDF.

        This class makes use of the `fusus.char.UChar` class which
        defines several categories of characters.
        By extending that class, the Lakhnawi class makes use of those categories.
        It also adds specific characters to some of those categories, especially
        the private use characters that occur in the Lakhnawi PDF.

        We use *fitz* (`pip3 install PyMuPDF`) for PDF reading.
        """
        super().__init__()
        self.heights = {}
        """Heights of characters, indexed by page number."""

        self.clusteredHeights = {}
        """Clustered heights of characters, indexed by page number.

        The clustered heights correspond to the lines on a page.
        """

        self.lines = {}
        """Lines as tuples of original character objects, indexed by page number"""

        self.text = {}
        """Lines as tuples of converted character objects, indexed by page number"""

        self.fnRules = {}
        """Vertical positions of footnote lines, indexed by page number"""

        self.spaces = {}
        """Spacing information for each character, indexed by page and line number.

        For character that has space behind it, it gives the index position of that
        character in the line, the amount of space detected,
        and whether this counts as a full white space.
        """

        self.columns = {}
        """Column information, indexed by page and line number.

        Spaces that are significantly larger than a normal white space
        are interpreted as an emspace, and these are considered as column separators.
        We remember the character positions where this happens plus the amount
        of space in question.

        Columns in the Lakhnawi PDF correspond to *hemistic* poems,
        where lines are divided into two halves, each occupying a column.

        !!! caution "hemistic poems versus blocks"
            This is very different from blocks (see `fusus.layout`) in OCRed texts,
            where blocks have been detected because of vertical strokes
            that separate columns.
            The reading progress in a hemistic poem is not changed by the
            column division, whereas in the case of blocks, reading proceeds
            by reading the complete blocks in order.
        """

        self.doubles = {}
        """Glyphs with double unicode points.

        Some private use characters have two unicode points assigned to them
        by fonts in the PDF.
        This is the cause that straightforward text extractions deliver
        double occurrences of those letters. Even *fitz* does that.
        We have collected these cases, and choose to use the lower unicode point,
        which is usually an ordinary character, whereas the other is usually a
        related presentational character.
        This dictionary maps the lower character to the higher character.
        """

        self.privateLetters = None
        """Private-use unicodes that correspond to full letters."""

        self.privateDias = None
        """Private-use unicodes that correspond to diacritics."""

        self.privateSpace = None
        """Private-use-unicode used to represent a space."""

        self.good = True
        """Whether processing is still ok, i.e. no errors encountered."""

        self.getCharConfig()
        # NOTE: construction does file I/O here (opens the PDF);
        # call `close()` when done with this object.
        self.doc = fitz.open(SOURCE)
        """A handle to the PDF document, after it has been read by *fitz*."""
def close(self):
"""Close the PDF handle, offered by *fitz*."""
self.doc.close()
def setStyle(self):
"""Import the CSS styles into the notebook.
See `CSS`.
"""
display(HTML(CSS))
def getCharConfig(self):
"""Configure all character information.
Private-use characters, transformation rules, character categories.
"""
self.privateInfo()
self.setupRules()
self.getCharInfo()
    def privateInfo(self):
        """Set up additional character categories wrt. private-use characters.

        Several categories will receive additional members from the
        private use characters.
        """
        self.privateLetters = getSetFromDef(PRIVATE_LETTERS_DEF)
        self.privateDias = getSetFromDef(PRIVATE_DIAS_DEF)
        self.privateSpace = PRIVATE_SPACE
        # private diacritics do not take part in horizontal spacing detection
        self.nospacings |= self.privateDias
        # self.nospacings.add(PRIVATE_TATWEEL)
        self.diacritics |= self.privateDias
        self.diacriticLike |= self.privateDias
        # full letters = all Arabic characters minus the private diacritics
        self.arabicLetters = self.arabic - self.privateDias
        # treat all private-use characters as right-to-left
        self.rls |= self.puas
def setupRules(self):
"""Set up character transformation rules.
Prepare for counting how much rules will be applied
when extracting text from pages of the Lakhnawi PDF.
"""
(self.replace, self.ruleIndex) = getDictFromDef(REPLACE_DEF)
if self.replace is None:
self.replace = {}
self.good = False
self.rulesApplied = collections.defaultdict(collections.Counter)
for rn in self.ruleIndex:
self.rulesApplied[rn] = collections.Counter()
    def getCharInfo(self):
        """Obtain detailed character information by reading the font report file.

        From this file we read:

        * which are the private use characters?
        * which of them have a double unicode?

        The font file is
        [here](https://github.com/among/fusus/blob/master/ur/Lakhnawi/FontReport-Lakhnawi.pdf).
        """
        self.doubles = {}
        self.privates = set()
        doubles = self.doubles
        privates = self.privates
        finalSpace = self.finalSpace
        puas = self.puas
        doc = fitz.open(FONT)
        for page in doc:
            textPage = page.getTextPage()
            data = textPage.extractText()
            # scan the report for lines of the shape "U+xxxx [yyyy ...]"
            for (ln, line) in enumerate(data.split("\n")):
                if line.startswith("U+"):
                    match = U_LINE_RE.match(line)
                    if not match:
                        continue
                    (main, rest) = match.group(1, 2)
                    main = main.lower()
                    nMain = int(main, base=16)
                    cMain = chr(nMain)
                    if cMain in puas:
                        # a private-use character; it cannot be a double
                        privates.add(cMain)
                        continue
                    if cMain == chr(0):
                        continue
                    second = None
                    rest = rest.replace(" ", "")
                    if rest:
                        # only a single extra 4-digit hex code counts as a double
                        if HEX_RE.match(rest):
                            second = rest.lower()
                    if second:
                        nSecond = int(second, base=16)
                        cSecond = chr(nSecond)
                        # store lower code point as key, higher as value
                        if nSecond > nMain:
                            doubles[cMain] = cSecond
                        else:
                            doubles[cSecond] = cMain
        # per-double and per-final application counters, keyed by page number
        doublesApplied = collections.defaultdict(collections.Counter)
        for d in doubles:
            doublesApplied[d] = collections.Counter()
        self.doublesApplied = doublesApplied
        finalsApplied = collections.defaultdict(collections.Counter)
        for f in finalSpace:
            finalsApplied[f] = collections.Counter()
        self.finalsApplied = finalsApplied
def plainChar(self, c):
"""Show the character code of a character.
Parameters
----------
c: string
The character in question, may also be the empty string or
the integer 1 (diacritic place holder).
Returns
-------
string
The hexadecimal unicode point of `c`, between `⌊ ⌋` - brackets.
"""
if c == "":
return "⌊⌋"
if c in {1}:
return CODE_LETTER[c]
return f"⌊{ord(c):>04x}⌋"
def plainString(self, s):
"""Show the character codes of the characters in a string.
Parameters
----------
s: string
The string to show, may be empty, may contain place holders.
Returns
-------
string
The concatenation of the unicode points of the characters in the string,
each code point between brackets.
See also `Lakhnawi.plainChar()`.
"""
return " ".join(self.plainChar(c) for c in s)
def showChar(self, c):
"""Pretty display of a single unicode character.
We show the character itself and its name (if not a private-use one),
its hexadecimal code, and we indicate by coloring the kind of
white space that the character represents (ordinary space or emspace).
Parameters
----------
c: string
The character in question, may also be the empty string or
the integer 1 (diacritic place holder).
"""
if c in {1, 2}:
return f"""
<div class="ch p">
<div class="cn">{LETTER_KIND[c]}</div>
</div>
"""
if c == "":
extra = ""
ccode = ""
crep = "\u00a0"
cname = "EMPTY"
else:
puas = self.puas
rls = self.rls
ccode = (
f"""<span class="{"p" if c in puas else "c"}">{ord(c):>04x}</span>"""
)
crep = (
"??"
if c in puas
else f"""<span class="{"rc" if c in rls else "lc"}">{c}"""
)
cname = "" if c in puas else f"""<span class="c">{uName(c)}</span>"""
extra = (
"m" if c == EMSPACE else "s" if c == " " else ""
)
return f"""
<div class="ch{extra}">
<div class="cn">{ccode}</div>
<div class="cn"><span class="cni">{crep}</span></div>
<div class="cn">{cname}</div>
</div>
"""
def showString(self, s, asString=False):
"""Pretty display of a string as a series of unicode characters.
Parameters
----------
s: string
The string to display, may be empty, may contain place holders.
asString: boolean, optional `False`
If True, return the result as an HTML string.
Returns
-------
None | string
If `asString`, returns an HTML string, otherwise returns None,
but displays the HTML string.
See also `Lakhnawi.showChar()`.
"""
shtml = f"""<span class="r">{s}</span>"""
html = """<div class="sr">""" + (
"".join(self.showChar(c) for c in s) + "</div>"
)
if asString:
return f"""<span>{shtml}</span>{html}"""
display(HTML(f"""<p>{shtml}</p>{html}"""))
def showReplacements(self, rule=None, isApplied=False):
"""Show a character conversion rule and how it has been applied.
Parameters
----------
rule: string|int, optional `None`
A specification of zero or more rule numbers (see `fusus.lib.parseNums`).
If None, all rules will be taken.
isApplied: boolean, optional `False`
Only show rules that have been applied.
Returns
-------
None
Displays a table of rules with usage statistics.
"""
ruleIndex = self.ruleIndex
rulesApplied = self.rulesApplied
ruleNums = parseNums(rule)
ruleNums = (
set(ruleIndex)
if ruleNums is None
else sorted(r for r in ruleNums if r in ruleIndex)
)
html = []
totalRules = len(ruleIndex)
totalApplications = sum(sum(x.values()) for x in rulesApplied.values())
totalPages = len(set(chain.from_iterable(rulesApplied.values())))
ruleRep = "rule" + ("" if totalRules == 1 else "s")
appRep = "application" + ("" if totalApplications == 1 else "s")
pageRep = "page" + ("" if totalPages == 1 else "s")
html.append(
f"""
<p><b>{totalRules} {ruleRep} with
{totalApplications} {appRep} on {totalPages} {pageRep}</b></p>
<table>
"""
)
for (rn, applied) in sorted(
rulesApplied.items(), key=lambda x: (-sum(x[1].values()), x[0])
):
if rn not in ruleNums:
continue
(vals, d, repls, e) = ruleIndex[rn]
valRep = "".join(self.showChar(c) for c in vals)
replRep = "".join(self.showChar(c) for c in repls)
total = sum(applied.values())
if isApplied and not applied:
continue
if applied:
examplePageNum = sorted(applied, key=lambda p: -applied[p])[0]
nExamples = applied[examplePageNum]
appliedEx = f"e.g. page {examplePageNum} with {nExamples} applications"
else:
appliedEx = ""
appliedRep = f"<b>{total}</b> x applied on <i>{len(applied)}</i> pages"
html.append(
f"""
<tr>
<th>rule {rn}</th>
<td class="al">{appliedRep}</td>
<td class="al">{appliedEx}</td>
<td class="al">{valRep}</td>
<td class="al"><span class="lrg">⇒</span></td>
<td class="al">{replRep}</td>
</tr>
"""
)
html.append("<table>")
display(HTML("".join(html)))
def showDoubles(self, double=None):
"""Show a character with double entry and how often it occurs.
See `Lakhnawi.doubles`.
Parameters
----------
double: char, optional `None`
A character from the doubles list (`Lakhnawi.doubles`).
If None, all such characters will be taken.
isApplied: boolean, optional `False`
Only show rules that have been applied.
Returns
-------
None
Displays a table of double-entry characters with occurrence statistics.
"""
doubles = self.doubles
doublesApplied = self.doublesApplied
theseDoubles = (
set(doubles) if double is None else {double} if double in doubles else set()
)
html = []
totalDoubles = len(doubles)
totalApplications = sum(sum(x.values()) for x in doublesApplied.values())
totalPages = len(set(chain.from_iterable(doublesApplied.values())))
doubleRep = "double" + ("" if totalDoubles == 1 else "s")
appRep = "application" + ("" if totalApplications == 1 else "s")
pageRep = "page" + ("" if totalPages == 1 else "s")
html.append(
f"""
<p><b>{totalDoubles} {doubleRep} with
{totalApplications} {appRep} on {totalPages} {pageRep}</b></p>
<table>
"""
)
for (d, applied) in sorted(
doublesApplied.items(), key=lambda x: (-sum(x[1].values()), x[0])
):
if d not in theseDoubles:
continue
e = doubles[d]
doubleRep = f"{self.showChar(e)} ⇒ {self.showChar(d)}"
total = sum(applied.values())
if applied:
examplePageNum = sorted(applied, key=lambda p: -applied[p])[0]
nExamples = applied[examplePageNum]
appliedEx = f"e.g. page {examplePageNum} with {nExamples} applications"
else:
appliedEx = ""
appliedRep = f"<b>{total}</b> x applied on <i>{len(applied)}</i> pages"
html.append(
f"""
<tr>
<td class="al">{appliedRep}</td>
<td class="al">{appliedEx}</td>
<td class="al">{doubleRep}</td>
</tr>
"""
)
html.append("<table>")
display(HTML("".join(html)))
def showFinals(self, final=None):
"""Show a character with final form and how often it has been replaced.
Final forms will be normalized to ground forms
and sometimes a space will be added.
Parameters
----------
final: char, optional `None`
A character from the final space list (`fusus.char.UChar.finalSpace`).
If None, all such characters will be taken.
isApplied: boolean, optional `False`
Only show rules that have been applied.
Returns
-------
None
Displays a table of final space characters with occurrence statistics.
"""
finalSpace = self.finalSpace
finalsApplied = self.finalsApplied
theseFinals = (
finalSpace if final is None else {final} if final in finalSpace else set()
)
html = []
totalFinals = len(finalSpace)
totalApplications = sum(sum(x.values()) for x in finalsApplied.values())
totalPages = len(set(chain.from_iterable(finalsApplied.values())))
finalRep = "final" + ("" if totalFinals == 1 else "s")
appRep = "application" + ("" if totalApplications == 1 else "s")
pageRep = "page" + ("" if totalPages == 1 else "s")
html.append(
f"""
<p><b>{totalFinals} {finalRep} with
{totalApplications} {appRep} on {totalPages} {pageRep}</b></p>
<table>
"""
)
for (f, applied) in sorted(
finalsApplied.items(), key=lambda x: (-sum(x[1].values()), x[0])
):
if f not in theseFinals:
continue
finalRep = self.showChar(f)
total = sum(applied.values())
if applied:
examplePageNum = sorted(applied, key=lambda p: -applied[p])[0]
nExamples = applied[examplePageNum]
appliedEx = f"e.g. page {examplePageNum} with {nExamples} applications"
else:
appliedEx = ""
appliedRep = f"<b>{total}</b> x applied on <i>{len(applied)}</i> pages"
html.append(
f"""
<tr>
<td class="al">{appliedRep}</td>
<td class="al">{appliedEx}</td>
<td class="al">{finalRep}</td>
</tr>
"""
)
html.append("<table>")
display(HTML("".join(html)))
    def showLineHeights(self, pageNumSpec):
        """Shows how line heights have been determined.

        The pages can be selected by page numbers.

        NOTE(review): `heights` and `clusteredHeights` are filled per page by
        the extraction run; calling this for a page that has not been processed
        yet raises KeyError — confirm intended usage order.

        Parameters
        ----------
        pageNumSpec: None | int | string | iterable
            As in `Lakhnawi.parsePageNums()`.
        """
        heights = self.heights
        clusteredHeights = self.clusteredHeights
        for pageNum in self.parsePageNums(pageNumSpec):
            theseHeights = heights[pageNum]
            theseClusteredHeights = clusteredHeights[pageNum]
            print(f"Line heights page {pageNum:>3}")
            print("\nraw heights")
            for k in sorted(theseHeights):
                print(f"{theseHeights[k]:>4} characters @ height {int(round(k)):>4}")
            print("line heights")
            # each cluster of raw heights corresponds to one physical line
            for (ln, kc) in enumerate(sorted(theseClusteredHeights)):
                peak = ", ".join(
                    f"{int(round(k)):>4}" for k in sorted(theseClusteredHeights[kc])
                )
                print(
                    f"line {ln + 1:>2}: "
                    f"{sum(theseHeights[k] for k in theseClusteredHeights[kc]):>4}"
                    f" characters @height {peak}"
                )
def parsePageNums(self, pageNumSpec):
"""Parses a value as one or more page numbers.
Parameters
----------
pageNumSpec: None | int | string | iterable
If `None` results in all page numbers.
If an `int`, it stands for that int.
If a `string`, it is allowed to be a comma separated list of
numbers or ranges, where a range is a lower bound and an upper bound
separated by a `-`.
If none of these, it should be an iterable of `int` values.
Returns
-------
None | iterable of int
Depending on the value.
"""
doc = self.doc
pageNums = (
list(range(1, len(doc) + 1))
if not pageNumSpec
else [pageNumSpec]
if type(pageNumSpec) is int
else setFromSpec(pageNumSpec)
if type(pageNumSpec) is str
else list(pageNumSpec)
)
return [i for i in sorted(pageNums) if 0 < i <= len(doc)]
def drawPages(self, pageNumSpec, clip=None):
"""Draws a (part) of page from the PDF as a raster image.
Parameters
----------
pageNumSpec: None | int | string | iterable
As in `Lakhnawi.parsePageNums()`.
clip: (int, int), optional `None`
If None: produces the whole page.
Otherwise it is `(top, bottom)`, and a stripe
from top to bottom will be displayed.
"""
doc = self.doc
for pageNum in self.parsePageNums(pageNumSpec):
page = doc[pageNum - 1]
if clip is not None:
clip = (0, clip[0], page.rect.width, clip[1])
pix = page.getPixmap(matrix=fitz.Matrix(4, 4), clip=clip, alpha=False)
display(HTML(f"""<p><b>page {pageNum}</b></p>"""))
display(Image(data=pix.getPNGData(), format=DEFAULT_EXTENSION))
def getPages(
    self,
    pageNumSpec,
    refreshConfig=False,
    doRules=True,
    doFilter=True,
    onlyFnRules=False,
):
    """Reads pages of the PDF and extracts text.

    This does all of the hard work of the text extraction.
    It saves the textual data in attributes of the Lakhnawi object,
    augmented with all kinds of diagnostic information.
    From all this data, various output representations can be generated
    rather easily by other methods.

    Parameters
    ----------
    pageNumSpec: None | int | string | iterable
        As in `Lakhnawi.parsePageNums()`.
    refreshConfig: boolean, optional `False`
        If `True`, rereads all character configuration.
        Ideal when you are iteratively developing the character configuration.
    doRules: boolean, optional `True`
        If `False`, suppresses the application of character transformation
        rules. Mainly used when debugging other aspects of the text extraction.
    doFilter: boolean, optional `True`
        If `False`, suppresses the application of unicode normalization,
        by which presentational characters are transformed into sequences
        of ordinary, basic characters.
        Used for debugging.
    onlyFnRules: boolean, optional `False`
        If `True`, skips most of the conversion.
        Only determine where the footnote rules are.
        Used for debugging.

    Returns
    -------
    None
        The effect is that attributes of the Lakhnawi object
        are filled:

        * `Lakhnawi.heights`
        * `Lakhnawi.clusteredHeights`
        * `Lakhnawi.fnRules`

        For the other attributes, see `Lakhnawi.collectPage()`.

    !!! hint "multiple runs"
        If you do multiple runs of this function for different pages,
        the results will not overwrite each other in general,
        because the attributes hold the results in dictionaries
        keyed by page number.
    """
    # bail out early if the character configuration failed to load
    if not self.good:
        print("SKIPPING because of config errors")
        return
    fnRules = self.fnRules
    spaces = self.spaces
    columns = self.columns
    # store the debug switches so that trimLine() can see them
    self.doRules = doRules
    self.doFilter = doFilter
    ruleIndex = self.ruleIndex
    rulesApplied = self.rulesApplied
    if refreshConfig:
        self.getCharConfig()
    # reset the per-rule application counters for this run
    for rn in ruleIndex:
        rulesApplied[rn] = collections.Counter()
    for (i, pageNum) in enumerate(self.parsePageNums(pageNumSpec)):
        self.pageNum = pageNum
        # progress indicator, rewritten in place on one terminal line;
        # shows the page number only when it differs from the sequence number
        rep = (
            f"{i + 1:>4} (page {pageNum:>4})"
            if pageNum != i + 1
            else (f"{i + 1:>4}" + " " * 12)
        )
        sys.stdout.write(f"\r\t{rep}")
        sys.stdout.flush()
        doc = self.doc
        page = doc[pageNum - 1]
        # detect footnote rules: colored drawn rectangles wider than
        # FNRULE_WIDTH; remember the vertical position of their bottom edge
        theseFnRules = set()
        for fnRule in page.getDrawings():
            if RECT in fnRule and fnRule.get(COLOR, None):
                rect = fnRule[RECT]
                width = rect.x1 - rect.x0
                if width > FNRULE_WIDTH:
                    theseFnRules.add(int(round(rect.y1)))
        fnRules[pageNum] = tuple(sorted(theseFnRules))
        # fresh per-page containers, filled later by trimLine()
        spaces[pageNum] = {}
        columns[pageNum] = {}
        if onlyFnRules:
            continue
        # raw character extraction via fitz, then the real processing
        textPage = page.getTextPage()
        data = textPage.extractRAWDICT()
        self.collectPage(data)
def getPageRaw(self, pageNum):
    """Do a rough/raw text extract of a specific page.

    The *fitz* method
    [extractRAWDICT()](https://pymupdf.readthedocs.io/en/latest/textpage.html#TextPage.extractRAWDICT)
    is used to obtain very detailed information about each character on that page.

    Used for debugging.

    Parameters
    ----------
    pageNum: int
        A valid page number.
        It is the sequence number of the page within the PDF, counting from 1.

    Returns
    -------
    None
        It pretty prints the output of the fitz method, which is a big
        and deep dictionary.
    """
    self.pageNum = pageNum
    sys.stdout.write(f"page {pageNum:>4}")
    sys.stdout.flush()
    page = self.doc[pageNum - 1]
    rawData = page.getTextPage().extractRAWDICT()
    pprint(rawData)
def getPageObj(self, pageNum):
    """Get the *fitz* object for a specific page.

    Used for debugging.

    Parameters
    ----------
    pageNum: int
        A valid page number.
        It is the sequence number of the page within the PDF, counting from 1.

    Returns
    -------
    object
        A *fitz*
        [page object](https://pymupdf.readthedocs.io/en/latest/page.html)
    """
    # remember the page we are looking at, like the other page accessors do
    self.pageNum = pageNum
    return self.doc[pageNum - 1]
def plainPages(self, pageNumSpec):
    """Outputs processed pages as plain text.

    Uses `Lakhnawi.plainLine()`.

    Parameters
    ----------
    pageNumSpec: None | int | string | iterable
        As in `Lakhnawi.parsePageNums()`.

    Returns
    -------
    None
        The plain text is printed to the output.
    """
    for pageNum in self.parsePageNums(pageNumSpec):
        # pages without extracted text yield no output lines
        for line in self.text.get(pageNum, []):
            print(self.plainLine(line))
def tsvPages(self, pageNumSpec):
    """Outputs processed pages as tab-separated data.

    See `fusus.convert` for the details of the output format.

    Uses
    `Lakhnawi.tsvLine()`.
    and `Lakhnawi.tsvHeadLine()`.

    Parameters
    ----------
    pageNumSpec: None | int | string | iterable
        As in `Lakhnawi.parsePageNums()`.

    Returns
    -------
    None
        The tab-separated data is written to a single tsv file.
        There is a heading row.
        The file is in `fusus.parameters.UR_DIR`, under `Lakhnawi`.
        The name of the file includes a page specification.
    """
    text = self.text
    destDir = f"{UR_DIR}/{NAME}"
    pageNums = self.parsePageNums(pageNumSpec)
    # exist_ok makes the prior os.path.exists() guard unnecessary
    os.makedirs(destDir, exist_ok=True)
    pageNumRep = ALL_PAGES if pageNumSpec is None else str(pageNumSpec)
    filePath = f"{destDir}/{pageNumRep}.tsv"
    # FIX: use a context manager so the file is closed even if a
    # tsvLine() call raises (the original leaked the handle on error)
    with open(filePath, "w") as fh:
        fh.write(self.tsvHeadLine())
        for pageNum in pageNums:
            lines = text.get(pageNum, [])
            for (ln, line) in enumerate(lines):
                fh.write(self.tsvLine(line, pageNum, ln + 1))
    print(f"TSV data written to {unexpanduser(filePath)}")
def htmlPages(
    self,
    pageNumSpec,
    line=None,
    showSpaces=False,
    export=False,
    singleFile=False,
    toc=False,
):
    """Outputs processed pages as formatted HTML pages.

    Uses
    `Lakhnawi.htmlLine()`.

    The HTML output is suitable to read the extracted text.
    Its layout matches the original closely, which makes it easier
    to see where the output deviates from the source page.

    Parameters
    ----------
    pageNumSpec: None | int | string | iterable
        As in `Lakhnawi.parsePageNums()`.
    line: None | int | string | iterable
        A specification of zero or more line numbers (see `fusus.lib.parseNums`).
    showSpaces: boolean, optional `False`
        If `True`, shows the spaces with a conspicuous coloured background.
    export: boolean, optional `False`
        If `True`, writes the HTML results to disk.
        In this case, the HTML will not be displayed in the notebook.
    singleFile: boolean, optional `False`
        Only meaningful is `export=True`.
        If `True`, writes the output to a single HTML file,
        otherwise to one file per page, in a directory called `html`.
    toc: boolean, optional `False`
        Only meaningful is `export=True` and `singleFile=True`.
        If `True`, writes a table of contents to the file.
        The TOC points to every page that is included in the output file.

    Returns
    -------
    None
        Depending on `export`, the page is displayed in the notebook
        where this function is called, or exported to a file on disk.
        The file is in `fusus.parameters.UR_DIR`, under `Lakhnawi`.
        The name of the file includes a page specification.
    """
    # remembered so that htmlLine() can honour the space highlighting
    self.showSpaces = showSpaces
    text = self.text
    destDir = f"{UR_DIR}/{NAME}" if singleFile else f"{UR_DIR}/{NAME}/html"
    pageNums = self.parsePageNums(pageNumSpec)
    lineNums = parseNums(line)
    lineNums = None if lineNums is None else set(lineNums)
    filesWritten = 0
    if export:
        if not os.path.exists(destDir):
            os.makedirs(destDir, exist_ok=True)
        if singleFile:
            # open one file handle here; it stays open across the page
            # loop below and is closed in the epilogue at the bottom
            pageNumRep = ALL_PAGES if pageNumSpec is None else str(pageNumSpec)
            tocRep = "-with-toc" if toc else ""
            filePath = f"{destDir}/{pageNumRep}{tocRep}.html"
            fh = open(filePath, "w")
            fh.write(preHtml(f"{pageNumRep}{tocRep}"))
            if toc:
                # NB: `toc` is rebound here from a boolean flag to the
                # generated TOC markup string; later truthiness tests
                # on `toc` still work because the markup is non-empty
                toc = getToc(pageNums)
                fh.write(
                    f"""
<div class="window">
<div class="sidebar">
{toc}
</div>
<div class="pages bypage">
"""
                )
            else:
                fh.write(
                    """
<div class="pages">
"""
                )
    # exported pages get the full "page" styling; in-notebook display
    # uses the compact "pagec" variant
    pageClass = "page" + ("" if export else "c")
    for pageNum in pageNums:
        lines = text.get(pageNum, [])
        nLines = len(lines)
        html = []
        html.append(
            f"""
<div class="{pageClass}">
<div class="phead"><a name="p{pageNum:>03}">{pageNum}</a></div>
"""
        )
        prevMulti = False
        for (i, line) in enumerate(lines):
            # honour the optional line-number filter
            if lineNums is not None and i + 1 not in lineNums:
                continue
            html.append(self.htmlLine(line, prevMulti, i == nLines - 1))
            prevMulti = len(line) > 1
        html.append("""</div>""")
        if export:
            htmlRep = "".join(html)
            if singleFile:
                fh.write(htmlRep)
            else:
                # per-page file: wrap the page in a complete HTML document
                html = preHtml(pageNum) + htmlRep + POST_HTML
                filePath = f"{destDir}/p{pageNum:>03}.html"
                with open(filePath, "w") as fh:
                    fh.write(html)
                filesWritten += 1
        else:
            display(HTML("\n".join(html)))
    if export and singleFile:
        # close the wrapper divs opened before the page loop
        fh.write(
            """
</div>
"""
        )
        if toc:
            fh.write(
                """
</div>
"""
            )
        fh.write(POST_HTML)
        fh.close()
        print(f"HTML written to {unexpanduser(filePath)}")
    if export and not singleFile:
        print(f"{filesWritten} HTML files written to {unexpanduser(destDir)}/")
def showLines(
    self,
    pageNumSpec,
    line=None,
    start=None,
    end=None,
    search=None,
    orig=False,
    every=False,
):
    """Outputs processed lines as a formatted HTML table.

    The lines can be selected by page numbers and line numbers.
    Within the selected lines, the characters can be selected by
    start/end postions, or by characters of interest.

    All of these indices start at 1.

    Parameters
    ----------
    pageNumSpec: None | int | string | iterable
        As in `Lakhnawi.parsePageNums()`.
    line: None | int | string | iterable
        A specification of zero or more line numbers (see `fusus.lib.parseNums`).
    start: integer, optional `None`
        Starting word position in each line to be output.
        If `None`, starts at the beginning of each line.
    end: integer, optional `None`
        End word position in each line to be output.
        If `None`, ends at the end of each line.
    search: string or iterable of char, optional `None`
        If not none, all characters in `search` are deemed interesting.
        All occurrences of these characters within the selected lines are
        displayed, included a small context.
    orig: boolean, optional `False`
        Only meaningful if `search` is given.
        If `True`: the check for interesting
        characters is done in the original, untranslated characters.
        Otherwise, interesting characters are looked up in the translated
        characters.
    every: boolean, optional `False`
        Only meaningful if `search` is given.
        If `True`, when looking for interesting characters, all occurrences will
        be retrieved, otherwise only the first one.

    Returns
    -------
    None
        The output material will be displayed in the notebook.
    """
    lines = self.lines
    pageNums = self.parsePageNums(pageNumSpec)
    lineNums = parseNums(line)
    myLines = {pageNum: lines[pageNum] for pageNum in pageNums if pageNum in lines}
    html = []
    html.append("<table>")
    html.append(
        """
<tr>
<th>seq</th>
<th>top</th>
<th>bottom</th>
<th>left</th>
<th>right</th>
<th>spacing</th>
<th>font</th>
<th>size</th>
<th>orig char</th>
<th>char</th>
</tr>
"""
    )
    # number of context characters shown on each side of a search hit
    shift = 5
    for (pageNum, pageLines) in myLines.items():
        myLineNums = (
            range(1, len(pageLines) + 1)
            if lineNums is None
            else [ln for ln in lineNums if 0 < ln <= len(pageLines)]
        )
        for ln in myLineNums:
            chars = pageLines[ln - 1]
            nChars = len(chars)
            html.append(
                f"""
<tr>
<th colspan=3>page {pageNum}</th>
<th colspan=2>line {ln}</th>
<th colspan=3>{nChars} characters</th>
</tr>
"""
            )
            if search is None:
                # no search: one range from `start` to `end` (1-based,
                # clamped to the actual line length)
                ranges = [(max((start or 0) - 1, 0), min(end or nChars, nChars))]
            else:
                # search: collect context windows around each hit,
                # merging windows that overlap the previous one
                ranges = []
                for (i, char) in enumerate(chars):
                    if search in char[-2 if orig else -1]:
                        occStart = max((i - shift, 0))
                        occEnd = min((i + shift + 1, nChars))
                        if ranges and occStart <= ranges[-1][1]:
                            # overlaps previous window: extend it in place
                            ranges[-1][1] = occEnd
                        else:
                            ranges.append([occStart, occEnd])
                        if not every:
                            break
            for (occStart, occEnd) in ranges:
                for i in range(occStart, occEnd):
                    char = chars[i]
                    # character record layout: box, font, size,
                    # spacing, original char, translated string
                    (le, to, ri, bo, font, size, spacing, oc, c) = char
                    html.append(
                        f"""
<tr>
<td><b>{i + 1}</b></td>
<td>{ptRepD(to)}</td>
<td>{ptRepD(bo)}</td>
<td>{ptRepD(le)}</td>
<td>{ptRepD(ri)}</td>
<td>{spacing}</td>
<td>{font}</td>
<td>{size}pt</td>
<td>{"".join(self.showChar(x) for x in reversed(oc))}</td>
<td>{"".join(self.showChar(x) for x in reversed(c))}</td>
</tr>
"""
                    )
            # with every=False we stop after the first line with a hit
            if search and ranges and not every:
                break
    html.append("</table>")
    display(HTML("".join(html)))
def showWords(self, pageNumSpec, line=None):
    """Outputs processed words as a formatted HTML table.

    The lines can be selected by page numbers and line numbers.

    All words within the selected lines are put into a table with
    the same properties as in the TSV data,
    see `Lakhnawi.tsvPages`.

    Parameters
    ----------
    pageNumSpec: None | int | string | iterable
        As in `Lakhnawi.parsePageNums()`.
    line: None | int | string | iterable
        A specification of zero or more line numbers (see `fusus.lib.parseNums`).

    Returns
    -------
    None
        The output material will be displayed in the notebook.
    """
    text = self.text
    pageNums = self.parsePageNums(pageNumSpec)
    lineNums = parseNums(line)
    myLines = {pageNum: text[pageNum] for pageNum in pageNums if pageNum in text}
    html = []
    html.append("<table>")
    html.append(
        """
<tr>
<th>page</th>
<th>line</th>
<th>col</th>
<th>span</th>
<th>dir</th>
<th>left</th>
<th>top</th>
<th>right</th>
<th>bottom</th>
<th>letters</th>
<th>punc</th>
</tr>
"""
    )
    for (pageNum, pageLines) in myLines.items():
        myLineNums = (
            range(1, len(pageLines) + 1)
            if lineNums is None
            else [ln for ln in lineNums if 0 < ln <= len(pageLines)]
        )
        for ln in myLineNums:
            # a line is a list of columns; a column is a list of spans;
            # a span is (direction, words); a word is (letters, punc, box)
            cols = pageLines[ln - 1]
            for (cn, spans) in enumerate(cols):
                for (sn, (dr, words)) in enumerate(spans):
                    for (letters, punc, (le, to, ri, bo)) in words:
                        html.append(
                            f"""
<tr>
<td><b>{pageNum}</b></td>
<td><b>{ln}</b></td>
<td><i>{cn + 1}</i></td>
<td><i>{sn + 1}</i></td>
<td><i>{dr}</i></td>
<td>{ptRep(le)}</td>
<td>{ptRep(to)}</td>
<td>{ptRep(ri)}</td>
<td>{ptRep(bo)}</td>
<td>{self.showString(letters, asString=True)}</td>
<td>{self.showString(punc, asString=True)}</td>
</tr>
"""
                        )
    html.append("</table>")
    display(HTML("".join(html)))
def showUsedChars(
    self,
    pageNumSpec,
    orig=False,
    onlyPuas=False,
    onlyPresentational=False,
    long=False,
    byOcc=False,
):
    """Show used characters.

    Gives an overview of character usage, either in the input PDF, or in
    the text output.

    Parameters
    ----------
    pageNumSpec: None | int | string | iterable
        As in `Lakhnawi.parsePageNums()`.
    orig: boolean, optional `False`
        If `True`: shows characters of the original PDF.
        Otherwise, shows characters of the translated output/
    onlyPuas: boolean, optional `False`
        If `True`, the result is restricted to private use characters.
    onlyPresentational: boolean, optional `False`
        If `True`, the result is restricted to presentational characters.
        See `fusus.char.UChar.presentational`.
    long: boolean, optional `False`
        If `True`, for each character output the complete list of pages
        where the character occurs. Otherwise, show only the most
        prominent pages.
    byOcc: boolean, optional `False`
        If `True`, sort the results by first occurrence of the characters.
        Otherwise, sort the results by unicode code point of the character.

    Returns
    -------
    None
        The output material will be displayed in the notebook.
    """
    presentational = self.presentational
    pageNums = self.parsePageNums(pageNumSpec)
    text = self.text
    lines = self.lines
    puas = self.puas
    # char -> Counter(pageNum -> number of occurrences on that page)
    charsOut = collections.defaultdict(collections.Counter)

    def keyByOcc(c):
        # sort key: characters with the most total occurrences first
        pageNums = charsOut[c]
        return -sum(pageNums.values())

    sortKey = keyByOcc if byOcc else lambda x: x
    if orig:
        # count the untranslated characters (second-to-last member of
        # each character record) in the raw extracted lines
        lns = {pageNum: lines[pageNum] for pageNum in pageNums if pageNum in lines}
        for (pageNum, pageLines) in lns.items():
            for line in pageLines:
                for char in line:
                    c = char[-2]
                    if c in puas or (
                        not onlyPuas
                        and (c in presentational or not onlyPresentational)
                    ):
                        charsOut[c][pageNum] += 1
    else:
        # count characters in the translated word output; PUA characters
        # appear there as hex escapes matched by PUA_RE
        texts = {pageNum: text[pageNum] for pageNum in pageNums if pageNum in text}
        for (pageNum, pageText) in texts.items():
            for line in pageText:
                for col in line:
                    for span in col:
                        for word in span[1]:
                            letters = word[0]
                            punc = word[1]
                            thesePuas = PUA_RE.findall(letters)
                            for pua in thesePuas:
                                charsOut[chr(int(pua, base=16))][pageNum] += 1
                            if not onlyPuas:
                                rest = PUA_RE.sub("", f"{letters}{punc}")
                                for c in rest:
                                    if not (
                                        onlyPresentational
                                        and c not in presentational
                                    ):
                                        charsOut[c][pageNum] += 1
    totalChars = len(charsOut)
    totalPages = len(set(chain.from_iterable(charsOut.values())))
    totalOccs = sum(sum(pns.values()) for pns in charsOut.values())
    charRep = "character" + ("" if totalChars == 1 else "s")
    occRep = "occurence" + ("" if totalOccs == 1 else "s")
    pageRep = "page" + ("" if totalPages == 1 else "s")
    label = "private use " if onlyPuas else ""
    html = []
    html.append(
        f"""
<p><b>{totalChars} {label}{charRep} in {totalOccs} {occRep}
on {totalPages} {pageRep}</b></p>
<table>
"""
    )
    for c in sorted(charsOut, key=sortKey):
        pageNums = charsOut[c]
        nPageNums = len(pageNums)
        pageRep = "page" + ("" if nPageNums == 1 else "s")
        thistotal = sum(pageNums.values())
        # the page with the most occurrences serves as the example
        examplePageNum = sorted(pageNums, key=lambda p: -pageNums[p])[0]
        nExamples = pageNums[examplePageNum]
        html.append(
            f"""
<tr>
<td class="al">{self.showChar(c)}</td>
<td class="al"><b>{thistotal}</b> on <i>{nPageNums}</i> {pageRep}</td>
<td class="al">e.g. page {examplePageNum} with <b>{nExamples}</b> occurrences</td>
</tr>
"""
        )
        if long:
            # full per-page breakdown for this character
            for pn in sorted(pageNums):
                occs = pageNums[pn]
                html.append(
                    f"""
<tr>
<td></td>
<td class="al"><i>page {pn:>3}</i>: <b>{occs:>3}</b></td>
</tr>
"""
                )
    html.append("</table>")
    display(HTML("".join(html)))
def showColumns(self, pageNumSpec):
    """Show the column division of lines.

    Gives an overview of the columns in each line.

    The result is a readable, ascii overview of the columns
    that exists in the lines of the selected pages.
    It is useful to visually check column detection for many pages.

    Parameters
    ----------
    pageNumSpec: None | int | string | iterable
        As in `Lakhnawi.parsePageNums()`.

    Returns
    -------
    None
        The output material will be displayed in the notebook.
    """
    columns = self.columns
    for pageNum in self.parsePageNums(pageNumSpec):
        if pageNum not in columns:
            continue
        lineInfo = columns[pageNum]
        # keep only lines that have a column threshold and at least
        # one em-space, i.e. lines that are actually split into columns
        multiple = [
            (lNum, threshold, emspaces)
            for lNum in sorted(lineInfo)
            for (threshold, emspaces) in [lineInfo[lNum]]
            if threshold is not None and len(emspaces) > 0
        ]
        if not multiple:
            print(f"page {pageNum:>3} -")
            continue
        print(f"page {pageNum:>3}:")
        for (lNum, threshold, emspaces) in multiple:
            print(f"\t{lNum:>2}: {'- ' * (len(emspaces) + 1)}")
def showSpacing(self, pageNumSpec, line=None):
    """Show where the spaces are.

    Gives an overview of the white space positions in each line.
    It is useful to debug the horizontal white space algorithm.

    Parameters
    ----------
    pageNumSpec: None | int | string | iterable
        As in `Lakhnawi.parsePageNums()`.
    line: None | int | string | iterable
        A specification of zero or more line numbers (see `fusus.lib.parseNums`).

    Returns
    -------
    None
        The output material will be displayed in the notebook.
    """
    pageNums = self.parsePageNums(pageNumSpec)
    lineNums = parseNums(line)
    lineNums = None if lineNums is None else set(lineNums)
    spaces = self.spaces
    for pageNum in pageNums:
        if pageNum not in spaces:
            continue
        print(f"page {pageNum:>3}")
        lineInfo = spaces[pageNum]
        # BUGFIX: the inner loop variable was called `spaces`, which
        # clobbered the page-level dict and broke every page after the
        # first one; it now has a distinct name.
        for (ln, lineSpaces) in lineInfo.items():
            if lineNums is not None and ln not in lineNums:
                continue
            print(f"\tline {ln:>2}")
            for (i, after, isSpace) in lineSpaces:
                print(f"\t\t{i + 1:>3} {'] [' if isSpace else ']==['} {after}")
def collectPage(self, data):
    """Transforms raw text into proper textual data.

    Called by `Lakhnawi.getPages()` and delivers its results
    to attributes of the Lakhnawi object.

    Here are they

    * `Lakhnawi.lines`
    * `Lakhnawi.doubles`
    * `Lakhnawi.text`

    They are all dictionaries, keyed by page number first and then by line.

    Parameters
    ----------
    data: dict
        as obtained by the
        [extractRAWDICT()](https://pymupdf.readthedocs.io/en/latest/textpage.html#TextPage.extractRAWDICT)
        method of *fitz*.

    Returns
    -------
    None
    """
    doubles = self.doubles
    doublesApplied = self.doublesApplied
    pageNum = self.pageNum
    nospacings = self.nospacings
    fnRules = self.fnRules
    bracketMap = self.bracketMap
    text = self.text
    # only the topmost footnote rule on the page matters
    fnRule = fnRules.get(pageNum, None)
    fnRule = fnRule[0] if fnRule else None
    # flat list of character records gathered from the raw dict
    chars = []
    prevChar = None
    prevFont = None
    prevSize = None

    def addChar():
        # flush the pending character into `chars`
        box = prevChar["bbox"]
        yBot = box[3]
        # skip chars below the footnote rule, if any
        if fnRule is not None and yBot > fnRule:
            return
        c = prevChar["c"]
        # european digits are dropped; brackets are normalized
        cr = "" if isEuDigit(c) else c
        cr = bracketMap.get(cr, cr)
        chars.append(
            (
                *box,
                prevFont,
                prevSize,
                "" if c in nospacings else True,
                c,
                cr,
            )
        )

    def collectChars(data, font, size):
        # recursive walk over the raw dict; font/size are inherited
        # from the enclosing span unless overridden at this level
        nonlocal prevChar
        nonlocal prevFont
        nonlocal prevSize
        if type(data) is list:
            for elem in data:
                collectChars(elem, font, size)
        elif type(data) is dict:
            if "font" in data:
                font = data["font"]
            if "size" in data:
                size = data["size"]
            if "c" in data:
                c = data["c"]
                skip = False
                if c == " ":
                    skip = True
                if prevChar is not None:
                    pc = prevChar["c"]
                    # merge configured "double" character pairs into one:
                    # the second of the pair is dropped (or, in the
                    # reversed case, replaces the first)
                    if pc in doubles and doubles[pc] == c:
                        skip = True
                        doublesApplied[pc][pageNum] += 1
                    if c in doubles and doubles[c] == pc:
                        prevChar = data
                        skip = True
                        doublesApplied[c][pageNum] += 1
                if not skip:
                    if prevChar is not None:
                        addChar()
                    prevChar = data
                    prevFont = font
                    prevSize = size
            for (k, v) in data.items():
                if type(v) in {list, dict}:
                    collectChars(v, font, size)

    collectChars(data, None, None)
    # flush the final pending character
    if prevChar is not None:
        addChar()
    # cluster characters into lines by vertical position,
    # then order within each line horizontally
    clusterKeyCharV = self.clusterVert(chars)
    lines = {}
    for char in sorted(chars, key=lambda c: (clusterKeyCharV(c), keyCharH(c))):
        k = clusterKeyCharV(char)
        lines.setdefault(k, []).append(list(char))
    theseLines = list(lines.values())
    # drop a leading line that is just the printed page number
    if theseLines and self.isPageNum(theseLines[0]):
        theseLines = theseLines[1:]
    # remove arabic numerals between brackets
    for chars in theseLines:
        nChars = len(chars)
        if not nChars:
            continue
        i = 0
        while i < nChars:
            char = chars[i]
            nextI = i + 1
            if char[-1] == "(":
                # look ahead for a matching ")" with only arabic digits
                # in between; if found, blank out the whole bracket group
                found = None
                for j in range(i + 1, nChars):
                    theChar = chars[j][-1]
                    if theChar == ")":
                        found = j + 1
                        nextI = found
                        break
                    if isArDigit(theChar):
                        continue
                    nextI = j
                    break
                if found is not None:
                    for j in range(i, found):
                        chars[j][-1] = ""
                    i = found
            i = nextI
    # keep only lines that still contain visible characters
    self.lines[pageNum] = tuple(
        chars for chars in theseLines if not all(c[-1] == "" for c in chars)
    )
    text[pageNum] = []
    for (ln, line) in enumerate(self.lines[pageNum]):
        self.trimLine(pageNum, ln + 1, line)
def isPageNum(self, chars):
    """Checks whether a series of characters represents an arabic number.

    A printed page number is at most three arabic digits.

    Parameters
    ----------
    chars: iterable of char reocrds

    Returns
    -------
    boolean
        Whether there are between 1 and 3 characters, all of which
        are arabic digits.
    """
    nChars = len(chars)
    if nChars < 1 or nChars > 3:
        return False
    return all(isArDigit(c[-1]) for c in chars)
def trimLine(self, pageNum, ln, chars):
    """Map character sequences to other sequences.

    Two tasks:

    1. Map private use characters to well-known unicode characters
    2. Insert space characters where the next character is separated from the
       previous one.

    Complications:

    Diacritical characters are mostly contained in a very wide box that overlaps
    with the boxes of the other characters. So the diacritical boxes must not be
    taken into account.

    Private use characters often come in sequences, so a sequence of characters
    must be transformed to another sequence.

    We do the tramsformation before the space insertion, because otherwise we
    might insert the space at the wrong point.

    When we transform characters we need to retain the box information,
    because we still have to insert the space.

    That's why we have as input a list of character records, where each record
    is itself a list with box information, orginal character, modified characters
    and space information.

    When we transform characters, we modify character records in place.
    We do not add or remove character records.

    The last member of a character record is the modified sequence.
    This can be zero, one, or multiple characters.
    The second last member is the original character.
    Initially, the the last and second last member of each record are equal.
    We call these members the original character and the result string.

    Space will be appended at the last member of the appropriate character records.

    The transformations are given as a set of rules.
    See `REPLACE_DEFS`.

    A rule consists of a sequence of characters to match and a sequence of
    characters to replace the match with. We call them the match sequence and the
    replacement sequence of the rule.

    For each character in the input list we check which rules have a match sequence
    that start with this character.
    Of these rules, we start with the one with the longest match sequence.
    We then check, by looking ahead, whether the whole match sequence matches the
    input.

    For the purposes of matching, we look into the result strings of the character,
    not to the original characters. This will prevent some rules to be applied
    after an earlier rule has been applied. This is intentional, and results
    in a more simple rule set.

    If there is a match, we walk through all the characters in the input for the
    length of the match sequence of the rule.
    For each input character record, we set its replacement string to the
    corresponding member of the replacement sequence of the rule.
    If the replacement sequence has run out, we replace with the empty string.
    If after this process the replacement sequence has not been exhausted,
    we join the remaining characters in the replacement string and append it
    after the replacement string of the last input character that we have visited.

    After succesful application of a rule, we do not apply other rules that would
    have been applicable at this point. Instead, we move our starting point to the
    next character record in the sequence and repeat the matching process.

    It might be that a character is replaced multiple times, for example when
    it is reached by a rule while looking ahead 3 places, and then later by a
    different rule looking ahead two places.
    However, once a character matches the first member of the match sequence of
    a rule, and the rule matches and is applied, that character will not be
    changed anymore by any other rule.

    !!! caution "place holders for diacritics"
        The following functionality exists in the code, but is not needed anymore
        to process the Lakhnawi PDF.

        The match sequence may contain the character `d`, which is a placeholder
        for a diacritic sign. It will match any diacritic.
        The replacement sequence of such a rule may or may not contain a `d`.
        It is an error if the replacement seqience of a rule contains a `d` while
        its match sequence does not.
        It is also an error of there are multiple `d`s in a match sequence
        of a replacement sequence.

        If so, the working of this rule is effectively two rules:

        Suppose the rule is

        x d y => r d s

        where x, y, r, s are sequences of arbitrary length.

        If the rule matches the input, then first the rule

        x => r

        will be applied at the current position.
        Then we shift temporarily to the position right after where the d has matched,
        and apply the rule

        y => s

        Then we shift back to the orginal position plus one, and continue applying
        rules.
    """
    replace = self.replace
    puas = self.puas
    neutrals = self.neutrals
    rls = self.rls
    rulesApplied = self.rulesApplied
    spaces = self.spaces
    columns = self.columns
    diacritics = self.diacritics
    punct = self.punct
    diacriticLike = self.diacriticLike
    arabicLetters = self.arabicLetters
    presentationalC = self.presentationalC
    presentationalD = self.presentationalD
    finalSpace = self.finalSpace
    finalsApplied = self.finalsApplied
    nonLetter = self.nonLetter
    doRules = self.doRules
    doFilter = self.doFilter
    nChars = len(chars)
    # rule application stage
    if doRules:
        for (i, char) in enumerate(chars):
            c = char[-1]
            if c in replace:
                rules = replace[c]
                # rule record: (rule number, match sequence, diacritic
                # position in the match, replacement sequence, diacritic
                # position in the replacement)
                for (rn, vals, d, repls, e) in rules:
                    nVals = len(vals)
                    if i + nVals > nChars:
                        # not enough characters left to match this rule
                        continue
                    if not all(
                        (
                            d is not None
                            and j == d
                            and chars[i + j][-1]
                            in (diacritics if vals[d] == 1 else arabicLetters)
                        )
                        or chars[i + j][-1] == vals[j]
                        for j in range(nVals)
                    ):
                        # the rule does not match after all
                        continue
                    # the rule matches: we are going to fill in the replacements
                    # if there is a diacritic in the match sequence or the
                    # replacement sequence, we restrict ourselves to the parts
                    # before the diacritics.
                    rulesApplied[rn][pageNum] += 1
                    nRepls = len(repls)
                    dEnd = nVals if d is None else d
                    eEnd = nRepls if e is None else e
                    # so, we are going to replace from here to dEnd (not including)
                    for j in range(dEnd):
                        # put the appropriate replacement character in the
                        # replacement part of the character record
                        # After running out of replacement characters, put in ""
                        chars[i + j][-1] = repls[j] if j < eEnd else ""
                    if eEnd > dEnd:
                        # if there are replacement characters left, put them
                        # in after the last character that we have visited.
                        if dEnd == 0:
                            # In case we have not visited any yet,
                            # we put them in before the current character
                            cd = chars[i + dEnd][-1]
                            r = "".join(repls[dEnd + 1 :])
                            chars[i + dEnd][-1] = f"{r}{cd}"
                        else:
                            # this is the normal case
                            chars[i + dEnd - 1][-1] += "".join(repls[dEnd:eEnd])
                    # if there is a diacritic in the match sequence
                    # we are going to perform the rule for the part
                    # after the diacritic
                    # Note that the case where d is None and e is not None
                    # does not occur
                    if d is not None:
                        # we set the starting points: just after the diacritics
                        dStart = d + 1
                        # if the replacement part does not have a diacritic,
                        # we have already consumed it, and we start right after it
                        eStart = nRepls if e is None else e + 1
                        # we compute the number of characters that still need to be
                        # matched and to be replaced
                        dn = nVals - dStart
                        en = nRepls - eStart
                        # we perform the replacement analogously to what we did
                        # for the first part
                        for j in range(dn):
                            # put the appropriate replacement character in the
                            # replacement part of the character record
                            # After running out of replacement characters, put in ""
                            chars[i + dStart + j][-1] = (
                                repls[eStart + j] if eStart + j < nRepls else ""
                            )
                        if en > dn:
                            # if there are replacement characters left, put them
                            # in after the last character that we have visited.
                            chars[i + nVals - 1][-1] += "".join(
                                repls[eStart + dn :]
                            )
                    # at most one rule per starting position
                    break
    # sift out all presentational characters
    if doFilter:
        trailSpace = False
        for (i, char) in enumerate(chars):
            c = char[-1]
            string = ""
            for x in c:
                if trailSpace:
                    # a pending space from the previous character is
                    # inserted before the next non-diacritic; it lands on
                    # the previous record if this record is still empty
                    if x not in diacriticLike:
                        if x not in nonLetter:
                            if string == "" and i > 0:
                                chars[i - 1][-1] += " "
                            else:
                                string += " "
                        trailSpace = False
                hasFinalSpace = x in finalSpace
                # unicode-normalize presentational characters (NFC or NFD
                # flavour depending on the character class)
                y = (
                    normalizeC(x)
                    if x in presentationalC
                    else normalizeD(x)
                    if x in presentationalD
                    else x
                ).strip()
                space = " " if hasFinalSpace or x in punct else ""
                if hasFinalSpace:
                    finalsApplied[x][pageNum] += 1
                string += y
                if space:
                    trailSpace = True
            char[-1] = string
        if trailSpace:
            # a space was still pending at the end of the line
            if chars:
                chars[-1][-1] += " "
    # add horizontal spacing
    theseSpaces = []
    spaces[pageNum][ln] = theseSpaces
    threshold = None
    theseColumns = [threshold, []]
    columns[pageNum][ln] = theseColumns
    prevLeft = None
    prevLeftI = None
    for (i, char) in enumerate(chars):
        spacing = char[-3]
        if spacing:
            left = char[0]
            right = char[2]
            if prevLeft is not None:
                prevChar = chars[prevLeftI]
                # gap between this char and the previous one (RTL order)
                after = prevLeft - right
                theAfter = ptRepD(after)
                isSpace = theAfter >= SPACE_THRESHOLD
                if isSpace:
                    lastChar = chars[i - 1]
                    if not lastChar[-1].endswith(" "):
                        lastChar[-1] += " "
                # diagnostic marker in the spacing slot of the record
                prevChar[-3] = f"⌊{theAfter}⌋" if isSpace else f"«{theAfter}»"
                theseSpaces.append((i - 1, theAfter, isSpace))
            prevLeft = left
            prevLeftI = i
    if chars:
        chars[-1][-3] = "end"
    # change big spaces to emspaces
    nSpaces = sum(1 for x in theseSpaces if x[2])
    if nSpaces == 1:
        threshold = 90
    elif nSpaces > 1:
        # a space counts as a column separator when it is much bigger
        # than the median space of the line
        spacesGrowing = sorted(x[1] for x in theseSpaces if x[2])
        maxSpace = spacesGrowing[-1]
        medialSpace = spacesGrowing[nSpaces // 2]
        if maxSpace > 4 * medialSpace:
            threshold = maxSpace - medialSpace
    if threshold is not None:
        theseColumns[0] = threshold
        for (i, after, isSpace) in theseSpaces:
            if isSpace and after > threshold:
                theseColumns[1].append((i, after))
                char = chars[i]
                char[-1] = char[-1].rstrip(" ") + EMSPACE
    # remove space between alef and initial follower,
    # provided the alef is the single letter in its word.
    # also for the words yeh+alef(final) and mem+alef(final) do:
    # insert a space behind the alef(final)
    curLen = 0
    prevCons = None
    pprevCons = None
    for (i, char) in enumerate(chars):
        c = char[-1]
        co = char[-2]
        r = ""
        isAFinal = isAlefFinal(co)
        for x in c:
            skip = False
            if x == " ":
                # a space after a single-letter word is removed
                if curLen == 1:  # and prevC in PROCLITICS:
                    skip = True
                curLen = 0
                prevCons = None
                pprevCons = None
            elif x in arabicLetters:
                curLen += 1
                if 2 <= curLen <= 3 and isAFinal:
                    if isMeemOrYeh(prevCons) and (curLen == 2 or isWaw(pprevCons)):
                        # close off the yeh/mem + final alef word here
                        x += " "
                        curLen = 0
                        prevCons = None
                        pprevCons = None
                pprevCons = prevCons
                prevCons = x
            if not skip:
                r += x
        char[-1] = r
    # divide lines into columns
    emspaces = theseColumns[1]
    emspacePositions = {t[0] for t in emspaces}
    columnedChars = [[]]
    dest = columnedChars[-1]
    for (i, char) in enumerate(chars):
        if i in emspacePositions:
            # an em-space position closes the current column
            if char[-1]:
                dest.append(char)
            columnedChars.append([])
            dest = columnedChars[-1]
        else:
            dest.append(char)
    # divide columns into ranges
    # and chunk the ranges into words
    # and save the word boundary boxes
    text = self.text
    # text is a dict keyed by pageNum and the values are tuples of line data
    # a line datum is a list of columns
    # a column is a list of spans
    # a span is a pair of a direction char ("l" or "r") plus a list of word data
    # a word datum is a string plus a word box
    # a word box is a (left, top, right, bottom) tuple
    result = []
    text.setdefault(pageNum, []).append(result)
    prevDir = "r"
    # we transform letters into chunks, where each chunk is a pair of
    # word material
    # punctuation material
    outChars = [[], []]
    inWord = True
    box = [None, None, None, None]

    def addWord():
        # flush the accumulated word/punctuation chunk into the current
        # column; left-to-right spans are stored in display order
        if outChars[0] or outChars[1]:
            wordCharsRep = "".join(
                outChars[0] if prevDir == "r" else reversed(outChars[0])
            )
            puncCharsRep = "".join(
                outChars[1] if prevDir == "r" else reversed(outChars[1])
            )
            lastSpan = None if len(result[-1]) == 0 else result[-1][-1]
            element = (wordCharsRep, puncCharsRep, tuple(box))
            if lastSpan is None or lastSpan[0] != prevDir:
                # direction change: start a new span
                result[-1].append((prevDir, [element]))
            else:
                result[-1][-1][-1].append(element)

    def setBox(char):
        # grow the word bounding box with this character's box:
        # minimize left/top (indices 0,1), maximize right/bottom (2,3)
        for (i, coor) in enumerate(char[0:4]):
            if (
                (b := box[i]) is None
                or (i < 2 and coor < b)
                or (i >= 2 and coor > b)
            ):
                box[i] = coor

    for chars in columnedChars:
        result.append([])
        outChars = [[], []]
        box = [None, None, None, None]
        for char in chars:
            c = char[-1]
            if c == "":
                continue
            for d in c:
                spaceSeen = d in {" ", EMSPACE}
                changeWord = not inWord and d not in nonLetter
                if spaceSeen:
                    outChars[1].append(d)
                if spaceSeen or changeWord:
                    # word boundary: flush and start afresh
                    addWord()
                    box = [None, None, None, None]
                    outChars = [[d] if changeWord else [], []]
                    inWord = True
                    continue
                # writing direction of this character; neutrals inherit
                # the direction of the preceding character
                thisDir = prevDir if d in neutrals else "r" if d in rls else "l"
                if prevDir != thisDir:
                    addWord()
                    box = [None, None, None, None]
                    outChars = [[], []]
                    inWord = True
                prevDir = thisDir
                if inWord:
                    if d in nonLetter:
                        inWord = False
                dest = 0 if inWord else 1
                rep = d
                if d in puas:
                    # unresolved private-use character: keep as hex escape
                    rep = f"⌊{ord(d):>04x}⌋"
                outChars[dest].append(rep)
                setBox(char)
        # flush the last word of this column
        addWord()
def plainLine(self, columns):
    """Render a processed line as plain text.

    Used by `Lakhnawi.plainPages()`.

    Parameters
    ----------
    columns: iterable
        An iterable of columns that make up a line.
        Each column is an iterable of spans; spans contain words plus an
        indication of the writing direction for that span.

    Returns
    -------
    string
        All words of all spans, space-separated within a column and
        tab-separated between columns.
    """
    renderedColumns = []
    for spans in columns:
        renderedSpans = []
        for span in spans:
            # span = (direction, words); a word is (letters, punctuation, box)
            renderedSpans.append(" ".join(word[0] + word[1] for word in span[1]))
        renderedColumns.append(" ".join(renderedSpans))
    return "\t".join(renderedColumns)
def tsvHeadLine(self):
    """Produce the header row for the TSV word data.

    See `Lakhnawi.tsvPages()` for the structure of TSV data
    as output format for the extracted text of the Lakhnawi PDF.

    Returns
    -------
    string
        A tab-separated line of field names, terminated by a newline.
    """
    fields = (
        "page", "line", "column", "span", "direction",
        "left", "top", "right", "bottom", "letters", "punc",
    )
    return "\t".join(fields) + "\n"
def tsvLine(self, columns, pageNum, ln):
    """Render a processed line as TSV rows, one row per word.

    Used by `Lakhnawi.tsvPages()`.

    Parameters
    ----------
    columns: iterable
        An iterable of columns that make up a line.
        Each column is an iterable of spans; spans contain words plus an
        indication of the writing direction for that span.
    pageNum: int
        The page number of the page where this line occurs.
    ln: int
        The line number of this line on the page.

    Returns
    -------
    string
        The concatenation of the TSV rows for all words in all spans
        in all columns. Column and span numbers are 1-based; the word box
        coordinates are formatted by `ptRep`.
    """
    rows = []
    for (colIndex, spans) in enumerate(columns):
        for (spanIndex, (direction, words)) in enumerate(spans):
            for (letters, punc, (left, top, right, bottom)) in words:
                fields = (
                    pageNum,
                    ln,
                    colIndex + 1,
                    spanIndex + 1,
                    direction,
                    ptRep(left),
                    ptRep(top),
                    ptRep(right),
                    ptRep(bottom),
                    letters,
                    punc,
                )
                rows.append("\t".join(str(field) for field in fields) + "\n")
    return "".join(rows)
def htmlLine(self, columns, prevMulti, isLast):
    """Outputs a processed line as HTML.

    Used by `Lakhnawi.htmlPages()`.

    Multi-column lines are rendered as rows of a `<table class="linecols">`;
    single-column lines become plain `<p class="r">` paragraphs. The table
    element is opened/closed lazily based on whether the *previous* line was
    multi-column, so consecutive multi-column lines share one table.

    Parameters
    ----------
    columns: iterable
        An iterable of columns that make up a line.
        Each column is an iterable of spans.
        Spans contain words plus an indication of the writing direction
        for that span.
    prevMulti: boolean
        Whether the preceding line has multiple columns.
    isLast: boolean
        Whether this line is the last line on the page.

    Returns
    -------
    string
        The concatenation of the HTML fragments for all words in all spans
        in all columns.
    """
    showSpaces = self.showSpaces
    result = []
    nCols = len(columns)
    multi = nCols > 1
    # Open or close the surrounding table when the multi-column state flips
    # between the previous line and this one.
    if prevMulti and not multi:
        result.append("</table>\n")
    elif not prevMulti and multi:
        result.append("""<table class="linecols">\n""")
    if multi:
        result.append("<tr>\n")
    for spans in columns:
        result.append(
            f"""\t<td class="cols col{nCols}">""" if multi else """<p class="r">"""
        )
        for (textDir, words) in spans:
            # One <span> per run of same-direction text ("l" or "r").
            result.append(f"""<span class="{textDir}">""")
            for word in words:
                letters = normalizeD(word[0])
                # The extractor marks private-use glyphs as ⌊xxxx⌋;
                # turn those markers into a highlighting span.
                letters = letters.replace("⌊", """<span class="p">""").replace(
                    "⌋", "</span>"
                )
                if showSpaces:
                    # Debug mode: draw a box around each word's letters.
                    letters = f"""<span class="box">{letters}</span>"""
                punc = word[1]
                if showSpaces:
                    # Debug mode: make word-separating spaces visible.
                    punc = punc.replace(" ", """<span class="sp"> </span>""")
                result.append(f"{letters}{punc}")
            result.append("""</span>""")
        result.append("</td>\n" if multi else "</p>\n")
    if multi:
        result.append("</tr>\n")
        # NOTE(review): reconstructed nesting — the closing </table> is
        # emitted here only when the page's last line is itself
        # multi-column; a trailing single-column line closes the table via
        # the prevMulti branch above. Confirm against upstream formatting.
        if isLast:
            result.append("</table>\n")
    return "".join(result)
def clusterVert(self, data):
    """Cluster characters into lines based on their bounding boxes.

    Most characters on a line have their middle line in approximately the
    same height. But diacritics of characters in that line may occupy
    different heights. Without intervention, these would be clustered on
    separate lines. We take care to cluster them into the same lines as
    their main characters. It involves getting an idea of the regular line
    height, and clustering boxes that fall between the lines with the line
    above or below, whichever is closest.

    The result of the clustering is delivered as a key function, which will
    be used to sort characters.

    Parameters
    ----------
    data: iterable of record
        The character records

    Returns
    -------
    function
        A key function that assigns to each character record a value
        that corresponds to the vertical position of a real line,
        which is a clustered set of characters.
        The information on the vertical clustering of lines
        is delivered in the attributes `Lakhnawi.heights` and
        `Lakhnawi.clusteredHeights`, on a page by page basis.
    """
    pageNum = self.pageNum
    # Histogram of vertical mid-positions: height -> number of characters.
    heights = collections.Counter()
    for char in data:
        k = keyCharV(char)
        heights[k] += 1
    peaks = sorted(heights)
    if len(peaks) > 1:
        nDistances = len(peaks) - 1
        distances = sorted(peaks[i + 1] - peaks[i] for i in range(nDistances))
        # remove the biggest distances if > 50,
        # to prevent outliers pulling the average too high
        for _ in range(2):
            if len(distances) > 1:
                if distances[-1] > 50:
                    distances = distances[0:-1]
        # remove distances < 15, which are much smaller than a line
        distances = [d for d in distances if d > 15]
        nDistances = len(distances)
        # NOTE(review): if every distance is filtered out, nDistances is 0
        # and this divides by zero — presumably real pages always keep at
        # least one inter-line distance; confirm.
        avPeakDist = sum(distances) / nDistances
    # NOTE(review): when len(peaks) <= 1, avPeakDist is never assigned and
    # the next line raises NameError — presumably pages always contain more
    # than one distinct height; confirm against callers.
    peakThreshold = avPeakDist * LINE_CLUSTER_FACTOR
    # Greedy clustering: visit heights by descending frequency (ties broken
    # by position) and attach each height to the first existing cluster
    # centre within peakThreshold; otherwise start a new cluster.
    clusteredHeights = {}
    for (k, n) in sorted(heights.items(), key=lambda x: (-x[1], x[0])):
        added = False
        for kc in clusteredHeights:
            if abs(k - kc) <= peakThreshold:
                clusteredHeights[kc].add(k)
                added = True
                break
        if not added:
            clusteredHeights[k] = {k}
    # Invert the clustering: raw height -> its cluster centre.
    toCluster = {}
    for (kc, ks) in clusteredHeights.items():
        for k in ks:
            toCluster[k] = kc
    # Expose the per-page clustering results on the instance.
    self.heights[pageNum] = heights
    self.clusteredHeights[pageNum] = clusteredHeights

    def clusterKeyCharV(char):
        # Map a character to the centre of the height cluster it belongs to.
        k = keyCharV(char)
        return toCluster[k]

    return clusterKeyCharV
def keyCharV(char):
    """The vertical position of the middle of a character.

    Used to sort the characters of a page in the vertical direction.

    Parameters
    ----------
    char: record
        A character record whose items 1 and 3 are the top and bottom
        coordinates of its bounding box.

    Returns
    -------
    float
        The height of the middle of the character.
    """
    top = char[1]
    bottom = char[3]
    return (top + bottom) / 2
def keyCharH(char):
    """Sort key to sort the characters of a line horizontally.

    Characters whose right edge is closer to the right edge of the page come
    first, so the primary key is minus the (rounded) x coordinate of the
    right edge. Ties are broken by width: diacritics often share a right
    edge with the following letter and must sort before it. Wide private-use
    diacritics are handled by sorting wider characters first; zero-width
    standard diacritics must come before everything, which is achieved by
    keying on 1/width for nonzero widths and 0 for zero widths. Coordinates
    are rounded because nearly-equal fractional right edges would otherwise
    order the pair wrongly.

    Parameters
    ----------
    char: record
        A character record whose items 0 and 2 are the left and right
        coordinates of its bounding box.

    Returns
    -------
    (int, float)
    """
    rightEdge = int(round(char[2]))
    width = abs(int(round(char[2] - char[0])))
    if width:
        widthKey = 1 / width
    else:
        widthKey = 0
    return (-rightEdge, widthKey)
| 2.5625 | 3 |
testapp/tests/test_migrations.py | 18F/django-pg-fts | 22 | 12762390 | <filename>testapp/tests/test_migrations.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import connection
from pg_fts.introspection import PgFTSIntrospection
from django.test import (override_settings, override_system_checks,
TestCase, TransactionTestCase)
from django.utils import six
from django.core.management import call_command
try:
from django.db.backends import TableInfo
table_info = True
except:
table_info = False
__all__ = ('FTSTestBase', 'CreateOperationTestSQL',
'TransactionsMigrationsTest')
class FTSTestBase(TransactionTestCase):
    '''tests for FTS

    Base class adding PostgreSQL-specific assertions about indexes,
    triggers, trigger functions and tables, used by the migration tests
    below. Runs as a TransactionTestCase because migrations issue DDL.
    '''

    # Introspection helper that can list PostgreSQL triggers and functions,
    # which Django's stock introspection does not expose.
    introspection = PgFTSIntrospection()

    def assertIndexExists(self, index, table):
        # Assert that `index` is among the indexes Django reports for `table`.
        with connection.cursor() as cursor:
            self.assertIn(
                index, connection.introspection.get_indexes(cursor, table))

    def assertIndexNotExists(self, index, table):
        # Negative counterpart of assertIndexExists.
        with connection.cursor() as cursor:
            self.assertNotIn(
                index, connection.introspection.get_indexes(cursor, table))

    def assertTriggerExists(self, trigger):
        # Assert that a trigger with this name exists in the database.
        with connection.cursor() as cursor:
            self.assertIn(trigger, self.introspection.get_trigger_list(cursor))

    def assertTriggerNotExists(self, trigger):
        with connection.cursor() as cursor:
            self.assertNotIn(trigger,
                             self.introspection.get_trigger_list(cursor))

    def assertFunctionExists(self, function):
        # Assert that a stored function with this name exists.
        with connection.cursor() as cursor:
            self.assertIn(function,
                          self.introspection.get_functions_list(cursor))

    def assertFunctionNotExists(self, function):
        with connection.cursor() as cursor:
            self.assertNotIn(function,
                             self.introspection.get_functions_list(cursor))

    def assertTableExists(self, table):
        # On Django versions whose introspection returns TableInfo records
        # (see the try/except at module import), wrap the bare table name so
        # membership testing compares like with like.
        if table_info:
            table = TableInfo(name=table, type='t')
        with connection.cursor() as cursor:
            self.assertIn(table,
                          connection.introspection.get_table_list(cursor))

    def assertTableNotExists(self, table):
        if table_info:
            table = TableInfo(name=table, type='t')
        with connection.cursor() as cursor:
            self.assertNotIn(table,
                             connection.introspection.get_table_list(cursor))
class CreateOperationTestSQL(TestCase):
    """Checks the SQL emitted by `sqlmigrate` for the FTS migration
    operations, without touching the database: tsvector column creation,
    GIN index creation, and trigger/function creation — once for a fixed
    ('english') dictionary and once for a per-row dictionary column.
    """

    # single dictionary

    @override_system_checks([])
    @override_settings(MIGRATION_MODULES={"testapp": "testapp.migrations_index"})
    def test_sql_migrate_creates_vector_field(self):
        # Migration 0002 must add a nullable tsvector column and backfill it.
        stdout = six.StringIO()
        call_command('sqlmigrate', 'testapp', '0002', stdout=stdout)
        self.assertIn('"tsvector" tsvector null', stdout.getvalue().lower())
        self.assertIn(
            "UPDATE testapp_tsvectormodel SET tsvector = setweight(to_tsvector('english', COALESCE(title, '')), 'D') || setweight(to_tsvector('english', COALESCE(body, '')), 'D');",
            stdout.getvalue())

    @override_system_checks([])
    @override_settings(MIGRATION_MODULES={"testapp": "testapp.migrations_index"})
    def test_sql_fts_index(self):
        # Migration 0003 must create a GIN index on the tsvector column.
        stdout = six.StringIO()
        call_command('sqlmigrate', 'testapp', '0003', stdout=stdout)
        self.assertIn(
            ('CREATE INDEX testapp_tsvectormodel_tsvector ON '
             'testapp_tsvectormodel USING gin(tsvector);'),
            stdout.getvalue())

    @override_system_checks([])
    @override_settings(MIGRATION_MODULES={"testapp": "testapp.migrations_index"})
    def test_sql_fts_trigger(self):
        # Migration 0004 must create the update function and its trigger.
        # Whitespace is normalized on both sides (''.join(....split())) so
        # only the token stream of the SQL is compared.
        stdout = six.StringIO()
        call_command('sqlmigrate', 'testapp', '0004', stdout=stdout)
        self.assertIn(''.join(
            """
CREATE FUNCTION testapp_tsvectormodel_tsvector_update() RETURNS TRIGGER AS $$
BEGIN
IF TG_OP = 'INSERT' THEN
new.tsvector = setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'D') || setweight(to_tsvector('english', COALESCE(NEW.body, '')), 'D');
END IF;
IF TG_OP = 'UPDATE' THEN
IF NEW.title <> OLD.title OR NEW.body <> OLD.body THEN
new.tsvector = setweight(to_tsvector('english', COALESCE(NEW.title, '')), 'D') || setweight(to_tsvector('english', COALESCE(NEW.body, '')), 'D');
ELSE
new.tsvector = old.tsvector;
END IF;
END IF;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER testapp_tsvectormodel_tsvector_update BEFORE INSERT OR UPDATE ON testapp_tsvectormodel
FOR EACH ROW EXECUTE PROCEDURE testapp_tsvectormodel_tsvector_update();
""".split()),
            ''.join(stdout.getvalue().split())
        )

    # multiple dictionaries

    @override_system_checks([])
    @override_settings(MIGRATION_MODULES={"testapp": "testapp.migrations_multidict"})
    def test_sql_migrate_creates_vector_field_multi(self):
        # Same as the single-dictionary case, but the dictionary comes from a
        # per-row column cast to regconfig.
        stdout = six.StringIO()
        call_command('sqlmigrate', 'testapp', '0002', stdout=stdout)
        self.assertIn('"tsvector" tsvector null', stdout.getvalue().lower())
        self.assertIn(
            "UPDATE testapp_tsvectormodel SET tsvector = setweight(to_tsvector(dictionary::regconfig, COALESCE(title, '')), 'D') || setweight(to_tsvector(dictionary::regconfig, COALESCE(body, '')), 'D');",
            stdout.getvalue())

    @override_system_checks([])
    @override_settings(MIGRATION_MODULES={"testapp": "testapp.migrations_multidict"})
    def test_sql_fts_index_multi(self):
        # NOTE(review): both assertIn calls check the identical string —
        # presumably the second was meant to check a second index; confirm.
        stdout = six.StringIO()
        call_command('sqlmigrate', 'testapp', '0003', stdout=stdout)
        self.assertIn(
            ('CREATE INDEX testapp_tsvectormodel_tsvector ON '
             'testapp_tsvectormodel USING gin(tsvector);'),
            stdout.getvalue())
        self.assertIn(
            ('CREATE INDEX testapp_tsvectormodel_tsvector ON '
             'testapp_tsvectormodel USING gin(tsvector);'),
            stdout.getvalue())

    @override_system_checks([])
    @override_settings(MIGRATION_MODULES={"testapp": "testapp.migrations_multidict"})
    def test_sql_fts_trigger_multi(self):
        # Multi-dictionary trigger: also re-computes the vector when the
        # dictionary column itself changes.
        stdout = six.StringIO()
        call_command('sqlmigrate', 'testapp', '0004', stdout=stdout)
        self.assertIn(''.join(
            """
CREATE FUNCTION testapp_tsvectormodel_tsvector_update() RETURNS TRIGGER AS $$
BEGIN
IF TG_OP = 'INSERT' THEN
new.tsvector = setweight(to_tsvector(NEW.dictionary::regconfig, COALESCE(NEW.title, '')), 'D') || setweight(to_tsvector(NEW.dictionary::regconfig, COALESCE(NEW.body, '')), 'D');
END IF;
IF TG_OP = 'UPDATE' THEN
IF NEW.dictionary <> OLD.dictionary OR NEW.title <> OLD.title OR NEW.body <> OLD.body THEN
new.tsvector = setweight(to_tsvector(NEW.dictionary::regconfig, COALESCE(NEW.title, '')), 'D') || setweight(to_tsvector(NEW.dictionary::regconfig, COALESCE(NEW.body, '')), 'D');
ELSE
new.tsvector = old.tsvector;
END IF;
END IF;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER testapp_tsvectormodel_tsvector_update BEFORE INSERT OR UPDATE ON testapp_tsvectormodel
FOR EACH ROW EXECUTE PROCEDURE testapp_tsvectormodel_tsvector_update();
""".split()),
            ''.join(stdout.getvalue().split())
        )
class TransactionsMigrationsTest(FTSTestBase):
    """Applies the testapp migrations forwards (0001 -> 0005) and back again
    against a live database, asserting at every step that the tsvector
    index, trigger function and trigger exist exactly when they should.
    """

    # available_apps = ["testapp"]

    def _run_migration_cycle(self):
        """Migrate forwards through 0002..0005 and back down to 0001,
        checking index/function/trigger/table presence at each state.

        The migration modules in play are selected by the
        ``MIGRATION_MODULES`` override on the calling test method.
        """
        stdout = six.StringIO()
        # 0002: model table (with tsvector column) exists, but no index yet.
        call_command('migrate', 'testapp', '0002', stdout=stdout)
        self.assertTableExists('testapp_tsvectormodel')
        self.assertIndexNotExists('tsvector',
                                  'testapp_tsvectormodel')
        # 0003: GIN index created; trigger machinery still absent.
        call_command('migrate', 'testapp', '0003', stdout=stdout)
        self.assertIndexExists('tsvector', 'testapp_tsvectormodel')
        self.assertFunctionNotExists('testapp_tsvectormodel_tsvector_update')
        self.assertTriggerNotExists('testapp_tsvectormodel_tsvector_update')
        # 0004: trigger function and trigger created.
        call_command('migrate', 'testapp', '0004', stdout=stdout)
        self.assertFunctionExists('testapp_tsvectormodel_tsvector_update')
        self.assertTriggerExists('testapp_tsvectormodel_tsvector_update')
        # 0005: everything (including the table) removed.
        call_command('migrate', 'testapp', '0005', stdout=stdout)
        self.assertFunctionNotExists('testapp_tsvectormodel_tsvector_update')
        self.assertTriggerNotExists('testapp_tsvectormodel_tsvector_update')
        self.assertTableNotExists('testapp_tsvectormodel')
        # Backwards to 0004: table and trigger machinery restored.
        call_command('migrate', 'testapp', '0004', stdout=stdout)
        self.assertFunctionExists('testapp_tsvectormodel_tsvector_update')
        self.assertTriggerExists('testapp_tsvectormodel_tsvector_update')
        self.assertTableExists('testapp_tsvectormodel')
        # Backwards to 0003: trigger machinery dropped again.
        call_command('migrate', 'testapp', '0003', stdout=stdout)
        self.assertFunctionNotExists('testapp_tsvectormodel_tsvector_update')
        self.assertTriggerNotExists('testapp_tsvectormodel_tsvector_update')
        # Backwards to 0002: index dropped; then all the way down to 0001.
        call_command('migrate', 'testapp', '0002', stdout=stdout)
        self.assertIndexNotExists('tsvector',
                                  'testapp_tsvectormodel')
        call_command('migrate', 'testapp', '0001', stdout=stdout)

    @override_system_checks([])
    @override_settings(MIGRATION_MODULES={"testapp": "testapp.migrations_index"})
    def test_migrate_forwards_backwards(self):
        self._run_migration_cycle()

    @override_system_checks([])
    # BUG FIX: this override previously pointed at
    # "testapp.migrations_index", which made the test an exact duplicate of
    # test_migrate_forwards_backwards and left the multi-dictionary
    # migrations completely unexercised (compare the *_multi tests in
    # CreateOperationTestSQL, which all use migrations_multidict).
    @override_settings(MIGRATION_MODULES={"testapp": "testapp.migrations_multidict"})
    def test_migrate_forwards_backwards_multi(self):
        self._run_migration_cycle()
| 1.992188 | 2 |
src/ffmeta/media.py | stephen-bunn/ffmeta | 0 | 12762391 | # -*- encoding: utf-8 -*-
# Copyright (c) 2021 st37 <<EMAIL>>
# ISC License <https://choosealicense.com/licenses/isc>
"""Contains media parsing utilities."""
from enum import Enum
from pathlib import Path
from typing import Optional
try:
import magic
except ImportError as exc: # pragma: no cover
raise ImportError("libmagic binary not found") from exc
class MediaType(Enum):
    """Describes the available types of media."""

    # Values match the major part of a MIME type, lower-cased
    # (e.g. "audio/mpeg" -> "audio"), so MediaType(prefix) works directly.
    AUDIO = "audio"
    VIDEO = "video"
    IMAGE = "image"
# Default number of bytes read from the head of a file when guessing its
# mimetype (2 KiB — enough for libmagic's signature detection).
DEFAULT_BUFFER_SIZE = 2 ** 11
def get_mimetype(
    filepath: Path,
    buffer_size: Optional[int] = None,
) -> Optional[str]:
    """Guess a file's mimetype via ``libmagic``.

    Args:
        filepath (~pathlib.Path):
            The filepath to guess the mimetype of
        buffer_size (Optional[int]):
            The number of bytes to read in the buffer.
            Defaults to None (falls back to ``DEFAULT_BUFFER_SIZE``).

    Raises:
        FileNotFoundError:
            If the given filepath does not exist

    Returns:
        Optional[str]:
            The guessed mimetype
    """
    if not filepath.is_file():
        raise FileNotFoundError(f"No such file {filepath} exists")

    # A falsy buffer_size (None or 0) falls back to the module default.
    read_size = buffer_size or DEFAULT_BUFFER_SIZE
    with filepath.open("rb") as stream:
        head = stream.read(read_size)
    return magic.from_buffer(head, mime=True)
def get_media_type(
    filepath: Path,
    buffer_size: Optional[int] = None,
) -> Optional[MediaType]:
    """Guess a file's media type (audio, video or image).

    Args:
        filepath (~pathlib.Path):
            The filepath to guess the media type of
        buffer_size (Optional[int]):
            The number of bytes to read in the buffer.
            Defaults to None.

    Raises:
        FileNotFoundError:
            If the given filepath does not exist

    Returns:
        Optional[MediaType]:
            The guessed media type, or None when the mimetype is unknown or
            has no corresponding :class:`MediaType` member
    """
    if not filepath.is_file():
        raise FileNotFoundError(f"No such file {filepath} exists")

    mimetype = get_mimetype(filepath, buffer_size=buffer_size)
    if mimetype is None:
        return None

    # The MediaType values mirror the major ("type/") part of a mimetype.
    major = mimetype.split("/")[0].lower()
    try:
        return MediaType(major)
    except ValueError:
        return None
| 2.40625 | 2 |
ros/src/tl_detector/light_classification/tl_classifier.py | nuaayxy/CarND-Capstone | 0 | 12762392 |
import tensorflow as tf
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageColor
import cv2
import time
from styx_msgs.msg import TrafficLight
class TLClassifier(object):
    """Traffic-light color classifier backed by a frozen SSD detection graph.

    Loads a TensorFlow 1.x frozen inference graph from disk and exposes
    ``get_classification`` to decide whether a camera image shows a red or
    green light (styx_msgs/TrafficLight constants).
    """

    def __init__(self):
        # Last classification result; UNKNOWN until the first call.
        self.current_light = TrafficLight.UNKNOWN
        # Path to the frozen SSD model, resolved relative to the process cwd.
        SSD_GRAPH_FILE = './frozen_inference_graph.pb'
        self.detection_graph = self.load_graph(SSD_GRAPH_FILE)
        # The input placeholder for the image.
        # `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        # The classification of the object (integer id).
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        # NOTE(review): get_classification reads self.category_index, but it
        # is never initialized anywhere in this class — the first call will
        # raise AttributeError unless a subclass or caller sets it. Confirm
        # where the label map is supposed to come from.

    def load_graph(self, graph_file):
        """Loads a frozen inference graph

        Args:
            graph_file (str): path to a serialized GraphDef (.pb) file

        Returns:
            tf.Graph: a new graph with the frozen model imported into it
        """
        graph = tf.Graph()
        with graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(graph_file, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        return graph

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # TODO implement light color prediction
        # Add the batch dimension expected by the detection graph.
        image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)
        # NOTE(review): a brand-new tf.Session is created for every frame,
        # which is very expensive — consider creating the session once in
        # __init__ and reusing it.
        with tf.Session(graph=self.detection_graph) as sess:
            # Actual detection.
            (boxes, scores, classes) = sess.run([self.detection_boxes, self.detection_scores, self.detection_classes],
                                                feed_dict={self.image_tensor: image_np})
        # Remove unnecessary dimensions
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes)
        # confidence_cutoff = 0.8
        # # Filter boxes with a confidence score less than `confidence_cutoff`
        # boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes)
        min_score_thresh = .5
        count = 0   # detections classified as 'Red'
        count1 = 0  # total detections above the score threshold
        # print(scores)
        for i in range(boxes.shape[0]):
            if scores is None or scores[i] > min_score_thresh:
                count1 += 1
                # NOTE(review): classes comes back as float32 after squeeze;
                # indexing category_index with a float key only works if the
                # label map was built with float keys — confirm (usually
                # int(classes[i]) is required).
                class_name = self.category_index[classes[i]]['name']
                # Traffic light thing
                if class_name == 'Red':
                    count += 1
        # print(count)
        # Majority vote: GREEN only when strictly more non-red than red
        # detections; ties (including zero detections) default to RED,
        # the safe choice for a vehicle.
        if count < count1 - count:
            self.current_light = TrafficLight.GREEN
        else:
            self.current_light = TrafficLight.RED
        return self.current_light
| 2.828125 | 3 |
src/1_wrapper/0_method/0.py | ytyaru/Python.curses.Main.20210609114611 | 0 | 12762393 | <filename>src/1_wrapper/0_method/0.py
#!/usr/bin/env python3
# coding: utf8
import os, curses
def main(stdscr):
    # Placeholder curses entry point: curses.wrapper() hands us the
    # initialized screen object; nothing is drawn yet.
    pass


if __name__ == "__main__":
    # Force a 256-color terminal type so curses initializes consistently.
    os.environ['TERM'] = 'xterm-256color'
    # wrapper() performs curses setup/teardown and restores the terminal
    # even if main() raises.
    curses.wrapper(main)
| 1.914063 | 2 |
mysql_test/settings.py | AlbertCQY/AIOps | 3 | 12762394 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# MySQL connection info for the database that will be load-tested
mydb_info = 'testdb'
mydb_ip = '172.16.17.32'
mydb_usr = 'compare'
mydb_pass = '<PASSWORD>'  # placeholder — real credential scrubbed
mydb_port = 3306
mydb_db = 'otpstest'

# OS login info for the host running the MySQL instance under test
os_data_ip = '172.16.17.32'
os_user = 'oracle'
os_pass = '<PASSWORD>'

# Connection info for the MySQL repository (results/metadata) database
my_ip = '172.16.17.32'
my_usr = 'compare'
my_pass = '<PASSWORD>'
my_port = 3306
my_db = 'otpstest'
#
| 1.5625 | 2 |
src/nark/assets.py | shadowmint/python-nark | 0 | 12762395 | # Copyright 2013 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
class Assets():
""" Helper for resolving paths in a convenient manner """
__base = ""
""" The path which is the universal root for this assets object """
def __init__(self, base=""):
""" Base is the path path for the loader; for getcwd() if not provided """
if base == "":
base = os.getcwd()
self.__base = os.path.abspath(base)
def resolve(self, *args):
""" Appropriately resolves a path in the form blah, blah, blah.
Base is attached as the root to this set of path elements.
Raises BadFileException on failure to find path.
"""
rtn = os.path.join(self.__base, *args)
if not self.__exists(rtn):
raise BadFileException("Invalid file: '%s'" % rtn)
return rtn
def new(self, *args):
""" Returns full path to a new file, if its a valid new file.
Base is attached as the root to this set of path elements.
Raises BadFileException on failure to find parent path.
"""
rtn = os.path.join(self.__base, *args)
if len(args) > 1:
parent = args[:-1]
parent = os.path.join(self.__base, *parent)
if not self.__exists(parent):
raise BadFileException("Invalid parent path: '%s'" % rtn)
return rtn
def exists(self, *args):
""" Returns false if the file does not exist """
rtn = os.path.join(self.__base, *args)
if not self.__exists(rtn):
return False
return rtn
def __exists(self, path):
""" Check a path exists """
rtn = os.path.isdir(path) or (os.path.isfile(path) and os.access(path, os.R_OK))
return rtn
def base(self):
""" Returns own base path """
return self.__base
class BadFileException(Exception):
pass
| 2.390625 | 2 |
python/EnglishSpellCorrection/source/StatisticLog.py | smallflyingpig/projects | 31 | 12762396 | # ! /usr/bin/env/ python2
# -*- coding:utf-8 -*-
LOG_PATH = './'
LOG_FILENAME = 'test_doc.log' #'top3_t2_basic_1.log'
def StatisticLog():
    """Parse a spell-correction test log and print, per top-1/2/3 rank,
    the average length of correctly vs. incorrectly corrected words.

    The log is a sequence of 5-line records:
      1. misspelling type (deletion/insertion/replacement/transposition)
      2. the label word followed by further fields (space separated)
      3-4. two lines that are skipped
      5. a Python-literal list of three booleans: top-1/2/3 match flags

    Python 2 only: uses `print` statements below.
    """
    # NOTE(review): the file handle is never closed — consider `with open(...)`.
    data = open(LOG_PATH+LOG_FILENAME, 'rb')
    #data.readline()
    #data.readline()
    misspell_type_list = ['deletion', 'insertion', 'replacement', 'transposition']
    line = ''
    # Accumulators indexed [misspell_type][rank 0..2]:
    # *_add sums word lengths, *_size counts words.
    word_correct_len_add = [[0,0,0], [0,0,0], [0,0,0], [0,0,0]]
    word_correct_len_size = [[0,0,0], [0,0,0], [0,0,0], [0,0,0]]
    word_error_len_add = [[0,0,0], [0,0,0], [0,0,0], [0,0,0]]
    word_error_len_size = [[0,0,0], [0,0,0], [0,0,0], [0,0,0]]
    while True:
        misspell_type = data.readline().replace('\n', '');
        # Any line that is not a known misspelling type terminates the loop
        # (including the empty string at end of file).
        if misspell_type not in misspell_type_list:
            line = misspell_type
            break;
        misspell_type_idx = misspell_type_list.index(misspell_type)
        line = data.readline()
        words = line.split(' ')
        label = ''
        if len(words)<3:
            print('error occur')
            break
        else:
            label = words[0]
        label_len = len(label)
        # Skip the two detail lines of the record.
        data.readline()
        data.readline()
        # NOTE(review): eval on file content executes arbitrary code if the
        # log is untrusted — ast.literal_eval would be safer.
        match = eval(data.readline())
        for idx in range(0, 3):
            if match[idx]:
                word_correct_len_add[misspell_type_idx][idx] += label_len
                word_correct_len_size[misspell_type_idx][idx] += 1
            else:
                word_error_len_add[misspell_type_idx][idx] += label_len
                word_error_len_size[misspell_type_idx][idx] += 1
    # Aggregate over all four misspelling types, keeping the per-rank split.
    top_correct_len_add = [0,0,0]
    top_correct_len_size = [0,0,0]
    top_error_len_add = [0,0,0]
    top_error_len_size = [0,0,0]
    for idx in range(0,4):
        top_correct_len_add[0] += word_correct_len_add[idx][0]
        top_correct_len_add[1] += word_correct_len_add[idx][1]
        top_correct_len_add[2] += word_correct_len_add[idx][2]
        top_error_len_add[0] += word_error_len_add[idx][0]
        top_error_len_add[1] += word_error_len_add[idx][1]
        top_error_len_add[2] += word_error_len_add[idx][2]
        top_correct_len_size[0] += word_correct_len_size[idx][0]
        top_correct_len_size[1] += word_correct_len_size[idx][1]
        top_correct_len_size[2] += word_correct_len_size[idx][2]
        top_error_len_size[0] += word_error_len_size[idx][0]
        top_error_len_size[1] += word_error_len_size[idx][1]
        top_error_len_size[2] += word_error_len_size[idx][2]
    # NOTE(review): divides by zero if any rank bucket is empty — confirm
    # the logs always populate all three ranks.
    print('correct')
    print [float(x)/y for (x,y) in zip(top_correct_len_add, top_correct_len_size)]
    print('error')
    print [float(x)/y for (x,y) in zip(top_error_len_add, top_error_len_size)]
if __name__ == '__main__':
    # Run the log analysis when executed as a script (Python 2 only).
    StatisticLog();
| 3.140625 | 3 |
built-in/PyTorch/Official/nlp/Transformer_for_PyTorch/train_1p.py | Ascend/modelzoo | 12 | 12762397 | <reponame>Ascend/modelzoo<gh_stars>10-100
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
# -------------------------------------------------------------------------
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
import os
import math
import torch
import torch.npu
import time
import ctypes
import sys
import threading
from copy import deepcopy
from utils import distributed_utils, options, utils
from utils.ddp_trainer import DDPTrainer
from utils.meters import StopwatchMeter, TimeMeter
import data
from data import tokenizer, dictionary, data_utils, load_dataset_splits
from models import build_model
import numpy as np
import dllogger as DLLogger
from utils.log_helper import AggregatorBackend, setup_logger
MAX = 2147483647
def _gen_seeds(shape):
return np.random.uniform(1, MAX, size=shape).astype(np.float32)
seed_shape = (32 * 1024 * 12, )
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        # Zero out every statistic (ints are immutable, so shared 0 is fine).
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record *val*, observed *n* times, and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        # Build a format template like '{name} {val:.4f} ({avg:.4f})'
        # and fill it from the instance attributes.
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**vars(self))
class ProgressMeter(object):
    """Formats and prints a one-line progress report for a set of meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print prefix + '[current/total]' followed by every meter, tab-separated."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        for meter in self.meters:
            parts.append(str(meter))
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Pad the running counter to the width of the total batch count,
        # e.g. num_batches=100 -> '[{:3d}/100]'.
        width = len(str(num_batches // 1))
        counter_fmt = '{:' + str(width) + 'd}'
        return '[' + counter_fmt + '/' + counter_fmt.format(num_batches) + ']'
def main(args):
    """Top-level training driver.

    Builds dictionaries/datasets/model/trainer on the assigned NPU, then
    alternates train / validate / checkpoint until the learning rate,
    epoch count, or update count hits its stop condition.
    """
    print(args)
    setup_logger(args)
    # Pin this process to its assigned NPU device.
    loc = 'npu:{}'.format(args.device_id)
    torch.npu.set_device(loc)
    if args.max_tokens is None:
        args.max_tokens = 6000
    torch.manual_seed(args.seed)
    src_dict, tgt_dict = data_utils.load_dictionaries(args)
    # Persist the dictionaries inside every checkpoint written later.
    add_extra_items_to_checkpoint({'src_dict': src_dict, 'tgt_dict': tgt_dict})
    datasets = load_dataset_splits(args, ['train', 'valid', 'test'], src_dict, tgt_dict)
    # Device-resident random-seed tensor consumed by the model.
    seed = _gen_seeds(seed_shape)
    seed = torch.from_numpy(seed)
    seed = seed.to(loc)
    model = build_model(args, seed=seed)
    print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters())))
    # Build trainer
    trainer = DDPTrainer(args, model)
    print('| model {}, criterion {}'.format(args.arch, trainer.criterion.__class__.__name__))
    print('| training on {} NPUs'.format(args.distributed_world_size))
    print('| max sentences per NPU = {}'.format(args.max_sentences))
    epoch_itr = data.EpochBatchIterator(
        dataset=datasets[args.train_subset],
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences_valid,
        max_positions=args.max_positions,
        ignore_invalid_inputs=True,
        required_batch_size_multiple=8,
        seed=args.seed,
        num_shards=1,
        shard_id=0,
        max_positions_num=96,
    )
    # Load the latest checkpoint if one is available
    load_checkpoint(args, trainer, epoch_itr)
    # Train until the learning rate gets too small or model reaches target score
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    # Best-seen metrics, reported once at the end of the run.
    run_summary = {'loss': float('inf'),
                   'val_loss': float('inf'),
                   'speed': 0,
                   'accuracy': 0}
    # max_update
    while lr >= args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
        DLLogger.log(step=trainer.get_num_updates(), data={'epoch': epoch_itr.epoch}, verbosity=0)
        # train for one epoch
        train(args, trainer, datasets, epoch_itr)
        if epoch_itr.epoch % args.validate_interval == 0:
            valid_losses = validate(args, trainer, datasets, valid_subsets)
            DLLogger.log(step=trainer.get_num_updates(), data={'val_loss': valid_losses[0]},
                         verbosity=1)
        # NOTE(review): on epochs where validation is skipped this compares
        # against the previous epoch's loss (or None on the very first
        # epoch if validate_interval > 1) -- confirm intended.
        if valid_losses[0] < run_summary['val_loss']:
            run_summary['val_loss'] = valid_losses[0]
            run_summary['loss'] = valid_losses[0]
            run_summary['speed'] = trainer.throughput_meter.u_avg
        # Only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        # Save checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
    train_meter.stop()
    DLLogger.log(step=[], data=run_summary, verbosity=0)
    DLLogger.log(step='RUN', data={'walltime': train_meter.sum}, verbosity=0)
    print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, datasets, epoch_itr):
    """Train the model for one epoch.

    Gradients are accumulated over ``update_freq`` batches before each
    parameter update. Timing/throughput meters ignore the first 10
    batches to exclude warm-up noise.
    """
    itr = epoch_itr.next_epoch_itr()
    # update parameters every N batches
    if epoch_itr.epoch <= len(args.update_freq):
        update_freq = args.update_freq[epoch_itr.epoch - 1]
    else:
        update_freq = args.update_freq[-1]
    num_batches = len(epoch_itr)
    batch_time = AverageMeter('Time', ':6.3f')
    sentence_s = AverageMeter('Sentence/s', ':6.3f')
    losses = AverageMeter('Loss', ':.4f')
    progress = ProgressMeter(int(num_batches/update_freq),
                             [batch_time, sentence_s, losses],
                             prefix="Epoch: [{}]".format(epoch_itr.epoch))
    print("Update Frequence is :", str(update_freq))
    first_valid = args.valid_subset.split(',')[0]
    max_update = args.max_update or math.inf
    # reset meters
    DLLogger.flush()
    trainer.get_throughput_meter().reset()
    # BUG FIX: initialize the timing anchor before the loop. Previously
    # `end` was only assigned inside the loop (the i < 10 branch), so with
    # update_freq > 10 no assignment ever ran before the first read at
    # i >= 10, raising UnboundLocalError.
    end = time.time()
    for i, sample in enumerate(itr):
        if i < num_batches - 1 and (i + 1) % update_freq > 0:
            # buffer updates according to --update-freq
            loss = trainer.train_step(sample, update_params=False, last_step=(i == len(itr) - 1))
            continue
        else:
            loss = trainer.train_step(sample, update_params=True, last_step=(i == len(itr) - 1))
        # BUG FIX: identity comparison with None (was `loss != None`).
        if loss is not None:
            losses.update(loss)
        if i >= 10:
            # Per-update wall time and sentence throughput since last update.
            t = time.time()
            batch_time.update((t - end)/update_freq)
            sentence_s.update(args.max_sentences/(t-end)*update_freq)
            end = time.time()
        if i < 10:
            end = time.time()
        if i >= 10:
            progress.display(int((i+1)/update_freq))
        # ignore the first mini-batch in words-per-second calculation
        if i == 0:
            trainer.get_throughput_meter().reset()
            for backend in DLLogger.GLOBAL_LOGGER.backends:
                if isinstance(backend, AggregatorBackend):
                    backend._reset_perf_meter('tokens')
                    backend._reset_perf_meter('updates')
                    break
        # Mid epoch checkpoint
        num_updates = trainer.get_num_updates()
        if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0:
            valid_losses = validate(args, trainer, datasets, [first_valid])
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        if (i + 1) % args.log_interval == 0:
            DLLogger.flush()
        if num_updates >= max_update:
            break
    print("End of epoch, batch_size:", args.max_sentences, 'Time: {:.3f}'.format(batch_time.avg), ' Sentence/s@all {:.3f}'.format(
        args.max_sentences / batch_time.avg))
    # Print epoch stats and reset training meters
    DLLogger.log(step=trainer.get_num_updates(), data={'speed': trainer.get_throughput_meter().avg}, verbosity=0)
    DLLogger.flush()
def validate(args, trainer, datasets, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""
    # Reset value iterations counter
    trainer._num_val_iterations = 0
    valid_losses = []
    for subset in subsets:
        if len(subsets) > 1:
            print('Validating on \'{}\' subset'.format(subset))
        # Initialize data iterator
        itr = data.EpochBatchIterator(
            dataset=datasets[subset],
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences_valid,
            max_positions=args.max_positions,
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=8,
            seed=args.seed,
            num_shards=1,
            shard_id=0,
            max_positions_num=1024,
        ).next_epoch_itr(shuffle=False)
        # reset validation loss meters
        DLLogger.flush()
        subset_losses = []
        for sample in itr:
            loss = trainer.valid_step(sample)
            subset_losses.append(loss)
        # NOTE(review): raises ZeroDivisionError if the subset iterator
        # yields no batches -- confirm datasets are always non-empty.
        subset_loss = sum(subset_losses) / len(subset_losses)
        DLLogger.flush()
        valid_losses.append(subset_loss)
        print(f'Validation loss on subset {subset}: {subset_loss}')
    return valid_losses
def save_checkpoint(args, trainer, epoch_itr, val_loss):
    """Write every checkpoint file whose save condition is met.

    Tracks the lowest validation loss seen so far as the function
    attribute ``save_checkpoint.best``. Only the master process saves.
    """
    if args.no_save or not distributed_utils.is_master(args):
        return
    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()
    # Map checkpoint filename -> whether it should be written this call.
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (
        end_of_epoch and not args.no_epoch_checkpoints and
        epoch % args.save_interval == 0
    )
    checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (
        not end_of_epoch and args.save_interval_updates > 0 and
        updates % args.save_interval_updates == 0
    )
    checkpoint_conds['checkpoint_best.pt'] = (
        val_loss is not None and
        (not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
    )
    checkpoint_conds['checkpoint_last.pt'] = True  # keep this last so that it's a symlink
    prev_best = getattr(save_checkpoint, 'best', val_loss)
    if val_loss is not None:
        save_checkpoint.best = min(val_loss, prev_best)
    # NOTE(review): if val_loss has been None on every call so far,
    # save_checkpoint.best is unset and the line below raises
    # AttributeError -- confirm callers always validate before saving.
    extra_state = {
        'best': save_checkpoint.best,
        'train_iterator': epoch_itr.state_dict(),
        'val_loss': val_loss,
    }
    # Include items registered via add_extra_items_to_checkpoint().
    extra_state.update(save_checkpoint.extra_items)
    checkpoints = [os.path.join(args.save_dir, 'checkpoints', fn) for fn, cond in checkpoint_conds.items() if cond]
    if len(checkpoints) > 0:
        for cp in checkpoints:
            trainer.save_checkpoint(cp, extra_state)
    if not end_of_epoch and args.keep_interval_updates > 0:
        # remove old checkpoints; checkpoints are sorted in descending order
        checkpoints = utils.checkpoint_paths(os.path.join(args.save_dir, 'checkpoints'),
                                             pattern=r'checkpoint_\d+_(\d+)\.pt')
        for old_chk in checkpoints[args.keep_interval_updates:]:
            os.remove(old_chk)
def add_extra_items_to_checkpoint(dict):
    """Merge *dict* into the extra items stored on ``save_checkpoint``.

    NOTE: the parameter name shadows the built-in ``dict``; it is kept
    unchanged for backward compatibility with keyword callers.
    """
    extra = getattr(save_checkpoint, 'extra_items', None)
    if extra is None:
        extra = {}
        save_checkpoint.extra_items = extra
    extra.update(dict)
def load_checkpoint(args, trainer, epoch_itr):
    """Load a checkpoint and replay dataloader to match."""
    os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True)
    checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)
    # Silently starts from scratch when the restore file does not exist.
    if os.path.isfile(checkpoint_path):
        extra_state = trainer.load_checkpoint(checkpoint_path)
        if extra_state is not None:
            # replay train iterator to match checkpoint
            epoch_itr.load_state_dict(extra_state['train_iterator'])
            print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
                checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))
            # Re-sync the LR scheduler with the restored epoch/update count.
            trainer.lr_step(epoch_itr.epoch)
            trainer.lr_step_update(trainer.get_num_updates())
            if 'best' in extra_state:
                save_checkpoint.best = extra_state['best']
if __name__ == '__main__':
    # Script entry point: build the CLI parser, resolve architecture
    # defaults, then run the full training loop.
    parser = options.get_training_parser()
    ARGS = options.parse_args_and_arch(parser)
    main(ARGS)
| 1.484375 | 1 |
python-exercise300/071_080.py | sharebook-kr/learningspoons-bootcamp-finance | 9 | 12762398 | <gh_stars>1-10
# 071
# my_variable = ()
# print(type(my_variable))
# print(len(my_variable))
# 072
# movie_rank = ('닥터 스트레인지', '스플릿', '럭키')
# print(movie_rank)
# 073
# a = (1, )
# print(a, type(a))
# 074
# t = (0, 1, 2)
# t[0] = 'a'
# 075
#t = 1, 2, 3, 4
#print(type(t))
# 076
# t = ('a', 'b', 'c')
# t = ('A', 'b', 'c')
# print(t)
# 077
# interest = ('삼성전자', 'LG전자', 'SK Hynix')
# print(list(interest))
# 078
# interest = ['삼성전자', 'LG전자', 'SK Hynix']
# print(tuple(interest))
# 079 튜플 언패킹
# temp = ('apple', 'banana', 'cake')
# a, b, c = temp
# print(a, b, c)
# 080
# a = range(2, 99, 2)
# print(tuple(a)) | 3.078125 | 3 |
djmx/widgets.py | justquick/django-mx-datetime | 0 | 12762399 | from mx.DateTime import DateTimeType
from django.forms import widgets
from django.contrib.admin import widgets as admin_widgets
class DateInput(widgets.DateInput):
    """
    DateInput that returns the date from a mx.DateTime.DateTime instance to hide time information.
    Also gives a bigger input size by default since it can parse larger strings (eg January 1 2000)
    """
    def __init__(self, attrs=None, format=None):
        # Default to a wider text box; only fill in 'size' when the
        # caller has not supplied one.
        if attrs is None:
            attrs = {'size': '25'}
        elif 'size' not in attrs:  # idiom fix: was `not 'size' in attrs`
            attrs['size'] = '25'
        super(DateInput, self).__init__(attrs, format)

    def _format_value(self, value):
        # mx.DateTime instances carry time-of-day information; expose only
        # the date portion. Anything else is passed through unchanged.
        if isinstance(value, DateTimeType):
            return value.date
        return value
class AdminDateWidget(DateInput, admin_widgets.AdminDateWidget):
    """
    Rebuild of the AdminDateWidget to use mx.DateTime DateInput
    """
    # MRO places the local DateInput first, so its _format_value and
    # default 'size' attribute take precedence over the stock admin widget.
| 2.96875 | 3 |
datacamp-data-scientist-master/Python-career-track/13-intro-to-data-visualization-with-python/02-plotting-2d-arrays.py | vitthal10/datacamp | 0 | 12762400 | <reponame>vitthal10/datacamp
# ---------------------------------------------------------------------------
# DataCamp exercise script: plotting 2-D arrays (meshes, contours,
# 2-D histograms, and images). Runs top to bottom; each section shows a
# figure with plt.show().
# NOTE(review): `hp` and `mpg` (used in the hist2d/hexbin sections) and the
# image files are not defined/shipped in this file -- presumably provided
# by the exercise environment; confirm before running standalone.
# ---------------------------------------------------------------------------
# Import numpy and matplotlib.pyplot
import numpy as np
import matplotlib.pyplot as plt
# Generate two 1-D arrays: u, v
u = np.linspace(-2, 2, 41)
v = np.linspace(-1,1,21)
# Generate 2-D arrays from u and v: X, Y
X,Y = np.meshgrid(u,v)
# Compute Z based on X and Y
Z = np.sin(3*np.sqrt(X**2 + Y**2))
# Display the resulting image with pcolor()
plt.pcolor(Z)
plt.show()
# Save the figure to 'sine_mesh.png'
# NOTE(review): savefig after show() typically writes an empty canvas
# (show() flushes the current figure) -- confirm intended order.
plt.savefig("sine_mesh.png")
# Generate a default contour map of the array Z
plt.subplot(2,2,1)
plt.contour(X,Y,Z)
# Generate a contour map with 20 contours
plt.subplot(2,2,2)
plt.contour(X,Y,Z,20)
# Generate a default filled contour map of the array Z
plt.subplot(2,2,3)
plt.contourf(X,Y,Z)
# Generate a default filled contour map with 20 contours
plt.subplot(2,2,4)
plt.contourf(X,Y,Z,20)
# Improve the spacing between subplots
plt.tight_layout()
# Display the figure
plt.show()
# Create a filled contour plot with a color map of 'viridis'
plt.subplot(2,2,1)
plt.contourf(X,Y,Z,20, cmap='viridis')
plt.colorbar()
plt.title('Viridis')
# Create a filled contour plot with a color map of 'gray'
plt.subplot(2,2,2)
plt.contourf(X,Y,Z,20, cmap='gray')
plt.colorbar()
plt.title('Gray')
# Create a filled contour plot with a color map of 'autumn'
plt.subplot(2,2,3)
plt.contourf(X,Y,Z,20, cmap='autumn')
plt.colorbar()
plt.title('Autumn')
# Create a filled contour plot with a color map of 'winter'
plt.subplot(2,2,4)
plt.contourf(X,Y,Z,20, cmap='winter')
plt.colorbar()
plt.title('Winter')
# Improve the spacing between subplots and display them
plt.tight_layout()
plt.show()
# Generate a 2-D histogram
plt.hist2d(hp,mpg,bins=(20,20),range=((40,235),(8,48)))
# Add a color bar to the histogram
plt.colorbar()
# Add labels, title, and display the plot
plt.xlabel('Horse power [hp]')
plt.ylabel('Miles per gallon [mpg]')
plt.title('hist2d() plot')
plt.show()
# Generate a 2d histogram with hexagonal bins
plt.hexbin(hp,mpg,gridsize=(15,12),extent=(40,235,8,48))
# Add a color bar to the histogram
plt.colorbar()
# Add labels, title, and display the plot
plt.xlabel('Horse power [hp]')
plt.ylabel('Miles per gallon [mpg]')
plt.title('hexbin() plot')
plt.show()
# Load the image into an array: img
img = plt.imread("480px-Astronaut-EVA.jpg")
# Print the shape of the image
print(img.shape)
# Display the image
plt.imshow(img)
# Hide the axes
plt.axis("off")
plt.show()
# Load the image into an array: img
img = plt.imread('480px-Astronaut-EVA.jpg')
# Print the shape of the image
print(img.shape)
# Compute the sum of the red, green and blue channels: intensity
intensity = img.sum(axis=2)
# Print the shape of the intensity
print(intensity.shape)
# Display the intensity with a colormap of 'gray'
plt.imshow(intensity,cmap="gray")
# Add a colorbar
plt.colorbar()
# Hide the axes and show the figure
plt.axis('off')
plt.show()
# Load the image into an array: img
img = plt.imread('480px-Astronaut-EVA.jpg')
# Specify the extent and aspect ratio of the top left subplot
plt.subplot(2,2,1)
plt.title('extent=(-1,1,-1,1),\naspect=0.5')
plt.xticks([-1,0,1])
plt.yticks([-1,0,1])
plt.imshow(img, extent=(-1,1,-1,1), aspect=0.5)
# Specify the extent and aspect ratio of the top right subplot
plt.subplot(2,2,2)
plt.title('extent=(-1,1,-1,1),\naspect=1')
plt.xticks([-1,0,1])
plt.yticks([-1,0,1])
plt.imshow(img, extent=(-1,1,-1,1), aspect=1)
# Specify the extent and aspect ratio of the bottom left subplot
plt.subplot(2,2,3)
plt.title('extent=(-1,1,-1,1),\naspect=2')
plt.xticks([-1,0,1])
plt.yticks([-1,0,1])
plt.imshow(img, extent=(-1,1,-1,1), aspect=2)
# Specify the extent and aspect ratio of the bottom right subplot
plt.subplot(2,2,4)
plt.title('extent=(-2,2,-1,1),\naspect=2')
plt.xticks([-2,-1,0,1,2])
plt.yticks([-1,0,1])
plt.imshow(img, extent=(-2,2,-1,1), aspect=2)
# Improve spacing and display the figure
plt.tight_layout()
plt.show()
# Load the image into an array: image
image = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')
# Extract minimum and maximum values from the image: pmin, pmax
pmin, pmax = image.min(), image.max()
print("The smallest & largest pixel intensities are %d & %d." % (pmin, pmax))
# Rescale the pixels: rescaled_image
rescaled_image = 256*(image - pmin) / (pmax - pmin)
print("The rescaled smallest & largest pixel intensities are %.1f & %.1f." %
      (rescaled_image.min(), rescaled_image.max()))
# Display the original image in the top subplot
plt.subplot(2,1,1)
plt.title('original image')
plt.axis('off')
plt.imshow(image)
# Display the rescaled image in the bottom subplot
plt.subplot(2,1,2)
plt.title('rescaled image')
plt.axis('off')
plt.imshow(rescaled_image)
plt.show()
| 3.234375 | 3 |
problems/020.Valid_Parentheses/AC_stack_dict_n.py | subramp-prep/leetcode | 0 | 12762401 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <<EMAIL>uzen[at]gmail.com>
# File: AC_stack_dict_n.py
# Create Date: 2015-03-04 19:47:43
# Usage: AC_stack_dict_n.py
# Descripton:
class Solution:
    # @return a boolean
    def isValid(self, s):
        """Return True iff every bracket in *s* is matched and properly nested."""
        pairs = {')': '(', ']': '[', '}': '{'}
        pending = []
        for ch in s:
            if ch in '([{':
                # Opening bracket: remember it for later matching.
                pending.append(ch)
                continue
            # Closing bracket: must match the most recent opener.
            if not pending or pairs[ch] != pending.pop():
                return False
        return not pending
| 3.359375 | 3 |
software/libraries/micropython-osc/uosc/client.py | BeeHive-org/BeeHive | 3 | 12762402 | # -*- coding: utf-8 -*-
"""Simple OSC client."""
import socket
try:
from ustruct import pack
except ImportError:
from struct import pack
from uosc.common import Bundle, to_frac
# Py2/Py3 compatibility shim: under Python 2 the empty literal "" is a byte
# string, so the text type is `unicode`; under Python 3 `str` is already
# the text type and native byte strings exist.
if isinstance("", bytes):
    have_bytes = False
    unicodetype = unicode # noqa
else:
    have_bytes = True
    unicodetype = str
# Maps Python types (and the singleton values True/False/None) to their
# OSC typetag characters; consulted by create_message() below.
TYPE_MAP = {
    int: "i",
    float: "f",
    bytes: "b",
    bytearray: "b",
    unicodetype: "s",
    True: "T",
    False: "F",
    None: "N",
}
def pack_addr(addr):
    """Pack a (host, port) tuple into the format expected by socket methods."""
    # Already-packed addresses pass through untouched.
    if isinstance(addr, (bytes, bytearray)):
        return addr
    if len(addr) != 2:
        raise NotImplementedError("Only IPv4/v6 supported")
    host, port = addr
    info = socket.getaddrinfo(host, port)
    # getaddrinfo -> [(family, type, proto, canonname, sockaddr), ...];
    # the sockaddr of the first entry is what sendto() wants.
    return info[0][4]
def pack_timetag(t):
    """Pack an OSC timetag into 64-bit binary blob."""
    # to_frac() splits the timestamp into whole seconds and a 32-bit
    # fractional part; both are serialized as big-endian unsigned ints.
    seconds, fraction = to_frac(t)
    return pack(">II", seconds, fraction)
def pack_string(s, encoding="utf-8"):
    """Pack a string into a binary OSC string."""
    if isinstance(s, unicodetype):
        s = s.encode(encoding)
    # OSC strings are ASCII-only per the specification.
    assert all(
        (i if have_bytes else ord(i)) < 128 for i in s
    ), "OSC strings may only contain ASCII chars."
    # Pad with NULs up to the next 4-byte boundary, always at least one.
    length = len(s)
    padded = (length + 4) & ~0x03
    return s + b"\0" * (padded - length)
def pack_blob(b, encoding="utf-8"):
    """Pack a bytes, bytearray or tuple/list of ints into a binary OSC blob."""
    if isinstance(b, (tuple, list)):
        b = bytearray(b)
    elif isinstance(b, unicodetype):
        b = b.encode(encoding)
    length = len(b)
    # 4-byte big-endian size prefix followed by the raw payload...
    blob = pack(">I", length) + bytes(b)
    # ...padded with NULs to the next 4-byte boundary (no minimum pad).
    return blob + b"\0" * (((length + 3) & ~0x03) - length)
def pack_bundle(bundle):
    """Return bundle data packed into a binary string."""
    parts = []
    for item in bundle:
        if isinstance(item, Bundle):
            # Bundles may nest arbitrarily deep.
            item = pack_bundle(item)
        elif isinstance(item, tuple):
            # An (address, *args) tuple is packed as a plain message.
            item = create_message(*item)
        # Each element is preceded by its 32-bit big-endian byte length.
        parts.append(pack(">I", len(item)) + item)
    return b"#bundle\0" + pack_timetag(bundle.timetag) + b"".join(parts)
def pack_midi(val):
    """Pack a 4-item sequence of ints (or 4 bytes) as an OSC MIDI message."""
    assert not isinstance(val, unicodetype), (
        "Value with tag 'm' or 'r' must be bytes, bytearray or a sequence of "
        "ints, not %s" % unicodetype
    )
    # Python 2 only: a plain (byte) str must be converted to ordinals first.
    if not have_bytes and isinstance(val, str):
        val = (ord(c) for c in val)
    return pack("BBBB", *tuple(val))
def create_message(address, *args):
    """Create an OSC message with given address pattern and arguments.
    The OSC types are either inferred from the Python types of the arguments,
    or you can pass arguments as 2-item tuples with the OSC typetag as the
    first item and the argument value as the second. Python objects are mapped
    to OSC typetags as follows:
    * ``int``: i
    * ``float``: f
    * ``str``: s
    * ``bytes`` / ``bytearray``: b
    * ``None``: N
    * ``True``: T
    * ``False``: F
    If you want to encode a Python object to another OSC type, you have to pass
    a ``(typetag, data)`` tuple, where ``data`` must be of the appropriate type
    according to the following table:
    * c: ``str`` of length 1
    * h: ``int``
    * d: ``float``
    * I: ``None`` (unused)
    * m: ``tuple / list`` of 4 ``int``s or ``bytes / bytearray`` of length 4
    * r: same as 'm'
    * t: OSC timetag as as ``int / float`` seconds since the NTP epoch
    * S: ``str``
    """
    assert address.startswith("/"), "Address pattern must start with a slash."
    data = []
    types = [","]
    for arg in args:
        type_ = type(arg)
        if isinstance(arg, tuple):
            # Caller supplied an explicit (typetag, value) pair.
            typetag, arg = arg
        else:
            # Infer the typetag from the Python type, falling back to the
            # value itself for the singletons True / False / None.
            typetag = TYPE_MAP.get(type_) or TYPE_MAP.get(arg)
        # Dispatch on the typetag; tags I, F, N, T carry no payload bytes.
        if typetag in "ifd":
            data.append(pack(">" + typetag, arg))
        elif typetag in "sS":
            data.append(pack_string(arg))
        elif typetag == "b":
            data.append(pack_blob(arg))
        elif typetag in "rm":
            data.append(pack_midi(arg))
        elif typetag == "c":
            data.append(pack(">I", ord(arg)))
        elif typetag == "h":
            data.append(pack(">q", arg))
        elif typetag == "t":
            data.append(pack_timetag(arg))
        elif typetag not in "IFNT":
            raise TypeError("Argument of type '%s' not supported." % type_)
        types.append(typetag)
    # Message = padded address + padded typetag string + argument data.
    return pack_string(address) + pack_string("".join(types)) + b"".join(data)
class Client:
    """Minimal UDP OSC client; usable as a context manager."""
    def __init__(self, host, port=None):
        # Accepts Client((host, port)), Client(host, port) or Client(port);
        # the last form defaults the host to localhost.
        if port is None:
            if isinstance(host, (list, tuple)):
                host, port = host
            else:
                port = host
                host = "127.0.0.1"
        self.dest = pack_addr((host, port))
        self.sock = None
    def send(self, msg, *args, **kw):
        # Send an OSC message or Bundle; the 'dest' kwarg overrides the
        # destination configured at construction time.
        dest = pack_addr(kw.get("dest", self.dest))
        if not self.sock:
            # The UDP socket is created lazily on first send.
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if isinstance(msg, Bundle):
            msg = pack_bundle(msg)
        elif args or isinstance(msg, unicodetype):
            # A string address (optionally with args) is packed here;
            # pre-packed bytes are sent as-is.
            msg = create_message(msg, *args)
        self.sock.sendto(msg, dest)
    def close(self):
        if self.sock:
            self.sock.close()
            self.sock = None
    def __enter__(self):
        return self
    def __exit__(self, *args):
        # Always release the socket when leaving a with-block.
        self.close()
def send(dest, address, *args):
    """One-shot convenience wrapper: open a Client, send, and close it."""
    with Client(dest) as osc_client:
        osc_client.send(address, *args)
| 2.546875 | 3 |
tp_screening/admin/ineligiblie_subject_admin.py | MoffatMore/tp-screening | 0 | 12762403 | '''
Created on Jun 21, 2018
@author: moffat
'''
from django.contrib import admin
from ..models import IneligibleSubject
from ..admin_site import tp_screening_admin
class ChoiceInline(admin.TabularInline):
    # Tabular inline rows for IneligibleSubject records; not referenced
    # elsewhere in this module.
    model = IneligibleSubject
@admin.register(IneligibleSubject, site=tp_screening_admin)
class IneligibleSubjectAdmin(admin.ModelAdmin):
    """Admin view of subjects that failed screening eligibility."""
    fieldsets = (
        ('Screening Enrollment Loss', {
            'fields': ('screening_identifier',
                       'report_datetime',
                       'reasons_ineligible'),
        }),
    )
    list_display = ('screening_identifier', 'report_datetime',
                    'reasons_ineligible')
    # list_filter = ['screening_identifier', 'eligible']
    # search_fields = ['screening_identifier']
    # Identifier and reasons are system-generated; keep them read-only.
    readonly_fields = ('screening_identifier',
                       'reasons_ineligible',)
# NOTE(review): the model is registered twice -- on tp_screening_admin via
# the decorator above AND on the default admin site here; confirm both
# registrations are intentional.
admin.site.register(IneligibleSubject, IneligibleSubjectAdmin)
| 1.890625 | 2 |
Evaluate/Evaluate_AE.py | melodist/MELTNET | 9 | 12762404 | <gh_stars>1-10
""" Test Autoencoder
1. Extract Features using trained network
2. Using K-means to classify the patches
3. Merging Patches
"""
import tensorflow as tf
import numpy as np
from Network import NetworkKeras
import cv2
import os
import sys
import time
from datetime import datetime
from Evaluate import ImageProcessing
from Extraction import PatchExtraction
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
def test_model(path_model, path_image, ind_CT, ind_PT, num_labels=5):
    """Test model using images in path_image
    This function makes result directory in the path:
    ./Results_yymmdd_HHMMSS'/
    Result directory has 3 subdirectory.
    CT : CT images used for test
    PT : PT images used for test
    Features : It has # of num_labels subdirectory.
    Each subdirectory has segmented result for each slices of patients.
    Input
    ______
    path_model : path of trained model
    path_image : path of images for test
    ind_CT : tuple for winodowing CT image.
    ex) ind_ct = ([width1, width2], [height1, height2])
    ind_PT : tuple for winodowing PT image.
    num_labels : number of clusters for K-means clustering. Default value is 5.
    Output
    ______
    """
    tf.enable_eager_execution()
    time_start = time.time()
    # Extract Features using trained network
    # Load model
    # 17x17 patches are flattened into 289-dim input vectors.
    input_shape = (17 * 17)
    trained_model_CT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_CT.load_weights(path_model + 'CT/')
    trained_model_CT.summary()
    trained_model_PT = NetworkKeras.create_autoencoder(input_shape)
    trained_model_PT.load_weights(path_model + 'PT/')
    trained_model_PT.summary()
    # Make feature extraction model
    # NOTE(review): the intermediate layer names below are tied to the
    # exact graph produced by create_autoencoder -- confirm they match.
    feature_extractor_CT = tf.keras.models.Model(inputs=trained_model_CT.input,
                                                 outputs=trained_model_CT.get_layer('tf_op_layer_l2_normalize').output)
    feature_extractor_PT = tf.keras.models.Model(inputs=trained_model_PT.input,
                                                 outputs=trained_model_PT.get_layer('tf_op_layer_l2_normalize_2').output)
    # Make Results Folder
    now = datetime.now()
    path_result = f"./Results_{now.strftime('%Y%m%d_%H%M%S')}/"
    os.makedirs(path_result)
    # Print Patients Number
    patient_dir = os.listdir(path_image)
    print(f'Patients Number: {len(patient_dir)}')
    for path_patient in patient_dir:
        addr_patient = f'{path_image}/{path_patient}/'
        path_files = path_result + path_patient + '/'
        os.makedirs(path_files)
        os.makedirs(f'{path_files}CT/')
        os.makedirs(f'{path_files}PT/')
        os.makedirs(f'{path_files}Features/')
        img_CT, img_PT = PatchExtraction.stackImages(addr_patient, ind_CT, ind_PT)
        patches_CT, patches_PT = PatchExtraction.patch_extraction_thres(img_CT, img_PT, 0)
        # Normalize the inputs
        scaler_CT = StandardScaler()
        scaled_CT = scaler_CT.fit_transform(patches_CT)
        scaler_PT = StandardScaler()
        scaled_PT = scaler_PT.fit_transform(patches_PT)
        # Extract Features
        print(f"Extract Features...")
        features_CT = feature_extractor_CT.predict(scaled_CT, steps=1)
        features_PT = feature_extractor_PT.predict(scaled_PT, steps=1)
        # CT and PT embeddings are concatenated per patch before clustering.
        features = np.hstack((features_CT, features_PT))
        # Using K-means
        print(f"K-means Clustering...")
        model_k_means = KMeans(n_clusters=num_labels, random_state=0)
        model_k_means.fit(features)
        # Merging Patches
        # Patch grid geometry: num_x * num_y patches per slice, 5-px stride.
        num_x = 44
        num_y = 30
        stride = 5
        label_predict = model_k_means.fit_predict(features)
        # One row of labels per slice.
        label_predict_batch = label_predict.reshape((-1, num_y * num_x))
        # Extract File Names
        for root, dirs, files in os.walk(os.path.join(path_image, path_patient)):
            file_list = files
        file_list.sort()
        print(f'Merging Patches...')
        for i, filename in enumerate(file_list):
            # Project each cluster's patch labels back into image space and
            # save one mask image per cluster label.
            mask = ImageProcessing.project_patches(label_predict_batch[i, :], num_labels, num_y, num_x, stride, 5)
            for j in range(num_labels):
                ImageProcessing.save_image(mask[:, :, j], f'./Features/Features_{j}_' + filename, path_files)
            # save original image as reference
            cv2.imwrite(path_files + 'CT/' + filename, img_CT[i, :, :, 0])
            cv2.imwrite(path_files + 'PT/' + filename, img_PT[i, :, :, 0])
    time_end = time.time()
    print(f"Test Finished! Elapsed time: {time_end - time_start:.2f}")
if __name__ == '__main__':
    # CLI: <model_path> <image_path> <ind_CT> <ind_PT> <num_labels>
    # NOTE(review): argv[3]/argv[4] arrive as strings while test_model
    # documents index tuples for ind_CT/ind_PT -- confirm the call format.
    test_model(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], int(sys.argv[5]))
| 2.859375 | 3 |
vang/tfs/tests/test_get_branches.py | bjuvensjo/scripts | 6 | 12762405 | <reponame>bjuvensjo/scripts<filename>vang/tfs/tests/test_get_branches.py
from unittest.mock import call, patch
import pytest
from pytest import raises
from vang.tfs.get_branches import get_branches, get_repo_branches, main, parse_args
def test_get_repo_branches():
    """get_repo_branches should hit the TFS refs endpoint and return 'value'."""
    with patch(
            'vang.tfs.get_branches.call',
            return_value={
                'value': [{
                    'name':
                    'refs/heads/develop',
                    'objectId':
                    '071bd4a8b19c37b2c5290b127c787f8acd52272e',
                    'url':
                    'remoteUrl',
                    'statuses': []
                }],
                'count':
                1
            },
            autospec=True) as mock_call:
        assert [{
            'name': 'refs/heads/develop',
            'objectId': '071bd4a8b19c37b2c5290b127c787f8acd52272e',
            'url': 'remoteUrl',
            'statuses': []
        }] == get_repo_branches('organisation', 'project', 'repository')
        # Verify the exact REST path (including API version) that was called.
        assert [
            call('/organisation/project/_apis/git/'
                 'repositories/repository/refs/heads?includeStatuses=true'
                 '&api-version=3.2')
        ] == mock_call.mock_calls
def test_get_branches():
    """get_branches resolves repos from organisations/projects/repos and
    strips the 'refs/heads/' prefix; names=True returns names only."""
    assert [] == get_branches(None)
    assert [] == get_branches([])
    with patch(
            'vang.tfs.get_branches.get_repos',
            return_value=['organisation/project/repo'],
            autospec=True) as mock_get_repos:
        with patch(
                'vang.tfs.get_branches.get_repo_branches',
                return_value=[{
                    'name':
                    'refs/heads/develop',
                    'objectId':
                    '071bd4a8b19c37b2c5290b127c787f8acd52272e',
                    'url':
                    'remoteUrl',
                    'statuses': []
                }],
                autospec=True) as mock_get_repo_branches:
            # Lookup by organisation goes through get_repos.
            assert [('organisation/project/repo', [{
                'name':
                'develop',
                'objectId':
                '071bd4a8b19c37b2c5290b127c787f8acd52272e',
                'statuses': [],
                'url':
                'remoteUrl'
            }])] == get_branches(organisations=['organisation'])
            assert [call(organisations=['organisation'],
                         repo_specs=True)] == mock_get_repos.mock_calls
            assert [call('organisation', 'project',
                         'repo')] == mock_get_repo_branches.mock_calls
            mock_get_repos.reset_mock()
            mock_get_repo_branches.reset_mock()
            # Lookup by project also goes through get_repos.
            assert [('organisation/project/repo', [{
                'name':
                'develop',
                'objectId':
                '071bd4a8b19c37b2c5290b127c787f8acd52272e',
                'statuses': [],
                'url':
                'remoteUrl'
            }])] == get_branches(projects=['organisation/project'])
            assert [call(projects=['organisation/project'],
                         repo_specs=True)] == mock_get_repos.mock_calls
            assert [call('organisation', 'project',
                         'repo')] == mock_get_repo_branches.mock_calls
            mock_get_repos.reset_mock()
            mock_get_repo_branches.reset_mock()
            # Explicit repo specs skip get_repos entirely.
            assert [('organisation/project/repo', [{
                'name':
                'develop',
                'objectId':
                '071bd4a8b19c37b2c5290b127c787f8acd52272e',
                'statuses': [],
                'url':
                'remoteUrl'
            }])] == get_branches(repos=['organisation/project/repo'])
            assert [] == mock_get_repos.mock_calls
            assert [call('organisation', 'project',
                         'repo')] == mock_get_repo_branches.mock_calls
            # names=True returns only the branch-name strings.
            assert [
                ('organisation/project/repo', ['develop']),
            ] == get_branches(
                repos=['organisation/project/repo'], names=True)
@pytest.mark.parametrize("args", [
    '',
    '-o o -p -p',
    '-o o -r -r',
    '-p -p -r r',
])
def test_parse_args_raises(args):
    """Empty or mutually-exclusive option combinations must exit with an error."""
    with raises(SystemExit):
        parse_args(args.split(' ') if args else args)
@pytest.mark.parametrize("args, expected", [
    [
        '-o o1 o2',
        {
            'names': False,
            'organisations': ['o1', 'o2'],
            'projects': None,
            'repos': None
        }
    ],
    [
        '-p p1 p2',
        {
            'names': False,
            'organisations': None,
            'projects': ['p1', 'p2'],
            'repos': None
        }
    ],
    [
        '-r r1 r2',
        {
            'names': False,
            'organisations': None,
            'projects': None,
            'repos': ['r1', 'r2']
        }
    ],
    [
        '-o o -n',
        {
            'names': True,
            'organisations': ['o'],
            'projects': None,
            'repos': None
        }
    ],
])
def test_parse_args_valid(args, expected):
    """Each valid option set maps onto the expected parsed namespace."""
    assert expected == parse_args(args.split(' ')).__dict__
def test_main():
    """main() prints one 'repo: branch' line per branch returned."""
    with patch(
            'vang.tfs.get_branches.get_branches',
            return_value=[['r1', ['b1', 'b2']], ['r2', ['b1', 'b2']]],
            autospec=True,
    ) as mock_get_branches:
        with patch('vang.tfs.get_branches.print') as mock_print:
            main('organisations', None, None, False)
            assert [call('organisations', None, None,
                         False)] == mock_get_branches.mock_calls
            assert [
                call('r1: b1'),
                call('r1: b2'),
                call('r2: b1'),
                call('r2: b2')
            ] == mock_print.mock_calls
| 2.25 | 2 |
fbmsgbot/models/attachment.py | ben-cunningham/python-messenger-bot | 3 | 12762406 |
class Button(object):
    """Button object, used for creating button messages"""

    def __init__(self, type=None, title="", payload=""):
        # Each supported button type maps to the request key that carries
        # its payload value.
        key_by_type = {'web_url': 'url', 'postback': 'payload'}
        assert type in key_by_type, "Type %s is not a Button type" % (type,)
        self.title = title
        self.type = type
        self.typekey = key_by_type[type]
        self.payload = payload

    def to_json(self):
        """Serialize the button to the dict shape the Send API expects."""
        return {
            self.typekey: self.payload,
            'title': self.title,
            'type': self.type,
        }
class Element(object):
    """Elements are features of Templates"""

    def __init__(self, title="", subtitle="", image_url="", buttons=None):
        self.title = title
        self.image_url = image_url
        self.subtitle = subtitle
        self.buttons = buttons

    def to_json(self):
        """Serialize the element (and any attached Buttons) to a dict.

        Raises:
            TypeError: if ``buttons`` contains anything that is not a Button.
        """
        # BUG FIX: the original only bound the local ``buttons`` list when
        # self.buttons was truthy, so elements without buttons raised
        # NameError below. Default to an empty list instead.
        buttons = []
        if self.buttons:
            if not all(isinstance(button, Button)
                       for button in self.buttons):
                raise TypeError("Buttons list contained non-type Button")
            buttons = [button.to_json() for button in self.buttons]
        payload = {
            'title': self.title,
            'image_url': self.image_url,
            'subtitle': self.subtitle,
            'buttons': buttons
        }
        return payload
class ReceiptElement(Element):
    """Element variant for receipt templates; adds quantity/price/currency."""
    def __init__(self, quantity=None, price=None,
                 currency="CAD", **kwargs):
        # Remaining kwargs (title, subtitle, image_url, buttons) are
        # forwarded to the Element base class.
        self.kwargs = kwargs
        super(ReceiptElement, self).__init__(**self.kwargs)
        # price is the only mandatory field for a receipt line item.
        if price is None:
            raise ValueError("Incorrect keyword-argument given for type ReceiptElement, needed: price")
        self.quantity = quantity
        self.price = price
        self.currency = currency
    def to_json(self):
        """Serialize to the receipt-element dict shape (no buttons key)."""
        payload = {
            'title': self.title,
            'subtitle': self.subtitle,
            'quantity': self.quantity,
            'price': self.price,
            'currency': self.currency,
            'image_url': self.image_url
        }
        return payload
| 3.359375 | 3 |
bokchoi/__init__.py | TimNooren/buzz | 3 | 12762407 | <reponame>TimNooren/buzz
from bokchoi.bokchoi import Bokchoi
from bokchoi.config import Config | 0.90625 | 1 |
tools/c7n_azure/tests_azure/test_actions_autotag-base.py | dnouri/cloud-custodian | 1 | 12762408 | <filename>tools/c7n_azure/tests_azure/test_actions_autotag-base.py<gh_stars>1-10
# Copyright 2019 Microsoft Corporation
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import copy
from . import tools_tags as tools
from azure.mgmt.monitor.models import EventData
from .azure_common import BaseTest
from c7n.resources import load_resources
from c7n_azure.actions.tagging import AutoTagDate
from mock import Mock
class ActionsAutotagBaseTest(BaseTest):
    """Tests AutoTagDate._get_first_event against a mocked activity-log client."""
    # ARM resource id of the virtual machine used throughout the tests.
    vm_id = "/subscriptions/ea42f556-5106-4743-99b0-c129bfa71a47/resourcegroups/" \
            "TEST_VM/providers/Microsoft.Compute/virtualMachines/cctestvm"
    # Template activity-log entry; copies with distinct ids are built below.
    event_dict = {
        "caller": "<EMAIL>",
        "id": vm_id + "/events/37bf930a-fbb8-4c8c-9cc7-057cc1805c04/ticks/",
        "operationName": {
            "value": "Microsoft.Compute/virtualMachines/write",
            "localizedValue": "Create or Update Virtual Machine"
        },
        "eventTimestamp": "2019-05-01T15:20:04.8336028Z"
    }
    def __init__(self, *args, **kwargs):
        super(ActionsAutotagBaseTest, self).__init__(*args, **kwargs)
        # Build five events that differ only in their id suffix; the list
        # order stands in for the activity-log ordering.
        self.events = []
        for i in range(5):
            event = EventData.from_dict(self.event_dict)
            event.id = event.id + str(i)
            self.events.append(event)
        load_resources(['azure.vm'])
    def test_get_first_element_resource(self):
        """The oldest (last-listed) event is returned for a VM resource."""
        client_mock = Mock()
        client_mock.activity_logs.list.return_value = self.events
        manager = Mock()
        manager.type = 'vm'
        manager.get_client.return_value = client_mock
        resource = tools.get_resource({})
        base = AutoTagDate(data={'tag': 'test'}, manager=manager)
        base._prepare_processing()
        result = base._get_first_event(resource)
        client_mock.activity_logs.list.assert_called_once()
        assert result == self.events[-1]
    def test_get_first_element_resource_group(self):
        """Same lookup, but with resource-group write events."""
        events = copy.copy(self.events)
        for e in events:
            e.operation_name.value = 'Microsoft.Resources/subscriptions/resourcegroups/write'
        client_mock = Mock()
        client_mock.activity_logs.list.return_value = events
        manager = Mock()
        manager.type = 'resourcegroup'
        manager.get_client.return_value = client_mock
        resource_group = tools.get_resource_group_resource({})
        base = AutoTagDate(data={'tag': 'test'}, manager=manager)
        base._prepare_processing()
        result = base._get_first_event(resource_group)
        client_mock.activity_logs.list.assert_called_once()
        assert result == self.events[-1]
| 1.953125 | 2 |
adsb_arrivals.py | phreakmonkey/ads-b-arrivals | 4 | 12762409 | #!/usr/bin/env python
import datetime
import select
import socket
import time
import errno
from operator import itemgetter
from geopy import distance
import paho.mqtt.publish as publish
MQTT_HOST = 'localhost'  # MQTT broker
ADSB_HOST = 'pi-sdr001'  # Host running dump1090
ADSB_PORT = 30003        # dump1090 BaseStation (SBS-1) CSV feed port
AIRCRAFT = ['A8F94E']  # List of Mode-S HexIDs we want to alert on
LOC = (38.893888, -119.995333)  # KTVL Airport
ARM_ALT = (8000, 10000)  # Aircraft between these altitudes
ARM_NM = 15  # within this distance are considered "ARMed"
ARR_ALT = 7800  # ARMed aircraft that descend below this altitude,
ARR_NM = 8  # and within this distance are assumed to arrive.
LOGFILE = "arrivals.log"  # arrival events are appended here
class Aircraft():
    """Mutable record of the latest state reported for one airframe."""

    def __init__(self, hexid):
        self.hexid = hexid  # Mode-S hex identifier
        # No position/altitude/speed known until the feed reports them.
        for field in ('latlon', 'altitude', 'speed'):
            setattr(self, field, None)
        # Arrival state machine driven by scan(): 0 idle, 1 armed, 2 arrived.
        self.status = 0
        self.timestamp = time.time()  # wall-clock time of last update

    def update(self, attribute, param):
        """Set *attribute* to *param* and refresh the last-seen timestamp."""
        setattr(self, attribute, param)
        self.timestamp = time.time()
class SpeedRecords():
    """Keep the top ground speeds seen, split into three altitude bands."""

    def __init__(self):
        # One leaderboard per band: aircraft id -> fastest speed observed.
        self.low = {}    # below 12000 ft
        self.mid = {}    # 12000-18000 ft
        self.high = {}   # 18000 ft and above
        self.number = 5  # max entries kept per leaderboard

    def check(self, aid, alt, spd):
        """Record speed *spd* for aircraft *aid* at altitude *alt*."""
        # Ignore reports with a missing (falsy) altitude or speed.
        if not alt or not spd:
            return
        if alt < 12000:
            board = self.low
        elif alt < 18000:
            board = self.mid
        else:
            board = self.high
        # Keep the fastest speed seen for this aircraft in this band.
        board[aid] = max(spd, board.get(aid, spd))
        # Evict the slowest entry once the leaderboard overflows.
        if len(board) > self.number:
            ranked = sorted(board.items(), key=itemgetter(1), reverse=True)
            del board[ranked[-1][0]]

    def highscores(self, d):
        """Return (id, speed) pairs of band dict *d*, fastest first."""
        return sorted(d.items(), key=itemgetter(1), reverse=True)
# Module-level state shared by parse()/prune()/scan()/draw().
aircraft = {}  # Mode-S hexid -> Aircraft
records = SpeedRecords()
starttime = datetime.datetime.now()  # used for the uptime line in draw()
def parse(line):
fields = line.split(',')
if len(fields) != 22:
print "Discarding invalid packet [Len: %d]" % len(fields)
return
msg_type = fields[1]
if msg_type == '3' or msg_type == '4':
aircraft_id = fields[4]
alt = fields[11]
speed = fields[12]
lat, lon = fields[14], fields[15]
if aircraft_id not in aircraft:
aircraft[aircraft_id] = Aircraft(aircraft_id)
if alt:
aircraft[aircraft_id].update('altitude', int(alt))
if speed:
aircraft[aircraft_id].update('speed', int(speed))
if lat and lon:
aircraft[aircraft_id].update('latlon', (lat, lon))
def prune():
    """Forget aircraft that have not been heard from for over a minute."""
    stale = [key for key, craft in aircraft.items()
             if time.time() - craft.timestamp > 60]
    for key in stale:
        del aircraft[key]
def scan():
    """Run the arrival state machine over every tracked aircraft.

    Status transitions (see Aircraft.status): 0 -> 1 ("armed") when the
    plane is within ARM_NM of LOC between the ARM_ALT altitudes; 1 -> 2
    ("arrived") when an armed plane descends below ARR_ALT within ARR_NM.
    Arrivals are appended to LOGFILE; aircraft listed in AIRCRAFT also
    publish a last-seen timestamp over MQTT.  Speed records are updated
    for every aircraft with a known altitude and position.
    """
    for k, a in aircraft.items():
        age = time.time() - a.timestamp  # NOTE(review): computed but unused
        if a.altitude and a.latlon:
            records.check(k, a.altitude, a.speed)
            nm = distance.distance(LOC, a.latlon).nm
            if (a.status == 0 and nm < ARM_NM and
                    a.altitude > ARM_ALT[0] and a.altitude < ARM_ALT[1]):
                aircraft[k].status = 1
            elif a.status == 1 and nm < ARR_NM and a.altitude < ARR_ALT:
                if k in AIRCRAFT:
                    # MQTT Message for aircraft of interest:
                    publish.single(topic="lastseen/", payload=str(int(time.time())),
                                   retain=True, hostname=MQTT_HOST)
                aircraft[k].status = 2
                arrstr = '%s %s: Arriving (%s feet at %s nm)' % (
                    time.ctime(), k, a.altitude, nm)
                with open(LOGFILE, 'a') as f:
                    f.write(arrstr + '\n')
def draw():
    """Redraw the terminal dashboard (ANSI clear, then tables).

    Shows uptime, one row per tracked aircraft and the three speed
    leaderboards.  Python 2 print statements.  A distance >= 6000 nm is
    treated as "unknown".
    NOTE(review): distance.distance() is called with a.latlon that may
    still be None for aircraft without a position fix -- confirm geopy
    tolerates that, otherwise this can raise.
    """
    ESC = chr(27)
    CLEAR = '%s[H%s[J' % (ESC, ESC)  # ANSI: cursor home + clear screen
    print '%sUptime: %s' % (CLEAR, str(datetime.datetime.now() - starttime).split('.')[0])
    print 'ID\tDis\tAlt\tSpeed\tAge\tStatus'
    for k, a in aircraft.items():
        nm = distance.distance(LOC, a.latlon).nm
        age = int(time.time() - a.timestamp)
        if nm < 6000:
            print '%s\t%.1f\t%s\t%s\t%s\t%s' % (k, nm, a.altitude, a.speed, age,
                                                a.status)
        else:
            print '%s\tUNK\t%s\t%s\t%s\t%s' % (k, a.altitude, a.speed, age, a.status)
    print '\n---- Speed Records ----'
    print '===== Above FL180 ====='
    for i,s in records.highscores(records.high):
        print '%s\t%s' % (i,s)
    print '\n===== 12000 MSL - 18000 MSL ====='
    for i,s in records.highscores(records.mid):
        print '%s\t%s' % (i,s)
    print '\n===== Below 12000 MSL ====='
    for i,s in records.highscores(records.low):
        print '%s\t%s' % (i,s)
def connect():
    """Connect to the dump1090 feed, retrying every 30s until it succeeds.

    Returns a connected blocking socket.  (Python 2 ``except err, e``
    syntax.)
    NOTE(review): the same socket object is reused after a failed
    connect(); some platforms require a fresh socket per attempt --
    verify on the deployment OS.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    while True:
        try:
            s.connect((socket.gethostbyname(ADSB_HOST), ADSB_PORT))
        except socket.error, e:
            print "Failed to connect: %s Retrying." % e
            time.sleep(30)
            continue
        break
    s.settimeout(None)  # blocking mode
    print "Connected to %s:%s" % (ADSB_HOST, ADSB_PORT)
    return s
def main():
    """Receive loop: read CRLF-delimited SBS-1 lines, parse, scan, draw.

    Buffers partial lines across recv() calls; on EOF or socket error the
    connection is torn down and re-established.
    NOTE(review): prunetime and drawtime are initialised once and never
    reset, so after the first 5s/1s of uptime prune() and draw() run on
    every loop iteration -- presumably they should be reset after each
    call; confirm intent.
    """
    s = connect()
    data = ''
    buf = ''
    buffer_size = 1024
    prunetime = time.time()
    drawtime = 0
    while True:
        try:
            # Accumulate bytes until at least one full CRLF line is buffered.
            if not '\r\n' in data:
                d = s.recv(buffer_size)
                if d == '':
                    # Empty read == remote closed the connection.
                    s.shutdown(2)
                    s.close()
                    print "Connection terminated. Attempting to re-establish."
                    time.sleep(1)
                    s = connect()
                    continue
                data += d
                if not '\r\n' in data:
                    continue
        except socket.error:
            s.shutdown(2)
            s.close()
            print "Connection terminated. Attempting to re-establish."
            time.sleep(1)
            s = connect()
            continue
        # Split off the trailing partial line into buf for the next pass.
        i = data.rfind('\r\n')
        data, buf = data[:i+2], data[i+2:]
        lines = data.split('\r\n')
        lines = filter(None, lines)
        for line in lines:
            parse(line)
        data = buf
        scan()
        if time.time() - prunetime > 5:
            prune()
        if time.time() - drawtime >= 1:
            draw()
if __name__ == '__main__':
main()
| 2.65625 | 3 |
Non Linear Eqn/bisection.py | Abdus-Samee/CSE-218 | 0 | 12762410 | <gh_stars>0
"""
Equation:
x^3 - 2400x^2 - 3x + 2 = 0
x = [0, ]
"""
def bisection(lb, ub, e, iter):
    """Find a root of f(x) = x^3 - 2400x^2 - 3x + 2 by bisection.

    Parameters:
        lb, ub: endpoints of the bracketing interval (f must change sign).
        e:      relative approximate-error tolerance between successive
                midpoints.
        iter:   max number of halvings after the initial step.  (The name
                shadows the builtin ``iter``; kept for interface
                compatibility.)

    Returns the estimated root, or None when the interval does not
    bracket a sign change (a message is printed) or the iteration budget
    runs out before the tolerance is met.

    Fixes vs. the original: the polynomial is no longer duplicated six
    times inline; the relative error uses abs(mid) so a negative midpoint
    no longer makes the tolerance trivially true; mid == 0 no longer
    raises ZeroDivisionError.
    """
    def f(x):
        # The polynomial whose root we seek.
        return x**3 - 2400*x**2 - 3*x + 2

    if f(lb) * f(ub) > 0:
        print("No root found")
        return

    # Initial halving step, mirroring the original algorithm: one
    # bisection performed before the error-checked loop starts.
    prev_mid = (lb + ub) / 2
    product = f(lb) * f(prev_mid)
    if product < 0:
        ub = prev_mid
    elif product > 0:
        lb = prev_mid
    else:
        return prev_mid  # hit the root exactly

    for _ in range(iter):
        mid = (lb + ub) / 2
        product = f(lb) * f(mid)
        if product < 0:
            ub = mid
        elif product > 0:
            lb = mid
        else:
            return mid  # hit the root exactly
        # Relative approximate error between successive midpoints.
        if mid != 0 and abs(mid - prev_mid) / abs(mid) <= e:
            return mid
        prev_mid = mid
# Solve for the molar dissociation on [0, 1] with a 0.5% relative
# tolerance and at most 20 bisection iterations.
ans = bisection(0, 1, 0.005, 20)
if ans is not None:
    print("Molar dissociation: " + str(ans))
| 3.578125 | 4 |
workflows/rdx.py | umd-lhcb/lhcb-ntuples-gen | 0 | 12762411 | #!/usr/bin/env python3
#
# Author: <NAME>
# License: BSD 2-clause
# Last Change: Mon Oct 25, 2021 at 09:41 PM +0200
import sys
import os
import os.path as op
from argparse import ArgumentParser, Action
from os import chdir
from shutil import rmtree
from pyBabyMaker.base import TermColor as TC
sys.path.insert(0, op.dirname(op.abspath(__file__)))
from utils import (
run_cmd_wrapper,
append_path, abs_path, ensure_dir, find_all_input,
aggregate_fltr, aggregate_output, load_yaml_db,
find_year, find_polarity,
generate_step2_name, parse_step2_name,
workflow_compile_cpp, workflow_cached_ntuple
)
#################################
# Command line arguments parser #
#################################
def parse_input():
    """Build and evaluate the command-line interface.

    Positional argument: ``job_name`` (a key of the JOBS table).
    Flag: ``-d/--debug`` to enable debug mode.
    """
    cli = ArgumentParser(description='workflow for R(D(*)).')
    cli.add_argument('job_name', help='specify job name.')
    cli.add_argument('-d', '--debug', action='store_true',
                     help='enable debug mode.')
    return cli.parse_args()
###########
# Helpers #
###########
# Default output-file filter: keep the main Dst/D0 ntuples, drop the
# auxiliary ('__aux') friend ntuples.
rdx_default_fltr = aggregate_fltr(
    keep=[r'^(Dst|D0).*\.root'], blocked=['__aux'])

# Aggregation targets: main ntuples vs. auxiliary friend ntuples.
rdx_default_output_fltrs = {
    'ntuple': rdx_default_fltr,
    'ntuple_aux': aggregate_fltr(keep=['__aux']),
}
def rdx_mc_fltr(decay_mode):
    """Return the output-file filter for an MC decay mode.

    Decay modes with a 'Keep' list in the YAML db get a filter restricted
    to those trees; anything else falls back to the default filter.
    """
    # Re-key the db by 'Filename' since that is what decay_mode matches.
    keep_map = {entry['Filename']: entry['Keep']
                for entry in load_yaml_db().values() if 'Keep' in entry}
    trees = keep_map.get(decay_mode)
    if trees is None:
        return rdx_default_fltr
    return aggregate_fltr(keep=[r'^({}).*\.root'.format('|'.join(trees))])
def rdx_mc_add_info(decay_mode):
    """Return (blocked_trees, decay_id) for an MC decay mode.

    blocked_trees lists the input trees to BLOCK (those not named in the
    mode's 'Keep' entry), or None when the mode has no 'Keep' entry.
    decay_id is the YAML db key for the mode, '0' when unknown.
    """
    tree_dict = {
        'D0': 'TupleBminus/DecayTree',
        'Dst': 'TupleB0/DecayTree'
    }
    raw_db = load_yaml_db()
    # Re-key the db by 'Filename' since that is what decay_mode matches.
    keep_map = {v['Filename']: v['Keep']
                for v in raw_db.values() if 'Keep' in v}
    id_map = {v['Filename']: k for k, v in raw_db.items()}
    decay_id = id_map.get(decay_mode, '0')
    if decay_mode not in keep_map:
        return None, decay_id
    # NOTE: trees to BLOCK, i.e. the known trees NOT kept for this mode.
    blocked = [tree_dict[name] for name in ('D0', 'Dst')
               if name not in keep_map[decay_mode]]
    return blocked, decay_id
######################
# Workflows: helpers #
######################
def workflow_ubdt(input_ntp,
                  trees=['TupleB0/DecayTree', 'TupleBminus/DecayTree'],
                  **kwargs):
    """Produce (or reuse a cached) 'ubdt.root' friend ntuple with the UBDT
    muon-ID branch added to *trees* of *input_ntp*.

    NOTE(review): the mutable list default is shared across calls; it is
    never mutated here, but a tuple would be safer.
    """
    weight_file = abs_path('../run2-rdx/weights_run2_no_cut_ubdt.xml')
    cmd = 'addUBDTBranch {} mu_isMuonTight {} ubdt.root {}'.format(
        input_ntp, weight_file, ' '.join(trees))
    workflow_cached_ntuple(cmd, input_ntp, **kwargs)
    # Clean up the 'weights' scratch directory the tool leaves behind,
    # if any.
    try:
        rmtree('./weights')
    except FileNotFoundError:
        pass
def workflow_hammer(input_ntp,
                    trees=['TupleB0/DecayTree', 'TupleBminus/DecayTree'],
                    **kwargs):
    """Produce (or reuse a cached) 'hammer.root' friend ntuple with HAMMER
    reweighting for each tree, one ReweightRDX command per tree."""
    # Run period is inferred from the year embedded in the filename.
    run = 'run1' if '2011' in input_ntp or '2012' in input_ntp else 'run2'
    cmd = ['ReweightRDX '+input_ntp+' hammer.root '+t+' '+run for t in trees]
    workflow_cached_ntuple(
        cmd, input_ntp, output_ntp='hammer.root', cache_suffix='__aux_hammer',
        **kwargs)
def workflow_pid(input_ntp, pid_histo_folder, config, **kwargs):
    """Produce (or reuse a cached) 'pid.root' friend ntuple with PID
    weights from histograms, keyed by the ntuple's year and polarity."""
    pid_histo_folder = abs_path(pid_histo_folder)
    config = abs_path(config)
    year = find_year(input_ntp)
    polarity = find_polarity(input_ntp)
    # This is in 'scripts' folder!
    cmd = 'apply_histo_weight.py {} {} pid.root -c {} --year {} --polarity {}'.format(
        input_ntp, pid_histo_folder, config, year, polarity)
    workflow_cached_ntuple(
        cmd, input_ntp, output_ntp='pid.root', cache_suffix='__aux_pid',
        **kwargs)
def workflow_data_mc(job_name, inputs,
                     output_dir=abs_path('../gen'),
                     patterns=['*.root'],
                     blocked_patterns=['__aux'],
                     executor=run_cmd_wrapper()
                     ):
    """Common setup for workflow_data/workflow_mc.

    Finds all matching input ntuples and creates the job working
    directory.  Returns ({subjob_name: input_ntuple_path}, workdir,
    executor).
    NOTE(review): the ``executor=run_cmd_wrapper()`` default is evaluated
    once at import time, not per call.
    """
    print('{}==== Job: {} ===={}'.format(TC.BOLD+TC.GREEN, job_name, TC.END))
    # Need to figure out the absolute path
    input_files = find_all_input(inputs, patterns, blocked_patterns)
    # Subjob name = input filename without extension.
    subworkdirs = {op.splitext(op.basename(i))[0]: i
                   for i in input_files}
    # Now ensure the working dir
    workdir = ensure_dir(op.join(output_dir, job_name))
    return subworkdirs, workdir, executor
#############
# Workflows #
#############
def workflow_data(job_name, inputs, input_yml,
                  use_ubdt=True,
                  output_ntp_name_gen=generate_step2_name,
                  output_fltr=rdx_default_output_fltrs,
                  cli_vars=None,
                  blocked_input_trees=None,
                  blocked_output_trees=None,
                  directive_override=None,
                  **kwargs):
    """Step-2 workflow for real data: for each input ntuple, optionally
    add the UBDT friend, generate and compile a babymaker C++ program
    from *input_yml*, run it, and aggregate its outputs.

    Works by chdir-ing into a per-subjob directory; not thread-safe.
    """
    subworkdirs, workdir, executor = workflow_data_mc(
        job_name, inputs, **kwargs)
    chdir(workdir)
    cpp_template = abs_path('../postprocess/cpp_templates/rdx.cpp')

    # Flatten the cli_vars dict into babymaker's 'key:value' CLI form.
    if cli_vars:
        cli_vars = ' '.join([k+':'+v for k, v in cli_vars.items()])

    for subdir, input_ntp in subworkdirs.items():
        print('{}Working on {}...{}'.format(TC.GREEN, input_ntp, TC.END))
        ensure_dir(subdir, make_absolute=False)
        chdir(subdir)  # Switch to the workdir of the subjob

        if use_ubdt:
            # Generate a ubdt ntuple
            workflow_ubdt(input_ntp, executor=executor)
            bm_cmd = 'babymaker -i {} -o baby.cpp -n {} -t {} -f ubdt.root'
        else:
            bm_cmd = 'babymaker -i {} -o baby.cpp -n {} -t {}'
        if cli_vars:
            bm_cmd += ' -V '+cli_vars
        if blocked_input_trees:
            bm_cmd += ' -B '+' '.join(blocked_input_trees)
        if blocked_output_trees:
            bm_cmd += ' -X '+' '.join(blocked_output_trees)
        if directive_override:
            bm_cmd += ' -D '+' '.join([k+':'+v
                                       for k, v in directive_override.items()])

        executor(bm_cmd.format(abs_path(input_yml), input_ntp, cpp_template))
        workflow_compile_cpp('baby.cpp', executor=executor)

        output_suffix = output_ntp_name_gen(input_ntp)
        executor('./baby.exe --{}'.format(output_suffix))

        aggregate_output('..', subdir, output_fltr)
        chdir('..')  # Switch back to parent workdir
def workflow_mc(job_name, inputs, input_yml,
                output_ntp_name_gen=generate_step2_name,
                pid_histo_folder='../run2-rdx/reweight/pid/root-run2-rdx_oldcut',
                config='../run2-rdx/reweight/pid/run2-rdx_oldcut.yml',
                output_fltr=rdx_default_output_fltrs,
                **kwargs):
    """Step-2 workflow for MC: per input ntuple, build HAMMER and PID
    friend ntuples, then generate/compile/run the babymaker program with
    per-decay-mode blocked trees and the MC id injected.

    Works by chdir-ing into a per-subjob directory; not thread-safe.
    """
    subworkdirs, workdir, executor = workflow_data_mc(
        job_name, inputs, **kwargs)
    chdir(workdir)
    cpp_template = abs_path('../postprocess/cpp_templates/rdx.cpp')

    for subdir, input_ntp in subworkdirs.items():
        print('{}Working on {}...{}'.format(TC.GREEN, input_ntp, TC.END))
        ensure_dir(subdir, make_absolute=False)
        chdir(subdir)  # Switch to the workdir of the subjob

        # The decay mode is the third '--'-separated token of the
        # generated output name.
        output_suffix = output_ntp_name_gen(input_ntp)
        decay_mode = output_suffix.split('--')[2]
        blocked_input_trees, decay_id = rdx_mc_add_info(decay_mode)

        # Generate a HAMMER ntuple
        workflow_hammer(input_ntp, executor=executor)
        # Generate PID weights
        workflow_pid(input_ntp, pid_histo_folder, config, executor=executor)

        bm_cmd = 'babymaker -i {} -o baby.cpp -n {} -t {} -f hammer.root pid.root'
        if blocked_input_trees:
            bm_cmd += ' -B '+' '.join(blocked_input_trees)
        bm_cmd += ' -V '+'cli_mc_id:'+decay_id

        executor(bm_cmd.format(abs_path(input_yml), input_ntp, cpp_template))
        workflow_compile_cpp('baby.cpp', executor=executor)
        executor('./baby.exe --{}'.format(output_suffix))

        aggregate_output('..', subdir, output_fltr)
        chdir('..')  # Switch back to parent workdir
#####################
# Production config #
#####################
args = parse_input()
executor = run_cmd_wrapper(args.debug)

# Job name -> callable(job_name).  Each entry wires concrete input ntuple
# locations and a babymaker YAML postprocess config into one of the
# workflows defined above.
JOBS = {
    # Run 2
    'rdx-ntuple-run2-data-oldcut': lambda name: workflow_data(
        name,
        '../ntuples/0.9.5-bugfix/Dst_D0-cutflow_data',
        '../postprocess/rdx-run2/rdx-run2_oldcut.yml',
        executor=executor
    ),
    'rdx-ntuple-run2-mc-demo': lambda name: workflow_mc(
        name,
        '../ntuples/0.9.5-bugfix/Dst_D0-mc/Dst_D0--21_10_08--mc--MC_2016_Beam6500GeV-2016-MagDown-Nu1.6-25ns-Pythia8_Sim09j_Trig0x6139160F_Reco16_Turbo03a_Filtered_11574011_D0TAUNU.SAFESTRIPTRIG.DST.root',
        '../postprocess/rdx-run2/rdx-run2_oldcut.yml',
        executor=executor
    ),
    # Run 2 debug
    'rdx-ntuple-run2-data-oldcut-no-Dst-veto': lambda name: workflow_data(
        name,
        [
            '../ntuples/0.9.4-trigger_emulation/Dst_D0-std',
            '../ntuples/0.9.5-bugfix/Dst_D0-cutflow_data',
        ],
        '../postprocess/rdx-run2/rdx-run2_oldcut.yml',
        executor=executor,
        cli_vars={'cli_no_dst_veto': '100.0'}
    ),
    # Run 2 cutflow
    'rdx-ntuple-run2-data-oldcut-cutflow': lambda name: workflow_data(
        name,
        '../ntuples/0.9.5-bugfix/Dst_D0-cutflow_data',
        '../postprocess/rdx-run2/rdx-run2_oldcut.yml',
        executor=executor,
        cli_vars={'cli_cutflow': 'true'}
    ),
    # Run 1
    'rdx-ntuple-run1-data': lambda name: workflow_data(
        name,
        '../ntuples/0.9.5-bugfix/Dst_D0-std',
        '../postprocess/rdx-run1/rdx-run1.yml',
        use_ubdt=False,
        executor=executor
    ),
    # Reference Run 1
    'ref-rdx-ntuple-run1-data-Dst': lambda name: workflow_data(
        name,
        '../ntuples/ref-rdx-run1/Dst-mix/Dst--21_10_21--mix--all--2011-2012--md-mu--phoebe.root',
        '../postprocess/ref-rdx-run1/ref-rdx-run1-Dst.yml',
        use_ubdt=False,
        output_ntp_name_gen=parse_step2_name,
        executor=executor,
        directive_override={'one_cand_only/enable': 'false'}
    ),
    'ref-rdx-ntuple-run1-data-D0': lambda name: workflow_data(
        name,
        '../ntuples/ref-rdx-run1/D0-mix/D0--21_10_21--mix--all--2011-2012--md-mu--phoebe.root',
        '../postprocess/ref-rdx-run1/ref-rdx-run1-D0.yml',
        use_ubdt=False,
        output_ntp_name_gen=parse_step2_name,
        executor=executor,
        directive_override={'one_cand_only/enable': 'false'}
    ),
}

if args.job_name in JOBS:
    JOBS[args.job_name](args.job_name)
else:
    # NOTE(review): unknown job names print to stdout and exit 0; consider
    # sys.exit(1) so calling shell scripts can detect the failure.
    print('Unknown job name: {}'.format(args.job_name))
| 1.765625 | 2 |
modules/py/scripts/simple_MPI.py | ICHEC/QNLP | 29 | 12762412 | #!/usr/bin/env python
# coding: utf-8
from mpi4py import MPI
from PyQNLPSimulator import PyQNLPSimulator as p
import QNLP as q
import numpy as np
num_qubits = 24
# Create simulator object
use_fusion = False
sim = p(num_qubits, use_fusion)
sim.initRegister()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
val = 0
sim.applyGateX(0)
sim.applyGateH(2)
sim.applyGateH(4)
sim.applyGateX(7)
sim.applyGateX(22)
val = sim.applyMeasurementToRegister(range(num_qubits), True)
print("RANK={} VAL={}".format(rank,val))
comm.Barrier()
"""
# Note, performing operations on rank=0 only will causes failures. The following example will fail.
if rank == 0:
sim.initRegister()
val = sim.applyMeasurementToRegister(range(num_qubits), True)
print(val)
"""
| 2.484375 | 2 |
packyou/py2.py | llazzaro/packyou | 217 | 12762413 | <reponame>llazzaro/packyou<filename>packyou/py2.py
# -*- coding: utf-8 -*-
import imp
import ipdb
import logging
from sys import modules, meta_path
from os import mkdir
from os.path import (
isdir,
abspath,
dirname,
exists,
join,
)
import encodings.idna
import requests
from git import Repo
from packyou import find_module_path_in_cloned_repos
from packyou.utils import walklevel, memoize
MODULES_PATH = dirname(abspath(__file__))
LOGGER = logging.getLogger(__name__)
class GithubLoader(object):
    """
    PEP 302 loader that imports modules from a (cloned) github repo.

    Instantiated by GithubFinder with the candidate filesystem paths of
    the clone.
    """

    def __init__(self, repo_url=None, path=None, username=None, repository_name=None):
        self.path = path                        # list of candidate directories
        self.repo_url = repo_url                # https clone URL
        self.username = username
        self.repository_name = repository_name

    def check_root(self, fullname):
        """
        Sometimes the code is a python package or similar and there is a
        directory which contains all the code.  This method is used to
        search first on the root of the cloned repository for the
        imported module.
        """
        parent, _, module_name = fullname.rpartition('.')
        if self.username and self.repository_name:
            # TODO: make sure every entry of self.path is checked
            cloned_root = join(self.path[0], 'github', self.username, self.repository_name)
            candidate_path = join(cloned_root, module_name)
            if exists(candidate_path):
                return candidate_path
            # NOTE(review): this loop has an empty body and therefore no
            # effect; it looks like an unfinished directory scan.
            for root, dirs, files in walklevel(cloned_root, level=1):
                pass

    def get_source(self, fullname):
        """Return the source text of *fullname*, read from disk."""
        filename = self.get_filename(fullname)
        with open(filename, 'r') as source_file:
            return source_file.read()

    def get_code(self, fullname):
        """Compile and return the code object for *fullname*."""
        source = self.get_source(fullname)
        return compile(source, self.get_filename(fullname), 'exec', dont_inherit=True)

    def get_filename(self, fullname):
        """Map *fullname* to a file: a package __init__.py or a module .py.

        Later entries of self.path win; returns None when nothing exists.
        """
        parent, _, current_module = fullname.rpartition('.')
        filename = None
        LOGGER.debug('Fullname {0} self.path {1}'.format(fullname, self.path))
        for path in self.path:
            package_path = join(path, '__init__.py')
            if exists(package_path):
                filename = package_path
            module_path = '{0}.py'.format(join(path, current_module))
            if exists(module_path):
                filename = module_path
        LOGGER.debug('get_filename({0}) is {1}'.format(fullname, filename))
        return filename

    def is_package(self, fullname):
        """A missing file or a directory is treated as a package."""
        filename = self.get_filename(fullname)
        return not exists(filename) or isdir(filename)

    def get_or_create_module(self, fullname):
        """
        Given a name and a path it will return a module instance
        if found.

        When the module could not be found it will raise ImportError
        """
        LOGGER.info('Loading module {0}'.format(fullname))
        parent, _, module_name = fullname.rpartition('.')
        if fullname in modules:
            LOGGER.info('Found cache entry for {0}'.format(fullname))
            return modules[fullname]

        module = modules.setdefault(fullname, imp.new_module(fullname))
        # Also register the module under its repo-relative dotted name so
        # absolute imports inside the cloned project resolve.
        if len(fullname.strip('.')) > 3:
            absolute_from_root = fullname.split('.', 3)[-1]
            modules.setdefault(absolute_from_root, module)
        if len(fullname.split('.')) == 4:
            # add the root of the project
            modules[fullname.split('.')[-1]] = module

        # required by PEP 302
        module.__file__ = self.get_filename(fullname)
        LOGGER.info('Created module {0} with fullname {1}'.format(self.get_filename(fullname), fullname))
        module.__name__ = fullname
        module.__loader__ = self
        module.__path__ = self.path
        if self.is_package(fullname):
            module.__path__ = self.path
            module.__package__ = fullname
        else:
            module.__package__ = fullname.rpartition('.')[0]
        LOGGER.debug('loading file {0}'.format(self.get_filename(fullname)))
        source = self.get_source(fullname)
        try:
            exec(source, module.__dict__)
        except Exception:
            # Bug fix: this used to drop into an interactive ipdb session
            # (a leftover debugging breakpoint) and then return the
            # half-initialised module anyway.  Per PEP 302, remove the
            # broken entry from sys.modules and let the error propagate.
            modules.pop(fullname, None)
            LOGGER.exception('Executing module {0} failed'.format(fullname))
            raise
        return module

    def clone_github_repo(self):
        """
        Clones a github repo with a username and repository_name
        """
        if not (self.username and self.repository_name):
            return
        repository_local_destination = join(MODULES_PATH, 'github', self.username, self.repository_name)
        if not exists(repository_local_destination):
            Repo.clone_from(self.repo_url, repository_local_destination, branch='master')
            init_filename = join(repository_local_destination, '__init__.py')
            # Make the fresh checkout importable as a package.
            open(init_filename, 'a').close()

    @property
    def project_fullname(self):
        """Dotted import path of the repository root package."""
        return 'packyou.github.{0}.{1}'.format(self.username, self.repository_name)

    def load_module(self, fullname):
        """
        Given a name it will load the module from github.
        When the project is not locally stored it will clone the
        repo from github.
        """
        module = None
        splitted_names = fullname.split('.')
        _, _, module_name = fullname.rpartition('.')
        _, remaining = find_module_path_in_cloned_repos(fullname)
        if 'github' in splitted_names and not remaining:
            self.clone_github_repo()
            if len(splitted_names) == 2:
                # 'packyou.github' itself
                module = self.get_or_create_module(fullname)
            if len(splitted_names) == 3:
                # 'packyou.github.<user>': ensure the user directory is an
                # importable package.
                username_directory = join(MODULES_PATH, 'github', self.username)
                if not exists(username_directory):
                    mkdir(username_directory)
                username_init_filename = join(MODULES_PATH, 'github', self.username, '__init__.py')
                open(username_init_filename, 'a').close()
                module = self.get_or_create_module(fullname)
            if len(splitted_names) >= 4:
                module = self.get_or_create_module(fullname)
        elif self.username and self.repository_name:
            # relative import from project root.
            fullname = 'packyou.github.{0}.{1}.{2}'.format(self.username, self.repository_name, remaining)
            module = self.get_or_create_module(fullname)
        if module:
            modules[fullname] = module
            if remaining is not None:
                modules[remaining] = module
        return module
class GithubFinder(object):
    """Meta-path finder for ``packyou.github.<user>.<repo>`` imports."""

    def __init__(self):
        self.username = None
        self.repository_name = None

    @memoize
    def check_repository_available(self, username, repository_name):
        """
        Sometimes github has a - in the username or repository name.
        The - can't be used in the import statement.

        Returns the first clone URL that answers 200, trying underscore ->
        dash substitutions; raises ImportError when none exists.
        NOTE(review): the both-substituted variant is only tried when the
        *repository* name contains '_'; a username-only '_' never reaches
        it -- confirm whether that is intended.
        NOTE(review): requests.get() is called without a timeout.
        """
        repo_url = 'https://github.com/{0}/{1}.git'.format(username, repository_name)
        response = requests.get(repo_url)
        if response.status_code == 404:
            if '_' in username:
                repo_url = 'https://github.com/{0}/{1}.git'.format(username.replace('_', '-'), repository_name)
                response = requests.get(repo_url)
                if response.status_code == 200:
                    return repo_url
            if '_' in repository_name:
                repo_url = 'https://github.com/{0}/{1}.git'.format(username, repository_name.replace('_', '-'))
                response = requests.get(repo_url)
                if response.status_code == 200:
                    return repo_url
                repo_url = 'https://github.com/{0}/{1}.git'.format(username.replace('_', '-'), repository_name.replace('_', '-'))
                response = requests.get(repo_url)
                if response.status_code == 200:
                    return repo_url
            raise ImportError('Github repository not found.')
        return repo_url

    def find_module_in_cloned_repos(self, fullname):
        # NOTE(review): ``find_module_in_cloned_repos`` is neither imported
        # nor defined in this module (only find_module_path_in_cloned_repos
        # is), so calling this raises NameError; it also passes a loader
        # class that the imported helper does not accept.  Dead/broken code.
        return find_module_in_cloned_repos(fullname, GithubLoader)

    def find_module(self, fullname, path=None):
        """
        Finds a module and returns a module loader when
        the import uses packyou
        """
        LOGGER.info('Finding {0}'.format(fullname))
        partent, _, module_name = fullname.rpartition('.')
        path, _ = find_module_path_in_cloned_repos(fullname)
        LOGGER.debug('FOUND PATH {0}'.format(path))
        try:
            # sometimes the project imported from github does an
            # "import x" (absolute import), this translates to import github...x
            # we try first to do an import x and cache the module in the sys.path.
            # and return None if the imp.find_module was successful.
            # This will allow python finders in the meta_path to do the import, and not packyou
            # loaders.
            if not path:
                imp.find_module(module_name)
                LOGGER.info('Absolute import: {0}. Original fullname {1}'.format(module_name, fullname))
                return None
        except ImportError:
            LOGGER.debug('imp.find_module could not find {0}. this is ussually fine.'.format(module_name))
        if 'packyou.github' in fullname:
            # Parse 'packyou.github.<username>.<repository>...' and build a
            # loader pointed at the (to be) cloned checkout.
            fullname_parts = fullname.split('.')
            repo_url = None
            if len(fullname_parts) >= 3:
                self.username = fullname.split('.')[2]
                if len(fullname_parts) >= 4:
                    if not self.repository_name:
                        LOGGER.debug('FULLNAME -> {0} '.format(fullname))
                        self.repository_name = fullname.split('.')[3]
                    repo_url = self.check_repository_available(self.username, self.repository_name)
                    current_path = dirname(abspath(__file__))
                    repo_path = join(current_path, 'github', self.username, self.repository_name)
                    if repo_path not in path:
                        path.insert(0, repo_path)
            LOGGER.info('Found {0} with path {1}'.format(fullname, path))
            return GithubLoader(repo_url, path, self.username, self.repository_name)
        elif self.username and self.repository_name and path:
            # Relative/absolute import issued from inside an already-found
            # repo: reuse the remembered username/repository.
            LOGGER.info('Fullname {0} does not start with packyou, searching in cloned repos. Result was {1}'.format(fullname, path))
            repo_url = self.check_repository_available(self.username, self.repository_name)
            return GithubLoader(repo_url, path, self.username, self.repository_name)
        LOGGER.info('Not found -> {0}'.format(fullname))
meta_path.append(GithubFinder())
| 2.65625 | 3 |
storybro/__init__.py | MikkoMMM/storybro | 67 | 12762414 | <gh_stars>10-100
from .cli import ep
def main():
    """Entry-point wrapper: delegates to ``ep`` imported from ``.cli``."""
    ep()
| 0.925781 | 1 |
pysd/py_backend/vensim/table2py.py | rogersamso/pysd_dev | 240 | 12762415 | import pandas as pd
import warnings
from ...pysd import read_vensim
from io import open
def read_tabular(table_file, sheetname='Sheet1'):
    """
    Reads a vensim syntax model which has been formatted as a table.

    This is useful in contexts where model building is performed
    without the aid of Vensim.

    Parameters
    ----------
    table_file: .csv, .tab or .xls(x) file
        Table should have columns titled as in the table below

        | Variable | Equation | Units | Min | Max | Comment          |
        | :------- | :------- | :---- | :-- | :-- | :--------------- |
        | Age      | 5        | Yrs   | 0   | inf | How old are you? |
        | ...      | ...      | ...   | ... | ... | ...              |

    sheetname: basestring
        if the model is specified in an excel file, what sheet?

    Returns
    -------
    PySD Model Object

    Notes
    -----
    Creates an intermediate file in vensim `.mdl` syntax, just so that
    the existing vensim parsing machinery can be used.
    """
    if not isinstance(table_file, str):
        raise ValueError('Unknown file or table type')

    extension = table_file.split('.')[-1]
    if extension in ['xls', 'xlsx']:
        # NOTE(review): 'sheetname' is the legacy pandas keyword (renamed
        # to 'sheet_name' in pandas >= 0.21); kept for the pinned version.
        table = pd.read_excel(table_file, sheetname=sheetname)
    elif extension == 'csv':
        table = pd.read_csv(table_file, encoding='UTF-8')
    elif extension == 'tab':
        table = pd.read_csv(table_file, sep='\t', encoding='UTF-8')
    else:
        raise ValueError('Unknown file or table type')

    if not set(table.columns).issuperset({'Variable', 'Equation'}):
        raise ValueError('Table must contain at least columns "Variable" and "Equation"')

    # Default every optional column to '' (with a warning) so the mdl
    # writing below never hits a KeyError.  'Comment' previously had no
    # default and a missing column crashed the write loop.
    for column in ('Units', 'Min', 'Max', 'Comment'):
        if column not in set(table.columns):
            warnings.warn('Column for "{}" not found'.format(column),
                          RuntimeWarning, stacklevel=2)
            table[column] = ''

    # Bug fix: the original `table_file.replace(extension, 'mdl')` replaced
    # the FIRST occurrence of the extension substring anywhere in the path
    # (e.g. 'csv_models/a.csv' -> 'mdl_models/a.csv'); replace the suffix.
    mdl_file = table_file[:-len(extension)] + 'mdl'

    with open(mdl_file, 'w', encoding='UTF-8') as outfile:
        for element in table.to_dict(orient='records'):
            outfile.write(
                "%(Variable)s = \n"
                "\t %(Equation)s \n"
                "\t~\t %(Units)s [%(Min)s, %(Max)s] \n"
                "\t~\t %(Comment)s \n\t|\n\n" % element
            )
        outfile.write(u'\\\---/// Sketch information - this is where sketch stuff would go.')

    return read_vensim(mdl_file)
brambox/boxes/formats.py | thesuperorange/task-conditioned | 331 | 12762416 | #
# Copyright EAVISE
#
from .annotations import annotation_formats
from .detections import detection_formats
__all__ = ['formats', 'annotation_formats', 'detection_formats']

# Combined lookup of every parser, keyed by prefixed format name:
# annotation formats as 'anno_<name>', detection formats as 'det_<name>'.
formats = {}
formats.update({'anno_' + name: fmt for name, fmt in annotation_formats.items()})
formats.update({'det_' + name: fmt for name, fmt in detection_formats.items()})
| 2 | 2 |
groinkbot/__init__.py | GroinkIndustries/GroinkBot | 0 | 12762417 | <filename>groinkbot/__init__.py
from .interface import *
from .service import * | 1.15625 | 1 |
Code Templates/Google.py | cnm06/Competitive-Programming | 994 | 12762418 | f = open('sample-input.txt')
o = open('sample-output.txt', 'w')
t = int(f.readline().strip())
for i in xrange(1, t + 1):
o.write("Case #{}: ".format(i))
n = int(f.readline().strip())
x = [int(j) for j in f.readline().strip().split()]
y = [int(j) for j in f.readline().strip().split()]
o.write("\n")
| 2.84375 | 3 |
dae/dae/gpf_instance/gpf_instance.py | iossifovlab/gpf | 0 | 12762419 | <gh_stars>0
import os
import logging
import pandas as pd
import math
import json
from dae.genome.genomes_db import GenomesDB
from dae.enrichment_tool.background_facade import BackgroundFacade
from dae.gene.weights import GeneWeightsDb
from dae.gene.scores import ScoresFactory
from dae.gene.gene_sets_db import GeneSetsDb
from dae.gene.denovo_gene_sets_db import DenovoGeneSetsDb
from dae.studies.variants_db import VariantsDb
from dae.pheno.pheno_db import PhenoDb
from dae.pheno_browser.db import DbManager
from dae.backends.storage.genotype_storage_factory import \
GenotypeStorageFactory
from dae.configuration.gpf_config_parser import GPFConfigParser
from dae.configuration.schemas.dae_conf import dae_conf_schema
from dae.configuration.schemas.gene_info import gene_info_conf
from dae.configuration.schemas.genomic_scores import genomic_scores_schema
from dae.configuration.schemas.autism_gene_profile import (
autism_gene_tool_config
)
from dae.autism_gene_profile.db import AutismGeneProfileDB
from dae.autism_gene_profile.statistic import AGPStatistic
from dae.utils.helpers import isnan
from dae.utils.dae_utils import cached, join_line
logger = logging.getLogger(__name__)
class GPFInstance(object):
    def __init__(
            self,
            dae_config=None,
            config_file="DAE.conf",
            work_dir=None,
            defaults=None,
            load_eagerly=False):
        """Create a GPF instance.

        When ``dae_config`` is None the configuration is loaded from
        ``config_file`` inside ``work_dir`` (defaulting to $DAE_DB_DIR).
        With ``load_eagerly`` every lazily-cached sub-database is touched
        up front so its construction cost is paid at startup.
        NOTE(review): when ``dae_config`` is passed directly and
        ``work_dir`` is None, ``self.dae_db_dir`` ends up None -- confirm
        callers always supply work_dir in that case.
        """
        if dae_config is None:
            # FIXME Merge defaults with newly-loaded config
            assert not defaults, defaults
            if work_dir is None:
                work_dir = os.environ["DAE_DB_DIR"]
            config_file = os.path.join(work_dir, config_file)
            dae_config = GPFConfigParser.load_config(
                config_file, dae_conf_schema
            )
        self.dae_config = dae_config
        self.dae_db_dir = work_dir
        self.__autism_gene_profile_config = None
        self.load_eagerly = load_eagerly
        if load_eagerly:
            # Touch each cached property purely for the side effect of
            # populating its cache.  NOTE(review): _gene_info_config is
            # listed twice; the duplicate is harmless but redundant.
            self.genomes_db
            self.gene_sets_db
            self._gene_info_config
            self._pheno_db
            self._variants_db
            self._gene_info_config
            self.denovo_gene_sets_db
            self._score_config
            self._scores_factory
            self.genotype_storage_db
            self._background_facade
    @property # type: ignore
    @cached
    def genomes_db(self):
        """Lazily-built GenomesDB for the configured reference genomes."""
        return GenomesDB(
            self.dae_config.dae_data_dir, self.dae_config.genomes_db.conf_file
        )
    @property # type: ignore
    @cached
    def _pheno_db(self):
        """Lazily-built phenotype database."""
        return PhenoDb(dae_config=self.dae_config)
    @property # type: ignore
    @cached
    def _gene_info_config(self):
        """Parsed gene-info configuration (lazily loaded)."""
        logger.debug(
            f"loading gene info config file: "
            f"{self.dae_config.gene_info_db.conf_file}")
        return GPFConfigParser.load_config(
            self.dae_config.gene_info_db.conf_file, gene_info_conf
        )
    @property # type: ignore
    @cached
    def gene_weights_db(self):
        """Gene weights database built from the gene-info configuration."""
        return GeneWeightsDb(self._gene_info_config)
    @property # type: ignore
    @cached
    def _score_config(self):
        """Parsed genomic-scores configuration (lazily loaded)."""
        return GPFConfigParser.load_config(
            self.dae_config.genomic_scores_db.conf_file, genomic_scores_schema
        )
    @property # type: ignore
    @cached
    def _scores_factory(self):
        """Factory for genomic score objects, built from _score_config."""
        return ScoresFactory(self._score_config)
    @property # type: ignore
    @cached
    def genotype_storage_db(self):
        """Factory for the configured genotype storage backends."""
        return GenotypeStorageFactory(self.dae_config)
    @property # type: ignore
    @cached
    def _variants_db(self):
        """Variants database over all genotype studies and groups."""
        return VariantsDb(
            self.dae_config,
            self.genomes_db,
            self.genotype_storage_db,
        )
@property # type: ignore
@cached
def _autism_gene_profile_db(self):
config = None if self._autism_gene_profile_config is None else\
self._autism_gene_profile_config.to_dict()
agpdb = AutismGeneProfileDB(
config,
os.path.join(self.dae_db_dir, "agpdb")
)
return agpdb
def reload(self):
reload_properties = [
"__variants_db",
"_denovo_gene_sets_db",
"_gene_sets_db",
]
for cached_val_name in reload_properties:
setattr(self, cached_val_name, None)
@property # type: ignore
@cached
def _autism_gene_profile_config(self):
agp_config = self.dae_config.autism_gene_tool_config
if agp_config is None or not os.path.exists(agp_config.conf_file):
return None
return GPFConfigParser.load_config(
self.dae_config.autism_gene_tool_config.conf_file,
autism_gene_tool_config
)
@property # type: ignore
@cached
def gene_sets_db(self):
logger.debug("creating new instance of GeneSetsDb")
return GeneSetsDb(
self._gene_info_config, load_eagerly=self.load_eagerly)
@property # type: ignore
@cached
def denovo_gene_sets_db(self):
return DenovoGeneSetsDb(self)
@property # type: ignore
@cached
def _background_facade(self):
return BackgroundFacade(self._variants_db)
def get_genotype_data_ids(self, local_only=False):
return (
self._variants_db.get_all_genotype_study_ids()
+ self._variants_db.get_all_genotype_group_ids()
)
def get_genotype_data(self, genotype_data_id):
genotype_data_study = self._variants_db.get_genotype_study(
genotype_data_id)
if genotype_data_study:
return genotype_data_study
return self._variants_db.get_genotype_group(genotype_data_id)
def get_all_genotype_data(self):
genotype_studies = self._variants_db.get_all_genotype_studies()
genotype_data_groups = self._variants_db.get_all_genotype_groups()
return genotype_studies + genotype_data_groups
def get_genotype_data_config(self, genotype_data_id):
config = self._variants_db.get_genotype_study_config(genotype_data_id)
if config is not None:
return config
return self._variants_db.get_genotype_group_config(
genotype_data_id
)
def register_genotype_data(self, genotype_data):
self._variants_db.register_genotype_data(genotype_data)
# Phenotype data
def get_phenotype_db_config(self):
return self._pheno_db.config
def get_phenotype_data_ids(self):
return self._pheno_db.get_phenotype_data_ids()
def get_phenotype_data(self, phenotype_data_id):
return self._pheno_db.get_phenotype_data(phenotype_data_id)
def get_all_phenotype_data(self):
return self._pheno_db.get_all_phenotype_data()
def get_phenotype_data_config(self, phenotype_data_id):
return self._pheno_db.get_phenotype_data_config(phenotype_data_id)
# Pheno browser
def get_pheno_config(self, study_wrapper):
dbname = study_wrapper.config.phenotype_data
return self._pheno_db.config[dbname]
def has_pheno_data(self, study_wrapper):
return study_wrapper.phenotype_data is not None
def get_instruments(self, study_wrapper):
return study_wrapper.phenotype_data.instruments.keys()
def get_pheno_dbfile(self, study_wrapper):
config = self.get_pheno_config(study_wrapper)
return config.browser_dbfile
def get_pheno_images_url(self, study_wrapper):
config = self.get_pheno_config(study_wrapper)
return config.browser_images_url
def get_measures_info(self, study_wrapper):
dbfile = self.get_pheno_dbfile(study_wrapper)
images_url = self.get_pheno_images_url(study_wrapper)
db = DbManager(dbfile=dbfile)
db.build()
return {
"base_image_url": images_url,
"has_descriptions": db.has_descriptions,
"regression_names": db.regression_display_names,
}
def search_measures(self, study_wrapper, instrument, search_term):
dbfile = self.get_pheno_dbfile(study_wrapper)
db = DbManager(dbfile=dbfile)
db.build()
measures = db.search_measures(instrument, search_term)
for m in measures:
if m["values_domain"] is None:
m["values_domain"] = ""
m["measure_type"] = m["measure_type"].name
m["regressions"] = []
regressions = db.get_regression_values(m["measure_id"]) or []
for reg in regressions:
reg = dict(reg)
if isnan(reg["pvalue_regression_male"]):
reg["pvalue_regression_male"] = "NaN"
if isnan(reg["pvalue_regression_female"]):
reg["pvalue_regression_female"] = "NaN"
m["regressions"].append(reg)
yield {
"measure": m,
}
def has_measure(self, study_wrapper, measure_id):
return study_wrapper.phenotype_data.has_measure(measure_id)
def get_measure_description(self, study_wrapper, measure_id):
measure = study_wrapper.phenotype_data.measures[measure_id]
out = {
"instrument_name": measure.instrument_name,
"measure_name": measure.measure_name,
"measure_type": measure.measure_type.name,
"values_domain": measure.domain,
}
if not math.isnan(measure.min_value):
out["min_value"] = measure.min_value
if not math.isnan(measure.max_value):
out["max_value"] = measure.max_value
return out
def get_regressions(self, study_wrapper):
dataset_config = self.get_genotype_data_config(
study_wrapper.study_id)
pheno_config = self.get_phenotype_db_config()
browser_dbfile = \
pheno_config[dataset_config.phenotype_data].browser_dbfile
db = DbManager(
browser_dbfile)
db.build()
if db is None:
return None
return db.regression_display_names_with_ids
# Genomic scores
def get_genomic_scores(self):
return self._scores_factory.get_scores()
# Gene weights
def has_gene_weight(self, weight_id):
return weight_id in self.gene_weights_db
def get_gene_weight(self, weight_id):
return self.gene_weights_db[weight_id]
def get_all_gene_weights(self):
return self.gene_weights_db.get_gene_weights()
# Gene info config
def get_chromosomes(self):
csvfile = self._gene_info_config.chromosomes.file
reader = pd.read_csv(csvfile, delimiter="\t")
reader["#chrom"] = reader["#chrom"].map(lambda x: x[3:])
col_rename = {"chromStart": "start", "chromEnd": "end"}
reader = reader.rename(columns=col_rename)
cols = ["start", "end", "name", "gieStain"]
reader["start"] = pd.to_numeric(reader["start"], downcast="integer")
reader["end"] = pd.to_numeric(reader["end"], downcast="integer")
reader = (
reader.groupby("#chrom")[cols]
.apply(lambda x: x.to_dict(orient="records"))
.to_dict()
)
return [{"name": k, "bands": v} for k, v in reader.items()]
def get_gene_info_gene_weights(self):
return self._gene_info_config.gene_weights
# Genomes DB
def get_genome(self):
return self.genomes_db.get_genome()
# Common reports
def get_common_report(self, study_id):
study = self.get_genotype_data(study_id)
if study is None or study.is_remote:
return None
try:
common_report_path = study.config.common_report.file_path
if not common_report_path or not os.path.exists(
common_report_path
):
return None
with open(common_report_path, "r") as crf:
common_report = json.load(crf)
return common_report
except AssertionError:
return None
return common_report.to_dict()
def get_all_common_report_configs(self):
configs = []
local_ids = self.get_genotype_data_ids(True)
for gd_id in local_ids:
config = self.get_genotype_data_config(gd_id)
if config.common_report is not None:
configs.append(config.common_report)
return configs
def get_common_report_families_data(self, common_report_id):
genotype_data = GPFInstance.get_genotype_data(self, common_report_id)
if not genotype_data:
return None
data = []
data.append(
[
"familyId",
"personId",
"dadId",
"momId",
"sex",
"status",
"role",
"genotype_data_study",
]
)
families = list(genotype_data.families.values())
families.sort(key=lambda f: f.family_id)
for f in families:
for p in f.members_in_order:
row = [
p.family_id,
p.person_id,
p.dad_id if p.dad_id else "0",
p.mom_id if p.mom_id else "0",
p.sex,
p.status,
p.role,
genotype_data.name,
]
data.append(row)
return map(join_line, data)
# Gene sets
def get_gene_sets_collections(self):
return self.gene_sets_db.collections_descriptions
def has_gene_set_collection(self, gsc_id):
return self.gene_sets_db.has_gene_set_collection(gsc_id)
def get_all_gene_sets(self, collection_id):
return self.gene_sets_db.get_all_gene_sets(collection_id)
def get_gene_set(self, collection_id, gene_set_id):
return self.gene_sets_db.get_gene_set(collection_id, gene_set_id)
def get_denovo_gene_sets(self, datasets):
return self.denovo_gene_sets_db.get_gene_set_descriptions(datasets)
def has_denovo_gene_sets(self):
return len(self.denovo_gene_sets_db) > 0
def get_all_denovo_gene_sets(self, types, datasets):
return self.denovo_gene_sets_db.get_all_gene_sets(types, datasets)
def get_denovo_gene_set(self, gene_set_id, types, datasets):
return self.denovo_gene_sets_db.get_gene_set(
gene_set_id, types, datasets)
# Variants DB
def get_dataset(self, dataset_id):
return self._variants_db.get(dataset_id)
# Enrichment
def get_study_enrichment_config(self, dataset_id):
return self._background_facade.get_study_enrichment_config(dataset_id)
def has_background(self, dataset_id, background_name):
return self._background_facade.has_background(
dataset_id, background_name)
def get_study_background(self, dataset_id, background_name):
return self._background_facade.get_study_background(
dataset_id, background_name)
# AGP
def get_agp_configuration(self):
return self._autism_gene_profile_db.configuration
def get_agp_statistic(self, gene_symbol):
return self._autism_gene_profile_db.get_agp(gene_symbol)
def get_all_agp_statistics(self):
return self._autism_gene_profile_db.get_all_agps()
def _agp_from_table_row(self, row):
config = self._autism_gene_profile_config
gene_symbol = row["symbol_name"]
genomic_scores = dict()
for gs_category in config.genomic_scores:
category_name = gs_category["category"]
genomic_scores[category_name] = dict()
for score in gs_category["scores"]:
score_name = score["score_name"]
full_score_id = f"{category_name}_{score_name}"
genomic_scores[category_name][score_name] = {
"value": row[full_score_id],
"format": score["format"]
}
gene_sets_categories = config.gene_sets
gene_sets = []
for gs_category in gene_sets_categories:
category_name = gs_category["category"]
for gene_set in gs_category["sets"]:
set_id = gene_set["set_id"]
collection_id = gene_set["collection_id"]
full_gs_id = f"{collection_id}_{set_id}"
if row[full_gs_id] == 1:
gene_sets.append(full_gs_id)
variant_counts = {}
for dataset_id, filters in config.datasets.items():
current_counts = dict()
for ps in filters.person_sets:
person_set = ps.set_name
for statistic in filters.statistics:
statistic_id = statistic["id"]
counts = current_counts.get(person_set)
if not counts:
current_counts[person_set] = dict()
counts = current_counts[person_set]
count = row[
f"{dataset_id}_{person_set}_{statistic_id}"
]
rate = row[
f"{dataset_id}_{person_set}_{statistic_id}_rate"
]
counts[statistic_id] = {
"count": count,
"rate": rate
}
variant_counts[dataset_id] = current_counts
return AGPStatistic(
gene_symbol, gene_sets,
genomic_scores, variant_counts
)
def query_all_agp_statistics(
self, symbol_like=None, sort_by=None, order=None):
rows = self._autism_gene_profile_db.query_agps(
None, symbol_like, sort_by, order
)
statistics = list(map(
self._agp_from_table_row,
rows
))
return statistics
def query_agp_statistics(
self, page, symbol_like=None, sort_by=None, order=None):
rows = self._autism_gene_profile_db.query_agps(
page, symbol_like, sort_by, order
)
statistics = list(map(
self._agp_from_table_row,
rows
))
return statistics
# DAE config
def get_selected_genotype_data(self):
return self.dae_config.gpfjs.selected_genotype_data
| 1.726563 | 2 |
netscrypt/development/preliminary/code/basics/client.py | QQuick/Netscrypt | 2 | 12762420 | import netscrypt
# Connect to the local netscrypt server, fetch the remote 'dogs' collection
# and print each dog's name followed by its bark.
with netscrypt.Client('localhost', 6666) as client:
    for dog in client('dogs'):
        print(dog.name)
        print(dog.speak('wraff'))
| 2.734375 | 3 |
posix_checkapi/TRACES/POT/testcv_notify_all.py | JustinCappos/checkapi | 0 | 12762421 | """
Author: <NAME>
Start Date: May 16th, 2009
Purpose: Test cases for the condition variable primitive implementation.
"""
#begin include cv.repy
"""
Author: <NAME>
Start date: May 24th, 2009
Purpose: This module provides condition variable (cv) interface.
Abstract: Conceptually a condition variable is a queue of threads,
associated with a semaphore upon which a thread(s) may wait for some
assertion to become true. Thus each condition variable is associated with
some assertion. While a thread is waiting upon a condition variable, that
thread is not considered to occupy the semaphore, and so other threads may
enter the semaphore to notify the waiting thread(s).
Thread-Safety: Safe to call notify_one()/notify_all()/wait()
concurrently. However, in case you call destroy() make sure this
is a last call for that conditional variable -- otherwise you will
receive an exception about invalid handle.
"""
#begin include semaphore.repy
"""
Author: <NAME>
Start date: May 15th, 2009
Purpose: A simple library that provides a semaphore abstration on top of repy's locks...
"""
#begin include uniqueid.repy
"""
Author: <NAME>
Module: A simple library that provides a unique ID for each call
Start date: November 11th, 2008
This is a really, really simple module, only broken out to avoid duplicating
functionality.
NOTE: This will give unique ids PER FILE. If you have multiple python
modules that include this, they will have the potential to generate the
same ID.
"""
# This is a list to prevent using part of the user's mycontext dict
# (repy programs share `mycontext`, so module-level state keeps the counter
# private).  Index 0 holds the next ID to hand out.
uniqueid_idlist = [0]
# Guards uniqueid_idlist against concurrent increments.
uniqueid_idlock = createlock()
def uniqueid_getid():
  """
  <Purpose>
      Hand out a process-unique integer ID, safely across threads.

  <Arguments>
      None

  <Exceptions>
      None

  <Side Effects>
      Increments the module-level counter.

  <Returns>
      The ID (an integer)
  """
  uniqueid_idlock.acquire(True)
  # A one-element module-level list is used instead of a plain global so we
  # never touch the caller's mycontext dict.
  next_id = uniqueid_idlist[0]
  uniqueid_idlist[0] = next_id + 1
  uniqueid_idlock.release()
  return next_id
#end include uniqueid.repy
# this dictionary stores private data about the semaphores. The format of an
# entry is: {'semcount' = 0, 'blockedlist' = [], 'semlock'=createlock()}.
# semcount is the number of nodes that can do a down before it blocks.
# blockedlist is the set of nodes that are already blocked.
# it will never be true that 'semcount' > 0 AND 'blockedlist' != []
# Keys are opaque handles produced by uniqueid_getid().
semaphore_handle_dict = {}
def semaphore_create():
  """
  <Purpose>
      Allocate a fresh semaphore (count 0, no blocked threads).

  <Arguments>
      None

  <Exceptions>
      None

  <Side Effects>
      None.

  <Returns>
      The semaphore handle
  """
  handle = uniqueid_getid()
  semaphore_handle_dict[handle] = {
      'semcount': 0,
      'blockedlist': [],
      'semlock': createlock(),
  }
  return handle
def semaphore_destroy(semaphorehandle):
  """
  <Purpose>
      Clean up a semaphore that is no longer needed. All currently blocked
      threads will be unblocked. All future uses of the semaphore will fail.

  <Arguments>
      semaphorehandle: The semaphore handle to destroy

  <Exceptions>
      None

  <Side Effects>
      Wakes every thread blocked on the semaphore.

  <Returns>
      True if it cleaned up the semaphore handle, False if the handle was
      already cleaned up
  """
  # Acquire the lock. If this fails, assume the semaphore has already been
  # cleaned up
  try:
    # I intentionally index both of these so that if the handle is removed by
    # another call to semaphore_destroy in the mean time. All calls that
    # acquire the lock need to do this.
    semaphore_handle_dict[semaphorehandle]['semlock'].acquire(True)
  except (IndexError, KeyError):
    return False

  # NOTE: We will release all parties that are blocking on the semaphore...
  # Is this the right thing to do?
  for blockedthreadlock in semaphore_handle_dict[semaphorehandle]['blockedlist']:
    blockedthreadlock.release()

  # I need to get (and release) the lock so that I can unblock anyone waiting
  # to modify the semaphore. (They will then get an error)
  mylock = semaphore_handle_dict[semaphorehandle]['semlock']
  del semaphore_handle_dict[semaphorehandle]

  # BUGFIX: actually release the internal lock, as the comment above intends.
  # Without this, any thread already blocked in acquire() inside
  # semaphore_up()/semaphore_down() would deadlock forever instead of waking
  # up and raising ValueError on the now-missing handle.
  mylock.release()

  return True
def semaphore_up(semaphorehandle):
  """
  <Purpose>
      Increment a sempahore (possibly unblocking a thread)

  <Arguments>
      semaphorehandle: The semaphore handle

  <Exceptions>
      ValueError if the semaphorehandle is invalid.

  <Side Effects>
      None.

  <Returns>
      None
  """
  try:
    # Index the dict on every access (rather than caching the entry) so a
    # concurrent semaphore_destroy() is detected here.
    semaphore_handle_dict[semaphorehandle]['semlock'].acquire(True)
  except (IndexError, KeyError):
    raise ValueError("Invalid or destroyed semaphore handle")

  blocked = semaphore_handle_dict[semaphorehandle]['blockedlist']
  if blocked:
    # Hand the "up" directly to the longest-waiting thread.
    assert(semaphore_handle_dict[semaphorehandle]['semcount'] == 0)
    blocked.pop(0).release()
  else:
    # Nobody is waiting; bank the up in the counter instead.
    semaphore_handle_dict[semaphorehandle]['semcount'] += 1

  semaphore_handle_dict[semaphorehandle]['semlock'].release()
def semaphore_down(semaphorehandle):
  """
  <Purpose>
    Decrement a sempahore (possibly blocking this thread)

  <Arguments>
    semaphorehandle: The semaphore handle

  <Exceptions>
    ValueError if the semaphorehandle is invalid.

  <Side Effects>
    None.

  <Returns>
    None.
  """
  try:
    # I intentionally index both of these so that if the handle is removed by
    # another call to semaphore_destroy in the mean time.   All calls that
    # acquire the lock need to do this.
    semaphore_handle_dict[semaphorehandle]['semlock'].acquire(True)
  except (IndexError, KeyError):
    raise ValueError("Invalid or destroyed semaphore handle")

  # If the semaphore count is 0, we should block.   The list is a queue, so
  # we should append a lock for ourselves to the end.
  if semaphore_handle_dict[semaphorehandle]['semcount'] == 0:
    # get a lock for us and do an acquire so that the next acquire will block.
    mylock = createlock()
    mylock.acquire(True)

    semaphore_handle_dict[semaphorehandle]['blockedlist'].append(mylock)
    # release the semaphore lock...
    # (must happen BEFORE blocking below, otherwise no other thread could
    # ever enter semaphore_up() to wake us -- classic hand-off ordering)
    semaphore_handle_dict[semaphorehandle]['semlock'].release()

    # acquire my lock...   (someone who does an up or destroy will release us)
    # This second acquire on an already-held lock blocks this thread until a
    # waker calls mylock.release().
    mylock.acquire(True)

  else:
    # Since the count is > 0, we should decrement
    semaphore_handle_dict[semaphorehandle]['semcount'] = semaphore_handle_dict[semaphorehandle]['semcount'] - 1

    # release the semaphore lock...
    semaphore_handle_dict[semaphorehandle]['semlock'].release()
#end include semaphore.repy
cv_handle_dict = {}
def cv_create():
  """
  <Purpose>
    Allocate a fresh condition variable and return its handle.

  <Precondition>
    None.

  <Arguments>
    None.

  <Exceptions>
    None.

  <Side Effects>
    None.

  <Returns>
    The condition variable handle.
  """
  # Unique ID that identifies this condition variable.
  handle = uniqueid_getid()

  # The waiters semaphore is used as a plain mutex: at most one function of
  # this module runs inside the critical section at a time.  Start it at 1
  # ("unlocked") with a single up.
  mutex = semaphore_create()
  semaphore_up(mutex)

  cv_handle_dict[handle] = {
      'waiters_semaphore': mutex,
      # The dispatcher semaphore is the queue threads actually sleep on.
      'dispatcher_semaphore': semaphore_create(),
      # Number of threads currently parked in cv_wait().
      'waiter_count': 0,
  }

  return handle
def cv_destroy(handle):
  """
  <Purpose>
    Destroy the condition variable.

  <Arguments>
    handle: The condition variable handle.

  <Precondition>
    All threads waiting on this condition variable have been notified by a
    call to notify_one or notify_all.
    No other function calls in this module should be called concurrently or
    after.  The fact that some other function call in this module might raise
    an exception while the condition variable is getting destroyed implies a
    design error in client's code.

  <Exceptions>
    ValueError if the condition variable handle is invalid.

  <Side Effects>
    Undefined behavior when the second precondition is not met.

  <Returns>
    None.
  """
  try:
    waiters_semaphore = cv_handle_dict[handle]['waiters_semaphore']

    # Block all other functions from accessing the number of waiting threads.
    semaphore_down(waiters_semaphore)

    # Are there any threads waiting for this condition variable?  If so,
    # notify the client by raising the exception.  This is an exceptional
    # state and implies a bug in client's code.
    # NOTE(review): on this path the waiters semaphore is never released, so
    # the CV is left permanently locked.  Presumably acceptable since the
    # precondition was violated -- confirm that callers treat this as fatal.
    if cv_handle_dict[handle]['waiter_count'] > 0:
      raise RuntimeError("condition variable thread queue not empty")

    # Now that we know that the thread queue is empty, we can safely
    # delete all internal variables.
    # (waiters_semaphore is destroyed while still held; semaphore_destroy
    # wakes/handles that case itself.)
    semaphore_destroy(cv_handle_dict[handle]['dispatcher_semaphore'])
    semaphore_destroy(cv_handle_dict[handle]['waiters_semaphore'])
    del cv_handle_dict[handle]

  except (IndexError, KeyError, ValueError):
    raise ValueError("invalid or destroyed condition variable handle: " + str(handle))
def cv_wait(handle):
  """
  <Purpose>
    Wait for a condition.

  <Arguments>
    handle: The condition variable handle.

  <Precondition>
    None.

  <Exceptions>
    ValueError if the condition variable handle is invalid.

  <Side Effects>
    Blocks the calling thread until a notify_one/notify_all wakes it.

  <Returns>
    None.
  """
  try:
    waiters_semaphore = cv_handle_dict[handle]['waiters_semaphore']

    # OK, we want to wait for a condition.  Signal the Writers Semaphore
    # that we want to enter a critical section, and increment the
    # number of threads that are currently waiting.
    semaphore_down(waiters_semaphore)  # Begin critical section.
    cv_handle_dict[handle]['waiter_count'] = cv_handle_dict[handle]['waiter_count'] + 1
    semaphore_up(waiters_semaphore)  # End critical section.

    # ... and wait for the condition to happen.
    # NOTE: unlike pthread-style CVs there is no caller-supplied mutex that
    # is atomically released here; registering (above) and sleeping (below)
    # are two steps, which is safe because a notify that lands in between
    # ups the dispatcher semaphore and this down returns immediately.
    semaphore_down(cv_handle_dict[handle]['dispatcher_semaphore'])

  except (IndexError, KeyError, ValueError):
    raise ValueError("invalid or destroyed condition variable handle: " + str(handle))
def cv_notify_one(handle):
  """
  <Purpose>
    Wake at most one thread currently waiting on the condition variable.

  <Arguments>
    handle: The condition variable handle.

  <Precondition>
    None.

  <Exceptions>
    ValueError if the condition variable handle is invalid.

  <Side Effects>
    None.

  <Returns>
    None.
  """
  try:
    mutex = cv_handle_dict[handle]['waiters_semaphore']

    semaphore_down(mutex)  # --- begin critical section ---
    # Only signal when somebody is actually parked in cv_wait(); otherwise
    # the notification is dropped (standard CV semantics).
    if cv_handle_dict[handle]['waiter_count'] > 0:
      cv_handle_dict[handle]['waiter_count'] -= 1
      semaphore_up(cv_handle_dict[handle]['dispatcher_semaphore'])
    semaphore_up(mutex)  # --- end critical section ---

  except (IndexError, KeyError, ValueError):
    raise ValueError("invalid or destroyed condition variable handle: " + str(handle))
def cv_notify_all(handle):
  """
  <Purpose>
    Wake every thread currently waiting on the condition variable.

  <Arguments>
    handle: The condition variable handle.

  <Precondition>
    None.

  <Exceptions>
    ValueError if the condition variable handle is invalid.

  <Side Effects>
    None.

  <Returns>
    None.
  """
  try:
    mutex = cv_handle_dict[handle]['waiters_semaphore']

    semaphore_down(mutex)  # --- begin critical section ---
    # Drain the waiter count, upping the dispatcher semaphore once per
    # parked thread so each cv_wait() call returns exactly once.
    while cv_handle_dict[handle]['waiter_count'] > 0:
      cv_handle_dict[handle]['waiter_count'] -= 1
      semaphore_up(cv_handle_dict[handle]['dispatcher_semaphore'])
    semaphore_up(mutex)  # --- end critical section ---

  except (IndexError, KeyError, ValueError):
    raise ValueError("invalid or destroyed condition variable handle: " + str(handle))
#end include cv.repy
def _cv_functor(condition, number, container):
  """
  Internal function that adds the specified number to the specified
  container only when it receives a notification for a given condition.
  """
  # Blocks until cv_notify_one/cv_notify_all signals `condition`; appending
  # `number` afterwards records that this thread actually ran.
  cv_wait(condition)
  container.append(number)
def cv_test_notify_all():
  """
  Very similar to cv_test_notify_one(). The only difference is that instead
  of calling notify_one() N times, we are doing a single notify_all() call.
  This time we are only checking to see if both containers are same in size,
  since we know that FIFO order is preserved.
  """
  condition = cv_create()
  container = []
  limit = 5

  # Spawn `limit` repy threads, each blocking in cv_wait() immediately.
  for count in range(limit):
    settimer(0.0, _cv_functor, (condition, count, container,))

  # NOTE(review): sleep-based synchronization -- the test assumes 1s is long
  # enough for all threads to park / to finish appending.  Timing-sensitive.
  sleep(1)
  cv_notify_all(condition)
  sleep(1)
  cv_destroy(condition)

  if len(container) == limit:
    pass
  else:
    print "fail: notify_all failed: some threads were never executed"
# repy entry point: the runtime sets `callfunc` to 'initialize' on startup.
if callfunc == 'initialize':
  cv_test_notify_all()
  exitall()
| 2.71875 | 3 |
dev/tools/leveleditor/direct/showbase/PandaObject.py | CrankySupertoon01/Toontown-2 | 1 | 12762422 | """Undocumented Module"""
__all__ = []
## from DirectObject import *
## from pandac.PandaModules import *
##
## class PandaObject(DirectObject):
## """
## This is the class that all Panda/Show classes should inherit from
## """
## pass
| 1.59375 | 2 |
spacetimeformer/spacetimeformer_model/nn/scalenorm.py | Azerrroth/spacetimeformer | 0 | 12762423 | import torch
from torch import nn
class ScaleNorm(nn.Module):
    """Scale normalization.

    Rescales each vector along the last dimension by
    ``g / max(||x|| * dim**-0.5, eps)`` where ``g`` is a single learned gain
    shared across all features.
    """

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.scale = dim ** -0.5
        # one learnable scalar gain
        self.g = nn.Parameter(torch.ones(1))

    def forward(self, x):
        # L2 norm over the feature (last) dimension, kept for broadcasting.
        norm = x.norm(dim=-1, keepdim=True)
        denom = norm.clamp(min=self.eps) * self.scale
        return (x / denom) * self.g
| 2.6875 | 3 |
app/auth/routes.py | jod35/Fast-Food-Fast | 0 | 12762424 | from flask import Blueprint, redirect, url_for, jsonify, make_response, request
from ..models.users import User
from ..utils.database import db
from flask_login import login_user, logout_user
from ..controllers.methods import check_email_exists, check_username_exists
auth_bp = Blueprint("auth", __name__)
@auth_bp.route("/signup", methods=["POST"])
def create_account():
data = request.get_json()
username = data.get("username")
email = data.get("email")
tel_phone = data.get("tel_phone")
password = data.get("password")
confirm = data.get("password")
if not check_email_exists(email) and not check_username_exists(password):
new_user = User(username=username, email=email, tel_phone=tel_phone)
new_user.create_password_hash(password)
new_user.save()
return make_response(
jsonify({"message": "Account Created Successully!", "success": True}), 201
)
else:
return make_response(
jsonify({"message": "Invalid Credentials", "success": False})
)
@auth_bp.route("/login", methods=["POST"])
def sign_in_user():
username = request.form.get("username")
password = request.form.get("password")
user = User.query.filter_by(username=username).first()
if user and user.check_password(password):
login_user(user)
if user.isAdmin:
return redirect(url_for("ui.new_orders"))
else:
return redirect(url_for("ui.users_orders"))
else:
return redirect(url_for("ui.login_failed"))
@auth_bp.route("/logout")
def logout():
logout_user()
return redirect(url_for("ui.index"))
| 2.890625 | 3 |
__init__.py | JarbasAl/skill-email-commands | 2 | 12762425 | from mycroft.skills import MycroftSkill
from mycroft.messagebus.message import Message
from mail_monitor import EmailMonitor
from os.path import dirname, join
class EmailMonitorSkill(MycroftSkill):
    """Mycroft skill that watches an IMAP inbox and replays whitelisted
    emails' payloads as spoken utterances on the message bus."""

    def __init__(self):
        super().__init__()
        # Email credentials/whitelist come from the core mycroft config,
        # not from the skill's own settings.
        self.email_config = self.config_core.get("email", {})
        # Persist the set of already-handled emails across restarts.
        if "processed_emails" not in self.settings:
            self.settings["processed_emails"] = []
    def initialize(self):
        # Refuse to start without credentials and a non-empty whitelist.
        if "mail" not in self.email_config or "password" not in \
            self.email_config or "whitelist" not in self.email_config or\
                not self.email_config["whitelist"]:
            self.speak_dialog("error")
            raise RuntimeError
        else:
            # "include_read" is a skill-level switch translated into the IMAP
            # search filter, then stripped before the remaining config is
            # forwarded to EmailMonitor as keyword arguments.
            # NOTE(review): `filter` shadows the builtin of the same name.
            filter = "(UNSEEN)"
            if self.email_config.get("include_read"):
                filter = "(ALL)"
            if "include_read" in self.email_config:
                self.email_config.pop("include_read")
            self.email_config["filter"] = filter
            try:
                self.mail_client = EmailMonitor(**self.email_config)
                self.mail_client.on_new_email = self.handle_new_email
                self.mail_client.setDaemon(True)
                self.mail_client.start()
            # Bare except is deliberate here: speak the error dialog first,
            # then re-raise so the skill loader sees the failure.
            except:
                self.speak_dialog("error")
                raise
    def get_intro_message(self):
        self.speak_dialog("intro")
    def handle_new_email(self, email):
        # Callback invoked by the EmailMonitor thread for each new message.
        if email in self.settings["processed_emails"]:
            # don't process same email twice
            # important if "include_read" is set
            # some uses cases, like using siri Notes, will mark emails as read
            return
        self.gui.show_animated_image(
            join(dirname(__file__), "ui", "inbox.gif"), override_idle=2)
        self.settings["processed_emails"].append(email)
        self.log.debug(str(email))
        # Feed the email body into the speech pipeline as if it were heard;
        # the sender's address is recorded as the utterance source.
        # NOTE(review): assumes `email` is a dict with 'payload' and 'email'
        # keys -- defined by mail_monitor.EmailMonitor; confirm its shape.
        self.bus.emit(Message("recognizer_loop:utterance",
                              {"utterances": [email['payload']]},
                              {"source": email['email'],
                               "destinatary": "skills"}))
    def shutdown(self):
        # NOTE(review): raises AttributeError if initialize() failed before
        # mail_client was assigned -- TODO confirm mycroft tolerates this.
        self.mail_client.stop()
def create_skill():
    """Mycroft skill-loader entry point."""
    return EmailMonitorSkill()
| 2.421875 | 2 |
canflood/hlpr/Q.py | dhanyatha-harish-ibigroup/TestCanFloowWorkFlow | 0 | 12762426 | '''
Created on Feb. 25, 2020
@author: cefect
helper functions w/ Qgis api
'''
#==============================================================================
# imports------------
#==============================================================================
#python
import os, configparser, logging, inspect, copy, datetime, re
import pandas as pd
import numpy as np
#qgis
from qgis.core import *
from qgis.analysis import QgsNativeAlgorithms
from qgis.gui import QgisInterface
from PyQt5.QtCore import QVariant, QMetaType
from PyQt5.QtWidgets import QProgressBar
"""throws depceciationWarning"""
import processing
#==============================================================================
# customs
#==============================================================================
mod_logger = logging.getLogger('Q') #get the root logger
from hlpr.exceptions import QError as Error
import hlpr.basic as basic
from hlpr.basic import get_valid_filename
#==============================================================================
# globals
#==============================================================================
# Maximum field-name length supported by each vector-layer driver.
fieldn_max_d = {'SpatiaLite':50, 'ESRI Shapefile':10, 'Memory storage':50, 'GPKG':50}

# numpy dtype kind/char codes -> native python types (used when translating
# numpy values into Qgis-compatible field types).
npc_pytype_d = {'?':bool,
                'b':int,
                'd':float,
                'e':float,
                'f':float,
                'q':int,
                'h':int,
                'l':int,
                'i':int,
                'g':float,
                'U':str,
                'B':int,
                'L':int,
                'Q':int,
                'H':int,
                'I':int,
                'O':str, #this is the catchall 'object'
                }

type_qvar_py_d = {10:str, 2:int, 135:float, 6:float, 4:int, 1:bool, 16:datetime.datetime, 12:str} #QVariant.types to pythonic types

#parameters for lots of statistic algos
# (statistic name -> enum index expected by the QGIS processing algorithms)
stat_pars_d = {'First': 0, 'Last': 1, 'Count': 2, 'Sum': 3, 'Mean': 4, 'Median': 5,
               'St dev (pop)': 6, 'Minimum': 7, 'Maximum': 8, 'Range': 9, 'Minority': 10,
               'Majority': 11, 'Variety': 12, 'Q1': 13, 'Q3': 14, 'IQR': 15}
#==============================================================================
# classes -------------
#==============================================================================
class Qcoms(basic.ComWrkr): #baseclass for working w/ pyqgis outside the native console
    """Base worker wrapping common PyQGIS setup (CRS, algorithms, drivers)
    for standalone, console, and plugin runs."""
    driverName = 'SpatiaLite' #default data creation driver type
    out_dName = driverName #default output driver/file type

    # Attribute names snapshotted/restored between workers by _upd_qd() and
    # consumed as the `init_q_d` constructor argument.
    q_hndls = ['crs', 'crsid', 'algo_init', 'qap', 'vlay_drivers']

    algo_init = False #flag indicating whether the algos have been initialized
    qap = None
    mstore = None
    def __init__(self,
                 feedback=None,
                 #init controls
                 init_q_d = {}, #container of initilzied objects
                 crsid = 'EPSG:4326', #default crsID if no init_q_d is passed
                 **kwargs
                 ):
        """
        NOTE(review): `init_q_d = {}` is a mutable default argument; benign
        here because it is only read (len / .items()), never mutated.

        #=======================================================================
        # plugin use
        #=======================================================================
        QprojPlugs don't execute super cascade

        #=======================================================================
        # Qgis inheritance
        #=======================================================================
        for single standalone runs
            all the handles will be generated and Qgis instanced

        for console runs
            handles should be passed to avoid re-instancing Qgis

        for session standalone runs
            handles passed

        for swapping crs
            run set_crs() on the session prior to spawning the child
        """
        #=======================================================================
        # defaults
        #=======================================================================
        if feedback is None:
            """by default, building our own feedbacker
            passed to ComWrkr.setup_feedback()
            """
            feedback = MyFeedBackQ()
        #=======================================================================
        # cascade
        #=======================================================================
        super().__init__(
            feedback = feedback,
            **kwargs) #initialize the baseclass
        log = self.logger
        #=======================================================================
        # attachments
        #=======================================================================
        self.fieldn_max_d=fieldn_max_d
        self.crsid=crsid
        #=======================================================================
        # Qgis setup COMMON
        #=======================================================================
        """both Plugin and StandAlone runs should call these"""
        self.qproj = QgsProject.instance()
        """
        each worker will have their own store
        used to wipe any intermediate layers
        """
        self.mstore = QgsMapLayerStore() #build a new map store
        #do your own init (standalone runs)
        if len(init_q_d) == 0:
            self._init_standalone()
        else:
            #check everything is there
            miss_l = set(self.q_hndls).difference(init_q_d.keys())
            assert len(miss_l)==0, 'init_q_d missing handles: %s'%miss_l
            #set the handles
            for k,v in init_q_d.items():
                setattr(self, k, v)
            self._upd_qd()
        self.proj_checks()
        #=======================================================================
        # attach inputs
        #=======================================================================
        self.logger.debug('Qcoms.__init__ finished w/ out_dir: \n %s'%self.out_dir)
        return
#==========================================================================
# standalone methods-----------
#==========================================================================
def _init_standalone(self, #setup for qgis runs
                     crsid = None,
                     ):
    """Stand up a private QGIS application: CRS, app, algos, and drivers.

    WARNING! do not call twice (phantom crash)
    """
    log = self.logger.getChild('_init_standalone')
    if crsid is None:
        crsid = self.crsid

    #-------------------------------------------------------------------
    # project CRS
    #-------------------------------------------------------------------
    new_crs = QgsCoordinateReferenceSystem(crsid)
    assert isinstance(new_crs, QgsCoordinateReferenceSystem), 'bad crs type'
    assert new_crs.isValid()

    self.crs = new_crs
    self.qproj.setCrs(new_crs)

    log.info('crs set to \'%s\''%self.crs.authid())

    #-------------------------------------------------------------------
    # QGIS application, processing algos, vector drivers
    #-------------------------------------------------------------------
    self.qap = self.init_qgis()
    self.algo_init = self.init_algos()
    self.set_vdrivers()

    #-------------------------------------------------------------------
    # wrap: refresh the handle container
    #-------------------------------------------------------------------
    self._upd_qd()
    log.debug('Qproj._init_standalone finished')
    return
def _upd_qd(self): #set a fresh parameter set
    """Rebuild init_q_d from the session's current handle attributes."""
    handles = dict()
    for hndl_name in self.q_hndls:
        handles[hndl_name] = getattr(self, hndl_name)
    self.init_q_d = handles
def init_qgis(self, #instantiate qgis
              gui = False):
    """Instantiate and initialize a (headless by default) QgsApplication.

    WARNING: need to hold this app somewhere. call in the module you're working in (scripts)

    Parameters
    ----------
    gui: bool
        whether to enable the GUI event loop

    Returns
    -------
    QgsApplication

    Raises
    ------
    Error
        if QGIS fails to initialize (original cause chained)
    """
    log = self.logger.getChild('init_qgis')

    try:
        QgsApplication.setPrefixPath(r'C:/OSGeo4W64/apps/qgis-ltr', True)

        app = QgsApplication([], gui)

        # Update prefix path
        #app.setPrefixPath(r"C:\OSGeo4W64\apps\qgis", True)
        app.initQgis()

        #logging.debug(QgsApplication.showSettings())
        """ was throwing unicode error"""
        log.info(u' QgsApplication.initQgis. version: %s, release: %s'%(
            Qgis.QGIS_VERSION.encode('utf-8'), Qgis.QGIS_RELEASE_NAME.encode('utf-8')))

        return app

    #BUGFIX: narrowed the bare `except:` (which also swallowed SystemExit and
    #KeyboardInterrupt) and chained the original cause for debuggability
    except Exception as e:
        raise Error('QGIS failed to initiate') from e
def init_algos(self): #initiilize processing and add providers
    """Initialize the QGIS processing framework and register native algorithms.

    Returns True on success (callers store this on self.algo_init).

    crashing without raising an Exception
    """
    log = self.logger.getChild('init_algos')

    if not isinstance(self.qap, QgsApplication):
        raise Error('qgis has not been properly initlized yet')

    # function-scope import: processing is only importable once QGIS is up
    from processing.core.Processing import Processing

    Processing.initialize() #crashing without raising an Exception

    QgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())

    #idiom fix: `is not None` rather than `not ... is None`
    assert self.feedback is not None, 'instance needs a feedback method for algos to work'

    log.info('processing initilzied w/ feedback: \'%s\''%(type(self.feedback).__name__))

    return True
def set_vdrivers(self):
    """Build and attach the driverName -> file-extension lookup for vector writers."""
    log = self.logger.getChild('set_vdrivers')

    #build vector drivers list by extension
    """couldnt find a good built-in to link extensions with drivers"""
    driver_ext_d = {'SpatiaLite':'sqlite', 'OGR':'shp'}
    #vlay_drivers = {'sqlite':'SpatiaLite', 'shp':'OGR','csv':'delimitedtext'}

    #map each supported extension back onto its driver name (first ext wins)
    for extension in QgsVectorFileWriter.supportedFormatExtensions():
        driver_nm = QgsVectorFileWriter.driverForExtension(extension)
        if driver_nm not in driver_ext_d:
            driver_ext_d[driver_nm] = extension

    #add in missing/duplicated
    for ogr_driver in QgsVectorFileWriter.ogrDriverList():
        if ogr_driver.driverName not in driver_ext_d:
            driver_ext_d[ogr_driver.driverName] = '?'

    self.vlay_drivers = driver_ext_d
    log.debug('built driver:extensions dict: \n %s'%driver_ext_d)
    return
def set_crs(self, #load, build, and set the project crs
            crsid = None, #crs authority id string (e.g. 'EPSG:4326')
            crs = None, #QgsCoordinateReferenceSystem (built from crsid if not passed)
            logger=None,
            ):
    """Build (if needed) and set the project CRS, then re-validate the session.

    Returns the QgsCoordinateReferenceSystem that was set.
    """
    #=======================================================================
    # setup and defaults
    #=======================================================================
    if logger is None: logger=self.logger
    log = logger.getChild('set_crs')

    if crsid is None:
        crsid = self.crsid

    #=======================================================================
    # build it
    #=======================================================================
    if crs is None:
        crs = QgsCoordinateReferenceSystem(crsid)

    assert isinstance(crs, QgsCoordinateReferenceSystem)

    self.crs=crs #overwrite

    if not self.crs.isValid():
        #BUGFIX: authid() returns a string; the original '%i' format
        #specifier raised a TypeError instead of the intended IOError
        raise IOError('CRS built from %s is invalid'%self.crs.authid())

    #=======================================================================
    # attach to project
    #=======================================================================
    self.qproj.setCrs(self.crs)

    self.crsid = self.crs.authid()

    if not self.qproj.crs().description() == self.crs.description():
        raise Error('qproj crs does not match sessions')

    log.info('crs set to EPSG: %s, \'%s\''%(self.crs.authid(), self.crs.description()))

    self._upd_qd()
    self.proj_checks(logger=log)

    return self.crs
def proj_checks(self,
                logger=None):
    """Validate the session: drivers, algo init, feedback, CRS and handle consistency."""
    #log = self.logger.getChild('proj_checks')

    #idiom fix: `X not in Y` rather than `not X in Y`
    if self.driverName not in self.vlay_drivers:
        raise Error('unrecognized driver name')

    if self.out_dName not in self.vlay_drivers:
        raise Error('unrecognized driver name')

    assert self.algo_init
    #idiom fix: `is not None` rather than `not ... is None`
    assert self.feedback is not None
    assert self.progressBar is not None

    #=======================================================================
    # crs checks
    #=======================================================================
    assert isinstance(self.crs, QgsCoordinateReferenceSystem)
    assert self.crs.isValid()
    assert self.crs.authid()==self.qproj.crs().authid(), 'crs mismatch'
    assert self.crs.authid() == self.crsid, 'crs mismatch'
    assert not self.crs.authid()=='', 'got empty CRS!'

    #=======================================================================
    # handle checks
    #=======================================================================
    assert isinstance(self.init_q_d, dict)
    miss_l = set(self.q_hndls).difference(self.init_q_d.keys())
    assert len(miss_l)==0, 'init_q_d missing handles: %s'%miss_l

    for k,v in self.init_q_d.items():
        assert getattr(self, k) == v, k

    #log.info('project passed all checks')
    return
def print_qt_version(self):
    """Print (sorted) every version-related, non-builtin attribute of PyQt5.Qt."""
    import inspect
    from PyQt5 import Qt

    entries = []
    for attr_name, attr_val in vars(Qt).items():
        if 'version' in attr_name.lower() and not inspect.isbuiltin(attr_val):
            entries.append('%s = %s' % (attr_name, attr_val))

    print('\n'.join(sorted(entries)))
#===========================================================================
# LOAD/WRITE LAYERS-----------
#===========================================================================
def load_vlay(self,
              fp, #filepath to the vector file
              logger=None,
              providerLib='ogr',
              aoi_vlay = None, #optional polygon layer to slice the load by
              allow_none=True, #control check in saveselectedfeastures
              addSpatialIndex=True,
              ):
    """Load a vector layer from file, optionally sliced by an aoi polygon layer.

    Returns the loaded (possibly sliced) QgsVectorLayer, or None when the
    aoi selection is empty and allow_none=True.
    """
    assert os.path.exists(fp), 'requested file does not exist: %s'%fp

    if logger is None: logger = self.logger
    log = logger.getChild('load_vlay')

    basefn = os.path.splitext(os.path.split(fp)[1])[0]

    log.debug('loading from %s'%fp)

    vlay_raw = QgsVectorLayer(fp,basefn,providerLib)

    #=======================================================================
    # # checks
    #=======================================================================
    if not isinstance(vlay_raw, QgsVectorLayer):
        raise IOError

    #check if this is valid
    if not vlay_raw.isValid():
        raise Error('loaded vlay \'%s\' is not valid. \n \n did you initilize?'%vlay_raw.name())

    #check if it has geometry
    if vlay_raw.wkbType() == 100:
        raise Error('loaded vlay has NoGeometry')

    assert isinstance(self.mstore, QgsMapLayerStore)

    """only add intermediate layers to store
    self.mstore.addMapLayer(vlay_raw)"""

    if not vlay_raw.crs()==self.qproj.crs():
        log.warning('crs mismatch: \n %s\n %s'%(
            vlay_raw.crs(), self.qproj.crs()))

    #=======================================================================
    # aoi slice
    #=======================================================================
    if isinstance(aoi_vlay, QgsVectorLayer):
        log.info('slicing by aoi %s'%aoi_vlay.name())

        vlay = self.selectbylocation(vlay_raw, aoi_vlay, allow_none=allow_none,
                                     logger=log, result_type='layer')

        #check for no selection
        if vlay is None:
            return None

        vlay.setName(vlay_raw.name()) #reset the name

        #clear original from memory
        self.mstore.addMapLayer(vlay_raw)
        self.mstore.removeMapLayers([vlay_raw])

    else:
        vlay = vlay_raw

    #=======================================================================
    # clean------
    #=======================================================================
    #spatial index
    #BUGFIX: index must be built on the (possibly aoi-sliced) result layer
    #`vlay`, not on `vlay_raw`, which may have just been removed from the store
    if addSpatialIndex and (not vlay.hasSpatialIndex()==QgsFeatureSource.SpatialIndexPresent):
        self.createspatialindex(vlay, logger=log)

    #=======================================================================
    # wrap
    #=======================================================================
    dp = vlay.dataProvider()

    log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n %s'
             %(vlay.name(), dp.storageType(), QgsWkbTypes().displayString(vlay.wkbType()), dp.featureCount(), fp))

    return vlay
def load_rlay(self, fp,
              aoi_vlay = None,
              logger=None):
    """Load a raster layer from *fp*, optionally clipped to an aoi polygon layer."""
    if logger is None: logger = self.logger
    log = logger.getChild('load_rlay')

    assert os.path.exists(fp), 'requested file does not exist: %s'%fp
    assert QgsRasterLayer.isValidRasterFileName(fp), \
        'requested file is not a valid raster file type: %s'%fp

    basefn = os.path.splitext(os.path.split(fp)[1])[0]

    #Import a Raster Layer
    log.debug('QgsRasterLayer(%s, %s)'%(fp, basefn))
    rlayer = QgsRasterLayer(fp, basefn)
    """
    hanging for some reason...
    QgsRasterLayer(C:\LS\03_TOOLS\CanFlood\_git\tutorials\1\haz_rast\haz_1000.tif, haz_1000)
    """

    #===========================================================================
    # check
    #===========================================================================
    assert isinstance(rlayer, QgsRasterLayer), 'failed to get a QgsRasterLayer'
    assert rlayer.isValid(), "Layer failed to load!"

    if not rlayer.crs() == self.qproj.crs():
        log.warning('loaded layer \'%s\' crs mismatch!'%rlayer.name())

    log.debug('loaded \'%s\' from \n %s'%(rlayer.name(), fp))

    #===========================================================================
    # aoi clip (early return when no aoi was passed)
    #===========================================================================
    if aoi_vlay is None:
        return rlayer

    log.debug('clipping w/ %s'%aoi_vlay.name())
    assert isinstance(aoi_vlay, QgsVectorLayer)

    clipped = self.cliprasterwithpolygon(rlayer, aoi_vlay, logger=log, layname=rlayer.name())

    #clean up: drop the unclipped source from memory via a scratch store
    scratch_store = QgsMapLayerStore()
    scratch_store.addMapLayers([rlayer])
    scratch_store.removeAllMapLayers()

    return clipped
def write_rlay(self, #make a local copy of the passed raster layer
               rlayer, #raster layer to make a local copy of
               extent = 'layer', #write extent control
                   #'layer': use the current extent (default)
                   #'mapCanvas': use the current map Canvas
                   #QgsRectangle: use passed extents
               resolution = 'raw', #resolution for output
               opts = ["COMPRESS=LZW"], #QgsRasterFileWriter.setCreateOptions
                   # NOTE(review): mutable default list; never mutated here so harmless,
                   # but safer as a tuple or None sentinel -- TODO confirm no caller mutates it
               out_dir = None, #directory for puts
               newLayerName = None,
               logger=None,
               ):
    """Write a local GeoTiff copy of *rlayer* and return the output file path.

    because processing tools only work on local copies

    #=======================================================================
    # coordinate transformation
    #=======================================================================
    NO CONVERSION HERE!
        can't get native API to work. use gdal_warp instead
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger=self.logger
    if out_dir is None: out_dir = self.out_dir
    if newLayerName is None: newLayerName = rlayer.name()

    newFn = get_valid_filename('%s.tif'%newLayerName) #clean it

    out_fp = os.path.join(out_dir, newFn)

    log = logger.getChild('write_rlay')

    log.debug('on \'%s\' w/ \n crs:%s \n extents:%s\n xUnits:%.4f'%(
        rlayer.name(), rlayer.crs(), rlayer.extent(), rlayer.rasterUnitsPerPixelX()))

    #=======================================================================
    # precheck
    #=======================================================================
    assert isinstance(rlayer, QgsRasterLayer)
    assert os.path.exists(out_dir)

    #honor the session-level overwrite flag: warn or fail on collisions
    if os.path.exists(out_fp):
        msg = 'requested file already exists! and overwrite=%s \n %s'%(
            self.overwrite, out_fp)
        if self.overwrite:
            log.warning(msg)
        else:
            raise Error(msg)

    #=======================================================================
    # extract info from layer
    #=======================================================================
    """consider loading the layer and duplicating the renderer?
    renderer = rlayer.renderer()"""

    provider = rlayer.dataProvider()

    #build projector
    projector = QgsRasterProjector()
    #projector.setCrs(provider.crs(), provider.crs())

    #build and configure pipe
    #ORDER MATTERS: provider must be set before the projector is inserted at index 2
    pipe = QgsRasterPipe()
    if not pipe.set(provider.clone()): #Insert a new known interface in default place
        raise Error("Cannot set pipe provider")

    if not pipe.insert(2, projector): #insert interface at specified index and connect
        raise Error("Cannot set pipe projector")

    #pipe = rlayer.pipe()

    #coordinate transformation
    """see note"""
    transformContext = self.qproj.transformContext()

    #=======================================================================
    # extents
    #=======================================================================
    if extent == 'layer':
        extent = rlayer.extent()

    elif extent=='mapCanvas':
        assert isinstance(self.iface, QgisInterface), 'bad key for StandAlone?'

        #get the extent, transformed to the current CRS
        extent = QgsCoordinateTransform(
            self.qproj.crs(),
            rlayer.crs(),
            transformContext
                ).transformBoundingBox(self.iface.mapCanvas().extent())

    assert isinstance(extent, QgsRectangle), 'expected extent=QgsRectangle. got \"%s\''%extent

    #expect the requested extent to be LESS THAN what we have in the raw raster
    assert rlayer.extent().width()>=extent.width(), 'passed extents too wide'
    assert rlayer.extent().height()>=extent.height(), 'passed extents too tall'

    #=======================================================================
    # resolution
    #=======================================================================
    #use the resolution of the raw file
    if resolution == 'raw':
        """this respects the calculated extents"""
        nRows = int(extent.height()/rlayer.rasterUnitsPerPixelY())
        nCols = int(extent.width()/rlayer.rasterUnitsPerPixelX())

    else:
        """dont think theres any decent API support for the GUI behavior"""
        raise Error('not implemented')

    #=======================================================================
    # #build file writer
    #=======================================================================
    file_writer = QgsRasterFileWriter(out_fp)
    #file_writer.Mode(1) #???

    if not opts is None:
        file_writer.setCreateOptions(opts)

    log.debug('writing to file w/ \n %s'%(
        {'nCols':nCols, 'nRows':nRows, 'extent':extent, 'crs':rlayer.crs()}))

    #execute write
    error = file_writer.writeRaster( pipe, nCols, nRows, extent, rlayer.crs(), transformContext)

    log.info('wrote to file \n %s'%out_fp)

    #=======================================================================
    # wrap
    #=======================================================================
    if not error == QgsRasterFileWriter.NoError:
        raise Error(error)

    assert os.path.exists(out_fp)

    assert QgsRasterLayer.isValidRasterFileName(out_fp), \
        'requested file is not a valid raster file type: %s'%out_fp

    return out_fp
def vlay_write(self, #write a VectorLayer
               vlay,
               out_fp=None, #defaults to <out_dir>/<layer name>.gpkg
               driverName='GPKG',
               fileEncoding = "CP1250",
               opts = None, #QgsVectorFileWriter.SaveVectorOptions; fresh object built per-call
               overwrite=None,
               logger=None):
    """Write a vector layer to a GeoPackage file and return the output file path.

    help(QgsVectorFileWriter.SaveVectorOptions)
    QgsVectorFileWriter.SaveVectorOptions.driverName='GPKG'
    opt2 = QgsVectorFileWriter.BoolOption(QgsVectorFileWriter.CreateOrOverwriteFile)
    help(QgsVectorFileWriter)
    """
    #==========================================================================
    # defaults
    #==========================================================================
    if logger is None: logger=self.logger
    log = logger.getChild('vlay_write')
    if overwrite is None: overwrite=self.overwrite
    if out_fp is None: out_fp = os.path.join(self.out_dir, '%s.gpkg'%vlay.name())

    #BUGFIX: the original default (opts=SaveVectorOptions() in the signature)
    #was a single shared object, mutated below on every call; build it fresh
    if opts is None: opts = QgsVectorFileWriter.SaveVectorOptions()

    #===========================================================================
    # assemble options
    #===========================================================================
    opts.driverName = driverName
    opts.fileEncoding = fileEncoding

    #===========================================================================
    # checks
    #===========================================================================
    #file extension
    fhead, ext = os.path.splitext(out_fp)

    if not 'gpkg' in ext:
        raise Error('unexpected extension: %s'%ext)

    if os.path.exists(out_fp):
        msg = 'requested file path already exists!. overwrite=%s \n %s'%(
            overwrite, out_fp)
        if overwrite:
            log.warning(msg)
            os.remove(out_fp) #workaround... should be away to overwrite with the QgsVectorFileWriter
        else:
            raise Error(msg)

    if vlay.dataProvider().featureCount() == 0:
        raise Error('\'%s\' has no features!'%(
            vlay.name()))

    #BUGFIX: the original constructed the Error but never raised it
    if not vlay.isValid():
        raise Error('passed invalid layer')

    #=======================================================================
    # write
    #=======================================================================
    error = QgsVectorFileWriter.writeAsVectorFormatV2(
        vlay, out_fp,
        QgsCoordinateTransformContext(),
        opts,
        )

    #=======================================================================
    # wrap and check
    #=======================================================================
    if error[0] == QgsVectorFileWriter.NoError:
        log.info('layer \' %s \' written to: \n %s'%(vlay.name(),out_fp))
        return out_fp

    raise Error('FAILURE on writing layer \' %s \'  with code:\n %s \n %s'%(vlay.name(),error, out_fp))
def load_dtm(self, #convienece loader for assining the correct attribute
             fp,
             logger=None,
             **kwargs):
    """Load a DTM raster from *fp*, store it on self.dtm_rlay, and return it."""
    logger = self.logger if logger is None else logger

    rlay = self.load_rlay(fp, logger=logger.getChild('load_dtm'), **kwargs)

    self.dtm_rlay = rlay
    return rlay
#==========================================================================
# GENERIC METHODS-----------------
#==========================================================================
def vlay_new_df2(self, #build a vlay from a df
                 df_raw,
                 geo_d = None, #container of geometry objects {fid: QgsGeometry}
                 crs=None, #defaults to the project crs
                 gkey = None, #data field linking with geo_d (if None.. uses df index)
                 layname='df',
                 index = False, #whether to include the index as a field
                 logger=None,
                 ):
    """Build a memory QgsVectorLayer from a pandas DataFrame.

    performance enhancement over vlay_new_df
        simpler, clearer
        although less versatile
    """
    #=======================================================================
    # setup
    #=======================================================================
    if crs is None: crs = self.qproj.crs()
    if logger is None: logger = self.logger

    log = logger.getChild('vlay_new_df')

    #=======================================================================
    # index fix
    #=======================================================================
    df = df_raw.copy()

    if index:
        if not df.index.name is None:
            coln = df.index.name
            df.index.name = None
        else:
            coln = 'index'
        df[coln] = df.index

    #=======================================================================
    # precheck
    #=======================================================================
    #make sure none of hte field names execeed the driver limitations
    max_len = self.fieldn_max_d[self.driverName]

    #check lengths
    # NOTE(review): uses `>=`, so names exactly at max_len are also truncated
    # (to max_len-1 chars) -- confirm this is intentional
    boolcol = df_raw.columns.str.len() >= max_len

    if np.any(boolcol):
        log.warning('passed %i columns which exeed the max length=%i for driver \'%s\'.. truncating: \n %s'%(
            boolcol.sum(), max_len, self.driverName, df_raw.columns[boolcol].tolist()))

        df.columns = df.columns.str.slice(start=0, stop=max_len-1)

    #make sure the columns are unique (truncation above could collide)
    assert df.columns.is_unique, 'got duplicated column names: \n %s'%(df.columns.tolist())

    #check datatypes
    assert np.array_equal(df.columns, df.columns.astype(str)), 'got non-string column names'

    #check the geometry: every gkey (or index) value must have a geometry
    if not geo_d is None:
        assert isinstance(geo_d, dict)
        if not gkey is None:
            assert gkey in df_raw.columns

            #assert 'int' in df_raw[gkey].dtype.name

            #check gkey match
            l = set(df_raw[gkey].drop_duplicates()).difference(geo_d.keys())
            assert len(l)==0, 'missing %i \'%s\' keys in geo_d: %s'%(len(l), gkey, l)

        #against index
        else:
            #check gkey match
            l = set(df_raw.index).difference(geo_d.keys())
            assert len(l)==0, 'missing %i (of %i) fid keys in geo_d: %s'%(len(l), len(df_raw), l)

    #===========================================================================
    # assemble the fields
    #===========================================================================
    #column name and python type
    fields_d = {coln:np_to_pytype(col.dtype) for coln, col in df.items()}

    #fields container
    qfields = fields_build_new(fields_d = fields_d, logger=log)

    #=======================================================================
    # assemble the features
    #=======================================================================
    #convert form of data: one QgsFeature per DataFrame row, keyed by fid
    feats_d = dict()
    for fid, row in df.iterrows():

        feat = QgsFeature(qfields, fid)

        #loop and add data
        for fieldn, value in row.items():

            #skip null values (leave the attribute unset)
            if pd.isnull(value): continue

            #get the index for this field
            findx = feat.fieldNameIndex(fieldn)

            #get the qfield
            qfield = feat.fields().at(findx)

            #make the type match
            ndata = qtype_to_pytype(value, qfield.type(), logger=log)

            #set the attribute
            if not feat.setAttribute(findx, ndata):
                raise Error('failed to setAttribute')

        #setgeometry (keyed by gkey column value, or by fid)
        if not geo_d is None:
            if gkey is None:
                gobj = geo_d[fid]
            else:
                gobj = geo_d[row[gkey]]

            feat.setGeometry(gobj)

        #stor eit
        feats_d[fid]=feat

    log.debug('built %i \'%s\' features'%(
        len(feats_d),
        QgsWkbTypes.geometryDisplayString(feat.geometry().type()),
        ))

    #=======================================================================
    # get the geo type
    #=======================================================================\
    #derive the wkb type string from the first geometry (all assumed alike)
    if not geo_d is None:
        gtype = QgsWkbTypes().displayString(next(iter(geo_d.values())).wkbType())
    else:
        gtype='None'

    #===========================================================================
    # buidl the new layer
    #===========================================================================
    vlay = vlay_new_mlay(gtype,
                         crs,
                         layname,
                         qfields,
                         list(feats_d.values()),
                         logger=log,
                         )

    self.createspatialindex(vlay, logger=log)

    #=======================================================================
    # post check
    #=======================================================================
    if not geo_d is None:
        if vlay.wkbType() == 100:
            raise Error('constructed layer has NoGeometry')

    return vlay
def check_aoi(self, #special c hecks for AOI layers
              vlay):
    """Validate that *vlay* is usable as an AOI: a single-feature polygon layer in the project CRS."""
    assert isinstance(vlay, QgsVectorLayer)

    gtype = QgsWkbTypes().displayString(vlay.wkbType())
    assert 'Polygon' in gtype

    assert vlay.dataProvider().featureCount()==1

    lay_crs = vlay.crs()
    proj_crs = self.qproj.crs()
    assert lay_crs == proj_crs, 'aoi CRS (%s) does not match project (%s)'%(lay_crs, proj_crs)

    return
#==========================================================================
# ALGOS--------------
#==========================================================================
def deletecolumn(self,
                 in_vlay,
                 fieldn_l, #list of field names
                 invert=False, #whether to invert selected field names
                 layname = None,
                 logger=None,
                 ):
    """Drop the requested fields via qgis:deletecolumn; returns the result layer."""
    #=======================================================================
    # presets
    #=======================================================================
    if logger is None: logger=self.logger
    log = logger.getChild('deletecolumn')

    algo_nm = 'qgis:deletecolumn'
    self.vlay = in_vlay

    #=======================================================================
    # field manipulations
    #=======================================================================
    drop_l = self._field_handlr(in_vlay, fieldn_l, invert=invert, logger=log)

    #nothing to drop -- hand back the input layer unchanged
    if not drop_l:
        log.debug('no fields requsted to drop... skipping')
        return self.vlay

    #=======================================================================
    # execute the algo
    #=======================================================================
    ins_d = { 'COLUMN' : drop_l,
             'INPUT' : in_vlay,
             'OUTPUT' : 'TEMPORARY_OUTPUT'}

    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))

    res_vlay = processing.run(algo_nm, ins_d, feedback=self.feedback)['OUTPUT']

    #===========================================================================
    # post formatting
    #===========================================================================
    res_vlay.setName(layname if layname is not None else '%s_delf'%self.vlay.name())

    return res_vlay
def joinattributesbylocation(self,
                             #data definitions
                             vlay, #main (left) layer receiving the joined attributes
                             join_vlay, #layer from which to extract attribue values onto th ebottom vlay
                             jlay_fieldn_l, #list of field names to extract from the join_vlay
                             selected_only = False, #restrict the main layer to its selected features
                             jvlay_selected_only = False, #only consider selected features on the join layer
                             #algo controls
                             prefix = '', #prefix applied to joined field names
                             method=0, #one-to-many
                             predicate_l = ['intersects'],#list of geometric serach predicates
                             discard_nomatch = False, #Discard records which could not be joined
                             #data expectations
                             join_nullvs = True, #allow null values on jlay_fieldn_l on join_vlay
                             join_df = None, #if join_nullvs=FALSE, data to check for nulls (skips making a vlay_get_fdf)
                             allow_field_rename = False, #allow joiner fields to be renamed when mapped onto the main
                             allow_none = False, #allow a result with zero joins
                             #geometry expectations
                             expect_all_hits = False, #wheter every main feature intersects a join feature
                             expect_j_overlap = False, #wheter to expect the join_vlay to beoverlapping
                             expect_m_overlap = False, #wheter to expect the mainvlay to have overlaps
                             logger=None,
                             ):
    """Run qgis:joinattributesbylocation and validate the result.

    Returns (res_vlay, new_fn_l, join_cnt): the joined layer, the set of
    newly-added field names, and the number of joined features.

    TODO: really need to clean this up...

    discard_nomatch:
        TRUE: two resulting layers have no features in common
        FALSE: in layer retains all non matchers, out layer only has the non-matchers?

    METHOD: Join type
    - 0: Create separate feature for each located feature (one-to-many)
    - 1: Take attributes of the first located feature only (one-to-one)
    """
    #=======================================================================
    # presets
    #=======================================================================
    if logger is None: logger=self.logger
    log = logger.getChild('joinattributesbylocation')
    self.vlay = vlay

    algo_nm = 'qgis:joinattributesbylocation'

    #map predicate names onto the algo's integer codes
    predicate_d = {'intersects':0,'contains':1,'equals':2,'touches':3,'overlaps':4,'within':5, 'crosses':6}

    jlay_fieldn_l = self._field_handlr(join_vlay,
                                       jlay_fieldn_l,
                                       invert=False)

    #=======================================================================
    # jgeot = vlay_get_bgeo_type(join_vlay)
    # mgeot = vlay_get_bgeo_type(self.vlay)
    #=======================================================================

    mfcnt = self.vlay.dataProvider().featureCount()
    #jfcnt = join_vlay.dataProvider().featureCount()

    mfnl = vlay_fieldnl(self.vlay)

    expect_overlaps = expect_j_overlap or expect_m_overlap

    #=======================================================================
    # geometry expectation prechecks
    #=======================================================================
    # NOTE(review): this entire string-literal block is disabled code; it also
    # contains the expect_all_hits/allow_none/method checks, which therefore
    # never run -- confirm whether that was intentional
    """should take any geo
    if not (jgeot == 'polygon' or mgeot == 'polygon'):
        raise Error('one of the layres has to be a polygon')

    if not jgeot=='polygon':
        if expect_j_overlap:
            raise Error('join vlay is not a polygon, expect_j_overlap should =False')

    if not mgeot=='polygon':
        if expect_m_overlap:
            raise Error('main vlay is not a polygon, expect_m_overlap should =False')

    if expect_all_hits:
        if discard_nomatch:
            raise Error('discard_nomatch should =FALSE if you expect all hits')

        if allow_none:
            raise Error('expect_all_hits=TRUE and allow_none=TRUE')

    #method checks
    if method==0:
        if not jgeot == 'polygon':
            raise Error('passed method 1:m but jgeot != polygon')

    if not expect_j_overlap:
        if not method==0:
            raise Error('for expect_j_overlap=False, method must = 0 (1:m) for validation')
    """

    #=======================================================================
    # data expectation checks
    #=======================================================================
    #make sure none of the joiner fields are already on the layer
    if len(mfnl)>0: #see if there are any fields on the main
        l = basic.linr(jlay_fieldn_l, mfnl, result_type='matching')

        if len(l) > 0:
            #w/a prefix
            if not prefix=='':
                log.debug('%i fields on the joiner \'%s\' are already on \'%s\'... prefixing w/ \'%s\': \n %s'%(
                    len(l), join_vlay.name(), self.vlay.name(), prefix, l))
            else:
                log.debug('%i fields on the joiner \'%s\' are already on \'%s\'...renameing w/ auto-sufix: \n %s'%(
                    len(l), join_vlay.name(), self.vlay.name(), l))

                if not allow_field_rename:
                    raise Error('%i field names overlap: %s'%(len(l), l))

    #make sure that the joiner attributes are not null
    if not join_nullvs:
        if jvlay_selected_only:
            raise Error('not implmeneted')

        #pull thedata
        if join_df is None:
            join_df = vlay_get_fdf(join_vlay, fieldn_l=jlay_fieldn_l, db_f=self.db_f, logger=log)

        #slice to the columns of interest
        join_df = join_df.loc[:, jlay_fieldn_l]

        #check for nulls
        booldf = join_df.isna()

        if np.any(booldf):
            raise Error('got %i nulls on \'%s\' field %s data'%(
                booldf.sum().sum(), join_vlay.name(), jlay_fieldn_l))

    #=======================================================================
    # assemble pars
    #=======================================================================
    #convert predicate to code
    pred_code_l = [predicate_d[name] for name in predicate_l]

    #selection flags
    if selected_only:
        """WARNING! This will limit the output to only these features
        (despite the DISCARD_NONMATCHING flag)"""
        main_input = self._get_sel_obj(self.vlay)
    else:
        main_input = self.vlay

    if jvlay_selected_only:
        join_input = self._get_sel_obj(join_vlay)
    else:
        join_input = join_vlay

    #assemble pars
    ins_d = { 'DISCARD_NONMATCHING' : discard_nomatch,
             'INPUT' : main_input,
             'JOIN' : join_input,
             'JOIN_FIELDS' : jlay_fieldn_l,
             'METHOD' : method,
             'OUTPUT' : 'TEMPORARY_OUTPUT',
             #'NON_MATCHING' : 'TEMPORARY_OUTPUT', #not working as expected. see get_misses
             'PREDICATE' : pred_code_l,
             'PREFIX' : prefix}

    log.info('extracting %i fields from %i feats from \'%s\' to \'%s\' join fields: %s'%
             (len(jlay_fieldn_l), join_vlay.dataProvider().featureCount(),
              join_vlay.name(), self.vlay.name(), jlay_fieldn_l))

    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))

    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)

    res_vlay, join_cnt = res_d['OUTPUT'], res_d['JOINED_COUNT']

    log.debug('got results: \n %s'%res_d)

    #===========================================================================
    # post checks
    #===========================================================================
    hit_fcnt = res_vlay.dataProvider().featureCount()

    #without overlaps and without discarding, feature count must be preserved
    if not expect_overlaps:
        if not discard_nomatch:

            if not hit_fcnt == mfcnt:
                raise Error('in and out fcnts dont match')

    else:
        pass
        #log.debug('expect_overlaps=False, unable to check fcnts')

    #all misses
    if join_cnt == 0:
        log.warning('got no joins from \'%s\' to \'%s\''%(
            self.vlay.name(), join_vlay.name()))

        if not allow_none:
            raise Error('got no joins!')

        if discard_nomatch:
            if not hit_fcnt == 0:
                raise Error('no joins but got some hits')

    #some hits
    else:
        #check there are no nulls
        if discard_nomatch and not join_nullvs:
            #get data on first joiner
            fid_val_ser = vlay_get_fdata(res_vlay, jlay_fieldn_l[0], logger=log, fmt='ser')

            if np.any(fid_val_ser.isna()):
                raise Error('discard=True and join null=FALSe but got %i (of %i) null \'%s\' values in the reuslt'%(
                    fid_val_ser.isna().sum(), len(fid_val_ser), fid_val_ser.name
                    ))

    #=======================================================================
    # get the new field names
    #=======================================================================
    new_fn_l = set(vlay_fieldnl(res_vlay)).difference(vlay_fieldnl(self.vlay))

    #=======================================================================
    # wrap
    #=======================================================================
    log.debug('finished joining %i fields from %i (of %i) feats from \'%s\' to \'%s\' join fields: %s'%
              (len(new_fn_l), join_cnt, self.vlay.dataProvider().featureCount(),
               join_vlay.name(), self.vlay.name(), new_fn_l))

    return res_vlay, new_fn_l, join_cnt
def joinbylocationsummary(self,
        vlay, #polygon layer to sample from
        join_vlay, #layer from which to extract attribute values onto vlay
        jlay_fieldn_l, #list of field names to extract from the join_vlay
        jvlay_selected_only = False, #only consider selected features on the join layer
        predicate_l = ['intersects'],#list of geometric search predicate names (keys of predicate_d)
        smry_l = ['sum'], #data summaries to apply (keys of summaries_d)
        discard_nomatch = False, #Discard records which could not be joined
        use_raw_fn=False, #whether to convert names back to the originals
        layname=None, #name for the result layer (default '<vlay>_jsmry')
        ):
    """Spatially join summary statistics of join_vlay fields onto vlay.

    Wraps the 'qgis:joinbylocationsummary' processing algorithm: for each
    feature on ``vlay``, the requested statistics (``smry_l``) of the
    ``jlay_fieldn_l`` fields are computed over the ``join_vlay`` features
    matching the geometric predicates.

    WARNING: This resets the fids

    discard_nomatch:
        TRUE: two resulting layers have no features in common
        FALSE: in layer retains all non matchers, out layer only has the non-matchers?

    Returns
    -------
    (res_vlay, nfn_l): the joined layer and the new field names added to it.
    """
    """
    view(join_vlay)
    """
    #=======================================================================
    # presets
    #=======================================================================
    algo_nm = 'qgis:joinbylocationsummary'
    #predicate/summary name -> integer code expected by the algorithm
    predicate_d = {'intersects':0,'contains':1,'equals':2,'touches':3,'overlaps':4,'within':5, 'crosses':6}
    summaries_d = {'count':0, 'unique':1, 'min':2, 'max':3, 'range':4, 'sum':5, 'mean':6}
    log = self.logger.getChild('joinbylocationsummary')
    #=======================================================================
    # defaults
    #=======================================================================
    if isinstance(jlay_fieldn_l, set):
        jlay_fieldn_l = list(jlay_fieldn_l)
    #convert predicate names to codes
    pred_code_l = [predicate_d[pred_name] for pred_name in predicate_l]
    #convert summary names to codes
    sum_code_l = [summaries_d[smry_str] for smry_str in smry_l]
    if layname is None: layname = '%s_jsmry'%vlay.name()
    #=======================================================================
    # prechecks
    #=======================================================================
    if not isinstance(jlay_fieldn_l, list):
        raise Error('expected a list')
    #check requested join fields are present on the join layer
    fn_l = [f.name() for f in join_vlay.fields()]
    s = set(jlay_fieldn_l).difference(fn_l)
    assert len(s)==0, 'requested join fields not on layer: %s'%s
    #check crs match between the two layers
    assert join_vlay.crs().authid() == vlay.crs().authid()
    #=======================================================================
    # assemble pars
    #=======================================================================
    main_input=vlay
    if jvlay_selected_only:
        #restrict the join layer to its current selection
        join_input = self._get_sel_obj(join_vlay)
    else:
        join_input = join_vlay
    #assemble pars
    ins_d = { 'DISCARD_NONMATCHING' : discard_nomatch,
        'INPUT' : main_input,
        'JOIN' : join_input,
        'JOIN_FIELDS' : jlay_fieldn_l,
        'OUTPUT' : 'TEMPORARY_OUTPUT',
        'PREDICATE' : pred_code_l,
        'SUMMARIES' : sum_code_l,
        }
    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    res_vlay = res_d['OUTPUT']
    #===========================================================================
    # post formatting
    #===========================================================================
    res_vlay.setName(layname) #reset the name
    #get new field names: those on the result but not on the input
    nfn_l = set([f.name() for f in res_vlay.fields()]).difference([f.name() for f in vlay.fields()])
    """
    view(res_vlay)
    """
    #=======================================================================
    # post check
    #=======================================================================
    #warn (but don't fail) on any joined field that came back all null
    for fn in nfn_l:
        rser = vlay_get_fdata(res_vlay, fieldn=fn, logger=log, fmt='ser')
        if rser.isna().all().all():
            log.warning('%s \'%s\' got all nulls'%(vlay.name(), fn))
    #=======================================================================
    # rename fields
    #=======================================================================
    if use_raw_fn:
        #strip the summary suffix (e.g. '_sum') to recover the original names
        assert len(smry_l)==1, 'rename only allowed for single sample stat'
        rnm_d = {s:s.replace('_%s'%smry_l[0],'') for s in nfn_l}
        s = set(rnm_d.values()).symmetric_difference(jlay_fieldn_l)
        assert len(s)==0, 'failed to convert field names'
        res_vlay = vlay_rename_fields(res_vlay, rnm_d, logger=log)
        nfn_l = jlay_fieldn_l
    log.info('sampled \'%s\' w/ \'%s\' (%i hits) and \'%s\'to get %i new fields \n %s'%(
        join_vlay.name(), vlay.name(), res_vlay.dataProvider().featureCount(),
        smry_l, len(nfn_l), nfn_l))
    return res_vlay, nfn_l
def joinattributestable(self, #join csv data to a vector layer
        vlay, table_fp, fieldNm,
        method = 1, #join type
        #- 0: Create separate feature for each matching feature (one-to-many)
        #- 1: Take attributes of the first matching feature only (one-to-one)
        csv_params = {'encoding':'System',
                      'type':'csv',
                      'maxFields':'10000',
                      'detectTypes':'yes',
                      'geomType':'none',
                      'subsetIndex':'no',
                      'watchFile':'no'},
        logger=None,
        layname=None,
        ):
    """Join attributes from a csv file onto a vector layer.

    Loads ``table_fp`` as a delimited-text layer then runs
    'native:joinattributestable', linking the two on the common field
    ``fieldNm``. Non-matching features are discarded.

    Returns the joined QgsVectorLayer.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger = self.logger
    if layname is None:
        layname = '%s_j'%vlay.name()
    algo_nm = 'native:joinattributestable'
    log = logger.getChild('joinattributestable') #fix: honor the passed logger (was self.logger)
    #=======================================================================
    # prechecks
    #=======================================================================
    assert isinstance(vlay, QgsVectorLayer)
    assert os.path.exists(table_fp)
    assert fieldNm in [f.name() for f in vlay.fields()], 'vlay missing link field %s'%fieldNm
    #=======================================================================
    # setup table layer
    #=======================================================================
    uriW = QgsDataSourceUri()
    for pName, pValue in csv_params.items():
        uriW.setParam(pName, pValue)
    table_uri = r'file:///' + table_fp.replace('\\','/') +'?'+ str(uriW.encodedUri(), 'utf-8')
    table_vlay = QgsVectorLayer(table_uri,'table',"delimitedtext")
    assert fieldNm in [f.name() for f in table_vlay.fields()], 'table missing link field %s'%fieldNm
    #=======================================================================
    # assemble pars
    #=======================================================================
    #fix: link on the requested field (was hard-coded to 'xid', ignoring fieldNm)
    ins_d = { 'DISCARD_NONMATCHING' : True,
        'FIELD' : fieldNm, 'FIELDS_TO_COPY' : [],
        'FIELD_2' : fieldNm,
        'INPUT' : vlay,
        'INPUT_2' : table_vlay,
        'METHOD' : method,
        'OUTPUT' : 'TEMPORARY_OUTPUT', 'PREFIX' : '' }
    #=======================================================================
    # execute
    #=======================================================================
    #fix: log the actual algorithm (message previously said 'native:buffer')
    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    res_vlay = res_d['OUTPUT']
    res_vlay.setName(layname) #reset the name
    log.debug('finished w/ %i feats'%res_vlay.dataProvider().featureCount())
    return res_vlay
def cliprasterwithpolygon(self,
        rlay_raw,
        poly_vlay,
        layname = None,
        #output = 'TEMPORARY_OUTPUT',
        logger = None,
        ):
    """Clip a raster to a polygon mask via 'gdal:cliprasterbymasklayer'.

    Returns the clipped raster loaded from the algorithm's temporary output.
    Raises Error when the algorithm produced no output file.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    logger = self.logger if logger is None else logger
    log = logger.getChild('cliprasterwithpolygon')
    layname = '%s_clipd'%rlay_raw.name() if layname is None else layname
    algo_nm = 'gdal:cliprasterbymasklayer'
    #=======================================================================
    # precheck
    #=======================================================================
    assert isinstance(rlay_raw, QgsRasterLayer)
    assert isinstance(poly_vlay, QgsVectorLayer)
    #the mask must be polygonal and share the raster's crs
    assert 'Poly' in QgsWkbTypes().displayString(poly_vlay.wkbType())
    assert rlay_raw.crs() == poly_vlay.crs()
    #=======================================================================
    # run algo
    #=======================================================================
    pars_d = dict(
        ALPHA_BAND=False,
        CROP_TO_CUTLINE=True,
        DATA_TYPE=0,
        EXTRA='',
        INPUT=rlay_raw,
        KEEP_RESOLUTION=True,
        MASK=poly_vlay,
        MULTITHREADING=False,
        NODATA=None,
        OPTIONS='',
        OUTPUT='TEMPORARY_OUTPUT',
        SET_RESOLUTION=False,
        SOURCE_CRS=None,
        TARGET_CRS=None,
        X_RESOLUTION=None,
        Y_RESOLUTION=None,
        )
    log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, pars_d))
    out_d = processing.run(algo_nm, pars_d, feedback=self.feedback)
    log.debug('finished w/ \n %s'%out_d)
    if not os.path.exists(out_d['OUTPUT']):
        """failing intermittently"""
        raise Error('failed to get a result')
    res_rlay = QgsRasterLayer(out_d['OUTPUT'], layname)
    #=======================================================================
    # post check
    #=======================================================================
    assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
    assert res_rlay.isValid()
    res_rlay.setName(layname) #reset the name
    log.debug('finished w/ %s'%res_rlay.name())
    return res_rlay
def cliprasterwithpolygon2(self, #with saga
        rlay_raw,
        poly_vlay,
        ofp = None,
        layname = None,
        #output = 'TEMPORARY_OUTPUT',
        logger = None,
        ):
    """Clip a raster with a polygon mask using 'saga:cliprasterwithpolygon'.

    Unlike cliprasterwithpolygon() (gdal-based), the saga algorithm writes
    to an explicit .sdat filepath (``ofp``, defaulting into self.out_dir).

    Returns the clipped QgsRasterLayer.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger = self.logger
    log = logger.getChild('cliprasterwithpolygon')
    if layname is None:
        if not ofp is None:
            layname = os.path.splitext(os.path.split(ofp)[1])[0]
        else:
            layname = '%s_clipd'%rlay_raw.name()
    if ofp is None:
        ofp = os.path.join(self.out_dir,layname+'.sdat')
    #=======================================================================
    # precheck
    #=======================================================================
    #fix: single output-path check (was duplicated with inconsistent
    #handling and a 'requseted' typo); delete-on-overwrite kept
    if os.path.exists(ofp):
        msg = 'requested filepath exists: %s'%ofp
        if self.overwrite:
            log.warning('DELETING '+msg)
            os.remove(ofp)
        else:
            raise Error(msg)
    if not os.path.exists(os.path.dirname(ofp)):
        os.makedirs(os.path.dirname(ofp))
    algo_nm = 'saga:cliprasterwithpolygon'
    #assert QgsRasterLayer.isValidRasterFileName(ofp), 'invalid filename: %s'%ofp
    assert 'Poly' in QgsWkbTypes().displayString(poly_vlay.wkbType())
    assert rlay_raw.crs() == poly_vlay.crs()
    #=======================================================================
    # run algo
    #=======================================================================
    ins_d = { 'INPUT' : rlay_raw,
        'OUTPUT' : ofp,
        'POLYGONS' : poly_vlay }
    log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    log.debug('finished w/ \n %s'%res_d)
    if not os.path.exists(res_d['OUTPUT']):
        #NOTE: this algo has been observed failing intermittently
        raise Error('failed to get a result')
    res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
    #=======================================================================
    # post check
    #=======================================================================
    assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
    assert res_rlay.isValid()
    res_rlay.setName(layname) #reset the name
    log.debug('finished w/ %s'%res_rlay.name())
    return res_rlay
def srastercalculator(self,
        formula,
        rlay_d, #container of raster layers to perform calculations on
        logger=None,
        layname=None,
        ofp=None,
        ):
    """Run 'saga:rastercalculator' on a keyed container of rasters.

    ``rlay_d`` must contain key 'a' (the primary grid); any additional
    entries are passed as XGRIDS. The result is written to ``ofp`` and
    returned as a QgsRasterLayer.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger = self.logger
    log = logger.getChild('srastercalculator')
    assert 'a' in rlay_d
    if layname is None:
        if not ofp is None:
            layname = os.path.splitext(os.path.split(ofp)[1])[0]
        else:
            layname = '%s_calc'%rlay_d['a'].name()
    if ofp is None:
        ofp = os.path.join(self.out_dir, layname+'.sdat')
    if not os.path.exists(os.path.dirname(ofp)):
        log.info('building basedir: %s'%os.path.dirname(ofp))
        os.makedirs(os.path.dirname(ofp))
    if os.path.exists(ofp):
        msg = 'requested filepath exists: %s'%ofp #fix: 'requseted' typo
        if self.overwrite:
            log.warning(msg)
            os.remove(ofp)
        else:
            raise Error(msg)
    #=======================================================================
    # execute
    #=======================================================================
    algo_nm = 'saga:rastercalculator'
    #fix: don't pop() from the caller's dict (left rlay_d mutated on return)
    ins_d = { 'FORMULA' : formula,
        'GRIDS' : rlay_d['a'],
        'RESAMPLING' : 3,
        'RESULT' : ofp,
        'TYPE' : 7,
        'USE_NODATA' : False,
        'XGRIDS' : [v for k, v in rlay_d.items() if k != 'a']}
    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    log.debug('finished w/ \n %s'%res_d)
    if not os.path.exists(res_d['RESULT']):
        raise Error('failed to get a result')
    res_rlay = QgsRasterLayer(res_d['RESULT'], layname)
    #=======================================================================
    # post check
    #=======================================================================
    assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
    assert res_rlay.isValid()
    res_rlay.setName(layname) #reset the name
    log.debug('finished w/ %s'%res_rlay.name())
    return res_rlay
def grastercalculator(self, #GDAL raster calculator
        formula,
        rlay_d, #container of raster layers to perform calculations on
        nodata=0,
        logger=None,
        layname=None,
        ):
    """Run 'gdal:rastercalculator' on up to six band-tagged rasters.

    ``rlay_d`` maps band tags 'A'..'F' to QgsRasterLayers; 'A' is required
    and every supplied tag must be referenced in ``formula``.

    Returns the result as a QgsRasterLayer.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger = self.logger
    log = logger.getChild('grastercalculator')
    algo_nm = 'gdal:rastercalculator'
    #=======================================================================
    # prechecks
    #=======================================================================
    assert 'A' in rlay_d
    if layname is None:
        #fix: key was lowercase 'a', which raised KeyError since the
        #contract (assert above) requires uppercase 'A'
        layname = '%s_calc'%rlay_d['A'].name()
    #=======================================================================
    # populate
    #=======================================================================
    rlay_d = dict(rlay_d) #fix: copy so the caller's dict isn't mutated
    for rtag in ('A', 'B', 'C', 'D', 'E', 'F'):
        #set dummy placeholders for missing rasters
        if not rtag in rlay_d:
            rlay_d[rtag] = None
        #check what the user passed
        else:
            assert isinstance(rlay_d[rtag], QgsRasterLayer), 'passed bad %s'%rtag
            assert rtag in formula, 'formula is missing a reference to \'%s\''%rtag
    #=======================================================================
    # execute
    #=======================================================================
    ins_d = { 'BAND_A' : 1, 'BAND_B' : -1, 'BAND_C' : -1, 'BAND_D' : -1, 'BAND_E' : -1, 'BAND_F' : -1,
        'EXTRA' : '',
        'FORMULA' : formula,
        'INPUT_A' : rlay_d['A'], 'INPUT_B' : rlay_d['B'], 'INPUT_C' : rlay_d['C'],
        'INPUT_D' : rlay_d['D'], 'INPUT_E' : rlay_d['E'], 'INPUT_F' : rlay_d['F'],
        'NO_DATA' : nodata,
        'OPTIONS' : '',
        'OUTPUT' : 'TEMPORARY_OUTPUT',
        'RTYPE' : 5 }
    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    log.debug('finished w/ \n %s'%res_d)
    assert os.path.exists(res_d['OUTPUT']), 'failed to get result'
    res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
    #=======================================================================
    # post check
    #=======================================================================
    assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
    assert res_rlay.isValid()
    res_rlay.setName(layname) #reset the name
    log.debug('finished w/ %s'%res_rlay.name())
    return res_rlay
def qrastercalculator(self, #QGIS native raster calculator
        formula,
        ref_layer = None, #reference layer
        logger=None,
        layname=None,
        ):
    """Execute 'qgis:rastercalculator' on a formula string.

    Better to use the QgsRasterCalculator constructor directly.

    Returns the result as a QgsRasterLayer.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger = self.logger
    log = logger.getChild('qrastercalculator')
    algo_nm = 'qgis:rastercalculator'
    if layname is None:
        if ref_layer is None:
            layname = 'qrastercalculator'
        else:
            layname = '%s_calc'%ref_layer.name()
    #=======================================================================
    # execute
    #=======================================================================
    """
    formula = '\'haz_100yr_cT2@1\'-\'dtm_cT1@1\''
    """
    ins_d = { 'CELLSIZE' : 0,
        'CRS' : None,
        'EXPRESSION' : formula,
        'EXTENT' : None,
        'LAYERS' : [ref_layer], #reference layer
        'OUTPUT' : 'TEMPORARY_OUTPUT' }
    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    log.debug('finished w/ \n %s'%res_d)
    #fix: the result key is 'OUTPUT' (matching ins_d); reading 'RESULT'
    #raised KeyError on every call
    if not os.path.exists(res_d['OUTPUT']):
        raise Error('failed to get a result')
    res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
    #=======================================================================
    # post check
    #=======================================================================
    assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
    assert res_rlay.isValid()
    res_rlay.setName(layname) #reset the name
    log.debug('finished w/ %s'%res_rlay.name())
    return res_rlay
def addgeometrycolumns(self, #add geometry data as columns
        vlay,
        layname=None,
        logger=None,
        ):
    """Export vlay with its geometry attributes added as new columns.

    Wraps 'qgis:exportaddgeometrycolumns' (calculated in the layer's crs).
    Returns the new QgsVectorLayer.
    """
    if logger is None: logger=self.logger
    log = logger.getChild('addgeometrycolumns')
    algo_nm = 'qgis:exportaddgeometrycolumns'
    #=======================================================================
    # assemble pars
    #=======================================================================
    ins_d = { 'CALC_METHOD' : 0, #use layer's crs
        'INPUT' : vlay,
        'OUTPUT' : 'TEMPORARY_OUTPUT'}
    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    res_vlay = res_d['OUTPUT']
    #===========================================================================
    # post formatting
    #===========================================================================
    if layname is None:
        #fix: name from the passed layer (was self.vlay, ignoring the argument)
        layname = '%s_gcol'%vlay.name()
    res_vlay.setName(layname) #reset the name
    return res_vlay
def buffer(self, vlay,
        distance, #buffer distance to apply
        dissolve = False,
        end_cap_style = 0,
        join_style = 0,
        miter_limit = 2,
        segments = 5,
        logger=None,
        layname=None,
        ):
    """Buffer vlay's geometries by ``distance`` using 'native:buffer'.

    Raises Error when distance is zero or NaN. Returns the buffered layer.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger = self.logger
    if layname is None:
        layname = '%s_buf'%vlay.name()
    algo_nm = 'native:buffer'
    log = logger.getChild('buffer') #fix: honor the passed logger (was self.logger)
    distance = float(distance)
    #=======================================================================
    # prechecks
    #=======================================================================
    if distance==0 or np.isnan(distance):
        raise Error('got no buffer!')
    #=======================================================================
    # build ins
    #=======================================================================
    ins_d = {
        'INPUT': vlay,
        'DISSOLVE' : dissolve,
        'DISTANCE' : distance,
        'END_CAP_STYLE' : end_cap_style,
        'JOIN_STYLE' : join_style,
        'MITER_LIMIT' : miter_limit,
        'OUTPUT' : 'TEMPORARY_OUTPUT',
        'SEGMENTS' : segments}
    #=======================================================================
    # execute
    #=======================================================================
    #fix: log via algo_nm for consistency (was a hard-coded literal)
    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    res_vlay = res_d['OUTPUT']
    res_vlay.setName(layname) #reset the name
    log.debug('finished')
    return res_vlay
def selectbylocation(self, #select features (from main laye) by geoemtric relation with comp_vlay
        vlay, #vlay to select features from
        comp_vlay, #vlay to compare
        result_type = 'select',
        method= 'new', #Modify current selection by
        pred_l = ['intersect'], #list of geometry predicate names
        #expectations
        allow_none = False,
        logger = None,
        ):
    """Select features on vlay by their spatial relation to comp_vlay.

    Wraps 'native:selectbylocation'; the resulting selection is packaged
    by self._get_sel_res according to ``result_type``.
    """
    #=======================================================================
    # setups and defaults
    #=======================================================================
    logger = self.logger if logger is None else logger
    log = logger.getChild('selectbylocation')
    algo_nm = 'native:selectbylocation'
    #===========================================================================
    # parameter translation (name -> algorithm code)
    #===========================================================================
    meth_d = {'new':0}
    pred_d = {
        'are within':6,
        'intersect':0,
        'overlap':5,
        }
    pred_code_l = [pred_d[nm] for nm in pred_l]
    #=======================================================================
    # assemble and execute
    #=======================================================================
    pars_d = {
        'INPUT' : vlay,
        'INTERSECT' : comp_vlay,
        'METHOD' : meth_d[method],
        'PREDICATE' : pred_code_l }
    log.debug('executing \'%s\' on \'%s\' with: \n %s'
              %(algo_nm, vlay.name(), pars_d))
    _ = processing.run(algo_nm, pars_d, feedback=self.feedback)
    #=======================================================================
    # check the selection (the algo selects in-place on vlay)
    #=======================================================================
    if vlay.selectedFeatureCount() == 0:
        msg = 'No features selected!'
        if allow_none:
            log.warning(msg)
        else:
            raise Error(msg)
    #=======================================================================
    # wrap
    #=======================================================================
    log.debug('selected %i (of %i) features from %s'
              %(vlay.selectedFeatureCount(),vlay.dataProvider().featureCount(), vlay.name()))
    return self._get_sel_res(vlay, result_type=result_type, logger=log, allow_none=allow_none)
def saveselectedfeatures(self,#generate a memory layer from the current selection
        vlay,
        logger=None,
        allow_none = False,
        layname=None):
    """Copy vlay's currently-selected features into a new memory layer.

    Returns the new QgsVectorLayer, or None when nothing is selected and
    allow_none=True. Raises Error on an empty selection otherwise.
    """
    #===========================================================================
    # setups and defaults
    #===========================================================================
    log = (self.logger if logger is None else logger).getChild('saveselectedfeatures')
    algo_nm = 'native:saveselectedfeatures'
    if layname is None:
        layname = '%s_sel'%vlay.name()
    #=======================================================================
    # precheck: bail (or fail) on an empty selection
    #=======================================================================
    if vlay.selectedFeatureCount() == 0:
        msg = 'No features selected!'
        if not allow_none:
            raise Error(msg)
        log.warning(msg)
        return None
    log.debug('on \'%s\' with %i feats selected'%(
        vlay.name(), vlay.selectedFeatureCount()))
    #=======================================================================
    # build inputs and execute
    #=======================================================================
    pars_d = {'INPUT' : vlay,
              'OUTPUT' : 'TEMPORARY_OUTPUT'}
    log.debug('\'native:saveselectedfeatures\' on \'%s\' with: \n %s'
              %(vlay.name(), pars_d))
    res_vlay = processing.run(algo_nm, pars_d, feedback=self.feedback)['OUTPUT']
    assert isinstance(res_vlay, QgsVectorLayer)
    #===========================================================================
    # wrap
    #===========================================================================
    res_vlay.setName(layname) #reset the name
    return res_vlay
def polygonfromlayerextent(self,
        vlay,
        round_to=0, #adds a buffer to the result?
        logger=None,
        layname=None):
    """Build a vector layer holding vlay's minimum bounding rectangle.

    Wraps 'qgis:polygonfromlayerextent': a single N-S oriented rectangle
    polygon covering the input layer; the extent can optionally be
    enlarged to a rounded value via ``round_to``.
    """
    #===========================================================================
    # setups and defaults
    #===========================================================================
    log = (self.logger if logger is None else logger).getChild('polygonfromlayerextent')
    algo_nm = 'qgis:polygonfromlayerextent'
    layname = '%s_exts'%vlay.name() if layname is None else layname
    #=======================================================================
    # build inputs
    #=======================================================================
    pars_d = {'INPUT' : vlay,
              'OUTPUT' : 'TEMPORARY_OUTPUT',
              'ROUND_TO':round_to}
    log.debug('\'%s\' on \'%s\' with: \n %s'
              %(algo_nm, vlay.name(), pars_d))
    #execute
    res_vlay = processing.run(algo_nm, pars_d, feedback=self.feedback)['OUTPUT']
    assert isinstance(res_vlay, QgsVectorLayer)
    #===========================================================================
    # wrap
    #===========================================================================
    res_vlay.setName(layname) #reset the name
    return res_vlay
def fixgeometries(self, vlay,
        logger=None,
        layname=None,
        ):
    """Repair invalid geometries on vlay with 'native:fixgeometries'.

    Returns the repaired QgsVectorLayer.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger = self.logger
    log = logger.getChild('fixgeometries') #fix: honor the passed logger (was self.logger)
    if layname is None:
        layname = '%s_fix'%vlay.name()
    algo_nm = 'native:fixgeometries'
    #=======================================================================
    # build ins
    #=======================================================================
    #(removed leftover junk docstring copied from buffer())
    ins_d = {
        'INPUT': vlay,
        'OUTPUT' : 'TEMPORARY_OUTPUT',
        }
    #=======================================================================
    # execute
    #=======================================================================
    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    res_vlay = res_d['OUTPUT']
    res_vlay.setName(layname) #reset the name
    log.debug('finished')
    return res_vlay
def createspatialindex(self,
        in_vlay,
        logger=None,
        ):
    """Build a spatial index on in_vlay via 'qgis:createspatialindex'.

    The index is attached to the layer in place; nothing is returned.
    """
    #=======================================================================
    # presets
    #=======================================================================
    algo_nm = 'qgis:createspatialindex'
    if logger is None: logger=self.logger
    log = logger.getChild('createspatialindex') #fix: honor the passed logger (was self.logger)
    #fix: removed stray no-op expression statement 'in_vlay'
    #=======================================================================
    # assemble pars and execute
    #=======================================================================
    ins_d = { 'INPUT' : in_vlay }
    log.debug('executing \'%s\' with ins_d: \n %s'%(algo_nm, ins_d))
    processing.run(algo_nm, ins_d, feedback=self.feedback)
    return
def warpreproject(self, #reproject a raster
        rlay_raw,
        crsOut = None, #crs to re-project to (default: self.crs)
        layname = None,
        options = 'COMPRESS=DEFLATE|PREDICTOR=2|ZLEVEL=9', #gdal creation options
        output = 'TEMPORARY_OUTPUT',
        logger = None,
        ):
    """Reproject a raster onto a new CRS using 'gdal:warpreproject'.

    Returns the reprojected raster as a QgsRasterLayer.
    Raises Error when the algorithm produced no output file.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger = self.logger
    log = logger.getChild('warpreproject')
    if layname is None:
        layname = '%s_rproj'%rlay_raw.name()
    algo_nm = 'gdal:warpreproject'
    if crsOut is None: crsOut = self.crs #just take the project's
    #=======================================================================
    # precheck
    #=======================================================================
    """the algo accepts 'None'... but not sure why we'd want to do this"""
    assert isinstance(crsOut, QgsCoordinateReferenceSystem), 'bad crs type'
    assert isinstance(rlay_raw, QgsRasterLayer)
    #reprojecting onto the layer's own crs is treated as a caller error
    assert rlay_raw.crs() != crsOut, 'layer already on this CRS!'
    #=======================================================================
    # run algo
    #=======================================================================
    ins_d = {
        'DATA_TYPE' : 0,
        'EXTRA' : '',
        'INPUT' : rlay_raw,
        'MULTITHREADING' : False,
        'NODATA' : None,
        'OPTIONS' : options,
        'OUTPUT' : output,
        'RESAMPLING' : 0,
        'SOURCE_CRS' : None,
        'TARGET_CRS' : crsOut,
        'TARGET_EXTENT' : None,
        'TARGET_EXTENT_CRS' : None,
        'TARGET_RESOLUTION' : None,
        }
    log.debug('executing \'%s\' with ins_d: \n %s \n\n'%(algo_nm, ins_d))
    res_d = processing.run(algo_nm, ins_d, feedback=self.feedback)
    log.debug('finished w/ \n %s'%res_d)
    if not os.path.exists(res_d['OUTPUT']):
        """failing intermittently"""
        raise Error('failed to get a result')
    res_rlay = QgsRasterLayer(res_d['OUTPUT'], layname)
    #=======================================================================
    # #post check
    #=======================================================================
    assert isinstance(res_rlay, QgsRasterLayer), 'got bad type: %s'%type(res_rlay)
    assert res_rlay.isValid()
    #reprojection shouldn't drop or add bands
    assert rlay_raw.bandCount()==res_rlay.bandCount(), 'band count mismatch'
    res_rlay.setName(layname) #reset the name
    log.debug('finished w/ %s'%res_rlay.name())
    return res_rlay
#===========================================================================
# ALGOS - CUSTOM--------
#===========================================================================
def vlay_pts_dist(self, #get the distance between points in a given order
        vlay_raw,
        ifn = 'fid', #fieldName to index by
        request = None, #optional QgsFeatureRequest controlling order/attributes
        result = 'vlay_append', #result type: 'dict' or 'vlay_append'
        logger=None):
    """Compute the distance from each point feature to its predecessor.

    Features are walked in ascending ``ifn`` order (unless a custom
    request is passed); every feature except the first is assigned the
    distance to the feature before it.

    Returns
    -------
    dict {ifn value: distance} when result=='dict'; otherwise a new vector
    layer with a '<ifn>_dist' column appended (result=='vlay_append').
    """
    #===========================================================================
    # defaults
    #===========================================================================
    if logger is None: logger=self.logger
    log = logger.getChild('vlay_pts_dist')
    if request is None:
        #order by the indexer and only fetch that attribute
        request = QgsFeatureRequest(
            ).addOrderBy(ifn, ascending=True
            ).setSubsetOfAttributes([ifn], vlay_raw.fields())
    #===========================================================================
    # precheck
    #===========================================================================
    assert 'Point' in QgsWkbTypes().displayString(vlay_raw.wkbType()), 'passed bad geo type'
    #see if indexer is unique
    ifn_d = vlay_get_fdata(vlay_raw, fieldn=ifn, logger=log)
    assert len(set(ifn_d.values()))==len(ifn_d)
    #===========================================================================
    # loop and calc
    #===========================================================================
    d = dict() #{indexer value: distance to the previous point}
    first, geo_prev = True, None
    for i, feat in enumerate(vlay_raw.getFeatures(request)):
        assert not feat.attribute(ifn) in d, 'indexer is not unique!'
        geo = feat.geometry()
        if first:
            #first feature has no predecessor: no entry for it
            first=False
        else:
            d[feat.attribute(ifn)] = geo.distance(geo_prev)
        geo_prev = geo
    log.info('got %i distances using \"%s\''%(len(d), ifn))
    #===========================================================================
    # check
    #===========================================================================
    #every feature except the first should have received a distance
    assert len(d) == (vlay_raw.dataProvider().featureCount() -1)
    #===========================================================================
    # results typing
    #===========================================================================
    if result == 'dict': return d
    elif result == 'vlay_append':
        #data manip
        ncoln = '%s_dist'%ifn
        df_raw = vlay_get_fdf(vlay_raw, logger=log)
        df = df_raw.join(pd.Series(d, name=ncoln), on=ifn)
        assert df[ncoln].isna().sum()==1, 'expected 1 null' #only the first feature
        #reassemble with the original geometry
        geo_d = vlay_get_fdata(vlay_raw, geo_obj=True, logger=log)
        return self.vlay_new_df2(df, geo_d=geo_d, logger=log,
            layname='%s_%s'%(vlay_raw.name(), ncoln))
#==========================================================================
# privates----------
#==========================================================================
def _field_handlr(self, #common handling for fields
                  vlay, #layer to check for field presence
                  fieldn_l, #list of fields to handle
                  invert = False, #return the complement of fieldn_l instead
                  logger=None,
                  ):
    """Normalize a field-name request into a plain list of field names.

    Accepts the string 'all' (expands to every field on *vlay*), or a
    tuple/set/ndarray/list of names. With invert=True, returns the layer
    fields NOT in fieldn_l.

    Raises Error on an unrecognized string or a non-list-like input.
    """
    #=======================================================================
    # defaults
    #=======================================================================
    if logger is None: logger = self.logger
    log = logger.getChild('_field_handlr')

    #=======================================================================
    # all flag
    #=======================================================================
    if isinstance(fieldn_l, str):
        if fieldn_l == 'all':
            fieldn_l = vlay_fieldnl(vlay)
            log.debug('user passed \'all\', retrieved %i fields: \n    %s'%(
                len(fieldn_l), fieldn_l))
        else:
            raise Error('unrecognized fieldn_l\'%s\''%fieldn_l)

    #=======================================================================
    # type setting
    #=======================================================================
    #idiom fix: one isinstance() call with a tuple of types
    if isinstance(fieldn_l, (tuple, set, np.ndarray)):
        fieldn_l = list(fieldn_l)

    #=======================================================================
    # checking
    #=======================================================================
    if not isinstance(fieldn_l, list):
        raise Error('expected a list for fields, instead got \n    %s'%fieldn_l)

    #vlay_check(vlay, exp_fieldns=fieldn_l)

    #=======================================================================
    # handle inversions
    #=======================================================================
    if invert:
        big_fn_s = set(vlay_fieldnl(vlay)) #get all the fields

        #get the difference
        fieldn_l = list(big_fn_s.difference(set(fieldn_l)))

        log.debug('inverted selection from %i to %i fields'%
                  (len(big_fn_s), len(fieldn_l)))

    return fieldn_l
def _get_sel_obj(self, vlay): #get the processing object for algos with selections
    """Wrap the current selection on *vlay* for use as a processing algo input."""
    plog = self.logger.getChild('_get_sel_obj')

    assert isinstance(vlay, QgsVectorLayer)

    #guard: feeding an algo an empty selection would silently do nothing
    if vlay.selectedFeatureCount() == 0:
        raise Error('Nothing selected on \'%s\'. exepects some pre selection'%(vlay.name()))

    #processing resolves the source by layer id, so the layer must be
    #registered with the project store first
    if self.qproj.mapLayer(vlay.id()) is None:
        #layer not on project yet. add it
        if self.qproj.addMapLayer(vlay, False) is None:
            raise Error('failed to add map layer \'%s\''%vlay.name())

    plog.debug('based on %i selected features from \'%s\''%(
        len(vlay.selectedFeatureIds()), vlay.name()))

    return QgsProcessingFeatureSourceDefinition(
        source=vlay.id(),
        selectedFeaturesOnly=True,
        featureLimit=-1,
        geometryCheck=QgsFeatureRequest.GeometryAbortOnInvalid)
def _get_sel_res(self, #handler for returning selection like results
                 vlay, #result layer (with selection on it
                 result_type='select',
                 #expectiions
                 allow_none = False,
                 logger=None
                 ):
    """Convert the selection on *vlay* into the form the caller asked for.

    result_type: 'select' (leave selection, return None), 'fids',
    'feats' ({fid: QgsFeature}), or 'layer' (new layer of selected feats).
    """
    #=======================================================================
    # setup
    #=======================================================================
    if logger is None: logger = self.logger
    log = logger.getChild('_get_sel_res')

    #=======================================================================
    # precheck
    #=======================================================================
    if vlay.selectedFeatureCount() == 0:
        if not allow_none:
            raise Error('nothing selected')
        return None

    #=======================================================================
    # dispatch on requested result form (early returns)
    #=======================================================================
    if result_type == 'select':
        #selection stays on the layer; nothing to hand back
        return None

    if result_type == 'fids':
        return vlay.selectedFeatureIds()

    if result_type == 'feats':
        return {feat.id(): feat for feat in vlay.getSelectedFeatures()}

    if result_type == 'layer':
        return self.saveselectedfeatures(vlay, logger=log)

    raise Error('unexpected result_type kwarg')
def _in_out_checking(self, res_vlay,
                     ):
    """Placeholder hook for validating result layers; currently a no-op."""
    return None
def __exit__(self, #destructor
             *args, **kwargs):
    """Context-manager teardown: drop all layers held in the map store,
    then defer to the base class for the remaining cleanup."""
    #release every layer this session registered in its store
    self.mstore.removeAllMapLayers()

    #let the parent class finish its own teardown
    super().__exit__(*args, **kwargs)
class MyFeedBackQ(QgsProcessingFeedback):
    """
    wrapper for easier reporting and extended progress

    Dialogs:
        built by QprojPlug.qproj_setup()
    Qworkers:
        built by Qcoms.__init__()
    """

    def __init__(self,
                 logger=mod_logger):
        self.logger = logger.getChild('FeedBack')
        super().__init__()

    #--------------------------------------------------------------------------
    # route QGIS feedback messages into the python logger
    #--------------------------------------------------------------------------
    def setProgressText(self, text):
        self.logger.debug(text)

    def pushInfo(self, info):
        self.logger.info(info)

    def pushCommandInfo(self, info):
        self.logger.info(info)

    def pushDebugInfo(self, info):
        self.logger.info(info)

    def pushConsoleInfo(self, info):
        self.logger.info(info)

    def reportError(self, error, fatalError=False):
        self.logger.error(error)

    def upd_prog(self, #advanced progress handling
                 prog_raw, #pass None to reset
                 method='raw', #whether to append value to the progress
                 ):
        """Update the progress value.

        method='raw'     : set progress to prog_raw directly
        method='append'  : add prog_raw to current progress (capped at 100)
        method='portion' : consume prog_raw percent of the REMAINING progress

        Raises Error on an unrecognized method.
        """
        #=======================================================================
        # defaults
        #=======================================================================
        #get the current progress
        progress = self.progress()

        #=======================================================================
        # resetting
        #=======================================================================
        if prog_raw is None:
            """
            would be nice to reset the progressBar.. .but that would be complicated
            """
            self.setProgress(0)
            return

        #=======================================================================
        # setting
        #=======================================================================
        if method == 'append':
            prog = min(progress + prog_raw, 100)
        elif method == 'raw':
            prog = prog_raw
        elif method == 'portion':
            rem_prog = 100 - progress
            prog = progress + rem_prog * (prog_raw / 100)
        else:
            #bugfix: an unknown method previously fell through and crashed
            #with a NameError on the unbound 'prog'
            raise Error('unrecognized method kwarg: %s' % method)

        assert prog <= 100

        #===================================================================
        # emit signalling
        #===================================================================
        self.setProgress(prog)
#==============================================================================
# FUNCTIONS----------
#==============================================================================
def init_q(gui=False):
    """Initialize a standalone QGIS application instance.

    Returns the QgsApplication; raises Error (chained to the underlying
    exception) if initialization fails.
    """
    try:
        QgsApplication.setPrefixPath(r'C:/OSGeo4W64/apps/qgis-ltr', True)

        app = QgsApplication([], gui)
        #Update prefix path
        #app.setPrefixPath(r"C:\OSGeo4W64\apps\qgis", True)
        app.initQgis()
        #logging.debug(QgsApplication.showSettings())
        """ was throwing unicode error"""
        print(u' QgsApplication.initQgis. version: %s, release: %s'%(
            Qgis.QGIS_VERSION.encode('utf-8'), Qgis.QGIS_RELEASE_NAME.encode('utf-8')))
        return app

    #bugfix: was a bare 'except:' which also swallowed SystemExit and
    #KeyboardInterrupt and discarded the original traceback
    except Exception as e:
        raise Error('QGIS failed to initiate') from e
def vlay_check( #helper to check various expectations on the layer
    vlay,
    exp_fieldns = None, #raise error if these field names are OUT
    uexp_fieldns = None, #raise error if these field names are IN
    real_atts = None, #list of field names to check if attribute value are all real
    bgeot = None, #basic geo type checking
    fcnt = None, #feature count checking. accepts INT or QgsVectorLayer
    fkey = None, #optional secondary key to check
    mlay = False, #check if its a memory layer or not
    chk_valid = False, #check layer validty
    logger = mod_logger,
    db_f = False, #fancy-log flag passed through to basic.linr
    ):
    """Validate *vlay* against the requested expectations.

    Raises Error on the first failed expectation; otherwise logs a
    summary of the checks that ran and returns None.
    NOTE: real_atts, bgeot, fkey and chk_valid are currently disabled
    (their implementations are commented out below).
    """
    #=======================================================================
    # prechecks
    #=======================================================================
    if vlay is None:
        raise Error('got passed an empty vlay')

    if not isinstance(vlay, QgsVectorLayer):
        raise Error('unexpected type: %s'%type(vlay))

    log = logger.getChild('vlay_check')
    checks_l = [] #record of which checks executed (for the wrap log)

    #=======================================================================
    # expected field names
    #=======================================================================
    if not basic.is_null(exp_fieldns): #robust null checking
        skip = False
        if isinstance(exp_fieldns, str):
            if exp_fieldns == 'all':
                skip = True

        if not skip:
            fnl = basic.linr(exp_fieldns, vlay_fieldnl(vlay),
                             'expected field names', vlay.name(),
                             result_type='missing', logger=log, fancy_log=db_f)

            if len(fnl) > 0:
                raise Error('%s missing expected fields: %s'%(
                    vlay.name(), fnl))

            checks_l.append('exp_fieldns=%i'%len(exp_fieldns))

    #=======================================================================
    # unexpected field names
    #=======================================================================
    if not basic.is_null(uexp_fieldns): #robust null checking
        #fields on the layer
        if len(vlay_fieldnl(vlay)) > 0:
            fnl = basic.linr(uexp_fieldns, vlay_fieldnl(vlay),
                             'un expected field names', vlay.name(),
                             result_type='matching', logger=log, fancy_log=db_f)

            if len(fnl) > 0:
                raise Error('%s contains unexpected fields: %s'%(
                    vlay.name(), fnl))
        #no fields on the layer
        else:
            pass

        checks_l.append('uexp_fieldns=%i'%len(uexp_fieldns))

    #=======================================================================
    # null value check (disabled)
    #=======================================================================
    #==========================================================================
    # if not real_atts is None:
    #     #pull this data
    #     df = vlay_get_fdf(vlay, fieldn_l = real_atts, logger=log)
    #     #check for nulls
    #     if np.any(df.isna()):
    #         raise Error('%s got %i nulls on %i expected real fields: %s'%(
    #             vlay.name(), df.isna().sum().sum(), len(real_atts), real_atts))
    #     checks_l.append('real_atts=%i'%len(real_atts))
    #==========================================================================

    #=======================================================================
    # basic geometry type (disabled)
    #=======================================================================
    #==========================================================================
    # if not bgeot is None:
    #     bgeot_lay = vlay_get_bgeo_type(vlay)
    #     if not bgeot == bgeot_lay:
    #         raise Error('basic geometry type expectation \'%s\' does not match layers \'%s\''%(
    #             bgeot, bgeot_lay))
    #     checks_l.append('bgeot=%s'%bgeot)
    #==========================================================================

    #=======================================================================
    # feature count
    #=======================================================================
    if fcnt is not None: #idiom fix: was 'not fcnt is None'
        #accept another layer as the expectation
        if isinstance(fcnt, QgsVectorLayer):
            fcnt = fcnt.dataProvider().featureCount()

        if fcnt != vlay.dataProvider().featureCount():
            raise Error('\'%s\'s feature count (%i) does not match %i'%(
                vlay.name(), vlay.dataProvider().featureCount(), fcnt))

        checks_l.append('fcnt=%i'%fcnt)

    #=======================================================================
    # fkey (disabled)
    #=======================================================================
    #==============================================================================
    # if isinstance(fkey, str):
    #     fnl = vlay_fieldnl(vlay)
    #     if not fkey in fnl:
    #         raise Error('fkey \'%s\' not in the fields'%fkey)
    #     fkeys_ser = vlay_get_fdata(vlay, fkey, logger=log, fmt='ser').sort_values()
    #     if not np.issubdtype(fkeys_ser.dtype, np.number):
    #         raise Error('keys are non-numeric. type: %s'%fkeys_ser.dtype)
    #     if not fkeys_ser.is_unique:
    #         raise Error('\'%s\' keys are not unique'%fkey)
    #     if not fkeys_ser.is_monotonic:
    #         raise Error('fkeys are not monotonic')
    #     if np.any(fkeys_ser.isna()):
    #         raise Error('fkeys have nulls')
    #     checks_l.append('fkey \'%s\'=%i'%(fkey, len(fkeys_ser)))
    #==============================================================================

    #=======================================================================
    # storage type
    #=======================================================================
    if mlay:
        if 'Memory' not in vlay.dataProvider().storageType(): #idiom fix
            raise Error('\"%s\' unexpected storage type: %s'%(
                vlay.name(), vlay.dataProvider().storageType()))

        checks_l.append('mlay')

    #=======================================================================
    # validty (disabled)
    #=======================================================================
    #==========================================================================
    # if chk_valid:
    #     vlay_chk_validty(vlay, chk_geo=True)
    #     checks_l.append('validity')
    #==========================================================================

    #=======================================================================
    # wrap
    #=======================================================================
    log.debug('\'%s\' passed %i checks: %s'%(
        vlay.name(), len(checks_l), checks_l))
    return
def load_vlay( #load a layer from a file
    fp,
    providerLib='ogr',
    logger=mod_logger):
    """Load a vector layer from the file at *fp* and sanity-check it.

    Raises Error when the layer fails to load or carries no geometry.
    """
    log = logger.getChild('load_vlay')

    assert os.path.exists(fp), 'requested file does not exist: %s'%fp

    #derive the layer name from the file name (sans extension)
    basefn = os.path.splitext(os.path.split(fp)[1])[0]

    #build the layer from the file
    vlay_raw = QgsVectorLayer(fp, basefn, providerLib)

    #validity guard
    if not vlay_raw.isValid():
        log.error('loaded vlay \'%s\' is not valid. \n \n did you initilize?'%vlay_raw.name())
        raise Error('vlay loading produced an invalid layer')

    #geometry guard (wkb type 100 == NoGeometry)
    if vlay_raw.wkbType() == 100:
        log.error('loaded vlay has NoGeometry')
        raise Error('no geo')

    #==========================================================================
    # report
    #==========================================================================
    vlay = vlay_raw
    dp = vlay.dataProvider()

    log.info('loaded vlay \'%s\' as \'%s\' %s geo with %i feats from file: \n %s'
             %(vlay.name(), dp.storageType(), QgsWkbTypes().displayString(vlay.wkbType()), dp.featureCount(), fp))

    return vlay
def vlay_write( #write a VectorLayer
    vlay, out_fp,
    driverName='GPKG',
    fileEncoding = "CP1250",
    opts = None, #QgsVectorFileWriter.SaveVectorOptions; built fresh per call when None
    overwrite=False,
    logger=mod_logger):
    """Write *vlay* to *out_fp* (GeoPackage only); returns out_fp on success.

    help(QgsVectorFileWriter.SaveVectorOptions)
    QgsVectorFileWriter.SaveVectorOptions.driverName='GPKG'
    opt2 = QgsVectorFileWriter.BoolOption(QgsVectorFileWriter.CreateOrOverwriteFile)
    help(QgsVectorFileWriter)
    TODO: Move this back onto Qcoms

    bugfixes vs. previous version:
        - 'opts' was a mutable default (one shared SaveVectorOptions instance,
          mutated on every call); now constructed per call
        - the invalid-layer check was missing its 'raise' (it was a no-op)
    """
    #==========================================================================
    # defaults
    #==========================================================================
    log = logger.getChild('vlay_write')

    if opts is None:
        opts = QgsVectorFileWriter.SaveVectorOptions() #empty options object

    #===========================================================================
    # assemble options
    #===========================================================================
    opts.driverName = driverName
    opts.fileEncoding = fileEncoding

    #===========================================================================
    # checks
    #===========================================================================
    #file extension
    fhead, ext = os.path.splitext(out_fp)

    if not 'gpkg' in ext:
        raise Error('unexpected extension: %s'%ext)

    if os.path.exists(out_fp):
        msg = 'requested file path already exists!. overwrite=%s \n    %s'%(
            overwrite, out_fp)
        if overwrite:
            log.warning(msg)
            os.remove(out_fp) #workaround... should be away to overwrite with the QgsVectorFileWriter
        else:
            raise Error(msg)

    if vlay.dataProvider().featureCount() == 0:
        raise Error('\'%s\' has no features!'%(
            vlay.name()))

    if not vlay.isValid():
        raise Error('passed invalid layer') #bugfix: 'raise' was missing here

    error = QgsVectorFileWriter.writeAsVectorFormatV2(
        vlay, out_fp,
        QgsCoordinateTransformContext(),
        opts,
        )

    #=======================================================================
    # wrap and check
    #=======================================================================
    if error[0] == QgsVectorFileWriter.NoError:
        log.info('layer \' %s \' written to: \n %s'%(vlay.name(), out_fp))
        return out_fp

    raise Error('FAILURE on writing layer \' %s \' with code:\n %s \n %s'%(vlay.name(), error, out_fp))
def vlay_get_fdf( #pull all the feature data and place into a df
        vlay,
        fmt='df', #result fomrat key.
            #dict: {fid:{fieldname:value}}
            #df: index=fids, columns=fieldnames
        #limiters
        request = None, #request to pull data. for more customized requestes.
        fieldn_l = None, #or field name list. for generic requests
        #modifiers
        reindex = None, #optinal field name to reindex df by
        #expectations
        expect_all_real = False, #whether to expect all real results
        allow_none = False,
        db_f = False,
        logger=mod_logger,
        feedback=None): #progress feedback; a fresh MyFeedBackQ when None
    """
    Pull all feature attribute data from *vlay* into a DataFrame (or dict).

    Warning: requests with getFeatures arent working as expected for memory layers

    this could be combined with vlay_get_feats()
    also see vlay_get_fdata() (for a single column)

    bugfixes vs. previous version:
        - 'feedback' defaulted to MyFeedBackQ() evaluated once at import
          (shared mutable default); now built per call
        - the field-length check dereferenced the loop variable 'feat',
          which was unbound when the request yielded zero features; the
          empty-result check now runs first

    RETURNS
    a dictionary in the Qgis attribute dictionary format:
        key: generally feat.id()
        value: a dictionary of {field name: attribute value}
    """
    #===========================================================================
    # setups and defaults
    #===========================================================================
    log = logger.getChild('vlay_get_fdf')

    assert isinstance(vlay, QgsVectorLayer)

    if feedback is None:
        feedback = MyFeedBackQ()

    all_fnl = [fieldn.name() for fieldn in vlay.fields().toList()]

    if fieldn_l is None: #use all the fields
        fieldn_l = all_fnl
    else:
        vlay_check(vlay, fieldn_l, logger=logger, db_f=db_f)

    if allow_none:
        if expect_all_real:
            raise Error('cant allow none and expect all reals')

    #===========================================================================
    # prechecks
    #===========================================================================
    if reindex is not None:
        if not reindex in fieldn_l:
            raise Error('requested reindexer \'%s\' is not a field name'%reindex)

    if not vlay.dataProvider().featureCount() > 0:
        raise Error('no features!')

    if len(fieldn_l) == 0:
        raise Error('no fields!')

    if fmt == 'dict' and not (len(fieldn_l) == len(all_fnl)):
        raise Error('dict results dont respect field slicing')

    assert hasattr(feedback, 'setProgress')

    #===========================================================================
    # build the request
    #===========================================================================
    feedback.setProgress(2)
    if request is None:
        """WARNING: this doesnt seem to be slicing the fields.
        see Alg().deletecolumns()
            but this will re-key things
        request = QgsFeatureRequest().setSubsetOfAttributes(fieldn_l,vlay.fields())"""
        request = QgsFeatureRequest()

    #never want geometry
    request = request.setFlags(QgsFeatureRequest.NoGeometry)

    log.debug('extracting data from \'%s\' on fields: %s'%(vlay.name(), fieldn_l))

    #===========================================================================
    # loop through each feature and extract the data
    #===========================================================================
    fid_attvs = dict() #{fid : {fieldn:value}}
    fcnt = vlay.dataProvider().featureCount()

    for indxr, feat in enumerate(vlay.getFeatures(request)):
        #zip values
        fid_attvs[feat.id()] = feat.attributes()
        feedback.setProgress((indxr/fcnt)*90)

    #===========================================================================
    # post checks
    #===========================================================================
    if not len(fid_attvs) == vlay.dataProvider().featureCount():
        log.debug('data result length does not match feature count')

        if not request.filterType() == 3: #check if a filter fids was passed
            """todo: add check to see if the fiter request length matches tresult"""
            raise Error('no filter and data length mismatch')

    #empty check (runs BEFORE the feat.attributes() check; see docstring bugfix)
    if len(fid_attvs) == 0:
        log.warning('failed to get any data on layer \'%s\' with request'%vlay.name())
        if not allow_none:
            raise Error('no data found!')
        else:
            if fmt == 'dict':
                return dict()
            elif fmt == 'df':
                return pd.DataFrame()
            else:
                raise Error('unexpected fmt type')

    #check the field lengthes ('feat' is the last feature from the loop above)
    if not len(all_fnl) == len(feat.attributes()):
        raise Error('field length mismatch')

    #===========================================================================
    # result formatting
    #===========================================================================
    log.debug('got %i data elements for \'%s\''%(
        len(fid_attvs), vlay.name()))

    if fmt == 'dict':
        return fid_attvs
    elif fmt == 'df':
        #build the dict
        df_raw = pd.DataFrame.from_dict(fid_attvs, orient='index', columns=all_fnl)

        #handle column slicing and Qnulls
        """if the requester worked... we probably wouldnt have to do this"""
        df = df_raw.loc[:, tuple(fieldn_l)].replace(NULL, np.nan)

        feedback.setProgress(95)

        if isinstance(reindex, str):
            """
            reindex='zid'
            view(df)
            """
            #try and add the index (fids) as a data column
            try:
                df = df.join(pd.Series(df.index, index=df.index, name='fid'))
            except Exception: #bugfix: was a bare 'except:'
                log.debug('failed to preserve the fids.. column already there?')

            #re-index by the passed key... should copy the fids over to 'index
            df = df.set_index(reindex, drop=True)

            log.debug('reindexed data by \'%s\''%reindex)

        return df
    else:
        raise Error('unrecognized fmt kwarg')
def vlay_get_fdata( #get data for a single field from all the features
        vlay,
        fieldn = None, #get a field name. 'None' returns a dictionary of np.nan
        geopropn = None, #get a geometry property
        geo_obj = False, #whether to just get the geometry object
        request = None, #additional requester (limiting fids). fieldn still required. additional flags added
        selected= False, #whether to limit data to just those selected features
        fmt = 'dict', #format to return results in
            #'singleton' expect and aprovide a unitary value
        rekey = None, #field name to rekey dictionary like results by
        expect_all_real = False, #whether to expect all real results
        dropna = False, #whether to drop nulls from the results
        allow_none = False,
        logger = mod_logger, db_f=False):
    """
    Extract one field (or one geometry property, or the geometry objects
    themselves) from every feature on *vlay*.

    bugfix: 'dropna' used np.isnan(), which raises TypeError on string or
    None attribute values; now uses pd.isnull() which handles any type.

    TODO: combine this with vlay_get_fdatas
    consider combining with vlay_get_feats
    I'm not sure how this will handle requests w/ expressions
    """
    log = logger.getChild('vlay_get_fdata')

    if request is None:
        request = QgsFeatureRequest()

    #===========================================================================
    # prechecks
    #===========================================================================
    if geo_obj:
        if fmt == 'df': raise IOError
        if not geopropn is None: raise IOError

    if dropna:
        if expect_all_real:
            raise Error('cant expect_all_reals AND dropna')

    if allow_none:
        if expect_all_real:
            raise Error('cant allow none and expect all reals')

    vlay_check(vlay, exp_fieldns=[fieldn], logger=log, db_f=db_f)

    #===========================================================================
    # build the request
    #===========================================================================
    #no geometry
    if (geopropn is None) and (not geo_obj):
        if fieldn is None:
            raise Error('no field name provided')

        request = request.setFlags(QgsFeatureRequest.NoGeometry)
        request = request.setSubsetOfAttributes([fieldn], vlay.fields())
    else:
        request = request.setNoAttributes() #dont get any attributes

    #===========================================================================
    # selection limited
    #===========================================================================
    if selected:
        """
        todo: check if there is already a fid filter placed on the reuqester
        """
        log.debug('limiting data pull to %i selected features on \'%s\''%(
            vlay.selectedFeatureCount(), vlay.name()))

        sfids = vlay.selectedFeatureIds()
        request = request.setFilterFids(sfids)

    #===========================================================================
    # loop through and collect hte data
    #===========================================================================
    #if db_f: req_log(request, logger=log)
    d = dict() #empty container for results
    for feat in vlay.getFeatures(request):
        #=======================================================================
        # get geometry
        #=======================================================================
        if geo_obj:
            d[feat.id()] = feat.geometry()
        #=======================================================================
        # get a geometry property
        #=======================================================================
        elif not geopropn is None:
            geo = feat.geometry()

            func = getattr(geo, geopropn) #get the method
            d[feat.id()] = func() #call the method and store
        #=======================================================================
        # field request
        #=======================================================================
        else:
            #empty shortcut
            if qisnull(feat.attribute(fieldn)):
                d[feat.id()] = np.nan
            else: #pull real data
                d[feat.id()] = feat.attribute(fieldn)

    log.debug('retrieved %i attributes from features on \'%s\''%(
        len(d), vlay.name()))

    #===========================================================================
    # null handling
    #===========================================================================
    if selected:
        if not len(d) == vlay.selectedFeatureCount():
            raise Error('failed to get data matching %i selected features'%(
                vlay.selectedFeatureCount()))

    if expect_all_real:
        boolar = pd.isnull(np.array(list(d.values())))
        if np.any(boolar):
            raise Error('got %i nulls'%boolar.sum())

    if dropna:
        """faster to use dfs?"""
        log.debug('dropping nulls from %i'%len(d))

        #bugfix: np.isnan() raises TypeError on non-float attribute values;
        #pd.isnull() handles strings, None, NaN, NaT alike
        d = {k: v for k, v in d.items() if not pd.isnull(v)}

    #===========================================================================
    # post checks
    #===========================================================================
    if len(d) == 0:
        log.warning('got no results! from \'%s\''%(
            vlay.name()))
        if not allow_none:
            raise Error('allow_none=FALSE and no results')

        """
        view(vlay)
        """

    #===========================================================================
    # rekey
    #===========================================================================
    if isinstance(rekey, str):
        assert fmt == 'dict'
        d, _ = vlay_key_convert(vlay, d, rekey, id1_type='fid', logger=log)

    #===========================================================================
    # results
    #===========================================================================
    if fmt == 'dict':
        return d
    elif fmt == 'df':
        return pd.DataFrame(pd.Series(d, name=fieldn))
    elif fmt == 'singleton':
        if not len(d) == 1:
            raise Error('expected singleton')
        return next(iter(d.values()))
    elif fmt == 'ser':
        return pd.Series(d, name=fieldn)
    else:
        raise IOError
def vlay_new_mlay(#create a new mlay
        gtype, #"Point", "LineString", "Polygon", "MultiPoint", "MultiLineString", or "MultiPolygon".
        crs,
        layname,
        qfields,
        feats_l,
        logger=mod_logger,
        ):
    """Assemble an in-memory vector layer from a field set and feature list."""
    #=======================================================================
    # defaults
    #=======================================================================
    log = logger.getChild('vlay_new_mlay')

    #=======================================================================
    # prechecks
    #=======================================================================
    if not isinstance(layname, str):
        raise Error('expected a string for layname, isntead got %s'%type(layname))

    if gtype=='None':
        log.warning('constructing mlay w/ \'None\' type')

    #=======================================================================
    # assemble into new layer
    #=======================================================================
    #memory-provider uri: geometry type + epsg code + spatial index flag
    EPSG_code=int(crs.authid().split(":")[1]) #get teh coordinate reference system of input_layer
    uri = gtype+'?crs=epsg:'+str(EPSG_code)+'&index=yes'

    vlaym = QgsVectorLayer(uri, layname, "memory")

    # add fields
    if not vlaym.dataProvider().addAttributes(qfields):
        raise Error('failed to add fields')
    vlaym.updateFields()

    #add feats
    if not vlaym.dataProvider().addFeatures(feats_l):
        raise Error('failed to addFeatures')
    vlaym.updateExtents()

    #=======================================================================
    # checks
    #=======================================================================
    #wkb type 100 == NoGeometry; only acceptable when the caller asked for 'None'
    if vlaym.wkbType() == 100:
        msg = 'constructed layer \'%s\' has NoGeometry'%vlaym.name()
        if gtype == 'None':
            log.debug(msg)
        else:
            raise Error(msg)

    log.debug('constructed \'%s\''%vlaym.name())
    return vlaym
def vlay_new_df(#build a vlay from a df
        df_raw,
        crs,
        geo_d = None, #container of geometry objects {fid: QgsGeometry}
        geo_fn_tup = None, #if geo_d=None, tuple of field names to search for coordinate data
        layname='df_layer',
        allow_fid_mismatch = False,
        infer_dtypes = True, #whether to referesh the dtyping in the df
        driverName = 'GPKG',
        #expectations
        expect_unique_colns = True,
        logger=mod_logger, db_f = False,
        ):
    """Build a vector layer from a DataFrame (one feature per row).

    todo: migrate off this
    """
    #=======================================================================
    # setup
    #=======================================================================
    log = logger.getChild('vlay_new_df')
    log.warning('Depcreciate me')

    #=======================================================================
    # precheck
    #=======================================================================
    df = df_raw.copy()
    max_len = 50 #column-name length limit for the driver

    #check lengths
    bx = df_raw.columns.str.len() >= max_len
    if np.any(bx):
        log.warning('passed %i columns which exeed the max length %i for driver \'%s\'.. truncating: \n    %s'%(
            bx.sum(), max_len, driverName, df_raw.columns.values[bx]))

        df.columns = df.columns.str.slice(start=0, stop=max_len-1)

    #make sure the columns are unique
    if not df.columns.is_unique:
        """
        this can happen especially when some field names are super long and have their unique parts truncated
        """
        bx = df.columns.duplicated(keep='first')
        log.warning('got %i duplicated columns: \n    %s'%(
            bx.sum(), df.columns[bx].values))

        if expect_unique_colns:
            raise Error('got non unique columns')

        #drop the duplicates
        log.warning('dropping second duplicate column')
        df = df.loc[:, ~bx]

    #===========================================================================
    # assemble the features
    #===========================================================================
    """this does its own index check"""
    feats_d = feats_build(df, logger=log, geo_d=geo_d, infer_dtypes=infer_dtypes,
                          geo_fn_tup=geo_fn_tup,
                          allow_fid_mismatch=allow_fid_mismatch, db_f=db_f)

    #=======================================================================
    # get the geo type
    #=======================================================================
    if geo_d is not None:
        #pull geometry type from first feature
        gtype = QgsWkbTypes().displayString(next(iter(geo_d.values())).wkbType())
    elif geo_fn_tup is not None:
        gtype = 'Point'
    else:
        gtype = 'None'

    #===========================================================================
    # buidl the new layer
    #===========================================================================
    vlay = vlay_new_mlay(gtype, #no geo
                         crs,
                         layname,
                         list(feats_d.values())[0].fields(),
                         list(feats_d.values()),
                         logger=log,
                         )

    #=======================================================================
    # post check
    #=======================================================================
    if db_f:
        if vlay.wkbType() == 100:
            raise Error('constructed layer has NoGeometry')
        #vlay_chk_validty(vlay, chk_geo=True, logger=log)

    return vlay
def vlay_fieldnl(vlay):
    """Return the names of all fields on *vlay*, in layer order."""
    names = []
    for field in vlay.fields():
        names.append(field.name())
    return names
def feats_build( #build a set of features from teh passed data
        data, #data from which to build features from (either df or qvlayd)
        geo_d = None, #container of geometry objects {fid: QgsGeometry}
        geo_fn_tup = None, #if geo_d=None, tuple of field names to search for coordinate data
        allow_fid_mismatch = False, #whether to raise an error if the fids set on the layer dont match the data
        infer_dtypes = True, #whether to referesh the dtyping in the df
        logger=mod_logger, db_f=False):
    """Build a {fid: QgsFeature} dict from tabular data.

    `data` is either a pandas DataFrame (integer index = fids) or a qvlayd
    dict {fid: {fieldname: value}}. Geometry comes either from `geo_d`
    (pre-built QgsGeometry per fid) or from x/y coordinate columns named by
    `geo_fn_tup` (which are popped out of the attribute data).
    Passing both geo_d and geo_fn_tup raises. `db_f` enables extra
    (slower) validity checks.
    """
    log = logger.getChild('feats_build')
    #===========================================================================
    # precheck
    #===========================================================================
    #geometry input logic
    if (not geo_d is None) and (not geo_fn_tup is None):
        raise Error('todo: implement non geo layers')
    #index match
    if isinstance(geo_d, dict):
        #get the data fid_l
        if isinstance(data, pd.DataFrame):
            dfid_l = data.index.tolist()
        elif isinstance(data, dict):
            dfid_l = list(data.keys())
        else:
            raise Error('unexpected type')
        if not basic.linr(dfid_l, list(geo_d.keys()),'feat_data', 'geo_d',
                          sort_values=True, result_type='exact', logger=log):
            raise Error('passed geo_d and data indexes dont match')
    #overrides
    if geo_fn_tup:
        geofn_hits = 0
        sub_field_match = False #dropping geometry fields
    else:
        sub_field_match = True
    # NOTE(review): sub_field_match is computed but unused since the
    # feat_build() call below was commented out -- confirm before removing
    log.debug('for %i data type %s'%(
        len(data), type(data)))
    #===========================================================================
    # data conversion
    #===========================================================================
    if isinstance(data, pd.DataFrame):
        #check the index (this will be the fids)
        if not data.index.dtype.char == 'q':
            raise Error('expected integer index')
        fid_ar = data.index.values
        #infer types
        if infer_dtypes:
            data = data.infer_objects()
        #convert the data
        qvlayd = df_to_qvlayd(data)
        #=======================================================================
        # build fields container from data
        #=======================================================================
        """we need to convert numpy types to pytypes.
        these are later convert to Qtypes"""
        fields_d = dict()
        for coln, col in data.items():
            if not geo_fn_tup is None:
                if coln in geo_fn_tup:
                    geofn_hits +=1
                    continue #skip this one
            #set the type for this name
            fields_d[coln] = np_to_pytype(col.dtype, logger=log)
        qfields = fields_build_new(fields_d = fields_d, logger=log)
        #=======================================================================
        # some checks
        #=======================================================================
        if db_f:
            #calc hte expectation
            if geo_fn_tup is None:
                exp_cnt= len(data.columns)
            else:
                exp_cnt = len(data.columns) - len(geo_fn_tup)
            if not exp_cnt == len(fields_d):
                raise Error('only generated %i fields from %i columns'%(
                    len(data.columns), len(fields_d)))
            #check we got them all
            if not exp_cnt == len(qfields):
                raise Error('failed to create all the fields')
            """
            for field in qfields:
                print(field)
            qfields.toList()
            new_qfield = QgsField(fname, qtype, typeName=QMetaType.typeName(QgsField(fname, qtype).type()))
            """
    else:
        # qvlayd-style dict input: fids are the top-level keys
        fid_ar = np.array(list(data.keys()))
        #set the data
        qvlayd = data
        #===========================================================================
        # build fields container from data
        #===========================================================================
        #slice out geometry data if there
        sub_d1 = list(qvlayd.values())[0] #just get the first
        sub_d2 = dict()
        for fname, value in sub_d1.items():
            if not geo_fn_tup is None:
                if fname in geo_fn_tup:
                    geofn_hits +=1
                    continue #skip this one
            sub_d2[fname] = value
        #build the fields from this sample data
        qfields = fields_build_new(samp_d = sub_d2, logger=log)
    #check for geometry field names
    if not geo_fn_tup is None:
        if not geofn_hits == len(geo_fn_tup):
            log.error('missing some geometry field names form the data')
            raise IOError
    #===========================================================================
    # extract geometry
    #===========================================================================
    if geo_d is None:
        #check for nulls
        if db_f:
            chk_df= pd.DataFrame.from_dict(qvlayd, orient='index')
            if chk_df.loc[:, geo_fn_tup].isna().any().any():
                raise Error('got some nulls on the geometry fields: %s'%geo_fn_tup)
        # pop the coordinate columns out of each record and build point
        # geometries; the cleaned (geometry-free) record replaces the original
        geo_d = dict()
        for fid, sub_d in copy.copy(qvlayd).items():
            #get the xy
            xval, yval = sub_d.pop(geo_fn_tup[0]), sub_d.pop(geo_fn_tup[1])
            #build the geometry
            geo_d[fid] = QgsGeometry.fromPointXY(QgsPointXY(xval,yval))
            #add the cleaned data back in
            qvlayd[fid] = sub_d
    #===========================================================================
    # check geometry
    #===========================================================================
    if db_f:
        #precheck geometry validty
        for fid, geo in geo_d.items():
            if not geo.isGeosValid():
                raise Error('got invalid geometry on %i'%fid)
    #===========================================================================
    # loop through adn build features
    #===========================================================================
    feats_d = dict()
    for fid, sub_d in qvlayd.items():
        #=======================================================================
        # #log.debug('assembling feature %i'%fid)
        # #=======================================================================
        # # assmble geometry data
        # #=======================================================================
        # if isinstance(geo_d, dict):
        #     geo = geo_d[fid]
        #
        # elif not geo_fn_tup is None:
        #     xval = sub_d[geo_fn_tup[0]]
        #     yval = sub_d[geo_fn_tup[1]]
        #
        #     if pd.isnull(xval) or pd.isnull(yval):
        #         log.error('got null geometry values')
        #         raise IOError
        #
        #     geo = QgsGeometry.fromPointXY(QgsPointXY(xval,yval))
        #     #Point(xval, yval) #make the point geometry
        #
        # else:
        #     geo = None
        #=======================================================================
        #=======================================================================
        # buidl the feature
        #=======================================================================
        #=======================================================================
        # feats_d[fid] = feat_build(fid, sub_d, qfields=qfields, geometry=geo,
        #                           sub_field_match = sub_field_match, #because we are excluding the geometry from the fields
        #                           logger=log, db_f=db_f)
        #=======================================================================
        feat = QgsFeature(qfields, fid)
        for fieldn, value in sub_d.items():
            """
            cut out feat_build() for simplicity
            """
            #skip null values
            if pd.isnull(value): continue
            #get the index for this field
            findx = feat.fieldNameIndex(fieldn)
            #get the qfield
            qfield = feat.fields().at(findx)
            #make the type match
            ndata = qtype_to_pytype(value, qfield.type(), logger=log)
            #set the attribute
            if not feat.setAttribute(findx, ndata):
                raise Error('failed to setAttribute')
        #setgeometry
        feat.setGeometry(geo_d[fid])
        #stor eit
        feats_d[fid]=feat
    #===========================================================================
    # checks
    #===========================================================================
    if db_f:
        #fid match
        nfid_ar = np.array(list(feats_d.keys()))
        if not np.array_equal(nfid_ar, fid_ar):
            log.warning('fid mismatch')
            if not allow_fid_mismatch:
                raise Error('fid mismatch')
        #feature validty
        for fid, feat in feats_d.items():
            if not feat.isValid():
                raise Error('invalid feat %i'%feat.id())
            if not feat.geometry().isGeosValid():
                raise Error('got invalid geometry on feat \'%s\''%(feat.id()))
    """
    feat.geometry().type()
    """
    # NOTE(review): the log line below references `feat` from the loop --
    # it would NameError on empty input; confirm callers never pass empty data
    log.debug('built %i \'%s\' features'%(
        len(feats_d),
        QgsWkbTypes.geometryDisplayString(feat.geometry().type()),
        ))
    return feats_d
def fields_build_new( #build qfields from different data containers
        samp_d = None, #sample data from which to build qfields {fname: value}
        fields_d = None, #direct data from which to build qfields {fname: pytype}
        fields_l = None, #list of QgsField objects
        logger=mod_logger):
    """Build a QgsFields container from one of three inputs.

    Exactly one of `samp_d` (infer type from sample values), `fields_d`
    (explicit {name: pytype}), or `fields_l` (pre-built QgsField list)
    should be provided. Returns the populated QgsFields.
    """
    log = logger.getChild('fields_build_new')
    #===========================================================================
    # buidl the fields_d
    #===========================================================================
    if (fields_d is None) and (fields_l is None): #only if we have nothign better to start with
        if samp_d is None:
            log.error('got no data to build fields on!')
            raise IOError
        fields_l = []
        for fname, value in samp_d.items():
            if pd.isnull(value):
                # NOTE(review): format string below is missing its % fname
                # argument -- the literal '%s' is logged as-is
                log.error('for field \'%s\' got null value')
                raise IOError
            elif inspect.isclass(value):
                raise IOError
            fields_l.append(field_new(fname, pytype=type(value)))
        log.debug('built %i fields from sample data'%len(fields_l))
    #===========================================================================
    # buidl the fields set
    #===========================================================================
    elif fields_l is None:
        fields_l = []
        for fname, ftype in fields_d.items():
            fields_l.append(field_new(fname, pytype=ftype))
        log.debug('built %i fields from explicit name/type'%len(fields_l))
        #check it
        if not len(fields_l) == len(fields_d):
            raise Error('length mismatch')
    elif fields_d is None: #check we have the other
        raise IOError
    #===========================================================================
    # build the Qfields
    #===========================================================================
    # QgsFields.append() returns False on failure (e.g. duplicate names);
    # collect all failures before raising so they can all be logged
    Qfields = QgsFields()
    fail_msg_d = dict()
    for indx, field in enumerate(fields_l):
        if not Qfields.append(field):
            fail_msg_d[indx] = ('%i failed to append field \'%s\''%(indx, field.name()), field)
    #report
    if len(fail_msg_d)>0:
        for indx, (msg, field) in fail_msg_d.items():
            log.error(msg)
        raise Error('failed to write %i fields'%len(fail_msg_d))
    """
    field.name()
    field.constraints().constraintDescription()
    field.length()
    """
    #check it
    if not len(Qfields) == len(fields_l):
        raise Error('length mismatch')
    return Qfields
def field_new(fname,
              pytype=str,
              driverName = 'SpatiaLite', #desired driver (to check for field name length limitations)
              fname_trunc = True, #whether to truncate field names tha texceed the limit
              logger=mod_logger): #build a QgsField
    """Build a single QgsField named `fname` with the QVariant type matching
    `pytype`, enforcing the driver's field-name length limit
    (truncating or raising per `fname_trunc`)."""
    #===========================================================================
    # precheck
    #===========================================================================
    if not isinstance(fname, str):
        raise IOError('expected string for fname')
    #vector layer field name lim itation
    max_len = fieldn_max_d[driverName]
    """
    fname = 'somereallylongname'
    """
    if len(fname) >max_len:
        log = logger.getChild('field_new')
        log.warning('got %i (>%i)characters for passed field name \'%s\'. truncating'%(len(fname), max_len, fname))
        if fname_trunc:
            fname = fname[:max_len]
        else:
            raise Error('field name too long')
    qtype = ptype_to_qtype(pytype)
    """
    #check this type
    QMetaType.typeName(QgsField(fname, qtype).type())
    QVariant.String
    QVariant.Int
    QMetaType.typeName(new_qfield.type())
    """
    #new_qfield = QgsField(fname, qtype)
    # typeName is derived from a throwaway QgsField so the display name is
    # consistent with what QGIS itself reports for this QVariant type
    new_qfield = QgsField(fname, qtype, typeName=QMetaType.typeName(QgsField(fname, qtype).type()))
    return new_qfield
def vlay_get_bgeo_type(vlay,
                       match_flags=re.IGNORECASE,
                       ):
    """Classify the layer's geometry as 'polygon', 'point', or 'line' by
    pattern-matching the wkb display string; raises Error on no match."""
    display_str = QgsWkbTypes().displayString(vlay.wkbType()).lower()
    hit = next(
        (cand for cand in ('polygon', 'point', 'line')
         if re.search(cand, display_str, flags=match_flags)),
        None)
    if hit is None:
        raise Error('failed to match')
    return hit
def vlay_rename_fields(
        vlay_raw,
        rnm_d, #field name conversions to apply {old FieldName:newFieldName}
        logger=None,
        feedback=None,
        ):
    """Rename fields on a copy of `vlay_raw` per `rnm_d` and return the copy.

    The input layer is not modified: a working copy is made via the
    native:saveselectedfeatures algorithm first.

    todo: replace with coms.hp.Qproj.vlay_rename_fields
    """
    if logger is None: logger=mod_logger
    log=logger.getChild('vlay_rename_fields')
    #get a working layer
    # NOTE(review): selectAll() mutates the selection state of the INPUT
    # layer -- confirm callers don't depend on their prior selection
    vlay_raw.selectAll()
    vlay = processing.run('native:saveselectedfeatures',
        {'INPUT' : vlay_raw, 'OUTPUT' : 'TEMPORARY_OUTPUT'},
        feedback=feedback)['OUTPUT']
    #get fieldname index conversion for layer
    fni_d = {f.name():vlay.dataProvider().fieldNameIndex(f.name()) for f in vlay.fields()}
    #check it
    for k in rnm_d.keys():
        assert k in fni_d.keys(), 'requested field \'%s\' not on layer'%k
    #re-index rename request
    # renameAttributes() takes {fieldIndex: newName}, not {oldName: newName}
    fiRn_d = {fni_d[k]:v for k,v in rnm_d.items()}
    #apply renames
    if not vlay.dataProvider().renameAttributes(fiRn_d):
        raise Error('failed to rename')
    vlay.updateFields()
    #check it
    fn_l = [f.name() for f in vlay.fields()]
    s = set(rnm_d.values()).difference(fn_l)
    assert len(s)==0, 'failed to rename %i fields: %s'%(len(s), s)
    vlay.setName(vlay_raw.name())
    log.debug('applied renames to \'%s\' \n    %s'%(vlay.name(), rnm_d))
    return vlay
def vlay_key_convert(#convert a list of ids in one form to another
        vlay,
        id1_objs, #list of ids (or dict keyed b y ids) to get conversion of
        id_fieldn, #field name for field type ids
        id1_type = 'field', #type of ids passed in the id_l (result will return a dict of th eopposit etype)
        #'field': keys in id1_objs are values from some field (on the vlay)
        #'fid': keys in id1_objs are fids (on the vlay)
        fid_fval_d = None, #optional pre-calced data (for performance improvement)
        logger=mod_logger,
        db_f = False, #extra checks
        ):
    """Convert ids between fid-space and field-value-space on a layer.

    Returns (res_objs, fid_fval_d): the input list/dict re-keyed into the
    opposite id space, plus the fid->fval mapping used (so callers can
    reuse it). Note: only the 'fid' input direction is implemented when
    fid_fval_d must be pulled from the layer.
    """
    log = logger.getChild('vlay_key_convert')
    #===========================================================================
    # handle variable inputs
    #===========================================================================
    if isinstance(id1_objs, dict):
        id1_l = list(id1_objs.keys())
    elif isinstance(id1_objs, list):
        id1_l = id1_objs
    else:
        raise Error('unrecognized id1_objs type')
    #===========================================================================
    # extract the fid to fval conversion
    #===========================================================================
    if fid_fval_d is None:
        #limit the pull by id1s
        if id1_type == 'fid':
            request = QgsFeatureRequest().setFilterFids(id1_l)
            log.debug('pulling \'fid_fval_d\' from %i fids'%(len(id1_l)))
        #by field values
        elif id1_type == 'field': #limit by field value
            raise Error(' not implemented')
            #build an expression so we only query features with values matching the id1_l
            #===================================================================
            # qexp = exp_vals_in_field(id1_l, id_fieldn, qfields = vlay.fields(), logger=log)
            # request = QgsFeatureRequest(qexp)
            #
            # log.debug('pulling \'fid_fval_d\' from %i \'%s\' fvals'%(
            #     len(id1_l), id_fieldn))
            #===================================================================
        else:
            raise Error('unrecognized id1_type')
        fid_fval_d = vlay_get_fdata(vlay, fieldn=id_fieldn, request =request, logger=log,
                                    expect_all_real=True, fmt='dict')
    #no need
    else:
        log.debug('using passed \'fid_fval_d\' with %i'%len(fid_fval_d))
    #check them
    if db_f:
        #log.debug('\'fid_fval_d\': \n    %s'%fid_fval_d)
        # the mapping must be a bijection for key-swapping to be safe:
        # both fids and field values have to be unique
        for dname, l in (
                ('keys', list(fid_fval_d.keys())),
                ('values', list(fid_fval_d.values()))
                ):
            if not len(np.unique(np.array(l))) == len(l):
                raise Error('got non unique \'%s\' on fid_fval_d'%dname)
    #===========================================================================
    # swap keys
    #===========================================================================
    if id1_type == 'fid':
        id1_id2_d = fid_fval_d #o flip necessary
    elif id1_type == 'field': #limit by field value
        log.debug('swapping keys')
        id1_id2_d = dict(zip(
            fid_fval_d.values(), fid_fval_d.keys()
            ))
    else:
        raise Error('unrecognized id1_type')
    #=======================================================================
    # #make conversion
    #=======================================================================
    #for dictionaries
    if isinstance(id1_objs, dict):
        res_objs = dict()
        for id1, val in id1_objs.items():
            res_objs[id1_id2_d[id1]] = val
        log.debug('got converted DICT results with %i'%len(res_objs))
    #for lists
    elif isinstance(id1_objs, list):
        res_objs = [id1_id2_d[id1] for id1 in id1_objs]
        log.debug('got converted LIST results with %i'%len(res_objs))
    else:
        raise Error('unrecognized id1_objs type')
    return res_objs, fid_fval_d #converted objects, conversion dict ONLY FOR THSE OBJECTS!
#==============================================================================
# type checks-----------------
#==============================================================================
def qisnull(obj):
    """True if `obj` counts as null: python None, a null QVariant, or
    anything pandas considers null (NaN/NaT)."""
    if obj is None:
        return True
    if isinstance(obj, QVariant):
        return True if obj.isNull() else False
    return True if pd.isnull(obj) else False
def is_qtype_match(obj, qtype_code, logger=mod_logger): #check if the object matches the qtype code
    """Return True if `obj`'s python type matches the expected python type
    for the passed QVariant type code (looked up in type_qvar_py_d).

    Raises IOError when the qtype code has no entry in the conversion dict.
    """
    log = logger.getChild('is_qtype_match')
    #get pythonic type for this code
    # fix: was a bare `except:` which could hide unrelated errors and leave
    # py_type unbound -- only a missing key can raise here
    try:
        py_type = type_qvar_py_d[qtype_code]
    except KeyError:
        log.error('passed qtype_code \'%s\' not in dict from \'%s\''%(qtype_code, type(obj)))
        raise IOError
    # original returned True/False via an if/else; isinstance already
    # yields the same booleans
    return isinstance(obj, py_type)
#==============================================================================
# type conversions----------------
#==============================================================================
def np_to_pytype(npdobj, logger=mod_logger):
    """Map a numpy dtype to its python type via the npc_pytype_d
    {dtype.char: pytype} conversion dict; raises Error on unknown dtypes."""
    if not isinstance(npdobj, np.dtype):
        raise Error('not passed a numpy type')
    try:
        return npc_pytype_d[npdobj.char]
    except Exception as e:
        log = logger.getChild('np_to_pytype')
        if not npdobj.char in npc_pytype_d.keys():
            log.error('passed npdtype \'%s\' not found in the conversion dictionary'%npdobj.name)
        raise Error('failed oto convert w/ \n    %s'%e)
def qtype_to_pytype( #convert object to the pythonic type taht matches the passed qtype code
        obj,
        qtype_code, #qtupe code (qfield.type())
        logger=mod_logger):
    """Coerce `obj` to the python type matching the QVariant type code.

    Returns obj unchanged when it already matches, None for nulls,
    otherwise the converted value; raises IOError when no conversion
    is possible."""
    if is_qtype_match(obj, qtype_code): #no conversion needed
        return obj
    #===========================================================================
    # shortcut for nulls
    #===========================================================================
    if qisnull(obj):
        return None
    #get pythonic type for this code
    py_type = type_qvar_py_d[qtype_code]
    # NOTE(review): bare except below swallows *any* failure from the type
    # constructor, not just conversion errors -- consider narrowing
    try:
        return py_type(obj)
    except:
        #datetime
        # qtype code 16 is QVariant.DateTime; assumes obj is a QDateTime
        if qtype_code == 16:
            return obj.toPyDateTime()
        log = logger.getChild('qtype_to_pytype')
        if obj is None:
            log.error('got NONE type')
        elif isinstance(obj, QVariant):
            log.error('got a Qvariant object')
        else:
            log.error('unable to map object \'%s\' of type \'%s\' to type \'%s\''
                      %(obj, type(obj), py_type))
        """
        QMetaType.typeName(obj)
        """
        raise IOError
def ptype_to_qtype(py_type, logger=mod_logger): #get the qtype corresponding to the passed pytype
    """useful for buildign Qt objects
    really, this is a reverse
    py_type=str
    """
    # `py_type` must be a class (e.g. str, int, float), not an instance
    if not inspect.isclass(py_type):
        logger.error('got unexpected type \'%s\''%type(py_type))
        raise Error('bad type')
    #build a QVariant object from this python type class, then return its type
    # NOTE(review): bare except -- any constructor failure is reported the
    # same way; consider narrowing to TypeError
    try:
        qv = QVariant(py_type())
    except:
        logger.error('failed to build QVariant from \'%s\''%type(py_type))
        raise IOError
    """
    #get the type
    QMetaType.typeName(qv.type())
    """
    return qv.type()
def df_to_qvlayd( #convert a data frame into the layer data structure (keeyd by index)
        df, #data to convert. df index should match fid index
        logger=None):
    """Convert a DataFrame into the qgis layer data structure
    {fid: {fieldname: value}}, keyed by the (unique) df index.

    Raises IOError when the index is not unique.
    """
    # fix: the default was `logger=mod_logger`, evaluated at import time;
    # resolve lazily instead (backward compatible for all callers)
    if logger is None:
        logger = mod_logger
    log = logger.getChild('df_to_qvlayd')
    #prechecks
    if not df.index.is_unique:
        log.error('got passed non-unique index')
        raise IOError
    #===========================================================================
    # loop and fill
    #===========================================================================
    # dict(row) builds the {fieldname: value} sub-dict in one step,
    # replacing the original per-cell inner loop
    d = {fid: dict(row) for fid, row in df.iterrows()}
    # defensive parity check retained from the original (cannot fire once
    # the index is known unique, but cheap)
    if not len(df) == len(d):
        log.error('got length mismatch')
        raise IOError
    log.debug('converted df %s into qvlayd'%str(df.shape))
    return d
def view(#view the vector data (or just a df) as a html frame
        obj, logger=mod_logger,
        #**gfd_kwargs, #kwaqrgs to pass to vlay_get_fdatas() 'doesnt work well with the requester'
        ):
    """Open `obj` (DataFrame/Series or QgsVectorLayer) in the basic.view()
    html viewer; blocks until the viewer is closed."""
    if isinstance(obj, pd.DataFrame) or isinstance(obj, pd.Series):
        df = obj
    elif isinstance(obj, QgsVectorLayer):
        """this will index the viewed frame by fid"""
        df = vlay_get_fdf(obj)
    else:
        raise Error('got unexpected object type: %s'%type(obj))
    basic.view(df)
    logger.info('viewer closed')
    return
if __name__ == '__main__':
    # placeholder entry point; this module is intended to be imported
    print('???')
| 1.648438 | 2 |
ProsperFlask-deb/{{cookiecutter.project_name}}/{{cookiecutter.app_name}}/__init__.py | EVEprosper/ProsperCookiecutters | 0 | 12762427 | """__init__.py: Flask app configuration"""
from os import path
try:
from flask import Flask
import {{cookiecutter.app_name}}.{{cookiecutter.endpoint_group}} as {{cookiecutter.endpoint_group}}
import {{cookiecutter.app_name}}.config as api_config
import prosper.common.prosper_logging as p_logging
except ImportError:
import warnings
warnings.warn('environment not set up yet')
HERE = path.abspath(path.dirname(__file__))
def create_app(
        settings=None,
        local_configs=None,
):
    """create Flask application (ROOT)

    Modeled from: https://github.com/yabb85/ueki/blob/master/ueki/__init__.py

    Args:
        settings (:obj:`dict`, optional): collection of Flask options
        local_configs (:obj:`configparser.ConfigParser` optional): app private configs

    Returns:
        flask.Flask: the configured application
    """
    app = Flask(__name__)
    if settings:
        app.config.update(settings)
    # attach the flask_restplus/restx-style API namespace to the app
    {{cookiecutter.endpoint_group}}.API.init_app(app)
    # NOTE(review): {{cookiecutter.log_path}} is rendered unquoted -- it must
    # expand to a valid python expression (e.g. a name), not a bare path string
    log_builder = p_logging.ProsperLogger(
        '{{cookiecutter.app_name}}',
        {{cookiecutter.log_path}},
        local_configs
    )
    # webhook loggers in production; console debug logger otherwise
    if not app.debug:
        log_builder.configure_discord_logger()
        log_builder.configure_slack_logger()
    else:
        log_builder.configure_debug_logger()
    if log_builder:
        for handle in log_builder:
            app.logger.addHandler(handle)
    # share logger/config with the endpoint modules via the config module
    api_config.LOGGER = log_builder.get_logger()
    api_config.CONFIG = local_configs
    {{cookiecutter.endpoint_group}}.LOGGER = app.logger
    return app
| 2.09375 | 2 |
python/fate_flow/db/db_utils.py | PromiseChan/FATE-Flow | 22 | 12762428 | <reponame>PromiseChan/FATE-Flow
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_flow.utils.log_utils import getLogger
from fate_flow.db.db_models import DB
from fate_flow.db.runtime_config import RuntimeConfig
LOGGER = getLogger()
@DB.connection_context()
def bulk_insert_into_db(model, data_source, logger):
    """Batch-insert `data_source` rows into `model`'s table.

    Creates the table if missing (creation errors are logged and ignored),
    then inserts in atomic batches. Returns the number of rows submitted,
    or 0 if any insert failed.
    """
    try:
        try:
            DB.create_tables([model])
        except Exception as e:
            # table probably already exists; log and continue
            logger.exception(e)
        # smaller batches for the local (sqlite) backend, larger for mysql
        batch_size = 50 if RuntimeConfig.USE_LOCAL_DATABASE else 1000
        for i in range(0, len(data_source), batch_size):
            with DB.atomic():
                model.insert_many(data_source[i:i + batch_size]).execute()
        return len(data_source)
    except Exception as e:
        # NOTE(review): a failure mid-way leaves earlier batches committed;
        # the 0 return does not indicate how many rows made it in
        logger.exception(e)
        return 0
def get_dynamic_db_model(base, job_id):
    # Resolve the concrete, per-shard model class for this job: `base.model`
    # builds an instance bound to the sharded table (indexed by the job-id
    # date prefix) and type() returns its class for querying/inserting.
    return type(base.model(table_index=get_dynamic_tracking_table_index(job_id=job_id)))
def get_dynamic_tracking_table_index(job_id):
    """Derive the tracking-table shard index from a job id: its first
    eight characters (the date prefix of FATE job ids)."""
    date_prefix = job_id[0:8]
    return date_prefix
def fill_db_model_object(model_object, human_model_dict):
    """Copy values from a plain dict onto a DB model instance.

    Each key `k` maps to the model attribute `f_k`; keys whose prefixed
    name is not declared on the model class are silently skipped.
    Returns the (mutated) model_object.
    """
    model_cls = model_object.__class__
    for key, value in human_model_dict.items():
        field_attr = 'f_%s' % key
        if hasattr(model_cls, field_attr):
            setattr(model_object, field_attr, value)
    return model_object
| 1.75 | 2 |
index.py | Samwisebuze/virtuoso-container | 2 | 12762429 | <filename>index.py<gh_stars>1-10
from flask import Flask, request, jsonify
app = Flask(__name__)
# JSON POST request format:
# {
# "networkId": "<unique id>",
# "networkDetails": [
# {
# "machineId": "<unique id>",
# "machineAddress": "<unique address>",
# "machineType": "<host | switch>",
# "adjacentMachines": [<list of adjacent machine ids>]
# },
# ...
# ]
# }
#
#
# Example curl (note that the content-type is 'application/json'):
# curl --request POST \
# --url http://127.0.0.1:5000/api/v1/create-network \
# --header 'content-type: application/json' \
# --data '{
# "networkId": "<unique id>",
# "networkDetails": [
# {
# "machineId": "<unique id>",
# "machineAddress": "<unique address>",
# "machineType": "<host | switch>",
# "adjacentMachines": [<list of adjacent machine ids>]
# }
# ]
# }'
@app.route('/api/v1/create-network', methods=['POST'])
def create_network():
    """Accept a JSON network description (networkId + networkDetails) and
    acknowledge receipt; returns status 'not OK' on an empty body."""
    if not request.data:
        return jsonify({'status': 'not OK'})
    payload = request.get_json()
    network_id = payload['networkId']
    network_details = payload['networkDetails']
    # Machine information is now available as a list of objects:
    # - machineId
    # - machineAddress
    # - machineType
    # - adjacentMachines
    print(network_details)
    return jsonify({'status': 'OK'})
# Example curl (note that the content-type is 'application/json'):
# curl --request DELETE \
# --url http://127.0.0.1:5000/api/v1/delete-network \
# --header 'content-type: application/json' \
# --data '{
# "networkId": "<unique id>"
# }'
@app.route('/api/v1/delete-network', methods=['DELETE'])
def destroy_network():
    """Tear down a previously created network identified by networkId;
    returns status 'not OK' on an empty request body."""
    if not request.data:
        return jsonify({'status': 'not OK'})
    payload = request.get_json()
    network_id = payload['networkId']
    # Destroy the given network by network_id...
    return jsonify({'status': 'OK'})
if __name__ == '__main__':
    # run the Flask development server (default 127.0.0.1:5000)
    app.run()
| 3.046875 | 3 |
train.py | ablou1/dqn-navigation | 0 | 12762430 | <gh_stars>0
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
from dqn_agent import DqnAgent, DoubleDqnAgent
from dueling_dqn_agent import DuelingDqnAgent, DuelingDoubleDqnAgent
import torch
import random
# Load the Banana environment
# (requires the local Unity build; opens a window and a socket to the simulator)
env = UnityEnvironment(file_name="Banana_Windows_x86_64/Banana.exe")
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
action_size = brain.vector_action_space_size # number of actions
# examine the state space
state = env_info.vector_observations[0]
state_size = len(state)  # observation vector length, taken from the env itself
# Create the agent to train with the parameters to use
agent = DuelingDqnAgent(state_size=state_size, action_size=action_size, seed=0, batch_size=32, hidden_layer_size=32)
def dqn(agent, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995, save_checkpoint=False):
    """Deep Q-Learning.

    Params
    ======
        agent (Agent): agent to train
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode
        eps_start (float): starting value of epsilon, for epsilon-greedy action selection
        eps_end (float): minimum value of epsilon
        eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
        save_checkpoint (bool): if True, save the network weights whenever the
            100-episode average score reaches a new best (>= 13.0)

    Returns:
        (scores, scores_mean): per-episode scores and the running
        100-episode average, as lists
    """
    best_score = 13.0  # Only save the agent if he gets a result better than 13.0
    # Number of episodes needed to solve the environment (mean score of 13 on the 100 last episodes)
    episode_solved = n_episodes
    scores = []                        # list containing scores from each episode
    scores_mean = []                   # List containing mean value of score_window
    scores_window = deque(maxlen=100)  # last 100 scores
    eps = eps_start                    # initialize epsilon
    for i_episode in range(1, n_episodes+1):
        env_info = env.reset()[brain_name]
        state = env_info.vector_observations[0]  # get the current state
        score = 0
        for t in range(max_t):
            action = agent.act(state, eps)
            env_info = env.step(action)[brain_name]        # send the action to the environment
            next_state = env_info.vector_observations[0]   # get the next state
            reward = env_info.rewards[0]                   # get the reward
            done = env_info.local_done[0]                  # see if episode has finished
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_window.append(score)       # save most recent score
        scores.append(score)              # save most recent score
        scores_mean.append(np.mean(scores_window))
        eps = max(eps_end, eps_decay*eps) # decrease epsilon
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
        if np.mean(scores_window) >= best_score:
            # i_episode-100 marks the first episode of the winning 100-episode window
            episode_solved = min(episode_solved, i_episode-100)
            best_score = np.mean(scores_window)
            if save_checkpoint:
                checkpoint = {'state_dict': agent.state_size,
                              'action_size': agent.action_size,
                              'hidden_layer_size': agent.hidden_layer_size,
                              'state_dict': agent.qnetwork_local.state_dict()
                              }
                torch.save(checkpoint, f'{agent.name}_checkpoint.pth')
    if episode_solved < n_episodes:
        print(f'\n{agent.name} - best average score : {best_score} - Environment solved after {episode_solved} episodes')
    return scores, scores_mean
# train the agent (faster epsilon decay than the default) and keep checkpoints
scores, _ = dqn(agent, eps_decay=0.98, save_checkpoint=True)
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
env.close()
| 2.34375 | 2 |
examples/ahrs/python/ukf/__init__.py | rafaelrietmann/ukf | 320 | 12762431 | <reponame>rafaelrietmann/ukf
#Copyright (C) 2013 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
from ctypes import *
# Taken from c/cukf.h
UKF_PRECISION_FLOAT = 0
UKF_PRECISION_DOUBLE = 1
state = None
state_error = None
innovation = None
covariance = None
parameters = None
parameters_error = None
# Internal globals, set during init
_cukf = None
_REAL_T = None
# Internal classes, wrapping cukf structs directly
class _SensorParams(Structure):
    # ctypes mirror of the native sensor-covariance struct; _fields_ are
    # attached later in init(), once the library's precision is known
    pass
class _State(Structure):
def __repr__(self):
fields = {
"attitude": tuple(self.attitude),
"angular_velocity": tuple(self.angular_velocity),
"acceleration": tuple(self.angular_velocity)
}
return str(fields)
class _StateError(Structure):
def __repr__(self):
fields = {
"attitude": tuple(self.attitude),
"angular_velocity": tuple(self.angular_velocity)
}
return str(fields)
class _Innovation(Structure):
def __repr__(self):
fields = {
"accel": tuple(self.accel),
"gyro": tuple(self.gyro),
"mag": tuple(self.mag)
}
return str(fields)
class _Parameters(Structure):
def __repr__(self):
field = {
"accel_bias": tuple(self.accel_bias),
"gyro_bias": tuple(self.gyro_bias),
"mag_bias": tuple(self.mag_bias),
"mag_scale": tuple(self.mag_scale),
"mag_field_norm": tuple(self.mag_field_norm),
"mag_field_inclination": tuple(self.mag_field_inclination)
}
return std(fields)
# Public interface
def iterate(dt):
    """Run one filter step of duration `dt` (seconds).

    Pushes the module-level `state` into the native filter, iterates, then
    reads back state, state error, innovation and parameter estimates into
    the module-level structs. Queued sensor readings are cleared after the
    step. Raises RuntimeError if init() has not been called.
    """
    global _cukf, state, state_error, innovation, parameters, parameters_error
    if not _cukf:
        raise RuntimeError("Please call ukf.init()")

    _cukf.ukf_set_state(state)
    _cukf.ukf_iterate(dt)
    # sensor readings are one-shot: consumed by this step, then cleared
    _cukf.ukf_sensor_clear()
    _cukf.ukf_get_state(state)
    _cukf.ukf_get_state_error(state_error)
    _cukf.ukf_get_innovation(innovation)
    _cukf.ukf_get_parameters(parameters)
    _cukf.ukf_get_parameters_error(parameters_error)
def set_sensors(accelerometer=None, gyroscope=None, magnetometer=None):
    """Queue sensor readings (3-component iterables) for the next iterate()
    call; readings left as None are simply not set."""
    for reading, setter_name in (
            (accelerometer, "ukf_sensor_set_accelerometer"),
            (gyroscope, "ukf_sensor_set_gyroscope"),
            (magnetometer, "ukf_sensor_set_magnetometer")):
        if reading is not None:
            getattr(_cukf, setter_name)(*reading)
def configure_sensors(accelerometer_covariance=None,
        gyroscope_covariance=None, magnetometer_covariance=None):
    """Set per-sensor measurement covariances on the native filter.

    Each argument may be a 3-element iterable (per-axis), a scalar
    (broadcast to all three axes), or None (defaults to (1.0, 1.0, 1.0)).
    """
    params = _SensorParams()
    # `getattr(x, '__iter__', False)` distinguishes iterables from scalars
    if getattr(accelerometer_covariance, '__iter__', False):
        params.accel_covariance = accelerometer_covariance
    elif accelerometer_covariance is not None:
        params.accel_covariance = (accelerometer_covariance, ) * 3
    else:
        params.accel_covariance = (1.0, 1.0, 1.0)

    if getattr(gyroscope_covariance, '__iter__', False):
        params.gyro_covariance = gyroscope_covariance
    elif gyroscope_covariance is not None:
        params.gyro_covariance = (gyroscope_covariance, ) * 3
    else:
        params.gyro_covariance = (1.0, 1.0, 1.0)

    if getattr(magnetometer_covariance, '__iter__', False):
        params.mag_covariance = magnetometer_covariance
    elif magnetometer_covariance is not None:
        params.mag_covariance = (magnetometer_covariance, ) * 3
    else:
        params.mag_covariance = (1.0, 1.0, 1.0)

    _cukf.ukf_set_params(params)
def configure_process_noise(process_noise_covariance):
    """Set the filter's process-noise covariance from a 6-element iterable
    (converted to the native precision array)."""
    _cukf.ukf_set_process_noise((_REAL_T * 6)(*process_noise_covariance))
def init():
    """Load the native AHRS/UKF shared library and set up ctypes bindings.

    Queries the library's compile-time configuration (numeric precision,
    state and measurement dimensions), lays out all ctypes structures
    accordingly, declares every function prototype, and allocates the
    module-level state/error/innovation/parameter globals.  Must be
    called before any other function in this module.
    """
    global _cukf, _REAL_T, state, state_error, innovation, parameters, parameters_error
    # NOTE(review): ".dylib" is the macOS shared-library suffix; other
    # platforms would need a different extension -- confirm intended targets.
    lib = os.path.join(os.path.dirname(__file__), "libahrs.dylib")
    _cukf = cdll.LoadLibrary(lib)
    # Prototypes for the configuration queries used just below.
    _cukf.ukf_init.argtypes = []
    _cukf.ukf_init.restype = None
    _cukf.ukf_config_get_precision.argtypes = []
    _cukf.ukf_config_get_precision.restype = c_long
    _cukf.ukf_config_get_state_dim.argtypes = []
    _cukf.ukf_config_get_state_dim.restype = c_long
    _cukf.ukf_config_get_measurement_dim.argtypes = []
    _cukf.ukf_config_get_measurement_dim.restype = c_long
    # Pick the ctypes float type matching the library's compiled precision.
    # NOTE: _PRECISION/_STATE_DIM/_MEASUREMENT_DIM are locals (not listed
    # in the ``global`` statement above), so they are invisible elsewhere.
    _PRECISION = _cukf.ukf_config_get_precision()
    _REAL_T = c_double if _PRECISION == UKF_PRECISION_DOUBLE else c_float
    _STATE_DIM = _cukf.ukf_config_get_state_dim()
    _MEASUREMENT_DIM = _cukf.ukf_config_get_measurement_dim()
    # Structure layouts depend on _REAL_T, so their _fields_ can only be
    # assigned here, after the precision is known.
    _SensorParams._fields_ = [
        ("accel_covariance", _REAL_T * 3),
        ("gyro_covariance", _REAL_T * 3),
        ("mag_covariance", _REAL_T * 3)
    ]
    _State._fields_ = [
        ("attitude", _REAL_T * 4),
        ("angular_velocity", _REAL_T * 3),
        ("acceleration", _REAL_T * 3)
    ]
    _StateError._fields_ = [
        ("attitude", _REAL_T * 3),
        ("angular_velocity", _REAL_T * 3)
    ]
    _Innovation._fields_ = [
        ("accel", _REAL_T * 3),
        ("gyro", _REAL_T * 3),
        ("mag", _REAL_T * 3)
    ]
    _Parameters._fields_ = [
        ("accel_bias", _REAL_T * 3),
        ("gyro_bias", _REAL_T * 3),
        ("mag_bias", _REAL_T * 3),
        ("mag_scale", _REAL_T * 3),
        ("mag_field_norm", _REAL_T),
        ("mag_field_inclination", _REAL_T),
    ]
    # Set up the function prototypes
    _cukf.ukf_set_attitude.argtypes = [_REAL_T, _REAL_T, _REAL_T, _REAL_T]
    _cukf.ukf_set_attitude.restype = None
    _cukf.ukf_set_angular_velocity.argtypes = [_REAL_T, _REAL_T, _REAL_T]
    _cukf.ukf_set_angular_velocity.restype = None
    _cukf.ukf_get_state.argtypes = [POINTER(_State)]
    _cukf.ukf_get_state.restype = None
    _cukf.ukf_set_state.argtypes = [POINTER(_State)]
    _cukf.ukf_set_state.restype = None
    _cukf.ukf_get_state_error.argtypes = [POINTER(_StateError)]
    _cukf.ukf_get_state_error.restype = None
    _cukf.ukf_get_innovation.argtypes = [POINTER(_Innovation)]
    _cukf.ukf_get_innovation.restype = None
    _cukf.ukf_get_state_covariance.argtypes = [
        POINTER(_REAL_T * (_STATE_DIM**2))]
    _cukf.ukf_get_state_covariance.restype = None
    _cukf.ukf_sensor_clear.argtypes = []
    _cukf.ukf_sensor_clear.restype = None
    _cukf.ukf_sensor_set_accelerometer.argtypes = [_REAL_T, _REAL_T, _REAL_T]
    _cukf.ukf_sensor_set_accelerometer.restype = None
    _cukf.ukf_sensor_set_gyroscope.argtypes = [_REAL_T, _REAL_T, _REAL_T]
    _cukf.ukf_sensor_set_gyroscope.restype = None
    _cukf.ukf_sensor_set_magnetometer.argtypes = [_REAL_T, _REAL_T, _REAL_T]
    _cukf.ukf_sensor_set_magnetometer.restype = None
    _cukf.ukf_set_params.argtypes = [POINTER(_SensorParams)]
    _cukf.ukf_set_params.restype = None
    # NOTE(review): ukf_iterate takes c_float even when _REAL_T is double;
    # presumably the C signature uses a plain float for dt -- confirm.
    _cukf.ukf_iterate.argtypes = [c_float]
    _cukf.ukf_iterate.restype = None
    _cukf.ukf_set_process_noise.argtypes = [POINTER(_REAL_T * _STATE_DIM)]
    _cukf.ukf_set_process_noise.restype = None
    _cukf.ukf_get_parameters.argtypes = [POINTER(_Parameters)]
    _cukf.ukf_get_parameters.restype = None
    _cukf.ukf_get_parameters_error.argtypes = [POINTER(_Parameters)]
    _cukf.ukf_get_parameters_error.restype = None
    # Initialize the library
    _cukf.ukf_init()
    # Set up the state
    state = _State()
    _cukf.ukf_get_state(state)
    # Set up the state errors
    state_error = _StateError()
    _cukf.ukf_get_state_error(state_error)
    # Set up the innovation
    innovation = _Innovation()
    # Set up the parameters
    parameters = _Parameters()
    _cukf.ukf_get_parameters(parameters)
    # Set up the parameter errors
    parameters_error = _Parameters()
    _cukf.ukf_get_parameters_error(parameters_error)
| 2.46875 | 2 |
test/inventory/mock_inventories/empty.py | oddlama/forge | 14 | 12762432 | hosts = []
| 1.101563 | 1 |
grr/server/aff4_objects/registry_init.py | panhania/grr | 0 | 12762433 | #!/usr/bin/env python
"""Load all aff4 objects in order to populate the registry.
"""
# pylint: disable=unused-import
from grr.server.aff4_objects import aff4_grr
from grr.server.aff4_objects import collects
from grr.server.aff4_objects import cronjobs
from grr.server.aff4_objects import filestore
from grr.server.aff4_objects import security
from grr.server.aff4_objects import standard
from grr.server.aff4_objects import stats
from grr.server.aff4_objects import stats_store
from grr.server.aff4_objects import user_managers
from grr.server.aff4_objects import users
| 1.351563 | 1 |
models/users_model.py | raphaelcordon/Resenhando | 0 | 12762434 |
class Users:
    """Plain data container for a user account record."""

    def __init__(self, id, name, surname, password, email, changepass, read_comment, read_like):
        # Assign all constructor arguments to same-named attributes in a
        # single tuple unpacking so the field list is easy to scan.
        (self.id, self.name, self.surname, self.password,
         self.email, self.changepass, self.read_comment,
         self.read_like) = (id, name, surname, password, email,
                            changepass, read_comment, read_like)
class UsersPass:
    """Minimal credential pair: user id plus password."""

    def __init__(self, id, password):
        self.id, self.password = id, password
| 3.0625 | 3 |
tests/unit/test_metadata.py | pwoolvett/petri | 1 | 12762435 | import pytest
from tests.unit import a_pkg_import
@pytest.fixture(scope="function")
def read_meta(a_pkg_import):
    """Fixture: import the sample package and return its metadata object.

    ``a_pkg_import`` is itself a fixture (from tests.unit) that performs
    the import; with function scope a fresh metadata object is produced
    for every test.
    """
    a_pkg = a_pkg_import()
    return a_pkg.pkg.meta
def test_meta(read_meta):
    """Verify every exposed metadata field matches the packaged values."""
    expected = {
        "name": "a-pkg",
        "version": "1.2.3",
        "author": "<NAME>",
        "author-email": "<EMAIL>",
        "summary": "A description",
    }
    # getattr() is required because some field names ("author-email")
    # are not valid Python identifiers.
    for field, want in expected.items():
        assert getattr(read_meta, field) == want
| 2.65625 | 3 |
ocsmesh/utils.py | noaa-ocs-modeling/geomesh | 0 | 12762436 | <filename>ocsmesh/utils.py<gh_stars>0
from collections import defaultdict
from itertools import permutations
from typing import Union, Dict, Sequence, Tuple
from functools import reduce
from multiprocessing import cpu_count, Pool
from copy import deepcopy
import jigsawpy
from jigsawpy import jigsaw_msh_t # type: ignore[import]
from matplotlib.path import Path # type: ignore[import]
import matplotlib.pyplot as plt # type: ignore[import]
from matplotlib.tri import Triangulation # type: ignore[import]
import numpy as np # type: ignore[import]
from pyproj import CRS, Transformer # type: ignore[import]
from scipy.interpolate import ( # type: ignore[import]
RectBivariateSpline, griddata)
from scipy import sparse
from shapely.geometry import ( # type: ignore[import]
Polygon, MultiPolygon,
box, GeometryCollection, Point, MultiPoint,
LineString, LinearRing)
from shapely.ops import polygonize, linemerge
import geopandas as gpd
import utm
# Names of the jigsaw_msh_t attributes holding 2D element connectivity.
ELEM_2D_TYPES = ['tria3', 'quad4', 'hexa8']
def must_be_euclidean_mesh(func):
    """Decorator: reject meshes whose ``mshID`` is not 'euclidean-mesh'.

    The wrapped callable must take the mesh as its first positional
    argument; any other mesh kind raises NotImplementedError before
    ``func`` runs.
    """
    from functools import wraps

    # functools.wraps keeps the decorated function's __name__/__doc__
    # visible, which the original bare closure (named "decorator") lost.
    @wraps(func)
    def wrapper(mesh, *args, **kwargs):
        if mesh.mshID.lower() != 'euclidean-mesh':
            msg = f"Not implemented for mshID={mesh.mshID}"
            raise NotImplementedError(msg)
        return func(mesh, *args, **kwargs)
    return wrapper
def mesh_to_tri(mesh):
    """Build a matplotlib Triangulation from a jigsawpy.jigsaw_msh_t.

    Uses the mesh's vertex x/y columns and its triangle connectivity.
    """
    coords = mesh.vert2['coord']
    return Triangulation(
        coords[:, 0], coords[:, 1], mesh.tria3['index'])
def cleanup_isolates(mesh):
    """Drop vertices not referenced by any triangle; renumber in place.

    Mutates ``mesh.vert2``, ``mesh.value`` (if populated) and
    ``mesh.tria3``.
    """
    # For triangle only (TODO: add support for other types)
    node_indexes = np.arange(mesh.vert2['coord'].shape[0])
    used_indexes = np.unique(mesh.tria3['index'])
    # Positions of the vertices that survive (appear in some triangle).
    vert2_idxs = np.where(
        np.isin(node_indexes, used_indexes, assume_unique=True))[0]
    # Since tria simplex refers to node index which always starts from
    # 0 after removing isolate nodes we can use the map approach
    tria3 = mesh.tria3['index'].flatten()
    renum = {old: new for new, old in enumerate(np.unique(tria3))}
    tria3 = np.array([renum[i] for i in tria3])
    tria3 = tria3.reshape(mesh.tria3['index'].shape)
    mesh.vert2 = mesh.vert2.take(vert2_idxs, axis=0)
    # Keep per-vertex values aligned with the compacted vertex table.
    if len(mesh.value) > 0:
        mesh.value = mesh.value.take(vert2_idxs, axis=0)
    mesh.tria3 = np.asarray(
        [(tuple(indices), mesh.tria3['IDtag'][i])
         for i, indices in enumerate(tria3)],
        dtype=jigsaw_msh_t.TRIA3_t)
def put_edge2(mesh):
    """Populate ``mesh.edge2`` with every edge of the triangulation."""
    coords = mesh.vert2['coord']
    triangulation = Triangulation(
        coords[:, 0], coords[:, 1], mesh.tria3['index'])
    mesh.edge2 = np.array(
        [(e, 0) for e in triangulation.edges],
        dtype=jigsaw_msh_t.EDGE2_t)
def geom_to_multipolygon(mesh):
    """Convert the mesh's boundary index rings into a MultiPolygon."""
    coords = mesh.vert2['coord']
    polygons = []
    for rings in index_ring_collection(mesh).values():
        # Each ring stores (start, end) vertex-index pairs; column 0
        # gives the ordered vertex sequence.
        shell = coords[rings['exterior'][:, 0], :]
        holes = [coords[ring[:, 0], :] for ring in rings['interiors']]
        polygons.append(Polygon(shell, holes))
    return MultiPolygon(polygons)
def get_boundary_segments(mesh):
    """Extract the mesh boundary as a list of merged shapely segments.

    Boundary edges are grouped into connected components and each
    component is merged into a single line object.

    Raises:
        ValueError: if a component's boundary crosses itself in a way
            that cannot be polygonized (folded elements).
    """
    coords = mesh.vert2['coord']
    # get_boundary_edges() is defined elsewhere in this module; presumably
    # it returns an (n, 2) array of vertex-index pairs -- TODO confirm.
    boundary_edges = get_boundary_edges(mesh)
    boundary_verts = np.unique(boundary_edges)
    boundary_coords = coords[boundary_verts]
    # Renumber boundary vertices to 0..n-1 so they can index a small graph.
    vert_map = {
        orig: new for new, orig in enumerate(boundary_verts)}
    new_boundary_edges = np.array(
        [vert_map[v] for v in boundary_edges.ravel()]).reshape(
            boundary_edges.shape)
    # Undirected adjacency over boundary vertices; each connected
    # component is one boundary loop/part.
    graph = sparse.lil_matrix(
        (len(boundary_verts), len(boundary_verts)))
    for vert1, vert2 in new_boundary_edges:
        graph[vert1, vert2] = 1
    n_components, labels = sparse.csgraph.connected_components(
        graph, directed=False)
    segments = []
    for i in range(n_components):
        conn_mask = np.any(np.isin(
            new_boundary_edges, np.nonzero(labels == i)),
            axis=1)
        conn_edges = new_boundary_edges[conn_mask]
        this_segment = linemerge(boundary_coords[conn_edges])
        if not this_segment.is_simple:
            # Pinched nodes also result in non-simple linestring,
            # but they can be handled gracefully, here we are looking
            # for other issues like folded elements
            test_polys = list(polygonize(this_segment))
            if not test_polys:
                raise ValueError(
                    "Mesh boundary crosses itself! Folded element(s)!")
        segments.append(this_segment)
    return segments
def get_mesh_polygons(mesh):
    """Return the mesh footprint as a shapely MultiPolygon.

    Polygonizes the boundary segments and keeps only polygons that
    contain actual mesh vertices, in two passes: first polygons hit by
    non-boundary vertices, then polygons hit by any vertex of whatever
    mesh remains.
    """
    # TODO: Copy mesh?
    target_mesh = mesh
    result_polys = []
    # 2-pass find, first find using polygons that intersect non-boundary
    # vertices, then from the rest of the mesh find polygons that
    # intersect any vertex
    for find_pass in range(2):
        coords = target_mesh.vert2['coord']
        if len(coords) == 0:
            continue
        boundary_edges = get_boundary_edges(target_mesh)
        lines = get_boundary_segments(target_mesh)
        poly_gen = polygonize(lines)
        polys = list(poly_gen)
        # Largest-first ordering is relied upon when accepting polygons.
        polys = sorted(polys, key=lambda p: p.area, reverse=True)
        bndry_verts = np.unique(boundary_edges)
        if find_pass == 0:
            non_bndry_verts = np.setdiff1d(
                np.arange(len(coords)), bndry_verts)
            pnts = MultiPoint(coords[non_bndry_verts])
        else:
            pnts = MultiPoint(coords[bndry_verts])
        # NOTE: This logic requires polygons to be sorted by area
        pass_valid_polys = []
        while len(pnts):
            # NOTE(review): np.random makes the probe order
            # nondeterministic; presumably the final polygon set does
            # not depend on it -- confirm.
            idx = np.random.randint(len(pnts))
            pnt = pnts[idx]
            polys_gdf = gpd.GeoDataFrame(
                {'geometry': polys, 'list_index': range(len(polys))})
            res_gdf = polys_gdf[polys_gdf.intersects(pnt)]
            if len(res_gdf) == 0:
                # How is this possible?!
                pnts = MultiPoint([*pnts[:idx], *pnts[idx + 1:]])
                if pnts.is_empty:
                    break
                continue
            poly = res_gdf.geometry.iloc[0]
            polys.pop(res_gdf.iloc[0].list_index)
            pass_valid_polys.append(poly)
            # Drop every point already covered by the accepted polygon.
            pnts = pnts.difference(poly)
            if pnts.is_empty:
                break
            if isinstance(pnts, Point):
                pnts = MultiPoint([pnts])
        result_polys.extend(pass_valid_polys)
        # Remove the matched region and retry on the remaining mesh.
        target_mesh = clip_mesh_by_shape(
            target_mesh,
            shape=MultiPolygon(pass_valid_polys),
            inverse=True, fit_inside=True)
    return MultiPolygon(result_polys)
def repartition_features(linestring, max_verts):
    """Split a LineString into pieces of at most ``max_verts`` vertices.

    Returns a list of line features; an input already within the limit
    is returned unchanged as a single-element list.
    """
    if len(linestring.coords) <= max_verts:
        return [linestring]
    features = []
    chunk = []
    for start, end in zip(linestring.coords[:-1], linestring.coords[1:]):
        chunk.append(LineString((start, end)))
        # max_verts vertices correspond to max_verts - 1 segments.
        if len(chunk) == max_verts - 1:
            features.append(linemerge(chunk))
            chunk = []
    if chunk:
        features.append(linemerge(chunk))
    return features
def transform_linestring(
    linestring: LineString,
    target_size: float,
):
    """Resample ``linestring`` at roughly ``target_size`` spacing.

    Interpolation stations are laid every ``target_size`` along the
    line, always keeping the original end point.
    """
    span = linestring.length
    mark = 0.
    stations = [mark]
    while mark + target_size < span:
        mark = mark + target_size
        stations.append(mark)
    stations.append(span)
    linestring = LineString(
        [linestring.interpolate(d) for d in stations])
    return linestring
def needs_sieve(mesh, area=None):
    """Tell whether :func:`sieve` would remove anything from ``mesh``.

    With ``area`` given, any disconnected patch of at most that area
    counts; otherwise everything except the single largest patch counts.
    """
    areas = [polygon.area for polygon in geom_to_multipolygon(mesh)]
    if area is None:
        candidates = np.where(areas < np.max(areas))[0].tolist()
    else:
        candidates = [
            idx for idx, patch_area in enumerate(areas)
            if patch_area <= area]
    return len(candidates) > 0
def put_id_tags(mesh):
    """Renumber all vertex/element ID tags sequentially from 1."""
    # start enumerating on 1 to avoid issues with indexing on fortran models
    mesh.vert2 = np.array(
        [(coord, tag) for tag, coord in enumerate(mesh.vert2['coord'], start=1)],
        dtype=jigsaw_msh_t.VERT2_t
    )
    # Same renumbering for every element container.
    for elem_name, elem_dtype in (
            ('tria3', jigsaw_msh_t.TRIA3_t),
            ('quad4', jigsaw_msh_t.QUAD4_t),
            ('hexa8', jigsaw_msh_t.HEXA8_t)):
        connectivity = getattr(mesh, elem_name)['index']
        setattr(mesh, elem_name, np.array(
            [(conn, tag) for tag, conn in enumerate(connectivity, start=1)],
            dtype=elem_dtype
        ))
def _get_sieve_mask(mesh, polygons, sieve_area):
    """Per-vertex boolean mask of vertices inside patches to be sieved.

    With ``sieve_area`` None everything except the largest patch is
    marked; otherwise patches with area <= ``sieve_area`` are marked.
    """
    # NOTE: Some polygons are ghost polygons (interior)
    areas = [p.area for p in polygons]
    if sieve_area is None:
        remove = np.where(areas < np.max(areas))[0].tolist()
    else:
        remove = []
        for idx, patch_area in enumerate(areas):
            if patch_area <= sieve_area:
                remove.append(idx)
    # if the path surrounds the node, these need to be removed.
    vert2_mask = np.full((mesh.vert2['coord'].shape[0],), False)
    for idx in remove:
        # OR together the containment tests of every doomed patch.
        path = Path(polygons[idx].exterior.coords, closed=True)
        vert2_mask = vert2_mask | path.contains_points(mesh.vert2['coord'])
    return vert2_mask
def _sieve_by_mask(mesh, sieve_mask):
    """Remove masked vertices (and every element touching them) in place.

    Expands the mask to edge-adjacent and dangling-element vertices,
    drops affected triangles, then compacts and renumbers the vertex,
    value and triangle tables of ``mesh``.
    """
    # if the path surrounds the node, these need to be removed.
    vert2_mask = sieve_mask.copy()
    # select any connected nodes; these ones are missed by
    # path.contains_point() because they are at the path edges.
    _idxs = np.where(vert2_mask)[0]
    conn_verts = get_surrounding_elem_verts(mesh, _idxs)
    vert2_mask[conn_verts] = True
    # Also, there might be some dangling triangles without neighbors,
    # which are also missed by path.contains_point()
    lone_elem_verts = get_lone_element_verts(mesh)
    vert2_mask[lone_elem_verts] = True
    # Mask out elements containing the unwanted nodes.
    tria3_mask = np.any(vert2_mask[mesh.tria3['index']], axis=1)
    # Tria and node removal and renumbering indexes ...
    tria3_id_tag = mesh.tria3['IDtag'].take(np.where(~tria3_mask)[0])
    tria3_index = mesh.tria3['index'][~tria3_mask, :].flatten()
    used_indexes = np.unique(tria3_index)
    node_indexes = np.arange(mesh.vert2['coord'].shape[0])
    # Compact surviving vertex indices down to 0..n-1.
    renum = {old: new for new, old in enumerate(np.unique(tria3_index))}
    tria3_index = np.array([renum[i] for i in tria3_index])
    tria3_index = tria3_index.reshape((tria3_id_tag.shape[0], 3))
    vert2_idxs = np.where(np.isin(node_indexes, used_indexes))[0]
    # update vert2
    mesh.vert2 = mesh.vert2.take(vert2_idxs, axis=0)
    # update value
    if len(mesh.value) > 0:
        mesh.value = mesh.value.take(vert2_idxs, axis=0)
    # update tria3
    mesh.tria3 = np.array(
        [(tuple(indices), tria3_id_tag[i])
         for i, indices in enumerate(tria3_index)],
        dtype=jigsaw_msh_t.TRIA3_t)
def finalize_mesh(mesh, sieve_area=None):
    """Clean ``mesh`` in place until stable.

    Repeatedly removes pinched nodes and sieved (small / non-largest)
    patches until neither operation changes anything, then drops
    isolated vertices and renumbers all ID tags.
    """
    cleanup_isolates(mesh)
    while True:
        no_op = True
        # get_pinched_nodes is defined elsewhere in this module.
        pinched_nodes = get_pinched_nodes(mesh)
        if len(pinched_nodes):
            no_op = False
            # TODO drop fewer elements for pinch
            clip_mesh_by_vertex(
                mesh, pinched_nodes,
                can_use_other_verts=True,
                inverse=True, in_place=True)
        boundary_polys = get_mesh_polygons(mesh)
        sieve_mask = _get_sieve_mask(mesh, boundary_polys, sieve_area)
        if np.sum(sieve_mask):
            no_op = False
            _sieve_by_mask(mesh, sieve_mask)
        # Both clean-ups were no-ops: the mesh is stable.
        if no_op:
            break
    cleanup_isolates(mesh)
    put_id_tags(mesh)
def remesh_small_elements(opts, geom, mesh, hfun):
    """
    This function uses all the inputs for a given jigsaw meshing
    process and based on that finds and fixes tiny elements that
    might occur during initial meshing by iteratively remeshing
    """
    # TODO: Implement for quad, etc.
    hmin = np.min(hfun.value)
    # Area of an equilateral triangle whose side is the minimum mesh size.
    equilat_area = np.sqrt(3)/4 * hmin**2
    # List of arbitrary coef of equilateral triangle area for a given
    # minimum mesh size to come up with a decent cut off.
    coeffs = [0.5, 0.2, 0.1, 0.05]
    fixed_mesh = mesh
    for coef in coeffs:
        tria_areas = calculate_tria_areas(fixed_mesh)
        tiny_sz = coef * equilat_area
        tiny_verts = np.unique(fixed_mesh.tria3['index'][tria_areas < tiny_sz, :].ravel())
        # No triangle below this threshold: done.
        if len(tiny_verts) == 0:
            break
        # Cut the tiny elements out and let jigsaw re-mesh the holes,
        # seeding it with the clipped mesh.
        mesh_clip = clip_mesh_by_vertex(fixed_mesh, tiny_verts, inverse=True)
        fixed_mesh = jigsawpy.jigsaw_msh_t()
        fixed_mesh.mshID = 'euclidean-mesh'
        fixed_mesh.ndims = +2
        if hasattr(mesh, 'crs'):
            fixed_mesh.crs = mesh.crs
        jigsawpy.lib.jigsaw(
            opts, geom, fixed_mesh, init=mesh_clip, hfun=hfun)
    return fixed_mesh
def sieve(mesh, area=None):
    """
    A mesh can consist of multiple separate subdomains on a single structure.
    This function removes subdomains which are equal or smaller than the
    provided area. Default behaviour is to remove all subdomains except the
    largest one.  The mesh is modified in place.
    """
    # select the nodes to remove based on multipolygon areas
    multipolygon = geom_to_multipolygon(mesh)
    areas = [polygon.area for polygon in multipolygon]
    if area is None:
        remove = np.where(areas < np.max(areas))[0].tolist()
    else:
        remove = []
        for idx, patch_area in enumerate(areas):
            if patch_area <= area:
                remove.append(idx)
    # if the path surrounds the node, these need to be removed.
    vert2_mask = np.full((mesh.vert2['coord'].shape[0],), False)
    for idx in remove:
        path = Path(multipolygon[idx].exterior.coords, closed=True)
        vert2_mask = vert2_mask | path.contains_points(mesh.vert2['coord'])
    # select any connected nodes; these ones are missed by
    # path.contains_point() because they are at the path edges.
    _idxs = np.where(vert2_mask)[0]
    conn_verts = get_surrounding_elem_verts(mesh, _idxs)
    vert2_mask[conn_verts] = True
    # Also, there might be some dangling triangles without neighbors,
    # which are also missed by path.contains_point()
    lone_elem_verts = get_lone_element_verts(mesh)
    vert2_mask[lone_elem_verts] = True
    # Mask out elements containing the unwanted nodes.
    tria3_mask = np.any(vert2_mask[mesh.tria3['index']], axis=1)
    # Renumber indexes ...
    # isolated node removal does not require elimination of triangles from
    # the table, therefore the length of the indexes is constant.
    # We must simply renumber the tria3 indexes to match the new node indexes.
    # Essentially subtract one, but going from the bottom of the index table
    # to the top.
    used_indexes = np.unique(mesh.tria3['index'])
    node_indexes = np.arange(mesh.vert2['coord'].shape[0])
    tria3_idxs = np.where(~np.isin(node_indexes, used_indexes))[0]
    tria3_id_tag = mesh.tria3['IDtag'].take(np.where(~tria3_mask)[0])
    tria3_index = mesh.tria3['index'][~tria3_mask, :].flatten()
    for idx in reversed(tria3_idxs):
        tria3_index[np.where(tria3_index >= idx)] -= 1
    tria3_index = tria3_index.reshape((tria3_id_tag.shape[0], 3))
    vert2_idxs = np.where(np.isin(node_indexes, used_indexes))[0]
    # update vert2
    mesh.vert2 = mesh.vert2.take(vert2_idxs, axis=0)
    # update value
    if len(mesh.value) > 0:
        mesh.value = mesh.value.take(vert2_idxs, axis=0)
    # update tria3
    mesh.tria3 = np.array(
        [(tuple(indices), tria3_id_tag[i])
         for i, indices in enumerate(tria3_index)],
        dtype=jigsaw_msh_t.TRIA3_t)
def sort_edges(edges):
    """Chain a flat list of (start, end) vertex edges into connected runs.

    Edges are greedily appended/prepended to the current chain (flipped
    when needed); when nothing attaches, the chain is finalized and a
    new one starts.  Returns a list of tuples of edges.  The input list
    is consumed (emptied) in the process, matching the original
    behavior.

    Bug fixes vs. the previous version:
    - the ``e0``/``e1`` lookup lists were only trimmed in the
      "start new chain" branch, so after any other pop the ``index()``
      results went stale relative to ``edges`` (e.g.
      ``sort_edges([(1, 2), (0, 1), (2, 3)])`` raised IndexError);
    - a single-edge input crashed on unpacking ``zip(*[])``;
    - the redundant duplicated finalize branch was collapsed.
    """
    if len(edges) == 0:
        return edges
    edge_collection = []
    # Seed the first chain with the last edge.
    ordered_edges = [edges.pop(-1)]
    if len(edges) == 0:
        # Single-edge input: there is nothing left to chain.
        return [tuple(ordered_edges)]
    # Parallel start/end vertex lists, kept in sync with ``edges`` so
    # that list.index() results are valid positions into ``edges``.
    e0, e1 = [list(t) for t in zip(*edges)]
    while len(edges) > 0:
        if ordered_edges[-1][1] in e0:
            # Extend the tail with an edge starting at the chain's end.
            idx = e0.index(ordered_edges[-1][1])
            ordered_edges.append(edges.pop(idx))
        elif ordered_edges[0][0] in e1:
            # Extend the head with an edge ending at the chain's start.
            idx = e1.index(ordered_edges[0][0])
            ordered_edges.insert(0, edges.pop(idx))
        elif ordered_edges[-1][1] in e1:
            # Same as the first case, but the edge must be flipped.
            idx = e1.index(ordered_edges[-1][1])
            ordered_edges.append(
                list(reversed(edges.pop(idx))))
        elif ordered_edges[0][0] in e0:
            # Same as the second case, but the edge must be flipped.
            idx = e0.index(ordered_edges[0][0])
            ordered_edges.insert(
                0, list(reversed(edges.pop(idx))))
        else:
            # Nothing attaches: close this chain and start a new one.
            edge_collection.append(tuple(ordered_edges))
            idx = -1
            ordered_edges = [edges.pop(idx)]
        # Keep the lookup lists aligned with ``edges`` in every branch.
        e0.pop(idx)
        e1.pop(idx)
    # finalize the last (possibly only) chain
    edge_collection.append(tuple(ordered_edges))
    return edge_collection
def index_ring_collection(mesh):
    """Group the mesh's boundary edges into polygons of index rings.

    Returns ``{polygon_id: {'exterior': (n, 2) edge array,
    'interiors': [edge arrays, ...]}}`` where every ring is an ordered
    sequence of (start, end) vertex-index pairs.  Exteriors are assigned
    largest-area first; rings inside an exterior (but not inside another
    candidate ring) become its interiors.
    """
    # find boundary edges using triangulation neighbors table,
    # see: https://stackoverflow.com/a/23073229/7432462
    boundary_edges = []
    tri = mesh_to_tri(mesh)
    idxs = np.vstack(
        list(np.where(tri.neighbors == -1))).T
    for i, j in idxs:
        boundary_edges.append(
            (int(tri.triangles[i, j]),
             int(tri.triangles[i, (j+1) % 3])))
    init_idx_ring_coll = sort_edges(boundary_edges)
    # sort index_rings into corresponding "polygons"
    areas = []
    vertices = mesh.vert2['coord']
    for index_ring in init_idx_ring_coll:
        e0, _ = [list(t) for t in zip(*index_ring)]
        areas.append(float(Polygon(vertices[e0, :]).area))
    # maximum area must be main mesh
    idx = areas.index(np.max(areas))
    exterior = init_idx_ring_coll.pop(idx)
    areas.pop(idx)
    _id = 0
    idx_ring_coll = {}
    idx_ring_coll[_id] = {
        'exterior': np.asarray(exterior),
        'interiors': []
    }
    # Containment test path for the current exterior (ring closed by
    # repeating its first vertex).
    e0, e1 = [list(t) for t in zip(*exterior)]
    path = Path(vertices[e0 + [e0[0]], :], closed=True)
    while len(init_idx_ring_coll) > 0:
        # find all internal rings
        potential_interiors = []
        for i, index_ring in enumerate(init_idx_ring_coll):
            e0, e1 = [list(t) for t in zip(*index_ring)]
            if path.contains_point(vertices[e0[0], :]):
                potential_interiors.append(i)
        # filter out nested rings
        real_interiors = []
        for i, p_interior in reversed(list(enumerate(potential_interiors))):
            _p_interior = init_idx_ring_coll[p_interior]
            check = [init_idx_ring_coll[_]
                     for j, _ in reversed(list(enumerate(potential_interiors)))
                     if i != j]
            has_parent = False
            for _path in check:
                e0, e1 = [list(t) for t in zip(*_path)]
                _path = Path(vertices[e0 + [e0[0]], :], closed=True)
                if _path.contains_point(vertices[_p_interior[0][0], :]):
                    has_parent = True
                    break
            if not has_parent:
                real_interiors.append(p_interior)
        # pop real rings from collection
        for i in reversed(sorted(real_interiors)):
            idx_ring_coll[_id]['interiors'].append(
                np.asarray(init_idx_ring_coll.pop(i)))
            areas.pop(i)
        # if no internal rings found, initialize next polygon
        if len(init_idx_ring_coll) > 0:
            idx = areas.index(np.max(areas))
            exterior = init_idx_ring_coll.pop(idx)
            areas.pop(idx)
            _id += 1
            idx_ring_coll[_id] = {
                'exterior': np.asarray(exterior),
                'interiors': []
            }
            e0, e1 = [list(t) for t in zip(*exterior)]
            path = Path(vertices[e0 + [e0[0]], :], closed=True)
    return idx_ring_coll
def outer_ring_collection(mesh):
    """Return each polygon's exterior index ring, keyed by polygon id."""
    exteriors = defaultdict()
    for poly_id, rings in index_ring_collection(mesh).items():
        exteriors[poly_id] = rings['exterior']
    return exteriors
def inner_ring_collection(mesh):
    """Return each polygon's list of interior index rings, keyed by id."""
    interiors = defaultdict()
    for poly_id, rings in index_ring_collection(mesh).items():
        interiors[poly_id] = rings['interiors']
    return interiors
def get_multipolygon_from_pathplot(ax):
    """Rebuild a shapely MultiPolygon from a matplotlib path plot.

    Rings are extracted from the axes' path collections; the largest
    remaining ring becomes an exterior and any ring whose first point
    lies inside it becomes one of its holes, repeating until all rings
    are consumed.
    """
    # extract linear_rings from plot
    linear_ring_collection = []
    for path_collection in ax.collections:
        for path in path_collection.get_paths():
            polygons = path.to_polygons(closed_only=True)
            for linear_ring in polygons:
                # Degenerate rings (3 points or fewer) are skipped.
                if linear_ring.shape[0] > 3:
                    linear_ring_collection.append(
                        LinearRing(linear_ring))
    if len(linear_ring_collection) > 1:
        # reorder linear rings from above
        areas = [Polygon(linear_ring).area
                 for linear_ring in linear_ring_collection]
        idx = np.where(areas == np.max(areas))[0][0]
        polygon_collection = []
        outer_ring = linear_ring_collection.pop(idx)
        path = Path(np.asarray(outer_ring.coords), closed=True)
        while len(linear_ring_collection) > 0:
            inner_rings = []
            # Reversed iteration so pops do not disturb pending indices.
            for i, linear_ring in reversed(
                    list(enumerate(linear_ring_collection))):
                xy = np.asarray(linear_ring.coords)[0, :]
                if path.contains_point(xy):
                    inner_rings.append(linear_ring_collection.pop(i))
            polygon_collection.append(Polygon(outer_ring, inner_rings))
            if len(linear_ring_collection) > 0:
                areas = [Polygon(linear_ring).area
                         for linear_ring in linear_ring_collection]
                idx = np.where(areas == np.max(areas))[0][0]
                outer_ring = linear_ring_collection.pop(idx)
                path = Path(np.asarray(outer_ring.coords), closed=True)
        multipolygon = MultiPolygon(polygon_collection)
    else:
        multipolygon = MultiPolygon(
            [Polygon(linear_ring_collection.pop())])
    return multipolygon
def signed_polygon_area(vertices):
    """Signed area of a polygon via the shoelace formula.

    Positive for counter-clockwise vertex order, negative for clockwise.
    https://code.activestate.com/recipes/578047-area-of-polygon-using-shoelace-formula/
    """
    count = len(vertices)
    total = 0.0
    for i in range(count):
        x1, y1 = vertices[i][0], vertices[i][1]
        # Wrap around to the first vertex for the closing edge.
        x2, y2 = vertices[(i + 1) % count][0], vertices[(i + 1) % count][1]
        total += x1 * y2 - x2 * y1
    return total / 2.0
def vertices_around_vertex(mesh):
    """Build a vertex -> set-of-adjacent-vertices map from all elements.

    Two vertices are adjacent when they share any tria3/quad4/hexa8
    element.  Only euclidean meshes are supported.
    """
    if mesh.mshID != 'euclidean-mesh':
        msg = f"Not implemented for mshID={mesh.mshID}"
        raise NotImplementedError(msg)
    adjacency = defaultdict(set)
    for geom in (mesh.tria3, mesh.quad4, mesh.hexa8):
        for simplex in geom['index']:
            # permutations yields every ordered pair within the element.
            for i, j in permutations(simplex, 2):
                adjacency[i].add(j)
    return adjacency
def get_surrounding_elem_verts(mesh, in_vert):
    """Vertices of every element that touches any vertex in ``in_vert``.

    np.any is used so that vertices belonging to the touching elements
    but not themselves listed in ``in_vert`` are included too.
    """
    elem_arrays = (
        mesh.tria3['index'], mesh.quad4['index'], mesh.hexa8['index'])
    connected = []
    for elems in elem_arrays:
        touches = np.any(
            np.isin(elems.ravel(), in_vert).reshape(elems.shape), 1)
        connected.append(elems[touches, :].ravel())
    return np.unique(np.concatenate(connected))
def get_lone_element_verts(mesh):
    """Find vertices of "dangling" elements.

    An element is lone when every one of its vertices appears exactly
    once across the whole connectivity table, i.e. it shares no vertex
    with any other element.  Such elements are missed by
    path.contains_point() based sieving.
    """
    elem_arrays = (
        mesh.tria3['index'], mesh.quad4['index'], mesh.hexa8['index'])
    all_verts = np.concatenate([elems.ravel() for elems in elem_arrays])
    unq_verts, counts = np.unique(all_verts, return_counts=True)
    once_verts = unq_verts[counts == 1]
    # np.all: the WHOLE element must consist of single-use vertices,
    # not merely contain one.
    picked = []
    for elems in elem_arrays:
        lone_mask = np.all(
            np.isin(elems.ravel(), once_verts).reshape(elems.shape), 1)
        picked.append(elems[lone_mask, :].ravel())
    return np.unique(np.concatenate(picked))
# https://en.wikipedia.org/wiki/Polygon_mesh#Summary_of_mesh_representation
# V-V All vertices around vertex
# E-F All edges of a face
# V-F All vertices of a face
# F-V All faces around a vertex
# E-V All edges around a vertex
# F-E Both faces of an edge
# V-E Both vertices of an edge
# Flook Find face with given vertices
def get_verts_in_shape(
        mesh: jigsaw_msh_t,
        shape: Union[box, Polygon, MultiPolygon],
        from_box: bool = False,
        ) -> Sequence[int]:
    """Indices of mesh vertices that fall inside ``shape``.

    With ``from_box=True`` only the shape's bounding box is tested via a
    cheap pure-numpy comparison.
    """
    if from_box:
        crd = mesh.vert2['coord']
        xmin, ymin, xmax, ymax = shape.bounds
        # Strict inequalities: points exactly on the box edge are excluded.
        in_box_idx_1 = np.arange(len(crd))[crd[:, 0] > xmin]
        in_box_idx_2 = np.arange(len(crd))[crd[:, 0] < xmax]
        in_box_idx_3 = np.arange(len(crd))[crd[:, 1] > ymin]
        in_box_idx_4 = np.arange(len(crd))[crd[:, 1] < ymax]
        in_box_idx = reduce(
            np.intersect1d, (in_box_idx_1, in_box_idx_2,
                             in_box_idx_3, in_box_idx_4))
        return in_box_idx
    pt_series = gpd.GeoSeries(gpd.points_from_xy(
        mesh.vert2['coord'][:,0], mesh.vert2['coord'][:,1]))
    shp_series = gpd.GeoSeries(shape)
    # NOTE(review): GeoPandas query_bulk returns a (2, n) array of
    # (input-geometry index, tree-point index) pairs, while the from_box
    # branch returns a flat vertex-index array -- confirm callers handle
    # this shape as intended.
    in_shp_idx = pt_series.sindex.query_bulk(
        shp_series, predicate="intersects")
    return in_shp_idx
@must_be_euclidean_mesh
def get_cross_edges(
        mesh: jigsaw_msh_t,
        shape: Union[box, Polygon, MultiPolygon],
        ) -> Sequence[Tuple[int, int]]:
    '''
    Return the list of edges crossing the input shape exterior,
    as an array of vertex-index pairs.
    '''
    coords = mesh.vert2['coord']
    # Coordinate -> vertex-index map so intersecting segments found by
    # shapely can be translated back into index pairs at the end.
    coord_dict = {}
    for i, coo in enumerate(coords):
        coord_dict[tuple(coo)] = i
    gdf_shape = gpd.GeoDataFrame(
        geometry=gpd.GeoSeries(shape))
    exteriors = [pl.exterior for pl in gdf_shape.explode().geometry]
    # TODO: Reduce domain of search for faster results
    all_edges = get_mesh_edges(mesh, unique=True)
    edge_coords = coords[all_edges, :]
    gdf_edg = gpd.GeoDataFrame(
        geometry=gpd.GeoSeries(linemerge(edge_coords)))
    # Spatial join: keep only merged edge pieces that touch an exterior.
    gdf_x = gpd.sjoin(
        gdf_edg.explode(),
        gpd.GeoDataFrame(geometry=gpd.GeoSeries(exteriors)),
        how='inner', op='intersects')
    cut_coords = [
        list(cooseq)
        for cooseq in gdf_x.geometry.apply(lambda i: i.coords).values]
    # Re-split the merged linestrings into 2-point edges ...
    cut_edges = np.array([
        (coo_list[i], coo_list[i+1])
        for coo_list in cut_coords
        for i in range(len(coo_list)-1) ])
    # ... and translate their coordinates back into vertex indices.
    cut_edge_idx = np.array(
        [coord_dict[tuple(coo)]
         for coo in cut_edges.reshape(-1, 2)]).reshape(
            cut_edges.shape[:2])
    return cut_edge_idx
def clip_mesh_by_shape(
        mesh: jigsaw_msh_t,
        shape: Union[box, Polygon, MultiPolygon],
        use_box_only: bool = False,
        fit_inside: bool = True,
        inverse: bool = False,
        in_place: bool = False,
        check_cross_edges: bool = False
        ) -> jigsaw_msh_t:
    """Clip ``mesh`` to (or, with ``inverse``, away from) ``shape``.

    A cheap bounding-box pre-clip runs first (skipped when computing an
    inverse clip from the full shape, which the bbox would corrupt);
    ``use_box_only`` stops after that stage.  ``check_cross_edges``
    additionally treats elements whose edges cross the shape exterior.
    """
    # NOTE: Checking cross edge is only meaningful when
    # fit inside flag is NOT set
    edge_flag = check_cross_edges and not fit_inside
    # If we want to calculate inverse based on shape, calculating
    # from bbox first results in the wrong result
    if not inverse or use_box_only:
        # First based on bounding box only
        shape_box = box(*shape.bounds)
        # TODO: Optimize for multipolygons (use separate bboxes)
        in_box_idx = get_verts_in_shape(mesh, shape_box, True)
        if edge_flag and not inverse:
            x_edge_idx = get_cross_edges(mesh, shape_box)
            in_box_idx = np.append(in_box_idx, np.unique(x_edge_idx))
        mesh = clip_mesh_by_vertex(
            mesh, in_box_idx, not fit_inside, inverse, in_place)
        if use_box_only:
            if edge_flag and inverse:
                x_edge_idx = get_cross_edges(mesh, shape_box)
                mesh = remove_mesh_by_edge(
                    mesh, x_edge_idx, in_place)
            return mesh
    # Second (precise) stage against the full shape.
    in_shp_idx = get_verts_in_shape(mesh, shape, False)
    if edge_flag and not inverse:
        x_edge_idx = get_cross_edges(mesh, shape)
        in_shp_idx = np.append(in_shp_idx, np.unique(x_edge_idx))
    mesh = clip_mesh_by_vertex(
        mesh, in_shp_idx, not fit_inside, inverse, in_place)
    if edge_flag and inverse:
        x_edge_idx = get_cross_edges(mesh, shape)
        mesh = remove_mesh_by_edge(mesh, x_edge_idx, in_place)
    return mesh
def remove_mesh_by_edge(
        mesh: jigsaw_msh_t,
        edges: Sequence[Tuple[int, int]],
        in_place: bool = False
        ) -> jigsaw_msh_t:
    """Drop every 2D element containing two or more vertices of ``edges``.

    Returns the modified mesh (a deep copy unless ``in_place=True``).
    """
    mesh_out = mesh
    if not in_place:
        mesh_out = deepcopy(mesh)
    # NOTE: This method selects more elements than needed as it
    # uses only existence of two or more of the vertices attached
    # to the input edges in the element as criteria.
    edge_verts = np.unique(edges)
    for etype in ELEM_2D_TYPES:
        elems = getattr(mesh, etype)['index']
        # If a given element contains two vertices from
        # a crossing edge, it is selected (i.e. dropped below).
        test = np.sum(np.isin(elems, edge_verts), axis=1)
        elems = elems[test < 2]
        setattr(mesh_out, etype, np.array(
            [(idx, 0) for idx in elems],
            dtype=getattr(
                jigsawpy.jigsaw_msh_t, f'{etype.upper()}_t')))
    return mesh_out
def clip_mesh_by_vertex(
        mesh: jigsaw_msh_t,
        vert_in: Sequence[int],
        can_use_other_verts: bool = False,
        inverse: bool = False,
        in_place: bool = False
        ) -> jigsaw_msh_t:
    """Keep (or, with ``inverse=True``, drop) elements touching ``vert_in``.

    With ``can_use_other_verts`` an element qualifies if ANY of its
    vertices is in ``vert_in``; otherwise ALL must be.  Vertices and
    per-vertex values are compacted and renumbered accordingly.  Only
    implemented for 2D euclidean meshes.
    """
    if mesh.mshID == 'euclidean-mesh' and mesh.ndims == 2:
        coord = mesh.vert2['coord']
        # TODO: What about edge2 if in_place?
        mesh_types = {
            'tria3': 'TRIA3_t',
            'quad4': 'QUAD4_t',
            'hexa8': 'HEXA8_t'
        }
        elm_dict = {
            key: getattr(mesh, key)['index'] for key in mesh_types}
        # Whether elements that include "in"-vertices can be created
        # using vertices other than "in"-vertices
        mark_func = np.all
        if can_use_other_verts:
            mark_func = np.any
        mark_dict = {
            key: mark_func(
                (np.isin(elems.ravel(), vert_in).reshape(
                    elems.shape)), 1)
            for key, elems in elm_dict.items()}
        # Whether to return elements found by "in" vertices or return
        # all elements except them
        if inverse:
            mark_dict = {
                key: np.logical_not(mark)
                for key, mark in mark_dict.items()}
        # Find elements based on old vertex index
        elem_draft_dict = {
            key: elm_dict[key][mark_dict[key], :]
            for key in elm_dict}
        # Old-to-new vertex-index map over every still-referenced vertex.
        crd_old_to_new = {
            index: i for i, index
            in enumerate(sorted(np.unique(np.concatenate(
                [draft.ravel()
                 for draft in elem_draft_dict.values()]
            ))))
        }
        elem_final_dict = {
            key: np.array(
                [[crd_old_to_new[x] for x in element]
                 for element in draft])
            for key, draft in elem_draft_dict.items()
        }
        new_coord = coord[list(crd_old_to_new.keys()), :]
        # Per-vertex values are carried over only when they were aligned
        # with the vertex table; otherwise an empty array is used.
        value = np.zeros(shape=(0, 0), dtype=jigsaw_msh_t.REALS_t)
        if len(mesh.value) == len(coord):
            value = mesh.value.take(
                list(crd_old_to_new.keys()), axis=0).copy()
        mesh_out = mesh
        if not in_place:
            mesh_out = jigsaw_msh_t()
            mesh_out.mshID = mesh.mshID
            mesh_out.ndims = mesh.ndims
            if hasattr(mesh, "crs"):
                mesh_out.crs = deepcopy(mesh.crs)
        mesh_out.value = value
        mesh_out.vert2 = np.array(
            [(coo, 0) for coo in new_coord],
            dtype=jigsaw_msh_t.VERT2_t)
        for key, elem_type in mesh_types.items():
            setattr(
                mesh_out,
                key,
                np.array(
                    [(con, 0) for con in elem_final_dict[key]],
                    dtype=getattr(jigsaw_msh_t, elem_type)))
        return mesh_out
    msg = (f"Not implemented for"
           f" mshID={mesh.mshID} and dim={mesh.ndims}")
    raise NotImplementedError(msg)
@must_be_euclidean_mesh
def get_mesh_edges(mesh: jigsaw_msh_t, unique=True):
    """Return the edges of all elements of ``mesh`` as an (n, 2) index array.

    Each edge is reported with its lower vertex index first.  When
    ``unique`` is True, duplicate edges (shared between adjacent
    elements) are collapsed to a single row.
    """
    # NOTE: For msh_t type vertex id and index are the same
    trias = mesh.tria3['index']
    quads = mesh.quad4['index']
    hexas = mesh.hexa8['index']

    # Get unique set of edges by rolling connectivity
    # and joining connectivities in 3rd dimension, then sorting
    # to get all edges with lower index first
    all_edges = np.empty(shape=(0, 2), dtype=trias.dtype)
    for elm_type in [trias, quads, hexas]:
        if elm_type.shape[0]:
            edges = np.sort(
                np.stack(
                    (elm_type, np.roll(elm_type, shift=1, axis=1)),
                    axis=2),
                axis=2)
            # BUGFIX: np.product was deprecated and removed in NumPy 2.0;
            # np.prod is the equivalent replacement.
            edges = edges.reshape(np.prod(edges.shape[0:2]), 2)
            all_edges = np.vstack((all_edges, edges))
    if unique:
        all_edges = np.unique(all_edges, axis=0)
    return all_edges
@must_be_euclidean_mesh
def calculate_tria_areas(mesh):
    """Return the area of every tria3 element of ``mesh`` (Heron's formula)."""
    verts = mesh.vert2['coord']
    conn = mesh.tria3['index']
    # Pair each vertex with its rolled neighbour, sort each pair so the
    # lower index comes first, then gather coordinates per side.
    pair_idx = np.sort(
        np.stack((conn, np.roll(conn, shift=1, axis=1)), axis=2),
        axis=2)
    side_vec = np.diff(verts[pair_idx], axis=2).squeeze()
    side_len = np.sqrt(
        np.sum(np.power(np.abs(side_vec), 2), axis=2).squeeze())
    # Semi-perimeter per triangle, kept as a column for broadcasting.
    semi = np.sum(side_len, axis=1) / 2
    semi = semi.reshape(len(semi), 1)
    # pylint: disable=W0632
    len_a, len_b, len_c = np.split(side_len, 3, axis=1)
    areas = np.sqrt(
        semi * (semi - len_a)
        * (semi - len_b) * (semi - len_c)
    ).squeeze()
    return areas
@must_be_euclidean_mesh
def calculate_edge_lengths(mesh, transformer=None):
    """Return a defaultdict mapping each unique edge tuple (i, j) to its length.

    If ``transformer`` is given, vertex coordinates are projected with it
    before the lengths are measured.
    """
    coord = mesh.vert2['coord']
    if transformer is not None:
        coord = np.vstack(
            transformer.transform(coord[:, 0], coord[:, 1])).T

    # Unique edges, lower vertex index first.
    all_edges = get_mesh_edges(mesh, unique=True)

    # ONLY TESTED FOR TRIA AS OF NOW
    # This part of the function is generic for tria and quad
    edge_coords = coord[all_edges, :]
    deltas = np.abs(np.diff(edge_coords, axis=1))
    edge_lens = np.sqrt(np.sum(np.power(deltas, 2), axis=2)).squeeze()

    edge_dict = defaultdict(float)
    for edge, length in zip(all_edges, edge_lens):
        edge_dict[tuple(edge)] = length
    return edge_dict
@must_be_euclidean_mesh
def elements(mesh):
    """Return a mapping from element ID to its vertex-index connectivity.

    IDs come from the mesh ``IDtag`` fields; when those tags are not
    unique, 1-based sequential IDs are used instead.
    """
    ids = []
    conns = []
    for field in (mesh.tria3, mesh.quad4, mesh.hexa8):
        ids.extend(list(field['IDtag']))
        conns.extend(list(field['index']))
    # Fall back to sequential IDs when the tags collide.
    if len(set(ids)) != len(ids):
        ids = range(1, len(ids) + 1)
    return {ids[i]: conn for i, conn in enumerate(conns)}
@must_be_euclidean_mesh
def faces_around_vertex(mesh):
    # NOTE(review): this function is unfinished -- the main loop body is a
    # no-op (TODO left by the author) and the accumulator is re-created just
    # before the implicit ``None`` return, so callers currently get nothing
    # useful back.  Kept as-is; confirm intent before completing it.
    _elements = elements(mesh)
    length = max(map(len, _elements.values()))
    # Pad every connectivity row to equal length with a -99999 sentinel.
    y = np.array([xi+[-99999]*(length-len(xi)) for xi in _elements.values()])
    faces_around_vert = defaultdict(set)
    for i, coord in enumerate(mesh.vert2['index']):
        # TODO:
        pass
        # np.isin(i, axis=0)
        # faces_around_vert[i].add()
    faces_around_vert = defaultdict(set)
def get_boundary_edges(mesh):
    '''
    Find internal and external boundaries of mesh

    An edge used by exactly one element lies on a boundary (internal hole
    or external hull); interior edges are shared by exactly two elements.
    '''
    all_edges = get_mesh_edges(mesh, unique=False)
    # Simplexes (list of node indices)
    all_edges, e_cnt = np.unique(all_edges, axis=0, return_counts=True)
    # CLEANUP: dropped the unused ``coord`` and ``shared_edges`` locals of
    # the original implementation.
    boundary_edges = all_edges[e_cnt == 1]
    return boundary_edges
def get_pinched_nodes(mesh):
    '''
    Find nodes through which fluid cannot flow
    '''
    edges_on_boundary = get_boundary_edges(mesh)
    # A well-formed boundary passes through each node exactly twice; a
    # node touched by more than two boundary edges is a "pinch" point.
    verts, counts = np.unique(edges_on_boundary, return_counts=True)
    return verts[counts > 2]
def has_pinched_nodes(mesh):
    # Older function: computationally more expensive and missing some
    # nodes
    ring_nodes = []
    for rings in inner_ring_collection(mesh).values():
        for ring in rings:
            ring_nodes.extend(np.asarray(ring)[:, 0].tolist())
    # A node appearing in more than one inner ring edge start is pinched.
    uniq, counts = np.unique(ring_nodes, return_counts=True)
    return len(uniq[counts > 1]) > 0
def cleanup_pinched_nodes(mesh):
    # Older function: computationally more expensive and missing some
    # nodes
    ring_nodes = []
    for rings in inner_ring_collection(mesh).values():
        for ring in rings:
            ring_nodes.extend(np.asarray(ring)[:, 0].tolist())
    uniq, counts = np.unique(ring_nodes, return_counts=True)
    pinched = uniq[counts > 1]
    # Keep only triangles that reference no pinched node (mutates mesh).
    keep_rows = np.where(
        ~np.any(np.isin(mesh.tria3['index'], pinched), axis=1))[0]
    mesh.tria3 = mesh.tria3.take(keep_rows, axis=0)
def interpolate(src: jigsaw_msh_t, dst: jigsaw_msh_t, **kwargs):
    """Interpolate ``src`` nodal values onto ``dst`` (dispatch on mshID pair)."""
    pair = (src.mshID, dst.mshID)
    if pair == ('euclidean-grid', 'euclidean-mesh'):
        interpolate_euclidean_grid_to_euclidean_mesh(src, dst, **kwargs)
    elif pair == ('euclidean-mesh', 'euclidean-mesh'):
        interpolate_euclidean_mesh_to_euclidean_mesh(src, dst, **kwargs)
    else:
        raise NotImplementedError(
            f'Not implemented type combination: source={src.mshID}, '
            f'dest={dst.mshID}')
def interpolate_euclidean_mesh_to_euclidean_mesh(
        src: jigsaw_msh_t,
        dst: jigsaw_msh_t,
        method='linear',
        fill_value=np.nan
):
    """Interpolate nodal values from ``src`` onto ``dst`` via scipy ``griddata``.

    Writes the sampled values into ``dst.value`` in place as an (n, 1)
    REALS_t column vector.
    """
    sampled = griddata(
        src.vert2['coord'],
        src.value.flatten(),
        dst.vert2['coord'],
        method=method,
        fill_value=fill_value
    )
    dst.value = np.array(
        sampled.reshape(len(sampled), 1), dtype=jigsaw_msh_t.REALS_t)
def interpolate_euclidean_grid_to_euclidean_mesh(
        src: jigsaw_msh_t,
        dst: jigsaw_msh_t,
        bbox=None,
        kx=3,
        ky=3,
        s=0
):
    """Sample a smoothing-spline fit of the ``src`` grid at ``dst`` vertices.

    Writes the evaluated values into ``dst.value`` in place as an (n, 1)
    REALS_t column vector.
    """
    spline = RectBivariateSpline(
        src.xgrid,
        src.ygrid,
        src.value.T,
        bbox=bbox or [None, None, None, None],
        kx=kx,
        ky=ky,
        s=s
    )
    sampled = spline.ev(
        dst.vert2['coord'][:, 0],
        dst.vert2['coord'][:, 1])
    dst.value = np.array(
        sampled.reshape((sampled.size, 1)),
        dtype=jigsaw_msh_t.REALS_t)
def tricontourf(
        mesh,
        ax=None,
        show=False,
        figsize=None,
        extend='both',
        colorbar=False,
        **kwargs
):
    """Filled-contour plot of the mesh tria3 values.

    Creates a new figure/axes when ``ax`` is None; optionally adds a
    colorbar and shows the figure.  Returns the axes used.
    """
    if ax is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111)
    tcf = ax.tricontourf(
        mesh.vert2['coord'][:, 0],
        mesh.vert2['coord'][:, 1],
        mesh.tria3['index'],
        mesh.value.flatten(),
        # BUGFIX: ``extend`` was accepted by the signature but never
        # forwarded to matplotlib; pass it through as intended.
        extend=extend,
        **kwargs)
    if colorbar:
        plt.colorbar(tcf)
    if show:
        plt.gca().axis('scaled')
        plt.show()
    return ax
def triplot(
        mesh,
        axes=None,
        show=False,
        figsize=None,
        color='k',
        linewidth=0.07,
        **kwargs
):
    """Wireframe plot of the mesh triangulation; returns the axes used."""
    if axes is None:
        figure = plt.figure(figsize=figsize)
        axes = figure.add_subplot(111)
    xs = mesh.vert2['coord'][:, 0]
    ys = mesh.vert2['coord'][:, 1]
    axes.triplot(
        xs,
        ys,
        mesh.tria3['index'],
        color=color,
        linewidth=linewidth,
        **kwargs)
    if show:
        axes.axis('scaled')
        plt.show()
    return axes
def reproject(
        mesh: jigsaw_msh_t,
        dst_crs: Union[str, CRS]
):
    """Transform mesh vertex coordinates from ``mesh.crs`` to ``dst_crs`` in place."""
    target = CRS.from_user_input(dst_crs)
    transformer = Transformer.from_crs(mesh.crs, target, always_xy=True)
    # pylint: disable=E0633
    x, y = transformer.transform(
        mesh.vert2['coord'][:, 0], mesh.vert2['coord'][:, 1])
    # Rebuild the vert2 structured array, preserving each vertex IDtag.
    new_verts = [
        ([x[i], y[i]], tag)
        for i, tag in enumerate(mesh.vert2['IDtag'])]
    mesh.vert2 = np.array(new_verts, dtype=jigsaw_msh_t.VERT2_t)
    mesh.crs = target
def limgrad(mesh, dfdx, imax=100):
    """
    See https://github.com/dengwirda/mesh2d/blob/master/hjac-util/limgrad.m
    for original source code.

    Limits the gradient of the nodal size function ``mesh.value`` so that
    neighbouring values differ by at most ``dfdx`` times the edge length;
    returns the limited (flattened) values.  Raises when the relaxation
    has not converged after ``imax`` sweeps.
    """
    tri = mesh_to_tri(mesh)
    xy = np.vstack([tri.x, tri.y]).T
    edge = tri.edges
    dx = np.subtract(xy[edge[:, 0], 0], xy[edge[:, 1], 0])
    dy = np.subtract(xy[edge[:, 0], 1], xy[edge[:, 1], 1])
    elen = np.sqrt(dx**2+dy**2)
    ffun = mesh.value.flatten()
    # ``aset`` records the sweep in which each node was last updated; only
    # nodes touched in the previous sweep are revisited.
    aset = np.zeros(ffun.shape)
    ftol = np.min(ffun) * np.sqrt(np.finfo(float).eps)
    # precompute neighbor table
    point_neighbors = defaultdict(set)
    for simplex in tri.triangles:
        for i, j in permutations(simplex, 2):
            point_neighbors[i].add(j)
    # iterative smoothing
    for _iter in range(1, imax+1):
        aidx = np.where(aset == _iter-1)[0]
        if len(aidx) == 0.:
            break
        # NOTE(review): ``active_idxs`` are positions within ``aidx`` but are
        # used below as node indices, and ``elen`` is indexed by node rather
        # than by edge -- this mirrors this port as written; confirm against
        # the upstream MATLAB before changing.
        active_idxs = np.argsort(ffun[aidx])
        for active_idx in active_idxs:
            adjacent_edges = point_neighbors[active_idx]
            for adj_edge in adjacent_edges:
                if ffun[adj_edge] > ffun[active_idx]:
                    fun1 = ffun[active_idx] + elen[active_idx] * dfdx
                    if ffun[adj_edge] > fun1+ftol:
                        ffun[adj_edge] = fun1
                        aset[adj_edge] = _iter
                else:
                    fun2 = ffun[adj_edge] + elen[active_idx] * dfdx
                    if ffun[active_idx] > fun2+ftol:
                        ffun[active_idx] = fun2
                        aset[active_idx] = _iter
    if not _iter < imax:
        msg = f'limgrad() did not converge within {imax} iterations.'
        raise Exception(msg)
    return ffun
def msh_t_to_grd(msh: jigsaw_msh_t) -> Dict:
    """Convert a msh_t mesh into a grd-style dictionary.

    Coordinates are reprojected to EPSG:4326 when the mesh CRS differs;
    node "depths" are the negated mesh values.  Node and element IDs are
    1-based (index + 1), with quad IDs offset past the triangle IDs.
    """
    coords = msh.vert2['coord']
    desc = "EPSG:4326"
    src_crs = getattr(msh, 'crs', None)
    if src_crs is not None:
        # TODO: Support non EPSG:4326 CRS
        # desc = src_crs.to_string()
        epsg_4326 = CRS.from_epsg(4326)
        if not src_crs.equals(epsg_4326):
            transformer = Transformer.from_crs(
                src_crs, epsg_4326, always_xy=True)
            coords = np.vstack(
                transformer.transform(coords[:, 0], coords[:, 1])).T
    # NOTE: Node IDs are node index + 1
    nodes = {
        i + 1: [tuple(p.tolist()), v] for i, (p, v) in
        enumerate(zip(coords, -msh.value))}
    elems = {
        i + 1: v + 1 for i, v in enumerate(msh.tria3['index'])}
    offset = len(elems)
    elems.update({
        offset + i + 1: v + 1 for i, v in enumerate(msh.quad4['index'])})
    return {'description': desc,
            'nodes': nodes,
            'elements': elems}
def grd_to_msh_t(_grd: Dict) -> jigsaw_msh_t:
    """Build a euclidean-mesh msh_t from a grd-style dictionary."""
    msh = jigsaw_msh_t()
    msh.ndims = +2
    msh.mshID = 'euclidean-mesh'
    # grd node IDs are arbitrary; remap them onto 0-based indices.
    id_to_index = {
        node_id: index
        for index, node_id in enumerate(_grd['nodes'].keys())}
    triangles = []
    quads = []
    for element in _grd['elements'].values():
        reindexed = [id_to_index[node_id] for node_id in element]
        if len(element) == 3:
            triangles.append(reindexed)
        elif len(element) == 4:
            quads.append(reindexed)
    msh.vert2 = np.array([(coord, 0) for coord, _ in _grd['nodes'].values()],
                         dtype=jigsaw_msh_t.VERT2_t)
    msh.tria3 = np.array([(index, 0) for index in triangles],
                         dtype=jigsaw_msh_t.TRIA3_t)
    msh.quad4 = np.array([(index, 0) for index in quads],
                         dtype=jigsaw_msh_t.QUAD4_t)
    value = [value for _, value in _grd['nodes'].values()]
    msh.value = np.array(np.array(value).reshape((len(value), 1)),
                         dtype=jigsaw_msh_t.REALS_t)
    crs = _grd.get('crs')
    if crs is not None:
        msh.crs = CRS.from_user_input(crs)
    return msh
def msh_t_to_2dm(msh: jigsaw_msh_t):
    """Convert a msh_t mesh into an SMS 2dm-style dictionary.

    Coordinates are reprojected to EPSG:4326 when the mesh CRS differs;
    NaN nodal values become the -99999 sentinel.  IDs are index + 1.
    """
    coords = msh.vert2['coord']
    src_crs = getattr(msh, 'crs', None)
    if src_crs is not None:
        epsg_4326 = CRS.from_epsg(4326)
        if not src_crs.equals(epsg_4326):
            transformer = Transformer.from_crs(
                src_crs, epsg_4326, always_xy=True)
            coords = np.vstack(
                transformer.transform(coords[:, 0], coords[:, 1])).T
    nodes = {}
    for i, coord in enumerate(coords):
        depth = msh.value[i, 0]
        nodes[i + 1] = (coord, depth if not np.isnan(depth) else -99999)
    return {
        'ND': nodes,
        'E3T': {i+1: index+1 for i, index
                in enumerate(msh.tria3['index'])},
        'E4Q': {i+1: index+1 for i, index
                in enumerate(msh.quad4['index'])}
    }
def sms2dm_to_msh_t(_sms2dm: Dict) -> jigsaw_msh_t:
    """Build a euclidean-mesh msh_t from an SMS 2dm-style dictionary."""
    msh = jigsaw_msh_t()
    msh.ndims = +2
    msh.mshID = 'euclidean-mesh'
    # 2dm node IDs are arbitrary; remap them onto 0-based indices.
    id_to_index = {
        node_id: index
        for index, node_id in enumerate(_sms2dm['ND'].keys())}

    def _reindex(element):
        # One connectivity row, translated to 0-based vertex indices.
        return [id_to_index[node_id] for node_id in element]

    # Only assign the fields present in the input; absent ones keep the
    # jigsaw_msh_t defaults.
    if 'E3T' in _sms2dm:
        msh.tria3 = np.array(
            [(_reindex(element), 0) for element in _sms2dm['E3T'].values()],
            dtype=jigsaw_msh_t.TRIA3_t)
    if 'E4Q' in _sms2dm:
        msh.quad4 = np.array(
            [(_reindex(element), 0) for element in _sms2dm['E4Q'].values()],
            dtype=jigsaw_msh_t.QUAD4_t)
    msh.vert2 = np.array([(coord, 0) for coord, _ in _sms2dm['ND'].values()],
                         dtype=jigsaw_msh_t.VERT2_t)
    value = [value for _, value in _sms2dm['ND'].values()]
    msh.value = np.array(np.array(value).reshape((len(value), 1)),
                         dtype=jigsaw_msh_t.REALS_t)
    crs = _sms2dm.get('crs')
    if crs is not None:
        msh.crs = CRS.from_user_input(crs)
    return msh
@must_be_euclidean_mesh
def msh_t_to_utm(msh):
    # Reproject ``msh`` in place to its estimated UTM zone; no-op when no
    # zone can be estimated (e.g. mesh has no CRS).
    utm_crs = estimate_mesh_utm(msh)
    if utm_crs is None:
        return
    transformer = Transformer.from_crs(
        msh.crs, utm_crs, always_xy=True)
    coords = msh.vert2['coord']
    # pylint: disable=E0633
    coords[:, 0], coords[:, 1] = transformer.transform(
        coords[:, 0], coords[:, 1])
    # Write back through the structured-array view and update the CRS tag.
    msh.vert2['coord'][:] = coords
    msh.crs = utm_crs
def estimate_bounds_utm(bounds, crs="EPSG:4326"):
    """Return a CRS for the UTM zone containing the centre of ``bounds``.

    ``bounds`` is (x0, y0, x1, y1) expressed in ``crs``.  Returns None
    when ``crs`` is not geographic.
    """
    in_crs = CRS.from_user_input(crs)
    if not in_crs.is_geographic:
        return None
    x0, y0, x1, y1 = bounds
    mid_lat = (y0 + y1)/2
    mid_lon = (x0 + x1)/2
    _, _, number, letter = utm.from_latlon(mid_lat, mid_lon)
    # PyProj 3.2.1 throws error if letter is provided
    return CRS(
        proj='utm',
        zone=f'{number}',
        south=mid_lat < 0,
        ellps={
            'GRS 1980': 'GRS80',
            'WGS 84': 'WGS84'
        }[in_crs.ellipsoid.name]
    )
@must_be_euclidean_mesh
def estimate_mesh_utm(msh):
    """Return a CRS for the UTM zone of the mesh extent, or None if the mesh has no CRS."""
    if not hasattr(msh, 'crs'):
        return None
    coords = msh.vert2['coord']
    extent = (
        np.min(coords[:, 0]), np.min(coords[:, 1]),
        np.max(coords[:, 0]), np.max(coords[:, 1]))
    return estimate_bounds_utm(extent, msh.crs)
def get_polygon_channels(polygon, width, simplify=None, join_style=3):
    """Extract narrow "channel" regions of ``polygon`` thinner than ``width``.

    Erodes then re-dilates the polygon by ``width / 2`` and subtracts the
    result from the original; what remains are the parts too narrow to
    survive the erosion.  Returns a MultiPolygon of channels, the input
    ``polygon`` when everything is channel, or None when nothing qualifies.
    """
    # Operations are done without any CRS info consideration
    polys_gdf = gpd.GeoDataFrame(
            geometry=gpd.GeoSeries(polygon))
    if isinstance(simplify, (int, float)):
        polys_gdf = gpd.GeoDataFrame(
                geometry=polys_gdf.simplify(
                    tolerance=simplify,
                    preserve_topology=False))
    buffer_size = width/2
    # Erode then dilate back: features thinner than ``width`` vanish.
    buffered_gdf = gpd.GeoDataFrame(
            geometry=polys_gdf.buffer(-buffer_size).buffer(
                buffer_size,
                join_style=join_style))
    buffered_gdf = buffered_gdf[~buffered_gdf.is_empty]
    if len(buffered_gdf) == 0:
        # All is channel!
        return polygon
    channels_gdf = gpd.overlay(
            polys_gdf, buffered_gdf, how='difference')
    # Use square - 1/4 circle as cleanup criteria
    channels_gdf = gpd.GeoDataFrame(
            geometry=gpd.GeoSeries(
                [p for i in channels_gdf.geometry
                 for p in i.geoms
                 if p.area > width**2 * (1-np.pi/4)]))
    ret_val = channels_gdf.unary_union
    if isinstance(ret_val, GeometryCollection):
        return None
    if isinstance(ret_val, Polygon):
        ret_val = MultiPolygon([ret_val])
    return ret_val
def merge_msh_t(
        *mesh_list,
        out_crs="EPSG:4326",
        drop_by_bbox=True,
        can_overlap=True,
        check_cross_edges=False):
    """Merge several tria3 meshes into one, clipping lower-priority meshes.

    Meshes later in ``mesh_list`` have higher priority: each mesh is
    clipped against the footprints of all higher-priority meshes before
    its vertices and triangles are appended to the composite mesh.
    """
    # TODO: Add support for quad4 and hexa8
    dst_crs = CRS.from_user_input(out_crs)
    coord = []
    index = []
    value = []
    offset = 0
    mesh_shape_list = []
    # Last has the highest priority
    for mesh in mesh_list[::-1]:
        if not dst_crs.equals(mesh.crs):
            # To avoid modifying inputs
            mesh = deepcopy(mesh)
            reproject(mesh, dst_crs)
        # Footprint used to clip lower-priority meshes: bounding box when
        # drop_by_bbox, otherwise the exact mesh polygon.
        if drop_by_bbox:
            x = mesh.vert2['coord'][:, 0]
            y = mesh.vert2['coord'][:, 1]
            mesh_shape = box(np.min(x), np.min(y), np.max(x), np.max(y))
        else:
            mesh_shape = get_mesh_polygons(mesh)
        for ishp in mesh_shape_list:
            # NOTE: fit_inside = True w/ inverse = True results
            # in overlap when clipping low-priority mesh
            mesh = clip_mesh_by_shape(
                mesh, ishp,
                use_box_only=drop_by_bbox,
                fit_inside=can_overlap,
                inverse=True,
                check_cross_edges=check_cross_edges)
        mesh_shape_list.append(mesh_shape)
        # Shift connectivity by the running vertex offset before appending.
        index.append(mesh.tria3['index'] + offset)
        coord.append(mesh.vert2['coord'])
        value.append(mesh.value)
        offset += coord[-1].shape[0]
    composite_mesh = jigsaw_msh_t()
    composite_mesh.mshID = 'euclidean-mesh'
    composite_mesh.ndims = 2
    composite_mesh.vert2 = np.array(
            [(coord, 0) for coord in np.vstack(coord)],
            dtype=jigsaw_msh_t.VERT2_t)
    composite_mesh.tria3 = np.array(
            [(index, 0) for index in np.vstack(index)],
            dtype=jigsaw_msh_t.TRIA3_t)
    composite_mesh.value = np.array(
            np.vstack(value),
            dtype=jigsaw_msh_t.REALS_t)
    composite_mesh.crs = dst_crs
    return composite_mesh
def add_pool_args(func):
    """Decorator adding ``nprocs``/``pool`` keyword handling to ``func``.

    The wrapped function must accept a ``pool`` keyword.  If the caller
    supplies ``pool`` it is used as-is; otherwise a temporary
    ``multiprocessing.Pool`` with ``nprocs`` workers (all CPUs when
    ``nprocs`` is None or -1) is created for the duration of the call.
    """
    def wrapper(*args, nprocs=None, pool=None, **kwargs):
        # TODO: Modify docstring?
        if pool is not None:
            rv = func(*args, **kwargs, pool=pool)
        else:
            # Check nprocs
            nprocs = -1 if nprocs is None else nprocs
            nprocs = cpu_count() if nprocs == -1 else nprocs
            with Pool(processes=nprocs) as new_pool:
                rv = func(*args, **kwargs, pool=new_pool)
            # BUGFIX: Pool.join() requires close()/terminate() first; the
            # original called join() inside the ``with`` block, which raises
            # "ValueError: Pool is still running".  The context manager
            # terminates the pool on exit, after which join() is safe and
            # waits for the workers to shut down.
            new_pool.join()
        return rv
    return wrapper
| 1.8125 | 2 |
scripts/5a-render-model3.py | lpenuelac/ImageAnalysis | 93 | 12762437 |
#!/usr/bin/env python3
# 6a-render-model3.py - investigate delauney triangulation for
# individual image surface mesh generation.
# for all the images in the fitted group, generate a 2d polygon
# surface fit. Then project the individual images onto this surface
# and generate an AC3D model.
#
# Note: insufficient image overlap (or long linear image match chains)
# are not good. Ideally we would have a nice mesh of match pairs for
# best results.
#
# this script can also project onto the SRTM surface, or a flat ground
# elevation plane.
import argparse
import math
import os.path
import pickle

import cv2
import numpy as np
import scipy.interpolate
import scipy.spatial

from props import getNode

from lib import groups
from lib import panda3d
from lib import project
from lib import srtm
from lib import transformations
mesh_steps = 8 # 1 = corners only
r2d = 180 / math.pi
tolerance = 0.5
parser = argparse.ArgumentParser(description='Set the initial camera poses.')
parser.add_argument('project', help='project directory')
parser.add_argument('--group', type=int, default=0, help='group index')
parser.add_argument('--texture-resolution', type=int, default=512, help='texture resolution (should be 2**n, so numbers like 256, 512, 1024, etc.')
parser.add_argument('--srtm', action='store_true', help='use srtm elevation')
parser.add_argument('--ground', type=float, help='force ground elevation in meters')
parser.add_argument('--direct', action='store_true', help='use direct pose')
args = parser.parse_args()
proj = project.ProjectMgr(args.project)
proj.load_images_info()
# lookup ned reference
ref_node = getNode("/config/ned_reference", True)
ref = [ ref_node.getFloat('lat_deg'),
ref_node.getFloat('lon_deg'),
ref_node.getFloat('alt_m') ]
# setup SRTM ground interpolator
srtm.initialize( ref, 6000, 6000, 30 )
width, height = proj.cam.get_image_params()
print("Loading optimized match points ...")
matches = pickle.load( open( os.path.join(proj.analysis_dir, "matches_grouped"), "rb" ) )
# load the group connections within the image set
group_list = groups.load(proj.analysis_dir)
# initialize temporary structures for vanity stats
for image in proj.image_list:
image.sum_values = 0.0
image.sum_count = 0.0
image.max_z = -9999.0
image.min_z = 9999.0
image.pool_xy = []
image.pool_z = []
image.pool_uv = []
image.fit_xy = []
image.fit_z = []
image.fit_uv = []
image.fit_edge = []
# sort through points to build a global list of feature coordinates
# and a per-image list of feature coordinates
print('Reading feature locations from optimized match points ...')
raw_points = []
raw_values = []
for match in matches:
if match[1] == args.group and len(match[2:]) > 2: # used by current group
ned = match[0]
raw_points.append( [ned[1], ned[0]] )
raw_values.append( ned[2] )
for m in match[2:]:
if proj.image_list[m[0]].name in group_list[args.group]:
image = proj.image_list[ m[0] ]
image.pool_xy.append( [ned[1], ned[0]] )
image.pool_z.append( -ned[2] )
image.pool_uv.append( m[1] )
z = -ned[2]
image.sum_values += z
image.sum_count += 1
if z < image.min_z:
image.min_z = z
#print(min_z, match)
if z > image.max_z:
image.max_z = z
#print(max_z, match)
K = proj.cam.get_K(optimized=True)
dist_coeffs = np.array(proj.cam.get_dist_coeffs(optimized=True))
def undistort(uv_orig):
    """Return the undistorted pixel location of a single (u, v) point."""
    # OpenCV expects an (n, 1, 2) float32 array of points.
    pts = np.array([[[uv_orig[0], uv_orig[1]]]], dtype=np.float32)
    # do the actual undistort
    undistorted = cv2.undistortPoints(pts, K, dist_coeffs, P=K)
    return undistorted[0][0]
# cull points from the per-image pool that project outside the grid boundaries
# NOTE(review): despite the comment above, nothing is actually removed here --
# the loop only prints a notice for out-of-range points and ``size`` is
# unused.  Confirm whether the cull was intentionally disabled.
for image in proj.image_list:
    size = len(image.pool_uv)
    for i in reversed(range(len(image.pool_uv))): # iterate in reverse order
        uv_new = undistort(image.pool_uv[i])
        if uv_new[0] < 0 or uv_new[0] >= width or uv_new[1] < 0 or uv_new[1] >= height:
            print("out of range")
print('Generating Delaunay mesh and interpolator ...')
print(len(raw_points))
# Global surface approximation over all matched features (x=east, y=north).
global_tri_list = scipy.spatial.Delaunay(np.array(raw_points))
interp = scipy.interpolate.LinearNDInterpolator(global_tri_list, raw_values)
def intersect2d(ned, v, avg_ground):
    """Iteratively intersect the ray from camera position ``ned`` along
    vector ``v`` with the Delaunay-interpolated surface.

    Falls back to ``avg_ground`` where the interpolator returns NaN.
    Returns the intersection point, or NaNs when the ray grazes the
    surface at less than 30 degrees above the horizon.
    """
    p = ned[:] # copy
    # sanity check (always assume camera pose is above ground!)
    if v[2] <= 0.0:
        return p
    eps = 0.01
    count = 0
    #print("start:", p)
    #print("vec:", v)
    #print("ned:", ned)
    # interp() takes (x=east, y=north), hence the swapped indices
    tmp = interp([p[1], p[0]])[0]
    if not np.isnan(tmp):
        surface = tmp
    else:
        print("Notice: starting vector intersect with avg ground elev:", avg_ground)
        surface = avg_ground
    error = abs(p[2] - surface)
    #print("p=%s surface=%s error=%s" % (p, surface, error))
    # fixed-point iteration: re-project the ray to the current surface
    # estimate until the elevation error converges (max 25 rounds)
    while error > eps and count < 25:
        d_proj = -(ned[2] - surface)
        factor = d_proj / v[2]
        n_proj = v[0] * factor
        e_proj = v[1] * factor
        #print(" proj = %s %s" % (n_proj, e_proj))
        p = [ ned[0] + n_proj, ned[1] + e_proj, ned[2] + d_proj ]
        #print(" new p:", p)
        tmp = interp([p[1], p[0]])[0]
        if not np.isnan(tmp):
            surface = tmp
        error = abs(p[2] - surface)
        #print(" p=%s surface=%.2f error = %.3f" % (p, surface, error))
        count += 1
    #print("surface:", surface)
    #if np.isnan(surface):
    #    #print(" returning nans")
    #    return [np.nan, np.nan, np.nan]
    dy = ned[0] - p[0]
    dx = ned[1] - p[1]
    dz = ned[2] - p[2]
    dist = math.sqrt(dx*dx+dy*dy)
    angle = math.atan2(-dz, dist) * r2d # relative to horizon
    if angle < 30:
        print(" returning high angle nans:", angle)
        return [np.nan, np.nan, np.nan]
    else:
        return p
def intersect_vectors(ned, v_list, avg_ground):
    """Intersect every projection vector in ``v_list`` with the fitted surface."""
    return [intersect2d(ned, v.flatten(), avg_ground) for v in v_list]
# per-image average feature elevation (fallback ground level for the
# vector intersection above)
for image in proj.image_list:
    if image.sum_count > 0:
        image.z_avg = image.sum_values / float(image.sum_count)
        # print(image.name, 'avg elev:', image.z_avg)
    else:
        image.z_avg = 0
# compute the uv grid for each image and project each point out into
# ned space, then intersect each vector with the srtm / ground /
# delauney surface.
#for group in group_list:
if True:
    group = group_list[args.group]
    #if len(group) < 3:
    #    continue
    for name in group:
        image = proj.findImageByName(name)
        print(image.name, image.z_avg)
        # scale the K matrix if we have scaled the images
        K = proj.cam.get_K(optimized=True)
        IK = np.linalg.inv(K)
        # build a uv sample grid along the image border only
        grid_list = []
        u_list = np.linspace(0, width, mesh_steps + 1)
        v_list = np.linspace(0, height, mesh_steps + 1)
        # horizontal edges
        for u in u_list:
            grid_list.append( [u, 0] )
            grid_list.append( [u, height] )
        # vertical edges (minus corners)
        for v in v_list[1:-1]:
            grid_list.append( [0, v] )
            grid_list.append( [width, v] )
        #print('grid_list:', grid_list)
        # NOTE(review): the redistort() result is immediately overwritten
        # with the raw grid on the next line -- confirm which is intended.
        distorted_uv = proj.redistort(grid_list, optimized=True)
        distorted_uv = grid_list
        if args.direct:
            proj_list = project.projectVectors( IK, image.get_body2ned(),
                                                image.get_cam2body(),
                                                grid_list )
        else:
            #print(image.get_body2ned(opt=True))
            proj_list = project.projectVectors( IK,
                                                image.get_body2ned(opt=True),
                                                image.get_cam2body(),
                                                grid_list )
        #print 'proj_list:', proj_list
        if args.direct:
            ned, ypr, quat = image.get_camera_pose()
        else:
            ned, ypr, quat = image.get_camera_pose(opt=True)
        #print('cam orig:', image.camera_pose['ned'], 'optimized:', ned)
        # choose the surface to intersect against: forced plane, SRTM, or
        # the fitted Delaunay surface (fallback avg ground = -z_avg in NED)
        if args.ground:
            pts_ned = project.intersectVectorsWithGroundPlane(ned,
                                                              args.ground,
                                                              proj_list)
        elif args.srtm:
            pts_ned = srtm.interpolate_vectors(ned, proj_list)
        else:
            # intersect with our polygon surface approximation
            pts_ned = intersect_vectors(ned, proj_list, -image.z_avg)
        #print(image.name, "pts_3d (ned):\n", pts_ned)
        # convert ned to xyz and stash the result for each image
        image.grid_list = []
        for p in pts_ned:
            image.fit_xy.append([p[1], p[0]])
            image.fit_z.append(-p[2])
            image.fit_edge.append(True)
        image.fit_uv = distorted_uv
        print('len:', len(image.fit_xy), len(image.fit_z), len(image.fit_uv))
# Triangle fit algorithm: greedily add the pool point with the largest
# elevation error against the current per-image Delaunay fit until all
# remaining pool points fit within ``tolerance``.
group = group_list[args.group]
#if len(group) < 3:
#    continue
for name in group:
    image = proj.findImageByName(name)
    print(image.name, image.z_avg)
    done = False
    # NOTE(review): ``dist_uv`` stays empty, so the undistort_uvlist()
    # call below is effectively a no-op -- confirm intent.
    dist_uv = []
    while not done:
        tri_list = scipy.spatial.Delaunay(np.array(image.fit_xy))
        interp = scipy.interpolate.LinearNDInterpolator(tri_list, image.fit_z)
        # find the point in the pool furthest from the triangulated surface
        next_index = None
        max_error = 0.0
        for i, pt in enumerate(image.pool_xy):
            z = interp(image.pool_xy[i])[0]
            if not np.isnan(z):
                error = abs(z - image.pool_z[i])
                if error > max_error:
                    max_error = error
                    next_index = i
        if max_error > tolerance:
            print("adding index:", next_index, "error:", max_error)
            # promote the worst-fitting pool point into the fit set
            image.fit_xy.append(image.pool_xy[next_index])
            image.fit_z.append(image.pool_z[next_index])
            image.fit_uv.append(image.pool_uv[next_index])
            image.fit_edge.append(False)
            del image.pool_xy[next_index]
            del image.pool_z[next_index]
            del image.pool_uv[next_index]
        else:
            print("finished")
            done = True
    image.fit_uv.extend(proj.undistort_uvlist(image, dist_uv))
    print(name, 'len:', len(image.fit_xy), len(image.fit_z), len(image.fit_uv))
# generate the panda3d egg models
# (consumes the per-image fit_xy / fit_z / fit_uv built above)
dir_node = getNode('/config/directories', True)
img_src_dir = dir_node.getString('images_source')
panda3d.generate_from_fit(proj, group_list[args.group], src_dir=img_src_dir,
                          analysis_dir=proj.analysis_dir,
                          resolution=args.texture_resolution)
| 2.546875 | 3 |
projects/TextDet/textdetection/__init__.py | AzeroGYH/detectron2_crpn | 0 | 12762438 | #
# Modified by GYH
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import add_textdetection_config
from .modeling import TextROIHeads
| 0.742188 | 1 |
tests/__init__.py | IntelAI/experiments | 22 | 12762439 | <filename>tests/__init__.py
from lib.exp import Client
import uuid
from kubernetes import client as k8sclient
# Unique namespace per test session so cluster resources created by the
# tests are isolated and can be torn down in one shot.
test_namespace = k8sclient.V1Namespace()
test_namespace.metadata = k8sclient.V1ObjectMeta(name='ns'+str(uuid.uuid4()))
def setup():
    """Create the per-session test namespace and install the experiment CRDs."""
    core_api = k8sclient.CoreV1Api()
    core_api.create_namespace(test_namespace)
    Client(namespace=test_namespace.metadata.name).create_crds()
def teardown():
    """Delete the test namespace (removes everything created inside it)."""
    core_api = k8sclient.CoreV1Api()
    delete_opts = k8sclient.models.V1DeleteOptions()
    core_api.delete_namespace(
        name=test_namespace.metadata.name,
        body=delete_opts)
nerblackbox/modules/datasets/formatter/sic_formatter.py | flxst/nerblackbox | 0 | 12762440 | <gh_stars>0
import subprocess
from typing import Dict, List, Optional, Tuple
import pandas as pd
from nerblackbox.modules.datasets.formatter.base_formatter import (
BaseFormatter,
SENTENCES_ROWS,
)
from nerblackbox.modules.utils.env_variable import env_variable
class SICFormatter(BaseFormatter):
    """Dataset formatter for the SIC (Stockholm Internet Corpus) NER dataset.

    Downloads the corpus, converts the 13-column CoNLL annotation files to
    the nerblackbox csv format, and resplits train into train/val/test.
    """

    def __init__(self):
        ner_dataset = "sic"
        # entity classes annotated in SIC
        ner_tag_list = [
            "person",
            "animal",
            "myth",
            "place",
            "inst",
            "product",
            "work",
            "event",
            "other",
        ]
        super().__init__(ner_dataset, ner_tag_list)

    ####################################################################################################################
    # ABSTRACT BASE METHODS
    ####################################################################################################################
    def get_data(self, verbose: bool) -> None:  # pragma: no cover
        """
        I: get data

        Downloads and unpacks the SIC corpus into DIR_DATASETS/sic and
        concatenates the annotated files into a single sic-train.conll.

        Args:
            verbose: [bool]
        """
        bash_cmds = [
            f"mkdir {env_variable('DIR_DATASETS')}/_sic",
            f"curl -o {env_variable('DIR_DATASETS')}/_sic/sic.zip "
            "https://www.ling.su.se/polopoly_fs/1.99145.1380811903\!/menu/standard/file/sic.zip",
            f"cd {env_variable('DIR_DATASETS')}/_sic && unzip -o sic.zip",
            f"mkdir {env_variable('DIR_DATASETS')}/sic/raw_data",
            f"mv {env_variable('DIR_DATASETS')}/_sic/sic/annotated/* {env_variable('DIR_DATASETS')}/sic/raw_data",
            f"rm -r {env_variable('DIR_DATASETS')}/_sic",
            f"cat {env_variable('DIR_DATASETS')}/sic/raw_data/*.conll "
            f"> {env_variable('DIR_DATASETS')}/sic/sic-train.conll",
        ]

        for bash_cmd in bash_cmds:
            if verbose:
                print(bash_cmd)
            try:
                subprocess.run(bash_cmd, shell=True, check=True)
            except subprocess.CalledProcessError as e:
                print(e)

    def create_ner_tag_mapping(self) -> Dict[str, str]:
        """
        II: customize ner_training tag mapping if wanted

        Returns:
            ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data
        """
        # empty mapping: original tags are used as-is
        return dict()

    def format_data(
        self, shuffle: bool = True, write_csv: bool = True
    ) -> Optional[SENTENCES_ROWS]:
        """
        III: format data

        Args:
            shuffle: whether to shuffle rows of dataset
            write_csv: whether to write dataset to csv (should always be True except for testing)

        Returns:
            sentences_rows: only if write_csv = False
        """
        # SIC ships as a single file; only a "train" phase exists here.
        for phase in ["train"]:
            sentences_rows = self._read_original_file(phase)
            if shuffle:
                sentences_rows = self._shuffle_dataset(phase, sentences_rows)

            if write_csv:  # pragma: no cover
                self._write_formatted_csv(phase, sentences_rows)
            else:
                return sentences_rows
        return None

    def set_original_file_paths(self) -> None:
        """
        III: format data

        Changed Attributes:
            file_paths: [Dict[str, str]], e.g. {'train': <path_to_train_csv>, 'val': ..}

        Returns: -
        """
        # NOTE(review): assigns ``file_name`` while the docstring mentions
        # ``file_paths`` -- presumably the base class derives paths from
        # these names; confirm against BaseFormatter.
        self.file_name = {
            "train": "sic-train.conll",
            "val": "sic-dev.conll",
            "test": "sic-test.conll",
        }

    def _parse_row(self, _row: str) -> List[str]:
        """
        III: format data

        Splits one raw line on tabs; an empty line yields an empty list.

        Args:
            _row: e.g. "Det\tPER\tX\tB" (tab-separated)

        Returns:
            _row_list: e.g. ["Det", "PER", "X", "B"]
        """
        _row_list = _row.split("\t")
        if _row_list == ["\n"]:
            return []
        else:
            return _row_list

    def _format_original_file(self, _row_list: List[str]) -> Optional[List[str]]:
        """
        III: format data

        Reduces one 13-field SIC row to [token, BIO-tag]; rows with any
        other field count are treated as empty (sentence boundary).

        Args:
            _row_list: 13 tab-separated fields, token at index 1,
                       tag at index -3 and BIO prefix at index -2

        Returns:
            _row_list_formatted: e.g. ["test", "B-PER"]
        """
        if not len(_row_list) in [13]:
            print(
                f"ATTENTION! row_list = {_row_list} should consist of 13 parts! -> treat as empty row"
            )
            return None
        _row_list_formatted = [
            _row_list[1],
            self.transform_tags(_row_list[-3], _row_list[-2]),
        ]
        return _row_list_formatted

    def resplit_data(
        self, val_fraction: float, write_csv: bool = True
    ) -> Optional[Tuple[pd.DataFrame, ...]]:
        """
        IV: resplit data

        Args:
            val_fraction: [float], e.g. 0.3
            write_csv: whether to write dataset to csv (should always be True except for testing)

        Returns:
            df_train: only if write_csv = False
            df_val: only if write_csv = False
            df_test: only if write_csv = False
        """
        # train -> train, val, test
        # (the same fraction is split off twice: first for test, then val)
        df_train_val_test = self._read_formatted_csvs(["train"])
        df_train_val, df_test = self._split_off_validation_set(
            df_train_val_test, val_fraction
        )
        df_train, df_val = self._split_off_validation_set(df_train_val, val_fraction)
        if write_csv:  # pragma: no cover
            self._write_final_csv("train", df_train)
            self._write_final_csv("val", df_val)
            self._write_final_csv("test", df_test)
            return None
        else:
            return df_train, df_val, df_test

    ####################################################################################################################
    # HELPER: READ ORIGINAL
    ####################################################################################################################
    @staticmethod
    def transform_tags(bio: str, tag: str) -> str:
        """
        Args:
            bio: e.g. 'O', 'B', 'I'
            tag: e.g. '_', 'person', ..

        Returns:
            transformed tag: e.g. 'O', 'B-person', ..
        """
        if bio == "O":
            return "O"
        else:
            return f"{bio}-{tag}"
| 2.28125 | 2 |
symmetricDifference.py | Pratyaksh7/PythonPrograms-Hackerrank | 0 | 12762441 | <reponame>Pratyaksh7/PythonPrograms-Hackerrank
# HackerRank "Symmetric Difference": the declared set sizes M and N are
# read only to consume their input lines.
M = int(input())
m1 = set(map(int, input().split()))
N = int(input())
n1 = set(map(int, input().split()))
# union minus intersection == symmetric difference
for value in sorted(m1 ^ n1):
    print(value)
| 3.53125 | 4 |
lisp/evaluator.py | qriollo/lispbot | 3 | 12762442 | # coding:utf-8:
from lisp.sexpressions import (
SexprNumber, SexprString, SexprCons, SexprNil, SexprSymbol,
SexprTrue, SexprFalse, SexprProcedure, SexprBuiltin, SexprList,
SexprBool, bool_value, num_value, string_value, is_keyword,
intern_symbol, consp, symbolp, symbol_name, car, cdr, set_car, set_cdr,
is_null, build_list, is_number,
)
def generic_fold(py2lisp, lisp2py, op, z, lst):
    """Fold ``op`` over ``lst`` (each element unwrapped via ``lisp2py``),
    starting from accumulator ``z``, and wrap the result with ``py2lisp``."""
    acc = z
    for item in lst:
        acc = op(acc, lisp2py(item))
    return py2lisp(acc)
# Specializations of generic_fold for each atomic s-expression type:
# each unwraps operands with the matching accessor and re-wraps the result.
def num_fold(op, z, lst):
    return generic_fold(SexprNumber, num_value, op, z, lst)
# Fold over boolean s-expressions, producing a SexprBool.
def bool_fold(op, z, lst):
    return generic_fold(SexprBool, bool_value, op, z, lst)
# Fold over string s-expressions, producing a SexprString.
def string_fold(op, z, lst):
    return generic_fold(SexprString, string_value, op, z, lst)
def procedure_definition(environment, x):
    # Reconstruct a quotable source form for a procedure:
    #  - for a SexprProcedure value: a ``(fun <params> <body>...)`` form;
    #  - for a symbol bound to a procedure: a ``(def (<sym> <params>)
    #    <body>...)`` form, looked up in ``environment``.
    # The raised messages are user-facing and intentionally in Spanish.
    if isinstance(x, SexprProcedure):
        return SexprCons(intern_symbol('fun'),
                         SexprCons(x.parameters(),
                                   x.body()))
    elif isinstance(x, SexprSymbol):
        p = env_lookup(environment, x)
        if p is None or not isinstance(p, SexprProcedure):
            raise Exception(u'El símbolo no está ligado a un procedimiento.')
        return SexprCons(intern_symbol('def'),
                         SexprCons(SexprCons(x, p.parameters()),
                                   p.body()))
    else:
        raise Exception(u'Se esperaba un símbolo o booleano')
def global_environment():
    """Build the top-level environment pre-populated with builtin procedures."""
    # The environment is a cons list of ribs; the global rib is a plain dict.
    env = SexprCons({}, SexprNil())
    def db(name, function):
        # Register *function* as a builtin under *name* in the global rib.
        env_define(env, intern_symbol(name), SexprBuiltin(name, function))
    # Arithmetic: '+' and '*' are variadic folds, '-', '/', '%' are binary.
    db('+', lambda env, *args: num_fold(lambda x, y: x + y, 0, args))
    db('-', lambda env, a, b: SexprNumber(num_value(a) - num_value(b)))
    db('*', lambda env, *args: num_fold(lambda x, y: x * y, 1, args))
    db('/', lambda env, a, b: SexprNumber(num_value(a) / num_value(b)))
    db('%', lambda env, a, b: SexprNumber(num_value(a) % num_value(b)))
    # Boolean logic ('and'/'&&' and 'or'/'||' are aliases).
    db('and', lambda env, *args: bool_fold(lambda x, y: x and y, True, args))
    db('&&', lambda env, *args: bool_fold(lambda x, y: x and y, True, args))
    db('not', lambda env, x: SexprBool(not bool_value(x)))
    # Numeric equality: non-numbers compare unequal rather than raising.
    db('=', lambda env, x, y: SexprBool(is_number(x) and is_number(y) and num_value(x) == num_value(y)))
    db('or', lambda env, *args: bool_fold(lambda x, y: x or y, False, args))
    db('||', lambda env, *args: bool_fold(lambda x, y: x or y, False, args))
    # Numeric comparisons.
    db('>', lambda env, x, y: SexprBool(num_value(x) > num_value(y)))
    db('>=', lambda env, x, y: SexprBool(num_value(x) >= num_value(y)))
    db('<', lambda env, x, y: SexprBool(num_value(x) < num_value(y)))
    db('<=', lambda env, x, y: SexprBool(num_value(x) <= num_value(y)))
    # String concatenation (variadic).
    db('str+', lambda env, *args: string_fold(lambda x, y: x + y, "", args))
    # List / pair primitives.
    db('cons', lambda env, x, y: SexprCons(x, y))
    db('car', lambda env, x: car(x))
    db('cdr', lambda env, x: cdr(x))
    db('consp', lambda env, x: SexprBool(consp(x)))
    db('set-car', lambda env, x, y: set_car(x, y))
    db('set-cdr', lambda env, x, y: set_cdr(x, y))
    db('symbolp', lambda env, x: SexprBool(symbolp(x)))
    db('null', lambda env, x: SexprBool(is_null(x)))
    # Identity comparison (Python object equality of the sexpr wrappers).
    db('eq', lambda env, x, y: SexprBool(x == y))
    db('list', lambda env, *args: SexprList(args))
    db('apply', lambda env, f, *args: fun_apply(f, args, env))
    db('procedure-definition', procedure_definition)
    db('string->int', lambda env, x: SexprNumber(int(string_value(x))))
    return env
def rib_keys(rib):
    """Return the symbols bound in a single rib.

    A rib is either a Python dict or a Lisp association list
    (cons chain of (symbol . value) pairs).
    """
    if isinstance(rib, dict):
        return rib.keys()
    seen = {}
    node = rib
    while consp(node):
        seen[car(car(node))] = 1
        node = cdr(node)
    return seen.keys()
def rib_lookup(rib, symbol):
    """Look *symbol* up in a single rib; return its value, or None if unbound."""
    if isinstance(rib, dict):
        return rib.get(symbol)
    node = rib
    while consp(node):
        pair = car(node)
        if car(pair) == symbol:
            return cdr(pair)
        node = cdr(node)
    return None
def rib_define(rib, symbol, value):
    """Bind *symbol* to *value* in *rib* and return the resulting rib.

    Dict ribs are mutated in place; association-list ribs get a new pair
    consed on the front (so the caller must use the returned rib).
    """
    if not isinstance(rib, dict):
        return SexprCons(SexprCons(symbol, value), rib)
    rib[symbol] = value
    return rib
def rib_set(rib, symbol, value):
    """Mutate the binding of *symbol* in *rib* to *value*.

    For association-list ribs the pair's cdr is updated in place; nothing
    happens if the symbol is not bound there.
    """
    if isinstance(rib, dict):
        rib[symbol] = value
        return
    node = rib
    while consp(node):
        pair = car(node)
        if car(pair) == symbol:
            set_cdr(pair, value)
            return
        node = cdr(node)
def env_keys(environment):
    """Collect the symbols bound anywhere in *environment* (all ribs)."""
    collected = {}
    frame = environment
    while consp(frame):
        for key in rib_keys(car(frame)):
            collected[key] = 1
        frame = cdr(frame)
    return collected.keys()
def env_lookup(environment, symbol):
    """Resolve *symbol* in the environment, innermost rib first.

    Keywords evaluate to themselves.

    Raises:
        Exception: if the symbol is not bound in any rib.
    """
    if is_keyword(symbol):
        return symbol
    frame = environment
    while consp(frame):
        value = rib_lookup(car(frame), symbol)
        if value is not None:
            return value
        frame = cdr(frame)
    raise Exception('Variable no definida: ' + symbol_name(symbol))
def env_define(environment, symbol, value):
    """Define *symbol* as *value* in the innermost rib of *environment*.

    Procedures are tagged with the symbol's name for display purposes.
    Redefining an existing binding in the innermost rib overwrites it in
    place instead of raising.
    """
    if is_keyword(symbol):
        raise Exception('No se puede definir una keyword.')
    if isinstance(value, SexprProcedure):
        value.set_name(symbol_name(symbol))
    innermost = car(environment)
    if rib_lookup(innermost, symbol) is not None:
        rib_set(innermost, symbol, value)
    else:
        set_car(environment, rib_define(car(environment), symbol, value))
def env_set(environment, symbol, value):
    """Assign *value* to an already-bound *symbol*, searching ribs outward.

    Raises:
        Exception: for keywords or when the symbol is unbound everywhere.
    """
    if is_keyword(symbol):
        raise Exception('No se puede definir una keyword.')
    frame = environment
    while consp(frame):
        rib = car(frame)
        if rib_lookup(rib, symbol) is not None:
            rib_set(rib, symbol, value)
            return
        frame = cdr(frame)
    raise Exception('Variable no definida: ' + symbol_name(symbol))
def env_bind(environment, parameters, arguments):
    """Extend *environment* with a fresh rib binding parameters to arguments.

    Supports dotted parameter lists: a trailing symbol captures the rest
    of the argument list as-is.

    Raises:
        Exception: on too few / too many arguments or a malformed
            parameter list.
    """
    extended = SexprCons(SexprNil(), environment)
    while consp(parameters):
        if not consp(arguments):
            raise Exception(u'Faltan parámetros')
        env_define(extended, car(parameters), car(arguments))
        parameters, arguments = cdr(parameters), cdr(arguments)
    if parameters == SexprNil():
        if arguments != SexprNil():
            raise Exception(u'Sobran parámetros')
    elif isinstance(parameters, SexprSymbol):
        # Dotted tail: bind the remaining argument list to the rest symbol.
        env_define(extended, parameters, arguments)
    else:
        raise Exception(u'Lista de parámetros deforme')
    return extended
def first(expr):
    # First element of a Lisp list: (car expr).
    return car(expr)
def second(expr):
    # Second element: (car (cdr expr)).
    return car(cdr(expr))
def third(expr):
    # Third element: (car (cdr (cdr expr))).
    return car(cdr(cdr(expr)))
def fourth(expr):
    # Fourth element: (car (cdr (cdr (cdr expr)))).
    return car(cdr(cdr(cdr(expr))))
def eval_expression(expr, environment):
    """Evaluate a single s-expression in *environment* and return its value.

    Numbers and strings are self-evaluating; symbols are looked up; a cons
    is either one of the special forms handled below or a procedure
    application.
    """
    if isinstance(expr, SexprNumber):
        return expr
    elif isinstance(expr, SexprString):
        return expr
    elif isinstance(expr, SexprSymbol):
        return env_lookup(environment, expr)
    elif isinstance(expr, SexprCons):
        head = first(expr)
        if head == intern_symbol('quote'):
            # (quote x) -> x unevaluated.
            return second(expr)
        elif head == intern_symbol('do'):
            # (do e1 e2 ...) -> evaluate in sequence, return the last value.
            return eval_block(cdr(expr), environment)
        elif head == intern_symbol('def'):
            if consp(second(expr)):
                # Function shorthand: (def (f args...) body...) is rewritten
                # to (def f (fun (args...) body...)) and re-evaluated.
                expr2 = SexprList([
                    intern_symbol('def'),
                    car(second(expr)),
                    SexprCons(
                        intern_symbol('fun'),
                        SexprCons(
                            cdr(second(expr)),
                            cdr(cdr(expr))
                        )
                    )
                ])
                return eval_expression(expr2, environment)
            else:
                # Plain (def name value).
                value = eval_expression(third(expr), environment)
                env_define(environment, second(expr), value)
                return value
        elif head == intern_symbol('let'):
            # (let (v1 e1 v2 e2 ...) body...): initializers are evaluated in
            # the OUTER environment (parallel binding).
            local_environment = SexprCons(SexprNil(), environment)
            decls = second(expr)
            body = cdr(cdr(expr))  # NOTE: unused; the body is re-derived below.
            while consp(decls):
                variable = first(decls)
                value = eval_expression(second(decls), environment)
                env_define(local_environment, variable, value)
                decls = cdr(cdr(decls))
            return eval_block(cdr(cdr(expr)), local_environment)
        elif head == intern_symbol('let*'):
            # Like let, but initializers see earlier bindings (sequential).
            local_environment = SexprCons(SexprNil(), environment)
            decls = second(expr)
            body = cdr(cdr(expr))  # NOTE: unused; the body is re-derived below.
            while consp(decls):
                variable = first(decls)
                value = eval_expression(second(decls), local_environment)
                env_define(local_environment, variable, value)
                decls = cdr(cdr(decls))
            return eval_block(cdr(cdr(expr)), local_environment)
        elif head == intern_symbol('set'):
            # (set name value): assignment to an existing binding.
            value = eval_expression(third(expr), environment)
            env_set(environment, second(expr), value)
            return value
        elif head == intern_symbol('if'):
            # (if c1 e1 c2 e2 ... [else-expr]): cond-style chain; anything
            # that is not #f counts as true; nil if nothing matches.
            rest = cdr(expr)
            while consp(rest):
                if consp(cdr(rest)):
                    cond = eval_expression(first(rest), environment)
                    if cond != SexprFalse():
                        return eval_expression(second(rest), environment)
                    rest = cdr(cdr(rest))
                else:
                    # Odd trailing expression acts as the else branch.
                    return eval_expression(car(rest), environment)
            return SexprNil()
        elif head == intern_symbol('fun'):
            # (fun (params...) body...): build a closure over *environment*.
            return SexprProcedure(environment, second(expr), cdr(cdr(expr)))
        else:
            # Application: evaluate operator and operands, then apply.
            function = eval_expression(head, environment)
            arguments = eval_list(cdr(expr), environment)
            return eval_application(function, arguments, environment)
    else:
        raise Exception(u'Expresión no reconocida: ' + repr(expr))
def eval_block(block, environment):
    """Evaluate each expression in *block*; return the last value (nil if empty)."""
    result = SexprNil()
    node = block
    while consp(node):
        result = eval_expression(car(node), environment)
        node = cdr(node)
    return result
def eval_list(expr, environment):
    """Evaluate every element of the Lisp list *expr*; return a Lisp list of values."""
    values = []
    node = expr
    while consp(node):
        values.append(eval_expression(car(node), environment))
        node = cdr(node)
    return SexprList(values)
def eval_application(function, arguments, environment):
    """Apply *function* (closure or builtin) to the already-evaluated *arguments*."""
    if isinstance(function, SexprBuiltin):
        # Builtins receive the caller's environment directly.
        return function.call(environment, arguments)
    if isinstance(function, SexprProcedure):
        # Closures run their body in the closure environment extended with
        # the parameter bindings.
        frame = env_bind(function.environment(),
                         function.parameters(),
                         arguments)
        return eval_block(function.body(), frame)
    raise Exception(u'El valor no es aplicable.')
def fun_apply(function, arguments, environment):
    """Implement (apply f x1 ... xn rest).

    All arguments but the last are passed positionally; the last must be a
    Lisp list that is spliced onto the end of the argument list.
    """
    if len(arguments) == 0:
        return eval_application(function, SexprNil(), environment)
    # Idiom fix: build the leading-argument list directly instead of a
    # manual append loop.
    leading = list(arguments[:-1])
    rest = arguments[-1]
    return eval_application(function, build_list(leading, rest), environment)
| 2.546875 | 3 |
main.py | Aus-miner/Miner-Model | 18 | 12762443 | <reponame>Aus-miner/Miner-Model
import plotly.express as px
import plotly.io as pio
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
from agents import *
from generators import *
from CMDataLoader import CMDataLoader
from Simulator import Simulator
from plotutils import update_layout_wrapper
import config
import constants
import random
# Colour palettes used by the plotting helpers below (hex RGB strings).
# The two commented-out lists are earlier palette candidates kept for reference.
# my_palette = ["#264653","#9D1DC8","#287271", "#645DAC","#636EFA", "#ECA400","#FE484E","#8484E8", "#03b800" ,"#9251e1","#F4A261"]
# my_palette = ["#54478c","#9D1DC8","#2c699a","#048ba8","#0db39e","#16db93","#83e377","#b9e769","#efea5a","#f1c453","#f29e4c"]
my_palette = ["#1f00a7","#9d1dc8","#00589f","#009b86","#00a367","#67a300","#645dac","#eca400","#fd7e00","#b6322b", "#FE484E"]
# Two-colour palette for the hardware-comparison plots.
hardware_palette = ["#009b86", "#9D1DC8"]
# Three-colour palette for the electricity-cost (opex) comparison plots.
opex_palette = ["#9D1DC8","#264653","#8484E8"]
# Single accent colour for the environment (price / hashrate) plots.
primary_color = ["#9d1dc8"]
def save_csvs(prices, global_hash_rate, n_trials, user_positions, file_suffix):
    """Persist the simulated environment series and user positions as CSVs."""
    env_frame = pd.DataFrame(
        {'price': prices, 'hashrate': global_hash_rate, 'trials': n_trials})
    env_frame.to_csv(f"plots/{file_suffix}/env_values_{file_suffix}.csv", index=False)
    user_positions.to_csv(f"plots/{file_suffix}/user_values_{file_suffix}.csv", index=False)
def get_environment_plots(prices, global_hash_rate, n_trials, title_suffix):
    """Build the price and network-hashrate line plots.

    Args:
        prices: per-day average simulated BTC price series.
        global_hash_rate: per-day average network hash rate series (EH/s).
        n_trials: number of simulation trials (used in the plot titles).
        title_suffix: scenario description appended to the titles.

    Returns:
        (price_fig, hashrate_fig) plotly figures.
    """
    price_fig = update_layout_wrapper(px.line(x = list(range(len(prices))), y = prices,
                        labels = {"y": "Price (USD)", "x": "Day"},
                        title = f"Simulated Bitcoin Price over {n_trials} Trials {title_suffix}",
                        color_discrete_sequence = primary_color,
                        width=1600, height=900))
    hashrate_fig = update_layout_wrapper(px.line(x = list(range(len(global_hash_rate))), y = global_hash_rate,
                        labels = {"y": "Hash Rate (EH/s)", "x": "Day"},
                        title = f"Simulated Bitcoin Network Hash Rate over {n_trials} Trials {title_suffix}",
                        color_discrete_sequence = primary_color,
                        width=1600, height=900))
    return (price_fig, hashrate_fig)
def get_user_plots(user_positions, n_trials, title_suffix, elec_cost, palette):
    """Build per-machine position plots for one electricity cost.

    Filters *user_positions* to rows at *elec_cost* and returns one figure
    per selling strategy, coloured by machine type.

    Returns:
        (long_btc_fig, sell_daily_fig) plotly figures.
    """
    user_positions_e_c = user_positions.loc[user_positions.elec_cost == elec_cost]
    long_btc_fig = update_layout_wrapper(px.line(user_positions_e_c.loc[user_positions_e_c.strategy == constants.Strategy.LONG_BTC.value].sort_values(by=['day']),
                        x = "day", y = "total_position_usd", color = "machine_type",
                        labels = {"total_position_usd": "Simulated Position (USD)", "day": "Day", "machine_type": "Machine Type "},
                        title = f"Simulated Position Value over {n_trials} Trials {title_suffix}, Long BTC, ${elec_cost} per kWh",
                        color_discrete_sequence = palette,
                        width=1600, height=900))
    sell_daily_fig = update_layout_wrapper(px.line(user_positions_e_c.loc[user_positions_e_c.strategy == constants.Strategy.SELL_DAILY.value].sort_values(by=['day']),
                        x = "day", y = "total_position_usd", color = "machine_type",
                        labels = {"total_position_usd": "Simulated Position (USD)", "day": "Day", "machine_type": "Machine Type "},
                        title = f"Simulated Position Value over {n_trials} Trials {title_suffix}, Selling Daily, ${elec_cost} per kWh",
                        color_discrete_sequence = palette,
                        width=1600, height=900))
    return (long_btc_fig, sell_daily_fig)
# NOTE(review): the defaults `elec_costs = [0.04, 0.07]` and
# `user_machine_prices = config.machine_prices` are mutable default
# arguments; they appear to be read-only here, but confirm no callee mutates
# them.
def get_summary_plots(price_params, fee_params, block_subsidy, n_trials, title_suffix, file_suffix, user_machine_prices = config.machine_prices, elec_costs = [0.04, 0.07], palette = my_palette):
    """Run the full simulation for one scenario and write all plots + CSVs.

    Plots are grouped by electricity cost; output files go under
    ``plots/<file_suffix>/``, which must already exist.
    """
    init_prices = PriceGenerator(price_params).generate_prices()
    user_miners_long_btc, user_miners_sell_daily = UserMinerGenerator().generate_user_miners(machine_prices = user_machine_prices, elec_costs = elec_costs)
    env_miners = MinerGenerator().generate_miner_distribution()
    sim = Simulator(env_miners = env_miners,
                    user_miners_long_btc = user_miners_long_btc,
                    user_miners_sell_daily = user_miners_sell_daily,
                    prices = init_prices,
                    price_params = price_params,
                    fee_params = fee_params,
                    block_subsidy = block_subsidy)
    sim.run_simulation_n_trials(n_trials)
    # Trial-averaged outputs.
    user_positions = sim.get_avg_user_positions()
    prices = sim.get_avg_prices()
    global_hash_rate = sim.get_avg_global_hash_rate()
    price_fig, hashrate_fig = get_environment_plots(prices, global_hash_rate, n_trials, title_suffix)
    price_fig.write_image(f"plots/{file_suffix}/price_plot_{file_suffix}.png", scale=8)
    hashrate_fig.write_image(f"plots/{file_suffix}/hashrate_plot_{file_suffix}.png", scale=8)
    # One pair of strategy plots per distinct electricity cost.
    for elec_cost in user_positions.elec_cost.unique():
        user_figs = get_user_plots(user_positions, n_trials, title_suffix, elec_cost, palette)
        user_figs[0].write_image(f"plots/{file_suffix}/long_btc_plot_{file_suffix}_{int(elec_cost * 100)}.png", scale=8)
        user_figs[1].write_image(f"plots/{file_suffix}/sell_daily_plot_{file_suffix}_{int(elec_cost * 100)}.png", scale=8)
    save_csvs(prices, global_hash_rate, n_trials, user_positions, file_suffix)
def get_user_opex_plots(user_positions, n_trials, title_suffix, machine_type, palette):
    """Build per-electricity-cost position plots for one machine type.

    Counterpart of get_user_plots with the roles of machine type and
    electricity cost swapped: filter to *machine_type*, colour by cost.

    Returns:
        (long_btc_fig, sell_daily_fig) plotly figures.
    """
    user_positions_m_t = user_positions.loc[user_positions.machine_type == machine_type.value]
    long_btc_fig = update_layout_wrapper(px.line(user_positions_m_t.loc[user_positions_m_t.strategy == constants.Strategy.LONG_BTC.value].sort_values(by=['day']),
                        x = "day", y = "total_position_usd", color = "elec_cost",
                        labels = {"total_position_usd": "Simulated Position (USD)", "day": "Day", "elec_cost": "Electricity Cost (USD/kWh) "},
                        title = f"Simulated Position Value over {n_trials} Trials using {machine_type.value} {title_suffix}, Long BTC",
                        color_discrete_sequence = palette,
                        width=1600, height=900))
    sell_daily_fig = update_layout_wrapper(px.line(user_positions_m_t.loc[user_positions_m_t.strategy == constants.Strategy.SELL_DAILY.value].sort_values(by=['day']),
                        x = "day", y = "total_position_usd", color = "elec_cost",
                        labels = {"total_position_usd": "Simulated Position (USD)", "day": "Day", "elec_cost": "Electricity Cost (USD/kWh) "},
                        title = f"Simulated Position Value over {n_trials} Trials using {machine_type.value} {title_suffix}, Selling Daily",
                        color_discrete_sequence = palette,
                        width=1600, height=900))
    return (long_btc_fig, sell_daily_fig)
# NOTE(review): same mutable default arguments as get_summary_plots; they
# appear to be read-only -- confirm.
def get_summary_plots_opex(price_params, fee_params, block_subsidy, n_trials, title_suffix, file_suffix, user_machine_prices = config.machine_prices, elec_costs = [0.04, 0.07], palette = opex_palette):
    """Like get_summary_plots, but group the strategy plots by machine type
    (one pair of plots per machine, coloured by electricity cost)."""
    init_prices = PriceGenerator(price_params).generate_prices()
    user_miners_long_btc, user_miners_sell_daily = UserMinerGenerator().generate_user_miners(machine_prices = user_machine_prices, elec_costs = elec_costs)
    env_miners = MinerGenerator().generate_miner_distribution()
    sim = Simulator(env_miners = env_miners,
                    user_miners_long_btc = user_miners_long_btc,
                    user_miners_sell_daily = user_miners_sell_daily,
                    prices = init_prices,
                    price_params = price_params,
                    fee_params = fee_params,
                    block_subsidy = block_subsidy)
    sim.run_simulation_n_trials(n_trials)
    user_positions = sim.get_avg_user_positions()
    prices = sim.get_avg_prices()
    global_hash_rate = sim.get_avg_global_hash_rate()
    price_fig, hashrate_fig = get_environment_plots(prices, global_hash_rate, n_trials, title_suffix)
    price_fig.write_image(f"plots/{file_suffix}/price_plot_{file_suffix}.png", scale=8)
    hashrate_fig.write_image(f"plots/{file_suffix}/hashrate_plot_{file_suffix}.png", scale=8)
    # One pair of strategy plots per machine type in the price table.
    for machine_type in user_machine_prices:
        user_figs = get_user_opex_plots(user_positions, n_trials, title_suffix, machine_type, palette)
        user_figs[0].write_image(f"plots/{file_suffix}/long_btc_plot_{file_suffix}_{machine_type.value}.png", scale=8)
        user_figs[1].write_image(f"plots/{file_suffix}/sell_daily_plot_{file_suffix}_{machine_type.value}.png", scale=8)
    save_csvs(prices, global_hash_rate, n_trials, user_positions, file_suffix)
if __name__ == '__main__':
    # Fixed seeds so repeated runs produce identical plots.
    random.seed(1032009)
    np.random.seed(1032009)
    n_trials = 25
    fee_params = CMDataLoader.get_historical_fee_params()
    block_subsidy = 6.25
    # Scenario 1: price dynamics fitted to historical data.
    historical_price_params = CMDataLoader.get_historical_price_params()
    get_summary_plots(historical_price_params, fee_params, block_subsidy, n_trials, "with Historical Parameters", "historical")
    # Scenario 2: same volatility, drift forced negative (bear market).
    bearish_price_params = (historical_price_params[0], -1 * abs(historical_price_params[1]), historical_price_params[2])
    get_summary_plots(bearish_price_params, fee_params, block_subsidy, n_trials, "with Bearish Parameters", "bearish")
    # Scenario 3: zero drift with 25% higher volatility (bull w/ corrections).
    corrections_price_params = (historical_price_params[0], 0, historical_price_params[2] * 1.25)
    get_summary_plots(corrections_price_params, fee_params, block_subsidy, n_trials, "in Bull Market with Corrections", "corrections")
    # Hardware comparison: only the S9 and S19 at a single electricity cost.
    s9_s19_prices = {key: config.machine_prices[key] for key in [constants.MachineName.ANTMINER_S9, constants.MachineName.ANTMINER_S19]}
    get_summary_plots(historical_price_params, fee_params, block_subsidy, n_trials, "with Historical Parameters", "historical-machines", s9_s19_prices, [0.03], hardware_palette)
    # Opex comparison: bearish scenario across three electricity costs.
    get_summary_plots_opex(bearish_price_params, fee_params, block_subsidy, n_trials, "with Bearish Parameters", "bearish-opex", s9_s19_prices, [0.03, 0.04, 0.05], opex_palette)
| 2.046875 | 2 |
Python/moveFromCurrentDirectory.py | frankcash/Misc | 0 | 12762444 | <filename>Python/moveFromCurrentDirectory.py
"""Print the current working directory, obtained by running the `pwd` command."""
import subprocess

PATH = "pwd"
# Run `pwd` and capture its stdout; communicate() returns bytes, so decode
# before stripping the trailing newline.
process = subprocess.Popen(PATH.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
currentDirectory = output.decode().rstrip()
documentsDirectory = "~/Documents/"
# Bug fix: `print currentDirectory` was Python-2 syntax (a SyntaxError on
# Python 3); use the print() function.
print(currentDirectory)
# shutil.copy(src, dst)
| 2.765625 | 3 |
017/main.py | alexprengere/euler | 0 | 12762445 | #!/usr/bin/env python
"""
$ python main.py 1000
21124
"""
import sys
def spell(n):
    """Spell out *n* in British-English words.

    e.g. 342 -> 'three hundred and forty-two', 1001 -> 'one thousand, one'.
    """
    small = [
        'zero', 'one', 'two', 'three', 'four',
        'five', 'six', 'seven', 'eight', 'nine',
        'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
        'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen'
    ]
    tens_words = [
        None, None, 'twenty', 'thirty', 'forty',
        'fifty', 'sixty', 'seventy', 'eighty', 'ninety'
    ]
    if n < 20:
        return small[n]
    if n < 100:
        q, r = divmod(n, 10)
        return tens_words[q] if r == 0 else tens_words[q] + '-' + small[r]
    if n < 1000:
        q, r = divmod(n, 100)
        prefix = small[q] + ' hundred'
        return prefix if r == 0 else prefix + ' and ' + spell(r)
    # Large scales, biggest first; a non-zero remainder is joined with ', '.
    for scale, word in ((10 ** 9, 'billion'), (10 ** 6, 'million'), (10 ** 3, 'thousand')):
        if n >= scale:
            q, r = divmod(n, scale)
            prefix = spell(q) + ' ' + word
            return prefix if r == 0 else prefix + ', ' + spell(r)
if __name__ == '__main__':
    # Project Euler 17: total letters used when spelling 1..n inclusive.
    limit = int(sys.argv[1])
    letter_count = 0
    for number in range(1, limit + 1):
        letter_count += sum(1 for ch in spell(number) if ch.isalpha())
    print(letter_count)
| 3.890625 | 4 |
boundness_testing.py | ellarcastelloe/bound_systems | 0 | 12762446 | <gh_stars>0
import numpy as np
import pandas as pd
import boundness_tools as bd
def make_grp_cat(gal_cat, twoDorthreeD, absrmag, mag_floor, radeg, dedeg, cz, logmstar, gasmass,gasmasslogged, logmh, grpn, grp, czreal=None, vx=None, vy=None, vz=None, rdist=None):
    """Build a per-group catalogue from a per-galaxy catalogue.

    Averages member positions/velocities per FoF group, computes group
    masses and virial radii, and attaches each group's 40 nearest-neighbour
    indices.  String arguments name the relevant columns of *gal_cat*; the
    3d-only columns (czreal, vx, vy, vz, rdist) are required when
    twoDorthreeD == '3d'.
    """
    # Select galaxies above the luminosity floor; -17.0 signals "use the
    # survey's own completeness flag" instead of a magnitude cut.
    if mag_floor == -17.0:
        galsabovefloor = gal_cat.loc[gal_cat.fl_insample == 1]
    else:
        galsabovefloor = gal_cat.loc[gal_cat[absrmag] < mag_floor]
    grpids = np.unique(np.array(galsabovefloor[grp]))
    if twoDorthreeD == '3d':
        grps = pd.DataFrame(columns = ['grp', 'grpn', 'radeg','dedeg', 'cz_obs', 'mass', 'logmh', 'nn_inds', 'cz_real', 'vx', 'vy', 'vz', 'rdist', 'boundFlag', 'boundN', 'boundID', 'boundLog337', 'grpR337'])
    if twoDorthreeD == '2d':
        grps = pd.DataFrame(columns = ['grp', 'grpn', 'radeg','dedeg', 'cz_obs', 'mass', 'logmh', 'nn_inds', 'boundFlag', 'boundN', 'boundID', 'boundLog337', 'grpR337'])
    grps.grp = grpids
    for i in range(len(grpids)):
        # Members of this group above the floor; averages define the group.
        thisgrp = gal_cat.loc[(gal_cat[grp] == grpids[i]) & (gal_cat[absrmag] < mag_floor)]
        grpra = np.mean(thisgrp[radeg])
        grpdec = np.mean(thisgrp[dedeg])
        grpczobs = np.mean(thisgrp[cz])
        thisgrpn = np.shape(thisgrp)[0]
        grpmass = calc_grp_mass(thisgrp, gasmasslogged, logmstar, gasmass, logmh, grpn)
        # Sum the distinct halo masses of the members (log10 of linear sum).
        grplogmh = np.log10(sum(10**np.unique(np.array(thisgrp[logmh]))))
        grpRvir = bd.Rvir(grplogmh, 0.7)
        grps.loc[grps.grp == grpids[i], 'radeg'] = grpra
        grps.loc[grps.grp == grpids[i], 'dedeg'] = grpdec
        grps.loc[grps.grp == grpids[i], 'cz_obs'] = grpczobs
        grps.loc[grps.grp == grpids[i], 'mass'] = grpmass
        grps.loc[grps.grp == grpids[i], 'logmh'] = grplogmh
        grps.loc[grps.grp == grpids[i], 'grpn'] = thisgrpn
        grps.loc[grps.grp == grpids[i], 'grpR337'] = grpRvir
        if twoDorthreeD == '3d':
            grpczreal = np.mean(thisgrp[czreal])
            grprdist = np.mean(thisgrp[rdist])
            grpvx = np.mean(thisgrp[vx])
            grpvy = np.mean(thisgrp[vy])
            grpvz = np.mean(thisgrp[vz])
            grps.loc[grps.grp == grpids[i], 'cz_real'] = grpczreal
            grps.loc[grps.grp == grpids[i], 'rdist'] = grprdist
            grps.loc[grps.grp == grpids[i], 'vx'] = grpvx
            grps.loc[grps.grp == grpids[i], 'vy'] = grpvy
            grps.loc[grps.grp == grpids[i], 'vz'] = grpvz
    ras = np.float128(np.array(grps.radeg))
    decs = np.float128(np.array(grps.dedeg))
    # In 3d use the true (no-distortion) cz; in 2d the observed one.
    if twoDorthreeD == '3d':
        czs = np.float128(np.array(grps.cz_real))
    elif twoDorthreeD == '2d':
        czs = np.float128(np.array(grps.cz_obs))
    num_neighbors = 40
    # NOTE(review): `grpmass` here is the value left over from the LAST loop
    # iteration (a single group's mass), not a per-group array -- confirm
    # that bd.nearest_neighbor really expects a scalar here.
    neighbor_dist, neighbor_ind = bd.nearest_neighbor(ras, decs, czs, grpmass, num_neighbors)
    grpids = np.unique(np.array(grps.grp))
    for i in range(len(grpids)):
        # Column 7 is 'nn_inds' in both the 2d and 3d column layouts above.
        grps.iat[i, 7] = neighbor_ind[i, :]
    # Bound-system bookkeeping starts as "each group is its own system".
    grps.boundFlag = 0
    grps.boundN = grps.grpn
    grps.boundID = grps.grp
    grps.boundLog337 = grps.logmh
    return grps
def make_gal_cat(grp_cat, gal_cat, radeg, dedeg, cz, mag_floor, absrmag, grp, twoDorthreeD):
    """Propagate bound-system properties from the group catalogue back onto
    the galaxy catalogue, computing per-system statistics along the way.

    For each bound system: projected radius, crossing time, u-r colour gap,
    gas/stellar content, R337 overlap, and (for large enough systems) the
    Anderson-Darling and Dressler-Shectman substructure tests.
    Returns the modified *gal_cat*.
    """
    bdids = np.array(grp_cat.boundID)
    for i in range(len(bdids)):
        bdid = bdids[i]
        bdgrp = grp_cat.loc[grp_cat.boundID == bdid]
        thisbound = np.array(bdgrp.boundFlag)[0]
        thisboundn = np.array(bdgrp.boundN)[0]
        thisbdid = np.array(bdgrp.boundID)[0]
        thislogmh_bound = np.array(bdgrp.boundLog337)[0]
        thisRvir = np.array(bdgrp.grpR337)[0]
        # Stamp this bound system's ID onto every member galaxy of every FoF
        # group belonging to the system.
        bdgrp_fofids = np.array(grp_cat.loc[grp_cat.boundID == bdid].grp)
        for j in range(len(bdgrp_fofids)):
            thisfofid = bdgrp_fofids[j]
            gal_cat.loc[(gal_cat[grp] == thisfofid) & (gal_cat[absrmag] < mag_floor), 'boundID'] = bdid
        # Re-select member galaxies (now tagged) to compute system statistics.
        bdgrp = gal_cat.loc[gal_cat.boundID == bdid]
        ras = bdgrp[radeg] * np.pi/180
        decs = bdgrp[dedeg] * np.pi/180
        # NOTE(review): cz is also multiplied by pi/180 here, like an angle --
        # confirm bd.Rproj expects that rather than a velocity in km/s.
        czs = bdgrp[cz] * np.pi/180
        thisRproj_bound = bd.Rproj(bdgrp, 0.7, ras, decs, czs)
        thistc = bd.crossing_time(bdgrp)
        thiscolorgap = bd.color_gap(bdgrp)
        thislogg, thislogs = bd.calculate_gas_content(bdgrp)
        thisR337overlap = bd.r337overlap_inbdsystem(bdgrp)
        # Substructure tests need minimum membership to be meaningful.
        if thisboundn > 5:
            thisadalpha = bd.AD_test(bdgrp)
            gal_cat.loc[gal_cat.boundID == bdid, 'boundADalpha'] = thisadalpha
        if thisboundn > 11:
            thisdspval = bd.DS_test(bdgrp)
            gal_cat.loc[gal_cat.boundID == bdid, 'boundDSpval'] = thisdspval
        gal_cat.loc[gal_cat.boundID == bdid, 'boundLogG'] = thislogg
        gal_cat.loc[gal_cat.boundID == bdid, 'boundLogS'] = thislogs
        gal_cat.loc[gal_cat.boundID == bdid, 'boundURcolorgap'] = thiscolorgap
        gal_cat.loc[gal_cat.boundID == bdid, 'boundTCross'] = thistc
        gal_cat.loc[gal_cat.boundID == bdid, 'boundFlag'] = thisbound
        gal_cat.loc[gal_cat.boundID == bdid, 'boundN'] = thisboundn
        gal_cat.loc[gal_cat.boundID == bdid, 'boundLog337'] = thislogmh_bound
        gal_cat.loc[gal_cat.boundID == bdid, 'grpR337'] = thisRvir
        gal_cat.loc[gal_cat.boundID == bdid, 'boundRproj'] = thisRproj_bound
        gal_cat.loc[gal_cat.boundID == bdid, 'boundR337overlap'] = thisR337overlap
    return gal_cat
def calc_grp_mass(grp, gasmasslogged, logmstar, gasmass, logmh, grpn):
    """Total (linear) mass of a FoF group: stars + gas + halo.

    Args:
        grp: DataFrame of the group's member galaxies.
        gasmasslogged: 'yes' if the *gasmass* column stores log10 masses,
            'no' if it stores linear masses.
        logmstar: column name of the log10 stellar mass.
        gasmass: column name of the gas mass (logged or linear, see above).
        logmh: column name of the log10 halo mass (same for all members).
        grpn: column name of the group multiplicity.

    Returns:
        Sum over members of stellar + gas mass, plus each member's equal
        share (1/grpn) of the halo mass.

    Raises:
        ValueError: if *gasmasslogged* is neither 'yes' nor 'no' (the old
            code fell through and raised UnboundLocalError instead).
    """
    # Each member carries an equal 1/grpn share of the (common) halo mass.
    halo_share = 10**grp[logmh] / np.array(grp[grpn])[0]
    if gasmasslogged == 'yes':
        return sum(10**grp[logmstar] + 10**grp[gasmass] + halo_share)
    if gasmasslogged == 'no':
        return sum(10**grp[logmstar] + grp[gasmass] + halo_share)
    raise ValueError("gasmasslogged must be 'yes' or 'no', got %r" % (gasmasslogged,))
def get_gal_cat_colnames_twoDorthreeD(catpath, mag_floor,absrmag,radeg,dedeg,cz,logmstar,gasmass, gasmasslogged,logmh,grpn,grp,name,urcolor, twoDorthreeD):
    """Load a galaxy catalogue and initialise the bound-system columns.

    absrmag, radeg, dedeg, cz, logmstar, gasmass, gasmasslogged, logmh,
    grpn, grp, and name are the names of the columns in the dataframe that
    correspond to each piece of information needed for boundness testing.
    *catpath* may be a CSV path or an already-loaded DataFrame.  All the
    column-name arguments are returned unchanged alongside the catalogue so
    callers can unpack one configuration tuple.
    """
    if isinstance(catpath, str) == True:
        gal_cat = pd.read_csv(catpath)
    else:
        gal_cat = catpath
    #gal_cat = gal_cat.loc[gal_cat.absrmag < mag_floor]
    """Create new columns for properties of bound systems"""
    # boundFlag=0: not (yet) part of a multi-group bound system; bound IDs
    # and masses start out equal to the FoF group's own values.
    bound = [0]*np.shape(gal_cat)[0]
    gal_cat['boundFlag'] = bound
    gal_cat['boundN'] = gal_cat[grpn]
    gal_cat['boundID'] = gal_cat[grp]
    gal_cat['boundLog337'] = gal_cat[logmh]
    gal_cat['grpR337'] = 0
    gal_cat['boundRproj'] = 0
    gal_cat['boundADalpha'] = 0.
    gal_cat['boundTCross'] = 0.
    gal_cat['boundURcolorgap'] = 0.
    gal_cat['boundDSpval'] = 0.
    gal_cat['boundLogG'] = 0.
    gal_cat['boundLogS'] = 0.
    gal_cat['boundR337overlap'] = 0.
    return gal_cat, mag_floor, absrmag, radeg, dedeg, cz, logmstar, gasmass,gasmasslogged, logmh, grpn, grp, name,urcolor, twoDorthreeD
def main():
    """Run the boundness pipeline: load a catalogue, merge mutually bound
    FoF groups into bound systems, compute system statistics, write CSVs."""
    c = 3.0E5 #km/s
    """Read in FoF catalog"""
    # Exactly one of the get_gal_cat_colnames_twoDorthreeD calls below is
    # active; the commented-out ones select other catalogues (RESOLVE, mocks).
    ecopath = "/afs/cas.unc.edu/users/e/l/elcastel/ECO/boundness/eco_basecat_wG3grps072321.csv"
    mag_floor_eco = -17.33 #ECO
    gal_cat, mag_floor, absrmag, radeg, dedeg, cz, logmstar, gasmass,gasmasslogged, logmh, grpn, grp, name, urcolor, twoDorthreeD = get_gal_cat_colnames_twoDorthreeD(ecopath, mag_floor_eco,'absrmag','radeg','dedeg','cz','logmstar','logmgas','yes','logmh','grpn','grp','name', 'modelu_r', '2d')
    resolve = "/afs/cas.unc.edu/users/e/l/elcastel/ECO/boundness/resolve_basecat_wG3grps072321.csv"
    mag_floor_resolve = -17.0 #RESOLVE
    # gal_cat,mag_floor, absrmag, radeg, dedeg, cz, logmstar, gasmass, gasmasslogged, logmh, grpn, grp, name, urcolor, twoDorthreeD = get_gal_cat_colnames_twoDorthreeD(resolve, mag_floor_resolve,'absrmag','radeg','dedeg','cz','logmstar','logmgas', 'yes','logmh','grpn','grp','name', 'modelu_r', '2d')
    mock = "/afs/cas.unc.edu/users/e/l/elcastel/ECO/boundness/fofcats_61721/ECO_cat_7_Planck_memb_cat.csv"
    # mock = "/afs/cas.unc.edu/users/e/l/elcastel/MockCatalog/csvs/eco_m200_0.csv"
    mag_floor_mock = -17.33
    # gal_cat, mag_floor, absrmag, radeg, dedeg, cz, logmstar, gasmass,gasmasslogged, logmh, grpn, grp, name, twoDorthreeD = get_gal_cat_colnames_twoDorthreeD(mock, mag_floor_mock,'M_r','ra','dec','cz','logmstar','mhi', 'no','M_group','g_ngal','groupid','g_galtype','2d')
    # gal_cat, mag_floor, absrmag, radeg, dedeg, cz, logmstar, gasmass,gasmasslogged, logmh, grpn, grp, name, twoDorthreeD = get_gal_cat_colnames_twoDorthreeD(mock, mag_floor_mock,'M_r','ra','dec','cz','logmstar','mhi', 'no','M_group','g_ngal','groupid','g_galtype','3d')
    # gal_cat, mag_floor, absrmag, radeg, dedeg, cz, logmstar, gasmass,gasmasslogged, logmh, grpn, grp, name, twoDorthreeD = get_gal_cat_colnames_twoDorthreeD(mock, mag_floor_mock,'M_r','ra','dec','cz','logmstar','mhi', 'no','loghalom','halo_ngal','haloid','g_galtype','3d')
    zackmockpath = "/afs/cas.unc.edu/users/e/l/elcastel/ECO/boundness/ECO_cat_5_Planck_memb_cat_mvir_withG3groups.csv"
    zackmock = pd.read_csv(zackmockpath)
    zackmock['g3grpntot_l'] = zackmock['g3grpngi_l'] + zackmock['g3grpndw_l']
    # gal_cat, mag_floor, absrmag, radeg, dedeg, cz, logmstar, gasmass,gasmasslogged, logmh, grpn, grp, name, twoDorthreeD = get_gal_cat_colnames_twoDorthreeD(zackmock, mag_floor_mock,'M_r','ra','dec','cz','logmstar','mhi','no','g3logmh_l','g3grpntot_l','g3grp_l','g3fc_l','2d')
    if twoDorthreeD == '3d':
        czreal, vx, vy, vz, rdist = 'cz_nodist', 'vx', 'vy', 'vz', 'r_dist'
    # Build the per-group catalogue (with nearest-neighbour indices).
    if twoDorthreeD == '2d':
        df = make_grp_cat(gal_cat, twoDorthreeD, absrmag, mag_floor, radeg, dedeg, cz, logmstar, gasmass,gasmasslogged, logmh, grpn, grp)
    if twoDorthreeD == '3d':
        df = make_grp_cat(gal_cat, twoDorthreeD, absrmag, mag_floor, radeg, dedeg, cz, logmstar, gasmass,gasmasslogged, logmh, grpn, grp, czreal=czreal, vx=vx, vy=vy, vz=vz, rdist=rdist)
    # IDs for newly merged bound systems start above the largest FoF ID.
    newID = max(np.array(df.grp)) + 1
    grpids = np.array(df.grp)
    """loop through each group"""
    for i in range(len(grpids)):
        """Get group and neighbor group"""
        thisgrp = df.loc[df.grp == grpids[i]]
        which_neighbor = 0
        thisgrp_nninds = thisgrp.nn_inds.iloc[0]
        neighborgrp_id = df.iloc[thisgrp_nninds[which_neighbor]].grp
        neighborgrp = df.loc[df.grp == neighborgrp_id]
        """Make sure group and neighbor group are not already in a bound multi-group system together"""
        while np.array(thisgrp.boundID)[0] == np.array(neighborgrp.boundID)[0]:
            which_neighbor += 1
            neighborgrp_id = df.iloc[thisgrp_nninds[which_neighbor]].grp
            neighborgrp = df.loc[df.grp == neighborgrp_id]
        """Test for boundness between group and neighbor group"""
        if twoDorthreeD == '2d':
            bound, prob, vgrpgrp = bd.test_boundness(thisgrp.radeg, thisgrp.dedeg, thisgrp.cz_obs, thisgrp.mass,neighborgrp.radeg, neighborgrp.dedeg, neighborgrp.cz_obs, neighborgrp.mass)
        elif twoDorthreeD == '3d':
            bound, ratio, vgrpgrp = bd.test_boundness_3d(thisgrp.radeg, thisgrp.dedeg, thisgrp.rdist, thisgrp.cz_real, thisgrp.mass, thisgrp.vx, thisgrp.vy, thisgrp.vz, neighborgrp.radeg, neighborgrp.dedeg, neighborgrp.rdist, neighborgrp.cz_real, neighborgrp.mass, neighborgrp.vx, neighborgrp.vy, neighborgrp.vz)
        """If the groups are bound, change 'boundFlag' 'boundN' and 'boundID' parameters and continue testing for boundness with increasingly distant neighbors until the original FoF group is not bound to it's kth nearest neighbor"""
        while bound == 'yes':
            # Merge both systems under a fresh bound-system ID.
            df.loc[df.boundID == np.array(thisgrp.boundID)[0], 'boundFlag'] = 1
            df.loc[df.boundID == np.array(thisgrp.boundID)[0], 'boundID'] = newID
            df.loc[df.boundID == np.array(neighborgrp.boundID)[0], 'boundFlag'] = 1
            df.loc[df.boundID == np.array(neighborgrp.boundID)[0], 'boundID'] = newID
            thisgrp = df.loc[df.grp == np.array(thisgrp.grp)[0]]
            which_neighbor += 1
            neighborgrp_id = df.iloc[thisgrp_nninds[which_neighbor]].grp
            neighborgrp = df.loc[df.grp == neighborgrp_id]
            if twoDorthreeD == '2d':
                bound, prob, vgrpgrp = bd.test_boundness(thisgrp.radeg, thisgrp.dedeg, thisgrp.cz_obs, thisgrp.mass,neighborgrp.radeg, neighborgrp.dedeg, neighborgrp.cz_obs, neighborgrp.mass)
            elif twoDorthreeD == '3d':
                bound, ratio, vgrpgrp = bd.test_boundness_3d(thisgrp.radeg, thisgrp.dedeg, thisgrp.rdist, thisgrp.cz_real, thisgrp.mass, thisgrp.vx, thisgrp.vy, thisgrp.vz, neighborgrp.radeg, neighborgrp.dedeg, neighborgrp.rdist, neighborgrp.cz_real, neighborgrp.mass, neighborgrp.vx, neighborgrp.vy, neighborgrp.vz)
        if i % 100 == 0:
            print(i)
        newID += 1
    # Aggregate per-bound-system totals (multiplicity and halo mass).
    bdids = np.unique(np.array(df.boundID))
    df['boundADalpha'] = 0.
    df['boundTCross'] = 0.
    df['boundURcolorgap'] = 0.
    df['boundLogS'] = 0.
    df['boundLogG'] = 0.
    df['boundDSpval'] = 0.
    df['boundR337overlap'] = 0.
    for i in range(len(bdids)):
        bdid = bdids[i]
        bdgrp = df.loc[df.boundID == bdid]
        thisboundn = sum(np.array(bdgrp.grpn))
        logmhgrp = np.log10(sum(10**np.array(bdgrp.logmh)))
        df.loc[df.boundID == bdid, 'boundLog337'] = logmhgrp
        df.loc[df.boundID == bdid, 'boundN'] = np.float128(thisboundn)
    df = df.rename(columns={"logmh": "log337"})
    # Galaxies below the floor get sentinel values for all bound columns.
    dimdf = gal_cat.loc[gal_cat[absrmag] > mag_floor]
    cols = ['boundN', 'boundID', 'boundLog337','grpR337', 'boundRproj', 'modelu_r', 'den1mpc', 'boundADalpha', 'boundTCross','boundURcolorgap', 'log337', 'boundLogS', 'boundLogG', 'boundDSpval']
    dimdf['boundFlag'] = 0
    for i in range(len(cols)):
        thiscol = cols[i]
        dimdf[thiscol] = -99.
    # Push bound-system properties down to member galaxies, then copy the
    # per-system statistics computed there back up to the group catalogue.
    gal_cat = make_gal_cat(df, gal_cat, radeg, dedeg, cz, mag_floor, absrmag, grp, twoDorthreeD)
    for i in range(len(bdids)):
        bdid = bdids[i]
        bdgrp = df.loc[df.boundID == bdid]
        adalpha = np.array(gal_cat.loc[gal_cat.boundID == bdid].boundADalpha)[0]
        tcross = np.array(gal_cat.loc[gal_cat.boundID == bdid].boundTCross)[0]
        colorgap = np.array(gal_cat.loc[gal_cat.boundID == bdid].boundURcolorgap)[0]
        gas = np.array(gal_cat.loc[gal_cat.boundID == bdid].boundLogG)[0]
        stars = np.array(gal_cat.loc[gal_cat.boundID == bdid].boundLogS)[0]
        dspval = np.array(gal_cat.loc[gal_cat.boundID == bdid].boundDSpval)[0]
        overlap = np.array(gal_cat.loc[gal_cat.boundID == bdid].boundR337overlap)[0]
        df.loc[df.boundID == bdid, 'boundADalpha'] = adalpha
        df.loc[df.boundID == bdid, 'boundTCross'] = tcross
        df.loc[df.boundID == bdid, 'boundURcolorgap'] = colorgap
        df.loc[df.boundID == bdid, 'boundLogS'] = stars
        df.loc[df.boundID == bdid, 'boundLogG'] = gas
        df.loc[df.boundID == bdid, 'boundDSpval'] = dspval
        df.loc[df.boundID == bdid, 'boundR337overlap'] = overlap
    df.to_csv('grpcat_eco_m337_72521_1.csv')
    gal_cat = pd.concat([gal_cat, dimdf])
    gal_cat.to_csv('galcat_eco_m337_72521_1.csv')
| 2.109375 | 2 |
forcePlates/MayaIntegration/tests/integration/test_pygame_grid_visualize.py | MontyThibault/centre-of-mass-awareness | 0 | 12762447 | <filename>forcePlates/MayaIntegration/tests/integration/test_pygame_grid_visualize.py<gh_stars>0
## Centre of Pressure Uncertainty for Virtual Character Control
## McGill Computer Graphics Lab
##
## Released under the MIT license. This code is free to be modified
## and distributed.
##
## Author: <NAME>, <EMAIL>
## Last Updated: Sep 02, 2016
## ------------------------------------------------------------------------
from plugin.threads.pygame_thread import PyGameThread
import plugin.line_visualize as lv
from plugin.gridcalibration.grid import Grid
import time
import pytest
@pytest.fixture
def restart_pygame():
    # Make sure no pygame display/session left over from a previous test is
    # still active before this test initializes its own.
    import pygame
    pygame.quit()
def test_feed_visualizers_to_pygame(restart_pygame):
    """
    This test will draw an array of ten by ten points, stretched to fit the window.
    """
    pygame_thread = PyGameThread()
    pygame_thread.start()

    grid = Grid(8, 10, 8, 10)

    # One arbitrary sample per grid point: a unit-diagonal segment from the
    # point, with a force equal to |x| of its origin.
    samples = [
        (p, (p[0] + 1, p[1] + 1), abs(p[0]))
        for p in grid.points()
    ]

    # Build the visualizers and hand their draw callbacks to the pygame thread.
    grid_vis = lv.GridVisualizer(grid)
    point_vis = lv.PointVisualizer((0, 0), grid_vis)
    sample_vis = lv.SampleVisualizer(samples, grid_vis)

    for vis in (grid_vis, point_vis, sample_vis):
        pygame_thread.add_draw_task(vis.draw)

    # Give the render thread a moment to run before the fixture tears pygame down.
    time.sleep(0.1)

    # pgt.kill()
    # pgt.join()
    # pgt.query_exceptions()
| 2.21875 | 2 |
kino/skills/card.py | DongjunLee/kino-bot | 109 | 12762448 | <filename>kino/skills/card.py<gh_stars>100-1000
import arrow
import re
from ..slack.resource import MsgResource
from ..utils.data_handler import DataHandler
from ..utils.member import Member
class BusinessCard(object):
    """Tracks who holds the shared business card and its hand-over history.

    State is persisted in ``card.json`` via ``DataHandler`` under two keys:
    ``holder`` (list of member names) and ``history`` (list of log strings).
    """

    def __init__(self, slackbot=None):
        self.fname = "card.json"
        self.data_handler = DataHandler()

        if slackbot is None:
            # BUG FIX: SlackerAdapter was referenced here but never imported,
            # so constructing BusinessCard without a slackbot raised NameError.
            # NOTE(review): import path assumed from the package layout
            # (sibling ..slack package) - confirm against the repo.
            from ..slack.slackbot import SlackerAdapter

            self.slackbot = SlackerAdapter()
        else:
            self.slackbot = slackbot

    def read_holder(self):
        """Announce the current card holder(s) in Slack."""
        card_data = self.data_handler.read_file(self.fname)
        holder_names = ", ".join(card_data.get("holder", []))
        # Append a dash after each run of capital letters (light name
        # obfuscation; note only the last capital of a run is captured).
        holder_names = re.sub("([A-Z])+", r"\1-", holder_names)
        self.slackbot.send_message(text=MsgResource.CARD_HOLDER(names=holder_names))

    def read_history(self):
        """Announce the five most recent hand-over log entries in Slack."""
        card_data = self.data_handler.read_file(self.fname)
        historys = "\n - ".join(card_data.get("history", [])[-5:])
        self.slackbot.send_message(text=MsgResource.CARD_HISTORY(historys=historys))

    def forward(self, member):
        """Transfer the card between members and record it.

        ``member`` may be ``[to]`` (sender inferred from the Slack user) or
        ``[from, to]``. ``None`` or more than two names is rejected.
        """
        if member is None:
            self.slackbot.send_message(text=MsgResource.CARD_FORWARD_NONE)
            return
        elif len(member) > 2:
            self.slackbot.send_message(text=MsgResource.CARD_FORWARD_NONE)
            return

        if len(member) == 2:
            from_name = member[0]
            to_name = member[1]
        else:  # len(member) == 1: sender is the current Slack user
            member_util = Member()
            from_name = member_util.get_name(self.slackbot.user)
            to_name = member[0]

        # Forwarding to oneself is silently ignored.
        if from_name != to_name:
            card_data = self.data_handler.read_file(self.fname)

            holder_data = card_data.get("holder", [])
            if from_name not in holder_data:
                self.slackbot.send_message(
                    text=MsgResource.NOT_CARD_HOLDER(from_name=from_name)
                )
                return

            holder_data.remove(from_name)
            holder_data.append(to_name)

            history_data = card_data.get("history", [])
            history_data.append(
                arrow.now().format("YYYY-MM-DD HH:mm") + f": {from_name} -> {to_name}"
            )

            card_data["holder"] = holder_data
            card_data["history"] = history_data
            self.data_handler.write_file(self.fname, card_data)

            self.slackbot.send_message(
                text=MsgResource.CARD_FORWARD(from_name=from_name, to_name=to_name)
            )
| 2.359375 | 2 |
extras/test_octasphere.py | BruegelN/svg3d | 286 | 12762449 | <reponame>BruegelN/svg3d<filename>extras/test_octasphere.py<gh_stars>100-1000
#!/usr/bin/env python3
import numpy as np
import svgwrite.utils
from octasphere import octasphere
import pyrr
from parent_folder import svg3d
from math import *
# Short aliases for the pyrr matrix factory functions used below.
create_ortho = pyrr.matrix44.create_orthogonal_projection
create_perspective = pyrr.matrix44.create_perspective_projection
create_lookat = pyrr.matrix44.create_look_at
# Print floats as e.g. "+0.123" when dumping numpy arrays (debug convenience).
np.set_printoptions(formatter={'float': lambda x: "{0:+0.3f}".format(x)})
quaternion = pyrr.quaternion
# Blinn-Phong shading constants shared by the face shaders below.
SHININESS = 100
DIFFUSE = np.float32([1.0, 0.8, 0.2])
SPECULAR = np.float32([0.5, 0.5, 0.5])
# Default output SVG size in pixels (width, height).
SIZE = (512, 256)
def rgb(r, g, b):
    """Clamp each channel to [0, 1] and format it as an svgwrite color string."""
    clamped = [min(max(channel, 0.0), 1.0) for channel in (r, g, b)]
    return svgwrite.utils.rgb(clamped[0] * 255, clamped[1] * 255, clamped[2] * 255)
def rotate_faces(faces):
    """Rotate every vertex of every face by a fixed Euler-angle quaternion."""
    rotation = quaternion.create_from_eulers([pi * -0.4, pi * 0.9, 0])
    rotated = [
        [quaternion.apply_to_vector(rotation, vertex) for vertex in face]
        for face in faces
    ]
    return np.float32(rotated)
def translate_faces(faces, offset):
    """Return a copy of *faces* with every vertex shifted by the vector *offset*."""
    shift = np.float32(offset)
    return faces + shift
def merge_faces(faces0, faces1):
    """Concatenate two face arrays along the first (face-count) axis."""
    return np.concatenate((faces0, faces1), axis=0)
# Module-level camera: 25-degree perspective projection (2:1 aspect) looking
# at the origin from (25, 20, 60). make_octaspheres() reads view_matrix.
projection = create_perspective(fovy=25, aspect=2, near=10, far=200)
view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
camera = svg3d.Camera(view_matrix, projection)
def make_octaspheres(ndivisions: int, radius: float, width=0, height=0, depth=0):
    """Build two copies of one octasphere (one axis-aligned, one rotated),
    placed side by side, and return them as a one-element list of shaded
    svg3d Meshes.
    """
    verts, indices = octasphere(ndivisions, radius, width, height, depth)
    faces = verts[indices]
    # Left copy unrotated at x=-12; right copy rotated at x=+12.
    left = translate_faces(faces, [ -12, 0, 0])
    right = translate_faces(rotate_faces(faces), [ 12, 0, 0])
    faces = merge_faces(left, right)
    # Append w=1 to each vertex and transform into eye space using the
    # module-level view_matrix, so the shader can light the faces there.
    ones = np.ones(faces.shape[:2] + (1,))
    eyespace_faces = np.dstack([faces, ones])
    eyespace_faces = np.dot(eyespace_faces, view_matrix)[:, :, :3]
    # Light, eye, and half-angle vectors for Blinn-Phong shading.
    L = pyrr.vector.normalize(np.float32([20, 20, 50]))
    E = np.float32([0, 0, 1])
    H = pyrr.vector.normalize(L + E)
    def frontface_shader(face_index, winding):
        # Cull back-facing triangles.
        if winding < 0:
            return None
        face = eyespace_faces[face_index]
        p0, p1, p2 = face[0], face[1], face[2]
        # Face normal from the triangle edge cross product.
        N = pyrr.vector3.cross(p1 - p0, p2 - p0)
        l2 = pyrr.vector3.squared_length(N)
        if l2 > 0:
            N = N / np.sqrt(l2)
        df = max(0, np.dot(N, L))  # diffuse term
        sf = pow(max(0, np.dot(N, H)), SHININESS)  # specular term
        color = df * DIFFUSE + sf * SPECULAR
        color = np.power(color, 1.0 / 2.2)  # gamma correction
        return dict(fill=rgb(*color), stroke="black", stroke_width="0.001")
    print(f"Generated octasphere: {ndivisions}, {radius}, {width}, {height}, {depth}")
    return [svg3d.Mesh(faces, frontface_shader)]
vp = svg3d.Viewport(-1, -.5, 2, 1)
engine = svg3d.Engine([])
# NOTE(review): this branch is deliberately disabled; flip to True to render
# the ndivisions=2 sphere as octasphere3.svg.
if False:
    mesh = make_octaspheres(ndivisions=2, radius=8)
    engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
    engine.render("octasphere3.svg", size=SIZE)
# Gallery renders: each call builds a side-by-side octasphere pair with
# different radius/width/height/depth parameters and writes one SVG file.
mesh = make_octaspheres(ndivisions=3, radius=7, width=16, height=16, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere1.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=0, radius=7, width=16, height=16, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere2.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=3, width=12, height=12, depth=12)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere4.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=1, width=12, height=12, depth=12)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere5.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=3, width=16, height=16, depth=0)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere6.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=3, width=16, height=0, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere7.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=3, width=0, height=16, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere8.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=0, width=16, height=16, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere9.svg", size=SIZE)
def tile():
    """Build a flat rounded tile (18x18, depth 0) as a shaded svg3d Mesh.

    NOTE(review): the shading setup below is duplicated in rounded_cube()
    and capsule(); only the octasphere() parameters differ.
    """
    verts, indices = octasphere(ndivisions=3, radius=3, width=18, height=18, depth=0)
    view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
    faces = verts[indices]
    # Append w=1 to each vertex and transform into eye space for lighting.
    ones = np.ones(faces.shape[:2] + (1,))
    eyespace_faces = np.dstack([faces, ones])
    eyespace_faces = np.dot(eyespace_faces, view_matrix)[:, :, :3]
    # Light, eye, and half-angle vectors for Blinn-Phong shading.
    L = pyrr.vector.normalize(np.float32([20, 20, 50]))
    E = np.float32([0, 0, 1])
    H = pyrr.vector.normalize(L + E)
    def frontface_shader(face_index, winding):
        # Cull back-facing triangles.
        if winding < 0:
            return None
        face = eyespace_faces[face_index]
        p0, p1, p2 = face[0], face[1], face[2]
        # Face normal from the triangle edge cross product.
        N = pyrr.vector3.cross(p1 - p0, p2 - p0)
        l2 = pyrr.vector3.squared_length(N)
        if l2 > 0:
            N = N / np.sqrt(l2)
        df = max(0, np.dot(N, L))  # diffuse term
        sf = pow(max(0, np.dot(N, H)), SHININESS)  # specular term
        color = df * DIFFUSE + sf * SPECULAR
        color = np.power(color, 1.0 / 2.2)  # gamma correction
        return dict(fill=rgb(*color), stroke="black", stroke_width="0.001")
    return svg3d.Mesh(faces, frontface_shader)
def rounded_cube():
    """Build a 15x15x13 rounded cube (corner radius 1) as a shaded svg3d Mesh.

    NOTE(review): the shading setup below is duplicated in tile() and
    capsule(); only the octasphere() parameters differ.
    """
    verts, indices = octasphere(ndivisions=3, radius=1, width=15, height=15, depth=13)
    view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
    faces = verts[indices]
    # Append w=1 to each vertex and transform into eye space for lighting.
    ones = np.ones(faces.shape[:2] + (1,))
    eyespace_faces = np.dstack([faces, ones])
    eyespace_faces = np.dot(eyespace_faces, view_matrix)[:, :, :3]
    # Light, eye, and half-angle vectors for Blinn-Phong shading.
    L = pyrr.vector.normalize(np.float32([20, 20, 50]))
    E = np.float32([0, 0, 1])
    H = pyrr.vector.normalize(L + E)
    def frontface_shader(face_index, winding):
        # Cull back-facing triangles.
        if winding < 0:
            return None
        face = eyespace_faces[face_index]
        p0, p1, p2 = face[0], face[1], face[2]
        # Face normal from the triangle edge cross product.
        N = pyrr.vector3.cross(p1 - p0, p2 - p0)
        l2 = pyrr.vector3.squared_length(N)
        if l2 > 0:
            N = N / np.sqrt(l2)
        df = max(0, np.dot(N, L))  # diffuse term
        sf = pow(max(0, np.dot(N, H)), SHININESS)  # specular term
        color = df * DIFFUSE + sf * SPECULAR
        color = np.power(color, 1.0 / 2.2)  # gamma correction
        return dict(fill=rgb(*color), stroke="black", stroke_width="0.001")
    return svg3d.Mesh(faces, frontface_shader)
def capsule():
    """Build a capsule (radius 4, length 18, zero height/depth) as a shaded svg3d Mesh.

    NOTE(review): the shading setup below is duplicated in tile() and
    rounded_cube(); only the octasphere() parameters differ.
    """
    verts, indices = octasphere(ndivisions=3, radius=4, width=18, height=0, depth=0)
    view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
    faces = verts[indices]
    # Append w=1 to each vertex and transform into eye space for lighting.
    ones = np.ones(faces.shape[:2] + (1,))
    eyespace_faces = np.dstack([faces, ones])
    eyespace_faces = np.dot(eyespace_faces, view_matrix)[:, :, :3]
    # Light, eye, and half-angle vectors for Blinn-Phong shading.
    L = pyrr.vector.normalize(np.float32([20, 20, 50]))
    E = np.float32([0, 0, 1])
    H = pyrr.vector.normalize(L + E)
    def frontface_shader(face_index, winding):
        # Cull back-facing triangles.
        if winding < 0:
            return None
        face = eyespace_faces[face_index]
        p0, p1, p2 = face[0], face[1], face[2]
        # Face normal from the triangle edge cross product.
        N = pyrr.vector3.cross(p1 - p0, p2 - p0)
        l2 = pyrr.vector3.squared_length(N)
        if l2 > 0:
            N = N / np.sqrt(l2)
        df = max(0, np.dot(N, L))  # diffuse term
        sf = pow(max(0, np.dot(N, H)), SHININESS)  # specular term
        color = df * DIFFUSE + sf * SPECULAR
        color = np.power(color, 1.0 / 2.2)  # gamma correction
        return dict(fill=rgb(*color), stroke="black", stroke_width="0.001")
    return svg3d.Mesh(faces, frontface_shader)
# Rebuild the camera with a square (1:1) aspect for the three-shape figure.
view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
projection = create_perspective(fovy=25, aspect=1, near=10, far=200)
camera = svg3d.Camera(view_matrix, projection)
# Viewport layout: three equal panes spaced dx apart along x.
dx = .9
x = -.5
y = -.15
w, h = 1.3, 1.3
engine.views = [
    svg3d.View(camera, svg3d.Scene([tile()]), svg3d.Viewport(x-1, y-.5, w, h)),
    svg3d.View(camera, svg3d.Scene([rounded_cube()]), svg3d.Viewport(x-1+dx, y-.5, w, h)),
    svg3d.View(camera, svg3d.Scene([capsule()]), svg3d.Viewport(x-1+dx*2, y-.5, w, h)),
]
engine.render("ThreeCuboids.svg", size=(600, 200))
| 2.21875 | 2 |
setup.py | pritchardlabatpsu/cga | 0 | 12762450 | from setuptools import setup, find_packages
setup(
name="ceres_infer",
version="1.0",
author="<NAME>",
description='CERES inference',
long_description=open('README.md').read(),
package_dir={"": "src"},
packages=find_packages("ceres_infer"),
include_package_data=True,
zip_safe=False,
install_requires=open('requirements.txt').read().strip().split('\n')
)
| 1.257813 | 1 |