| max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
|---|---|---|---|---|---|---|
tests/test_blue_dot.py | webknjaz/BlueDot | 0 | 12778551 |
from bluedot import MockBlueDot, BlueDotSwipe, BlueDotRotation
from time import sleep
from threading import Event, Thread
def test_default_values():
mbd = MockBlueDot()
assert mbd.device == "hci0"
assert mbd.port == 1
assert mbd.running
assert mbd.print_messages
assert mbd.double_press_time == 0.3
assert mbd.rotation_segments == 8
assert mbd.when_client_connects is None
assert mbd.when_client_disconnects is None
assert mbd.when_pressed is None
assert mbd.when_double_pressed is None
assert mbd.when_moved is None
assert mbd.when_released is None
assert mbd.when_swiped is None
def test_modify_values():
mbd = MockBlueDot(device = "hci1", port = 2, auto_start_server = False, print_messages = False)
assert mbd.device == "hci1"
assert mbd.port == 2
assert not mbd.running
assert not mbd.print_messages
mbd.print_messages = True
assert mbd.print_messages
assert mbd.double_press_time == 0.3
mbd.double_press_time = 0.4
assert mbd.double_press_time == 0.4
assert mbd.rotation_segments == 8
mbd.rotation_segments = 16
assert mbd.rotation_segments == 16
def test_start_stop():
mbd = MockBlueDot(auto_start_server = False)
assert not mbd.running
mbd.start()
assert mbd.running
mbd.stop()
assert not mbd.running
def test_connect_disconnect():
mbd = MockBlueDot()
assert not mbd.is_connected
mbd.mock_client_connected()
assert mbd.wait_for_connection(1)
assert mbd.is_connected
mbd.mock_client_disconnected()
assert not mbd.is_connected
def test_when_connect_disconnect():
mbd = MockBlueDot()
event_connect = Event()
mbd.when_client_connects = lambda: event_connect.set()
event_disconnect = Event()
mbd.when_client_disconnects = lambda: event_disconnect.set()
assert not event_connect.is_set()
mbd.mock_client_connected()
assert event_connect.wait(1)
assert not event_disconnect.is_set()
mbd.mock_client_disconnected()
assert event_disconnect.wait(1)
def test_when_connect_disconnect_background():
mbd = MockBlueDot()
event_connect = Event()
mbd.set_when_client_connects(lambda: delay_function(event_connect.set, 0.2), background=True)
event_disconnect = Event()
mbd.set_when_client_disconnects(lambda: delay_function(event_disconnect.set, 0.2), background=True)
assert not event_connect.is_set()
mbd.mock_client_connected()
assert not event_connect.is_set()
assert event_connect.wait(1)
assert not event_disconnect.is_set()
mbd.mock_client_disconnected()
assert not event_disconnect.is_set()
assert event_disconnect.wait(1)
def test_pressed_moved_released():
mbd = MockBlueDot()
mbd.mock_client_connected()
#initial value
assert not mbd.is_pressed
assert mbd.value == 0
#pressed
mbd.mock_blue_dot_pressed(0,0)
assert mbd.is_pressed
assert mbd.value == 1
#released
mbd.mock_blue_dot_released(0,0)
assert not mbd.is_pressed
assert mbd.value == 0
#wait_for_press
delay_function(lambda: mbd.mock_blue_dot_pressed(0,0), 0.5)
assert mbd.wait_for_press(1)
assert not mbd.wait_for_release(0)
#wait_for_release
delay_function(lambda: mbd.mock_blue_dot_released(0,0), 0.5)
assert mbd.wait_for_release(1)
assert not mbd.wait_for_press(0)
def test_double_press():
mbd = MockBlueDot()
mbd.mock_client_connected()
def simulate_double_press():
#sleep longer than the double press time, to clear any past double presses!
sleep(mbd.double_press_time + 0.1)
mbd.mock_blue_dot_pressed(0,0)
mbd.mock_blue_dot_released(0,0)
mbd.mock_blue_dot_pressed(0,0)
mbd.mock_blue_dot_released(0,0)
def simulate_failed_double_press():
sleep(mbd.double_press_time + 0.1)
mbd.mock_blue_dot_pressed(0,0)
mbd.mock_blue_dot_released(0,0)
sleep(mbd.double_press_time + 0.1)
mbd.mock_blue_dot_pressed(0,0)
mbd.mock_blue_dot_released(0,0)
# when_double_pressed
event_double_pressed = Event()
mbd.when_double_pressed = lambda: event_double_pressed.set()
simulate_failed_double_press()
assert not event_double_pressed.is_set()
simulate_double_press()
assert event_double_pressed.is_set()
# wait for double press
# double press the blue dot
delay_function(simulate_double_press, 0.2)
# wait for double press
assert mbd.wait_for_double_press(1)
# dont double press the blue dot
delay_function(simulate_failed_double_press, 0.2)
assert not mbd.wait_for_double_press(1)
def test_when_pressed_moved_released():
mbd = MockBlueDot()
mbd.mock_client_connected()
#when_pressed
event_pressed = Event()
mbd.when_pressed = lambda: event_pressed.set()
#when_double_pressed
event_double_pressed = Event()
mbd.when_double_pressed = lambda: event_double_pressed.set()
#when_moved
event_moved = Event()
mbd.when_moved = lambda: event_moved.set()
#when_released
event_released = Event()
mbd.when_released = lambda: event_released.set()
assert not event_pressed.is_set()
mbd.mock_blue_dot_pressed(0,0)
assert event_pressed.is_set()
assert not event_moved.is_set()
mbd.mock_blue_dot_moved(1,1)
assert event_moved.is_set()
assert not event_released.is_set()
mbd.mock_blue_dot_released(0,0)
assert event_released.is_set()
assert not event_double_pressed.is_set()
mbd.mock_blue_dot_pressed(0,0)
assert event_double_pressed.is_set()
def test_when_pressed_moved_released_background():
mbd = MockBlueDot()
mbd.mock_client_connected()
#when_pressed
event_pressed = Event()
mbd.set_when_pressed(lambda: delay_function(event_pressed.set, 0.2), background=True)
#when_double_pressed
event_double_pressed = Event()
mbd.set_when_double_pressed(lambda: delay_function(event_double_pressed.set, 0.2), background=True)
#when_moved
event_moved = Event()
mbd.set_when_moved(lambda: delay_function(event_moved.set, 0.2), background=True)
#when_released
event_released = Event()
mbd.set_when_released(lambda: delay_function(event_released.set, 0.2), background=True)
# test that the events dont block
assert not event_pressed.is_set()
mbd.mock_blue_dot_pressed(0,0)
assert not event_pressed.is_set()
assert event_pressed.wait(1)
assert not event_moved.is_set()
mbd.mock_blue_dot_moved(1,1)
assert not event_moved.is_set()
assert event_moved.wait(1)
assert not event_released.is_set()
mbd.mock_blue_dot_released(0,0)
assert not event_released.is_set()
assert event_released.wait(1)
# set pressed, moved, released to None so they dont wait
mbd.set_when_pressed(None)
mbd.set_when_moved(None)
mbd.set_when_released(None)
mbd.mock_blue_dot_pressed(0,0)
mbd.mock_blue_dot_moved(1,1)
mbd.mock_blue_dot_released(0,0)
assert not event_double_pressed.is_set()
mbd.mock_blue_dot_pressed(0,0)
assert not event_double_pressed.is_set()
assert event_double_pressed.wait(1)
def test_position():
mbd = MockBlueDot()
mbd.mock_client_connected()
mbd.mock_blue_dot_pressed(0,0)
assert not mbd.position.top
assert mbd.position.middle
assert not mbd.position.bottom
assert not mbd.position.left
assert not mbd.position.right
mbd.mock_blue_dot_moved(1,0)
assert not mbd.position.top
assert not mbd.position.middle
assert not mbd.position.bottom
assert not mbd.position.left
assert mbd.position.right
mbd.mock_blue_dot_moved(-1,0)
assert not mbd.position.top
assert not mbd.position.middle
assert not mbd.position.bottom
assert mbd.position.left
assert not mbd.position.right
mbd.mock_blue_dot_moved(0,1)
assert mbd.position.top
assert not mbd.position.middle
assert not mbd.position.bottom
assert not mbd.position.left
assert not mbd.position.right
mbd.mock_blue_dot_moved(0,-1)
assert not mbd.position.top
assert not mbd.position.middle
assert mbd.position.bottom
assert not mbd.position.left
assert not mbd.position.right
mbd.mock_blue_dot_moved(0.1234, -0.4567)
assert mbd.position.x == 0.1234
assert mbd.position.y == -0.4567
mbd.mock_blue_dot_moved(1, 0)
assert mbd.position.distance == 1
assert mbd.position.angle == 90
def test_interaction():
mbd = MockBlueDot()
mbd.mock_client_connected()
assert mbd.interaction is None
mbd.mock_blue_dot_pressed(-1,0)
assert mbd.interaction.active
assert len(mbd.interaction.positions) == 1
assert mbd.interaction.distance == 0
assert mbd.interaction.pressed_position.x == -1
assert mbd.interaction.pressed_position.y == 0
assert mbd.interaction.current_position.x == -1
assert mbd.interaction.current_position.y == 0
assert mbd.interaction.previous_position is None
assert mbd.interaction.released_position is None
mbd.mock_blue_dot_moved(0,0)
assert mbd.interaction.active
assert len(mbd.interaction.positions) == 2
assert mbd.interaction.distance == 1
assert mbd.interaction.pressed_position.x == -1
assert mbd.interaction.pressed_position.y == 0
assert mbd.interaction.current_position.x == 0
assert mbd.interaction.current_position.y == 0
assert mbd.interaction.previous_position.x == -1
assert mbd.interaction.previous_position.y == 0
assert mbd.interaction.released_position is None
mbd.mock_blue_dot_released(1,0)
assert not mbd.interaction.active
assert len(mbd.interaction.positions) == 3
assert mbd.interaction.distance == 2
assert mbd.interaction.pressed_position.x == -1
assert mbd.interaction.pressed_position.y == 0
assert mbd.interaction.current_position.x == 1
assert mbd.interaction.current_position.y == 0
assert mbd.interaction.previous_position.x == 0
assert mbd.interaction.previous_position.y == 0
assert mbd.interaction.released_position.x == 1
assert mbd.interaction.released_position.y == 0
def test_swipe():
mbd = MockBlueDot()
mbd.mock_client_connected()
def simulate_swipe(
pressed_x, pressed_y,
moved_x, moved_y,
released_x, released_y):
mbd.mock_blue_dot_pressed(pressed_x, pressed_y)
mbd.mock_blue_dot_moved(moved_x, moved_y)
mbd.mock_blue_dot_released(released_x, released_y)
#wait_for_swipe
delay_function(lambda: simulate_swipe(-1,0,0,0,1,0), 0.5)
assert mbd.wait_for_swipe(1)
#when_swiped
event_swiped = Event()
mbd.when_swiped = lambda: event_swiped.set()
assert not event_swiped.is_set()
#simulate swipe left to right
simulate_swipe(-1,0,0,0,1,0)
#check event
assert event_swiped.is_set()
#get the swipe
swipe = BlueDotSwipe(mbd.interaction)
assert swipe.right
assert not swipe.left
assert not swipe.up
assert not swipe.down
#right to left
event_swiped.clear()
simulate_swipe(1,0,0,0,-1,0)
assert event_swiped.is_set()
swipe = BlueDotSwipe(mbd.interaction)
assert not swipe.right
assert swipe.left
assert not swipe.up
assert not swipe.down
#bottom to top
event_swiped.clear()
simulate_swipe(0,-1,0,0,0,1)
assert event_swiped.is_set()
swipe = BlueDotSwipe(mbd.interaction)
assert not swipe.right
assert not swipe.left
assert swipe.up
assert not swipe.down
#top to bottom
event_swiped.clear()
simulate_swipe(0,1,0,0,0,-1)
assert event_swiped.is_set()
swipe = BlueDotSwipe(mbd.interaction)
assert not swipe.right
assert not swipe.left
assert not swipe.up
assert swipe.down
# background
event_swiped.clear()
mbd.set_when_swiped(lambda: delay_function(event_swiped.set, 0.2), background=True)
simulate_swipe(0,1,0,0,0,-1)
assert not event_swiped.is_set()
assert event_swiped.wait(1)
def test_callback_in_class():
class CallbackClass():
def __init__(self):
self.event = Event()
def no_pos(self):
self.event.set()
self.pos = None
def with_pos(self, pos):
self.event.set()
self.pos = pos
cc = CallbackClass()
mbd = MockBlueDot()
mbd.mock_client_connected()
mbd.when_pressed = cc.no_pos
mbd.mock_blue_dot_pressed(0,0)
assert cc.event.is_set()
assert cc.pos is None
mbd.mock_blue_dot_released(0,0)
cc.event.clear()
mbd.when_pressed = cc.with_pos
mbd.mock_blue_dot_pressed(0,0)
assert cc.event.is_set()
assert cc.pos.middle
def test_rotation():
mbd = MockBlueDot()
mbd.mock_client_connected()
event_rotated = Event()
mbd.when_rotated = lambda: event_rotated.set()
assert not event_rotated.is_set()
#press the blue dot, no rotation
mbd.mock_blue_dot_pressed(-0.1,1)
assert not event_rotated.is_set()
r = BlueDotRotation(mbd.interaction, mbd.rotation_segments)
assert not r.valid
assert r.value == 0
assert not r.clockwise
assert not r.anti_clockwise
#rotate clockwise
event_rotated.clear()
mbd.mock_blue_dot_moved(0.1,1)
assert event_rotated.is_set()
r = BlueDotRotation(mbd.interaction, mbd.rotation_segments)
assert r.value == 1
assert r.valid
assert r.clockwise
assert not r.anti_clockwise
#rotate anti-clockwise
event_rotated.clear()
mbd.mock_blue_dot_moved(-0.1,1)
assert event_rotated.is_set()
r = BlueDotRotation(mbd.interaction, mbd.rotation_segments)
assert r.value == -1
assert r.valid
assert not r.clockwise
assert r.anti_clockwise
# background
# rotate clockwise again
event_rotated.clear()
mbd.set_when_rotated(lambda: delay_function(event_rotated.set, 0.2), background=True)
mbd.mock_blue_dot_moved(0.1,1)
assert not event_rotated.is_set()
assert event_rotated.wait(1)
def test_allow_pairing():
mbd = MockBlueDot()
assert not mbd.adapter.discoverable
assert not mbd.adapter.pairable
mbd.allow_pairing()
assert mbd.adapter.discoverable
assert mbd.adapter.pairable
def test_dot_appearance():
mbd = MockBlueDot()
assert mbd.color == "blue"
assert mbd.border == False
assert mbd.square == False
assert mbd.visible == True
mbd.color = "red"
mbd.border = True
mbd.square = True
mbd.visible = False
assert mbd.color == "red"
assert mbd.border == True
assert mbd.square == True
assert mbd.visible == False
def test_dot_colors():
from bluedot.colors import BLUE, RED, GREEN, YELLOW
mbd = MockBlueDot()
assert mbd.color == "blue"
assert mbd.color == (0,0,255)
assert mbd.color == BLUE
assert mbd.color == "#0000ff"
assert mbd.color == "#0000ffff"
mbd.color = RED
assert mbd.color == (255,0,0)
assert mbd.color == "red"
assert mbd.color == "#ff0000"
assert mbd.color == "#ff0000ff"
mbd.color = "green"
assert mbd.color == GREEN
assert mbd.color == (0,128,0)
assert mbd.color == "#008000"
assert mbd.color == "#008000ff"
mbd.color = "#ffff00"
assert mbd.color == YELLOW
assert mbd.color == "yellow"
assert mbd.color == (255,255,0)
assert mbd.color == "#ffff00ff"
mbd.color = "#ffffff11"
assert mbd.color == "#ffffff11"
def delay_function(func, time):
delayed_thread = Thread(target = _delayed_function, args = (func, time))
delayed_thread.start()
def _delayed_function(func, time):
sleep(time)
func()
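# A hedged sketch of how this suite is typically run (assumes pytest is installed
# and the bluedot package is importable); MockBlueDot stands in for a real
# Bluetooth client, so no hardware is needed:
#
#     pytest tests/test_blue_dot.py -v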
| 2.359375 | 2 |
src/models/k_mean.py | tringn/image_clustering | 5 | 12778552 |
import os
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pandas as pd
import json
def plot_3d(vector_array, save_plot_dir):
"""
Plot 3D vector features distribution from vector array
:param vector_array: (N x 3) vector array, where N is the number of images
:param save_plot_dir: (string) directory to save plot
:return: save 3D distribution feature to disk
"""
principal_df = pd.DataFrame(data=vector_array, columns=['pc1', 'pc2', 'pc3'])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xs = principal_df['pc1']
ys = principal_df['pc2']
zs = principal_df['pc3']
ax.scatter(xs, ys, zs, s=50, alpha=0.6, edgecolors='w')
ax.set_xlabel('pc1')
ax.set_ylabel('pc2')
ax.set_zlabel('pc3')
plt.savefig(save_plot_dir + '/3D_scatter.png')
plt.close()
def plot_2d(vector_array, save_plot_dir):
"""
Plot 2D vector features distribution from vector array
:param vector_array: (N x 2) vector array, where N is the number of images
:param save_plot_dir: (string) directory to save plot
:return: save 2D distribution feature to disk
"""
principal_df = pd.DataFrame(data = vector_array, columns = ['pc1', 'pc2'])
fig = plt.figure()
ax = fig.add_subplot(111)
xs = principal_df['pc1']
ys = principal_df['pc2']
ax.scatter(xs, ys, s=50, alpha=0.6, edgecolors='w')
ax.set_xlabel('pc1')
ax.set_ylabel('pc2')
plt.savefig(save_plot_dir + '/2D_scatter.png')
plt.close()
def read_vector(img_dir):
"""
Read vector in a directory to array (N x D): N is number of vectors, D is vector's dimension
:param img_dir: (string) directory where feature vectors are
:return: (array) N X D array
"""
vector_files = [f for f in os.listdir(img_dir) if f.endswith(".npz")]
vector_array = []
for img in vector_files:
vector = np.loadtxt(os.path.join(img_dir, img))
vector_array.append(vector)
vector_array = np.asarray(vector_array)
return vector_array, vector_files
def find_best_k(vector_array, save_plot_dir, max_k=100):
"""
Find best number of cluster
:param vector_array: (array) N x D dimension feature vector array
:param save_plot_dir: (string) path to save cost figure
:param max_k: (int) maximum number of cluster to analyze
:return: plot the elbow curve to figure out the best number of cluster
"""
cost = []
dim = vector_array.shape[1]
for i in range(1, max_k):
kmeans = KMeans(n_clusters=i, random_state=0)
kmeans.fit(vector_array)
cost.append(kmeans.inertia_)
# plot the cost against K values
plt.plot(range(1, max_k), cost, color='g', linewidth='3')
plt.xlabel("Value of K")
plt.ylabel("Squared Error (Cost)")
plt.savefig(save_plot_dir + '/cost_' + str(dim) + 'D.png')
plt.close()
def k_mean(vector_array, k):
"""
Apply k-mean clustering approach to assign each feature image in vector array to suitable subsets
:param vector_array: (array) N x D dimension feature vector array
:param k: (int) number of cluster
:return: (array) (N x 1) label array
"""
kmeans = KMeans(n_clusters=k, random_state=0)
kmeans.fit(vector_array)
labels = kmeans.labels_
return labels
def reduce_dim_combine(vector_array, dim=2):
"""
Applying dimension reduction to vector_array
:param vector_array: (array) N x D dimension feature vector array
:param dim: (int) desired dimension after reduction
:return: (array) N x dim dimension feature vector array
"""
# Standardizing the features
vector_array = StandardScaler().fit_transform(vector_array)
# Apply PCA first to reduce dim to 50
pca = PCA(n_components=50)
vector_array = pca.fit_transform(vector_array)
# Apply tSNE to reduce dim to #dim
model = TSNE(n_components=dim, random_state=0)
vector_array = model.fit_transform(vector_array)
return vector_array
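# A minimal sketch of how the helpers above compose into a clustering pipeline,
# assuming a directory of feature-vector files and enough rows/columns for
# PCA with 50 components (the path below is hypothetical):
#
#     vectors, files = read_vector("results/image_vectors/some_object")
#     reduced = reduce_dim_combine(vectors, dim=2)   # StandardScaler -> PCA(50) -> t-SNE
#     labels = k_mean(reduced, k=40)                 # one cluster label per image
#     plot_2d(reduced, "visualization/2D/")          # saves 2D_scatter.png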
if __name__ == "__main__":
# Mode: investigate to find the best k, inference to cluster
# MODE = "investigate"
MODE = "inference"
# Image vectors root dir
img_dir = "results/image_vectors/"
# Final dimension
dim = 2
for object_name in os.listdir(img_dir):
print("Process %s" % object_name)
# object_name = img_dir.split("/")[-1]
vector_array, img_files = read_vector(os.path.join(img_dir, object_name))
# k_mean(vector_array)
if vector_array.shape[0] >= 450:
# Apply dimensional reducing approach
vector_array = reduce_dim_combine(vector_array, dim)
if MODE == "investigate":
# Plot data distribution after reducing dimension
if dim == 2:
save_plot_dir = "visualization/2D/"
plot_2d(vector_array, save_plot_dir)
elif dim == 3:
save_plot_dir = "visualization/3D/"
plot_3d(vector_array, save_plot_dir)
else:
raise ValueError("Unsupported dimension")
# Plot cost chart to find the best value of k
find_best_k(vector_array, save_plot_dir)
continue
# Find label for each image
labels = k_mean(vector_array, k=40).tolist()
assert len(labels) == len(img_files), "Not equal length"
label_dict = [{"img_file": img_files[i].replace(".npz", "").replace(object_name + '_', ""), "label": str(labels[i]), "prob": "1.0"} for i in range(len(labels))]
# Save to disk
label_dir = "results/img_cluster/"
label_outpath = os.path.join(label_dir, object_name + ".json")
# os.makedirs(label_outpath, exist_ok=True)
with open(label_outpath, 'w') as fp:
json.dump({"data": label_dict}, fp)
| 3 | 3 |
src/data_providing_module/data_providers/split_block_provider.py | Freitacr/ML-StockAnalysisProject | 0 | 12778553 |
"""Data Provider module for providing data blocks made from similar stocks over a set time period, but separated.
This data provider is not intended to be used outside of this module; instead, upon import, this module will create an
instance of a SplitBlockProvider and register it with the global DataProviderRegistry. To register a consumer to
receive data from this provider, use the id provided by data_provider_static_names.SPLIT_BLOCK_PROVIDER_ID.
The separation, or split, referred to by this module is that the data block for one cluster is not combined with
the data blocks from other clusters into one large training set. This is in contrast to the ClusteredBlockProvider,
which combines its clusters' blocks into a larger data set.
A detailed argument list that is required by this provider can be found in the generate_data method.
"""
from datetime import datetime as dt, timedelta as td
import configparser
from data_providing_module import configurable_registry
from data_providing_module import data_provider_registry
from data_providing_module.data_providers import data_provider_static_names
from stock_data_analysis_module.data_processing_module import stock_cluster_data_manager
from general_utils.config import config_util
ENABLED_CONFIG_ID = "enabled"
class SplitBlockProvider(data_provider_registry.DataProviderBase):
"""Data Provider that provides data constructed by clustering stocks, but keeping the cluster's data separate
The organization of these clusters is handled according to the specifications established in the
StockClusterDataManager, and will operate on the time frame [start_date, end_date]. This time frame is currently
fixed where end_date is the current date, and start_date is 52 * 4 weeks ago (approximately four years).
Additionally this provider supports configuration of certain parameters through the configuration file. These
parameters are listed in the Configurable Parameters section.
Configurable Parameters:
enabled: Whether this provider is enabled for consumers to receive data from.
"""
def generate_prediction_data(self, *args, **kwargs):
"""Generates data that consumers will use to make predictions for the next trading day.
Currently there is no implementation for this, and calling the method will result in a NotImplementedError
"""
raise NotImplementedError()
def __init__(self):
"""Initializes a SplitBlockProvider and registers it to the global DataProviderRegistry
"""
super(SplitBlockProvider, self).__init__()
configurable_registry.config_registry.register_configurable(self)
def write_default_configuration(self, section: "configparser.SectionProxy"):
"""Writes default configuration values into the SectionProxy provided.
For more details see abstract class documentation.
"""
section[ENABLED_CONFIG_ID] = "True"
def load_configuration(self, parser: "configparser.ConfigParser"):
"""Attempts to load the configurable parameters for this provider from the provided parser.
For more details see abstract class documentation.
"""
section = config_util.create_type_section(parser, self)
if not parser.has_option(section.name, ENABLED_CONFIG_ID):
self.write_default_configuration(section)
enabled = parser.getboolean(section.name, ENABLED_CONFIG_ID)
if enabled:
data_provider_registry.registry.register_provider(data_provider_static_names.SPLIT_BLOCK_PROVIDER_ID, self)
def generate_data(self, *args, **kwargs):
"""Generates data for Consumers to use by clustering together stocks in a time period,
The time period for cluster creation is a period of 52 * 4 weeks (approximately 4 years).
Consumers requiring data from this provider are expected to provide the arguments specified in the
*args entry of the Arguments section
The split portion of this data provider is that the data returned is split into different entries in a
dictionary, keyed off of the root stock's ticker. The root stock is the stock that the cluster is based around
and all other data in the cluster is deemed as being similar to the root stock's data.
Arguments:
*args:
List of arguments that are expected to be in the following order, with the specified types
train_columns: List[str]
List of names of columns from a StockDataTable. These will be used to retrieve data
from the database and construct the returned data blocks
expectation_columns: List[int]
List of integers representing the indices of the columns to be used as the target data
in the generation of the data blocks
Returns:
See StockClusterDataManager.retrieve_training_data_movement_targets_split
"""
if len(args) < 1:
raise ValueError('Expected at least the first argument from the following list:' +
' train_columns: List["str"], expectation_columns: List["int"]')
columns = args[0]
expectation_columns = None
if len(args) == 2:
expectation_columns = args[1]
start_date = dt.now() - td(weeks=(52 * 4))
start_date = start_date.isoformat()[:10].replace('-', '/')
end_date = dt.now().isoformat()[:10].replace('-', '/')
data_retriever = stock_cluster_data_manager.StockClusterDataManager(start_date, end_date, column_list=columns)
return data_retriever.retrieveTrainingDataMovementTargetsSplit(expectation_columns=expectation_columns)
provider = SplitBlockProvider()
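# A hedged usage sketch (not part of the module): per generate_data's docstring,
# a consumer registered under data_provider_static_names.SPLIT_BLOCK_PROVIDER_ID
# receives data built from arguments passed in this order. The column names below
# are hypothetical examples, not confirmed StockDataTable columns, and calling
# generate_data directly here only illustrates the argument contract:
#
#     train_columns = ["high_price", "low_price", "volume"]
#     expectation_columns = [0]
#     data = provider.generate_data(train_columns, expectation_columns)
#     # 'data' is a dict keyed by each cluster's root stock ticker.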
| 2.78125 | 3 |
aiakos/urls.py | aiakos/aiakos | 4 | 12778554 |
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.views.generic import RedirectView, TemplateView
from django_extauth import v1 as auth_v1
from rest_framework import routers
from .openid_provider import v1 as oauth_v1
v1 = routers.DefaultRouter()
v1.register(r'clients', oauth_v1.ClientViewSet)
v1.register(r'accounts', auth_v1.AccountViewSet, base_name='account')
urlpatterns = [
url(r'^v1/', include(v1.urls)),
url(r'^admin/', admin.site.urls),
url(r'^', include('django_extauth.urls', namespace='extauth')),
url(r'^', include('aiakos.openid_provider.urls', namespace='openid_provider')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 2.71875 | 3 |
AmbidexteriousBounce.py | l0vemachin3/AmbidexteriousBounce | 0 | 12778555 |
from tkinter import *
import random
import time
class Ball:
def __init__(self, canvas, paddle, paddle2, color):
self.canvas = canvas
self.paddle = paddle
self.paddle2 = paddle2
self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
self.canvas.move(self.id, 245, 100)
starts = [-3, -2, -1, 1, 2, 3]
random.shuffle(starts)
self.x = starts[0]
self.y = -3
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
self.hit_bottom = False
def draw(self):
self.canvas.move(self.id, self.x, self.y)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.y = 3
if pos[3] >= self.canvas_height:
self.hit_bottom = True
if self.hit_paddle(pos) == True:
self.y = -3
if self.hit_paddle2(pos) == True:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
def hit_paddle(self, pos):
paddle_pos = self.canvas.coords(self.paddle.id)
if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]:
return True
return False
def hit_paddle2(self, pos):
paddle2_pos = self.canvas.coords(self.paddle2.id)
if pos[2] >= paddle2_pos[0] and pos[0] <= paddle2_pos[2]:
if pos[3] >= paddle2_pos[1] and pos[3] <= paddle2_pos[3]:
return True
return False
class Ball2:
def __init__(self, canvas, paddle, paddle2, color):
self.canvas = canvas
self.paddle = paddle
self.paddle2 = paddle2
self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
self.canvas.move(self.id, 245, 100)
starts = [-3, -2, -1, 1, 2, 3]
random.shuffle(starts)
self.x = starts[0]
self.y = -3
self.canvas_height = self.canvas.winfo_height()
self.canvas_width = self.canvas.winfo_width()
self.hit_bottom = False
def draw(self):
self.canvas.move(self.id, self.x, self.y)
pos = self.canvas.coords(self.id)
if pos[1] <= 0:
self.y = 3
if pos[3] >= self.canvas_height:
self.hit_bottom = True
if self.hit_paddle(pos) == True:
self.y = -3
if self.hit_paddle2(pos) == True:
self.y = -3
if pos[0] <= 0:
self.x = 3
if pos[2] >= self.canvas_width:
self.x = -3
def hit_paddle(self, pos):
paddle_pos = self.canvas.coords(self.paddle.id)
if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]:
return True
return False
def hit_paddle2(self, pos):
paddle2_pos = self.canvas.coords(self.paddle2.id)
if pos[2] >= paddle2_pos[0] and pos[0] <= paddle2_pos[2]:
if pos[3] >= paddle2_pos[1] and pos[3] <= paddle2_pos[3]:
return True
return False
class Paddle:
def __init__(self, canvas, color):
self.canvas = canvas
self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
self.canvas.move(self.id, 200, 300)
self.x = 0
self.canvas_width = self.canvas.winfo_width()
self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
self.canvas.bind_all('<KeyPress-Right>', self.turn_right)
self.canvas.bind_all('<KeyPress-Button_1>')
def draw(self):
self.canvas.move(self.id, self.x, 0)
pos = self.canvas.coords(self.id)
if pos[0] <= 0:
self.x = 0
elif pos[2] >= self.canvas_width:
self.x = 0
def turn_left(self, evt):
self.x = -2
def turn_right(self, evt):
self.x = 2
class Paddle2:
def __init__(self, canvas, color):
self.canvas = canvas
self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
self.canvas.move(self.id, 200, 300)
self.x = 0
self.canvas_width = self.canvas.winfo_width()
self.canvas.bind_all('<KeyPress-a>', self.turn_left)
self.canvas.bind_all('<KeyPress-d>', self.turn_right)
def draw(self):
self.canvas.move(self.id, self.x, 0)
pos = self.canvas.coords(self.id)
if pos[0] <= 0:
self.x = 0
elif pos[2] >= self.canvas_width:
self.x = 0
def turn_left(self, evt):
self.x = -2
def turn_right(self, evt):
self.x = 2
tk = Tk()
tk.title("Ambidexterious Bounce")
tk.resizable(0, 0)
tk.wm_attributes("-topmost", 1)
canvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
canvas.pack()
tk.update()
paddle2 = Paddle2(canvas, 'blue')
paddle = Paddle(canvas, 'red')
ball2 = Ball2(canvas, paddle, paddle2, 'yellow')
ball = Ball(canvas, paddle, paddle2, 'green')
while 1:
if ball.hit_bottom == False:
if ball2.hit_bottom == False:
ball2.draw()
ball.draw()
paddle.draw()
paddle2.draw()
tk.update_idletasks()
tk.update()
time.sleep(0.01)
| 3.3125 | 3 |
Python/lc_13.py | cmattey/leetcode_problems | 6 | 12778556 |
# 13. Roman to Integer
# Time: O(len(s))
# Space: O(1)
class Solution:
def romanToInt(self, s: str) -> int:
roman_map = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000,
'IV':4,'IX':9,
'XL':40,'XC':90,
'CD':400,'CM':900}
num = 0
index = 0
while index in range(len(s)):
if index+1 in range(len(s)):
if s[index]+s[index+1] in roman_map:
num+=roman_map[s[index]+s[index+1]]
index+=1
else:
num+=roman_map[s[index]]
else:
num+=roman_map[s[index]]
index+=1
return num
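# Hedged usage sketch: the two-character keys let subtractive pairs be consumed
# in a single step, e.g.
#
#     Solution().romanToInt("MCMXCIV")  # -> 1994 (M + CM + XC + IV)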
| 3.234375 | 3 |
PEtab_problems/Code/Tumor_2d/tumor_script.py | EmadAlamoudi/FMC_paper | 0 | 12778557 |
from time import time
import tumor2d
from fitmulticell.sumstat import SummaryStatistics as ss
import matplotlib.pyplot as plt
from string import capwords
import os
import pyabc
from fitmulticell.model import MorpheusModel
import numpy as np
import scipy
def eucl_dist(sim, obs):
total = 0
for key in sim:
if key == 'loc':
continue
total += scipy.stats.ks_2samp(sim[key], obs[key]).statistic
return total
pop_size = 2
min_eps = 750
min_eps_ori = min_eps
max_nr_pop = 2
# logfilepath = "/home/emad/Insync/<EMAIL>/Google_Drive/Bonn/Github/FMC_paper/PEtab_problems/Code/Tumor_2d/TumorStats.txt"
problempath = "/home/emad/Insync/<EMAIL>/Google_Drive/Bonn/Github/FMC_paper/PEtab_problems/Tumor_2D/Tumour_Spheroid_ScenI_1e.xml"
par_map = {'k_div_max': './Global/Constant[@symbol="k_div_max"]',
'L_init': './Global/Constant[@symbol="L_init"]',
'q_init': './Global/Constant[@symbol="q_init"]',
'L_div': './Global/Constant[@symbol="L_div"]',
'ke_pro': './Global/Constant[@symbol="ke_pro"]',
'ke_deg': './Global/Constant[@symbol="ke_deg"]',
'e_div': './Global/Constant[@symbol="e_div"]',
}
start_time = time()
observation_par = {"k_div_max": 4.17e-2,
"L_init": 1.2e1,
"q_init": 7.5e-1,
"L_div": 100,
"ke_pro": 5e-3,
"ke_deg": 8e-4,
"e_div": 1e-2}
sumstat = ss(output_file="logger_1.csv", ignore=["cell.id", "time"])
model = MorpheusModel(
model_file=problempath,
par_map=par_map,
executable="/home/emad/morpheus-2.2.5",
sumstat=sumstat,
)
observation_morpheus = model.sample(observation_par)
model.par_scale = "log10"
# observation_origin = tumor2d.simulate(division_rate=4.17e-2,
# initial_spheroid_radius=1.2e1,
# initial_quiescent_cell_fraction=7.5e-1,
# division_depth=100,
# ecm_production_rate=5e-3,
# ecm_degradation_rate=8e-4,
# ecm_division_threshold=1e-2)
limits = dict(k_div_max=(-3, -1),
L_init=(1, 3),
q_init=(0, 1.2),
L_div=(-5, 0),
ke_pro=(-5, 0),
ke_deg=(-5, 0),
e_div=(-5, 0))
#
prior = pyabc.Distribution(**{key: pyabc.RV("uniform", a, b - a)
for key, (a, b) in limits.items()})
# data_mean = tumor2d.load_default()[1] # (raw, mean, var)
# In[6]:
# redis_sampler = pyabc.sampler.RedisEvalParallelSampler(host=host, port=port, look_ahead = False)
abc = pyabc.ABCSMC(models=model,
parameter_priors=prior,
distance_function=eucl_dist,
population_size=pop_size)
db_path = "sqlite:///" + "/tmp/" + "test_14param_Felipe.db"
abc.new(db_path, observation_morpheus)
history_f = abc.run(max_nr_populations=max_nr_pop, minimum_epsilon=min_eps_ori)
# petab_problem_path = "/home/emad/Insync/<EMAIL>/Google_Drive/Bonn/Github/FMC_paper" + '/PEtab_problems' + '/Tumor_2D' + '/Tumor_2D.yaml'
# petab_problem = petab_MS.Problem.from_yaml(petab_problem_path)
# importer = PetabImporter(petab_problem)
# PEtab_prior = importer.create_prior()
# par_map_imported = importer.get_par_map()
# obs_pars_imported = petab_problem.get_x_nominal_dict(scaled=True)
# PEtab_par_scale = petab_problem.get_optimization_parameter_scales()
# dict_data_imported = petab_problem.get_measurement_dict()
# PEtab_model = importer.create_model()
# PEtab_model.timeout = 900
# PEtab_model.ignore_list = ["cell.id", "Tension", "time"]
#
# PEtab_tryjectory = PEtab_model.sample(obs_pars_imported)
# model_dir = "/home/emad/Insync/<EMAIL>/Google_Drive/Bonn/Github/FMC_paper" + '/PEtab_problems' + '/Liver_regeneration' + '/YAP_Signaling_Liver_Regeneration_Model_reparametrized_further.xml'
#
# abc = pyabc.ABCSMC(PEtab_model, PEtab_prior, eucl_dist, population_size=2,
# eps=QuantileEpsilon(alpha=0.3), all_accepted=False)
#
# db_path = ("sqlite:///" +
# os.path.join(tempfile.gettempdir(), "test.db"))
# history = abc.new(db_path, dict_data_imported)
# abc.run(max_nr_populations=2)
| 2.03125 | 2 |
policykit/integrations/metagov/views.py | hozzjss/policykit | 1 | 12778558 |
import json
import logging
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseServerError,
HttpResponseNotFound,
)
from django.views.decorators.csrf import csrf_exempt
from integrations.metagov.models import MetagovProcess, MetagovPlatformAction, MetagovUser
from policyengine.models import Community, CommunityPlatform, CommunityRole
from integrations.slack.models import SlackCommunity
logger = logging.getLogger(__name__)
# INTERNAL ENDPOINT, no auth
@csrf_exempt
def internal_receive_outcome(request, id):
if request.method != "POST" or not request.body:
return HttpResponseBadRequest()
try:
body = json.loads(request.body)
except ValueError:
return HttpResponseBadRequest("unable to decode body")
logger.info(f"Received external process outcome: {body}")
# Special case for Slack voting mechanism
if body["name"] == "slack.emoji-vote":
community = SlackCommunity.objects.get(community__metagov_slug=body["community"])
community.handle_metagov_process(body)
return HttpResponse()
try:
process = MetagovProcess.objects.get(pk=id)
except MetagovProcess.DoesNotExist:
return HttpResponseNotFound()
process.json_data = json.dumps(body)
process.save()
return HttpResponse()
# INTERNAL ENDPOINT, no auth
@csrf_exempt
def internal_receive_action(request):
"""
Receive event from Metagov
"""
if request.method != "POST" or not request.body:
return HttpResponseBadRequest()
try:
body = json.loads(request.body)
except ValueError:
return HttpResponseBadRequest("unable to decode body")
logger.info(f"Received metagov action: {body}")
metagov_community_slug = body.get("community")
try:
community = Community.objects.get(metagov_slug=metagov_community_slug)
except Community.DoesNotExist:
logger.error(f"Received event for community {metagov_community_slug} which doesn't exist in PolicyKit")
return HttpResponseBadRequest("Community does not exist")
# Special cases for receiving events from "governable platforms" that have fully featured integrations
if body.get("source") == "slack":
# Route Slack event to the correct SlackCommunity handler
slack_community = SlackCommunity.objects.filter(community=community).first()
if slack_community is None:
return HttpResponseBadRequest(f"no slack community exists for {metagov_community_slug}")
slack_community.handle_metagov_event(body)
return HttpResponse()
# For all other sources, create generic MetagovPlatformActions.
platform_community = CommunityPlatform.objects.filter(community=community).first()
if platform_community is None:
logger.error(f"No platforms exist for community '{community}'")
return HttpResponse()
# Get or create a MetagovUser that's tied to the PlatformCommunity, and give them permission to propose MetagovPlatformActions
# Hack so MetagovUser username doesn't clash with usernames from other communities (django User requires unique username).
# TODO(#299): make the CommunityUser model unique on community+username, not just username.
initiator = body["initiator"]
prefixed_username = f"{initiator['provider']}.{initiator['user_id']}"
metagov_user, _ = MetagovUser.objects.get_or_create(
username=prefixed_username, provider=initiator["provider"], community=platform_community
)
# Give this user permission to propose any MetagovPlatformAction
user_group, usergroup_created = CommunityRole.objects.get_or_create(
role_name="Base User", name=f"Metagov: {metagov_community_slug}: Base User"
)
if usergroup_created:
user_group.community = platform_community
content_type = ContentType.objects.get_for_model(MetagovPlatformAction)
permission, _ = Permission.objects.get_or_create(
codename="add_metagovaction",
name="Can add metagov action",
content_type=content_type,
)
user_group.permissions.add(permission)
user_group.save()
user_group.user_set.add(metagov_user)
# Create MetagovPlatformAction
new_api_action = MetagovPlatformAction()
new_api_action.community = platform_community
new_api_action.initiator = metagov_user
new_api_action.event_type = f"{body['source']}.{body['event_type']}"
new_api_action.json_data = json.dumps(body["data"])
# Save to create Proposal and trigger policy evaluations
new_api_action.save()
if not new_api_action.pk:
return HttpResponseServerError()
logger.info(f"Created new MetagovPlatformAction with pk {new_api_action.pk}")
return HttpResponse()
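# A hedged sketch of the kind of JSON body internal_receive_action expects,
# inferred only from the fields the handler reads above (every value below is
# a hypothetical example, not a documented Metagov payload):
#
#     {
#         "community": "my-community-slug",
#         "source": "discourse",
#         "event_type": "post_created",
#         "initiator": {"user_id": "1234", "provider": "discourse"},
#         "data": {"raw": "..."}
#     }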
| 1.921875 | 2 |
piper/test/test_verbs.py | miketarpey/piper | 0 | 12778559 |
from piper.custom import to_julian
from piper.factory import dummy_dataframe
from piper.factory import sample_column_clean_text
from piper.factory import sample_data
from piper.factory import sample_phone_sales
from piper.factory import sample_sales
from piper.factory import simple_series
from piper.verbs import across
from piper.verbs import adorn
from piper.verbs import assign
from piper.verbs import clean_names
from piper.verbs import count
from piper.verbs import distinct
from piper.verbs import drop
from piper.verbs import drop_if
from piper.verbs import duplicated
from piper.verbs import explode
from piper.verbs import flatten_names
from piper.verbs import fmt_dateidx
from piper.verbs import group_by
from piper.verbs import head
from piper.verbs import info
from piper.verbs import inner_join
from piper.verbs import left_join
from piper.verbs import names
from piper.verbs import non_alpha
from piper.verbs import order_by
from piper.verbs import outer_join
from piper.verbs import overlaps
from piper.verbs import pivot_longer
from piper.verbs import pivot_table
from piper.verbs import relocate
from piper.verbs import rename
from piper.verbs import rename_axis
from piper.verbs import replace_names
from piper.verbs import reset_index
from piper.verbs import right_join
from piper.verbs import rows_to_names
from piper.verbs import sample
from piper.verbs import select
from piper.verbs import set_names
from piper.verbs import split_dataframe
from piper.verbs import str_clean_number
from piper.verbs import str_join
from piper.verbs import str_split
from piper.verbs import str_trim
from piper.verbs import summarise
from piper.verbs import summary_df
from piper.verbs import tail
from piper.verbs import transform
from piper.verbs import unstack
from piper.verbs import where
from pandas.api.types import is_float_dtype
from pandas._testing import assert_frame_equal
from pandas._testing import assert_series_equal
import numpy as np
import pandas as pd
import pytest
import random
# t_sample_phone_sales {{{1
@pytest.fixture
def t_sample_phone_sales():
return sample_phone_sales()
# t_sample_sales {{{1
@pytest.fixture
def t_sample_sales():
return sample_sales()
# t_sample_data {{{1
@pytest.fixture
def t_sample_data():
return sample_data()
# t_sample_column_clean_text {{{1
@pytest.fixture
def t_sample_column_clean_text():
return sample_column_clean_text()
# t_dummy_dataframe {{{1
@pytest.fixture
def t_dummy_dataframe():
return dummy_dataframe()
# t_simple_series_01 {{{1
@pytest.fixture
def t_simple_series_01():
return simple_series()
# get_column_list {{{1
@pytest.fixture
def get_column_list():
""" Column list for dataframe
"""
column_list = ['dupe**', 'Customer ', 'mdm no. to use',
'Target-name ', ' Public', '_ Material',
'Prod type', '#Effective ', 'Expired',
'Price% ', 'Currency$']
return column_list
# test_across_single_column_series_object_function {{{1
def test_across_single_column_series_object_function(t_sample_data):
df = t_sample_data
df = across(df, columns='values_1',
function= lambda x: x.astype(float),
series_obj=True)
assert pd.api.types.is_float_dtype(df.values_1)
# test_across_tuple_column_series_object_function {{{1
def test_across_tuple_column_series_object_function(t_sample_data):
df = t_sample_data
df = across(df, columns=('values_1', 'values_2'),
function= lambda x: x.astype(float),
series_obj=True)
assert pd.api.types.is_float_dtype(df.values_1)
assert pd.api.types.is_float_dtype(df.values_2)
# test_across_list_column_series_object_function {{{1
def test_across_list_column_series_object_function(t_sample_data):
df = t_sample_data
df = across(df, columns=['values_1'],
function= lambda x: x.astype(float),
series_obj=True)
assert pd.api.types.is_float_dtype(df.values_1)
# test_across_list_column_not_series_object {{{1
def test_across_list_column_not_series_object(t_sample_data):
df = t_sample_data
df = across(df, columns=['order_dates', 'dates'],
function=lambda x: to_julian(x), series_obj=False)
assert df.loc[360, 'dates'] == 120361
# test_across_list_column_not_series_raise_error {{{1
def test_across_list_column_not_series_raise_error(t_sample_data):
df = t_sample_data
with pytest.raises(ValueError):
df = across(df, columns=['order_dates', 'dates'],
function=lambda x: to_julian(x), series_obj=True)
# test_across_list_column_series_values_raise_attr_error {{{1
def test_across_list_column_series_values_raise_attr_error(t_sample_data):
df = t_sample_data
with pytest.raises(AttributeError):
across(df, columns=['values_1', 'values_2'],
function=lambda x: x.astype(int), series_obj=False)
# test_adorn_row_total {{{1
def test_adorn_row_total(t_sample_data):
df = t_sample_data
df = group_by(df, ['countries'])
df = summarise(df, total=('values_1', 'sum'))
df = adorn(df)
expected = 73604
actual = df.loc['All'].values[0]
assert expected == actual
# test_adorn_column_total {{{1
def test_adorn_column_total(t_sample_data):
df = t_sample_data
df = group_by(df, ['countries'])
df = summarise(df, total=('values_1', 'sum'))
df = adorn(df, axis = 'column')
expected = 8432
actual = df.loc['Sweden', 'All']
assert expected == actual
# test_adorn_with_ignore_row_index {{{1
def test_adorn_row_with_ignore_row_index(t_sample_data):
df = t_sample_data
df = group_by(df, ['countries'])
df = summarise(df, total=('values_1', 'sum')).reset_index()
df = adorn(df, axis = 'row', ignore_index=True)
expected = 'All'
actual = df.iloc[df.shape[0]-1, 0]
assert expected == actual
# test_adorn_column_with_column_specified {{{1
def test_adorn_column_with_column_specified(t_sample_data):
df = t_sample_data
df = group_by(df, ['countries'])
df = summarise(df, total=('values_1', 'sum'))
df = adorn(df, columns='total', axis = 'column')
expected = 8432
actual = df.loc['Sweden', 'All']
assert expected == actual
# test_adorn_column_with_column_list_specified {{{1
def test_adorn_column_with_column_list_specified(t_sample_data):
df = t_sample_data
df = group_by(df, ['countries', 'regions'])
df = summarise(df, total=('values_1', 'sum'))
df = assign(df, total2=lambda x: x.total * 10)
df = adorn(df, columns=['total', 'total2'], axis = 'both')
expected = ['total', 'total2', 'All']
actual = df.columns.tolist()
assert expected == actual
# test_adorn_column_with_column_str_specified {{{1
def test_adorn_column_with_column_str_specified(t_sample_data):
df = t_sample_data
df = group_by(df, ['countries', 'regions'])
df = summarise(df, total=('values_1', 'sum'))
df = assign(df, total2=lambda x: x.total * 10)
df = adorn(df, columns='total', axis = 'both')
expected = ['total', 'total2', 'All']
actual = df.columns.tolist()
assert expected == actual
# test_assign {{{1
def test_assign(t_sample_data):
"""
"""
df = t_sample_data
df = where(df, "ids == 'A'")
df = where(df, "values_1 > 300 & countries.isin(['Italy', 'Spain'])")
df = assign(df, new_field=lambda x: x.countries.str[:3]+x.regions,
another=lambda x:3*x.values_1)
expected = ['dates', 'order_dates', 'countries',
'regions', 'ids', 'values_1',
'values_2', 'new_field', 'another']
actual = df.columns.tolist()
assert expected == actual
# test_assign_with_dataframe_object_formulas {{{1
def test_assign_with_dataframe_object_formulas(t_sample_data):
"""
"""
df = t_sample_data
df = where(df, "ids == 'A'")
df = where(df, "values_1 > 300 & countries.isin(['Italy', 'Spain'])")
df = assign(df, new_field=lambda x: x.countries.str[:3] + x.regions,
another=lambda x: 3*x.values_1)
expected = ['dates', 'order_dates', 'countries', 'regions', 'ids', 'values_1',
'values_2', 'new_field', 'another']
actual = df.columns.tolist()
assert expected == actual
# test_assign_with_tuple_function {{{1
def test_assign_with_tuple_function(t_sample_data):
df = t_sample_data
df = assign(df, reversed=('regions', lambda x: x[::-1]))
df = select(df, ['-dates', '-order_dates'])
expected = ['countries', 'regions', 'ids', 'values_1', 'values_2', 'reversed']
actual = df.columns.tolist()
assert expected == actual
# test_assign_with_value_error {{{1
def test_assign_with_value_error(t_sample_data):
df = t_sample_data
with pytest.raises(ValueError):
actual = assign(df, reversed=lambda x: x[::-1])
# test_replace_names {{{1
def test_replace_names():
dict_ = {'number$': 'nbr', 'revenue per cookie': 'unit revenue',
'cost per cookie': 'unit cost', 'month': 'mth',
'product': 'item', 'year': 'yr'}
cols = ['Country', 'Product', 'Units Sold', 'Revenue per cookie', 'Cost per cookie',
'Revenue', 'Cost', 'Profit', 'Date', 'Month Number', 'Month Name', 'Year']
expected = ['country','item', 'units_sold', 'unit_revenue', 'unit_cost',
'revenue', 'cost', 'profit', 'date', 'mth_nbr', 'mth_name', 'yr']
df = pd.DataFrame(None, columns=cols)
df = replace_names(df, dict_, info=True)
df = clean_names(df)
assert expected == list(df.columns)
# test_clean_names_report_case_with_title {{{1
def test_clean_names_report_case_with_title(get_column_list):
"""
"""
expected = ['Dupe', 'Customer', 'Mdm No To Use', 'Target Name', 'Public',
'Material', 'Prod Type', 'Effective', 'Expired',
'Price', 'Currency']
dx = pd.DataFrame(None, columns=get_column_list)
actual = clean_names(dx, case='report', title=True).columns.tolist()
assert expected == actual
# test_clean_names_snake_case_with_title {{{1
def test_clean_names_snake_case_with_title(get_column_list):
"""
"""
expected = ['dupe', 'customer', 'mdm_no_to_use', 'target_name', 'public',
'material', 'prod_type', 'effective', 'expired',
'price', 'currency']
dx = pd.DataFrame(None, columns=get_column_list)
actual = clean_names(dx, case='snake').columns.tolist()
assert expected == actual
# test_clean_names_camel_case_with_title {{{1
def test_clean_names_camel_case_with_title(get_column_list):
"""
"""
expected = ['Dupe', 'Customer', 'MdmNoToUse', 'TargetName', 'Public',
'Material', 'ProdType', 'Effective', 'Expired', 'Price', 'Currency']
dx = pd.DataFrame(None, columns=get_column_list)
actual = clean_names(dx, case='camel', title=True).columns.tolist()
assert expected == actual
# test_clean_names_camel_case_without_title {{{1
def test_clean_names_camel_case_without_title(get_column_list):
"""
"""
expected = ['dupe', 'customer', 'mdmNoToUse', 'targetName', 'public',
'material', 'prodType', 'effective', 'expired', 'price', 'currency']
dx = pd.DataFrame(None, columns=get_column_list)
actual = clean_names(dx, case='camel', title=False).columns.tolist()
assert expected == actual
# test_names_as_list {{{1
def test_names_dataframe(t_sample_data):
df = t_sample_data
expected = ['dates', 'order_dates', 'countries', 'regions', 'ids', 'values_1', 'values_2']
actual = names(df, astype='list')
assert expected == actual
expected = {'ids': 'ids', 'regions': 'regions'}
actual = names(df[['ids', 'regions']], astype='dict')
assert expected == actual
expected = pd.DataFrame(['ids', 'regions'], columns=['column_names'])
actual = names(df[['ids', 'regions']], astype='dataframe')
assert_frame_equal(expected, actual)
expected = "['ids', 'regions']"
actual = names(df[['ids', 'regions']], astype='text')
assert expected == actual
# test_names_as_series {{{1
def test_names_as_series(t_sample_data):
df = t_sample_data
cols = ['dates', 'order_dates', 'countries', 'regions', 'ids', 'values_1', 'values_2']
expected = pd.Series(cols, index=range(len(cols)), name='column_names')
actual = names(df, astype='series')
assert_series_equal(expected, actual)
# test_names_as_regex {{{1
def test_names_as_regex(t_sample_data):
prices = {
'prices': [100, 200, 300],
'contract': ['A', 'B', 'A'],
'effective': ['2020-01-01', '2020-03-03', '2020-05-30'],
'expired': ['2020-12-31', '2021-04-30', '2022-04-01']
}
df = pd.DataFrame(prices)
expected = ['effective', 'expired']
actual = names(df, regex='e', astype='list')
assert expected == actual
# test_rows_to_names_with_title {{{1
def test_rows_to_names_with_title():
data = {'A': ['Order', 'Qty', 10, 40],
'B': ['Order', 'Number', 12345, 12346]}
df = pd.DataFrame(data)
df = rows_to_names(df, delimitter=' ')
expected = ['Order Qty', 'Order Number']
actual = df.columns.to_list()
assert expected == actual
# test_rows_to_names_title_bad_data {{{1
def test_rows_to_names_title_bad_data():
data = {'A': ['Order ', 'Qty ', 10, 40],
'B': [' Order', ' Number%', 12345, 12346]}
df = pd.DataFrame(data)
df = rows_to_names(df, delimitter=' ')
expected = ['Order Qty', 'Order Number']
actual = df.columns.to_list()
assert expected == actual
# test_rows_to_names_with_nans {{{1
def test_rows_to_names_with_nans():
data = {'A': ['Customer', 'id', 48015346, 49512432],
'B': ['Order', 'Number', 'DE-12345', 'FR-12346'],
'C': [np.nan, 'Qty', 10, 40],
'D': ['Item', 'Number', 'SW-10-2134', 'YH-22-2030'],
'E': [np.nan, 'Description', 'Screwdriver Set', 'Workbench']}
df = pd.DataFrame(data)
df = rows_to_names(df, fillna=True)
expected = ['Customer Id', 'Order Number', 'Order Qty',
'Item Number', 'Item Description']
actual = df.columns.to_list()
assert expected == actual
# test_count_series{ {{1
def test_count_series(t_simple_series_01):
s1 = t_simple_series_01
expected = (3, 3)
actual = count(s1).shape
assert expected == actual
# test_count_sort {{{1
def test_count_sort(t_simple_series_01):
s1 = t_simple_series_01
expected = 3
count_ = count(s1, sort_values=False)
actual = count_.loc['E', 'n']
assert expected == actual
# test_count_with_total {{{1
def test_count_with_total(t_simple_series_01):
s1 = t_simple_series_01
expected = 100.0
count_ = count(s1, totals=True)
actual = count_.loc['Total', '%']
assert expected == actual
# test_count_with_total_percent_cum_percent {{{1
def test_count_with_total_percent_cum_percent(t_simple_series_01):
s1 = t_simple_series_01
expected = (4, 3)
actual = count(s1, totals=True, sort_values=True,
percent=True, cum_percent=True).shape
assert expected == actual
# test_count_with_cum_percent_with_threshold {{{1
def test_count_with_cum_percent_with_threshold(t_simple_series_01):
s1 = t_simple_series_01
expected = (2, 3)
count_ = count(s1, threshold=81, cum_percent=True)
actual = count_.shape
assert expected == actual
# test_count_not_found_column {{{1
def test_count_not_found_column(t_sample_data):
df = t_sample_data
expected = None
actual = count(df, 'invalid_column')
assert expected == actual
# test_count_no_column {{{1
def test_count_no_column(t_sample_data):
df = t_sample_data
expected = (7, 3)
actual = count(df).shape
assert expected == actual
# test_count_column_reset_index_true {{{1
def test_count_column_reset_index_true(t_sample_data):
df = t_sample_data
expected = (8, 4)
actual = count(df, 'countries', reset_index=True).shape
assert expected == actual
# test_count_single_column {{{1
def test_count_single_column(t_sample_data):
df = t_sample_data
expected = (4, 3)
actual = count(df, 'regions').shape
assert expected == actual
# test_count_multi_column {{{1
def test_count_multi_column(t_sample_data):
df = t_sample_data
query = "regions == 'East' and countries.isin(['Italy'])"
df = df.query(query)
expected = (1, 5)
actual = count(df, ['regions', 'countries']).reset_index().shape
assert expected == actual
# test_count_multi_column_with_percent {{{1
def test_count_multi_column_with_percent(t_sample_data):
df = t_sample_data
df = df.query("regions == 'East' and countries.isin(['Italy'])")
df = count(df, ['regions', 'countries'], percent=True, cum_percent=True)
expected = 100.0
actual = df.loc[('East', 'Italy'), 'cum %']
assert expected == actual
# test_count_multi_column_with_cum_percent_threshold {{{1
def test_count_multi_column_with_cum_percent_threshold(t_sample_data):
df = t_sample_data
df = df.query("regions == 'East'")
df = count(df, ['regions', 'countries'],
percent=True, cum_percent=True, threshold=81)
expected = (5, 3)
actual = df.shape
assert expected == actual
# test_count_with_categorical {{{1
def test_count_with_categorical(t_sample_data):
df = t_sample_data
df.countries = df.countries.astype('category')
df = df.query("regions == 'East'")
df = count(df, 'countries',
percent=True, cum_percent=True, threshold=81)
expected = (5, 3)
actual = df.shape
assert expected == actual
# test_distinct {{{1
def test_distinct(t_sample_data):
df = t_sample_data
df = select(df, ['countries', 'regions', 'ids'])
df = distinct(df, 'ids', shape=True)
expected = (5, 3)
actual = df.shape
assert expected == actual
# test_drop {{{1
def test_drop(t_sample_data):
"""
"""
df = t_sample_data
df = drop(df, columns=['countries', 'regions'])
expected = ['dates', 'order_dates', 'ids', 'values_1', 'values_2']
actual = df.columns.tolist()
assert expected == actual
# test_drop_if {{{1
def test_drop_if(t_dummy_dataframe):
"""
"""
df = t_dummy_dataframe
expected = (5, 5)
actual = drop_if(df).shape
assert expected == actual
# test_drop_if_isna {{{1
def test_drop_if_isna(t_dummy_dataframe):
"""
"""
df = t_dummy_dataframe
expected = (5, 5)
df.loc[:, 'blank_1': 'blank_5'] = np.nan
df = drop_if(df, value='isna')
actual = df.shape
assert expected == actual
# test_duplicated {{{1
def test_duplicated(t_simple_series_01):
"""
"""
df = t_simple_series_01.to_frame()
df = duplicated(df, keep=False, sort=True)
expected = 3 # Duplicate records
actual = df.duplicate.value_counts()[1]
assert expected == actual
# test_duplicated_duplicates_only {{{1
def test_duplicated_duplicates_only(t_simple_series_01):
"""
"""
df = t_simple_series_01.to_frame()
df = duplicated(df, keep='first', duplicates=True, sort=True)
expected = (2, 2)
actual = df.shape
assert expected == actual
# test_explode {{{1
def test_explode(t_sample_data):
"""
"""
df = t_sample_data
df = group_by(df, 'countries')
df = summarise(df, ids=('ids', set))
expected = (40, 1)
actual = explode(df, 'ids').shape
assert expected == actual
# test_flatten_names_no_index {{{1
def test_flatten_names_no_index(t_sample_data):
"""
"""
df = t_sample_data
df = group_by(df, ['countries', 'regions'])
df = summarise(df, total=('values_2', 'sum'))
df = df.unstack()
df = df.reset_index()
# Twice called is deliberate NOT a mistake :)
# The second call is to make sure the function does
# not crash and simply returns the given column names
df = flatten_names(df, remove_prefix='total')
df = flatten_names(df, remove_prefix='total')
expected = ['countries', 'East', 'North', 'South', 'West']
actual = df.columns.to_list()
assert expected == actual
# test_flatten_names_keep_prefix {{{1
def test_flatten_names_keep_prefix(t_sample_data):
"""
"""
df = t_sample_data
df = group_by(df, ['countries', 'regions'])
df = summarise(df, total=('values_2', 'sum'))
df = df.unstack()
df = flatten_names(df)
expected = ['total_East', 'total_North', 'total_South', 'total_West']
actual = df.columns.to_list()
assert expected == actual
# test_flatten_names_lose_prefix {{{1
def test_flatten_names_lose_prefix(t_sample_data):
"""
"""
df = t_sample_data
df = group_by(df, ['countries', 'regions'])
df = summarise(df, total=('values_2', 'sum'))
df = df.unstack()
df = flatten_names(df, remove_prefix='total')
expected = ['East', 'North', 'South', 'West']
actual = df.columns.to_list()
assert expected == actual
# test_flatten_names_remove_prefix {{{1
def test_flatten_names_remove_prefix(t_sample_data):
"""
"""
df = t_sample_data
df = group_by(df, ['countries', 'regions'])
df = summarise(df, total=('values_2', 'sum'))
df = df.unstack()
df = flatten_names(df, remove_prefix='total')
expected = ['East', 'North', 'South', 'West']
actual = df.columns.to_list()
assert expected == actual
# test_split_dataframe {{{1
def test_split_dataframe(t_sample_data):
dataframes = split_dataframe(sample_data(), chunk_size=100)
expected = 367
actual = sum([df.shape[0] for df in dataframes])
assert expected == actual
# test_generate_summary_df {{{1
def test_generate_summary_df():
"""
"""
dict_a = {'column_A': {'0': 'A100', '1': 'A101', '2': 'A101',
'3': 'A102', '4': 'A103', '5': 'A103',
'6': 'A103', '7': 'A104', '8': 'A105',
'9': 'A105', '10': 'A102', '11': 'A103'}}
df = pd.DataFrame(dict_a)
dict_b = {'column_B': {'0': 'First Row', '1': 'Second Row',
'2': 'Fourth Row', '3': 'Fifth Row',
'4': 'Third Row', '5': 'Fourth Row',
'6': 'Fifth Row', '7': 'Sixth Row',
'8': 'Seventh Row', '9': 'Eighth Row',
'10': 'Ninth Row', '11': 'Tenth Row'}}
df2 = pd.DataFrame(dict_b)
datasets = [('1st dataset', df), ('2nd dataset', df2)]
summary = summary_df(datasets, title='Summary',
col_total='Total records',
add_grand_total=True,
grand_total='Grand total')
expected = 24
actual = summary.loc['Grand total', 'Total records']
assert expected == actual
# test_non_alpha {{{1
def test_non_alpha(t_sample_data):
"""
"""
np.random.seed(42)
alphabet="0123456789abcdefghijklmnopqrstuvwxyz!$%()"
word_list = []
for _ in range(20):
chars = np.random.choice(list(alphabet), size=np.random.randint(1, 10))
word_list.append(''.join(chars))
df = pd.DataFrame(word_list, columns=['words'])
df = non_alpha(df, 'words')
df_count = count(df, 'non_alpha')
assert df_count.shape == (2, 3)
# test_head_with_series {{{1
def test_head_with_series(t_simple_series_01):
"""
"""
s1 = t_simple_series_01
expected = (4,)
actual = head(s1).shape
assert expected == actual
# test_head_with_dataframe {{{1
def test_head_with_dataframe(t_sample_data):
"""
"""
df = t_sample_data
expected = (4, 7)
actual = head(df).shape
assert expected == actual
# test_head_with_names_function {{{1
def test_head_with_names_function(t_sample_data):
"""
"""
df = t_sample_data
expected = (4, 4)
actual = head(df[names(df, regex='order|dates|values', astype='list')]).shape
assert expected == actual
# test_head_with_tablefmt_plain {{{1
def test_head_with_tablefmt_plain(t_dummy_dataframe):
"""
"""
df = t_dummy_dataframe
df.loc[:, 'blank_1': 'blank_5'] = np.nan
df = drop_if(df, value='isna')
result = head(df, tablefmt='plain')
assert result is None
# test_info {{{1
def test_info(t_simple_series_01):
df = t_simple_series_01.to_frame()
expected = (1, 7)
actual = info(df).shape
assert expected == actual
# test_info_with_dupes {{{1
def test_info_with_dupes(t_simple_series_01):
df = t_simple_series_01.to_frame()
expected = (1, 8)
actual = info(df, n_dupes=True).shape
assert expected == actual
# test_info_with_na_cols {{{1
def test_info_with_na_cols(t_simple_series_01):
df = t_simple_series_01.to_frame()
expected = (0, 7)
actual = info(df, fillna=True).shape
assert expected == actual
# test_order_by_single_col_ascending {{{1
def test_order_by_single_col_ascending():
prices = {'prices': [100, 200, 300],
'contract': ['A', 'B', 'A'],
'effective': ['2020-01-01', '2020-03-03', '2020-05-30'],
'expired': ['2020-12-31', '2021-04-30', '2022-04-01']}
exp = 200
got = (pd.DataFrame(prices)
.pipe(group_by, 'contract')
.pipe(summarise, prices=('prices', 'sum'))
.pipe(order_by, by='prices').iloc[0, 0])
assert exp == got
# test_order_by_single_col_descending {{{1
def test_order_by_single_col_descending():
prices = {'prices': [100, 200, 300],
'contract': ['A', 'B', 'A'],
'effective': ['2020-01-01', '2020-03-03', '2020-05-30'],
'expired': ['2020-12-31', '2021-04-30', '2022-04-01']}
exp = 400
got = (pd.DataFrame(prices)
.pipe(group_by, 'contract')
.pipe(summarise, prices=('prices', 'sum'))
.pipe(order_by, '-prices').iloc[0, 0])
assert exp == got
# test_order_by_multi_col_descending {{{1
def test_order_by_multi_col_descending(t_sample_sales):
exp = 404440.24
df = (t_sample_sales
.pipe(group_by, ['location', 'product'])
.pipe(summarise, TotalSales=('actual_sales', 'sum'))
.pipe(order_by, ['location', '-TotalSales']))
got = df.loc[('London', slice(None)), 'TotalSales'][0]
assert exp == got
# test_order_by_multi_col_ascending_by_keyword {{{1
def test_order_by_multi_col_ascending_by_keyword(t_sample_sales):
exp = 274674.0
df = (t_sample_sales
.pipe(group_by, ['location', 'product'])
.pipe(summarise, TotalSales=('actual_sales', 'sum'))
.pipe(order_by, by=['location', 'TotalSales']))
got = df.loc[('London', slice(None)), 'TotalSales'][0]
assert exp == got
# test_order_by_multi_col_ascending_without_keyword {{{1
def test_order_by_multi_col_ascending_without_keyword(t_sample_sales):
exp = 274674.0
df = (t_sample_sales
.pipe(group_by, ['location', 'product'])
.pipe(summarise, TotalSales=('actual_sales', 'sum'))
.pipe(order_by, ['location', 'TotalSales']))
got = df.loc[('London', slice(None)), 'TotalSales'][0]
assert exp == got
# test_overlaps {{{1
def test_overlaps():
prices = {'prices': [100, 200, 300],
'contract': ['A', 'B', 'A'],
'effective': ['2020-01-01', '2020-03-03', '2020-05-30'],
'expired': ['2020-12-31', '2021-04-30', '2022-04-01']}
df = pd.DataFrame(prices)
expected = (3, 5)
actual = overlaps(df, start='effective', end='expired', unique_key='contract')
assert expected == actual.shape
# test_overlaps_raises_key_error {{{1
def test_overlaps_raises_key_error():
prices = {'prices': [100, 200, 300],
'contract': ['A', 'B', 'A'],
'effective': ['2020-01-01', '2020-03-03', '2020-05-30'],
'expired': ['2020-12-31', '2021-04-30', '2022-04-01']}
df = pd.DataFrame(prices)
with pytest.raises(KeyError):
actual = overlaps(df, start='false_field', end='expired', unique_key='contract')
# test_overlaps_unique_key_list {{{1
def test_overlaps_unique_key_list():
prices = {'prices': [100, 200, 300],
'contract': ['A', 'B', 'A'],
'effective': ['2020-01-01', '2020-03-03', '2020-05-30'],
'expired': ['2020-12-31', '2021-04-30', '2022-04-01']}
df = pd.DataFrame(prices)
expected = (3, 5)
actual = overlaps(df, start='effective', end='expired', unique_key=['contract'])
assert expected == actual.shape
# test_pivot_longer {{{1
def test_pivot_longer(t_sample_sales):
df = t_sample_sales
df = df.pivot_table(index=['location', 'month'], columns='product', values='actual_sales')
df = pivot_longer(df, col_level=0, ignore_index=False)
actual = df.loc[('Paris', '2021-01-01')].query("product == 'Beachwear'").values[0][1]
expected = 20612.035
assert expected == actual
# test_pivot_table {{{1
def test_pivot_table(t_sample_data):
"""
"""
df = t_sample_data
pv = pivot_table(df, index=['countries', 'regions'], values='values_1')
pv.rename(columns={'values_1': 'totals'}, inplace=True)
expected = 6507.9683290565645
actual = pv.totals.sum()
assert expected == actual
# test_pivot_table_sort_ascending_false {{{1
def test_pivot_table_sort_ascending_false(t_sample_data):
"""
"""
df = t_sample_data
pv = pivot_table(df, index=['countries'], values='values_1')
pv.sort_values(by='values_1', ascending=False, inplace=True)
expected = (8, 1)
actual = pv.shape
assert expected == actual
# test_pivot_name_error {{{1
def test_pivot_name_error(t_sample_data):
"""
Should send log message regarding invalid key/name
pv object should be None and generate KeyError
"""
df = t_sample_data
with pytest.raises(KeyError):
pv = pivot_table(df, index=['countries_wrong_name'], values='values_1')
# test_pivot_percent_calc {{{1
def test_pivot_percent_calc(t_sample_data):
"""
"""
df = t_sample_data
pv = pivot_table(df, index=['countries', 'regions'], values='values_1')
pv.rename(columns={'values_1': 'totals'}, inplace=True)
pv.sort_values(by='totals', ascending=False, inplace=True)
pv['%'] = pv.totals.apply(lambda x: x*100/pv.totals.sum())
expected = 2.874168873331256
actual = pv.loc[('Norway','East'), '%']
assert expected == actual
# test_pivot_cum_percent_calc {{{1
def test_pivot_cum_percent_calc(t_sample_data):
"""
"""
df = t_sample_data
pv = pivot_table(df, index=['countries', 'regions'], values='values_1')
pv.rename(columns={'values_1': 'totals'}, inplace=True)
pv.sort_values(by='totals', ascending=False, inplace=True)
pv['%'] = pv.totals.apply(lambda x: x*100/pv.totals.sum())
pv['cum %'] = pv['%'].cumsum()
expected = 79.67310369428336
actual = pv.loc[('Sweden', 'West'), 'cum %']
assert expected == actual
# test_pivot_table_multi_grouper {{{1
def test_pivot_table_multi_grouper(t_sample_data):
"""
"""
df = t_sample_data
p2 = pivot_table(df, index=['dates', 'order_dates',
'regions', 'ids'],
freq='Q',
format_date=True)
assert p2.loc[('Mar 2020', 'Mar 2020', 'East', 'A'), 'values_1'] > 0
# test_pivot_table_single_grouper {{{1
def test_pivot_table_single_grouper(t_sample_data):
"""
"""
df = t_sample_data
p2 = pivot_table(df, index=['dates', 'regions', 'ids'],
freq='Q', format_date=True)
assert p2.loc[('Mar 2020', 'East', 'A'), 'values_1'] > 0
# test_relocate_no_column {{{1
def test_relocate_no_column(t_sample_data):
"""
"""
df = t_sample_data
with pytest.raises(KeyError):
actual = relocate(df, column=None, loc='first')
# test_pivot_longer_tuple_args {{{1
def test_pivot_longer_tuple_args(t_sample_phone_sales):
"""
"""
df = t_sample_phone_sales
df = assign(df, sales_price=lambda x: x.unit_price * x.qty)
df = where(df, "invoice_dt.dt.month.between(3, 3)")
df = group_by(df, ['region', 'country', 'rep'])
df = summarise(df, total_sales=('sales_price', 'sum'))
df = unstack(df)
df = flatten_names(df, remove_prefix='total_sales')
df = reset_index(df)
actual = pivot_longer(df, ('region', 'country'), 'actual_sales')
assert actual.shape == (10, 4)
# test_pivot_longer_tuple_kwargs {{{1
def test_pivot_longer_tuple_kwargs(t_sample_phone_sales):
"""
"""
df = t_sample_phone_sales
df = assign(df, sales_price=lambda x: x.unit_price * x.qty)
df = where(df, "invoice_dt.dt.month.between(3, 3)")
df = group_by(df, ['region', 'country', 'rep'])
df = summarise(df, total_sales=('sales_price', 'sum'))
df = unstack(df)
df = flatten_names(df, remove_prefix='total_sales')
df = reset_index(df)
actual = pivot_longer(df,
id_vars=('region', 'country'),
value_vars='actual_sales')
assert actual.shape == (10, 4)
# test_relocate_index {{{1
def test_relocate_index(t_sample_data):
"""
"""
df = t_sample_data
df = df.set_index(['countries', 'regions'])
df = relocate(df, 'regions', loc='first', index=True)
expected = ['regions', 'countries']
actual = df.index.names
assert expected == actual
# test_relocate_single_column {{{1
def test_relocate_single_column(t_sample_data):
"""
"""
df = t_sample_data
df = relocate(df, 'regions', loc='first')
expected = ['regions', 'dates', 'order_dates', 'countries', 'ids', 'values_1', 'values_2']
actual = df.columns.values.tolist()
assert expected == actual
# test_relocate_single_column_last_column {{{1
def test_relocate_single_column_last_column(t_sample_data):
"""
"""
df = t_sample_data
df = relocate(df, 'regions', loc='last')
expected = ['dates', 'order_dates', 'countries', 'ids', 'values_1', 'values_2', 'regions']
actual = df.columns.values.tolist()
assert expected == actual
# test_relocate_multi_column_first {{{1
def test_relocate_multi_column_first(t_sample_data):
"""
"""
df = t_sample_data
df = relocate(df, ['dates', 'regions', 'countries'], loc='first')
expected = ['dates', 'regions', 'countries', 'order_dates',
'ids', 'values_1', 'values_2']
actual = df.columns.tolist()
assert expected == actual
# test_relocate_multi_column_last {{{1
def test_relocate_multi_column_last(t_sample_data):
"""
"""
df = t_sample_data
df = relocate(df, ['dates', 'regions', 'countries'], loc='last')
expected = ['order_dates', 'ids', 'values_1', 'values_2',
'dates', 'regions', 'countries' ]
actual = df.columns.values.tolist()
assert expected == actual
# test_relocate_multi_column_before {{{1
def test_relocate_multi_column_before(t_sample_data):
"""
"""
df = t_sample_data
df = relocate(df, ['dates', 'regions', 'countries'], loc='before',
ref_column='values_1')
expected = ['order_dates', 'ids', 'dates', 'regions',
'countries', 'values_1', 'values_2']
actual = df.columns.values.tolist()
assert expected == actual
# test_relocate_multi_column_after {{{1
def test_relocate_multi_column_after(t_sample_data):
"""
"""
df = t_sample_data
df = relocate(df, ['dates', 'regions', 'countries'], loc='after',
ref_column='order_dates')
expected = ['order_dates', 'dates', 'regions', 'countries',
'ids', 'values_1', 'values_2']
actual = df.columns.values.tolist()
assert expected == actual
# test_relocate_index_column_after {{{1
def test_relocate_index_column_after(t_sample_data):
"""
"""
df = t_sample_data
df = df.set_index(['countries', 'regions'])
df = relocate(df, column='countries', loc='after', ref_column='regions')
expected = 'regions'
actual = df.index.names[1]
assert expected == actual
# test_rename {{{1
def test_rename(t_sample_data):
"""
"""
expected = ['trans_dt', 'order_dates', 'countries',
'regions', 'ids', 'values_1',
'values_2']
df = t_sample_data
df = rename(df, columns={'dates': 'trans_dt'})
actual = df.columns.to_list()
assert expected == actual
# test_rename_axis {{{1
def test_rename_axis(t_sample_data):
"""
"""
expected = ['AAA', 'BBB']
df = t_sample_data
df = pivot_table(df, index=['countries', 'regions'], values='values_1')
df = rename_axis(df, mapper=('AAA', 'BBB'), axis='rows')
actual = df.index.names
assert expected == actual
# test_resample_groupby {{{1
def test_resample_groupby(t_sample_data):
""" """
df = t_sample_data
g1 = group_by(df, by=['dates'], freq='Q').sum()
g1 = fmt_dateidx(g1, freq='Q')
# Tidy axis labels
g1.index.name = 'Date period'
g1.columns = ['Totals1', 'Totals2']
assert g1.loc['Mar 2020', 'Totals1'] > 0
# test_resample_multi_grouper_groupby {{{1
def test_resample_multi_grouper_groupby():
""" """
prices = {
'ids': [1, 6, 8, 3],
'prices': [100, 200, 300, 400],
'country': ['Canada', 'USA', 'United Kingdom', 'France'],
'effective': ['2020-01-01', '2020-03-03', '2020-05-30', '2020-10-10'],
'expired': ['2020-12-31', '2021-04-30', '2022-04-01', '2023-12-31'] }
df = pd.DataFrame(prices)
df.effective = pd.to_datetime(df.effective)
df.expired = pd.to_datetime(df.expired)
cols = ['country', 'effective', 'expired', 'prices']
g1 = group_by(df, by=cols, freq='Q').sum()
expected = (4, 1)
actual = g1.shape
assert expected == actual
# test_resample_groupby_multi_index_single_grouper {{{1
def test_resample_groupby_multi_index_single_grouper():
"""
"""
prices = {
'ids': [1, 6, 8, 3],
'prices': [100, 200, 300, 400],
'country': ['Canada', 'USA', 'United Kingdom', 'France'],
'effective': ['2020-01-01', '2020-03-03', '2020-05-30', '2020-10-10'],
'expired': ['2020-12-31', '2021-04-30', '2022-04-01', '2023-12-31'] }
df = pd.DataFrame(prices)
df.effective = pd.to_datetime(df.effective)
df.expired = pd.to_datetime(df.expired)
cols = ['country', 'effective', 'prices']
g1 = group_by(df, by=cols, freq='Q').sum()
expected = (4, 1)
actual = g1.shape
assert expected == actual
# test_sample_series {{{1
def test_sample_series(t_simple_series_01):
"""
"""
s1 = t_simple_series_01
expected = (2, 1)
actual = sample(s1, random_state=42).shape
assert expected == actual
# test_sample_dataframe {{{1
def test_sample_dataframe(t_sample_data):
"""
"""
df = t_sample_data
expected = (2, 7)
actual = sample(df, random_state=42).shape
assert expected == actual
# test_select_no_parms {{{1
def test_select_no_parms(t_sample_data):
df = t_sample_data
df = select(df)
expected = ['dates', 'order_dates', 'countries',
'regions', 'ids', 'values_1', 'values_2']
actual = df.columns.tolist()
assert expected == actual
# test_select_str_with_regex {{{1
def test_select_str_with_regex(t_sample_data):
df = t_sample_data
df = select(df, regex='values')
expected = ['values_1', 'values_2']
actual = df.columns.tolist()
assert expected == actual
# test_select_str_with_like {{{1
def test_select_str_with_like(t_sample_data):
df = t_sample_data
df = select(df, like='values')
expected = ['values_1', 'values_2']
actual = df.columns.tolist()
assert expected == actual
# test_select_slice_with_integers {{{1
def test_select_slice_with_integers(t_sample_data):
df = t_sample_data
df = select(df, (3 , 6))
expected = ['countries', 'regions', 'ids', 'values_1']
actual = df.columns.tolist()
assert expected == actual
# test_select_slice_with_column_names {{{1
def test_select_slice_with_column_names(t_sample_data):
df = t_sample_data
df = select(df, ('countries', 'values_1'))
expected = ['countries', 'regions', 'ids', 'values_1']
actual = df.columns.tolist()
assert expected == actual
# test_select_column_list {{{1
def test_select_column_list(t_sample_data):
df = t_sample_data
df = select(df, ['order_dates'])
expected = ['order_dates']
actual = df.columns.tolist()
assert expected == actual
# test_select_columns_list {{{1
def test_select_columns_list(t_sample_data):
df = t_sample_data
df = select(df, ['order_dates', 'countries'])
expected = ['order_dates', 'countries']
actual = df.columns.tolist()
assert expected == actual
# test_select_column_str {{{1
def test_select_column_str(t_sample_data):
df = t_sample_data
df = select(df, 'order_dates')
expected = ['order_dates']
actual = df.columns.tolist()
assert expected == actual
# test_select_excluding_column_str {{{1
def test_select_excluding_column_str(t_sample_data):
df = t_sample_data
df = select(df, '-order_dates')
expected = ['dates', 'countries', 'regions',
'ids', 'values_1', 'values_2']
actual = df.columns.tolist()
assert expected == actual
# test_select_excluding_column_list {{{1
def test_select_excluding_column_list(t_sample_data):
df = t_sample_data
df = select(df, ['-order_dates'])
expected = ['dates', 'countries', 'regions',
'ids', 'values_1', 'values_2']
actual = df.columns.tolist()
assert expected == actual
# test_select_columns {{{1
def test_select_columns(t_sample_data):
df = t_sample_data
df = select(df, ['dates', 'order_dates'])
expected = ['dates', 'order_dates']
actual = df.columns.tolist()
assert expected == actual
# test_select_include {{{1
def test_select_include(t_sample_data):
df = t_sample_data
df = select(df, include='number')
expected = ['values_1', 'values_2']
actual = df.columns.tolist()
assert expected == actual
# test_select_exclude {{{1
def test_select_exclude(t_sample_data):
df = t_sample_data
df = select(df, exclude='number')
expected = ['dates', 'order_dates', 'countries', 'regions', 'ids']
actual = df.columns.tolist()
assert expected == actual
# test_select_invalid_column {{{1
def test_select_invalid_column(t_sample_data):
df = t_sample_data
with pytest.raises(KeyError):
actual = select(df, 'AAA')
# test_set_names {{{1
def test_set_names():
data = {'A': ['Order', 'Qty', 10, 40],
'B': ['Order', 'Number', 12345, 12346]}
df = pd.DataFrame(data)
df = set_names(df, ['C', 'D'])
expected = ['C', 'D']
actual = df.columns.to_list()
assert expected == actual
# test_str_clean_number {{{1
def test_str_clean_number():
values = ['$ 1000.48', '-23,500.54', '1004,0 00 .22', '-£43,000',
'EUR 304s0,00.00', '354.00-', '301 ', '4q5056 GBP',
'USD 20621.54973']
expected = [1000.48, -23500.54, 1004000.22, -43000.0, 304000.0,
-354.0, 301.0, 45056.0, 20621.54973]
df = pd.DataFrame(values, columns=['values'])
df['values'] = str_clean_number(df['values'])
assert expected == df['values'].values.tolist()
# test_str_clean_number_decimal_comma {{{1
def test_str_clean_number_decimal_comma():
values = ['$ 1000.48', '-23,500.54', '1004,0 00 .22', '-£43,000',
'EUR 304s0,00.00', '354.00-', '301 ', '4q5056 GBP',
'USD 20621.54973']
expected = ['100048', '-23,50054', '1004,00022', '-43,000',
'3040,0000', '-35400', '301', '45056', '2062154973']
df = pd.DataFrame(values, columns=['values'])
df['values'] = str_clean_number(df['values'], decimal=',')
assert expected == df['values'].values.tolist()
# test_str_join_str_column_raise_columns_list_error {{{1
def test_str_join_str_column_raise_columns_list_error(t_sample_sales):
df = t_sample_sales
with pytest.raises(NameError):
actual = str_join(df, columns='actual_sales',
sep='|', column='combined_col', drop=False)
# test_str_join_str_column_raise_column_must_be_string_error {{{1
def test_str_join_str_column_raise_column_must_be_string_error(t_sample_sales):
df = t_sample_sales
with pytest.raises(TypeError):
actual = str_join(df, columns=['actual_sales', 'product'],
sep='|', column=['combined_col'], drop=False)
# test_str_join_str_column_raise_at_least_two_columns_error {{{1
def test_str_join_str_column_raise_at_least_two_columns_error(t_sample_sales):
df = t_sample_sales
with pytest.raises(ValueError):
actual = str_join(df, columns=['actual_sales'],
sep='|', column='combined_col', drop=False)
# test_str_join_str_column_raise_check_columns_name_error {{{1
def test_str_join_str_column_raise_check_columns_name_error(t_sample_sales):
df = t_sample_sales
with pytest.raises(NameError):
actual = str_join(df, columns=['actual_sales', 'product_wrong'],
sep='|', column='combined_col', drop=False)
# test_str_join_str_column_drop_false {{{1
def test_str_join_str_column_drop_false(t_sample_sales):
df = t_sample_sales
actual = str_join(df,
columns=['actual_sales', 'product'],
sep='|',
column='combined_col',
drop=False)
assert actual.loc[4, 'combined_col'] == '29209.08|Beachwear'
assert 'actual_sales' in actual.columns.tolist()
assert 'product' in actual.columns.tolist()
# test_str_join_str_3_columns_drop_true {{{1
def test_str_join_str_3_columns_drop_true(t_sample_sales):
df = t_sample_sales
actual = str_join(df,
columns=['actual_sales', 'product', 'actual_profit'],
sep='|',
column='combined_col',
drop=True)
assert actual.loc[4, 'combined_col'] == '29209.08|Beachwear|1752.54'
assert 'actual_sales' not in actual.columns.tolist()
assert 'actual_profit' not in actual.columns.tolist()
assert 'product' not in actual.columns.tolist()
# test_str_join_str_3_columns_default_join_column_drop_true {{{1
def test_str_join_str_3_columns_default_join_column_drop_true(t_sample_sales):
df = t_sample_sales
actual = str_join(df,
columns=['actual_sales', 'product', 'actual_profit'],
sep='|',
drop=True)
assert actual.loc[4, '0'] == '29209.08|Beachwear|1752.54'
assert 'actual_sales' not in actual.columns.tolist()
assert 'actual_profit' not in actual.columns.tolist()
assert 'product' not in actual.columns.tolist()
# test_str_join_str_column_drop_true {{{1
def test_str_join_str_column_drop_true(t_sample_sales):
df = t_sample_sales
actual = str_join(df,
columns=['actual_sales', 'product'],
sep='|',
column='combined_col',
drop=True)
assert actual.loc[4, 'combined_col'] == '29209.08|Beachwear'
assert 'actual_sales' not in actual.columns.tolist()
assert 'product' not in actual.columns.tolist()
# test_str_join_str_column_replace_original_column_drop_true {{{1
def test_str_join_str_column_replace_original_column_drop_true(t_sample_sales):
df = t_sample_sales
actual = str_join(df,
columns=['actual_sales', 'product'],
sep='|',
column='actual_sales',
drop=True)
assert actual.loc[4, 'actual_sales'] == '29209.08|Beachwear'
assert 'actual_sales' in actual.columns.tolist()
assert 'product' not in actual.columns.tolist()
# test_str_split_str_column_drop_false {{{1
def test_str_split_str_column_drop_false(t_sample_sales):
df = t_sample_sales
actual = str_split(df, 'product', pat=' ', drop=False)
# Since columns not specified, default field names provided
assert actual[[0, 1, 2]].shape == (200, 3)
assert actual.shape == (200, 10)
# test_str_split_str_column_drop_true {{{1
def test_str_split_str_column_drop_true(t_sample_sales):
df = t_sample_sales
actual = str_split(df, 'product', pat=' ', drop=True)
# Since columns not specified, default field names provided
assert actual[[0, 1, 2]].shape == (200, 3)
assert actual.shape == (200, 9)
# test_str_split_date_column_drop_true_expand_false {{{1
def test_str_split_date_column_drop_true_expand_false(t_sample_sales):
df = t_sample_sales
actual = str_split(df, 'month', pat='-', drop=True, expand=False)
assert actual.loc[4:4, 'month'].values[0] == ['2021', '01', '01']
# test_str_split_date_column_drop_true_expand_True {{{1
def test_str_split_date_column_drop_true_expand_True(t_sample_sales):
df = t_sample_sales
actual = str_split(df, 'month', columns=['year', 'month', 'day'],
pat='-', drop=True, expand=True)
assert actual.loc[4:4, 'year'].values[0] == '2021'
# test_str_split_number_column_drop_true_expand_True {{{1
def test_str_split_number_column_drop_true_expand_True(t_sample_sales):
df = t_sample_sales
actual = str_split(df, 'actual_sales',
columns=['number', 'precision'],
pat='.', drop=True, expand=True)
assert actual.loc[4:4, 'number'].values[0] == '29209'
# test_str_split_number_raise_type_error_column_str {{{1
def test_str_split_number_raise_type_error_column_str(t_sample_sales):
df = t_sample_sales
with pytest.raises(TypeError):
actual = str_split(df, ['actual_sales'], columns=['number', 'precision'],
pat='.', drop=True, expand=True)
# test_str_split_number_raise_type_error_columns_list_like {{{1
def test_str_split_number_raise_type_error_columns_list_like(t_sample_sales):
df = t_sample_sales
with pytest.raises(TypeError):
actual = str_split(df, 'actual_sales', columns='number',
pat='.', drop=True, expand=True)
# test_str_split_number_raise_name_error_column_in_list {{{1
def test_str_split_number_raise_name_error_column_in_list(t_sample_sales):
df = t_sample_sales
with pytest.raises(NameError):
actual = str_split(df, 'actual_sales_wrong', columns=['number', 'precision'],
pat='.', drop=True, expand=True)
# test_str_trim_blanks {{{1
def test_str_trim_blanks(t_sample_column_clean_text):
"""
"""
df = t_sample_column_clean_text
df['test_col'] = (df['test_col'].str.replace(r'(\w)\s+(\w)', r'\1 \2', regex=True)
.str.title())
expected = ['First Row', 'Second Row', 'Fourth Row', 'Fifth Row',
'Thrid Row', 'Fourth Row', 'Fifth Row', 'Sixth Row',
'Seventh Row', 'Eighth Row', 'Ninth Row', 'Tenth Row']
df2 = str_trim(df, str_columns=['test_col'])
actual = df2['test_col'].to_list()
assert expected == actual
str_trim(df, str_columns=None)
actual = df['test_col'].to_list()
assert expected == actual
# test_str_trim_blanks_duplicate_column_name {{{1
def test_str_trim_blanks_duplicate_column_name(t_sample_data):
"""
"""
df = t_sample_data
df.columns = ['dates', 'order_dates', 'regions', 'regions', 'ids', 'values_1', 'values_2']
df2 = str_trim(df)
expected = ['dates', 'order_dates', 'regions', 'regions2', 'ids', 'values_1', 'values_2']
actual = df2.columns.tolist()
assert expected == actual
# test_summarise_default {{{1
def test_summarise_default(t_sample_data):
df = t_sample_data
expected = (7, 2)
actual = summarise(df)
assert expected == actual.shape
# test_summarise_value {{{1
def test_summarise_value(t_sample_data):
df = t_sample_data
expected = (1, )
actual = summarise(df, {'values_1': 'sum'})
assert expected == actual.shape
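# test_summarise_no_parms {{{1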
def test_summarise_no_parms(t_sample_sales):
df = t_sample_sales
df = group_by(df, 'product')
df = summarise(df)
df = where(df, "product == 'Footwear'")
actual = select(df, 'actual_profit').values[0][0]
expected = 85837.479999999981
assert expected == actual
# test_tail_with_series {{{1
def test_tail_with_series(t_simple_series_01):
"""
"""
s1 = t_simple_series_01
expected = (4,)
actual = tail(s1).shape
assert expected == actual
# test_tail_with_dataframe {{{1
def test_tail_with_dataframe(t_sample_data):
"""
"""
df = t_sample_data
expected = (4, 7)
actual = tail(df).shape
assert expected == actual
# test_tail_with_tablefmt_plain {{{1
def test_tail_with_tablefmt_plain(t_dummy_dataframe):
"""
"""
df = t_dummy_dataframe
df.loc[:, 'blank_1': 'blank_5'] = np.nan
df = drop_if(df, value='isna')
result = tail(df, tablefmt='plain')
assert result is None
# test_transform_no_parms {{{1
def test_transform_no_parms(t_sample_sales):
""" """
df = t_sample_sales
df = group_by(df, ['product', 'location'])
df = summarise(df)
df = transform(df)
df = where(df, "product == 'Beachwear'")
actual = select(df, "g%").values[0][0]
assert 32.58 == actual
# test_transform {{{1
def test_transform(t_sample_data):
""" """
index = ['countries', 'regions']
cols = ['countries', 'regions', 'ids', 'values_1', 'values_2']
df = t_sample_data[cols]
gx = transform(df, index=index, g_perc=('values_2', 'percent'))
gx = transform(df, index=index, total_group_value=('values_2', 'sum'))
gx.set_index(['countries', 'regions', 'ids'], inplace=True)
expected = (367, 4)
actual = gx.shape
assert expected == actual
# test_transform_with_sort {{{1
def test_transform_with_sort(t_sample_data):
"""
"""
index = ['countries', 'regions']
cols = ['countries', 'regions', 'ids', 'values_1', 'values_2']
df = t_sample_data[cols]
gx = transform(df, index=index, g_perc=('values_2', 'percent'))
gx = transform(df, index=index, total_group_value=('values_2', 'sum'))
gx = gx.set_index(['countries', 'regions', 'ids'])
expected = (367, 4)
actual = gx.shape
assert expected == actual
# test_transform_custom_function {{{1
def test_transform_custom_function(t_sample_data):
"""
"""
index = ['countries', 'regions']
cols = ['countries', 'regions', 'ids', 'values_1', 'values_2']
df = t_sample_data[cols]
gx = transform(df, index=index, g_perc=('values_2', 'percent'))
gx = transform(df, index=index, total_group_value=('values_2', lambda x: x.sum()))
gx.set_index(['countries', 'regions', 'ids'], inplace=True)
expected = (367, 4)
actual = gx.shape
assert expected == actual
# test_where {{{1
def test_where(t_sample_data):
df = t_sample_data
expected = (1, 7)
actual = where(df, """regions == 'East' and countries == 'Spain' and values_1 == 29""")
assert expected == actual.shape
# test_inner_join {{{1
def test_inner_join():
"""
"""
order_data = {'OrderNo': [1001, 1002, 1003, 1004, 1005],
'Status': ['A', 'C', 'A', 'A', 'P'],
'Type_': ['SO', 'SA', 'SO', 'DA', 'DD']}
orders = pd.DataFrame(order_data)
status_data = {'Status': ['A', 'C', 'P'],
'description': ['Active', 'Closed', 'Pending']}
statuses = pd.DataFrame(status_data)
order_types_data = {'Type_': ['SA', 'SO'],
'description': ['Sales Order', 'Standing Order'],
'description_2': ['Arbitrary desc', 'another one']}
types_ = pd.DataFrame(order_types_data)
merged_df = inner_join(orders, types_, suffixes=('_orders', '_types'))
expected = (3, 5)
actual = merged_df.shape
assert expected == actual
# test_left_join {{{1
def test_left_join():
"""
"""
order_data = {'OrderNo': [1001, 1002, 1003, 1004, 1005],
'Status': ['A', 'C', 'A', 'A', 'P'],
'Type_': ['SO', 'SA', 'SO', 'DA', 'DD']}
orders = pd.DataFrame(order_data)
status_data = {'Status': ['A', 'C', 'P'],
'description': ['Active', 'Closed', 'Pending']}
statuses = pd.DataFrame(status_data)
order_types_data = {'Type_': ['SA', 'SO'],
'description': ['Sales Order', 'Standing Order'],
'description_2': ['Arbitrary desc', 'another one']}
types_ = pd.DataFrame(order_types_data)
merged_df = left_join(orders, types_, suffixes=('_orders', '_types'))
expected = (5, 5)
actual = merged_df.shape
assert expected == actual
# test_right_join {{{1
def test_right_join():
"""
"""
order_data = {'OrderNo': [1001, 1002, 1003, 1004, 1005],
'Status': ['A', 'C', 'A', 'A', 'P'],
'Type_': ['SO', 'SA', 'SO', 'DA', 'DD']}
orders = pd.DataFrame(order_data)
status_data = {'Status': ['A', 'C', 'P'],
'description': ['Active', 'Closed', 'Pending']}
statuses = pd.DataFrame(status_data)
order_types_data = {'Type_': ['SA', 'SO'],
'description': ['Sales Order', 'Standing Order'],
'description_2': ['Arbitrary desc', 'another one']}
types_ = pd.DataFrame(order_types_data)
merged_df = right_join(orders, types_, suffixes=('_orders', '_types'))
expected = (3, 5)
actual = merged_df.shape
assert expected == actual
# test_outer_join {{{1
def test_outer_join():
"""
"""
order_data = {'OrderNo': [1001, 1002, 1003, 1004, 1005],
'Status': ['A', 'C', 'A', 'A', 'P'],
'Type_': ['SO', 'SA', 'SO', 'DA', 'DD']}
orders = pd.DataFrame(order_data)
status_data = {'Status': ['A', 'C', 'P'],
'description': ['Active', 'Closed', 'Pending']}
statuses = pd.DataFrame(status_data)
order_types_data = {'Type_': ['SA', 'SO'],
'description': ['Sales Order', 'Standing Order'],
'description_2': ['Arbitrary desc', 'another one']}
types_ = pd.DataFrame(order_types_data)
merged_df = outer_join(orders, types_, suffixes=('_orders', '_types'))
expected = (5, 5)
actual = merged_df.shape
assert expected == actual
| 2.203125
| 2
|
wui/version_static_files.py
|
kspar/easy
| 3
|
12778560
|
import sys
import time
def create_versioned_files(src_filename, filenames):
timestamp = int(time.time())
with open(src_filename, encoding='utf-8') as html_file:
html_file_content = html_file.read()
for filename in filenames:
usages_count = html_file_content.count(filename)
if usages_count != 1:
print('ERROR: Found {} usages for file {} (expected exactly 1)'.format(usages_count, filename))
return
new_filename = "{}?v={}".format(filename, timestamp)
html_file_content = html_file_content.replace(filename, new_filename)
with open('versioned.' + src_filename, mode="w", encoding="utf-8") as f:
f.write(html_file_content)
if __name__ == '__main__':
create_versioned_files(sys.argv[1], sys.argv[2:])
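# Usage sketch (illustrative; the filenames below are hypothetical, only the CLI shape
# implemented above is assumed): pass the HTML file first, then every asset filename
# that is referenced exactly once in it.
#
#   python version_static_files.py index.html css/style.css js/app.js
#
# The result is written to `versioned.index.html` with `?v=<timestamp>` appended to
# each asset reference.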
| 3.03125
| 3
|
test/iterator.py
|
trK54Ylmz/rocksdb-py
| 3
|
12778561
|
<gh_stars>1-10
import unittest
import rocksdbpy
import shutil
import tempfile
from rocksdbpy import WriteBatch
class TestIterator(unittest.TestCase):
def setUp(self):
self.temp = tempfile.mkdtemp()
wb = WriteBatch()
# add couple of keys and values
wb.add(b'test_add_1', b'test_value')
wb.add(b'test_add_2', b'test_value')
wb.add(b'test_add_3', b'test_value')
self.db = rocksdbpy.open_default(self.temp)
self.db.write(wb)
def tearDown(self):
self.db.close()
shutil.rmtree(self.temp)
def test_simple(self):
# get iterator in default mode which is forward
itr = self.db.iterator()
i = 1
for k, v in itr:
self.assertEqual(b'test_value', v)
self.assertEqual(f'test_add_{i}'.encode('ascii'), k)
i += 1
def test_end(self):
# get iterator in end mode which is reverse
itr = self.db.iterator(mode='end')
i = 3
for k, v in itr:
self.assertEqual(b'test_value', v)
self.assertEqual(f'test_add_{i}'.encode('ascii'), k)
i -= 1
def test_from(self):
# get iterator in 'from' mode, which skips some keys
itr = self.db.iterator(mode='from', key=b'test_add_2')
i = 2
for k, v in itr:
self.assertEqual(b'test_value', v)
self.assertEqual(f'test_add_{i}'.encode('ascii'), k)
i += 1
def test_from_reverse(self):
# get iterator in 'from' mode, which skips some keys and iterates in reverse
itr = self.db.iterator(mode='from', key=b'test_add_2', direction=-1)
i = 2
for k, v in itr:
self.assertEqual(b'test_value', v)
self.assertEqual(f'test_add_{i}'.encode('ascii'), k)
i -= 1
def test_count(self):
# get random iterator
itr = self.db.iterator(mode='from', direction=-1)
self.assertEqual(3, itr.len())
def test_valid(self):
# get random iterator
itr = self.db.iterator(mode='from', direction=-1)
self.assertTrue(itr.valid())
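# Minimal usage sketch (comments only; it relies solely on the rocksdbpy calls exercised
# by the tests above, and the database path is illustrative):
#
#   db = rocksdbpy.open_default('/tmp/example-db')
#   wb = WriteBatch()
#   wb.add(b'key_1', b'value_1')
#   db.write(wb)
#   for k, v in db.iterator(mode='from', key=b'key_1'):
#       print(k, v)
#   db.close()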
| 2.796875
| 3
|
setup.py
|
michellab/bgflow
| 42
|
12778562
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="bgflow",
version="0.1",
description="Boltzmann Generators in PyTorch",
author="<NAME>, <NAME>, <NAME>, <NAME>",
author_email="<EMAIL>",
url="https://www.mi.fu-berlin.de/en/math/groups/comp-mol-bio/index.html",
packages=find_packages()
)
| 1.375
| 1
|
notevault/configmanager.py
|
Sebastian-Hojas/sortnote
| 1
|
12778563
|
import os
class ConfigManager:
def __init__(self, path, dryrun, verbose):
self.path = path
self.dryrun = dryrun
self.verbose = verbose
self.config = []
try:
with open(self.path, 'r') as f:
self.config = [line.strip('\n').strip('\r') for line in f.readlines() if line != "\n"]
except Exception as e:
if self.verbose:
print("Warning: Config '" + self.path + "' does not exist or is empty")
def enable(self,directory):
dir = os.path.abspath(directory)
if not self.dryrun:
self.config.append(dir)
# remove duplicates
self.config = list(set(self.config))
self.saveConfig()
def disable(self,directory):
dir = os.path.abspath(directory)
if not self.dryrun:
self.config = [a for a in self.config if a != dir]  # compare against the absolute path computed above
self.saveConfig()
def disableAll(self):
if self.verbose:
print("Disabled all directories")
if not self.dryrun:
self.config = []
self.saveConfig()
def saveConfig(self):
with open(self.path, 'w') as f:
f.write("\n".join(self.config))
def status(self):
print("\n".join(self.config))
def enabledFolders(self):
return self.config
| 2.90625
| 3
|
utils.py
|
gyhdtc/QATM_pytorch
| 0
|
12778564
|
from __future__ import print_function, division
import matplotlib.pyplot as plt
import math
from sklearn.metrics import auc
import numpy as np
import cv2
import os, sys
int_ = lambda x: int(round(x))
def IoU( r1, r2 ):
x11, y11, w1, h1 = r1
x21, y21, w2, h2 = r2
x12 = x11 + w1; y12 = y11 + h1
x22 = x21 + w2; y22 = y21 + h2
x_overlap = max(0, min(x12,x22) - max(x11,x21) )
y_overlap = max(0, min(y12,y22) - max(y11,y21) )
I = 1. * x_overlap * y_overlap
U = (y12-y11)*(x12-x11) + (y22-y21)*(x22-x21) - I
J = I/U
return J
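# Worked example (not part of the original module): for r1 = (0, 0, 2, 2) and
# r2 = (1, 1, 2, 2) the overlap region is 1x1, so I = 1, U = 4 + 4 - 1 = 7 and
# IoU(r1, r2) = 1/7 ~= 0.143.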
def evaluate_iou( rect_gt, rect_pred ):
# score of iou
score = [ IoU(i, j) for i, j in zip(rect_gt, rect_pred) ]
return score
def compute_score( x, w, h ):
# score of response strength
k = np.ones( (h, w) )
score = cv2.filter2D(x, -1, k)
score[:, :w//2] = 0
score[:, math.ceil(-w/2):] = 0
score[:h//2, :] = 0
score[math.ceil(-h/2):, :] = 0
return score
def locate_bbox( a, w, h ):
row = np.argmax( np.max(a, axis=1) )
col = np.argmax( np.max(a, axis=0) )
x = col - 1. * w / 2
y = row - 1. * h / 2
return x, y, w, h
def score2curve( score, thres_delta = 0.01 ):
thres = np.linspace( 0, 1, int(1./thres_delta)+1 )
success_num = []
for th in thres:
success_num.append( np.sum(score >= (th+1e-6)) )
success_rate = np.array(success_num) / len(score)
return thres, success_rate
def all_sample_iou( score_list, gt_list):
num_samples = len(score_list)
iou_list = []
for idx in range(num_samples):
score, image_gt = score_list[idx], gt_list[idx]
w, h = image_gt[2:]
pred_rect = locate_bbox( score, w, h )
iou = IoU( image_gt, pred_rect )
iou_list.append( iou )
return iou_list
def plot_success_curve( iou_score, title='' ):
thres, success_rate = score2curve( iou_score, thres_delta = 0.05 )
auc_ = np.mean( success_rate[:-1] )  # this is same auc protocol as used in previous template matching papers
# auc_ = auc( thres, success_rate )  # this is the actual auc
plt.figure()
plt.grid(True)
plt.xticks(np.linspace(0,1,11))
plt.yticks(np.linspace(0,1,11))
plt.ylim(0, 1)
plt.title(title + 'auc={}'.format(auc_))
plt.plot( thres, success_rate )
plt.show()
| 2.296875
| 2
|
FlaskBackend/main_wallet_create.py
|
IKalonji/mbongo_algorand_wallet
| 4
|
12778565
|
<reponame>IKalonji/mbongo_algorand_wallet<filename>FlaskBackend/main_wallet_create.py
import http.client
from os import getenv
# import dotenv
from flask import json
# dotenv.load_dotenv()
# api_key = getenv('API_KEY')
class MainWallet():
def __init__(self):
self.key = ""
def initialize_wallet(self):
wallet_data = create_wallet(self.key)
print("Wallet-data: ", wallet_data)
self.address = wallet_data['address']
self.secret = wallet_data['secret']
main_account = create_virtual_currency(self.key)
if 'errorCode' in main_account.keys():
main_account = get_virtual_currency(self.key)
self.main_account_id = main_account['accountId']
else:
self.main_account_id = main_account['id']
print("Main-account-data: ", main_account)
def create_wallet(api_key):
conn = http.client.HTTPSConnection("api-eu1.tatum.io")
headers = { 'x-api-key': api_key}
conn.request("GET", "/v3/algorand/wallet", headers=headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def create_virtual_currency(api_key):
conn = http.client.HTTPSConnection("api-eu1.tatum.io")
payload = "{\"name\":\"VC_ZAR\",\"supply\":\"1000000000\",\"basePair\":\"ZAR\",\"baseRate\":1,\"customer\":{\"accountingCurrency\":\"ZAR\",\"customerCountry\":\"SA\",\"externalId\":\"123654\",\"providerCountry\":\"SA\"},\"description\":\"Mbongo Virtual Currency.\",\"accountCode\":\"Main_Account\",\"accountNumber\":\"1234567890\",\"accountingCurrency\":\"ZAR\"}"
headers = {
'content-type': "application/json",
'x-api-key': api_key
}
conn.request("POST", "/v3/ledger/virtualCurrency", payload, headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def get_virtual_currency(api_key):
conn = http.client.HTTPSConnection("api-eu1.tatum.io")
headers = { 'x-api-key': api_key }
conn.request("GET", "/v3/ledger/virtualCurrency/VC_ZAR", headers=headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
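# Usage sketch (assumption: a valid Tatum API key is assigned to `key` before calling
# initialize_wallet(); the values shown are placeholders):
#
#   wallet = MainWallet()
#   wallet.key = "<api-key>"
#   wallet.initialize_wallet()
#   print(wallet.address, wallet.main_account_id)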
| 2.75
| 3
|
examples/scripts/compare_lithium_ion_particle_distribution.py
|
katiezzzzz/PyBaMM
| 1
|
12778566
|
#
# Compare lithium-ion battery models with and without particle size distribution
#
import numpy as np
import pybamm
pybamm.set_logging_level("INFO")
# load models
models = [
pybamm.lithium_ion.DFN(name="standard DFN"),
pybamm.lithium_ion.DFN(name="particle DFN"),
]
# load parameter values
params = [models[0].default_parameter_values, models[1].default_parameter_values]
def negative_distribution(x):
return 1 + 2 * x / models[1].param.l_n
def positive_distribution(x):
return 1 + 2 * (1 - x) / models[1].param.l_p
params[1]["Negative particle distribution in x"] = negative_distribution
params[1]["Positive particle distribution in x"] = positive_distribution
# set up and solve simulations
t_eval = np.linspace(0, 3600, 100)
sols = []
for model, param in zip(models, params):
sim = pybamm.Simulation(model, parameter_values=param)
sol = sim.solve(t_eval)
sols.append(sol)
output_variables = [
"Negative particle surface concentration",
"Electrolyte concentration",
"Positive particle surface concentration",
"Current [A]",
"Negative electrode potential [V]",
"Electrolyte potential [V]",
"Positive electrode potential [V]",
"Terminal voltage [V]",
"Negative particle distribution in x",
"Positive particle distribution in x",
]
# plot
plot = pybamm.QuickPlot(sols, output_variables=output_variables)
plot.dynamic_plot()
| 2.640625
| 3
|
Adapters.indigoPlugin/Contents/Server Plugin/pyrescaler/__init__.py
|
jdhorne/temperature-converter-indigo-plugin
| 1
|
12778567
|
<reponame>jdhorne/temperature-converter-indigo-plugin<filename>Adapters.indigoPlugin/Contents/Server Plugin/pyrescaler/__init__.py<gh_stars>1-10
__all__ = ["pyrescaler", "temperature_scale", "length_scale", "power_scale"]
| 1.335938
| 1
|
experiments/launcher_exp2_collect.py
|
MenshovSergey/DetectChess
| 144
|
12778568
|
<filename>experiments/launcher_exp2_collect.py
import os
import pandas as pd
from os2d.utils.logger import extract_value_from_os2d_binary_log, mAP_percent_to_points
if __name__ == "__main__":
config_path = os.path.dirname(os.path.abspath(__file__))
config_job_name = "exp2"
log_path = os.path.abspath(os.path.join(config_path, "..", "output/exp2"))
def get_result(job_type, # "v1" or "v2"
sub_index,
backbone_arch,
init_model_nickname,
random_seed,
):
job_name = f"{config_job_name}.{sub_index}.{job_type}_seed{random_seed}"
log_folder = job_name + "_" + backbone_arch + "_init_" + init_model_nickname
log_folder = os.path.join(log_path, log_folder)
data_file = os.path.join(log_folder, "train_log.pkl")
return mAP_percent_to_points(extract_value_from_os2d_binary_log(data_file, "mAP@0.50_grozi-val-new-cl", reduce="max")),\
mAP_percent_to_points(extract_value_from_os2d_binary_log(data_file, "mAP@0.50_grozi-val-new-cl", reduce="first"))
table = pd.DataFrame(columns=["arch", "init", "v1-train", "v2-init", "v2-train"])
random_seed = 0
for i, arch, init in zip(range(10),
["ResNet50"] * 5 + ["ResNet101"] * 5,
["fromScratch", "imageNetPth", "imageNetCaffe2", "imageNetCaffe2GroupNorm", "cocoMaskrcnnFpn",
"imageNetPth", "imageNetCaffe2", "buildingsCirtorch", "cocoMaskrcnnFpn", "pascalWeakalign"]
):
val_train_v1, val_init_v1 = get_result("v1", i, arch, init, random_seed)
val_train_v2, val_init_v2 = get_result("v2", i, arch, init, random_seed)
table = table.append({"arch":arch, "init":init,
"v1-train":val_train_v1, "v2-init":val_init_v2, "v2-train":val_train_v2},
ignore_index=True)
print(table, sep='\n')
| 2.109375
| 2
|
src/onevision/nn/model/debugger.py
|
phlong3105/onevision
| 2
|
12778569
|
<reponame>phlong3105/onevision
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Debugger to save results during training.
"""
from __future__ import annotations
import threading
from queue import Queue
from typing import Optional
from torch import Tensor
from onevision.type import Callable
from onevision.utils import console
__all__ = [
"Debugger"
]
# MARK: - Debugger
class Debugger:
"""
Attributes:
every_n_epochs (int):
Number of epochs between debugging. To disable, set
`every_n_epochs=0`. Default: `1`.
run_in_parallel (bool):
If `True`, runs the debugging process in a separate thread.
Default: `True`.
queue_size (int):
Debug queue size. It should equal the value of `save_max_n`.
Default: `20`.
save_max_n (int):
Maximum debugging items to be kept. Default: `20`.
save_to_subdir (bool):
Save all debug images of the same epoch to a sub-directory named
after the epoch number. Default: `True`.
image_quality (int):
Image quality to be saved. Default: `95`.
verbose (bool):
If `True` shows the results on the screen. Default: `False`.
show_max_n (int):
Maximum debugging items to be shown. Default: `8`.
show_func (FunCls, optional):
Function to visualize the debug results. Default: `None`.
wait_time (float):
Pause some times before showing the next image. Default: `0.001`.
"""
# MARK: Magic Functions
def __init__(
self,
every_n_epochs : int = 1,
run_in_parallel: bool = True,
queue_size : Optional[int] = 20,
save_max_n : int = 20,
save_to_subdir : bool = True,
image_quality : int = 95,
verbose : bool = False,
show_max_n : int = 8,
show_func : Optional[Callable] = None,
wait_time : float = 0.001,
*args, **kwargs
):
super().__init__()
self.every_n_epochs = every_n_epochs
self.run_in_parallel = run_in_parallel
self.queue_size = queue_size
self.save_max_n = save_max_n
self.save_to_subdir = save_to_subdir
self.image_quality = image_quality
self.verbose = verbose
self.show_max_n = show_max_n
self.show_func = show_func
self.wait_time = wait_time
self.debug_queue = None
self.thread_debugger = None
# self.init_thread()
# MARK: Configure
def init_thread(self):
if self.run_in_parallel:
self.debug_queue = Queue(maxsize=self.queue_size)
self.thread_debugger = threading.Thread(
target=self.show_results_parallel
)
# MARK: Run
def run(
self,
x : Optional[Tensor] = None,
y : Optional[Tensor] = None,
yhat : Optional[Tensor] = None,
filepath: Optional[str] = None,
):
"""Run the debugger process."""
if self.show_func:
if self.thread_debugger:
self.debug_queue.put([x, y, yhat, filepath])
else:
self.show_results(x=x, y=y, yhat=yhat, filepath=filepath)
def run_routine_start(self):
"""Perform operations when run routine starts."""
self.init_thread()
if self.thread_debugger and not self.thread_debugger.is_alive():
self.thread_debugger.start()
def run_routine_end(self):
"""Perform operations when run routine ends."""
if self.thread_debugger and self.thread_debugger.is_alive():
self.debug_queue.put([None, None, None, None])
def is_alive(self) -> bool:
"""Return whether the thread is alive."""
if self.thread_debugger:
return self.thread_debugger.is_alive()
return False
# MARK: Visualize
def show_results(
self,
x : Optional[Tensor] = None,
y : Optional[Tensor] = None,
yhat : Optional[Tensor] = None,
filepath: Optional[str] = None,
*args, **kwargs
):
self.show_func(
x = x,
y = y,
yhat = yhat,
filepath = filepath,
image_quality = self.image_quality,
verbose = self.verbose,
show_max_n = self.show_max_n,
wait_time = self.wait_time,
*args, **kwargs
)
def show_results_parallel(self):
"""Draw `result` in a separated thread."""
while True:
(input, target, pred, filepath) = self.debug_queue.get()
if input is None:
break
self.show_results(x=input, y=target, yhat=pred, filepath=filepath)
# Stop debugger thread
self.thread_debugger.join()
# MARK: Utils
def print(self):
console.log(vars(self))
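# Usage sketch (illustrative; `my_show_func` and the tensors are hypothetical, only the
# Debugger API defined above is assumed):
#
#   def my_show_func(x, y, yhat, filepath, image_quality, verbose, show_max_n, wait_time, *args, **kwargs):
#       ...  # visualize or save x / y / yhat
#
#   debugger = Debugger(every_n_epochs=1, run_in_parallel=True, show_func=my_show_func)
#   debugger.run_routine_start()   # starts the worker thread when run_in_parallel=True
#   debugger.run(x=inputs, y=targets, yhat=preds, filepath="debug/epoch_001.jpg")
#   debugger.run_routine_end()     # queues the stop sentinel for the worker thread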
| 2.390625
| 2
|
src/contact/views.py
|
hvpandey91/CuteCub-PlaySchool-Python-3-Django
| 0
|
12778570
|
from django.shortcuts import render
from django.core.mail import send_mail
from django.conf import settings
# from .forms import contactForms
# Create your views here.
def contact(request):
context = locals()
template = 'contact.html'
return render(request,template,context)
'''def contact(request):
title = 'Contact'
form = contactForms(request.POST or None)
confirm_message = None
if form.is_valid():
name = form.cleaned_data['name']
comment = form.cleaned_data['comment']
subject = 'message from MYSITE.com'
message = '%s %s' % (comment , name)
emailFrom = form.cleaned_data['email']
emailTo = [settings.EMAIL_HOST_USER]
send_mail(subject ,message,emailFrom,emailTo, fail_silently=True)
title = 'Thanks!'
confirm_message = 'Thanks for your message, we will get back to you!!'
form = None
context = {'title': title, 'form': form,'confirm_message': confirm_message,}
template = 'contact.html'
return render(request,template,context)'''
| 2.140625
| 2
|
scripts/positioning.py
|
metratec/ros_ips
| 3
|
12778571
|
#!/usr/bin/env python
"""
Use this node to perform indoor zone location using the metraTec IPS tracking system. The prerequisite for using this node
is a running receiver-node that handles communication with the receiver and thus with the beacons in the vicinity.
Also, make sure that you have defined your zones correctly in the YAML config file.
Subscribed topics:
- ips/receiver/raw (indoor_positioning/StringStamped):
Raw messages received by the UWB receiver
Published topics:
- ips/receiver/current_zone/name (indoor_positioning/StringStamped):
Name of the zone the receiver is currently in
- ips/receiver/current_zone/polygon (geometry_msgs/PolygonStamped):
Polygon comprising the current zone
- ips/receiver/zone_leave (indoor_positioning/StringStamped):
Name of the zone that the receiver has left. Is published at the moment a zone-leave occurs
- ips/receiver/zone_enter (indoor_positioning/StringStamped):
Name of the zone that the receiver has entered. Is published at the moment a zone-enter occurs
Parameters:
- ~config_file (string, default='PKG_DIR/config/zones.yml'):
Path to the configuration file of zones and beacons relative to the package directory
- ~rate (double, default=1):
The publishing rate in messages per second
- ~bcn_len (int, default=2*number_of_beacons):
Buffer length for BCN messages
"""
import rospy
import os
import rospkg
from geometry_msgs.msg import PolygonStamped, Point32
from indoor_positioning.msg import StringStamped
from indoor_positioning.positioning import Positioning
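# Example invocation (a sketch only, assuming the standard ROS private-parameter syntax;
# the package and script names are taken from this repository, the parameter values are
# arbitrary and documented in the module docstring above):
#
#   rosrun indoor_positioning positioning.py _config_file:=config/zones.yml _rate:=2 _bcn_len:=10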
class IPS:
"""Configure ROS node for metraTec IPS indoor positioning system for zone location."""
def __init__(self):
# subscribe to raw messages from USB stick
self.receiver_sub = rospy.Subscriber('ips/receiver/raw', StringStamped, self.callback)
# get directory of config file
config_dir = rospy.get_param('~config_file') if rospy.has_param('~config_file') else 'config/zones.yml'
abs_dir = os.path.join(rospkg.RosPack().get_path('indoor_positioning'), config_dir)
# initialize positioning class
self.positioning = Positioning(abs_dir)
# get number of beacons specified in zones.yml file for default buffer values
n_beacons = self.positioning.n_beacons
# number of messages to keep
self.buffer_length = rospy.get_param('~bcn_len') if rospy.has_param('~bcn_len') else 2*n_beacons
self.buffer_length = 2*n_beacons if self.buffer_length == -1 else self.buffer_length
# list of incoming messages
self.msg_buffer = []
# timestamp from last received message
self.last_time = None
# publishers
# current zone name
self.zone_name_pub = rospy.Publisher('ips/receiver/current_zone/name', StringStamped, queue_size=1)
# polygon of current zone
self.zone_polygon_pub = rospy.Publisher('ips/receiver/current_zone/polygon', PolygonStamped, queue_size=1)
# zone leave event
self.zone_leave_pub = rospy.Publisher('ips/receiver/zone_leave', StringStamped, queue_size=10)
# zone enter event
self.zone_enter_pub = rospy.Publisher('ips/receiver/zone_enter', StringStamped, queue_size=10)
# set publishing rate
self.rate = rospy.Rate(rospy.get_param('~rate')) if rospy.has_param('~rate') else rospy.Rate(1)
def callback(self, msg):
"""
Append incoming messages to list of previous messages.
:param msg: String, message of subscribed topic
"""
# append message to buffer
self.msg_buffer.append(msg.data)
# save time of last raw signal
self.last_time = msg.header.stamp
# delete oldest message if buffer is full
if len(self.msg_buffer) > self.buffer_length:
del(self.msg_buffer[0])
def publish(self):
"""Publish zone information"""
# last zone that the receiver was in
last_zone = None
while not rospy.is_shutdown():
# get the current zone
zone = self.positioning.get_zone(self.msg_buffer) if self.msg_buffer else None
# check if zone change occurred
if zone != last_zone:
# publish zone change event
event = StringStamped()
event.header.stamp = self.last_time
# only zone leave
if zone is None:
event.data = last_zone.name
self.zone_leave_pub.publish(event)
# only zone enter
elif last_zone is None:
event.data = zone.name
self.zone_enter_pub.publish(event)
# leave one zone and enter another
else:
event.data = last_zone.name
self.zone_leave_pub.publish(event)
event.data = zone.name
self.zone_enter_pub.publish(event)
if zone is not None:
# publish zone name
name = StringStamped()
name.header.stamp = self.last_time
name.header.frame_id = zone.frame_id
name.data = zone.name
self.zone_name_pub.publish(name)
# publish zone polygon
polygon = PolygonStamped()
polygon.header.stamp = self.last_time
polygon.header.frame_id = zone.frame_id
points = []
for p in zone.polygon:
points.append(Point32(p[0], p[1], p[2]))
polygon.polygon.points = points
self.zone_polygon_pub.publish(polygon)
# set current zone to last zone
last_zone = zone
# wait to start next iteration
self.rate.sleep()
if __name__ == '__main__':
# start node
rospy.init_node('positioning', anonymous=False)
# initialize IPSReceiver class
ips = IPS()
try:
# publish receiver messages
ips.publish()
except rospy.ROSInterruptException:
pass
| 2.71875
| 3
|
networking/netmiko/main.py
|
maciej233/PYTHON
| 0
|
12778572
|
<reponame>maciej233/PYTHON
#!/home/maciej/environments/networking/bin/python python3
from netmiko import ConnectHandler
r1 = {'device_type': 'cisco_ios', 'host': '172.26.1.1', 'username': 'cisco', 'password': '<PASSWORD>'}
net_connect = ConnectHandler(**r1)
prompt = net_connect.find_prompt()
output_int = net_connect.send_command('show ip int brief')
print(prompt)
print(output_int)
r2 = {'device_type': 'cisco_ios', 'host': '172.26.2.1', 'username': 'cisco', 'password': '<PASSWORD>'}
net_connect2 = ConnectHandler(**r2)
output_r2 = net_connect2.send_config_set(['logging buffer 19999'])
print(output_r2)
| 2.453125
| 2
|
bradley_terry.py
|
BryanWBear/py_bradleyterry2
| 0
|
12778573
|
<filename>bradley_terry.py
from itertools import combinations
import pandas as pd
from helper import *
import statsmodels.api as sm
def counts_to_binomial(df):
upper = matrix_to_triangular(df, upper=True)
lower = matrix_to_triangular(df, upper=False)
return upper.join(lower, on=['row', 'col']).reset_index()
def btm(wins_df):
num_players = max(wins_df.col) + 1
design_matrix = get_design_matrix(wins_df)
y = wins_df.wins / (wins_df.wins + wins_df.losses)
design_matrix.columns = [f'e_{i}' for i in range(1, num_players)]
formula = build_formula(design_matrix.columns)
design_matrix.insert(0, 'y', y)
model = sm.formula.glm(formula, family=sm.families.Binomial(), data=design_matrix).fit()
return model
if __name__ == '__main__':
# duplicated Example 1.2 from Bradley-Terry models in R: The BradleyTerry2 package: https://pdfs.semanticscholar.org/9703/5a0ed0ab764f317cf90e1c0d0a9a527145aa.pdf
citations = pd.DataFrame({'Biometrika': [714, 33, 320, 284], 'CommStatist': [730, 425, 813, 276], 'JASA': [498, 68, 1072, 325], 'JRSSB': [221, 17, 142, 188]})
citations.index = ['Biometrika', 'CommStatist', 'JASA', 'JRSSB']
counts_df = counts_to_binomial(citations)
print(btm(counts_df).summary())
| 2.53125
| 3
|
codeforces/anirudhak47/1352/C.py
|
anirudhakulkarni/codes
| 3
|
12778574
|
# cook your dish here
for t in range(int(input())):
#n=input()
n,k=map(int,input().split())
if n!=2:
        temp = k
        total = k
        while temp >= n:
            total += temp // n
            temp = temp % n + temp // n
        if k == 1:
            print(1)
        else:
            print(total)
else:
print(2*k-1)
| 3.1875
| 3
|
src/card/types.py
|
Urumasi/tgc-server
| 0
|
12778575
|
from enum import Enum
class CardType(Enum):
NONE = 0
ARTIFACT = 1
BATTLEFIELD = 2
CREATURE = 3
EVENT = 4
EQUIPMENT = 5
HUMAN = CREATURE # Why does this exist...
class CardSubtype(Enum):
NONE = 0
ABOMINATION = 1
ANOMALY = 2
ARMOUR = 3
ATMOSPHERICS = 4
CAT = 5
CLOWN = 6
COMMANDER = 7
CONSTRUCT = 8
CORGI = 9
DOCTOR = 10
DRONE = 11
EMPLOYEE = 12
ENGINEER = 13
EVENT = 14
EXPLORER = 15
HUMAN = 16
INSTANT = 17
LIZARD = 18
MIME = 19
MINER = 20
MOTH = 21
OFFICER = 22
PLANT = 23
PLASMAMAN = 24
PRIMATE = 25
RABBIT = 26
ROBOT = 27
SCIENTIST = 28
SHUTTLE = 29
SILICON = 30
SLOTH = 31
SOLDIER = 32
SPIRIT = 33
SYNDICATE = 34
WEAPON = 35
WORKER = 36
XENOMORPH = 37
class CardFaction(Enum):
NONE = 0
CARGO = 1
COMMAND = 2
ENGINEERING = 3
MEDICAL = 4
SCIENCE = 5
SECURITY = 6
SERVICE = 7
SILICON = 8
SYNDICATE = 9
XENO = 10
class CardRarity(Enum):
NONE = 0
COMMON = 0
UNCOMMON = 1
RARE = 2
EPIC = 3
LEGENDARY = 4
MISPRINT = 5
class CardSet(Enum):
NONE = 0
CORESET2020 = 1
RESINFRONT = 2
class CardKeyword(Enum):
NONE = 0
ASIMOV = 1
BLOCKER = 2
CHANGELING = 3
CLOCKWORK = 4
DEADEYE = 5
FIRST_STRIKE = 6
FURY = 7
GRAYTIDE = 8
HIVEMIND = 9
HOLY = 10
IMMUNITY_BATTLEFIELD = 11
IMMUNITY_EFFECT = 12
IMMUNITY_ENGINEERING_CREATURE = 13
IMMUNITY_SPELL = 14
IMMUNITY_XENO_CREATURE = 15
SQUAD_TACTICS = 16
TAUNT = 17
| 2.828125
| 3
|
old-scripts/cluster-mgmt/bin/cho-failover.py
|
opennetworkinglab/spring-open
| 6
|
12778576
|
#! /usr/bin/env python
import json
import sys
import os
import re
from check_status import *
import time
basename=os.getenv("ONOS_CLUSTER_BASENAME")
operation=['switch all', 'onos stop 8', 'onos stop 7', 'onos stop 6', 'onos stop 5', 'onos start 5;onos start 6;onos start 7;onos start 8', 'switch local']
nr_controllers=[8, 7, 6, 5, 4, 8, 8]
wait1=30
wait2=60
def check_by_pingall():
buf = ""
cmd = "pingall-speedup.sh %s" % (flowdef)
result = os.popen(cmd).read()
buf += result
if re.search("fail 0", result):
return (0, buf)
else:
return (1, buf)
def link_change_core(op):
cmd = "dsh -w %s1 \"sudo ifconfig %s\"" % (basename, op)
os.popen(cmd)
print cmd
def check_flow_nmap():
buf = ""
buf += os.popen("date").read()
print "dump all flows from network map"
cmd = "dsh -w %s1 \"cd ONOS/web; ./get_flow.py all\"" % cluster_basename
buf += os.popen(cmd).read()
return (0, buf)
def check_flow_raw():
buf = ""
print "dump all flows from switches"
cmd = "dsh \"cd ONOS/scripts; ./showflow.sh\""
buf += os.popen(cmd).read()
return (0, buf)
def dump_json(url, filename):
f = open(filename, 'w')
buf = ""
command = "curl -s %s" % (url)
result = os.popen(command).read()
buf += json.dumps(json.loads(result), sort_keys = True, indent = 2)
f.write(buf)
f.close()
def dump_flowgetall(tag):
url="http://%s:%s/wm/flow/getall/json" % (RestIP, RestPort)
filename = "rest-flow-getall-log.%s.log" % tag
dump_json(url, filename)
def check_rest(tag):
url="http://%s:%s/wm/flow/getall/json" % (RestIP, RestPort)
filename = "rest-flow-getall-log.%s.log" % tag
dump_json(url, filename)
url="http://%s:%s/wm/core/topology/switches/all/json" % (RestIP, RestPort)
filename = "rest-sw-log.%s.log" % tag
dump_json(url, filename)
url = "http://%s:%s/wm/core/topology/links/json" % (RestIP, RestPort)
filename = "rest-link-log.%s.log" % tag
dump_json(url, filename)
url = "http://%s:%s/wm/registry/switches/json" % (RestIP, RestPort)
filename = "rest-reg-sw-log.%s.log" % tag
dump_json(url, filename)
url = "http://%s:%s/wm/registry/controllers/json" % (RestIP, RestPort)
filename = "rest-reg-ctrl-log.%s.log" % tag
dump_json(url, filename)
url = "http://%s:%s/wm/flow/getsummary/0/0/json" % (RestIP, RestPort)
filename = "rest-flow-getsummary-log.%s.log" % tag
dump_json(url, filename)
def check_and_log(tag):
global cur_nr_controllers
buf = ""
buf += "check by pingall\n"
(code, result) = check_by_pingall()
if code == 0:
buf += "ping success %s\n" % (result)
else:
buf += "pingall failed\n"
buf += "%s\n" % (result)
error = "error-log.%s.log" % tag
rawflow = "raw-flow-log.%s.log" % tag
ferror = open(error, 'w')
ferror.write(result)
fraw = open(rawflow,'w')
fraw.write(check_flow_raw()[1])
fraw.close()
check_rest(tag)
ferror.write(check_switch()[1])
ferror.write(check_link()[1])
ferror.write(check_switch_local()[1])
ferror.write(check_controllers(cur_nr_controllers)[1])
ferror.close()
return (code, buf)
def plog(string):
global logf
print string
logf.write(string+"\n")
if __name__ == "__main__":
global logf, cur_nr_controllers
argvs = sys.argv
if len(argvs) == 5:
log_filename = sys.argv[1]
flowdef = sys.argv[2]
wait1 = int(sys.argv[3])
wait2 = int(sys.argv[4])
else:
print "usage: %s log_filename flowdef_filename wait1 wait2" % sys.argv[0]
print " wait1: wait time (sec) to check ping after change"
print " wait2: additional wait time (sec) if the first check failed"
sys.exit(1)
logf = open(log_filename, 'w', 0)
plog("flow def: %s" % flowdef)
plog("wait1 : %d" % wait1)
plog("wait2 : %d" % wait2)
plog(check_switch()[1])
plog(check_link()[1])
plog(check_controllers(8)[1])
(code, result) = check_by_pingall()
plog(result)
print result
k = raw_input('hit any key>')
for cycle in range(1000):
for n, op in enumerate(operation):
plog("==== Cycle %d operation %d ====: %s" % (cycle, n, os.popen('date').read()))
# link_change_core(op)
os.popen(op)
plog(op)
cur_nr_controllers = nr_controllers[n]
plog("wait %d sec" % wait1)
time.sleep(wait1)
plog("check and log: %s" % os.popen('date').read())
tstart=int(time.time())
(code, result) = check_and_log("%d.%d.1" % (cycle,n))
plog(result)
plog("done: %s" % os.popen('date').read())
tend=int(time.time())
tdelta=tend-tstart
if not code == 0:
wait = max(0, wait2 - tdelta)
plog("took %d sec for check and log. wait another %d sec" % (tdelta, wait))
time.sleep(wait)
plog("check and log: %s" % os.popen('date').read())
(code, result) = check_and_log("%d.%d.2" % (cycle,n))
plog(result)
plog("done: %s" % os.popen('date').read())
if code == 0:
tag = "%d.%d.2" % (cycle,n)
dump_flowgetall(tag)
rawflow = "raw-flow-log.%s.log" % tag
fraw = open(rawflow,'w')
fraw.write(check_flow_raw()[1])
fraw.close()
logf.close()
| 2.28125
| 2
|
deconz-tool/deconz.py
|
JasperAlgra/ha-scripts
| 0
|
12778577
|
<filename>deconz-tool/deconz.py
#!/usr/bin/env python3
import getopt
import json
import logging
import os
import sys
import voluptuous as vol
import yaml
# from deconzapi import DeCONZAPI, DECONZ_TYPE_USEABLE, DECONZ_ATTR_TYPE
from deconzapi import *
#################################################################
# Manually get API information:
# curl -s http://HOST:PORT/api/APIKEY | jq .
#################################################################
CONFIGNAME = "deconz.yaml"
CONF_HOST = "host"
CONF_PORT = "port"
CONF_APIKEY = "apikey"
CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Optional(CONF_PORT, default=3080): int,
vol.Required(CONF_APIKEY): str,
},
extra=vol.ALLOW_EXTRA,
)
#################################################################
ARG_OPERATION = "operation"
ARG_OPTION1 = "option1"
OPERATION_CONFIG = "config"
OPERATION_DELETE = "delete"
OPERATION_LIST = "list"
OPERATION_OUTPUT = "output"
OPERATION_RENAME = "rename"
OPERATIONS = [OPERATION_CONFIG, OPERATION_LIST, OPERATION_OUTPUT, OPERATION_RENAME]
OPERATION_OUTPUT_TYPE = ["raw", "json"]
OPERATION_RENAME_TYPE = ["raw"]
HELP = """
Commands:
list - list all devices
rename - rename device
config - configure device
output raw - print devices in python format
output json - print devices in pretty JSON format
"""
#################################################################
logging.basicConfig(
level=logging.ERROR, format="%(asctime)s %(levelname)s: %(message)s"
)
LOGGER = logging.getLogger(__name__)
# LOGGER.propagate = False
#################################################################
def parseArg(args):
if len(sys.argv) == 1:
print("INFO: No parameter supplied")
print(HELP)
sys.exit(1)
# Get operation
args[ARG_OPERATION] = sys.argv[1].lower()
if args[ARG_OPERATION] not in OPERATIONS:
print("ERROR: Invalid operation")
print(HELP)
sys.exit(1)
if args[ARG_OPERATION] == OPERATION_OUTPUT:
if len(sys.argv) < 3:
print("ERROR: Not enough parameter")
print(HELP)
sys.exit(1)
args[ARG_OPTION1] = sys.argv[2].lower()
if args[ARG_OPTION1] not in OPERATION_OUTPUT_TYPE:
print("ERROR: Invalid output option")
print(HELP)
sys.exit(1)
if args[ARG_OPERATION] == OPERATION_RENAME:
if len(sys.argv) >= 3:
args[ARG_OPTION1] = sys.argv[2].lower()
            if args[ARG_OPTION1] not in OPERATION_RENAME_TYPE:
                print("ERROR: Invalid rename option")
print(HELP)
sys.exit(1)
else:
args[ARG_OPTION1] = ""
#################################################################
def readConfig():
config = None
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
name = f"{location}/{CONFIGNAME}"
try:
with open(name, "r") as f:
config = yaml.safe_load(f)
config = CONFIG_SCHEMA(config)
return config
except Exception as e:
LOGGER.error(
"Exception=%s Msg=%s", type(e).__name__, str(e), exc_info=True,
)
return None
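# Illustrative note (added): a minimal sketch of the expected deconz.yaml,
# based on CONFIG_SCHEMA above; the host and apikey values are placeholders.
#
#   host: 192.168.1.10
#   port: 3080
#   apikey: 0123456789ABCDEF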
#################################################################
def commandList(devices):
"""
model
reachable
lastupdated
battery
value(s)
"""
hdr = f'{"Type":7} {"Model":40} {"Name":30} {"Address":20} {"Reach":5} {"Last Updated (UTC)":19} {"Values":20}'
print(hdr)
print("".rjust(len(hdr), "-"))
for type in DECONZ_TYPE_USEABLE:
for dev in devices:
if dev[DECONZ_ATTR_TYPE] == type:
# Concat values
values = []
if DECONZ_ATTR_ON in dev:
values.append("On" if dev[DECONZ_ATTR_ON] == True else "Off")
for sensor in dev[DECONZ_SENSORS]:
for key, value in sensor[DECONZ_ATTR_VALUES].items():
values.append(value)
print(
f'{type:7} {dev[DECONZ_ATTR_MODEL][:40]:40} {dev[DECONZ_ATTR_NAME][:30]:30} {", ".join(dev[DECONZ_ATTR_ADDRESS]):20} {str(dev[DECONZ_ATTR_REACHABLE])[:5]:5} {str(dev[DECONZ_ATTR_LASTUPDATED])[:19]:19} {", ".join(values)}'
)
#################################################################
def commandOutput(type, devices):
# 0=raw, 1=json
if type == OPERATION_OUTPUT_TYPE[0]:
# print raw python object, it isn't json format
print(devices)
elif type == OPERATION_OUTPUT_TYPE[1]:
# print pretty json format
print(json.dumps(devices, indent=4, sort_keys=True))
#################################################################
def commandRename(api, devices):
"""Rename devices, there is logic between switches and sub sensors."""
output = []
hdr = f'{"Nr":2}. {"Type":7} {"Model":40} {"Name":30}'
output.append(hdr)
output.append("".rjust(len(hdr), "-"))
seqid = ["q"]
for dev in devices:
output.append(
f"{dev[DECONZ_ATTR_SEQID]:>2}. {dev[DECONZ_ATTR_TYPE]:7} {dev[DECONZ_ATTR_MODEL][:40]:40} {dev[DECONZ_ATTR_NAME][:30]:30}"
)
seqid.append(str(dev[DECONZ_ATTR_SEQID]))
while True:
print("\n".join(output))
r = input("Enter number or q: ")
r = r.rstrip()
# Check if we entered something valid
if r in seqid:
break
print(f"\nERROR: Invalid input '{r}' given!\n")
# if we got quit, terminate
if r == "q":
print("\nINFO: Quit\n")
sys.exit(1)
# Convert input to integer, should always work
seqid = int(r)
# Lets find our specific entry
for dev in devices:
if dev[DECONZ_ATTR_SEQID] == seqid:
break
# depending on the type, we need to do something
# switch=light + all sensors with suffix
    # sensor=all sensors (same name)
print("\nCurrent name(s):")
if dev[DECONZ_CATEGORY] == DECONZ_LIGHTS:
print("L" + dev[DECONZ_ATTR_ID], " ", dev[DECONZ_ATTR_NAME])
# Go through sensors
for sensor in dev[DECONZ_SENSORS]:
print("S" + sensor[DECONZ_ATTR_ID], " ", sensor[DECONZ_ATTR_NAME])
newname = input("Enter new name: ")
if newname == "":
print("\nINFO: Quit\n")
sys.exit(1)
# Ok, we got something, print and ask for permission
print("\nNew name(s):")
if dev[DECONZ_CATEGORY] == DECONZ_LIGHTS:
print("L" + dev[DECONZ_ATTR_ID], " ", newname)
# Go through sensors
for sensor in dev[DECONZ_SENSORS]:
type = ""
if dev[DECONZ_ATTR_TYPE] == DECONZ_TYPE_SWITCH:
type = sensor[DECONZ_ATTR_TYPE].replace("ZHA", " ")
if type == " Consumption":
type = " Energy"
print("S" + sensor[DECONZ_ATTR_ID], " ", newname + type)
while True:
r = input("Change? [Y/n]: ")
if r in ["Y", "y", ""]:
break
elif r in ["N", "n"]:
print("\nINFO: Quit\n")
sys.exit(1)
if dev[DECONZ_CATEGORY] == DECONZ_LIGHTS:
r = api.modify_light_name(dev[DECONZ_ATTR_ID], newname)
print("Renamed Switch L" + dev[DECONZ_ATTR_ID], "=", r)
# Go through sensors
for sensor in dev[DECONZ_SENSORS]:
type = ""
if dev[DECONZ_ATTR_TYPE] == DECONZ_TYPE_SWITCH:
type = sensor[DECONZ_ATTR_TYPE].replace("ZHA", " ")
if type == " Consumption":
type = " Energy"
r = api.modify_sensor_name(sensor[DECONZ_ATTR_ID], newname + type)
print("Renamed Switch S" + sensor[DECONZ_ATTR_ID], "=", r)
# print(json.dumps(dev, indent=4, sort_keys=True))
"""
ZHAConsumption -> Energy
ZHAPower -> Power
first gather list of devices to show
show a list of devices, then do input() to pick one?
if switch == rename also sub sensors + type
if sensor == rename all sensors (same name)
if other == don't know yet?
"""
#################################################################
def commandConfig(api, devices):
"""Configure options of devices, normally sensors like Philips Motion."""
# We create a new list of devices, because we are only interested
# in devices with a config
newdevices = []
seqid = 0
# Make a new list of sensors with configuration options
for dev in devices:
for sensor in dev[DECONZ_SENSORS]:
if len(sensor[DECONZ_ATTR_CONFIG]) > 0:
# add new seqid to our sensor
seqid += 1
sensor[DECONZ_ATTR_SEQID] = seqid
# also add type/model, it overrules some other data!
# Should maybe make this 'better'?
sensor[DECONZ_ATTR_TYPE] = dev[DECONZ_ATTR_TYPE]
sensor[DECONZ_ATTR_MODEL] = dev[DECONZ_ATTR_MODEL]
newdevices.append(sensor)
output = []
hdr = f'{"Nr":2}. {"Type":7} {"Model":40} {"Name":30} {"Config":50}'
output.append(hdr)
output.append("".rjust(len(hdr), "-"))
seqid = ["q"]
# Check sensors for configuration options
for dev in newdevices:
clist = []
# list config values
for config in dev[DECONZ_ATTR_CONFIG]:
# do not report sensitivitymax, because that is part of sensitivity
if config not in [DECONZ_CONFIG_SENSITIVITYMAX]:
clist.append(config)
# print(dev)
output.append(
f'{dev[DECONZ_ATTR_SEQID]:>2}. {dev[DECONZ_ATTR_TYPE]:7} {dev[DECONZ_ATTR_MODEL][:40]:40} {dev[DECONZ_ATTR_NAME][:30]:30} {", ".join(clist)}'
)
seqid.append(str(dev[DECONZ_ATTR_SEQID]))
while True:
print("\n".join(output))
r = input("Enter number or q: ")
r = r.rstrip()
# Check if we entered something valid
if r in seqid:
break
print(f"\nERROR: Invalid input '{r}' given!\n")
# if we got quit, terminate
if r == "q":
print("\nINFO: Quit\n")
sys.exit(1)
# Lets find our choice from the list
for dev in newdevices:
if str(dev[DECONZ_ATTR_SEQID]) == r:
break
# dev is the one we work with
configlist = []
id = 0
seqid = ["q"]
output = []
hdr = f'{"Nr":2}. {"Name":20} {"Value":20}'
output.append(hdr)
output.append("".rjust(len(hdr), "-"))
# sens max is a special variable
sensitivitymax = ""
for config in dev[DECONZ_ATTR_CONFIG]:
if config in [DECONZ_CONFIG_SENSITIVITYMAX]:
sensitivitymax = str(dev[DECONZ_ATTR_CONFIG][config])
else:
centry = {}
id += 1
centry[DECONZ_ATTR_SEQID] = id
centry[DECONZ_ATTR_TYPE] = config
centry[DECONZ_ATTR_VALUES] = dev[DECONZ_ATTR_CONFIG][config]
configlist.append(centry)
seqid.append(str(id))
# Currently we support int and bool
if type(centry[DECONZ_ATTR_VALUES]) not in [bool, int]:
print(
"ERROR: unsupported type %s (%s) ".format(
type(centry[DECONZ_ATTR_VALUES]), centry
)
)
sys.exit(1)
    # Only here can we create the output, because of sensitivitymax
    for config in configlist:
        # Currently we support int and bool (use the loop entry, not the last `centry` from above)
        if type(config[DECONZ_ATTR_VALUES]) is bool:
            val = f"{config[DECONZ_ATTR_VALUES]!s}"
        elif type(config[DECONZ_ATTR_VALUES]) is int:
            val = str(config[DECONZ_ATTR_VALUES])
        else:
            val = ""
if config[DECONZ_ATTR_TYPE] == DECONZ_CONFIG_SENSITIVITY:
val += f" (max: {sensitivitymax})"
output.append(
f"{config[DECONZ_ATTR_SEQID]:>2}. {config[DECONZ_ATTR_TYPE]:20} {val:20}"
)
while True:
print("\n".join(output))
r = input("Enter number or q: ")
r = r.rstrip()
# Check if we entered something valid
if r in seqid:
break
print(f"\nERROR: Invalid input '{r}' given!\n")
# if we got quit, terminate
if r == "q":
print("\nINFO: Quit\n")
sys.exit(1)
# Find the to-be-modified entry
seqid = int(r)
for config in configlist:
if config[DECONZ_ATTR_SEQID] == seqid:
break
# We need to modify the value. Only integer and boolean supported
while True:
r = input("Enter new value: ")
r = r.rstrip()
if isinstance(config[DECONZ_ATTR_VALUES], bool):
if r.lower() in ["false", "0"]:
value = False
break
elif r.lower() in ["true", "1"]:
value = True
break
elif isinstance(config[DECONZ_ATTR_VALUES], int):
value = int(r)
break
r = api.modify_sensor_config(dev[DECONZ_ATTR_ID], config[DECONZ_ATTR_TYPE], value)
if r is not None:
# Response is an array
print("INFO: Config change - {}".format(r))
else:
print("ERROR: Response is empty?")
#################################################################
def Main():
configInfo = readConfig()
args = {}
parseArg(args)
LOGGER.debug("Args: %s", args)
# Get DeCONZ API
api = DeCONZAPI(
configInfo[CONF_HOST], configInfo[CONF_PORT], configInfo[CONF_APIKEY]
)
# Get all devices, internally formatted
devices = api.get_all_devices()
    if devices is None:
        print("ERROR: DeCONZ API returned None?")
        sys.exit(1)
if args[ARG_OPERATION] == OPERATION_LIST:
commandList(devices)
if args[ARG_OPERATION] == OPERATION_OUTPUT:
commandOutput(args[ARG_OPTION1], devices)
if args[ARG_OPERATION] == OPERATION_RENAME:
commandRename(api, devices)
if args[ARG_OPERATION] == OPERATION_CONFIG:
commandConfig(api, devices)
"""
if ARGS["operation"] == "LIST":
if "lights" in config:
deconzlights = await Light_GetInfo(config["lights"], config["sensors"])
await Light_Print(deconzlights)
else:
raise Exception("Can not find 'lights' in DeCONZ configuration")
elif ARGS["operation"] == "MODIFY":
if ARGS["field"] == "NAME":
deconzlights = await Light_GetInfo(config["lights"], config["sensors"])
await Light_Modify_Name(api, deconzlights, ARGS["id"], ARGS["value"])
else:
print("ERROR: Modify, unknown field")
sys.exit(1)
deconzapi.DECONZ_TYPE_USEABLE
make modify user-input, it isn't something we use regularly?
https://www.w3schools.com/python/ref_func_input.asp
"""
#################################################################
if __name__ == "__main__":
Main()
# End
| 1.804688
| 2
|
sourcerer/base.py
|
LISTERINE/sourcerer
| 0
|
12778578
|
#!env/bin/python
import re
from numbers import Number
class Statement(object):
""" A line of code
A Statement is a line of code that may or may not have a child Scope
"""
def __init__(self, code='', scope=None, whitespace='', line_ending=''):
"""
self.code is the actual line of code that this object represents.
self.scope (if any) is the child scope of this statement. eg.
self.code -> while 1:
| do thing 1
self.scope ->| do thing 2
| do thing 3
        Args:
            code (str): The code that will be represented
            scope (list): Optional list of child Statements
            whitespace (str): The indentation unit used when rendering child scopes
            line_ending (str): Appended to each rendered line
        """
self.code = code
self.scope = scope if scope is not None else []
self.whitespace = whitespace if whitespace else ' ' * 4
self.line_ending = line_ending
def __iter__(self):
renderer = self.build_renderer()
for node in renderer:
yield node
    def __str__(self):
        return self.line_ending.join(self)
    def __repr__(self):
        return self.line_ending.join(self)
def add_child(self, child):
""" Append a child to this statements scope
Args:
child (Statement): The child to append
"""
if isinstance(child, Statement):
self.scope.append(child)
else:
raise Exception("child " + child + " is not a Statement")
def add_children(self, child_list):
""" A convenience function to load multiple children into this scope
Args:
child_list (list): a list of children to append to this objects scope
"""
for child in child_list:
if isinstance(child, list):
self.add_children(child)
else:
self.add_child(child)
def create_lineage(self, lineage):
""" Create a hierarchical lineage of children with this object as the oldest member. eg.
children = [c1, [c2.1, c2.2], c3]
result -- self (top level)
|-> c1
|-> c2.1
c2.2
|-> c3
"""
current_node = self
for child in lineage:
if child:
if isinstance(child, list):
current_node.add_children(child)
current_node = child[-1]
else:
current_node.add_child(child)
current_node = child
def from_parent(self, parent):
""" Append this statement to the scope of a parent statement
Args:
parent (Statement): The parent to append this statement to
"""
if isinstance(parent, Statement):
parent.scope.append(self)
else:
raise Exception("parent " + parent + " is not a Statement")
def from_lineage(self, lineage):
""" Create a hierarchical list of parents with this object as the youngest decentant
Lineage will be arranged in the order the parents are provided. eg.
parents = [p1, p2, p3]
result -- p1 (top level)
|->p2
|->p3
|->self
In this example the top level parent would be at scope level n+1 and
this object would be at scope level n+4.
Args:
lineage (list): a hierarchical list of parents for this object.
"""
current_node = self
lineage.reverse()
for parent in lineage:
current_node.from_parent(parent)
current_node = parent
def format(self):
""" Abstract function to format properties this object """
pass
def generate(self):
""" Abstract function to set self.code """
pass
def build_renderer(self, *args, **kwargs):
""" Overwrite this function to customize how render should be called
for particular objects
"""
return self.render(*args, **kwargs)
def render(self, level=-1, increment=1):
""" Return this statement and all children recursively
Args:
level (int): The indentation level to set for this statement
increment (int): The number of indentation levels for the child scope
"""
self.format()
self.generate()
yield "{}{}{}".format(self.whitespace * level, self.code,
self.line_ending)
for child in self.scope:
#child.format()
#child.generate()
child_renderer = child.build_renderer(level=level + increment)
while 1:
try:
yield next(child_renderer)
except StopIteration:
break
@staticmethod
def to_statement(item):
""" Convert a string to a Statement
        If the argument passed is already a Statement or Statement derivative, return it unaltered
        If the argument passed is a non-Statement, return the input wrapped in a Statement
Args:
item (str, Statement): The object to be converted to a Statement
"""
if isinstance(item, Statement):
return item
else:
return Statement(item)
@staticmethod
def make_valid(name):
""" Convert a string to a valid python name
Args:
name (string): The name to be converted
"""
name = re.sub('[^0-9a-zA-Z_]', '', str(name)) # Remove invalid characters
name = re.sub('^[^a-zA-Z_]+', '', str(name)) # Remove leading characters until we find a letter or underscore
return name
class Name(Statement):
""" A variable/function/class/... name
n = Name("helloworld") -> (unquoted) helloworld
NOTE:
Unless the validate flag is set to False, this will reformat a name if
it is not a proper python variable name
n = Name("123*helloworld") -> (unquoted) helloworld
Args:
validate (bool): Should name be converted to valid python variable name?
"""
def __init__(self, code, validate=True, *args, **kwargs):
super(Name, self).__init__(*args, **kwargs)
self.line_ending = ''
self.code = self.make_valid(code) if validate else code
class Str(Statement):
""" A quoted string
Args:
single (bool): Should use single quotes?
s = Str("hello world") -> literal 'hello world'
"""
def __init__(self, code, single=False, *args, **kwargs):
super(Str, self).__init__(*args, **kwargs)
self.line_ending = ''
self.code = self.string_args(code, single)
def string_args(self, args, single):
""" Apply single or double quotes to the strings provided.
Args:
args (string): A string to be quoted
single (bool): Use single quotes rather than double
"""
self.base = '"{}"'
self.quote_type = '"'
if single:
self.base = "'{}'"
self.quote_type = "'"
escaped = "\\{}".format(self.quote_type)
if isinstance(args, Str):
args = str(args).strip(args.quote_type)
if isinstance(args, str) or isinstance(args, Statement):
args = str(args).replace(self.quote_type, escaped)
return self.base.format(args)
class Num(Statement):
""" A number. Signed Int, Float, Long, whatever.
n = Num("4") -> 4
n = Num(4) -> 4
n = Num(4.0) -> 4.0
"""
def __init__(self, code, *args, **kwargs):
super(Num, self).__init__(*args, **kwargs)
self.line_ending = ''
self.code = str(code)
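# --- Usage sketch (added for illustration; not part of the original module) ---
# Builds a tiny Statement tree and prints its rendered form; the identifiers in
# the generated code are hypothetical and only demonstrate add_child/render.
if __name__ == '__main__':
    loop = Statement('for item in items:', line_ending='\n')
    loop.add_child(Statement('print(item)', line_ending='\n'))
    print(str(loop))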
| 4.3125
| 4
|
3-2D-Array/7/2d-array-advanced.py
|
xuxpp/Python-3-exercise
| 0
|
12778579
|
<filename>3-2D-Array/7/2d-array-advanced.py
def get_fibonaccis(cnt):
l = [0, 1]
    for _ in range(cnt-2): # Deduct initial 2 numbers
l.append(l[-1] + l[-2])
return l
def is_perfect(n):
return sum([ x for x in range(1, n) if n % x == 0 ]) == n
def get_non_perfect_nums(cnt):
l = []
n = 1
while len(l) != cnt:
if not is_perfect(n):
l.append(n)
n += 1
return l
# Start of the execution
length = 21
l = []
l.append(get_non_perfect_nums(length))
l.append(get_fibonaccis(length))
l.append([ n1 * n2 for n1, n2 in zip(l[0], l[1]) ])
print(l)
| 3.75
| 4
|
orion/app.py
|
brian123zx/orion-server
| 120
|
12778580
|
from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS
from raven.contrib.flask import Sentry
from orion.context import Context
from orion.handlers import handler_classes
def init_app(app):
"""
    Statefully initialize the Flask application. This involves creating a server-side application
context and adding route definitions for all endpoint handlers.
:param app: Uninitialized Flask application instance.
:return: Server-side application context.
"""
ctx = Context(app)
CORS(app, supports_credentials=True, origins=[ctx.config.get_value('frontend_url')])
sentry_dsn = ctx.config.get_value('sentry_dsn')
if sentry_dsn:
Sentry(dsn=sentry_dsn).init_app(app)
def map_handler_func(HandlerClass):
"""
Create all necessary params for adding this route to the Flask server.
:param HandlerClass: Handler class to prepare.
:return: A tuple of (path, name, view_func, methods) for this handler.
"""
def handler_wrapper(*args, **kwargs):
# Provide an abstraction for supplying the handler with request JSON.
data = request.get_json(force=True, silent=True) or {}
handler = HandlerClass(ctx, data)
resp_json, status = handler.run(*args, **kwargs)
return jsonify(resp_json), status
return HandlerClass.path, HandlerClass.__name__, handler_wrapper, HandlerClass.methods
for rule, endpoint, view_func, methods in map(map_handler_func, handler_classes):
app.add_url_rule(
rule=rule,
endpoint=endpoint,
view_func=view_func,
methods=methods,
)
return ctx
def create_app():
"""
Create a fully initialized Flask application instance for this server.
:return: The initialized Flask application instance.
"""
app = Flask('orion')
ctx = init_app(app)
app.ctx = ctx
return app
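# Illustrative sketch (added; not part of the original module): the minimal
# interface that map_handler_func assumes of each entry in handler_classes.
# The class below is hypothetical and only shows the expected attributes and
# the (response_json, status) contract of run().
#
#   class PingHandler:
#       path = '/ping'
#       methods = ['GET']
#
#       def __init__(self, ctx, data):
#           self.ctx = ctx
#           self.data = data
#
#       def run(self):
#           return {'pong': True}, 200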
| 2.515625
| 3
|
plan/time_range.py
|
CodePeasants/pyplan
| 0
|
12778581
|
<reponame>CodePeasants/pyplan<filename>plan/time_range.py
# Python standard lib
from datetime import datetime
# Package
from plan.serializable import Serializable
from plan.settings import TIME_ZONE
class TimeRange(Serializable):
def __init__(self, start=None, end=None):
if start is None:
start = datetime.now(TIME_ZONE)
if end is None:
end = start
if not all(isinstance(x, datetime) for x in [start, end]):
raise TypeError('Must provide datetime objects!')
if start > end:
raise ValueError('Cannot have a start time greater than end time!')
if not all(x.tzinfo for x in [start, end]):
raise ValueError('datetime objects must have time zone info!')
self.start = start
self.end = end
def __contains__(self, item):
if isinstance(item, datetime):
return self.start <= item <= self.end
elif isinstance(item, self.__class__):
return item.start >= self.start and item.end <= self.end
else:
raise TypeError('Invalid type!')
def __eq__(self, other):
if isinstance(other, datetime):
return other == self.start and other == self.end
elif isinstance(other, self.__class__):
return other.start == self.start and other.end == self.end
else:
raise TypeError('Invalid type!')
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
if isinstance(other, self.__class__):
return self.start > other.end
elif isinstance(other, datetime):
return self.start > other
else:
raise TypeError(f'Cannot compare {self.__class__.__name__} to {type(other)}.')
    def __ge__(self, other):
        if isinstance(other, self.__class__):
            return self.start >= other.end
        elif isinstance(other, datetime):
            return self.start >= other
        else:
            raise TypeError(f'Cannot compare {self.__class__.__name__} to {type(other)}.')
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.end < other.start
elif isinstance(other, datetime):
return self.end < other
else:
raise TypeError(f'Cannot compare {self.__class__.__name__} to {type(other)}.')
def __le__(self, other):
if isinstance(other, self.__class__):
return self.end <= other.start
elif isinstance(other, datetime):
return self.end <= other
else:
raise TypeError(f'Cannot compare {self.__class__.__name__} to {type(other)}.')
def astimezone(self, time_zone):
start = self.start.astimezone(time_zone)
end = self.end.astimezone(time_zone)
return self.__class__(start, end)
@classmethod
def now(cls, time_zone=None):
"""Returns a TimeRange object for the current time."""
current_time = datetime.now(time_zone)
if not time_zone:
current_time = TIME_ZONE.localize(current_time)
return cls(current_time)
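# Usage sketch (added for illustration; not part of the original module):
# containment and ordering follow the comparison operators defined above.
#
#   from datetime import timedelta
#   start = datetime.now(TIME_ZONE)
#   rng = TimeRange(start, start + timedelta(hours=1))
#   start + timedelta(minutes=30) in rng    # True
#   TimeRange(start, start) in rng          # True, ranges nest via __contains__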
| 2.84375
| 3
|
src/train.py
|
lotusxai/join-house-prices-solutions-project
| 1
|
12778582
|
<gh_stars>1-10
import numpy as np
import pandas as pd
import predict
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
import time
from datetime import datetime
import warnings
import os
warnings.filterwarnings('ignore')
# ML libraries
import lightgbm as lgb
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
le = preprocessing.LabelEncoder()
world_population_dir = os.environ.get("WORLD_POPULATION")
# details adding into columns
def calculate_trend(df, lag_list, column):
for lag in lag_list:
trend_column_lag = "Trend_" + column + "_" + str(lag)
df[trend_column_lag] = (df[column]-df[column].shift(lag, fill_value=-999))/df[column].shift(lag, fill_value=0)
return df
def calculate_lag(df, lag_list, column):
for lag in lag_list:
column_lag = column + "_" + str(lag)
df[column_lag] = df[column].shift(lag, fill_value=0)
return df
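# Illustrative note (added): for column 'ConfirmedCases' with lag 1, calculate_lag
# adds 'ConfirmedCases_1' (the value shifted down by one row) and calculate_trend
# adds 'Trend_ConfirmedCases_1', roughly (x_t - x_{t-1}) / x_{t-1}; rows with no
# predecessor fall back to the fill_value arguments above.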
def addingColumns(train, test):
# Merge train and test, exclude overlap
dates_overlap = ['2020-03-12','2020-03-13','2020-03-14','2020-03-15','2020-03-16','2020-03-17','2020-03-18',
'2020-03-19','2020-03-20','2020-03-21','2020-03-22','2020-03-23', '2020-03-24']
train2 = train.loc[~train['Date'].isin(dates_overlap)]
all_data = pd.concat([train2, test], axis = 0, sort=False)
# Double check that there are no informed ConfirmedCases and Fatalities after 2020-03-11
all_data.loc[all_data['Date'] >= '2020-03-12', 'ConfirmedCases'] = np.nan
all_data.loc[all_data['Date'] >= '2020-03-12', 'Fatalities'] = np.nan
all_data['Date'] = pd.to_datetime(all_data['Date'])
# Create date columns
# le = preprocessing.LabelEncoder()
all_data['Day_num'] = le.fit_transform(all_data.Date)
all_data['Day'] = all_data['Date'].dt.day
all_data['Month'] = all_data['Date'].dt.month
all_data['Year'] = all_data['Date'].dt.year
# Fill null values given that we merged train-test datasets
all_data['Province/State'].fillna("None", inplace=True)
all_data['ConfirmedCases'].fillna(0, inplace=True)
all_data['Fatalities'].fillna(0, inplace=True)
all_data['Id'].fillna(-1, inplace=True)
all_data['ForecastId'].fillna(-1, inplace=True)
# Aruba has no Lat nor Long. Inform it manually
all_data.loc[all_data['Lat'].isna()==True, 'Lat'] = 12.510052
all_data.loc[all_data['Long'].isna()==True, 'Long'] = -70.009354
return all_data
def addingWolrd(all_data):
ts = time.time()
all_data = calculate_lag(all_data, range(1,7), 'ConfirmedCases')
all_data = calculate_lag(all_data, range(1,7), 'Fatalities')
all_data = calculate_trend(all_data, range(1,7), 'ConfirmedCases')
all_data = calculate_trend(all_data, range(1,7), 'Fatalities')
all_data.replace([np.inf, -np.inf], 0, inplace=True)
all_data.fillna(0, inplace=True)
print("Time spent: ", time.time()-ts)
# Load countries data file
world_population = pd.read_csv(world_population_dir)
# Select desired columns and rename some of them
world_population = world_population[['Country (or dependency)', 'Population (2020)', 'Density (P/Km²)', 'Land Area (Km²)', 'Med. Age', 'Urban Pop %']]
world_population.columns = ['Country (or dependency)', 'Population (2020)', 'Density', 'Land Area', 'Med Age', 'Urban Pop']
# Replace United States by US
world_population.loc[world_population['Country (or dependency)']=='United States', 'Country (or dependency)'] = 'US'
# Remove the % character from Urban Pop values
world_population['Urban Pop'] = world_population['Urban Pop'].str.rstrip('%')
# Replace Urban Pop and Med Age "N.A" by their respective modes, then transform to int
world_population.loc[world_population['Urban Pop']=='N.A.', 'Urban Pop'] = int(world_population.loc[world_population['Urban Pop']!='N.A.', 'Urban Pop'].mode()[0])
world_population['Urban Pop'] = world_population['Urban Pop'].astype('int16')
world_population.loc[world_population['Med Age']=='N.A.', 'Med Age'] = int(world_population.loc[world_population['Med Age']!='N.A.', 'Med Age'].mode()[0])
world_population['Med Age'] = world_population['Med Age'].astype('int16')
all_data = all_data.merge(world_population, left_on='Country/Region', right_on='Country (or dependency)', how='left')
all_data[['Population (2020)', 'Density', 'Land Area', 'Med Age', 'Urban Pop']] = all_data[['Population (2020)', 'Density', 'Land Area', 'Med Age', 'Urban Pop']].fillna(0)
# Label encode countries and provinces. Save dictionary for exploration purposes
all_data.drop('Country (or dependency)', inplace=True, axis=1)
all_data['Country/Region'] = le.fit_transform(all_data['Country/Region'])
number_c = all_data['Country/Region']
countries = le.inverse_transform(all_data['Country/Region'])
country_dict = dict(zip(countries, number_c))
all_data['Province/State'] = le.fit_transform(all_data['Province/State'])
number_p = all_data['Province/State']
province = le.inverse_transform(all_data['Province/State'])
province_dict = dict(zip(province, number_p))
data = all_data.copy()
features = ['Id', 'ForecastId', 'Country/Region', 'Province/State', 'ConfirmedCases', 'Fatalities',
'Day_num', 'Day', 'Month', 'Year', 'Long', 'Lat']
data = data[features]
# Apply log transformation to all ConfirmedCases and Fatalities columns, except for trends
data[['ConfirmedCases', 'Fatalities']] = data[['ConfirmedCases', 'Fatalities']].astype('float64')
data[['ConfirmedCases', 'Fatalities']] = data[['ConfirmedCases', 'Fatalities']].apply(lambda x: np.log(x))
# Replace infinites
data.replace([np.inf, -np.inf], 0, inplace=True)
return (data,country_dict,all_data)
# Split data into train/test
def split_data(data):
# Train set
x_train = data[data.ForecastId == -1].drop(['ConfirmedCases', 'Fatalities'], axis=1)
y_train_1 = data[data.ForecastId == -1]['ConfirmedCases']
y_train_2 = data[data.ForecastId == -1]['Fatalities']
# Test set
x_test = data[data.ForecastId != -1].drop(['ConfirmedCases', 'Fatalities'], axis=1)
# Clean Id columns and keep ForecastId as index
x_train.drop('Id', inplace=True, errors='ignore', axis=1)
x_train.drop('ForecastId', inplace=True, errors='ignore', axis=1)
x_test.drop('Id', inplace=True, errors='ignore', axis=1)
x_test.drop('ForecastId', inplace=True, errors='ignore', axis=1)
return x_train, y_train_1, y_train_2, x_test
# Linear regression model
def lin_reg(X_train, Y_train, X_test):
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(X_train, Y_train)
# Make predictions using the testing set
y_pred = regr.predict(X_test)
return regr, y_pred
# Submission function
def get_submission(df, target1, target2):
prediction_1 = df[target1]
prediction_2 = df[target2]
# Submit predictions
prediction_1 = [int(item) for item in list(map(round, prediction_1))]
prediction_2 = [int(item) for item in list(map(round, prediction_2))]
submission = pd.DataFrame({
"ForecastId": df['ForecastId'].astype('int32'),
"ConfirmedCases": prediction_1,
"Fatalities": prediction_2
})
submission.to_csv('submission.csv', index=False)
# if __name__ == "__main__":
# predict.main()
| 2.421875
| 2
|
Action/action_enum.py
|
arunimasundar/Supervised-Learning-of-Procedures
| 0
|
12778583
|
<gh_stars>0
from enum import Enum
class Actions(Enum):
"""
Actions enum
"""
# framewise_recognition.h5
# squat = 0
# stand = 1
# walk = 2
# wave = 3
# framewise_recognition_under_scene.h5
# stand = 0
# walk = 1
# operate = 2
# fall_down = 3
# run = 4
# squat
squat=0
jumpingjacks=1
pushups=2
# stand=3
highknees=3
# sidelunge=3
| 2.78125
| 3
|
tappmq/tappmq.py
|
isysd/tappmq
| 0
|
12778584
|
"""
A simple message queue for TAPPs using Redis.
"""
import json
import time
from sqlalchemy_models import create_session_engine, setup_database, util, exchange as em, user as um, wallet as wm
from tapp_config import setup_redis, get_config, setup_logging
def subscription_handler(channel, client, mykey=None, auth=False, multi=True):
"""
:param str channel: The channel to subscribe to.
:param client: A plugin manager client.
:param mykey: A bitjws public key to use in authenticating requests (unused)
:param bool auth: If true, authenticate all requests, otherwise assume plain json encoding.
:param multi: Process multiple results if True, otherwise return after 1
"""
while True:
message = client.red.rpop(channel)
if message is not None:
toprint = message if len(message) < 60 else message[:59]
client.logger.info("handling message %s..." % toprint)
if not auth:
# assume json encoding
mess = json.loads(message)
client.logger.debug("handling message:\n%s" % json.dumps(mess, indent=2))
if 'command' not in mess or not hasattr(client, mess['command']):
# nothing to do
pass
else:
try:
getattr(client, mess['command'])(**mess['data'])
except Exception as e:
client.logger.exception(e)
client.session.rollback()
client.session.flush()
else:
# TODO implement auth options
raise NotImplementedError("auth not supported yet.")
if not multi:
return
else:
time.sleep(0.01)
def publish(channel, command, data, key=None, auth=False, red=None):
"""
Publish a command to a redis channel.
:param channel: The channel to send the command to
:param command: The command name
:param data: The data to send (parameters)
:param key: The key to sign with (unused)
:param auth: If true, authenticate the message before sending (unused)
:param red: The StrictRedis client to use for redis communication
"""
if red is None:
red = setup_redis()
if not auth:
red.lpush(channel, json.dumps({'command': command, 'data': data}))
else:
# TODO implement auth options
raise NotImplementedError("auth not supported yet.")
def set_status(nam, status='loading', red=None):
if red is None:
red = setup_redis()
if status in ['loading', 'running', 'stopped']:
red.set("%s_status" % nam.lower(), status)
def get_status(nam, red=None):
if red is None:
red = setup_redis()
status = red.get("%s_status" % nam.lower())
return status if status is not None else 'stopped'
def get_running_workers(wlist, red=None):
"""
Search list for only the workers which return status 'running'.
:param wlist: The list of workers to search through.
:param red: The redis connection.
:return: The worker list filtered for status 'running'.
"""
if red is None:
red = setup_redis()
workers = []
for work in wlist:
if get_status(work, red=red) == 'running':
workers.append(work)
return workers
class MQHandlerBase(object):
"""
A parent class for Message Queue Handlers.
Plugins should inherit from this class, and overwrite all of the methods
that raise a NotImplementedError.
"""
NAME = 'Base'
KEY = 'PubKey'
_user = None
session = None
def __init__(self, key=None, secret=None, session=None, engine=None, red=None, cfg=None):
self.cfg = get_config(self.NAME.lower()) if cfg is None else cfg
self.key = key if key is not None else self.cfg.get(self.NAME.lower(), 'key')
self.secret = secret if secret is not None else self.cfg.get(self.NAME.lower(), 'secret')
self.session = session
self.engine = engine
self.red = red
self.logger = None
"""
Daemonization and process management section. Do not override.
"""
def setup_connections(self):
if self.session is None or self.engine is None:
self.session, self.engine = create_session_engine(cfg=self.cfg)
setup_database(self.engine, modules=[wm, em, um])
self.red = setup_redis() if self.red is None else self.red
def setup_logger(self):
self.logger = setup_logging(self.NAME.lower(), cfg=self.cfg)
def cleanup(self):
if self.session is not None:
self.session.close()
@property
def manager_user(self):
"""
Get the User associated with this plugin Manager.
This User is the owner of records for the plugin.
:rtype: User
:return: The Manager User
"""
if not self._user:
# try to get existing user
self._user = self.session.query(um.User).filter(um.User.username == '%sManager' % self.NAME) \
.first()
if not self._user:
# create a new user
userpubkey = self.cfg.get(self.NAME.lower(), 'userpubkey')
self._user = util.create_user('%sManager' % self.NAME, userpubkey, self.session)
return self._user
def run(self):
"""
Run this manager as a daemon. Subscribes to a redis channel matching self.NAME
and processes messages received there.
"""
set_status(self.NAME.lower(), 'loading', self.red)
self.setup_connections()
self.setup_logger()
self.logger.info("%s loading" % self.NAME)
set_status(self.NAME.lower(), 'running', self.red)
self.logger.info("%s running" % self.NAME)
subscription_handler(self.NAME.lower(), client=self)
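# Usage sketch (added for illustration; not part of the original module): a plugin
# subclasses MQHandlerBase, adds command methods, and calls run(); other processes
# queue work with publish(). 'EchoManager' and its 'echo' command are hypothetical.
#
#   class EchoManager(MQHandlerBase):
#       NAME = 'Echo'
#
#       def echo(self, text):
#           self.logger.info("echo: %s", text)
#
#   publish('echo', 'echo', {'text': 'hello'})
#   # subscription_handler pops the message and calls EchoManager.echo(text='hello')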
| 2.78125
| 3
|
saleor/graphql/order/mutations/fulfillment_refund_and_return_product_base.py
|
eanknd/saleor
| 1,392
|
12778585
|
from typing import Optional
import graphene
from django.core.exceptions import ValidationError
from ....giftcard.utils import order_has_gift_card_lines
from ....order import FulfillmentLineData
from ....order import models as order_models
from ....order.error_codes import OrderErrorCode
from ....order.fetch import OrderLineInfo
from ....payment.models import TransactionItem
from ...core.mutations import BaseMutation
from ..types import FulfillmentLine, OrderLine
class FulfillmentRefundAndReturnProductBase(BaseMutation):
class Meta:
abstract = True
@classmethod
def clean_order_payment(cls, payment, cleaned_input):
if not payment or not payment.can_refund():
raise ValidationError(
{
"order": ValidationError(
"Order cannot be refunded.",
code=OrderErrorCode.CANNOT_REFUND.value,
)
}
)
cleaned_input["payment"] = payment
@classmethod
def clean_amount_to_refund(
cls, order, amount_to_refund, charged_value, cleaned_input
):
if amount_to_refund is not None:
if order_has_gift_card_lines(order):
raise ValidationError(
{
"amount_to_refund": ValidationError(
(
"Cannot specified amount to refund when order has "
"gift card lines."
),
code=OrderErrorCode.CANNOT_REFUND.value,
)
}
)
if amount_to_refund > charged_value:
raise ValidationError(
{
"amount_to_refund": ValidationError(
(
"The amountToRefund is greater than the maximal "
"possible amount to refund."
),
code=OrderErrorCode.CANNOT_REFUND.value,
),
}
)
cleaned_input["amount_to_refund"] = amount_to_refund
@classmethod
def _raise_error_for_line(cls, msg, type, line_id, field_name, code=None):
line_global_id = graphene.Node.to_global_id(type, line_id)
if not code:
code = OrderErrorCode.INVALID_QUANTITY.value
raise ValidationError(
{
field_name: ValidationError(
msg,
code=code,
params={field_name: line_global_id},
)
}
)
@classmethod
def raise_error_for_payment_error(cls, transactions: Optional[TransactionItem]):
if transactions:
code = OrderErrorCode.MISSING_TRANSACTION_ACTION_REQUEST_WEBHOOK.value
msg = "No app or plugin is configured to handle payment action requests."
else:
msg = "The refund operation is not available yet."
code = OrderErrorCode.CANNOT_REFUND.value
raise ValidationError(
msg,
code=code,
)
@classmethod
def clean_fulfillment_lines(
cls, fulfillment_lines_data, cleaned_input, whitelisted_statuses
):
fulfillment_lines = cls.get_nodes_or_error(
[line["fulfillment_line_id"] for line in fulfillment_lines_data],
field="fulfillment_lines",
only_type=FulfillmentLine,
qs=order_models.FulfillmentLine.objects.prefetch_related(
"fulfillment", "order_line"
),
)
fulfillment_lines = list(fulfillment_lines)
cleaned_fulfillment_lines = []
for line, line_data in zip(fulfillment_lines, fulfillment_lines_data):
quantity = line_data["quantity"]
if line.order_line.is_gift_card:
cls._raise_error_for_line(
"Cannot refund or return gift card line.",
"FulfillmentLine",
line.pk,
"fulfillment_line_id",
OrderErrorCode.GIFT_CARD_LINE.value,
)
if line.quantity < quantity:
cls._raise_error_for_line(
"Provided quantity is bigger than quantity from "
"fulfillment line",
"FulfillmentLine",
line.pk,
"fulfillment_line_id",
)
if line.fulfillment.status not in whitelisted_statuses:
allowed_statuses_str = ", ".join(whitelisted_statuses)
cls._raise_error_for_line(
f"Unable to process action for fulfillmentLine with different "
f"status than {allowed_statuses_str}.",
"FulfillmentLine",
line.pk,
"fulfillment_line_id",
code=OrderErrorCode.INVALID.value,
)
replace = line_data.get("replace", False)
if replace and not line.order_line.variant_id:
cls._raise_error_for_line(
"Unable to replace line as the assigned product doesn't exist.",
"OrderLine",
line.pk,
"order_line_id",
)
cleaned_fulfillment_lines.append(
FulfillmentLineData(
line=line,
quantity=quantity,
replace=replace,
)
)
cleaned_input["fulfillment_lines"] = cleaned_fulfillment_lines
@classmethod
def clean_lines(cls, lines_data, cleaned_input):
order_lines = cls.get_nodes_or_error(
[line["order_line_id"] for line in lines_data],
field="order_lines",
only_type=OrderLine,
qs=order_models.OrderLine.objects.prefetch_related(
"fulfillment_lines__fulfillment", "variant", "allocations"
),
)
order_lines = list(order_lines)
cleaned_order_lines = []
for line, line_data in zip(order_lines, lines_data):
quantity = line_data["quantity"]
if line.is_gift_card:
cls._raise_error_for_line(
"Cannot refund or return gift card line.",
"OrderLine",
line.pk,
"order_line_id",
OrderErrorCode.GIFT_CARD_LINE.value,
)
if line.quantity < quantity:
cls._raise_error_for_line(
"Provided quantity is bigger than quantity from order line.",
"OrderLine",
line.pk,
"order_line_id",
)
quantity_ready_to_move = line.quantity_unfulfilled
if quantity_ready_to_move < quantity:
cls._raise_error_for_line(
"Provided quantity is bigger than unfulfilled quantity.",
"OrderLine",
line.pk,
"order_line_id",
)
variant = line.variant
replace = line_data.get("replace", False)
if replace and not line.variant_id:
cls._raise_error_for_line(
"Unable to replace line as the assigned product doesn't exist.",
"OrderLine",
line.pk,
"order_line_id",
)
cleaned_order_lines.append(
OrderLineInfo(
line=line, quantity=quantity, variant=variant, replace=replace
)
)
cleaned_input["order_lines"] = cleaned_order_lines
| 2.109375
| 2
|
ParamFit_27Jan.py
|
gshowalt/VirusPopModel
| 0
|
12778586
|
<filename>ParamFit_27Jan.py
# importing all modules
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib import cm
import matplotlib.tri as tri
from matplotlib.colors import LogNorm
import matplotlib.patches as mpatches
from matplotlib.ticker import LogFormatter
from labellines import labelLine, labelLines
from collections import Counter
from functools import wraps
import csv
import sys
import itertools
from itertools import islice, cycle, chain
import scipy as sp
from scipy.interpolate import griddata
from scipy import interpolate
from scipy.integrate import odeint
from scipy.stats import pareto
from scipy.stats import loguniform
import seaborn as sns
import pandas as pd
import statistics as stats
import lhsmdu
from math import nan
from SALib.sample import saltelli, latin, ff
from SALib.analyze import sobol
import random
# define the function which includes the differential equations
def f2(s,t, temp, beta, mu, phi, delta, gamma):
# first define the relative contact rate (RCR) and brine concentrating factor (BCF) by temp
if temp < -1:
RCR = 0.0716*temp**4 + 2.9311*temp**3 + 34.108*temp**2 + 45.826*temp + 3.5125 #Fit from Wells and Deming, 2006
BCF = -0.0106 * temp **2 - 0.519 * temp + 0.2977
sal = 32 * BCF
else:
RCR = 1
sal = 32
# scale adsorption rate by RCR to incorporate the sea ice
phi = phi * RCR
# SET PARAMETERS
alpha = (1.2e-7)*3**((temp-23)/10)#4.2e-7 at +8, or 1.2e-7 at lower temps, at -5 --> mu = 0.25/day = 0.01/hr = 1e-8
# alpha is a coefficient that we'd like to change with temperature? Or change eta?
#nutrient transfer coefficient to bacteria (ug/cell * hr)
Q = 0.022
#half saturation constant (ug/mL)
d = 1e-8
#constant of bacterial death (1/hr)
# leak, lyse efficiencies here:
g = 0.1
n = 0.99
#gamma is a lysogeny value
#gamma = 1 #-1/temp #*mu
# set up solution matrix
N = s[0]
B = s[1]
V = s[2]
#systems of equations below
dNdt = - alpha * (N / (N + Q)) * B + g * (alpha * (N/(N+Q))*B) + (n * 1e-7 * (gamma) * phi * V * B)
if N < 0:
N = 0
dBdt = (mu) * (N/(Q + N)) * B - gamma * phi * V * B - d*B
if B < 1:
B = 1
dVdt = gamma*beta * B * phi*V - phi * V * B - delta*V
if V < 1:
V = 1
#print (mu, beta, phi, gamma)
return [dNdt, dBdt, dVdt]
from scipy.stats import ks_2samp
# define runs, values for observed data
runs = 1
dfEric = pd.read_excel("ICEVBR_18Aug.xls")
Eric_VBR = dfEric.Virus/dfEric.Bacteria
RangelistB = []
RangelistM = []
RangelistD = []
RangelistBAvg = []
RangelistMAvg = []
RangelistDAvg = []
# in each run, we want to change the endpoints of the parameter distributions simultaneously
i = 0
j = 0
# the while loop runs until we find `runs` sets of parameters whose simulated VBR distribution
# cannot be rejected by a two-sample KS test against the real data (p-value threshold applied below)
while i < runs:
# define time, temperature scale
time = np.random.uniform(5000,10000)
temp_list = np.linspace(-12, -1, 8)
t = np.linspace(1,time,2000)
simVBR = []
    # we want to include a step that randomly creates a range of values, since we're looking not for an individual
    # value but instead for the right *range* of values.
betalo = np.random.uniform(1,1000)
betahi = np.random.uniform(betalo,1000)
mulo = np.random.uniform(-15,0)
muhi = np.random.uniform(mulo,0)
deltalo = np.random.uniform(-15,0)
deltahi = np.random.uniform(deltalo,0)
philo = np.random.uniform(-15,-1)
phihi = np.random.uniform(philo,-1)
#gammalo = np.random.uniform(0,1)
#gammahi = np.random.uniform(gammalo,1)
# after we get a random range of values, we want to sample within that range of values to produce
# a representative stretch of values to test if they actually reproduce the VBR.
problem = {
"num_vars" : 2,
"names" : ['mu', 'm'],
"bounds": [[mulo, muhi],
[deltalo, deltahi]],
"dists":['unif','unif']
}
param_values = saltelli.sample(problem,100,calc_second_order=True)
# scale the parameters properly
beta = 100 #param_values[:,0]
mu = 10**param_values[:,0]
delta = 10**param_values[:,1]
phi = 1e-10 #10**param_values[:,3]
gamma = 1
for temp in temp_list:
if temp < -1:
RCR = 0.0716*temp**4 + 2.9311*temp**3 + 34.108*temp**2 + 45.826*temp + 3.5125 #Fit from <NAME>, 2006
BCF = -0.0106*temp**2 - 0.519*temp + 0.2977
sal = 32 * BCF
else:
BCF = 1
sal = 32
nuts_init = 0.12 * BCF
bact_init = 1e4 * BCF
vir_init = 1e5 * BCF
s0 = [nuts_init, bact_init, vir_init]
for z in range(0,len(mu)):
solx = odeint(f2, s0, t, args = (temp, beta, mu[z], phi, delta[z], gamma))
nuts = solx[:,0]
bact = solx[:,1]
virus = solx[:,2]
            # rebinding the loop variable would not modify the arrays; clip them in place instead
            bact = np.clip(bact, 5, None)
            virus = np.clip(virus, 5, None)
simVBR.append(virus/bact)
simVBR = np.concatenate(simVBR).ravel()
# test the simulated VBRS against the real data
result = ks_2samp(Eric_VBR, simVBR)
if result[1] > 0.1:
#print('beta range is:', betalo, ',', betahi)
print('mu range is:', 10**(mulo), ',', 10**(muhi))
print('delta range is:', 10**(deltalo), ',', 10**(deltahi), '\n')
i += 1
RangelistB.append([betalo, betahi])
RangelistBAvg.append((betalo + betahi)/2)
RangelistM.append([mulo, muhi])
RangelistMAvg.append((mulo + muhi)/2)
RangelistD.append([deltalo, deltahi])
RangelistDAvg.append((deltalo + deltahi)/2)
j += 1
print ('finished, total runs is:', j)
print ('ratio is:', i/j)
"""# lets plot all the ranges as a set of ordered lines
#from itertools import izip
from scipy.stats import gaussian_kde
yax = []
yax1 = []
runs = i
for i in range(1,runs+1):
yax.append([i,i])
yax1.append(i)
sorted_lists = sorted(zip(RangelistB, RangelistM, RangelistD, RangelistP, RangelistG, RangelistMAvg, RangelistDAvg, RangelistPAvg, RangelistGAvg, RangelistBAvg ), reverse = True, key=lambda x: x[8])
RangelistB2, RangelistM2, RangelistD2, RangelistP2, RangelistG2, RangelistMAvg2, RangelistDAvg2, RangelistPAvg2, RangelistGAvg2, RangelistBAvg2 = [[x[i] for x in sorted_lists] for i in range(10)]
# And plot
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams.update({'font.size': 12.5})
#fig, axs = plt.subplots(1,2,figsize=(15,15))
fig = plt.figure(figsize=(20,10))
fig.tight_layout()
cm = plt.get_cmap('viridis')
#fig = plt.figure()
colorlist = [cm(1.*i/runs) for i in range(runs)]
ax = fig.add_subplot(251)
for i in range(runs):
plt.plot(RangelistB2[i],yax[i], color = colorlist[i])
plt.plot(RangelistBAvg2[i],yax1[i],color = colorlist[i], marker = 'o', markeredgecolor= 'k')
plt.title('Burst Size')
plt.xlabel('Burst Size')
ax.set_xlim(0,1000)
ax.set_yticklabels("")
ax1 = fig.add_subplot(252)
for i in range(runs):
plt.plot(RangelistM2[i],yax[i], color = colorlist[i])
plt.plot(RangelistMAvg2[i],yax1[i],color = colorlist[i], marker = 'o', markeredgecolor= 'k')
plt.title('Growth Rate')
plt.xlabel('Growth Rate Range (log 10)')
ax1.set_yticklabels("")
ax2 = fig.add_subplot(253)
for i in range(runs):
plt.plot(RangelistD2[i],yax[i], color = colorlist[i])
plt.plot(RangelistDAvg2[i],yax1[i],color = colorlist[i], marker = 'o', markeredgecolor= 'k')
plt.title('Decay Rate')
plt.xlabel('Decay Rate Range (log 10)')
ax2.set_yticklabels("")
ax3 = fig.add_subplot(254)
for i in range(runs):
plt.plot(RangelistP2[i],yax[i], color = colorlist[i])
plt.plot(RangelistPAvg2[i],yax1[i],color = colorlist[i], marker = 'o', markeredgecolor= 'k')
plt.title('Adsorption Rate')
plt.xlabel('Adsorp. Rate Range (log 10)')
ax3.set_yticklabels("")
ax4 = fig.add_subplot(255)
for i in range(runs):
plt.plot(RangelistG2[i],yax[i], color = colorlist[i])
plt.plot(RangelistGAvg2[i],yax1[i],color = colorlist[i], marker = 'o', markeredgecolor= 'k')
plt.title('Lysis Rate')
plt.xlabel('Lytic Fraction')
ax3.set_yticklabels("")
ax5 = fig.add_subplot(256)
data = RangelistBAvg2
density = gaussian_kde(data)
xs = np.linspace(1,1000,1000)
density.covariance_factor = lambda : .25
density._compute_covariance()
plt.plot(xs,density(xs), 'k')
ax6 = fig.add_subplot(257)
data = RangelistMAvg2
density = gaussian_kde(data)
xs = np.linspace(-15,0,1000)
density.covariance_factor = lambda : .25
density._compute_covariance()
plt.plot(xs,density(xs), 'k')
ax7 = fig.add_subplot(258)
data = RangelistDAvg2
density = gaussian_kde(data)
xs = np.linspace(-15,0,1000)
density.covariance_factor = lambda : .25
density._compute_covariance()
plt.plot(xs,density(xs), 'k')
ax8 = fig.add_subplot(259)
data = RangelistPAvg2
density = gaussian_kde(data)
xs = np.linspace(-15,-5,1000)
density.covariance_factor = lambda : .25
density._compute_covariance()
plt.plot(xs,density(xs), 'k')
ax9 = fig.add_subplot(2,5,10)
data = RangelistGAvg2
density = gaussian_kde(data)
xs = np.linspace(0,1,1000)
density.covariance_factor = lambda : .25
density._compute_covariance()
plt.plot(xs,density(xs), 'k')
fig.tight_layout()
plt.subplots_adjust(top=0.85)
fig.suptitle('Full model parameter fitting', size = 30)"""
| 2.5
| 2
|
aldryn_jobs/migrations/0001_initial.py
|
what-digital/aldryn-jobs
| 1
|
12778587
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import app_data.fields
import djangocms_text_ckeditor.fields
from django.conf import settings
import cms.models.fields
import aldryn_jobs.models
import sortedm2m.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cms', '0003_auto_20140926_2347'),
]
operations = [
migrations.CreateModel(
name='JobApplication',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('salutation', models.CharField(verbose_name='salutation', max_length=20, choices=[('male', 'Mr.'), ('female', 'Mrs.')], blank=True, default='male')),
('first_name', models.CharField(verbose_name='first name', max_length=20)),
('last_name', models.CharField(verbose_name='last name', max_length=20)),
('email', models.EmailField(verbose_name='email', max_length=75)),
('cover_letter', models.TextField(verbose_name='cover letter', blank=True)),
('created', models.DateTimeField(verbose_name='created', auto_now_add=True)),
('is_rejected', models.BooleanField(verbose_name='rejected?', default=False)),
('rejection_date', models.DateTimeField(verbose_name='rejection date', blank=True, null=True)),
],
options={
'verbose_name': 'job application',
'verbose_name_plural': 'job applications',
'ordering': ['-created'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JobApplicationAttachment',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('file', models.FileField(null=True, max_length=200, upload_to=aldryn_jobs.models.default_jobs_attachment_upload_to, blank=True)),
('application', models.ForeignKey(verbose_name='job application', related_name='attachments', to='aldryn_jobs.JobApplication')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JobCategoriesPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, parent_link=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='JobCategory',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('ordering', models.IntegerField(verbose_name='ordering', default=0)),
],
options={
'verbose_name': 'job category',
'verbose_name_plural': 'job categories',
'ordering': ['ordering'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JobCategoryTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('language_code', models.CharField(db_index=True, verbose_name='Language', max_length=15)),
('name', models.CharField(verbose_name='name', max_length=255)),
('slug', models.SlugField(verbose_name='slug', max_length=255, help_text='Auto-generated. Used in the URL. If changed, the URL will change. Clear it to have the slug re-created.', blank=True)),
('master', models.ForeignKey(null=True, related_name='translations', to='aldryn_jobs.JobCategory', editable=False)),
],
options={
'verbose_name': 'job category Translation',
'default_permissions': (),
'db_table': 'aldryn_jobs_jobcategory_translation',
'db_tablespace': '',
'managed': True,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JobListPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, parent_link=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='JobOpening',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('created', models.DateTimeField(auto_now_add=True)),
('is_active', models.BooleanField(verbose_name='active?', default=True)),
('publication_start', models.DateTimeField(verbose_name='published since', blank=True, null=True)),
('publication_end', models.DateTimeField(verbose_name='published until', blank=True, null=True)),
('can_apply', models.BooleanField(verbose_name='viewer can apply for the job?', default=True)),
('ordering', models.IntegerField(verbose_name='ordering', default=0)),
('category', models.ForeignKey(verbose_name='category', related_name='jobs', to='aldryn_jobs.JobCategory')),
('content', cms.models.fields.PlaceholderField(slotname='Job Opening Content', null=True, to='cms.Placeholder', editable=False)),
],
options={
'verbose_name': 'job opening',
'verbose_name_plural': 'job openings',
'ordering': ['ordering'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JobOpeningTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('language_code', models.CharField(db_index=True, verbose_name='Language', max_length=15)),
('title', models.CharField(verbose_name='title', max_length=255)),
('slug', models.SlugField(db_index=False, verbose_name='slug', max_length=255, help_text='Auto-generated. Used in the URL. If changed, the URL will change. Clear it to have the slug re-created.', blank=True)),
('lead_in', djangocms_text_ckeditor.fields.HTMLField(verbose_name='short description', help_text='This text will be displayed in lists.', blank=True)),
('master', models.ForeignKey(null=True, related_name='translations', to='aldryn_jobs.JobOpening', editable=False)),
],
options={
'verbose_name': 'job opening Translation',
'default_permissions': (),
'db_table': 'aldryn_jobs_jobopening_translation',
'db_tablespace': '',
'managed': True,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='JobsConfig',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('type', models.CharField(verbose_name='type', max_length=100)),
('namespace', models.CharField(verbose_name='instance namespace', max_length=100, default=None, unique=True)),
('app_data', app_data.fields.AppDataField(editable=False, default='{}')),
('placeholder_jobs_detail_bottom', cms.models.fields.PlaceholderField(slotname='jobs_detail_bottom', null=True, related_name='aldryn_jobs_detail_bottom', to='cms.Placeholder', editable=False)),
('placeholder_jobs_detail_footer', cms.models.fields.PlaceholderField(slotname='jobs_detail_footer', null=True, related_name='aldryn_jobs_detail_footer', to='cms.Placeholder', editable=False)),
('placeholder_jobs_detail_top', cms.models.fields.PlaceholderField(slotname='jobs_detail_top', null=True, related_name='aldryn_jobs_detail_top', to='cms.Placeholder', editable=False)),
('placeholder_jobs_list_bottom', cms.models.fields.PlaceholderField(slotname='jobs_list_bottom', null=True, related_name='aldryn_jobs_list_bottom', to='cms.Placeholder', editable=False)),
('placeholder_jobs_list_top', cms.models.fields.PlaceholderField(slotname='jobs_list_top', null=True, related_name='aldryn_jobs_list_top', to='cms.Placeholder', editable=False)),
('placeholder_jobs_sidebar', cms.models.fields.PlaceholderField(slotname='jobs_sidebar', null=True, related_name='aldryn_jobs_sidebar', to='cms.Placeholder', editable=False)),
('placeholder_jobs_top', cms.models.fields.PlaceholderField(slotname='jobs_top', null=True, related_name='aldryn_jobs_top', to='cms.Placeholder', editable=False)),
],
options={
'verbose_name': 'Aldryn Jobs configuration',
'verbose_name_plural': 'Aldryn Jobs configurations',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='jobopeningtranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AddField(
model_name='joblistplugin',
name='app_config',
field=models.ForeignKey(verbose_name='app configuration', help_text='Select appropriate app. configuration for this plugin.', null=True, to='aldryn_jobs.JobsConfig'),
preserve_default=True,
),
migrations.AddField(
model_name='joblistplugin',
name='jobopenings',
field=sortedm2m.fields.SortedManyToManyField(verbose_name='job openings', to='aldryn_jobs.JobOpening', help_text='Choose specific Job Openings to show or leave empty to show latest. Note that Job Openings from different app configs will not appear.', blank=True, null=True),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='jobcategorytranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AddField(
model_name='jobcategory',
name='app_config',
field=models.ForeignKey(verbose_name='app configuration', null=True, related_name='categories', to='aldryn_jobs.JobsConfig'),
preserve_default=True,
),
migrations.AddField(
model_name='jobcategory',
name='supervisors',
field=models.ManyToManyField(verbose_name='supervisors', to=settings.AUTH_USER_MODEL, help_text='Supervisors will be notified via email when a new job application arrives.', blank=True, related_name='job_opening_categories'),
preserve_default=True,
),
migrations.AddField(
model_name='jobcategoriesplugin',
name='app_config',
field=models.ForeignKey(verbose_name='app configuration', help_text='Select appropriate app. configuration for this plugin.', null=True, to='aldryn_jobs.JobsConfig'),
preserve_default=True,
),
migrations.AddField(
model_name='jobapplication',
name='job_opening',
field=models.ForeignKey(to='aldryn_jobs.JobOpening', related_name='applications'),
preserve_default=True,
),
]
| 1.859375
| 2
|
src/scrapers/main_scrape_brownslocum.py
|
aizaz-shahid/airflow-tutorial
| 0
|
12778588
|
<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
"""
This script runs through the brownslocum scraper.
Default url: 'https://portal.brownslocumlink.com/Jobs'
run it as:
nohup python3 main_scrape_brownslocum.py <your root password> |& tee $(date "+%Y.%m.%d-%H.%M.%S").brownslocum_logs.txt
"""
import random
import os
import sys
from selenium import webdriver
import pandas as pd
import time
import datetime
from utils import standard_formatter as sf
# from utils import change_ip as cp
from utils import config_webdriver as cw
# sudo_password = sys.argv[1]
# Get all urls
def get_all_job_urls(driver):
job_urls = []
page_count = 1
try:
while True:
print('Loaded page:', page_count)
results = driver.find_elements_by_css_selector('h2[class="results"]')
job_urls.extend([result.find_element_by_tag_name('a').get_attribute('href') for result in results])
next_page = driver.find_element_by_css_selector('a[title="Next Page"]').get_attribute('href')
time.sleep(random.randint(1, 4))
if page_count % 50 == 0:
pass  # cp.change_ip(sudo_password)  # change the ip every 50th page
driver.switch_to.window(driver.window_handles[-1])
driver.get(next_page)
page_count += 1
except Exception as e:
print(str(e))
finally:
return job_urls
# Get all job details
def get_all_jobs(job_urls, driver):
jobs_dataframe = pd.DataFrame()
for i in range(len(job_urls)):
try:
job_url = job_urls[i]
job_details_dictionary = dict()
job_details_dictionary['jobURL'] = [job_url]
if i % 300 == 0:
pass  # cp.change_ip(sudo_password)  # change the ip every 300th job url
driver.switch_to.window(driver.window_handles[-1])
driver.get(job_url)
# All the data is stored in id = 'ctl00_MainContentPlaceHolder_pageDiv'
content = driver.find_element_by_id('ctl00_MainContentPlaceHolder_pageDiv')
# Each span tag's id corresponds to a column name and its text to the column value
details = content.find_elements_by_tag_name('span')
# Fetches column - value data pair
for value in details:
id = value.get_attribute('id')
# hard coded stop point as no need to scrape further
if id == 'ctl00_MainContentPlaceHolder_lblSendToFriend':
break
else:
job_details_dictionary[id.split('_lbl')[1]] = [value.text]
df = pd.DataFrame(job_details_dictionary)
jobs_dataframe = pd.concat([jobs_dataframe, df])
time.sleep(random.randint(1, 4))
except Exception as e:
print(str(e))
time.sleep(random.randint(1, 4))
jobs_dataframe.reset_index(drop=True, inplace=True)
return jobs_dataframe
def main():
program_start_time = time.time()
driver = cw.configure_webdriver()
try:
parent_url = 'https://portal.brownslocumlink.com/Jobs/Results.aspx?JobResults=1'
# cp.change_ip(sudo_password) # change the ip
driver.get(parent_url)
# Get all job urls
job_urls = get_all_job_urls(driver)
print('Total jobs:', len(job_urls))
print('Total unique jobs:', len(set(job_urls)))
job_urls = list(set(job_urls))
if len(job_urls) != 0:
# Get jobs dataframe
data = get_all_jobs(job_urls, driver)
print(data)
file_name = '../data/{}-brownslocum-jobs.csv'
file_name = sf.format_filename(file_name)
data.to_csv(file_name, index=False,header=False)
else:
print('No jobs found')
except Exception as e:
print(str(e))
finally:
driver.close()
# sf.ip_files_cleanup()
seconds = round(time.time() - program_start_time)
minutes = round(seconds/60, 1)
hours = round(minutes/60, 1)
print('DONE! ')
print("\n\nRun time = {} seconds; {} minutes; {} hours".format(seconds, minutes, hours))
if __name__ == '__main__':
main()
| 2.59375
| 3
|
mlprogram/synthesizers/filtered_synthesizer.py
|
HiroakiMikami/mlprogram
| 9
|
12778589
|
from typing import Callable, Generator, Generic, Optional, TypeVar
from mlprogram import logging
from mlprogram.synthesizers.synthesizer import Result, Synthesizer
logger = logging.Logger(__name__)
Input = TypeVar("Input")
Output = TypeVar("Output")
class FilteredSynthesizer(Synthesizer[Input, Output], Generic[Input, Output]):
def __init__(self, synthesizer: Synthesizer[Input, Output],
score: Callable[[Input, Output], float],
threshold: float):
self.synthesizer = synthesizer
self.score = score
self.threshold = threshold
def _synthesize(self, input: Input, n_required_output: Optional[int] = None) \
-> Generator[Result[Output], None, None]:
with logger.block("_synthesize"):
for result in self.synthesizer(input, n_required_output):
score = self.score(input, result.output)
if score >= self.threshold:
logger.debug(f"find appropriate output: score={score}")
yield result
return
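# Usage sketch (hypothetical names, not part of this module): wrap an existing synthesizer so
# that only outputs whose score reaches the threshold are yielded; per _synthesize above, the
# generator stops after the first passing result.
#
#     base = SomeSynthesizer(...)                                   # any Synthesizer[Input, Output]
#     filtered = FilteredSynthesizer(base, score=my_metric, threshold=0.9)
#     for result in filtered(some_input):
#         handle(result.output)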
| 2.9375
| 3
|
dbpool.py
|
powerQiu/phone_number_seg_spider
| 1
|
12778590
|
<gh_stars>1-10
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import QueuePool
import config
class Pool:
engine = create_engine(
config.db_uri,
pool_size=config.db_pool_size,
pool_recycle=config.db_pool_recycle,  # optimistic disconnect handling
pool_pre_ping=True,  # pessimistic disconnect handling
pool_use_lifo=True,
poolclass=QueuePool,
max_overflow=config.db_max_overflow,
)
DB_Session = sessionmaker(bind=engine)
@classmethod
@contextmanager
def scope_session(cls):
session = cls.DB_Session()
try:
yield session
session.commit()
except Exception:
session.rollback()
raise
finally:
session.close()
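# Usage sketch (assumes the `config` module above points at a reachable database): the
# scoped-session pattern implemented here commits on success, rolls back on any exception,
# and always closes the session.
#
#     with Pool.scope_session() as session:
#         session.add(some_model_instance)     # hypothetical ORM object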
| 2.609375
| 3
|
easylogger/log.py
|
barretobrock/easylogger
| 0
|
12778591
|
import os
import sys
import logging
import traceback
from logging import Logger
from types import TracebackType
from typing import Union, Tuple, Optional
from .argparser import LogArgParser
from .handlers import CustomTimedRotatingFileHandler
class Log:
"""Initiates a logging object to record processes and errors"""
DEFAULT_LOG_LEVEL = 'INFO'
def __init__(self, log: Union[str, 'Log', Logger] = None, child_name: str = None,
log_level_str: str = None, log_to_file: bool = False, log_dir: str = None):
"""
Args:
log: display name of the log. If a Log or Logger object is passed, its name is taken from that object.
Typically this form is used to attach a child log to a parent.
If None, __name__ is used.
child_name: str, name of the child log.
Used when the log being made is a child of the parent log.
log_level_str: str, minimum logging level to write to log (Levels: DEBUG -> INFO -> WARN -> ERROR)
default: 'INFO'
log_to_file: if True, will create a file handler for outputting logs to file.
The files rotate daily, with the date appended to the file name.
Logs older than 30 days are removed.
log_dir: str, directory to save the log
default: "~/logs/{log_name}/"
"""
# If 'Log', it's a parent Log instance. Take the name from the object. Otherwise it's just a string
if log is None:
log = __name__
self.is_child = child_name is not None
self.log_name = log.name if isinstance(log, (Log, Logger)) else log
self.log_to_file = log_to_file
self.log_parent = log if self.is_child else None
# Determine if log is child of other Log objects (if so, it will be attached to that parent log)
# Instantiate the log object
if self.is_child and isinstance(self.log_parent, (Log, Logger)):
# Attach this instance to the parent log if it's the proper object
self.log_obj = self.log_parent.log_obj.getChild(child_name)
# Attempt to check for the parent log's log_to_file variable.
try:
self.log_to_file = self.log_parent.log_to_file
except AttributeError:
pass
else:
# Create logger if it hasn't been created
self.log_obj = logging.getLogger(self.log_name)
self.log_obj.setLevel(self.DEFAULT_LOG_LEVEL)
# Patch some things in for cross-class compatibility
self.name = self.log_name
self.debug = self.log_obj.debug
self.info = self.log_obj.info
self.warning = self.log_obj.warning
self.error = self.log_obj.error
self.getChild = self.log_obj.getChild
self.setLevel = self.log_obj.setLevel
# Check if debugging in pycharm
# Checking Methods:
# 1) checks for whether code run in-console
# 2) check for script run in debug mode per PyCharm
sysargs = sys.argv
self.is_debugging = any(['pydevconsole.py' in sysargs[0], sys.gettrace() is not None])
# Set the log level (will automatically set to DEBUG if is_debugging)
self._set_log_level(log_level_str)
# Set the log handlers
if self.log_to_file:
self._build_log_path(log_dir)
if not self.is_child and len(self.log_obj.handlers) == 0:
# We only need a handler for the parent log object
self._set_handlers()
self.info(f'Logging initiated{" for child instance" if self.is_child else ""}.')
def _build_log_path(self, log_dir: str):
"""Builds a filepath to the log file"""
# First just check if the log is a child of another.
# If so, we can bypass the logic below it and use the parent log's file path
if self.is_child:
try:
self.log_path = self.log_parent.log_path
return
except AttributeError:
pass
# Set name of file
self.log_filename = f"{self.log_name}"
# Set log directory (if none)
home_dir = os.path.join(os.path.expanduser('~'), 'logs')
log_dir = os.path.join(home_dir, log_dir if log_dir is not None else self.log_name)
# Check if logging directory exists
if not os.path.exists(log_dir):
# If dir doesn't exist, create
os.makedirs(log_dir)
# Path of logfile
self.log_path = os.path.join(log_dir, self.log_filename)
def _set_log_level(self, log_level_str: str):
"""Determines the minimum log level to set.
Logging progression: DEBUG -> INFO -> WARN -> ERROR -> CRITICAL
Methodology breakdown:
1. Looks for manually set string
2. If child, looks at parent's log level
3. If not, checks for script-level arguments passed in
"""
if log_level_str is None:
if self.is_child:
log_level_str = logging.getLevelName(self.log_parent.log_level_int) \
if isinstance(self.log_parent, Log) else self.DEFAULT_LOG_LEVEL
else:
# No log level provided. Check if any included as cmd argument
log_level_str = LogArgParser(self.is_debugging).log_level_str
self.log_level_str = log_level_str
self.log_level_int = getattr(logging, log_level_str.upper(), logging.DEBUG)
# Set minimum logging level
self.log_obj.setLevel(self.log_level_int)
def _set_handlers(self):
"""Sets up file & stream handlers"""
# Set format of logs
formatter = logging.Formatter('%(asctime)s - %(process)d - %(levelname)-8s - %(name)s - %(message)s')
# Create streamhandler for log (this sends streams to stdout for debug help)
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(self.log_level_int)
sh.setFormatter(formatter)
self.log_obj.addHandler(sh)
if self.log_to_file:
# TimedRotating will delete logs older than 30 days
fh = CustomTimedRotatingFileHandler(self.log_path, when='d', interval=1, backup_cnt=30)
fh.setLevel(self.log_level_int)
fh.setFormatter(formatter)
self.log_obj.addHandler(fh)
# Intercept exceptions
sys.excepthook = self.handle_exception
def handle_exception(self, exc_type: type, exc_value: BaseException, exc_traceback: TracebackType):
"""Default wrapper for handling exceptions. Can be overwritten by classes that inherit Log class"""
self._handle_exception(exc_type=exc_type, exc_value=exc_value, exc_traceback=exc_traceback)
def _handle_exception(self, exc_type: type, exc_value: BaseException, exc_traceback: TracebackType):
"""Intercepts an exception and prints it to log file"""
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
self.error('Uncaught exception', exc_info=(exc_type, exc_value, exc_traceback))
def error_from_class(self, err_obj: BaseException, text: str):
"""Default wrapper for extracting exceptions from Exception class.
Can be overwritten by classes that inherit the Log class"""
self._error_from_class(err_obj=err_obj, text=text)
def _error_from_class(self, err_obj: BaseException, text: str):
"""Error logging for exception objects"""
traceback_msg = '\n'.join(traceback.format_tb(err_obj.__traceback__))
exception_msg = f'{err_obj.__class__.__name__}: {err_obj}\n{traceback_msg}'
err_msg = f'{text}\n{exception_msg}'
self.error(err_msg)
@staticmethod
def extract_err() -> Tuple[Optional[type], Optional[BaseException], Optional[TracebackType]]:
"""Calls sys.exec_info() to get error details upon error instance
Returns:
(error type, error object, error traceback)
"""
return sys.exc_info()
def close(self):
"""Close logger"""
disconn_msg = 'Log disconnected'
if self.is_child:
self.info(f'{disconn_msg} for child instance.')
else:
self.info(f'{disconn_msg}.\n' + '-' * 80)
for handler in self.log_obj.handlers:
handler.close()
self.log_obj.removeHandler(handler)
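# Usage sketch (hypothetical names): a parent log and a child attached to it; the child
# reuses the parent's handlers, log level and file path.
#
#     parent = Log('my_app', log_to_file=True)
#     child = Log(parent, child_name='worker')
#     child.info('worker started')
#     parent.close()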
| 3.515625
| 4
|
oo/carro.py
|
SergioVenicio21/pythonbirds
| 0
|
12778592
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
EX:
>>> motor = Motor()
>>> direcao = Direcao()
>>> carro = Carro(direcao, motor)
>>> carro.acelerar()
>>> print(carro.calcular_direcao())
norte
>>> carro.virar_direita()
>>> print(carro.calcular_direcao())
leste
>>> carro.virar_direita()
>>> print(carro.calcular_direcao())
sul
>>> carro.virar_direita()
>>> print(carro.calcular_direcao())
oeste
>>> carro.virar_esquerda()
>>> print(carro.calcular_direcao())
sul
>>> carro.velocidade()
1
>>> carro.freiar()
>>> carro.velocidade()
0
"""
class Direcao:
ORIENTACAO_DIR = {
'norte': 'leste', 'leste': 'sul',
'sul': 'oeste', 'oeste': 'norte'
}
ORIENTACAO_ESQ = {
'norte': 'oeste', 'leste': 'norte',
'sul': 'leste', 'oeste': 'sul'
}
def __init__(self):
self.valor = 'norte'
def virar_direita(self):
self.valor = self.ORIENTACAO_DIR[self.valor]
def virar_esquerda(self):
self.valor = self.ORIENTACAO_ESQ[self.valor]
def calcular_direcao(self):
return self.valor
class Motor:
def __init__(self):
self.velocidade = 0
def acelerar(self):
self.velocidade += 1
def freiar(self):
self.velocidade -= 2
self.velocidade = max(0, self.velocidade)
class Carro:
def __init__(self, direcao, motor):
self.direcao = direcao
self.motor = motor
def acelerar(self):
self.motor.acelerar()
def freiar(self):
self.motor.freiar()
def virar_direita(self):
self.direcao.virar_direita()
def virar_esquerda(self):
self.direcao.virar_esquerda()
def calcular_direcao(self):
return self.direcao.calcular_direcao()
def velocidade(self):
return self.motor.velocidade
if __name__ == '__main__':
motor = Motor()
direcao = Direcao()
carro = Carro(direcao, motor)
for i in range(5):
carro.motor.acelerar()
carro.direcao.virar_direita()
carro.direcao.virar_direita()
carro.direcao.virar_direita()
carro.direcao.virar_esquerda()
print(carro.motor.velocidade)
print(carro.direcao.calcular_direcao())
| 4.03125
| 4
|
lib/angel/constants.py
|
jpotter/angel
| 0
|
12778593
|
# Defines constants -- values that MUST NOT be overridden or modified by any code, and that aren't variable in any way.
# As a silly example, DAYS_IN_WEEK = 7 would always be defined here, but START_DAY_OF_WEEK is a variable (0 or 1) and thus would be defined in defaults.py.
# To use, just do:
#   import angel.constants
# angel.constants.STATE_RUNNING_OK
# Consts for locks:
SERVICE_LOCKNAME = 'angel-service'
LOCKFILE_DATA_DAEMON_START_TIME = 'daemon_start_time'
LOCKFILE_DATA_CHILD_START_TIME = 'child_start_time'
LOCKFILE_DATA_CHILD_PID = 'child_pid'
LOCKFILE_DATA_PRIOR_CHILD_START_TIME = 'prior_child_start_time'
LOCKFILE_DATA_START_COUNT = 'start_count'
LOCKFILE_DATA_STATE_MESSAGE = 'status_message'
# Consts for system status, used by monitoring:
# Note: OK, warn, error, and unknown values must line up with nagios values; do not change these values.
STATE_RUNNING_OK = 0 # System is running, all services are healthy
STATE_WARN = 1 # System is running, all services are responding, but at least one service will go to error unless operator corrects an issue
STATE_ERROR = 2 # System is supposed to be running, but at least one service is failing to respond -- this means the site is down or partially down
STATE_UNKNOWN = 3 # Unable to determine state of system
STATE_STARTING = 4 # System is in process of starting up; at least one service is not responding to requests but should be up soon (Note: for nagios, STATE_WARN is returned)
STATE_STOPPING = 5 # System is in process of stopping; services are not expected to respond to requests (Note: for nagios, STATE_WARN is returned)
STATE_STOPPED = 6 # System is in stopped state (Note: for nagios, STATE_WARN is returned)
STATE_DECOMMISSIONED = 7 # System decommission has been called (Nagios returns STATE_ERROR)
STATE_CODE_TO_TEXT = {}
STATE_CODE_TO_TEXT[STATE_RUNNING_OK] = 'OK'
STATE_CODE_TO_TEXT[STATE_WARN] = 'WARN'
STATE_CODE_TO_TEXT[STATE_ERROR] = 'ERROR'
STATE_CODE_TO_TEXT[STATE_UNKNOWN] = 'UNKNOWN'
STATE_CODE_TO_TEXT[STATE_STARTING] = 'STARTING'
STATE_CODE_TO_TEXT[STATE_STOPPING] = 'STOPPING'
STATE_CODE_TO_TEXT[STATE_STOPPED] = 'STOPPED'
STATE_CODE_TO_TEXT[STATE_DECOMMISSIONED] = 'DECOMMISSIONED'
# Consts for stat types, used by nagios and collectd system monitoring:
# (Remember to update STAT_TYPES_COLLECTD/STAT_TYPES_NAGIOS below.)
STAT_TYPE_BYTES = 'bytes'
STAT_TYPE_COUNTER = 'counter' # Counters only go up, never down -- things that go up and down are gauges.
STAT_TYPE_GAUGE = 'gauge'
STAT_TYPE_QUEUE_SIZE = 'queue_size'
STAT_TYPE_RECORDS = 'records'
STAT_TYPE_MEMORY = 'memory'
STAT_TYPE_SECONDS = 'seconds'
# The codes here are used by nagios / collectd for handling data types correctly; do not change the mappings.
# ( In some cases, our data is coming in from a nagios plugin, and the only unit info we have comes from the unit code that nagios gives us.
# So that we map those unit types onto the most logical stat type, some nagios types have an extra '~' on them to prevent them from matching.
# That is, both "bytes" and "memory" types would be 'b' in nagios, but mapping 'b' back to 'memory' would be wrong for disk usage.
# To prevent this, we set the nagios code for memory to 'b~' here; and then when the nagios code is used elsewhere, the '~' is stripped out. )
STAT_TYPES_COLLECTD = {}
STAT_TYPES_NAGIOS = {}
STAT_TYPES_NAGIOS[STAT_TYPE_BYTES] = 'b'
STAT_TYPES_COLLECTD[STAT_TYPE_BYTES] = 'bytes'
# This isn't quite right, but will work for now:
STAT_TYPES_NAGIOS['B'] = STAT_TYPES_NAGIOS[STAT_TYPE_BYTES]  # check_http uses capital B; alias that here.
STAT_TYPES_COLLECTD['B'] = STAT_TYPES_COLLECTD[STAT_TYPE_BYTES]
STAT_TYPES_NAGIOS[STAT_TYPE_COUNTER] = ''
STAT_TYPES_COLLECTD[STAT_TYPE_COUNTER] = 'count'
STAT_TYPES_NAGIOS[STAT_TYPE_GAUGE] = ''
STAT_TYPES_COLLECTD[STAT_TYPE_GAUGE] = 'gauge'
STAT_TYPES_NAGIOS[STAT_TYPE_QUEUE_SIZE] = ''
STAT_TYPES_COLLECTD[STAT_TYPE_QUEUE_SIZE] = 'queue_length'
STAT_TYPES_NAGIOS[STAT_TYPE_RECORDS] = ''
STAT_TYPES_COLLECTD[STAT_TYPE_RECORDS] = 'records'
STAT_TYPES_NAGIOS[STAT_TYPE_MEMORY] = 'b~' # See '~' note above.
STAT_TYPES_COLLECTD[STAT_TYPE_MEMORY] = 'memory'
STAT_TYPES_NAGIOS[STAT_TYPE_SECONDS] = 's'
STAT_TYPES_COLLECTD[STAT_TYPE_SECONDS] = 'seconds'
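# Illustrative sketch (not part of this module): per the '~' note above, consumers are
# expected to strip the trailing '~' before handing a unit code to nagios, e.g.
#
#     nagios_unit = STAT_TYPES_NAGIOS[STAT_TYPE_MEMORY].rstrip('~')   # -> 'b'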
| 2.234375
| 2
|
app/schemas/__init__.py
|
EZhivaikin/TrialPython
| 0
|
12778594
|
<reponame>EZhivaikin/TrialPython
from app.schemas.brand import Brand
from app.schemas.product import Product
| 0.820313
| 1
|
tests/test_fastnumbers_examples.py
|
pterjan/fastnumbers
| 0
|
12778595
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Find the build location and add that to the path
import math
import sys
from typing import Callable, Iterator, List, cast
import pytest
from pytest import raises
import fastnumbers
# Each conversion test should test the following
# 1. float number
# 2. signed float string
# 3. float string with exponents
# 4. float string with padded whitespace
# 5. int number
# 6. signed int string
# 7. int string with padded whitespace
# 8. long number
# 9. long number
# 10. return type
# 11. TypeError for invalid input
# 12. Invalid input string
# 13. Invalid input string with numbers
# 14. Infinity
# 15. NaN
# 16. Sign/'e'/'.' only
# 17. Default value.
# 18. Unicode numbers
def test_fast_real() -> None:
# 1. float number
assert fastnumbers.fast_real(-367.3268) == -367.3268
assert fastnumbers.fast_real(-367.3268, raise_on_invalid=True) == -367.3268
# 2. signed float string
assert fastnumbers.fast_real("+367.3268") == +367.3268
assert fastnumbers.fast_real("+367.3268", raise_on_invalid=True) == +367.3268
# 3. float string with exponents
assert fastnumbers.fast_real("-367.3268e207") == -367.3268e207
assert fastnumbers.fast_real("1.175494351e-3810000000") == 0.0
# 4. float string with padded whitespace
assert fastnumbers.fast_real(" -367.04 ") == -367.04
# 5. int number
assert fastnumbers.fast_real(499) == 499
# 6. signed int string
assert fastnumbers.fast_real("-499") == -499
# 7. int string with padded whitespace
assert fastnumbers.fast_real(" +3001 ") == 3001
# 8. long number
assert fastnumbers.fast_real(35892482945872302493) == 35892482945872302493
# 9. long string
assert fastnumbers.fast_real("35892482945872302493") == 35892482945872302493
# 10. return type
assert isinstance(fastnumbers.fast_real(4029), int)
assert isinstance(fastnumbers.fast_real(4029.0, coerce=False), float)
assert isinstance(fastnumbers.fast_real(4029), int)
assert isinstance(fastnumbers.fast_real(4029.0), int)
assert isinstance(fastnumbers.fast_real(4029.5), float)
assert isinstance(fastnumbers.fast_real("4029"), int)
assert isinstance(fastnumbers.fast_real("4029.0"), int)
assert isinstance(fastnumbers.fast_real("4029.0", coerce=False), float)
# 11. TypeError for invalid input
with raises(TypeError):
fastnumbers.fast_real(["hey"]) # type: ignore
# 12. Invalid input string
assert fastnumbers.fast_real("not_a_number") == "not_a_number"
with raises(ValueError):
assert fastnumbers.fast_real("not_a_number", raise_on_invalid=True)
# 13. Invalid input string with numbers
assert fastnumbers.fast_real("26.8 lb") == "26.8 lb"
with raises(ValueError):
assert fastnumbers.fast_real("26.8 lb", raise_on_invalid=True)
# 14. Infinity
assert fastnumbers.fast_real("inf") == float("inf")
assert fastnumbers.fast_real("-iNFinity") == float("-inf")
assert fastnumbers.fast_real("-iNFinity", inf=7608) == 7608
# 15. NaN
assert math.isnan(cast(float, fastnumbers.fast_real("nan")))
assert math.isnan(cast(float, fastnumbers.fast_real("-NaN")))
assert fastnumbers.fast_real("-NaN", nan=0) == 0
# 16. Sign/'e'/'.' only
assert fastnumbers.fast_real("+") == "+"
assert fastnumbers.fast_real("-") == "-"
assert fastnumbers.fast_real("e") == "e"
assert fastnumbers.fast_real(".") == "."
# 17. Default on invalid... 'raise_on_invalid' supersedes
assert fastnumbers.fast_real("invalid", 90) == 90
assert fastnumbers.fast_real("invalid", default=90) == 90
assert fastnumbers.fast_real("invalid", default=None) is None
with raises(ValueError):
assert fastnumbers.fast_real("invalid", 90, raise_on_invalid=True)
# 18. Unicode numbers
assert fastnumbers.fast_real(u"⑦") == 7
assert fastnumbers.fast_real(u"⁸") == 8
assert fastnumbers.fast_real(u"⅔") == 2.0 / 3.0
assert fastnumbers.fast_real(u"Ⅴ") == 5
# 19. Function to execute on failure to convert
assert fastnumbers.fast_real("76.8", on_fail=len) == 76.8
assert fastnumbers.fast_real("invalid", on_fail=len) == 7
def test_fast_float() -> None:
# 1. float number
assert fastnumbers.fast_float(-367.3268) == -367.3268
assert fastnumbers.fast_float(-367.3268, raise_on_invalid=True) == -367.3268
# 2. signed float string
assert fastnumbers.fast_float("+367.3268") == +367.3268
assert fastnumbers.fast_float("+367.3268", raise_on_invalid=True) == +367.3268
# 3. float string with exponents
assert fastnumbers.fast_float("-367.3268e27") == -367.3268e27
assert fastnumbers.fast_float("-367.3268E27") == -367.3268e27
assert fastnumbers.fast_float("-367.3268e207") == -367.3268e207
assert fastnumbers.fast_float("1.175494351E-3810000000") == 0.0
# 4. float string with padded whitespace
assert fastnumbers.fast_float(" -367.04 ") == -367.04
# 5. int number
assert fastnumbers.fast_float(499) == 499.0
# 6. signed int string
assert fastnumbers.fast_float("-499") == -499.0
# 7. int string with padded whitespace
assert fastnumbers.fast_float(" +3001 ") == 3001
# 8. long number
assert fastnumbers.fast_float(35892482945872302493) == 35892482945872302493.0
# 9. long string
assert fastnumbers.fast_float("35892482945872302493") == 35892482945872302493.0
# 10. return type
assert isinstance(fastnumbers.fast_float(4029), float)
assert isinstance(fastnumbers.fast_float("4029"), float)
# 11. TypeError for invalid input
with raises(TypeError):
fastnumbers.fast_float(["hey"]) # type: ignore
# 12. Invalid input string
assert fastnumbers.fast_float("not_a_number") == "not_a_number"
with raises(ValueError):
assert fastnumbers.fast_float("not_a_number", raise_on_invalid=True)
# 13. Invalid input string with numbers
assert fastnumbers.fast_float("26.8 lb") == "26.8 lb"
with raises(ValueError):
assert fastnumbers.fast_float("26.8 lb", raise_on_invalid=True)
# 14. Infinity
assert fastnumbers.fast_float("inf") == float("inf")
assert fastnumbers.fast_float("-iNFinity") == float("-inf")
assert fastnumbers.fast_float("-iNFinity", inf=523) == 523
# 15. NaN
assert math.isnan(cast(float, fastnumbers.fast_float("nAn")))
assert math.isnan(cast(float, fastnumbers.fast_float("-NaN")))
assert fastnumbers.fast_float("-NaN", nan=0) == 0
# 16. Sign/'e'/'.' only
assert fastnumbers.fast_float("+") == "+"
assert fastnumbers.fast_float("-") == "-"
assert fastnumbers.fast_float("e") == "e"
assert fastnumbers.fast_float(".") == "."
# 17. Default on invalid... 'raise_on_invalid' supersedes
assert fastnumbers.fast_float("invalid", 90) == 90
assert fastnumbers.fast_float("invalid", default=90) == 90
assert fastnumbers.fast_float("invalid", default=None) is None
with raises(ValueError):
assert fastnumbers.fast_float("invalid", 90, raise_on_invalid=True)
# 18. Unicode numbers
assert fastnumbers.fast_float(u"⑦") == 7.0
assert fastnumbers.fast_float(u"⁸") == 8.0
assert fastnumbers.fast_float(u"⅔") == 2.0 / 3.0
assert fastnumbers.fast_float(u"Ⅴ") == 5.0
# 19. Function to execute on failure to convert
assert fastnumbers.fast_float("76.8", on_fail=len) == 76.8
assert fastnumbers.fast_float("invalid", on_fail=len) == 7
def test_fast_int() -> None:
# 1. float number
assert fastnumbers.fast_int(-367.3268) == -367
assert fastnumbers.fast_int(-367.3268, raise_on_invalid=True) == -367
# 2. signed float string
assert fastnumbers.fast_int("+367.3268") == "+367.3268"
with raises(ValueError):
assert fastnumbers.fast_int("+367.3268", raise_on_invalid=True)
# 3. float string with exponents
assert fastnumbers.fast_int("-367.3268e207") == "-367.3268e207"
# 4. float string with padded whitespace
assert fastnumbers.fast_int(" -367.04 ") == " -367.04 "
# 5. int number
assert fastnumbers.fast_int(499) == 499
# 6. signed int string
assert fastnumbers.fast_int("-499") == -499
# 7. int string with padded whitespace
assert fastnumbers.fast_int(" +3001 ") == 3001
# 8. long number
assert fastnumbers.fast_int(35892482945872302493) == 35892482945872302493
# 9. long string
assert fastnumbers.fast_int("35892482945872302493") == 35892482945872302493
# 10. return type
assert isinstance(fastnumbers.fast_int(4029.00), int)
# 11. TypeError for invalid input
with raises(TypeError):
fastnumbers.fast_int(["hey"]) # type: ignore
# 12. Invalid input string
assert fastnumbers.fast_int("not_a_number") == "not_a_number"
with raises(ValueError):
assert fastnumbers.fast_int("not_a_number", raise_on_invalid=True)
# 13. Invalid input string with numbers
assert fastnumbers.fast_int("26.8 lb") == "26.8 lb"
with raises(ValueError):
assert fastnumbers.fast_int("26.8 lb", raise_on_invalid=True)
# 14. Infinity
assert fastnumbers.fast_int("inf") == "inf"
# 15. NaN
assert fastnumbers.fast_int("nan") == "nan"
# 16. Sign/'e'/'.' only
assert fastnumbers.fast_int("+") == "+"
assert fastnumbers.fast_int("-") == "-"
assert fastnumbers.fast_int("e") == "e"
assert fastnumbers.fast_int(".") == "."
# 17. Default on invalid... 'raise_on_invalid' supersedes
assert fastnumbers.fast_int("invalid", 90) == 90
assert fastnumbers.fast_int("invalid", default=90) == 90
assert fastnumbers.fast_int("invalid", default=None) is None
with raises(ValueError):
assert fastnumbers.fast_int("invalid", 90, raise_on_invalid=True)
# 18. Unicode numbers
assert fastnumbers.fast_int(u"⑦") == 7
assert fastnumbers.fast_int(u"⁸") == 8
assert fastnumbers.fast_int(u"⁸", base=10) == u"⁸"
assert fastnumbers.fast_int(u"⅔") == u"⅔"
assert fastnumbers.fast_int(u"Ⅴ") == u"Ⅴ"
# 19. Function to execute on failure to convert
assert fastnumbers.fast_int("76", on_fail=len) == 76
assert fastnumbers.fast_int("invalid", on_fail=len) == 7
def test_fast_forceint() -> None:
# 1. float number
assert fastnumbers.fast_forceint(-367.3268) == -367
assert fastnumbers.fast_forceint(-367.3268, raise_on_invalid=True) == -367
# 2. signed float string
assert fastnumbers.fast_forceint("+367.3268") == 367
assert fastnumbers.fast_forceint("+367.3268", raise_on_invalid=True) == 367
# 3. float string with exponents
assert fastnumbers.fast_forceint("-367.3268e207") == int(-367.3268e207)
# 4. float string with padded whitespace
assert fastnumbers.fast_forceint(" -367.04 ") == -367
# 5. int number
assert fastnumbers.fast_forceint(499) == 499
# 6. signed int string
assert fastnumbers.fast_forceint("-499") == -499
# 7. int string with padded whitespace
assert fastnumbers.fast_forceint(" +3001 ") == 3001
# 8. long number
assert fastnumbers.fast_forceint(35892482945872302493) == 35892482945872302493
# 9. long string
assert fastnumbers.fast_forceint("35892482945872302493") == 35892482945872302493
# 10. return type
assert isinstance(fastnumbers.fast_forceint(4029.00), int)
assert isinstance(fastnumbers.fast_forceint("4029.00"), int)
# 11. TypeError for invalid input
with raises(TypeError):
fastnumbers.fast_forceint(["hey"]) # type: ignore
# 12. Invalid input string
assert fastnumbers.fast_forceint("not_a_number") == "not_a_number"
with raises(ValueError):
assert fastnumbers.fast_forceint("not_a_number", raise_on_invalid=True)
# 13. Invalid input string with numbers
assert fastnumbers.fast_forceint("26.8 lb") == "26.8 lb"
with raises(ValueError):
assert fastnumbers.fast_forceint("26.8 lb", raise_on_invalid=True)
# 14. Infinity
assert fastnumbers.fast_forceint("inf") == "inf"
assert fastnumbers.fast_forceint("-iNFinity") == "-iNFinity"
# 15. NaN
assert fastnumbers.fast_forceint("nan") == "nan"
# 16. Sign/'e'/'.' only
assert fastnumbers.fast_forceint("+") == "+"
assert fastnumbers.fast_forceint("-") == "-"
assert fastnumbers.fast_forceint("e") == "e"
assert fastnumbers.fast_forceint(".") == "."
# 17. Default on invalid... 'raise_on_invalid' supersedes
assert fastnumbers.fast_forceint("invalid", 90) == 90
assert fastnumbers.fast_forceint("invalid", default=90) == 90
assert fastnumbers.fast_forceint("invalid", default=None) is None
with raises(ValueError):
assert fastnumbers.fast_forceint("invalid", 90, raise_on_invalid=True)
# 18. Unicode numbers
assert fastnumbers.fast_forceint(u"⑦") == 7
assert fastnumbers.fast_forceint(u"⁸") == 8
assert fastnumbers.fast_forceint(u"⅔") == 0
assert fastnumbers.fast_forceint(u"Ⅴ") == 5
# 19. Function to execute on failure to convert
assert fastnumbers.fast_forceint("76.8", on_fail=len) == 76
assert fastnumbers.fast_forceint("invalid", on_fail=len) == 7
def test_isreal() -> None:
# 1. float number
assert fastnumbers.isreal(-367.3268)
assert not fastnumbers.isreal(-367.3268, str_only=True)
assert fastnumbers.isreal(-367.3268, num_only=True)
# 2. signed float string
assert fastnumbers.isreal("+367.3268")
assert fastnumbers.isreal("+367.3268", str_only=True)
assert not fastnumbers.isreal("+367.3268", num_only=True)
# 3. float string with exponents
assert fastnumbers.isreal("-367.3268e207")
# 4. float string with padded whitespace
assert fastnumbers.isreal(" -367.04 ")
# 5. int number
assert fastnumbers.isreal(499)
# 6. signed int string
assert fastnumbers.isreal("-499")
# 7. int string with padded whitespace
assert fastnumbers.isreal(" +3001 ")
# 8. long number
assert fastnumbers.isreal(35892482945872302493)
# 9. long string
assert fastnumbers.isreal("35892482945872302493")
# 10. return type
assert fastnumbers.isreal(4029) is True
assert fastnumbers.isreal(4029, str_only=True) is False
assert fastnumbers.isreal("4029") is True
assert fastnumbers.isreal("4029", str_only=True) is True
assert fastnumbers.isreal("hey") is False
# 11. Invalid type
assert not fastnumbers.isreal(["hey"])
# 12. Invalid input string
assert not fastnumbers.isreal("not_a_number")
# 13. Invalid input string with numbers
assert not fastnumbers.isreal("26.8 lb")
# 14. Infinity
assert not fastnumbers.isreal("inf")
assert fastnumbers.isreal("inf", allow_inf=True)
assert fastnumbers.isreal("-iNFinity", allow_inf=True)
# 15. NaN
assert not fastnumbers.isreal("nan")
assert fastnumbers.isreal("nan", allow_nan=True)
assert fastnumbers.isreal("-NaN", allow_nan=True)
# 16. Sign/'e'/'.' only
assert not fastnumbers.isreal("+")
assert not fastnumbers.isreal("-")
assert not fastnumbers.isreal("e")
assert not fastnumbers.isreal(".")
# 18. Unicode numbers
assert fastnumbers.isreal(u"⑦")
assert fastnumbers.isreal(u"⁸")
assert fastnumbers.isreal(u"⅔")
assert fastnumbers.isreal(u"Ⅴ")
def test_isfloat() -> None:
# 1. float number
assert fastnumbers.isfloat(-367.3268)
assert not fastnumbers.isfloat(-367.3268, str_only=True)
assert fastnumbers.isfloat(-367.3268, num_only=True)
# 2. signed float string
assert fastnumbers.isfloat("+367.3268")
assert fastnumbers.isfloat("+367.3268", str_only=True)
assert not fastnumbers.isfloat("+367.3268", num_only=True)
# 3. float string with exponents
assert fastnumbers.isfloat("-367.3268e207")
# 4. float string with padded whitespace
assert fastnumbers.isfloat(" -367.04 ")
# 5. int number
assert not fastnumbers.isfloat(499)
# 6. signed int string
assert fastnumbers.isfloat("-499")
# 7. int string with padded whitespace
assert fastnumbers.isfloat(" +3001 ")
# 8. long number
assert not fastnumbers.isfloat(35892482945872302493)
# 9. long string
assert fastnumbers.isfloat("35892482945872302493")
# 10. return type
assert fastnumbers.isfloat(4029) is False
assert fastnumbers.isfloat(4029.0) is True
assert fastnumbers.isfloat(4029.0, str_only=True) is False
assert fastnumbers.isfloat("4029") is True
assert fastnumbers.isfloat("4029", str_only=True) is True
# 11. Invalid type
assert not fastnumbers.isfloat(["hey"])
# 12. Invalid input string
assert not fastnumbers.isfloat("not_a_number")
# 13. Invalid input string with numbers
assert not fastnumbers.isfloat("26.8 lb")
# 14. Infinity
assert not fastnumbers.isfloat("inf")
assert fastnumbers.isfloat("inf", allow_inf=True)
assert fastnumbers.isfloat("-infinity", allow_inf=True)
assert fastnumbers.isfloat("-INFINITY", allow_inf=True)
# 15. NaN
assert not fastnumbers.isfloat("nAn")
assert fastnumbers.isfloat("nan", allow_nan=True)
assert fastnumbers.isfloat("-NaN", allow_nan=True)
# 16. Sign/'e'/'.' only
assert not fastnumbers.isfloat("+")
assert not fastnumbers.isfloat("-")
assert not fastnumbers.isfloat("e")
assert not fastnumbers.isfloat(".")
# 18. Unicode numbers
assert fastnumbers.isfloat(u"⑦")
assert fastnumbers.isfloat(u"⁸")
assert fastnumbers.isfloat(u"⅔")
assert fastnumbers.isfloat(u"Ⅴ")
def test_isint() -> None:
# 1. float number
assert not fastnumbers.isint(-367.3268)
# 2. signed float string
assert not fastnumbers.isint("+367.3268")
# 3. float string with exponents
assert not fastnumbers.isint("-367.3268e207")
# 4. float string with padded whitespace
assert not fastnumbers.isint(" -367.04 ")
# 5. int number
assert fastnumbers.isint(499)
assert not fastnumbers.isint(499, str_only=True)
assert fastnumbers.isint(499, num_only=True)
# 6. signed int string
assert fastnumbers.isint("-499")
assert fastnumbers.isint("-499", str_only=True)
assert not fastnumbers.isint("-499", num_only=True)
# 7. int string with padded whitespace
assert fastnumbers.isint(" +3001 ")
# 8. long number
assert fastnumbers.isint(35892482945872302493)
# 9. long string
assert fastnumbers.isint("35892482945872302493")
# 10. return type
assert fastnumbers.isint(4029) is True
assert fastnumbers.isint(4029, str_only=True) is False
assert fastnumbers.isint("4029") is True
assert fastnumbers.isint("4029", str_only=True) is True
assert fastnumbers.isint("4029.50") is False
assert fastnumbers.isint(4029.50) is False
# 11. Invalid type
assert not fastnumbers.isint(["hey"])
# 12. Invalid input string
assert not fastnumbers.isint("not_a_number")
# 13. Invalid input string with numbers
assert not fastnumbers.isint("26.8 lb")
# 14. Infinity
assert not fastnumbers.isint("inf")
# 15. NaN
assert not fastnumbers.isint("nan")
# 16. Sign/'e'/'.' only
assert not fastnumbers.isint("+")
assert not fastnumbers.isint("-")
assert not fastnumbers.isint("e")
assert not fastnumbers.isint(".")
# 18. Unicode numbers
assert fastnumbers.isint(u"⑦")
assert fastnumbers.isint(u"⁸")
assert not fastnumbers.isint(u"⅔")
assert not fastnumbers.isint(u"Ⅴ")
def test_isintlike() -> None:
# 1. float number
assert not fastnumbers.isintlike(-367.3268)
assert fastnumbers.isintlike(-367.0)
assert not fastnumbers.isintlike(-367.0, str_only=True)
assert fastnumbers.isintlike(-367.0, num_only=True)
# 2. signed float string
assert not fastnumbers.isintlike("+367.3268")
assert fastnumbers.isintlike("+367.0")
assert fastnumbers.isintlike("+367.0", str_only=True)
assert not fastnumbers.isintlike("+367.0", num_only=True)
# 3. float string with exponents
assert fastnumbers.isintlike("-367.3268e207")
assert not fastnumbers.isintlike("145343E-4")
assert fastnumbers.isintlike("14534.000000000e4")
assert fastnumbers.isintlike("1400000E-4")
assert not fastnumbers.isintlike("140E-4")
assert fastnumbers.isintlike("14.E4")
assert fastnumbers.isintlike("14E4")
# 4. float string with padded whitespace
assert not fastnumbers.isintlike(" -367.04 ")
# 5. int number
assert fastnumbers.isintlike(499)
# 6. signed int string
assert fastnumbers.isintlike("-499")
# 7. int string with padded whitespace
assert fastnumbers.isintlike(" +3001 ")
# 8. long number
assert fastnumbers.isintlike(35892482945872302493)
# 9. long string
assert fastnumbers.isintlike("35892482945872302493")
# 10. return type
assert fastnumbers.isintlike(4029) is True
assert fastnumbers.isintlike(4029, str_only=True) is False
assert fastnumbers.isintlike("4029") is True
assert fastnumbers.isintlike("4029", str_only=True) is True
assert fastnumbers.isintlike("4029.50") is False
assert fastnumbers.isintlike(4029.50) is False
# 11. Invalid type
assert not fastnumbers.isintlike(["hey"])
# 12. Invalid input string
assert not fastnumbers.isintlike("not_a_number")
# 13. Invalid input string with numbers
assert not fastnumbers.isintlike("26.8 lb")
# 14. Infinity
assert not fastnumbers.isintlike("inf")
# 15. NaN
assert not fastnumbers.isintlike("nan")
# 16. Sign/'e'/'.' only
assert not fastnumbers.isintlike("+")
assert not fastnumbers.isintlike("-")
assert not fastnumbers.isintlike("e")
assert not fastnumbers.isintlike(".")
# 18. Unicode numbers
assert fastnumbers.isintlike(u"⑦")
assert fastnumbers.isintlike(u"⁸")
assert not fastnumbers.isintlike(u"⅔")
assert fastnumbers.isintlike(u"Ⅴ")
def test_type() -> None:
# 1. float number
assert fastnumbers.query_type(-367.3268) is float
# 2. signed float string
assert fastnumbers.query_type("+367.3268") is float
# 3. float string with exponents
assert fastnumbers.query_type("-367.3268e207") is float
# 4. float string with padded whitespace
assert fastnumbers.query_type(" -367.04 ") is float
# 5. int number
assert fastnumbers.query_type(499) is int
# 6. signed int string
assert fastnumbers.query_type("-499") is int
# 7. int string with padded whitespace
assert fastnumbers.query_type(" +3001 ") is int
# 8. long number
assert fastnumbers.query_type(35892482945872302493) is int
# 9. long string
assert fastnumbers.query_type("35892482945872302493") is int
# 10. coerced type
assert fastnumbers.query_type(4029.0) is float
assert fastnumbers.query_type("4029.0") is float
assert fastnumbers.query_type(4029.0, coerce=True) is int
assert fastnumbers.query_type("4029.0", coerce=True) is int
# 11. Invalid type
assert fastnumbers.query_type(["hey"]) is list
# 12. Invalid input string
assert fastnumbers.query_type("not_a_number") is str
assert fastnumbers.query_type("not_a_number", allowed_types=(float, int)) is None
# 13. Invalid input string with numbers
assert fastnumbers.query_type("26.8 lb") is str
assert fastnumbers.query_type("26.8 lb", allowed_types=(float, int)) is None
# 14. Infinity
assert fastnumbers.query_type("inf") is str
assert fastnumbers.query_type("inf", allow_inf=True) is float
assert fastnumbers.query_type("-iNFinity", allow_inf=True) is float
# 15. NaN
assert fastnumbers.query_type("nan") is str
assert fastnumbers.query_type("nan", allow_nan=True) is float
assert fastnumbers.query_type("-NaN", allow_nan=True) is float
# 16. Sign/'e'/'.' only
assert fastnumbers.query_type("+") is str
assert fastnumbers.query_type("-") is str
assert fastnumbers.query_type("e") is str
assert fastnumbers.query_type(".") is str
# 18. Unicode numbers
assert fastnumbers.query_type(u"⑦") is int
assert fastnumbers.query_type(u"⁸") is int
assert fastnumbers.query_type(u"⅔") is float
assert fastnumbers.query_type(u"Ⅴ") is float
@pytest.fixture()
def tprint(capsys: pytest.CaptureFixture[str]) -> Iterator[Callable[[str], None]]:
"""
Fixture for printing info after test, not suppressed by pytest stdout/stderr capture
"""
lines: List[str] = []
yield lines.append
with capsys.disabled():
for line in lines:
sys.stdout.write("\n{}".format(line))
def test_print_limits(tprint: Callable[[str], None]) -> None:
tprint("\nFASNUMBERS NUMERICAL LIMITS FOR THIS COMPILER BEFORE PYTHON FALLBACK:")
tprint("MAXIMUM INTEGER LENTH: {}".format(fastnumbers.max_int_len))
tprint("MAX NUMBER FLOAT DIGITS: {}".format(fastnumbers.dig))
tprint("MAXIMUM FLOAT EXPONENT: {}".format(fastnumbers.max_exp))
tprint("MINIMUM FLOAT EXPONENT: {}".format(fastnumbers.min_exp))
tprint("")
| 2.4375
| 2
|
fltk/datasets/distributed/__init__.py
|
tudelft-eemcs-dml/fltk-testbed-gr-5
| 0
|
12778596
|
<filename>fltk/datasets/distributed/__init__.py
from .dataset import DistDataset
from .cifar10 import DistCIFAR10Dataset
# from .cifar100 import CIFAR100Dataset
# from .fashion_mnist import FashionMNISTDataset
| 1.242188
| 1
|
src/lib/python/util/injected_files.py
|
memes/f5-bigip-image-generator
| 34
|
12778597
|
<reponame>memes/f5-bigip-image-generator
"""Module to read info about injected files"""
# Copyright (C) 2019-2021 F5 Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from os.path import basename, isdir, isfile, abspath, realpath, expanduser
from pathlib import Path
from shutil import copy2, copytree
import re
import requests
from telemetry.build_info_inject import BuildInfoInject
from util.config import get_config_value, get_list_from_config_yaml
from util.logger import LOGGER
def extract_single_worded_key(dictionary, key):
""" verify that key is in dictionary and its value is a single word """
if key in dictionary:
value = dictionary[key]
if not isinstance(value, str):
raise RuntimeError('\'{}\' of injected file must be a string, but got {}'
.format(key, value))
if len(value.split()) == 1:
return value
raise RuntimeError('\'{}\' of injected file must be a single word, but got {} words: \'{}\''
.format(key, len(value.split()), value))
raise RuntimeError('\'{}\' is not specified for injected file \'{}\' !'
.format(key, dictionary))
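# Example (hypothetical values): extract_single_worded_key({'source': '/tmp/a.txt'}, 'source')
# returns '/tmp/a.txt'; a multi-word value or a missing key raises RuntimeError.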
def read_injected_files(top_call_dir, overall_dest_dir):
"""
Copy files that need to be injected to a temporary location,
which will be accessible during post-install.
Two mandatory arguments:
a path from where build-image was called
a path to initrd directory that will be available during post_install
"""
# location used by post-install, should be created only if there are files to inject
injected_files = 'etc/injected_files' # location used by post-install
overall_dest_dir = overall_dest_dir + '/' + injected_files
LOGGER.info('Temporary location for injected files: \'%s\'', overall_dest_dir)
# include user-specified files
files_to_inject = get_list_from_config_yaml('UPDATE_IMAGE_FILES')
# add build_info.json
prep_build_info_for_injection(files_to_inject)
# each injected file directory to be stored in a separate directory "file<number>"
count = 0
LOGGER.trace("files_to_inject: %s", files_to_inject)
for file in files_to_inject:
LOGGER.debug('Injecting file: \'%s\'.', file)
src = extract_single_worded_key(file, 'source')
dest = extract_single_worded_key(file, 'destination')
if 'mode' in file:
mode = extract_single_worded_key(file, 'mode')
else:
mode = None
LOGGER.info('Copy \'%s\' to a temporary location for \'%s\'.', src, dest)
url = src # treat 'src' as a file path and 'url' as a url
if src[0] != '/' and src[0] != '~':
# make it an absolute path
src = top_call_dir + '/' + src
src = abspath(realpath(expanduser(src)))
file_holder = overall_dest_dir + '/file' + str(count) + '/'
# copy source to "src"
# source file name does not need to be preserved;
# it will be copied to destination path on BIG-IP
source_holder = file_holder + 'src'
Path(file_holder).mkdir(parents=True, exist_ok=True)
if isfile(src):
LOGGER.info('Treating \'%s\' as a file for file injection', src)
copy2(src, source_holder)
elif isdir(src):
LOGGER.info('Treating \'%s\' as a directory for file injection', src)
copytree(src, source_holder)
else:
LOGGER.info('Treating \'%s\' as a URL for the file injection', url)
download_file(url, source_holder)
# store destination
if dest[0] != '/':
raise RuntimeError('injected file destination \'{}\' must be an absolute path!'
.format(dest))
with open(file_holder + 'dest', 'w') as dest_holder:
print("{}".format(dest), file=dest_holder)
# Store mode. Should be a string consisting of one to four octal digits.
if mode:
LOGGER.debug('Creating mode holder for mode \'%s\'.', mode)
mode_pattern = re.compile('^[0-7][0-7]?[0-7]?[0-7]?$')
if not mode_pattern.match(mode):
raise RuntimeError('Invalid mode \'' + mode + '\', must be a string ' +
'consisting of one to four octal digits.')
with open(file_holder + 'mode', 'w') as mode_holder:
print("{}".format(mode), file=mode_holder)
count += 1
# end of for loop
LOGGER.debug('leaving %s', basename(__file__))
return 0
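# Resulting layout sketch (illustrative, names hypothetical): for two injected files the
# function leaves roughly
#
#     <overall_dest_dir>/etc/injected_files/file0/{src, dest[, mode]}
#     <overall_dest_dir>/etc/injected_files/file1/{src, dest[, mode]}
#
# which post-install later copies onto the target at the recorded destination paths.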
def prep_build_info_for_injection(files_to_inject):
""" prepare information about installed software on the build machine """
artifacts_dir = get_config_value("ARTIFACTS_DIR")
build_info_file_name = "build_info.json"
build_info_source = artifacts_dir + "/" + build_info_file_name
build_info_destination = "/" + build_info_file_name
files_to_inject.append({'source': build_info_source, 'destination': build_info_destination})
build_info = BuildInfoInject()
LOGGER.info(build_info.to_json())
build_info.to_file(build_info_source)
def download_file(url, dest_file):
""" Download from url to a local file.
Throws exceptions with wording specific to the file injection.
Assumes that the directory containing the destination file already exists. """
verify_tls = bool(get_config_value("IGNORE_DOWNLOAD_URL_TLS") is None)
try:
remote_file = requests.get(url, verify=verify_tls, timeout=60)
except requests.exceptions.SSLError as exc:
LOGGER.exception(exc)
raise RuntimeError(
'Cannot access \'{}\' due to TLS problems! '.format(url) +
'Consider abandoning TLS verification by usage of ' +
'\'IGNORE_DOWNLOAD_URL_TLS\' parameter.') from exc
except requests.exceptions.RequestException as exc:
LOGGER.exception(exc)
raise RuntimeError(
'\'{}\' is neither a file nor a directory nor a valid url, cannot inject it!'
.format(url)) from exc
if remote_file.status_code != 200:
LOGGER.info('requests.get response status: %s', remote_file.status_code)
LOGGER.info('requests.get response headers: %s', remote_file.headers)
raise RuntimeError(
'URL \'{}\' did not return content, cannot inject it!'
.format(url))
    with open(dest_file, 'wb') as dest_fh:
        dest_fh.write(remote_file.content)
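# Illustrative sketch of an UPDATE_IMAGE_FILES entry consumed by the file-injection
# loop above (hypothetical paths): 'source' may be a local file, a directory, or a
# URL, 'destination' must be an absolute path on BIG-IP, and the optional 'mode' is
# a string of one to four octal digits.
#
#   UPDATE_IMAGE_FILES:
#     - source: "files/startup-script.sh"
#       destination: "/config/startup-script.sh"
#       mode: "0755"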
| 2.109375
| 2
|
kenja/detection/pull_up_method.py
|
umr00/kenja
| 0
|
12778598
|
from __future__ import absolute_import
from itertools import product, combinations
from git.objects import Blob
from collections import defaultdict
from kenja.historage import *
from kenja.shingles import calculate_similarity
def get_extends(commit, org_file_name, classes):
classes_path = '/[CN]/'.join(classes)
extends_path = '/'.join([org_file_name, '[CN]', classes_path, 'extend'])
try:
extend = commit.tree / extends_path
assert isinstance(extend, Blob)
except KeyError:
return None
return extend.data_stream.read().rstrip()
def exist_class(blob, commit):
split_path = blob.path.split('/')
while split_path[-2] != '[CN]':
split_path.pop()
class_path = '/'.join(split_path)
try:
commit.tree / class_path
except KeyError:
return False
return True
def detect_pull_up_method(historage):
pull_up_method_information = []
checked_commit = set()
detection_stack = []
for ref in get_refs(historage):
ref_commit = historage.commit(ref)
detection_stack.append(ref_commit)
while detection_stack:
commit = detection_stack.pop()
if commit.hexsha in checked_commit:
continue
for p in commit.parents:
pull_up_method_information.extend(detect_shingle_pullup_method(p, commit))
detection_stack.append(p)
checked_commit.add(commit.hexsha)
return pull_up_method_information
class Method(object):
def __init__(self, blob, commit):
self.blob = blob
self.package_name = get_package(blob.path, commit)
self.classes = self.get_classes(blob.path)
self.method_name = get_method(blob.path)
self.body_cache = None
def get_classes(self, path):
classes = []
split_path = path.split('/')
for i, v in enumerate(split_path):
if v == '[CN]':
classes.append(split_path[i+1])
return classes
def get_class_name(self):
return self.classes[-1]
def get_full_name(self):
class_name = '.'.join(self.classes)
if self.package_name:
return '.'.join([self.package_name, class_name, self.method_name])
else:
return '.'.join([class_name, self.method_name])
def get_full_class_name(self):
class_name = '.'.join(self.classes)
if self.package_name:
return '.'.join([self.package_name, class_name])
else:
return '.'.join([class_name])
def get_parameter_types(self):
index = self.method_name.index('(')
return self.method_name[index:-1].split(',')
@classmethod
def create_from_blob(cls, blob, commit):
if is_method_body(blob.path):
return cls(blob, commit)
else:
return None
def get_body(self):
if self.body_cache is None:
self.body_cache = self.blob.data_stream.read()
return self.body_cache
def __str__(self):
return self.get_full_name()
class SubclassMethod(Method):
def __init__(self, blob, commit):
super(SubclassMethod, self).__init__(blob, commit)
split_path = blob.path.split('/')
self.extend = get_extends(commit, split_path[0], self.classes)
def match_type(a_method, b_method):
a_types = a_method.get_parameter_types()
b_types = b_method.get_parameter_types()
return a_types == b_types
def detect_shingle_pullup_method(old_commit, new_commit):
diff_index = old_commit.diff(new_commit, create_patch=False)
added_methods = defaultdict(list)
deleted_methods = defaultdict(list)
for diff in diff_index.iter_change_type('A'):
new_method = Method.create_from_blob(diff.b_blob, new_commit)
if new_method:
added_methods[new_method.get_class_name()].append(new_method)
deleted_classes = set()
for diff in diff_index.iter_change_type('D'):
# NOTE change following old_commit to new_commit to detect
        # pull_up_method by the same condition as UMLDiff
subclass_method = SubclassMethod.create_from_blob(diff.a_blob, old_commit)
if subclass_method:
if not subclass_method.extend:
continue
if subclass_method.get_full_class_name() in deleted_classes:
continue
if not exist_class(diff.a_blob, new_commit):
deleted_classes.add(subclass_method.get_full_class_name())
continue
if subclass_method.extend in added_methods.keys():
deleted_methods[subclass_method.extend].append(subclass_method)
pull_up_method_candidates = []
old_org_commit = get_org_commit(old_commit)
new_org_commit = get_org_commit(new_commit)
for super_class, v in deleted_methods.iteritems():
if super_class not in added_methods:
            print('%s doesn\'t have an added method' % (super_class))
continue
for dst_method in added_methods[super_class]:
dst_body = dst_method.get_body()
if not dst_body:
continue
dst_body = '\n'.join(dst_body.split('\n')[1:-2])
for src_method in v:
src_body = src_method.get_body()
is_same_parameters = match_type(src_method, dst_method)
if src_body:
src_body = '\n'.join(src_body.split('\n')[1:-2])
if src_body or dst_body:
try:
sim = calculate_similarity(src_body, dst_body)
except ZeroDivisionError:
sim = "N/A"
else:
sim = 0
pull_up_method_candidates.append((old_commit.hexsha,
new_commit.hexsha,
old_org_commit,
new_org_commit,
str(src_method),
str(dst_method),
sim,
is_same_parameters))
return pull_up_method_candidates
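# For reference, each candidate appended above is an 8-tuple of
# (old_commit_sha, new_commit_sha, old_original_commit, new_original_commit,
#  source_method_full_name, destination_method_full_name, similarity,
#  has_same_parameter_types), where similarity is a float, the string "N/A" on a
# ZeroDivisionError, or 0 when neither method body has content.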
| 2.09375
| 2
|
apps/TCPB_-_Expressions/src/smartdict.py
|
mkromer-tc/threatconnect-playbooks
| 0
|
12778599
|
<reponame>mkromer-tc/threatconnect-playbooks<filename>apps/TCPB_-_Expressions/src/smartdict.py
# -*- coding: utf-8 -*-
"""Smartdict -- smart dictionary for formatting strings"""
from string import Formatter
from attrdict import AttrDict
__notfound__ = object()
class SmartDict:
"""Smart dictionary object"""
def __init__(self, namespace: object, valuedict: dict = None, default=__notfound__):
"""init"""
if namespace is None:
namespace = {}
self.namespace = namespace
if not valuedict:
valuedict = {}
self.values = valuedict
self.default = default
def __getitem__(self, name, default=__notfound__):
"""get item from values *or* namespace"""
if default is __notfound__:
default = self.default
if name in self.values:
value = self.values[name]
else:
try:
value = self.namespace.get(name, __notfound__)
except (AttributeError, TypeError):
value = __notfound__
if value is __notfound__:
value = getattr(self.namespace, name, __notfound__)
if value is __notfound__:
value = default
if value is __notfound__:
raise KeyError(name)
return self.encapsulate(value)
get = __getitem__
__getattr__ = __getitem__
def encapsulate(self, value):
"""Encapsulate dicts into AttrDicts"""
if not isinstance(value, (list, tuple, dict)):
return value
if isinstance(value, (list, tuple)):
result = [self.encapsulate(x) for x in value]
if isinstance(value, tuple):
result = tuple(result)
return result
return AttrDict(value)
def smart_format(s: str, *args, _default=__notfound__, _context=None, **kwargs):
"""Format string S according to Python string formatting rules. Compound
structure elements may be accessed with dot or bracket notation and without quotes
around key names, e.g. `blob[0][events][0][source][device][ipAddress]`
or `blob[0].events[0].source.device.ipAddress`. If default is set,
that value will be used for any missing value."""
kws = SmartDict(_context, kwargs, default=_default)
fmt = Formatter()
return fmt.vformat(s, args, kws)
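# Minimal usage sketch (hypothetical data), exercising both access notations and the
# _default fallback described in the docstring above:
if __name__ == '__main__':
    _demo_context = {'blob': [{'events': [{'source': {'device': {'ipAddress': '10.0.0.1'}}}]}]}
    # Dot notation and bracket notation resolve to the same nested value:
    print(smart_format('{blob[0].events[0].source.device.ipAddress}', _context=_demo_context))
    print(smart_format('{blob[0][events][0][source][device][ipAddress]}', _context=_demo_context))
    # Missing names fall back to _default when one is provided:
    print(smart_format('{missing}', _context=_demo_context, _default='n/a'))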
| 2.65625
| 3
|
tests/test_blast2xl.py
|
peterk87/blast2xl
| 1
|
12778600
|
#!/usr/bin/env python
"""Tests for `blast2xl` package."""
from os.path import abspath
from pathlib import Path
from click.testing import CliRunner
from blast2xl import cli
def test_command_line_interface():
"""Test the CLI."""
runner = CliRunner()
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert 'blast2xl: BLAST XLSX Report Creator' in help_result.stdout
result = runner.invoke(cli.main)
assert result.exit_code == 2
assert 'Missing option' in result.output
blast_tsv_dir = abspath('tests/data/blast_tsv')
fasta_dir = abspath('tests/data/fastas')
with runner.isolated_filesystem():
excel_report = 'blast-report.xlsx'
seq_outdir = 'seq-outdir'
result = runner.invoke(cli.main, ['--blast-tsv-dir', blast_tsv_dir,
'--blast-tsv-sample-name-pattern', r'^blastn-(.+)-vs-nt.*',
'--seq-dir', fasta_dir,
'--top-n-results', 5,
'-o', excel_report,
'-O', seq_outdir,
'-vvv'])
assert result.exit_code == 0
path_seq_outdir = Path(seq_outdir)
assert path_seq_outdir.exists()
output_fastas = list(path_seq_outdir.glob('**/*.fasta'))
assert len(output_fastas) > 2
fasta_path = path_seq_outdir / 'FMDV' / 'Foot_and_mouth_disease_virus___type_O-12118' / 'FMDV.fasta'
assert fasta_path.exists()
assert fasta_path.stat().st_size > 0
assert Path(excel_report).exists()
assert Path(excel_report).stat().st_size > 0
| 2.375
| 2
|
jlm/src/jlm/datastore.py
|
UnofficialJuliaMirror/JuliaManager.jl-0cdbb3b1-e653-5045-b8d5-b31a04c2a6c9
| 9
|
12778601
|
<filename>jlm/src/jlm/datastore.py
import hashlib
import json
import os
from contextlib import contextmanager
from pathlib import Path
from shutil import which
from typing import IO, Any, Dict, Iterator, List, Optional, Tuple
from . import __version__
from .runtime import JuliaRuntime
from .utils import ApplicationError, Pathish, _Pathish, absolutepath, pathstr
@contextmanager
def atomicopen(path: _Pathish, *args) -> Iterator[IO]:
tmppath = Path("{}.{}.tmp".format(path, os.getpid()))
try:
with open(pathstr(tmppath), *args) as file:
yield file
tmppath.rename(path)
finally:
if tmppath.exists():
os.remove(tmppath)
def locate_localstore(path: Path) -> Optional[Path]:
prev = None
while path != prev:
candidate = path / ".jlm"
if candidate.exists():
return absolutepath(candidate)
prev = path
path = path.parent
return None
class BaseStore:
def execpath(self, julia: str) -> Path:
assert Path(julia).is_absolute()
m = hashlib.sha1(julia.encode("utf-8"))
return self.path / "exec" / m.hexdigest() # type: ignore
class HomeStore(BaseStore):
# path: Path
defaultpath = Path.home() / ".julia" / "jlm"
def __init__(self, path: _Pathish = defaultpath):
self.path = Path(path)
class LocalStore(BaseStore):
@staticmethod
def is_valid_path(path: _Pathish) -> bool:
return (Path(path) / "data.json").exists()
def __init__(self, path: Optional[_Pathish] = None):
if path is not None:
if not isinstance(path, Pathish):
raise TypeError(
(
"`path` argument for `LocalStore(path)` must be a"
"`str` or `Path`, not {}"
).format(type(path))
)
path = Path(path)
if not self.is_valid_path(path):
raise ApplicationError(
"{} is not a valid `.jlm` directory.".format(path)
)
self.path = path
def locate_path(self) -> Optional[Path]:
try:
return self._path
except AttributeError:
return locate_localstore(Path.cwd())
def find_path(self) -> Path:
path = self.locate_path()
if path is None:
raise ApplicationError("Cannot locate `.jlm` local directory")
return path
# _path: Path
@property
def path(self) -> Path:
try:
return self._path
except AttributeError:
pass
self.path = self.find_path()
return self._path
@path.setter
def path(self, value: _Pathish):
path = Path(value)
if not path.is_absolute():
raise ValueError("Not an absolute path:\n{}".format(path))
self._path = path
def exists(self) -> bool:
path = self.locate_path()
return path is not None and (path / "data.json").exists()
def loaddata(self) -> Dict[str, Any]:
if self.exists():
datapath = self.path / "data.json"
with open(pathstr(datapath)) as file:
return json.load(file) # type: ignore
return {
"name": "jlm.LocalStore",
"jlm_version": __version__,
"config": {"runtime": {}},
}
def storedata(self, data: Dict[str, Any]):
with atomicopen(self.path / "data.json", "w") as file:
json.dump(data, file)
def set(self, config: Dict[str, Any]):
data = self.loaddata()
if "default" in config:
assert isinstance(config["default"], str)
data["config"]["default"] = config["default"]
if "runtime" in config:
data["config"]["runtime"].update(config["runtime"])
self.storedata(data)
def has_default_julia(self) -> bool:
return "default" in self.loaddata()["config"]
@property
def default_julia(self) -> str:
config = self.loaddata()["config"]
try:
return config["default"]
except KeyError:
raise AttributeError
def sysimage(self, julia: str) -> Optional[str]:
runtime = self.loaddata()["config"]["runtime"]
try:
return runtime[julia]["sysimage"]
except KeyError:
return None
def set_sysimage(self, julia: str, sysimage: _Pathish):
assert isinstance(julia, str)
config = self.loaddata()["config"]
config["runtime"][julia] = {"sysimage": pathstr(sysimage)}
self.set(config)
def unset_sysimage(self, julia: str):
if not isinstance(julia, str):
raise TypeError("`julia` must be a `str`, got: {!r}".format(julia))
data = self.loaddata()
data["config"]["runtime"].pop(julia, None)
self.storedata(data)
def available_runtimes(self) -> Tuple[JuliaRuntime, List[JuliaRuntime]]:
config = self.loaddata()["config"]
try:
julia = config["default"]
except KeyError:
julia = which("julia")
default = JuliaRuntime(julia, self.sysimage(julia))
others = []
for (julia, runtime) in config["runtime"].items():
if julia != default.executable:
others.append(JuliaRuntime(julia, runtime["sysimage"]))
return default, others
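# Minimal usage sketch for atomicopen (hypothetical path): the payload is written to
# "<path>.<pid>.tmp" and renamed over the target only after the write completes,
# which is how storedata() keeps data.json from being observed half-written.
#
#   with atomicopen(Path("/tmp/example") / "data.json", "w") as fh:
#       json.dump({"name": "jlm.LocalStore"}, fh)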
| 2.03125
| 2
|
xtellixClient.py
|
markamo/xtellixClient
| 0
|
12778602
|
import requests
import json
__SERVER_HOST__ = "http://127.0.0.1:5057"
__CLIENT_SECRET__ = 1234567890
__SERVER_SECRET__ = 1234567890
__SERVER_START_API__ = "/api/start"
__SERVER_STOP_API__ = "/api/stop"
__SERVER_PARAMETERS_API__ = "/api/parameters"
__SERVER_ALLPARAMETERS_API__ = "/api/allparameters"
__SERVER_OBJECTIVE_API__ = "/api/objective"
__SERVER_VERSION_API__ = "/api/version"
params = []
_DIM_ = 0
rit = 0
svr_rit = 0
current_objective = 1E300
pareato_objective = 1E300
searchMin = True
default_headers = {'Content-Type': 'application/json'}
def version():
"""xtellix Module Copyright and Version Info """
print( "*******************************************************")
print("Copyright (C) 2010-2020 Dr <NAME> <EMAIL>")
print("Client Version: 0.0.1 beta")
print( "*******************************************************")
def setOptimizationServerIP(address_port):
"""Set Optimization Server IP and Port Number """
global __SERVER_HOST__
__SERVER_HOST__ = address_port
def setClientSecret(secret):
"""Set Client Secret to enable Singular Access to the optimization engine """
global __CLIENT_SECRET__
__CLIENT_SECRET__ = secret
def connect(address_port, secret):
"""Set Server Endpoint and Client Secrets """
setOptimizationServerIP(address_port)
setClientSecret(secret)
apipath = __SERVER_HOST__ + __SERVER_VERSION_API__ + "/" + str(__CLIENT_SECRET__)
response = requests.get(apipath, verify=False, headers=default_headers)
r_data = json.loads(response.content)
print( "*******************************************************")
print("Server Version: ")
print( "*******************************************************")
print(r_data)
print( "*******************************************************")
print("Client Version: ")
version()
def setInitialParameters(initialSuggestions):
"""Initial parameters for optimization problem being solved"""
global params
params = initialSuggestions
sugjson = json.dumps(list(initialSuggestions))
apipath = __SERVER_HOST__ + __SERVER_PARAMETERS_API__ + "/" + str(__SERVER_SECRET__)
response = requests.post(apipath, json =sugjson, headers=default_headers )
#print(sugjson)
#print(apipath)
#print(response)
return response
def initializeOptimizer(initMetric,ubound, lbound, dim, maxIter, maxSamples, initialSuggestions, seedId, minOrMax):
"""Default parameters for initializing the optimization engine, based on being solved"""
global current_objective
global pareato_objective
global __SERVER_SECRET__
global _DIM_
global searchMin
current_objective = initMetric
pareato_objective = initMetric
_DIM_ = dim
searchMin = minOrMax
initialize = [dim,ubound, lbound, maxIter, maxSamples, initMetric, seedId]
iniJson = json.dumps(initialize)
apipath = __SERVER_HOST__ + __SERVER_START_API__ + "/" + str(__CLIENT_SECRET__)
response = requests.post(apipath, json=iniJson, headers=default_headers )
secret = int(json.loads(response.content))
__SERVER_SECRET__ = secret
#print(apipath)
print("New Server Secret: ", __SERVER_SECRET__)
print("Optimization Engine Running.....")
response1 = setInitialParameters(initialSuggestions)
return response1
def getParameters(cached = True):
"""Get parameters from the Optimization Server """
global params
global svr_rit
    if cached:
apipath = __SERVER_HOST__ + __SERVER_PARAMETERS_API__ + "/" + str(__SERVER_SECRET__)
response = requests.get(apipath, verify=False, headers=default_headers )
r_data = json.loads(response.content)
oldK = r_data[0]
newK = r_data[1]
oldPoint = r_data[2]
newPoint = r_data[3]
rit = r_data[4]
svr_rit = rit
params[oldK] = oldPoint
params[newK] = newPoint
else:
apipath = __SERVER_HOST__ + __SERVER_ALLPARAMETERS_API__ + "/" + str(__SERVER_SECRET__)
response = requests.get(apipath, verify=False, headers=default_headers )
r_data = json.loads(response.content)
global _DIM_
for i in range(_DIM_):
params[i] = r_data[i]
#print(apipath)
#print(response)
return params
def updateObjectiveFunctionValue(evalMetric):
"""Send Objective Function Value updates to the optimization server"""
jObj = json.dumps(evalMetric)
apipath = __SERVER_HOST__ + __SERVER_OBJECTIVE_API__ + "/" + str(__SERVER_SECRET__)
global current_objective
global pareato_objective
global rit
global searchMin
rit = rit + 1
current_objective = evalMetric
if searchMin == True:
if evalMetric <= pareato_objective: pareato_objective = evalMetric
elif searchMin == False:
if evalMetric >= pareato_objective: pareato_objective = evalMetric
else:
if evalMetric <= pareato_objective: pareato_objective = evalMetric
response = requests.post(apipath, json =jObj,verify=False, headers=default_headers )
#print(apipath)
#print(jObj)
#print(response)
return response
def getProgress():
global current_objective
global pareato_objective
global rit
global svr_rit
return current_objective, pareato_objective, rit, svr_rit
def getFunctionEvaluations():
global rit
global svr_rit
return rit, svr_rit
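# Minimal client-loop sketch (hypothetical endpoint, bounds and objective; assumes a
# running optimization server):
#
#   import xtellixClient as xm
#   xm.connect("http://127.0.0.1:5057", 1234567890)
#   dim = 10
#   x0 = [0.0] * dim
#   xm.initializeOptimizer(1e300, 10.0, -10.0, dim, 5000, 1, x0, 0, True)
#   for _ in range(5000):
#       x = xm.getParameters()
#       cost = sum(v * v for v in x)  # user-defined objective function
#       xm.updateObjectiveFunctionValue(cost)
#   best_so_far = xm.getProgress()[1]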
| 2.28125
| 2
|
tests/test_wigner_H.py
|
moble/spherical
| 15
|
12778603
|
<reponame>moble/spherical
#!/usr/bin/env python
# Copyright (c) 2021, <NAME>
# See LICENSE file for details: <https://github.com/moble/spherical/blob/master/LICENSE>
import sympy
import numpy as np
import spherical as sf
import pytest
from .conftest import requires_sympy
slow = pytest.mark.slow
@requires_sympy
@slow
def test_H_vs_sympy(eps):
"""Eq. (29) of arxiv:1403.7698: d^{m',m}_{n}(β) = ϵ(m') ϵ(-m) H^{m',m}_{n}(β)"""
from sympy.physics.quantum.spin import WignerD as Wigner_D_sympy
def ϵ(m):
m = np.asarray(m)
eps = np.ones_like(m)
eps[m >= 0] = (-1)**m[m >= 0]
return eps
ell_max = 4
alpha, beta, gamma = 0.0, 0.1, 0.0
max_error = 0.0
print()
for mp_max in range(ell_max+1):
print(f"Checking mp_max={mp_max} (going up to {ell_max})")
w = sf.Wigner(ell_max, mp_max=mp_max)
workspace = w.new_workspace()
Hwedge, Hv, Hextra, _, _, _ = w._split_workspace(workspace)
Hnmpm = w.H(np.exp(1j * beta), Hwedge, Hv, Hextra)
for n in range(w.ell_max+1):
for mp in range(-min(n, mp_max), min(n, mp_max)+1):
for m in range(-n, n+1):
sympyd = sympy.re(sympy.N(Wigner_D_sympy(n, mp, m, alpha, beta, gamma).doit()))
sphericald = ϵ(mp) * ϵ(-m) * Hnmpm[sf.WignerHindex(n, mp, m, mp_max)]
error = float(abs(sympyd-sphericald))
assert error < 1.1 * ell_max * eps, (
f"Testing Wigner d recursion with n={n}, m'={mp}, m={m}, mp_max={mp_max}, "
f"sympyd={sympyd}, sphericald={sphericald}, error={error}"
)
| 2.28125
| 2
|
tuyaha/devices/switch.py
|
PaulAnnekov/tuya-ha
| 153
|
12778604
|
from tuyaha.devices.base import TuyaDevice
class TuyaSwitch(TuyaDevice):
def turn_on(self):
if self._control_device("turnOnOff", {"value": "1"}):
self._update_data("state", True)
def turn_off(self):
if self._control_device("turnOnOff", {"value": "0"}):
self._update_data("state", False)
    def update(self, use_discovery=True):
        return self._update(use_discovery=use_discovery)
| 2.671875
| 3
|
button_test.py
|
krame505/bs-generics-demo
| 0
|
12778605
|
<filename>button_test.py
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from demo import DemoClient
if __name__ == "__main__":
if len(sys.argv) <= 1:
sys.exit("Expected serial port name")
client = DemoClient(sys.argv[1])
client.start()
while True:
if (event := client.getButtonEvent()) is not None:
print("Button", event, "pressed")
else:
time.sleep(0.01)
| 2.734375
| 3
|
tools/polly/bin/detail/osx_dev_root.py
|
Kondr11/LABA7
| 861
|
12778606
|
# Copyright (c) 2015, <NAME>
# All rights reserved.
import os
import re
def get(osx_version):
dev_dir = re.sub(r'\.', '_', osx_version)
dev_dir = 'OSX_{}_DEVELOPER_DIR'.format(dev_dir)
return os.getenv(dev_dir)
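# Example (hypothetical value): get('10.15') reads the OSX_10_15_DEVELOPER_DIR
# environment variable and returns its value, or None when it is not set.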
| 2.046875
| 2
|
99.py
|
juandarr/ProjectEuler
| 0
|
12778607
|
<filename>99.py
"""
Finds the line whose base,exponent pair has the greatest value base**exponent; each row has the format base,exponent
Author: <NAME>
"""
import math
pairs = """519432,525806
632382,518061
78864,613712
466580,530130
780495,510032
525895,525320
15991,714883
960290,502358
760018,511029
166800,575487
210884,564478
555151,523163
681146,515199
563395,522587
738250,512126
923525,503780
595148,520429
177108,572629
750923,511482
440902,532446
881418,505504
422489,534197
979858,501616
685893,514935
747477,511661
167214,575367
234140,559696
940238,503122
728969,512609
232083,560102
900971,504694
688801,514772
189664,569402
891022,505104
445689,531996
119570,591871
821453,508118
371084,539600
911745,504251
623655,518600
144361,582486
352442,541775
420726,534367
295298,549387
6530,787777
468397,529976
672336,515696
431861,533289
84228,610150
805376,508857
444409,532117
33833,663511
381850,538396
402931,536157
92901,604930
304825,548004
731917,512452
753734,511344
51894,637373
151578,580103
295075,549421
303590,548183
333594,544123
683952,515042
60090,628880
951420,502692
28335,674991
714940,513349
343858,542826
549279,523586
804571,508887
260653,554881
291399,549966
402342,536213
408889,535550
40328,652524
375856,539061
768907,510590
165993,575715
976327,501755
898500,504795
360404,540830
478714,529095
694144,514472
488726,528258
841380,507226
328012,544839
22389,690868
604053,519852
329514,544641
772965,510390
492798,527927
30125,670983
895603,504906
450785,531539
840237,507276
380711,538522
63577,625673
76801,615157
502694,527123
597706,520257
310484,547206
944468,502959
121283,591152
451131,531507
566499,522367
425373,533918
40240,652665
39130,654392
714926,513355
469219,529903
806929,508783
287970,550487
92189,605332
103841,599094
671839,515725
452048,531421
987837,501323
935192,503321
88585,607450
613883,519216
144551,582413
647359,517155
213902,563816
184120,570789
258126,555322
502546,527130
407655,535678
401528,536306
477490,529193
841085,507237
732831,512408
833000,507595
904694,504542
581435,521348
455545,531110
873558,505829
94916,603796
720176,513068
545034,523891
246348,557409
556452,523079
832015,507634
173663,573564
502634,527125
250732,556611
569786,522139
216919,563178
521815,525623
92304,605270
164446,576167
753413,511364
11410,740712
448845,531712
925072,503725
564888,522477
7062,780812
641155,517535
738878,512100
636204,517828
372540,539436
443162,532237
571192,522042
655350,516680
299741,548735
581914,521307
965471,502156
513441,526277
808682,508700
237589,559034
543300,524025
804712,508889
247511,557192
543486,524008
504383,526992
326529,545039
792493,509458
86033,609017
126554,589005
579379,521481
948026,502823
404777,535969
265767,554022
266876,553840
46631,643714
492397,527958
856106,506581
795757,509305
748946,511584
294694,549480
409781,535463
775887,510253
543747,523991
210592,564536
517119,525990
520253,525751
247926,557124
592141,520626
346580,542492
544969,523902
506501,526817
244520,557738
144745,582349
69274,620858
292620,549784
926027,503687
736320,512225
515528,526113
407549,535688
848089,506927
24141,685711
9224,757964
980684,501586
175259,573121
489160,528216
878970,505604
969546,502002
525207,525365
690461,514675
156510,578551
659778,516426
468739,529945
765252,510770
76703,615230
165151,575959
29779,671736
928865,503569
577538,521605
927555,503618
185377,570477
974756,501809
800130,509093
217016,563153
365709,540216
774508,510320
588716,520851
631673,518104
954076,502590
777828,510161
990659,501222
597799,520254
786905,509727
512547,526348
756449,511212
869787,505988
653747,516779
84623,609900
839698,507295
30159,670909
797275,509234
678136,515373
897144,504851
989554,501263
413292,535106
55297,633667
788650,509637
486748,528417
150724,580377
56434,632490
77207,614869
588631,520859
611619,519367
100006,601055
528924,525093
190225,569257
851155,506789
682593,515114
613043,519275
514673,526183
877634,505655
878905,505602
1926,914951
613245,519259
152481,579816
841774,507203
71060,619442
865335,506175
90244,606469
302156,548388
399059,536557
478465,529113
558601,522925
69132,620966
267663,553700
988276,501310
378354,538787
529909,525014
161733,576968
758541,511109
823425,508024
149821,580667
269258,553438
481152,528891
120871,591322
972322,501901
981350,501567
676129,515483
950860,502717
119000,592114
392252,537272
191618,568919
946699,502874
289555,550247
799322,509139
703886,513942
194812,568143
261823,554685
203052,566221
217330,563093
734748,512313
391759,537328
807052,508777
564467,522510
59186,629748
113447,594545
518063,525916
905944,504492
613922,519213
439093,532607
445946,531981
230530,560399
297887,549007
459029,530797
403692,536075
855118,506616
963127,502245
841711,507208
407411,535699
924729,503735
914823,504132
333725,544101
176345,572832
912507,504225
411273,535308
259774,555036
632853,518038
119723,591801
163902,576321
22691,689944
402427,536212
175769,572988
837260,507402
603432,519893
313679,546767
538165,524394
549026,523608
61083,627945
898345,504798
992556,501153
369999,539727
32847,665404
891292,505088
152715,579732
824104,507997
234057,559711
730507,512532
960529,502340
388395,537687
958170,502437
57105,631806
186025,570311
993043,501133
576770,521664
215319,563513
927342,503628
521353,525666
39563,653705
752516,511408
110755,595770
309749,547305
374379,539224
919184,503952
990652,501226
647780,517135
187177,570017
168938,574877
649558,517023
278126,552016
162039,576868
658512,516499
498115,527486
896583,504868
561170,522740
747772,511647
775093,510294
652081,516882
724905,512824
499707,527365
47388,642755
646668,517204
571700,522007
180430,571747
710015,513617
435522,532941
98137,602041
759176,511070
486124,528467
526942,525236
878921,505604
408313,535602
926980,503640
882353,505459
566887,522345
3326,853312
911981,504248
416309,534800
392991,537199
622829,518651
148647,581055
496483,527624
666314,516044
48562,641293
672618,515684
443676,532187
274065,552661
265386,554079
347668,542358
31816,667448
181575,571446
961289,502320
365689,540214
987950,501317
932299,503440
27388,677243
746701,511701
492258,527969
147823,581323
57918,630985
838849,507333
678038,515375
27852,676130
850241,506828
818403,508253
131717,587014
850216,506834
904848,504529
189758,569380
392845,537217
470876,529761
925353,503711
285431,550877
454098,531234
823910,508003
318493,546112
766067,510730
261277,554775
421530,534289
694130,514478
120439,591498
213308,563949
854063,506662
365255,540263
165437,575872
662240,516281
289970,550181
847977,506933
546083,523816
413252,535113
975829,501767
361540,540701
235522,559435
224643,561577
736350,512229
328303,544808
35022,661330
307838,547578
474366,529458
873755,505819
73978,617220
827387,507845
670830,515791
326511,545034
309909,547285
400970,536363
884827,505352
718307,513175
28462,674699
599384,520150
253565,556111
284009,551093
343403,542876
446557,531921
992372,501160
961601,502308
696629,514342
919537,503945
894709,504944
892201,505051
358160,541097
448503,531745
832156,507636
920045,503924
926137,503675
416754,534757
254422,555966
92498,605151
826833,507873
660716,516371
689335,514746
160045,577467
814642,508425
969939,501993
242856,558047
76302,615517
472083,529653
587101,520964
99066,601543
498005,527503
709800,513624
708000,513716
20171,698134
285020,550936
266564,553891
981563,501557
846502,506991
334,1190800
209268,564829
9844,752610
996519,501007
410059,535426
432931,533188
848012,506929
966803,502110
983434,501486
160700,577267
504374,526989
832061,507640
392825,537214
443842,532165
440352,532492
745125,511776
13718,726392
661753,516312
70500,619875
436952,532814
424724,533973
21954,692224
262490,554567
716622,513264
907584,504425
60086,628882
837123,507412
971345,501940
947162,502855
139920,584021
68330,621624
666452,516038
731446,512481
953350,502619
183157,571042
845400,507045
651548,516910
20399,697344
861779,506331
629771,518229
801706,509026
189207,569512
737501,512168
719272,513115
479285,529045
136046,585401
896746,504860
891735,505067
684771,514999
865309,506184
379066,538702
503117,527090
621780,518717
209518,564775
677135,515423
987500,501340
197049,567613
329315,544673
236756,559196
357092,541226
520440,525733
213471,563911
956852,502490
702223,514032
404943,535955
178880,572152
689477,514734
691351,514630
866669,506128
370561,539656
739805,512051
71060,619441
624861,518534
261660,554714
366137,540160
166054,575698
601878,519990
153445,579501
279899,551729
379166,538691
423209,534125
675310,515526
145641,582050
691353,514627
917468,504026
284778,550976
81040,612235
161699,576978
616394,519057
767490,510661
156896,578431
427408,533714
254849,555884
737217,512182
897133,504851
203815,566051
270822,553189
135854,585475
778805,510111
784373,509847
305426,547921
733418,512375
732087,512448
540668,524215
702898,513996
628057,518328
640280,517587
422405,534204
10604,746569
746038,511733
839808,507293
457417,530938
479030,529064
341758,543090
620223,518824
251661,556451
561790,522696
497733,527521
724201,512863
489217,528217
415623,534867
624610,518548
847541,506953
432295,533249
400391,536421
961158,502319
139173,584284
421225,534315
579083,521501
74274,617000
701142,514087
374465,539219
217814,562985
358972,540995
88629,607424
288597,550389
285819,550812
538400,524385
809930,508645
738326,512126
955461,502535
163829,576343
826475,507891
376488,538987
102234,599905
114650,594002
52815,636341
434037,533082
804744,508880
98385,601905
856620,506559
220057,562517
844734,507078
150677,580387
558697,522917
621751,518719
207067,565321
135297,585677
932968,503404
604456,519822
579728,521462
244138,557813
706487,513800
711627,513523
853833,506674
497220,527562
59428,629511
564845,522486
623621,518603
242689,558077
125091,589591
363819,540432
686453,514901
656813,516594
489901,528155
386380,537905
542819,524052
243987,557841
693412,514514
488484,528271
896331,504881
336730,543721
728298,512647
604215,519840
153729,579413
595687,520398
540360,524240
245779,557511
924873,503730
509628,526577
528523,525122
3509,847707
522756,525555
895447,504922
44840,646067
45860,644715
463487,530404
398164,536654
894483,504959
619415,518874
966306,502129
990922,501212
835756,507474
548881,523618
453578,531282
474993,529410
80085,612879
737091,512193
50789,638638
979768,501620
792018,509483
665001,516122
86552,608694
462772,530469
589233,520821
891694,505072
592605,520594
209645,564741
42531,649269
554376,523226
803814,508929
334157,544042
175836,572970
868379,506051
658166,516520
278203,551995
966198,502126
627162,518387
296774,549165
311803,547027
843797,507118
702304,514032
563875,522553
33103,664910
191932,568841
543514,524006
506835,526794
868368,506052
847025,506971
678623,515342
876139,505726
571997,521984
598632,520198
213590,563892
625404,518497
726508,512738
689426,514738
332495,544264
411366,535302
242546,558110
315209,546555
797544,509219
93889,604371
858879,506454
124906,589666
449072,531693
235960,559345
642403,517454
720567,513047
705534,513858
603692,519870
488137,528302
157370,578285
63515,625730
666326,516041
619226,518883
443613,532186
597717,520257
96225,603069
86940,608450
40725,651929
460976,530625
268875,553508
270671,553214
363254,540500
384248,538137
762889,510892
377941,538833
278878,551890
176615,572755
860008,506412
944392,502967
608395,519571
225283,561450
45095,645728
333798,544090
625733,518476
995584,501037
506135,526853
238050,558952
557943,522972
530978,524938
634244,517949
177168,572616
85200,609541
953043,502630
523661,525484
999295,500902
840803,507246
961490,502312
471747,529685
380705,538523
911180,504275
334149,544046
478992,529065
325789,545133
335884,543826
426976,533760
749007,511582
667067,516000
607586,519623
674054,515599
188534,569675
565185,522464
172090,573988
87592,608052
907432,504424
8912,760841
928318,503590
757917,511138
718693,513153
315141,546566
728326,512645
353492,541647
638429,517695
628892,518280
877286,505672
620895,518778
385878,537959
423311,534113
633501,517997
884833,505360
883402,505416
999665,500894
708395,513697
548142,523667
756491,511205
987352,501340
766520,510705
591775,520647
833758,507563
843890,507108
925551,503698
74816,616598
646942,517187
354923,541481
256291,555638
634470,517942
930904,503494
134221,586071
282663,551304
986070,501394
123636,590176
123678,590164
481717,528841
423076,534137
866246,506145
93313,604697
783632,509880
317066,546304
502977,527103
141272,583545
71708,618938
617748,518975
581190,521362
193824,568382
682368,515131
352956,541712
351375,541905
505362,526909
905165,504518
128645,588188
267143,553787
158409,577965
482776,528754
628896,518282
485233,528547
563606,522574
111001,595655
115920,593445
365510,540237
959724,502374
938763,503184
930044,503520
970959,501956
913658,504176
68117,621790
989729,501253
567697,522288
820427,508163
54236,634794
291557,549938
124961,589646
403177,536130
405421,535899
410233,535417
815111,508403
213176,563974
83099,610879
998588,500934
513640,526263
129817,587733
1820,921851
287584,550539
299160,548820
860621,506386
529258,525059
586297,521017
953406,502616
441234,532410
986217,501386
781938,509957
461247,530595
735424,512277
146623,581722
839838,507288
510667,526494
935085,503327
737523,512167
303455,548204
992779,501145
60240,628739
939095,503174
794368,509370
501825,527189
459028,530798
884641,505363
512287,526364
835165,507499
307723,547590
160587,577304
735043,512300
493289,527887
110717,595785
306480,547772
318593,546089
179810,571911
200531,566799
314999,546580
197020,567622
301465,548487
237808,559000
131944,586923
882527,505449
468117,530003
711319,513541
156240,578628
965452,502162
992756,501148
437959,532715
739938,512046
614249,519196
391496,537356
62746,626418
688215,514806
75501,616091
883573,505412
558824,522910
759371,511061
173913,573489
891351,505089
727464,512693
164833,576051
812317,508529
540320,524243
698061,514257
69149,620952
471673,529694
159092,577753
428134,533653
89997,606608
711061,513557
779403,510081
203327,566155
798176,509187
667688,515963
636120,517833
137410,584913
217615,563034
556887,523038
667229,515991
672276,515708
325361,545187
172115,573985
13846,725685"""
pairs = pairs.split('\n')
for p in range(len(pairs)):
pairs[p] = pairs[p].split(',')
def biggest_numeral():
max_line = 0
max_value = 0
for p_idx in range(len(pairs)):
value = int(pairs[p_idx][1])*math.log10(int(pairs[p_idx][0]))
if value > max_value:
max_value = value
max_line = p_idx+1
return max_line
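# Comparing exponent * log10(base) is equivalent to comparing base ** exponent
# directly, since log10 is strictly increasing. Small worked example (not taken from
# the data above): 2**10 = 1024 beats 3**6 = 729, and likewise
# 10 * log10(2) ~= 3.010 beats 6 * log10(3) ~= 2.863.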
if __name__ == "__main__":
    print('The biggest numeral in the array is on line {0}'.format(biggest_numeral()))
| 3.1875
| 3
|
Otree/mygame/models.py
|
sb6998/delpro
| 0
|
12778608
|
<reponame>sb6998/delpro<filename>Otree/mygame/models.py
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import csv
author = '<NAME>'
doc = """
Decision making using game theory
"""
class Constants(BaseConstants):
name_in_url = 'mygame'
players_per_group = 3
num_rounds = 50
stakes =c(100)
instructions_template = 'mygame/Instructions.html'
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pts = models.FloatField()
average=models.FloatField()
name = models.StringField()
number = models.PositiveIntegerField(min = 1 , max =50)
def set_payoffs(self):
players = self.get_players()
name1 = players[0].name
pts = [p.pts for p in players]
average=(1/3)*sum(pts)
for p in players:
rnum=p.round_number
break
with open('mygame/mygame.csv','r') as f:
reader=csv.reader(f)
data = [r for r in reader]
if data == []:
data=[[0]]
else:
data=data[-1][0]
if rnum == 1:
with open('mygame/mygame.csv','w+') as f:
writer_csv=csv.writer(f)
writer_csv.writerow(str(rnum)+str(average))
for x in range(2,51):
if rnum == x and data != str(x):
with open('mygame/mygame.csv','a') as f:
writer_csv=csv.writer(f)
writer_csv.writerow(str(rnum)+str(average))
else:
pass
with open('mygame/points_data.csv','r') as f:
reader=csv.reader(f)
data = [r for r in reader]
if rnum ==1:
with open('mygame/points_data.csv','w') as f:
name2 = name1.split(",")
z = name2 + pts
writer_csv = csv.writer(f)
writer_csv.writerow(z)
else:
for x in range(2,51):
if rnum == x and rnum != len(data):
with open('mygame/points_data.csv','a') as f:
name2 = name1.split(",")
z = name2 + pts
writer_csv = csv.writer(f)
writer_csv.writerow(z)
# with open('mygame/game_data.csv','r') as f:
# reader=csv.reader(f)
# data = [r for r in reader]
# if data == []:
# data=[[0]]
# else:
# data=data[-1][0]
return ""
def table_avg_data(self):
with open('mygame/mygame.csv', 'r') as f:
read_csv=csv.reader(f)
l=[]
for x in read_csv:
x=x[1:]
l.append("".join(x))
used=[]
for x in l:
used.append(x)
final=[]
pla=self.get_players()
for p in pla:
rno=p.round_number
break
counter=0
for i in used:
final.append(i)
counter=counter+1
if counter==rno:
break
return(final)
def other_player(self):
return self.get_others_in_group()[0]
class Player(BasePlayer):
pts = models.PositiveIntegerField(min=0, max=100)
rank=models.PositiveIntegerField()
number = models.PositiveIntegerField(min = 1 , max =50)
name = models.StringField()
def other_player(self):
pts = models.PositiveIntegerField(min=0, max=100)
return self.get_others_in_group()[0]
def other_player2(self):
pts = models.PositiveIntegerField(min=0, max=100)
return self.get_others_in_group()[1]
| 2.625
| 3
|
src/kleis/config/config.py
|
santteegt/kleis-keyphrase-extraction
| 16
|
12778609
|
"""config/config
Default corpus configs.
"""
import sys
import os
import inspect
from pathlib import Path
from kleis import kleis_data
ACLRDTEC = "acl-rd-tec-2.0"
SEMEVAL2017 = "semeval2017-task10"
KPEXTDATA_PATH = str(Path(inspect.getfile(kleis_data)).parent)
# Check for default paths for corpus
DEFAULT_CORPUS_PATH = "corpus/" + SEMEVAL2017 + "/"
if Path("./kleis_data/" + DEFAULT_CORPUS_PATH).exists():
CORPUS_PATH = "./kleis_data/" + DEFAULT_CORPUS_PATH
elif Path(os.path.expanduser("~/kleis_data/" + DEFAULT_CORPUS_PATH)).exists():
CORPUS_PATH = os.path.expanduser("~/kleis_data/" + DEFAULT_CORPUS_PATH)
elif Path(KPEXTDATA_PATH + "/" + DEFAULT_CORPUS_PATH).exists():
CORPUS_PATH = KPEXTDATA_PATH + "/" + DEFAULT_CORPUS_PATH
else:
print("Warning: SemEval 2017 Task 10 corpus doesn't exists.", file=sys.stderr)
print(" - Download from here https://scienceie.github.io/resources.html",
file=sys.stderr)
print(" - Use one of the following paths.", file=sys.stderr)
print(" + ./kleis_data/%s" % DEFAULT_CORPUS_PATH, file=sys.stderr)
print(" + ~/kleis_data/%s" % DEFAULT_CORPUS_PATH, file=sys.stderr)
print(" + %s" % (KPEXTDATA_PATH + "/" + DEFAULT_CORPUS_PATH), file=sys.stderr)
print(" - You can use pre-trained models.", file=sys.stderr)
CORPUS_PATH = os.path.expanduser("~/kleis_data/" + DEFAULT_CORPUS_PATH)
print("Default: ", Path(CORPUS_PATH))
CORPUS = {
ACLRDTEC: {
"_id": "acl-rd-tec-2.0",
"options": {}
},
SEMEVAL2017: {
"_id": SEMEVAL2017,
"format": "brat",
"format-description": "brat standoff format, http://brat.nlplab.org/standoff.html",
"dataset": {
"train-labeled": CORPUS_PATH + "train2/",
"train-unlabeled": None,
"dev-labeled": CORPUS_PATH + "dev/",
"dev-unlabeled": None,
"test-unlabeled": CORPUS_PATH + "scienceie2017_test_unlabelled/",
"test-labeled": CORPUS_PATH + "semeval_articles_test/"
},
"options": {}
},
"options": {}
}
CORPUS_DEFAULT = CORPUS[SEMEVAL2017]
CORPUS_SEMEVAL2017_TASK10 = CORPUS[SEMEVAL2017]
CORPUS_ACL_RD_TEC_2_0 = CORPUS[ACLRDTEC]
# Check for default paths for models
DEFAULT_MODELS_PATH = "models/"
if Path("./kleis_data/" + DEFAULT_MODELS_PATH).exists():
MODELS_PATH = "./kleis_data/" + DEFAULT_MODELS_PATH
elif Path(os.path.expanduser("~/kleis_data/" + DEFAULT_MODELS_PATH)).exists():
MODELS_PATH = os.path.expanduser("~/kleis_data/" + DEFAULT_MODELS_PATH)
elif Path(KPEXTDATA_PATH + "/" + DEFAULT_MODELS_PATH).exists():
MODELS_PATH = KPEXTDATA_PATH + "/" + DEFAULT_MODELS_PATH
else:
print("Warning: Path to save models doesn't exists.", file=sys.stderr)
print(" - Possible paths are:", file=sys.stderr)
print(" + %s" % (KPEXTDATA_PATH + "/" + DEFAULT_MODELS_PATH), file=sys.stderr)
print(" + %s" % ("./" + DEFAULT_MODELS_PATH), file=sys.stderr)
print(" + %s" % ("~/" + DEFAULT_MODELS_PATH), file=sys.stderr)
print(" - Default will be %s" % DEFAULT_MODELS_PATH, file=sys.stderr)
MODELS_PATH = DEFAULT_MODELS_PATH
# Check for default paths for PoS tag sequences
DEFAULT_TRAIN_PATH = "train/"
if Path("./kleis_data/" + DEFAULT_TRAIN_PATH).exists():
TRAIN_PATH = "./kleis_data/" + DEFAULT_TRAIN_PATH
elif Path(os.path.expanduser("~/kleis_data/" + DEFAULT_TRAIN_PATH)).exists():
TRAIN_PATH = os.path.expanduser("~/kleis_data/" + DEFAULT_TRAIN_PATH)
elif Path(KPEXTDATA_PATH + "/" + DEFAULT_TRAIN_PATH).exists():
TRAIN_PATH = KPEXTDATA_PATH + "/" + DEFAULT_TRAIN_PATH
else:
print("Warning: Path to save models doesn't exists.", file=sys.stderr)
print(" - Possible paths are:", file=sys.stderr)
print(" + %s" % (KPEXTDATA_PATH + "/" + DEFAULT_TRAIN_PATH), file=sys.stderr)
print(" + %s" % ("./" + DEFAULT_TRAIN_PATH), file=sys.stderr)
print(" + %s" % ("~/" + DEFAULT_TRAIN_PATH), file=sys.stderr)
print(" - Default will be %s" % DEFAULT_TRAIN_PATH, file=sys.stderr)
TRAIN_PATH = DEFAULT_TRAIN_PATH
OUTPUT_PATH = "output/"
| 2.390625
| 2
|
home_board/compositor.py
|
kdickerson/homeBoard
| 7
|
12778610
|
<reponame>kdickerson/homeBoard
# Generate in image from the provided weather, calendar, special_events data
import logging
import os
from PIL import Image, ImageDraw, ImageFont
from .util import local_file
EPD_WIDTH = 640
EPD_HEIGHT = 384
BLACK = 0
WHITE = 255
RED = 128
COLUMN_WIDTH = 160
COLUMNS = [0, COLUMN_WIDTH, COLUMN_WIDTH * 2, COLUMN_WIDTH * 3]
HEADER_TOP = 0
WEATHER_TEMP_TOP = 37
WEATHER_ICON_TOP = 61
CALENDAR_TOP = 130
WEATHER_ICON_MAP = {
'chanceflurries': 'chancesnow.bmp',
'chancerain': 'chancerain.bmp',
'chancesleet': 'chancesnow.bmp',
'chancesnow': 'chancesnow.bmp',
'chancetstorms': 'chancetstorms.bmp',
'clear': 'sunny.bmp',
'cloudy': 'cloudy.bmp',
'cloudy_windy': 'cloudy_windy.bmp',
'flurries': 'snow.bmp',
'fog': 'fog.bmp',
'hazy': 'hazy.bmp',
'mostlycloudy': 'mostlycloudy.bmp',
'partlycloudy': 'partlycloudy.bmp',
'partlysunny': 'mostlycloudy.bmp',
'sleet': 'snow.bmp',
'rain': 'rain.bmp',
'snow': 'snow.bmp',
'sunny': 'sunny.bmp',
'tstorms': 'tstorms.bmp',
'unknown': 'sunny.bmp',
'windy': 'windy.bmp',
}
EVENT_ICON_MAP = {
'birthday': 'birthday.bmp',
'christmas': 'christmas.bmp',
'flag': 'flag.bmp',
'halloween': 'halloween.bmp',
'heart': 'heart.bmp',
'thanksgiving': 'thanksgiving.bmp',
}
WEATHER_ICON_PATH = 'icons/weather/'
EVENTS_ICON_PATH = 'icons/events/'
BLACK_TO_RED_LUT = [RED] + ([BLACK] * 254) + [WHITE] # Map BLACK to RED, leave WHITE alone; use with Image.point()
def _load_weather_icon(icon):
# Expecting 64x64 monochrome icons
return Image.open(local_file(os.path.join(WEATHER_ICON_PATH, WEATHER_ICON_MAP[icon])))
def _load_event_icon(icon):
return Image.open(local_file(os.path.join(EVENTS_ICON_PATH, EVENT_ICON_MAP[icon])))
def _draw_centered_text(draw, offset, width, text, font, color=BLACK, measure_text=None):
dimensions = draw.textsize(measure_text if measure_text else text, font=font)
loc = ((width - dimensions[0]) // 2 + offset[0], offset[1])
draw.text(loc, text, font=font, fill=color)
def _truncate_text(draw, text, font, width):
dimensions = draw.textsize(text, font=font)
i = 0
while dimensions[0] > width:
i = i - 1
dimensions = draw.textsize(text[:i], font=font)
return (text[:i-1] + '…') if i < 0 else text, dimensions
def _draw_header(draw, offset, text, font, color=BLACK):
_draw_centered_text(draw, offset, COLUMN_WIDTH, text, font, color)
def _draw_calendar(image, draw, events, offset, bottom, cal_header_font, description_font):
logging.debug('_draw_calendar:start')
time_left_margin = 5
text_left_margin = 5
right_margin = 5
bottom_margin = 5
top = offset[1]
more_msg_height = draw.textsize('+123456789 More', font=description_font)[1] # Max height for "+X More" msg
for idx, event in enumerate(events):
header_txt = ''
# Make sure we don't show start times for events that started a previous date,
# don't show end times for events ending a future date
if not event['all_day']:
if event['underway']:
if event['ending_days_away'] == 0:
header_txt = '→' + event['end'].strftime('%-H:%M') + ' ' # %-H is Linux specific
elif event['ending_days_away'] > 0:
header_txt = '+' + str(event['ending_days_away']) + ' Day' + \
('' if event['ending_days_away'] == 1 else 's') + ' '
else:
header_txt = event['start'].strftime('%-H:%M') + ' ' # %-H is Linux specific
header_txt += event['calendar_label']
header, header_dim = _truncate_text(
draw,
header_txt,
cal_header_font,
COLUMN_WIDTH - right_margin - time_left_margin
)
desc, desc_dim = _truncate_text(
draw,
event['description'],
description_font,
COLUMN_WIDTH - right_margin - text_left_margin
)
if top + header_dim[1] + desc_dim[1] + (0 if idx+1 == len(events) else more_msg_height) > bottom:
more_msg = '+' + str(len(events) - idx) + ' More'
_draw_centered_text(draw, (offset[0], top), COLUMN_WIDTH, more_msg, description_font)
break
draw.text(
(offset[0] + time_left_margin, top),
header,
font=cal_header_font,
fill=RED if event['underway'] else BLACK
)
draw.text(
(offset[0] + text_left_margin, top + header_dim[1]),
desc,
font=description_font,
fill=RED if event['underway'] else BLACK
)
top = top + header_dim[1] + desc_dim[1] + bottom_margin
logging.debug('_draw_calendar:end')
def _draw_forecast_and_current(image, draw, conditions, forecast, header_font, temp_font):
logging.debug('_draw_forecast_and_current:start')
logging.debug('current: ' + str(conditions))
logging.debug('forecast: ' + str(forecast))
SUB_COLUMN_WIDTH = COLUMN_WIDTH // 2
# Sub column 1:
if conditions:
_draw_centered_text(
draw,
(0, WEATHER_TEMP_TOP),
SUB_COLUMN_WIDTH,
str(conditions['temperature']) + '°',
temp_font,
color=RED,
measure_text=str(conditions['temperature'])
)
try:
cur_icon = _load_weather_icon(conditions['icon']).point(BLACK_TO_RED_LUT)
image.paste(cur_icon, ((SUB_COLUMN_WIDTH - cur_icon.size[0]) // 2, WEATHER_ICON_TOP))
except Exception:
cur_icon = None
# Sub column 2:
if forecast:
_draw_centered_text(
draw,
(SUB_COLUMN_WIDTH, WEATHER_TEMP_TOP),
SUB_COLUMN_WIDTH,
str(forecast['high-temperature']) + '°',
temp_font,
measure_text=str(forecast['high-temperature'])
)
try:
forecast_icon = _load_weather_icon(forecast['icon'])
            image.paste(forecast_icon, ((SUB_COLUMN_WIDTH - forecast_icon.size[0]) // 2 + SUB_COLUMN_WIDTH, WEATHER_ICON_TOP))
except Exception:
forecast_icon = None
if not cur_icon and not forecast_icon and conditions:
_draw_centered_text(draw, (0, WEATHER_ICON_TOP), COLUMN_WIDTH, conditions['description'], temp_font)
logging.debug('_draw_forecast_and_current:end')
def _draw_forecast(image, draw, column_left, forecast, header_font, temp_font):
logging.debug('_draw_forecast:start')
logging.debug('forecast: ' + str(forecast))
msg = str(forecast['low-temperature']) + '–' + str(forecast['high-temperature']) # Center before adding the °
_draw_centered_text(draw, (column_left, WEATHER_TEMP_TOP), COLUMN_WIDTH, msg + '°', temp_font, measure_text=msg)
try:
icon = _load_weather_icon(forecast['icon'])
image.paste(icon, ((COLUMN_WIDTH - icon.size[0]) // 2 + column_left, WEATHER_ICON_TOP))
except: # noqa: E722
_draw_centered_text(draw, (column_left, WEATHER_ICON_TOP), COLUMN_WIDTH, forecast['description'], temp_font)
logging.debug('_draw_forecast:end')
def _draw_special_event(image, draw, event, footer_offset, font):
logging.debug('_draw_special_event:start')
textsize = draw.textsize(event['msg'], font=font)
iconsize = (0, 0)
icon = None
try:
icon = _load_event_icon(event['icon']) if 'icon' in event else None
if icon:
iconsize = icon.size
except: # noqa: E722
raise
if icon:
padding = 5
msgsize = (iconsize[0] + textsize[0] + padding, max(textsize[1], iconsize[1]))
else:
padding = 0
msgsize = textsize
icon_left = (EPD_WIDTH - msgsize[0]) // 2
icon_top = footer_offset[1] - msgsize[1]
icon_offset = (icon_left, icon_top + (msgsize[1] - iconsize[1]) // 2)
text_offset = (icon_left + iconsize[0] + padding, icon_top + (msgsize[1] - textsize[1]) // 2)
text_to_footer_gap = footer_offset[1] - (text_offset[1] + textsize[1])
if text_to_footer_gap > 0:
icon_offset = (icon_offset[0], icon_offset[1] + text_to_footer_gap)
text_offset = (text_offset[0], text_offset[1] + text_to_footer_gap)
image.paste(icon, icon_offset) if icon else None
draw.text(text_offset, event['msg'], font=font, fill=RED)
logging.debug('_draw_special_event:end')
return (min(icon_offset[0], text_offset[0]), min(icon_offset[1], text_offset[1])), msgsize
def _draw_footer(image, draw, text, font):
logging.debug('_draw_footer:start')
dimensions = draw.textsize(text, font=font)
offset = (EPD_WIDTH-dimensions[0], EPD_HEIGHT-dimensions[1])
draw.text(offset, text, font=font, fill=BLACK)
logging.debug('_draw_footer:end')
return offset, dimensions
def create(context):
logging.debug('create:start')
image = Image.new('L', (EPD_WIDTH, EPD_HEIGHT), WHITE)
draw = ImageDraw.Draw(image)
draw.fontmode = '1' # No Anti-aliasing
fonts = {
'header': ImageFont.truetype(local_file('fonts/FreeSansBold.ttf'), 36),
'special': ImageFont.truetype(local_file('fonts/FreeSansBold.ttf'), 36),
'temperature': ImageFont.truetype(local_file('fonts/FreeSans.ttf'), 24),
'calendar_header': ImageFont.truetype(local_file('fonts/FreeSans.ttf'), 20),
'calendar_body': ImageFont.truetype(local_file('fonts/FreeSans.ttf'), 16),
'footer': ImageFont.truetype(local_file('fonts/FreeSans.ttf'), 14),
}
# Footer: Bottom-right corner
updated_msg = 'Updated ' + context['now'].strftime('%B %-d, %-I:%M %p') # %-d and %-I are platform specific
if not all(context['success'].values()):
updated_msg = '[! ' + ','.join(sorted([k for k, v in context['success'].items() if not v])) + ']' + updated_msg
footer_offset, footer_dimensions = _draw_footer(image, draw, updated_msg, fonts['footer'])
# Special event, centered across whole display, above footer
special_offset = None
if context['today']['special_event'] and 'msg' in context['today']['special_event']:
special_offset, special_dimensions = _draw_special_event(
image,
draw,
context['today']['special_event'],
footer_offset,
fonts['special']
)
cal_bottom = (special_offset[1] if special_offset else footer_offset[1]) - 1
# 1st Column
left = COLUMNS[0]
_draw_header(draw, (left, HEADER_TOP), context['today']['date'].strftime('%b %-d'), fonts['header'], RED)
if context['today']['conditions'] or context['today']['forecast']:
_draw_forecast_and_current(
image,
draw,
context['today']['conditions'],
context['today']['forecast'],
fonts['header'],
fonts['temperature']
)
if context['today']['events']:
_draw_calendar(
image,
draw,
context['today']['events'],
(left, CALENDAR_TOP),
cal_bottom,
fonts['calendar_header'],
fonts['calendar_body']
)
# 2nd Column
left = COLUMNS[1]
draw.line([(left, 0), (left, cal_bottom)], width=1, fill=BLACK)
_draw_header(draw, (left, HEADER_TOP), context['plus_one']['date'].strftime('%a'), fonts['header'])
if context['plus_one']['forecast']:
_draw_forecast(image, draw, left, context['plus_one']['forecast'], fonts['header'], fonts['temperature'])
if context['plus_one']['events']:
_draw_calendar(
image,
draw,
context['plus_one']['events'],
(left, CALENDAR_TOP),
cal_bottom,
fonts['calendar_header'],
fonts['calendar_body']
)
# 3rd Column
left = COLUMNS[2]
draw.line([(left, 0), (left, cal_bottom)], width=1, fill=BLACK)
_draw_header(draw, (left, HEADER_TOP), context['plus_two']['date'].strftime('%a'), fonts['header'])
if context['plus_two']['forecast']:
_draw_forecast(image, draw, left, context['plus_two']['forecast'], fonts['header'], fonts['temperature'])
if context['plus_two']['events']:
_draw_calendar(
image,
draw,
context['plus_two']['events'],
(left, CALENDAR_TOP),
cal_bottom,
fonts['calendar_header'],
fonts['calendar_body']
)
# 4th Column
left = COLUMNS[3]
draw.line([(left, 0), (left, cal_bottom)], width=1, fill=BLACK)
_draw_header(draw, (left, HEADER_TOP), context['plus_three']['date'].strftime('%a'), fonts['header'])
if context['plus_three']['forecast']:
_draw_forecast(image, draw, left, context['plus_three']['forecast'], fonts['header'], fonts['temperature'])
if context['plus_three']['events']:
_draw_calendar(
image,
draw,
context['plus_three']['events'],
(left, CALENDAR_TOP),
cal_bottom,
fonts['calendar_header'],
fonts['calendar_body']
)
logging.debug('create:end')
return image
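# Rough shape of the context dictionary expected by create(), inferred from the keys
# accessed above (values are hypothetical; 'success' flags which data sources refreshed):
#
#   context = {
#       'now': datetime.datetime.now(),
#       'success': {'weather': True, 'calendar': True},
#       'today': {'date': ..., 'conditions': {...}, 'forecast': {...}, 'events': [...],
#                 'special_event': {'msg': 'Happy Birthday!', 'icon': 'birthday'}},
#       'plus_one': {'date': ..., 'forecast': {...}, 'events': [...]},
#       'plus_two': {'date': ..., 'forecast': {...}, 'events': [...]},
#       'plus_three': {'date': ..., 'forecast': {...}, 'events': [...]},
#   }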
| 2.453125
| 2
|
ba/set.py
|
mrtukkin/bachelor-thesis
| 0
|
12778611
|
from scipy.misc import imread
from tqdm import tqdm
import numpy as np
import os
import random
import warnings
class SetList(object):
'''A class to hold lists of inputs for a network'''
def __init__(self, source='', target=None):
'''Constructs a new SetList.
Args:
source (str): The path to the list file
'''
self.source = source
if target is None:
self.target = source
else:
self.target = target
self.list = []
self.mean = []
if source != '':
self.load()
@property
def set(self):
return set(self.list)
@set.setter
def set(self, set):
self.list = list(set)
def __len__(self):
'''Returns the length of this Set'''
return len(self.list)
def __str__(self):
'''Returns a str-description of this Set'''
return '{}[{}] → {}'.format(self.source, len(self.list), self.target)
def __iter__(self):
'''Returns the iterator for the contained list'''
return iter(self.list)
def load(self):
'''Loads the contents of self.source into the list. If source is a dir
it will list all files in it without extensions. It does replace the
whole content and does not append to it.'''
# utils.touch(self.source)
if os.path.isdir(self.source):
self.load_directory(self.source)
self.source = ''
self.target = ''
else:
if not os.path.exists(self.source):
self.list = []
else:
with open(self.source) as f:
self.list = [l[:-1] for l in f.readlines() if l.strip()]
def load_directory(self, dir):
        '''Loads the contents of a directory into the list
Args:
dir (str): The path to the dir
'''
self.list = [os.path.splitext(f)[0] for f in next(os.walk(dir))[2]]
def write(self):
'''Saves the list to the path set in self.target. This is normally set
to self.source'''
with open(self.target, 'w') as f:
for row in self:
f.write("{}\n".format(row))
print('List {} written...'.format(self.target))
def shuffle(self):
'''Shuffles the list'''
random.shuffle(self.list)
def add_pre_suffix(self, prefix='', suffix=''):
'''Adds a prefix and a suffix to every element of the list.
Args:
prefix (str,optional): The prefix to prepend
            suffix (str,optional): The suffix to append
'''
self.list = [prefix + x + suffix for x in self]
def rm_pre_suffix(self, prefix='', suffix=''):
'''Removes a prefix and a suffix from every element of the list.
Args:
prefix (str,optional): The prefix to remove
            suffix (str,optional): The suffix to remove
'''
        self.list = [x[len(prefix):len(x) - len(suffix)] for x in self]
def calculate_mean(self):
'''Calculates the mean pixel for this set. The list has to contain full
paths obviously so you probably have to append Prefixes and suffixes
before running this.
Returns:
The mean pixel. As BGR!
'''
self.mean = [[], [], []]
print('Calculating mean pixel...')
for row in tqdm(self):
im = imread(row)
self.mean[0].append(np.mean(im[..., 0]))
self.mean[1].append(np.mean(im[..., 1]))
self.mean[2].append(np.mean(im[..., 2]))
self.mean = np.mean(self.mean, axis=1)
if self.mean.shape == (3,):
return self.mean
else:
return self.mean[:, :, ::-1]
def each(self, callback):
'''Applies a callable to every element of the list
Args:
callback (func): The callback function to use
Returns:
True if successfull and False if not
'''
if not callable(callback):
warnings.warn('Not callable object')
return False
print('Each of {}'.format(self.source))
for row in tqdm(self):
callback(row)
return True
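# Typical usage sketch (hypothetical paths):
#
#   image_list = SetList('lists/train.txt')            # one identifier per line
#   image_list.add_pre_suffix('data/images/', '.jpg')
#   mean_pixel = image_list.calculate_mean()
#   image_list.rm_pre_suffix('data/images/', '.jpg')
#   image_list.shuffle()
#   image_list.target = 'lists/train_shuffled.txt'
#   image_list.write()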
| 2.875
| 3
|
superseeded/calcResponse.py
|
BeneStrahm/WindTunnelPostprocessing
| 0
|
12778612
|
<reponame>BeneStrahm/WindTunnelPostprocessing
# ------------------------------------------------------------------------------
# Description: Calculating the structural response for wind speeds at different return periods
# Author: <EMAIL>
# Created: 2020-09-16
# Execution: Import functions / collections (from folder.file import func)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------------
import numpy as np
from scipy import integrate
# For Static solver (feastruct)
from feastruct.pre.material import Material
from feastruct.pre.section import Section
import feastruct.fea.cases as cases
from feastruct.fea.frame_analysis import FrameAnalysis2D
from feastruct.solvers.linstatic import LinearStatic
from feastruct.solvers.naturalfrequency import NaturalFrequency
from feastruct.solvers.feasolve import SolverSettings
# ------------------------------------------------------------------------------
# Imported functions
# ------------------------------------------------------------------------------
from helpers.pyExtras import getKeyList
from helpers.txtEditor import writeToTxt
import plotters.plot2D as plt
# ------------------------------------------------------------------------------
# Abbreviations
# ------------------------------------------------------------------------------
# p... load
# r... response
# ms... model scale
# fs... full scale
# L... lift
# D... drag
# M... moment
# F... force
# H... (at) height of building
# sp.. sample
# fq... frequency
# ------------------------------------------------------------------------------
# Classes
# ------------------------------------------------------------------------------
class response(object):
def __init__(self, modelForces, RPeriod, uH_fs, H_fs):
"""Scale time/freq. from model to full scale
:param modelForce: obj w/ wind tunnel measurements
:param RPeriod: str w/ return period of wind speed
:param uH_fs, H_fs: flt w/ full scale building properties
"""
self.modelForces = modelForces
self.RPeriod= RPeriod
self.H_fs = H_fs
self.uH_fs = uH_fs
def scaleTime(self):
"""Scale time/freq. from model to full scale
"""
# Model properties
self.modelForces.dT_ms = 1 / self.modelForces.fq_sp_ms
# Scaling factors
self.lambda_u = self.uH_fs / self.modelForces.uH_ms
self.lambda_g = self.H_fs / self.modelForces.H_ms
self.lambda_fq= self.lambda_u / self.lambda_g
self.lambda_t = 1 / self.lambda_fq
# Scale quantities
self.dT_fs = self.lambda_t * self.modelForces.dT_ms
self.fq_sp_fs = self.lambda_fq * self.modelForces.fq_sp_ms
def scaleForces(self):
"""Scale base forces from model to full scale
"""
# Scaling factors
self.lambda_F = self.lambda_u ** 2 * self.lambda_g ** 2
self.lambda_M = self.lambda_u ** 2 * self.lambda_g ** 3
# Scale floor forces
self.modelForces.F_p_fs_D= self.modelForces.F_p_ms_D * self.lambda_F
self.modelForces.F_p_fs_L= self.modelForces.F_p_ms_L * self.lambda_F
# Scale base forces
self.modelForces.BF_p_fs_D= self.modelForces.BF_p_ms_D * self.lambda_F
print(np.mean(self.modelForces.BF_p_fs_D *1000/(0.5 * 1.25 * self.uH_fs**2 * 128 *128)))
self.modelForces.BF_p_fs_L= self.modelForces.BF_p_ms_L * self.lambda_F
self.modelForces.BM_p_fs_D= self.modelForces.BM_p_ms_D * self.lambda_M
self.modelForces.BM_p_fs_L= self.modelForces.BM_p_ms_L * self.lambda_M
def transToSpectralDomain(self, F_p, dT):
"""Transform time series of forces into the spectral domain
"""
# Length of time series
n = np.shape(F_p)[0]
N = n//2
# Get the Spectral Density, only positive half
S_p = abs(np.fft.fft(F_p)[0:N])
# Compute the power spectra density
S_p = S_p ** 2
# According to "Boggs - Wind Loading ...[1991], (p.237)": S(fq)+ = 2 * S(fq)+/-
S_p = 2 * S_p
# Scaling Factor
S_p = S_p / n
# Scaling Factor
S_p = S_p * dT
# Compute the frequencies, only positive half
fq = np.fft.fftfreq(n, dT)[0:N]
return S_p, fq
def calcSpectralResponse(self, fq_p, S_p, fq_e, D):
"""Calculate the response spectrum
"""
# Length of spectrum
N = np.shape(S_p)[0]
# Apply Dynamic amplification factor
S_r = np.zeros(N)
for i in range(0, N):
eta_i = fq_p[i]/fq_e
H_i = response.mechanicalAdmittance(self, eta_i, D)
S_r[i] = abs(H_i)**2 * S_p[i]
return S_r
def mechanicalAdmittance(self, eta, D):
"""Mechanical admittance of the 1-DOF System
"""
# Dynamic amplification factor
H_fq = 1 / np.sqrt((1-eta**2)**2 + (2*D*eta)**2)
return H_fq
def numericalIntSpectrum(self, dT, S_r):
"""Integrate the response spectrum
"""
# Length of spectrum
N = np.shape(S_r)[0]
# Nyquist frequency
fq_nyq = 1 / (2 * dT)
# Sample spacing dfq
dfq = fq_nyq / N
# Perform the numerical integration with Simpson rule
F_ms = integrate.simps(S_r, dx=dfq)
# Return directly RMS
F_rms = np.sqrt(F_ms)
return F_rms
def calcPeakFactor(self, fq_e, T):
"""Compute the peak factor
"""
g_peak = np.sqrt(2 * np.log(fq_e * T)) + 0.5772 / np.sqrt((2 * np.log(fq_e * T)))
return g_peak
class baseResponseForces(response):
def __init__(self, modelForces, RPeriod, uH_fs, H_fs):
super().__init__(modelForces, RPeriod, uH_fs, H_fs)
def calcResponse(self, fname, fq_e_D, fq_e_L, D):
# Investigated wind speed
writeToTxt(fname, "------------------------------")
writeToTxt(fname, "u_H_mean: " + '{:02.3f}'.format(self.uH_fs))
writeToTxt(fname, "Return Period: " + self.RPeriod)
writeToTxt(fname, "------------------------------")
# Base moment, drag direction
writeToTxt(fname, "Base moment in drag direction [kNm]")
baseResponseForces.calcLoadStats(self, fname, self.modelForces.BM_p_fs_D)
baseResponseForces.calcPeakLoading(self, fname, self.modelForces.BM_p_fs_D, self.dT_fs, fq_e_D, D)
writeToTxt(fname, "------------------------------")
# Base moment, lift direction
writeToTxt(fname, "Base moment in lift direction [kNm]")
baseResponseForces.calcLoadStats(self, fname, self.modelForces.BM_p_fs_L)
baseResponseForces.calcPeakLoading(self, fname, self.modelForces.BM_p_fs_L, self.dT_fs, fq_e_L, D)
writeToTxt(fname, "------------------------------")
def calcLoadStats(self, fname, F_p):
# Calc statistics
F_p_mean = np.mean(F_p)
F_p_max = np.max(F_p)
F_p_min = np.min(F_p)
F_p_std = np.std(F_p)
writeToTxt(fname, "F_p_mean: " + '{:02.3f}'.format(F_p_mean))
writeToTxt(fname, "F_p_max: " + '{:02.3f}'.format(F_p_max))
writeToTxt(fname, "F_p_min: " + '{:02.3f}'.format(F_p_min))
writeToTxt(fname, "F_p_std: " + '{:02.3f}'.format(F_p_std))
def calcPeakLoading(self, fname, F_p, dT, fq_e, D):
        # Assess response with spectral analysis
# --------------------
# Transform only the fluctuations "F_p_prime" to frequency domain
F_p_mean = np.mean(F_p)
F_p_prime = F_p - F_p_mean
S_p, fq_p = response.transToSpectralDomain(self, F_p_prime, dT)
# Apply mechanical admittance to the spectrum
S_r = response.calcSpectralResponse(self, fq_p, S_p, fq_e, D)
# # Setting up data to be plotted
# plt.plot2D(fq_p, S_r, "f [Hz]", "Sr", "Spectrum", ["PSD"], xscale='log', yscale='log', savePlt=False, showPlt=True)
# Perform the numerical integration
F_r_std = response.numericalIntSpectrum(self, dT, S_r)
# Estimate peak values
        g_peak = response.calcPeakFactor(self, fq_e, 3600) # Peak Factor
F_r_max = F_p_mean + g_peak * F_r_std # Estimate max. response
writeToTxt(fname, "Asses response with spectral analysis")
writeToTxt(fname, "F_p_mean: " + '{:02.3f}'.format(F_p_mean))
writeToTxt(fname, "F_r_std: " + '{:02.3f}'.format(F_r_std))
writeToTxt(fname, "g_peak: " + '{:02.3f}'.format(g_peak))
writeToTxt(fname, "F_r_max: " + '{:02.3f}'.format(F_r_max))
# Comparison with the loading
# --------------------
F_p_max = np.max(F_p)
DLF_max = F_r_max / F_p_max
writeToTxt(fname, "Comparison with loading")
writeToTxt(fname, "F_p_max: " + '{:02.3f}'.format(F_p_max))
writeToTxt(fname, "DLF(F_max): " + '{:02.3f}'.format(DLF_max))
class TipResponseDeflections(response):
def __init__(self, modelForces, RPeriod, uH_fs, H_fs):
super().__init__(modelForces, RPeriod, uH_fs, H_fs)
def calcResponse(self, fname, E_D, I_D, E_L, I_L, mue):
# Investigated wind speed
writeToTxt(fname, "------------------------------")
writeToTxt(fname, "u_H_mean: " + '{:02.3f}'.format(self.uH_fs))
writeToTxt(fname, "Return Period: " + self.RPeriod)
writeToTxt(fname, "------------------------------")
        # Deflections, drag direction
writeToTxt(fname, "Deflections in drag direction [m]")
TipResponseDeflections.calcMeanDeflection(self, fname, self.modelForces.F_p_fs_D, self.H_fs, E_D, I_D,\
self.modelForces.nz, self.modelForces.z_lev * self.lambda_g)
TipResponseDeflections.calcPeakDeflection(self, fname, 146332.508, self.H_fs, E_D, I_D, mue, \
self.modelForces.nz, self.modelForces.z_lev * self.lambda_g)
writeToTxt(fname, "------------------------------")
        # Deflections, lift direction
writeToTxt(fname, "Deflections in lift direction [m]")
TipResponseDeflections.calcMeanDeflection(self, fname, self.modelForces.F_p_fs_L, self.H_fs, E_L, I_L,\
self.modelForces.nz, self.modelForces.z_lev * self.lambda_g)
        TipResponseDeflections.calcPeakDeflection(self, fname, 370125.002, self.H_fs, E_L, I_L, mue, \
self.modelForces.nz, self.modelForces.z_lev * self.lambda_g)
writeToTxt(fname, "------------------------------")
def calcMeanDeflection(self, fname, F_p_j, H_fs, E, I, nz, z_lev_fs):
# Setting up static calculation
# ------------
# preprocessor
# ---------
# constants & lists
L = H_fs # length of the beam [m]
n = nz # no of nodes [-]
z = np.append(L, z_lev_fs) # coordinates [m], append top of building
z = np.append(z, 0) # coordinates [m], append support node
# everything starts with the analysis object
analysis = FrameAnalysis2D()
# materials and sections are objects
mat_dummy = Material("Dummy", E, 0.3, 1, colour='w')
section = Section(area=1, ixx=I)
# nodes are objects
nodes = []
for i in range(0,n+2): #! n+2 (support, tip)
node = analysis.create_node(coords=[0, z[i]])
nodes.append(node)
# and so are beams!
beams = []
for i in range(0,n+1): #! n+1 (support, tip)
beam = analysis.create_element(
el_type='EB2-2D', nodes=[nodes[i], nodes[i+1]], material=mat_dummy, section=section)
beams.append(beam)
# boundary conditions are objects
freedom_case = cases.FreedomCase()
freedom_case.add_nodal_support(node=nodes[-1], val=0, dof=0)
freedom_case.add_nodal_support(node=nodes[-1], val=0, dof=1)
freedom_case.add_nodal_support(node=nodes[-1], val=0, dof=5)
# so are loads!
load_case = cases.LoadCase()
for i in range(n):
F_p = np.mean(F_p_j[i]) #[in KN]
load_case.add_nodal_load(node=nodes[i+1], val=F_p , dof=0) # i+1 (support, tip)
# an analysis case relates a support case to a load case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=load_case)
# ------
# solver
# ------
# you can easily change the solver settings
settings = SolverSettings()
settings.linear_static.time_info = False
# the linear static solver is an object and acts on the analysis object
solver = LinearStatic(analysis=analysis, analysis_cases=[analysis_case], solver_settings=settings)
solver.solve()
# ----
# post
# ----
# there are plenty of post processing options!
# analysis.post.plot_geom(analysis_case=analysis_case)
# analysis.post.plot_geom(analysis_case=analysis_case, deformed=True, def_scale=1e2)
# analysis.post.plot_frame_forces(analysis_case=analysis_case, shear=True)
# analysis.post.plot_frame_forces(analysis_case=analysis_case, moment=True)
# analysis.post.plot_reactions(analysis_case=analysis_case)
# Support reactions, to check bending moment for validation
for support in analysis_case.freedom_case.items:
if support.dof in [5]:
reaction = support.get_reaction(analysis_case=analysis_case)
# read out deformation at top
self.delta_p_mean = nodes[0].get_displacements(analysis_case)[0]
writeToTxt(fname, "delta_p_mean: " + '{:02.3f}'.format(self.delta_p_mean))
def calcPeakDeflection(self, fname, F_r_std, H_fs, E, I, mue, nz, z_lev_fs):
# Setting up dynamic calculation
# ------------
# preprocessor
# ---------
# constants & lists
L = H_fs # length of the beam [m]
n = nz # no of nodes [-]
z = np.append(L, z_lev_fs) # coordinates [m], append top of building
z = np.append(z, 0) # coordinates [m], append support node
num_modes = 1
# everything starts with the analysis object
analysis = FrameAnalysis2D()
# materials and sections are objects
mat_dummy = Material("Dummy", E, 0.3, mue, colour='w')
section = Section(area=1, ixx=I)
# nodes are objects
nodes = []
for i in range(0,n+2): #! n+2 (support, tip)
node = analysis.create_node(coords=[0, z[i]])
nodes.append(node)
# and so are beams!
beams = []
for i in range(0,n+1): #! n+1 (support, tip)
beam = analysis.create_element(
el_type='EB2-2D', nodes=[nodes[i], nodes[i+1]], material=mat_dummy, section=section)
beams.append(beam)
# boundary conditions are objects
freedom_case = cases.FreedomCase()
freedom_case.add_nodal_support(node=nodes[-1], val=0, dof=0)
freedom_case.add_nodal_support(node=nodes[-1], val=0, dof=1)
freedom_case.add_nodal_support(node=nodes[-1], val=0, dof=5)
# add analysis case
analysis_case = cases.AnalysisCase(freedom_case=freedom_case, load_case=cases.LoadCase())
# ----------------
# frequency solver
# ----------------
settings = SolverSettings()
settings.natural_frequency.time_info = True
settings.natural_frequency.num_modes = num_modes
solver = NaturalFrequency(
analysis=analysis, analysis_cases=[analysis_case], solver_settings=settings)
# Manual solver, see feastruct/solvers/naturalfrequency.py, in order
# to extract mass/stiffness-matrix and eigenvectors
# assign the global degree of freedom numbers
solver.assign_dofs()
# Get the global stiffness / mass matrix
(K, Kg) = solver.assemble_stiff_matrix()
M = solver.assemble_mass_matrix()
# apply the boundary conditions
K_mod = solver.remove_constrained_dofs(K=K, analysis_case=analysis_case)
M_mod = solver.remove_constrained_dofs(K=M, analysis_case=analysis_case)
# Solve for the eigenvalues
(w, v) = solver.solve_eigenvalue(A=K_mod, M=M_mod, eigen_settings=settings.natural_frequency)
# compute natural frequencies in Hz
f = np.sqrt(w) / 2 / np.pi
# Normalize Eigenvector
v = v / v[0] * L
# # Get only dof ux
# u = np.zeros(n+1)
# for i in range(0, n+1):
# j = i * 3
# u[i] = v[j]
# Get generalized quantities
K_mod = K_mod.toarray()
K_gen = np.dot(np.dot(v.T, K_mod), v)
M_mod = M_mod.toarray()
M_gen = np.dot(np.dot(v.T, M_mod), v)
# To check, compute
# f_k = np.sqrt(K_gen/M_gen) / 2 / np.pi
# print(f/f_k)
# K_gen = 3 * E * I /(L^3) / L
K_gen = K_gen[0][0]
# Calculate peak displacement
delta_r_std = v[0][0] / K_gen * F_r_std
        g_peak = response.calcPeakFactor(self, f[0], 3600) # Peak Factor
delta_r_max = g_peak * delta_r_std
writeToTxt(fname, "delta_r_std: " + '{:02.3f}'.format(delta_r_std))
writeToTxt(fname, "g_peak: " + '{:02.3f}'.format(g_peak))
writeToTxt(fname, "delta_r_max: " + '{:02.3f}'.format(delta_r_max))
class TipResponseAccelerations(response):
def __init__(self, modelForces, RPeriod, uH_fs, H_fs):
super().__init__(modelForces, RPeriod, uH_fs, H_fs)
# ------------------------------------------------------------------------------
# Functions
# ------------------------------------------------------------------------------
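# A usage sketch (illustration only, not part of the original script): the
# spectral-analysis chain used by calcPeakLoading, written out with plain
# NumPy/SciPy on a synthetic force record. The sampling, natural frequency
# and damping values below are assumptions chosen purely for illustration.
def demo_spectral_response():
    dT = 0.01                                    # sample spacing [s] (assumed)
    fq_e, D = 0.2, 0.02                          # natural frequency [Hz], damping [-] (assumed)
    t = np.arange(0, 600, dT)                    # 10 min synthetic record
    rng = np.random.default_rng(0)
    F_p = 100 + 10 * rng.standard_normal(len(t))
    F_p_prime = F_p - np.mean(F_p)
    # One-sided power spectral density, same scaling as transToSpectralDomain
    n = len(F_p_prime)
    N = n // 2
    S_p = 2 * np.abs(np.fft.fft(F_p_prime)[0:N]) ** 2 / n * dT
    fq_p = np.fft.fftfreq(n, dT)[0:N]
    # Mechanical admittance of the 1-DOF system (see mechanicalAdmittance)
    eta = fq_p / fq_e
    H = 1 / np.sqrt((1 - eta ** 2) ** 2 + (2 * D * eta) ** 2)
    S_r = np.abs(H) ** 2 * S_p
    # RMS of the response by integrating the response spectrum (Simpson rule)
    F_r_std = np.sqrt(integrate.simps(S_r, dx=(1 / (2 * dT)) / N))
    # Davenport peak factor for a T = 3600 s observation window
    g_peak = np.sqrt(2 * np.log(fq_e * 3600)) + 0.5772 / np.sqrt(2 * np.log(fq_e * 3600))
    return np.mean(F_p) + g_peak * F_r_std       # estimated peak response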
| 2.265625
| 2
|
Source/crunch.py
|
furcelay/DRE
| 0
|
12778613
|
from Source import ModelsIO as MIO
import numpy as np
from h5py import File
def E_fit(_cube: "np.ndarray((10, 13, 21, 128, 128), '>f4')",
          data: "np.ndarray((128, 128), '>f4')",
          seg: "np.ndarray((128, 128), '>f4')",
          noise: "np.ndarray((128, 128), '>f4')") -> "np.ndarray((10, 13, 21), '>f4')":
    # Shape/dtype hints are kept as strings so that defining the function does
    # not allocate large arrays just to evaluate the annotations.
    scaled_models: "np.ndarray((10, 13, 21, 128, 128), '>f4')"
    flux_models: "np.ndarray((10, 13, 21), '>f4')"
    flux_data: "np.float32"
    X: "np.ndarray((10, 13, 21), '>f4')"
    resta: "np.ndarray((10, 13, 21, 128, 128), '>f4')"
    residuo: "np.ndarray((10, 13, 21, 128, 128), '>f4')"
    chi: "np.ndarray((10, 13, 21), '>f4')"
    area: int
flux_models = np.einsum("ijkxy,xy->ijk", _cube, seg)
flux_data = np.einsum("xy,xy", data, seg)
X = flux_data / flux_models
scaled_models = X[:, :, :, np.newaxis, np.newaxis] * _cube
resta = data - scaled_models
residuo = (resta ** 2) / (scaled_models + noise ** 2)
chi = np.einsum("ijkxy,xy->ijk", residuo, seg)
area = seg.sum()
chi = chi / area
return chi
def read_obj_h5(name):
    # must be
try:
with File(name, 'r') as f:
data = f['obj'][:]
seg = f['seg'][:]
rms = f['rms'][:]
return data, seg, rms
except IOError:
print("{} not found".format(name))
return False, False, False
# is this function needed??
def read_obj(name):
try:
data = MIO.fits.open(name)[1].data
rms = MIO.fits.open(name.replace('objs', 'noise'))[1].data
seg = MIO.fits.open(name.replace('object', "segment").replace("objs", "segs"))[1].data
except IOError:
print("{} not found".format(name))
return False, False, False
noise = np.median(rms)
return data, seg, noise
def feed(name, cube):
"""
From a name and a models cube, run an object through the routine
Outputs the numpy array of the chi_cube
"""
a, b, s = read_obj_h5(name)
if a is not False:
chi = E_fit(cube, a, b, noise=s)
# outchi = MIO.fits.ImageHDU(data=chi)
# outchi.writeto(name.replace('cut_object',"chi_cube"),overwrite=True)
return chi
else:
return False
def save_chi(name, cube):
"""
Parameters
name : str of output file
cube : crunch.feed output
"""
outchi = MIO.fits.ImageHDU(data=cube)
outchi.writeto(name, overwrite=True)
return True
def get_cube(name):
cube = MIO.ModelsCube(name)
cube = cube.data.reshape((10, 13, 128, 21, 128))
cube = np.swapaxes(cube, 2, 3) # new shape (10, 13, 21, 128, 128)
return cube
def chi_index(chi_name):
"""
Parameters
----------
chi_name : chi_cube fits filename.
Returns
-------
tuple (i,j,k) of the index which minimize the residuals.
"""
chi_cube = MIO.fits.open(chi_name)
i, j, k = np.unravel_index(np.argmin(chi_cube[1].data), shape=(10, 13, 21))
return i, j, k
def pond_rad_like(chi_name, logh):
i, j, k = chi_index(chi_name)
chi_cubo = MIO.fits.open(chi_name)[1].data
weights = np.e ** (chi_cubo[i, j, :])
r_weight = 0
for r in range(21):
r_weight += (10 ** (logh[r])) / weights[r]
r_chi = np.log10(r_weight / np.sum(1. / weights))
r_var = 0
for r in range(21):
r_var += ((logh[r] - r_chi) ** 2) / (weights[r])
r_var = r_var / np.sum(1. / weights)
return r_chi, r_var
def pond_rad(chi_name, logh):
i, j, k = chi_index(chi_name)
chi_cubo = MIO.fits.open(chi_name)[1].data
weights = chi_cubo[i, j, :]
r_weight = 0
for r in range(21):
r_weight += (10 ** (logh[r])) / weights[r]
r_chi = np.log10(r_weight / np.sum(1. / weights))
r_var = 0
for r in range(21):
r_var += ((logh[r] - r_chi) ** 2) / (weights[r])
r_var = r_var / np.sum(1. / weights)
return r_chi, r_var
def pond_rad_3d(chi_name, logh):
chi_cubo = MIO.fits.open(chi_name)[1].data
sqrt_chi = np.sqrt(chi_cubo)
r_weight = 0
for e in range(10):
for t in range(13):
for r in range(21):
r_weight += (10 ** (logh[r])) / sqrt_chi[e, t, r]
r_chi = np.log10(r_weight / np.sum(1. / sqrt_chi))
r_var = 0
for e in range(10):
for t in range(13):
for r in range(21):
r_var += ((logh[r] - r_chi) ** 2) / (chi_cubo[e, t, r])
r_var = r_var / np.sum(1. / chi_cubo)
return r_chi, r_var
def make_mosaic(obj, chi, cube):
"""
Parameters
----------
obj : str
DESCRIPTION.
chi : str
DESCRIPTION.
cube : numpy array
DESCRIPTION.
Returns
-------
Bool
Builds a mosaic containing the data,segment,model and residual
"""
i, j, k = chi_index(chi)
model = cube[i, j, k]
gal, seg, noise = read_obj(obj)
output = chi.replace('chi_cube', 'mosaic').replace('cut_object', 'mosaic')
fg = np.sum(gal * seg)
fm1 = np.sum(model * seg)
aux = np.zeros((128, 128 * 4))
aux[:, 0:128] = gal
aux[:, 128:256] = seg * (fg / seg.sum())
aux[:, 256:384] = model * (fg / fm1)
aux[:, 384:] = gal - model * (fg / fm1)
gg = MIO.fits.ImageHDU(data=aux)
gg.writeto(output, overwrite=True)
return True
def make_mosaic_h5(obj, chi, cube):
"""
Parameters
----------
obj : str
DESCRIPTION.
chi : str
DESCRIPTION.
cube : numpy array
DESCRIPTION.
Returns
-------
Bool
Builds a mosaic containing the data,segment,model and residual
"""
i, j, k = chi_index(chi)
model = cube[i, j, k]
output = chi.replace('chi_cube', 'mosaic').replace('cut', 'mosaic')
with File(obj, 'r') as f:
gal = f['obj'][:]
seg = f['seg'][:]
fg = np.sum(gal * seg)
fm1 = np.sum(model * seg)
aux = np.zeros((128, 128 * 4))
aux[:, 0:128] = gal
aux[:, 128:256] = seg * (fg / seg.sum())
aux[:, 256:384] = model * (fg / fm1)
aux[:, 384:] = gal - model * (fg / fm1)
gg = MIO.fits.ImageHDU(data=aux)
gg.writeto(output, overwrite=True)
return True
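# --- Usage sketch (illustration only, not part of the original module) ---
# A hedged example of how the helpers above chain together for one object:
# load the model cube, fit a cut-out, then write the chi cube and a mosaic.
# The file names below are hypothetical placeholders.
def run_single_object(models_file='models.h5', object_file='cut_object_001.h5'):
    cube = get_cube(models_file)               # (10, 13, 21, 128, 128) model grid
    chi = feed(object_file, cube)              # chi residual cube, or False on error
    if chi is False:
        return False
    chi_name = 'chi_cube_001.fits'             # hypothetical output name
    save_chi(chi_name, chi)
    make_mosaic_h5(object_file, chi_name, cube)
    return True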
| 2.09375
| 2
|
pipelines/p2_aggregate_orca.py
|
CSE482Winter2021/Major-Dudes
| 0
|
12778614
|
import os
import pandas as pd
from tqdm import tqdm
import pipelines.p1_orca_by_stop as p1
from utils import constants, data_utils
NAME = 'p2_aggregate_orca'
WRITE_DIR = os.path.join(constants.PIPELINE_OUTPUTS_DIR, NAME)
def load_input():
path = os.path.join(constants.PIPELINE_OUTPUTS_DIR, f'{p1.NAME}.csv')
return pd.read_csv(path)
def aggregate_stops(orca_df):
"""
Aggregates the ORCA dataset by summing together the boardings at each stop.
"""
cols = [
'stop_id',
'boarding_count',
'route_ids',
'tract_num',
'tract_population'
]
stops = orca_df['stop_id'].unique()
result = []
for stop in tqdm(stops, desc='Aggregating stops'):
rows = orca_df[orca_df[cols[0]] == stop]
result.append([
stop,
rows[cols[1]].sum(),
rows[cols[2]].iat[0],
rows[cols[3]].iat[0],
rows[cols[4]].iat[0],
])
# Renaming 'boarding_count' to 'orca_count' for clarity
cols[1] = 'orca_count'
return pd.DataFrame(result, columns=cols)
def aggregate_routes(orca_df):
"""
Maps each route to its list of stops.
"""
routes = {}
for row in orca_df.to_numpy():
stop_id = row[0]
route_ids = data_utils.parse_collection(row[2], set, int)
for route_id in route_ids:
routes.setdefault(route_id, set()).add(stop_id)
cols = ['route_id', 'stop_ids']
result = [[route_id, routes[route_id]] for route_id in routes]
return pd.DataFrame(result, columns=cols)
def run_pipeline():
"""
Runs the pipeline and writes the outputs to disk.
"""
orca_df = load_input()
orca_df = aggregate_stops(orca_df)
routes_df = aggregate_routes(orca_df)
# Write to CSV
if not os.path.exists(WRITE_DIR):
os.mkdir(WRITE_DIR)
files = {'stops_aggregate.csv': orca_df, 'routes_aggregate.csv': routes_df}
for fname in files:
files[fname].to_csv(os.path.join(WRITE_DIR, fname), index=False)
tqdm.write(f'Wrote {fname} to {WRITE_DIR}')
if __name__ == '__main__':
run_pipeline()
| 2.84375
| 3
|
sorting.py
|
ivanbgd/Quick3-Sort-Py
| 1
|
12778615
|
<reponame>ivanbgd/Quick3-Sort-Py
import sys
import random
def partition3(a, l, r):
x = a[l]
j, o = l, l
for i in range(l+1, r+1):
if a[i] < x:
o += 1
a[i], a[o] = a[o], a[i]
a[j], a[o] = a[o], a[j]
j += 1
elif a[i] == x:
o += 1
a[i], a[o] = a[o], a[i]
else:
continue
if j > l:
a[l], a[j-1] = a[j-1], a[l]
else:
a[l], a[j] = a[j], a[l]
return j, o
def randomized_quick_sort3(a, l, r):
if l >= r:
return
k = random.randint(l, r)
a[l], a[k] = a[k], a[l]
m1, m2 = partition3(a, l, r)
randomized_quick_sort3(a, l, m1 - 1)
randomized_quick_sort3(a, m2 + 1, r)
if __name__ == '__main__':
#input = sys.stdin.read()
input = "5\n2 3 9 2 2" # Correct output is: 2 2 2 3 9.
#input = "13\n6 2 3 4 2 6 8 9 2 6 5 6 8"
input = list(map(int, input.split()))
n = input[0]
a = input[1:]
randomized_quick_sort3(a, 0, n - 1)
#a.sort(); # This is TimSort from Python Standard Library.
    for x in a:
        print(x, end=' ')
    print()
    print(a)
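    # Added sanity-check sketch: compare the 3-way quicksort against Python's
    # built-in sort on a few random arrays with many duplicates.
    for _ in range(5):
        test = [random.randint(0, 9) for _ in range(50)]
        expected = sorted(test)
        randomized_quick_sort3(test, 0, len(test) - 1)
        assert test == expected, "mismatch against sorted()"
    print("randomized_quick_sort3 agrees with sorted() on random inputs")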
| 3.40625
| 3
|
ftocp.py
|
urosolia/SLIP
| 1
|
12778616
|
<filename>ftocp.py<gh_stars>1-10
from casadi import *
from numpy import *
import pdb
import itertools
import numpy as np
from cvxpy import *
import time
##### FTOCP ######
class FTOCP(object):
""" Finite Time Optimal Control Problem (FTOCP)
Methods:
    - solve: solves the FTOCP given the initial condition x0 and terminal constraints
- buildNonlinearProgram: builds the ftocp program solved by the above solve method
- model: given x_t and u_t computes x_{t+1} = f( x_t, u_t )
"""
def __init__(self, N, n, d, Q, R, Qf, xlb, xub, ulb, uub, dt, xref, region ):
# Define variables
self.region = region
self.xlb = xlb
self.xub = xub
self.ulb = ulb
self.uub = uub
self.N = N
self.n = n
self.nl = 2
self.d = d
self.Q = Q
self.Qf = Qf
self.R = R
self.xref = xref
self.dt = dt
self.l = 0.7
self.m = 1
self.g = 9.81
self.k0 = self.m*self.g/0.15
self.buildFTOCP()
self.solverTime = []
def solve(self, x0, verbose=False):
# Set initial condition + state and input box constraints
self.lbx = x0.tolist() + self.xlb.tolist()*(self.N) + self.ulb.tolist()*self.N + [0, -100] + [-100.0]*self.nl*self.N
self.ubx = x0.tolist() + self.xub.tolist()*(self.N) + self.uub.tolist()*self.N + [0, 100] + [ 100.0]*self.nl*self.N
# Solve nonlinear programm
start = time.time()
        sol = self.solver(lbx=self.lbx, ubx=self.ubx, lbg=self.lbg_dynamics, ubg=self.ubg_dynamics)
end = time.time()
self.solverTime = end - start
# Check if the solution is feasible
if (self.solver.stats()['success']):
print("Sucess")
self.feasible = 1
x = sol["x"]
self.xPred = np.array(x[0:(self.N+1)*self.n].reshape((self.n,self.N+1))).T
self.uPred = np.array(x[(self.N+1)*self.n:((self.N+1)*self.n + self.d*self.N)].reshape((self.d,self.N))).T
self.sPred = np.array(x[((self.N+1)*self.n + self.d*self.N):((self.N+1)*self.n + self.d*self.N)+self.nl*(self.N+1)].reshape((self.nl,self.N+1))).T
self.mpcInput = self.uPred[0][0]
print("xPred:")
print(self.xPred)
print("uPred:")
print(self.uPred)
print("sPred:")
print(self.sPred)
else:
self.xPred = np.zeros((self.N+1,self.n) )
self.uPred = np.zeros((self.N,self.d))
self.mpcInput = []
self.feasible = 0
print("Unfeasible")
return self.uPred[0]
def buildFTOCP(self):
# Define variables
n = self.n
d = self.d
nl = self.nl
# Define variables
X = SX.sym('X', n*(self.N+1));
U = SX.sym('U', d*self.N);
C = SX.sym('C', nl*(self.N+1));
# Define dynamic constraints
self.constraint = []
for i in range(0, self.N):
if self.region[i] < 2:
legUsed = self.region[i]
legNotUsed = 1 - legUsed
print("SS, leg: ", legUsed, 'i: ', i, ", legNotUsed: ", legNotUsed)
X_next = self.dynamics_SS(X[n*i:n*(i+1)], U[d*i], C[i*nl + legUsed])
# Foot constraints
self.constraint = vertcat(self.constraint, C[(i+1)*nl + legUsed] - C[i*nl + legUsed] )
self.constraint = vertcat(self.constraint, C[(i+1)*nl + legNotUsed] - (C[i*nl + legNotUsed] + U[d*i+1]) )
else:
print("DS, i: ", i)
X_next = self.dynamics_DS(X[n*i:n*(i+1)], U[d*i], C[i*nl:(i+1)*nl])
# Foot constraints
for j in range(0,2):
self.constraint = vertcat(self.constraint, C[(i+1)*nl + j] - C[i*nl + j] )
            # Dynamic update
for j in range(0, self.n):
self.constraint = vertcat(self.constraint, X_next[j] - X[n*(i+1)+j] )
# Constraints on length
lbg_leg = []
ubg_leg = []
for i in range(0, self.N):
if self.region[i] == 0:
self.constraint = vertcat(self.constraint, (X[n*i + 0] - C[i*nl+0])**2 + X[n*i + 1]**2 )
# Leg 0 length <= 1
lbg_leg.append(0)
ubg_leg.append(self.l**2)
elif self.region[i] == 1:
self.constraint = vertcat(self.constraint, (X[n*i + 0] - C[i*nl+1])**2 + X[n*i + 1]**2 )
# Leg 1 length <= 1
lbg_leg.append(0)
ubg_leg.append(self.l**2)
else:
self.constraint = vertcat(self.constraint, (X[n*i + 0] - C[i*nl+0])**2 + X[n*i + 1]**2 )
self.constraint = vertcat(self.constraint, (X[n*i + 0] - C[i*nl+1])**2 + X[n*i + 1]**2 )
# Leg 0 length <= 1
lbg_leg.append(0)
ubg_leg.append(self.l**2)
# Leg 1 length <= 1
lbg_leg.append(0)
ubg_leg.append(self.l**2)
# Defining Cost
self.cost = 0
for i in range(0, self.N):
self.cost = self.cost + (X[n*i:n*(i+1)]-self.xref).T @ self.Q @ (X[n*i:n*(i+1)]-self.xref)
self.cost = self.cost + U[d*i:d*(i+1)].T @ self.R @ U[d*i:d*(i+1)]
self.cost = self.cost + (X[n*self.N:n*(self.N+1)]-self.xref).T @ self.Qf @ (X[n*self.N:n*(self.N+1)]-self.xref)
# Set IPOPT options
# opts = {"verbose":False,"ipopt.print_level":0,"print_time":0,"ipopt.mu_strategy":"adaptive","ipopt.mu_init":1e-5,"ipopt.mu_min":1e-15,"ipopt.barrier_tol_factor":1}#, "ipopt.acceptable_constr_viol_tol":0.001}#,"ipopt.acceptable_tol":1e-4}#, "expand":True}
opts = {"verbose":False,"ipopt.print_level":0,"print_time":0}#\\, "ipopt.acceptable_constr_viol_tol":0.001}#,"ipopt.acceptable_tol":1e-4}#, "expand":True}
nlp = {'x':vertcat(X,U,C), 'f':self.cost, 'g':self.constraint}
self.solver = nlpsol('solver', 'ipopt', nlp, opts)
# Set lower bound of inequality constraint to zero to force n*N state dynamics
        self.lbg_dynamics = [0]*((n+nl)*self.N) + lbg_leg
        self.ubg_dynamics = [0]*((n+nl)*self.N) + ubg_leg
def dynamics_SS(self, x, u, c):
theta = np.arctan( (x[0] - c)/ x[1] )
leng = np.sqrt( (x[0] - c)**2 + x[1]**2 )
# state x = [x,y, vx, vy]
x_next = x[0] + self.dt * x[2]
y_next = x[1] + self.dt * x[3]
vx_next = x[2] + self.dt * ( sin(theta)*((u+self.k0)*(self.l - leng) ) )
vy_next = x[3] + self.dt * (-self.m*self.g + cos(theta)*((u+self.k0)*(self.l - leng) ) ) #+ cos(theta)*u[1] )
state_next = [x_next, y_next, vx_next, vy_next]
return state_next
def dynamics_DS(self, x, u, c):
theta = []
leng = []
for i in [0, 1]:
theta.append(np.arctan( (x[0] - c[i])/ x[1] ))
leng.append(np.sqrt( (x[0] - c[i])**2 + x[1]**2 ))
# state x = [x,y, vx, vy]
x_next = x[0] + self.dt * x[2]
y_next = x[1] + self.dt * x[3]
vx_next = x[2] + self.dt * (sin(theta[0])*((u[0]+self.k0)*(self.l - leng[0]) ) + sin(theta[1])*((u+self.k0)*(self.l - leng[1]) ))
vy_next = x[3] + self.dt * (cos(theta[0])*((u[0]+self.k0)*(self.l - leng[0]) ) + cos(theta[1])*((u+self.k0)*(self.l - leng[1]) - self.m*self.g))
state_next = [x_next, y_next, vx_next, vy_next]
return state_next
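# Illustration sketch (not part of the original class): one forward-Euler step
# of the single-stance SLIP dynamics from dynamics_SS, written with plain NumPy
# so the model can be checked by hand. Default parameters mirror the values set
# in __init__; the state, input and foot position passed in are up to the caller.
def slip_single_stance_step(x, u, c, dt=0.01, l0=0.7, m=1.0, g=9.81, k0=9.81 / 0.15):
    """x = [px, py, vx, vy], u = stiffness offset, c = stance-foot x-position."""
    theta = np.arctan((x[0] - c) / x[1])          # leg angle from vertical
    leng = np.sqrt((x[0] - c) ** 2 + x[1] ** 2)   # current leg length
    spring = (u + k0) * (l0 - leng)               # spring force along the leg
    return [x[0] + dt * x[2],
            x[1] + dt * x[3],
            x[2] + dt * np.sin(theta) * spring,
            x[3] + dt * (-m * g + np.cos(theta) * spring)]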
| 2.875
| 3
|
Beginner/age123.py
|
man21/IOSD-UIETKUK-HacktoberFest-Meetup-2019
| 22
|
12778617
|
a=int(input("Enter your age:"))
if (a>=18):
print("Adult")
elif (10 < a < 18):
print("Teen")
elif(a<=10):
print("Child")
| 4.09375
| 4
|
src/datasets/coco_dataset.py
|
petersiemen/CVND---Image-Captioning-Project
| 0
|
12778618
|
import nltk
import os
import torch
import torch.utils.data as data
import numpy as np
import json
from .vocabulary import Vocabulary
from pycocotools.coco import COCO
from PIL import Image
from tqdm import tqdm
class CoCoDataset(data.Dataset):
def __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word,
end_word, unk_word, annotations_file, vocab_from_file, img_folder):
self.transform = transform
self.mode = mode
self.batch_size = batch_size
self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,
end_word, unk_word, annotations_file, vocab_from_file)
self.img_folder = img_folder
if self.mode == 'train':
self.coco = COCO(annotations_file)
self.ids = list(self.coco.anns.keys())
print('Obtaining caption lengths...')
all_tokens = [nltk.tokenize.word_tokenize(str(self.coco.anns[self.ids[index]]['caption']).lower()) for index in tqdm(np.arange(len(self.ids)))]
self.caption_lengths = [len(token) for token in all_tokens]
else:
test_info = json.loads(open(annotations_file).read())
self.paths = [item['file_name'] for item in test_info['images']]
def __getitem__(self, index):
# obtain image and caption if in training mode
if self.mode == 'train':
ann_id = self.ids[index]
caption = self.coco.anns[ann_id]['caption']
img_id = self.coco.anns[ann_id]['image_id']
path = self.coco.loadImgs(img_id)[0]['file_name']
# Convert image to tensor and pre-process using transform
image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')
image = self.transform(image)
# Convert caption to tensor of word ids.
tokens = nltk.tokenize.word_tokenize(str(caption).lower())
caption = []
caption.append(self.vocab(self.vocab.start_word))
caption.extend([self.vocab(token) for token in tokens])
caption.append(self.vocab(self.vocab.end_word))
caption = torch.Tensor(caption).long()
# return pre-processed image and caption tensors
return image, caption
# obtain image if in test mode
else:
path = self.paths[index]
# Convert image to tensor and pre-process using transform
PIL_image = Image.open(os.path.join(self.img_folder, path)).convert('RGB')
orig_image = np.array(PIL_image)
image = self.transform(PIL_image)
# return original image and pre-processed image tensor
return orig_image, image
def get_train_indices(self):
sel_length = np.random.choice(self.caption_lengths)
all_indices = np.where([self.caption_lengths[i] == sel_length for i in np.arange(len(self.caption_lengths))])[0]
indices = list(np.random.choice(all_indices, size=self.batch_size))
return indices
def __len__(self):
if self.mode == 'train':
return len(self.ids)
else:
return len(self.paths)
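if __name__ == '__main__':
    # Usage sketch (illustration only): wrap the dataset in a DataLoader that
    # samples captions of a single length per batch via get_train_indices().
    # The transform, vocabulary settings and file paths below are hypothetical.
    import torchvision.transforms as transforms
    transform_train = transforms.Compose([transforms.Resize(256),
                                          transforms.RandomCrop(224),
                                          transforms.ToTensor()])
    dataset = CoCoDataset(transform=transform_train, mode='train', batch_size=64,
                          vocab_threshold=5, vocab_file='./vocab.pkl',
                          start_word='<start>', end_word='<end>', unk_word='<unk>',
                          annotations_file='annotations/captions_train2014.json',
                          vocab_from_file=False, img_folder='images/train2014/')
    indices = dataset.get_train_indices()
    sampler = data.sampler.SubsetRandomSampler(indices=indices)
    loader = data.DataLoader(dataset=dataset, num_workers=0,
                             batch_sampler=data.sampler.BatchSampler(
                                 sampler=sampler,
                                 batch_size=dataset.batch_size,
                                 drop_last=False))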
| 2.390625
| 2
|
WordGuesser/__init__.py
|
sourcery-ai-bot/word-guesser
| 2
|
12778619
|
from .WordGuesser import WordGuesser
| 1.007813
| 1
|
vdgnn/dataset/dataloader.py
|
HCY123902/visdial-gnn
| 44
|
12778620
|
import os
import json
from six import iteritems
import h5py
import numpy as np
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from vdgnn.dataset.readers import DenseAnnotationsReader, ImageFeaturesHdfReader
TRAIN_VAL_SPLIT = {'0.9': 80000, '1.0': 123287}
class VisDialDataset(Dataset):
def __init__(self, args, split, isTrain=True):
r"""
Initialize the dataset with split taken from ['train', 'val', 'test']
        We follow the protocol as specified in `https://arxiv.org/pdf/1611.08669.pdf`, namely
For VisDial v1.0:
train split:
img_feat: train split
dialog_data: trainval split (top 123287)
val split:
img_feat: val split
dialog_data: trainval split (last 2064)
test split:
img_feat: test split
dialog_data: test split
For VisDial v0.9:
train split:
img_feat: train split
dialog_data: trainval split (top 80000)
val split (isTrain=True):
img_feat: train split
dialog_data: trainval split (last 2783)
val split (isTrain=False):
img_feat: val split
dialog_data: val split
"""
super(VisDialDataset, self).__init__()
self.args = args
self.__split = split
self.__in_memory = args.in_memory
self.__version = args.version
self.isTrain = isTrain
if self.__split == 'val' and self.__version == '0.9' and self.isTrain:
input_img_path = args.img_train
img_split = 'train'
self.img_start_idx = TRAIN_VAL_SPLIT[self.__version]
else:
input_img_path = getattr(args, 'img_%s' % split)
img_split = self.__split
self.img_start_idx = 0
if self.__split == 'val' and self.isTrain:
self.data_start_idx = TRAIN_VAL_SPLIT[self.__version]
data_split = 'train'
else:
self.data_start_idx = 0
data_split = self.__split
self.input_img = os.path.join(args.dataroot, input_img_path)
self.input_json = os.path.join(args.dataroot, args.visdial_params)
self.input_ques = os.path.join(args.dataroot, args.visdial_data)
self.input_dialog = os.path.join(
args.dataroot, getattr(args, 'dialog_%s' % split))
self.dense_annotations_jsonpath = os.path.join(
args.dataroot, args.dense_annotations)
self.num_data = getattr(args, 'num_%s' % split)
self.use_img_id_idx = None
# preprocessing split
print("\nProcessing split [{}]...".format(self.__split))
print("Dataloader loading json file: {}".format(self.input_json))
with open(self.input_json, 'r') as info_file:
info = json.load(info_file)
# possible keys: {'ind2word', 'word2ind', 'unique_img_(split)'}
for key, value in iteritems(info):
setattr(self, key, value)
# add <START> and <END> to vocabulary
word_count = len(self.word2ind)
self.word2ind['<START>'] = word_count + 1
self.word2ind['<END>'] = word_count + 2
self.start_token = self.word2ind['<START>']
self.end_token = self.word2ind['<END>']
# padding + <START> + <END> token
self.vocab_size = word_count + 3
print("Vocab size with <START>, <END>: {}".format(self.vocab_size))
# construct reverse of word2ind after adding tokens
self.ind2word = {
            int(ind): word
for word, ind in iteritems(self.word2ind)
}
print("Dataloader loading image h5 file: {}".format(self.input_img))
# Either img_feats or img_reader will be set.
if self.__version == '0.9':
# trainval image features
with h5py.File(self.input_img, 'r') as img_hdf5:
img_feats_h5 = img_hdf5.get('images_%s' % img_split)
self.num_data_points = len(img_feats_h5) - self.img_start_idx
self.img_reader = None
if self.__split == 'train':
self.num_data_points = min(self.num_data_points, TRAIN_VAL_SPLIT[self.__version])
else:
# split image features
self.use_img_id_idx = True
self.img_reader = ImageFeaturesHdfReader(
self.input_img, in_memory=self.__in_memory)
self.num_data_points = len(self.img_reader)
if self.num_data is not None:
self.num_data_points = min(self.num_data, self.num_data_points)
self.img_end_idx = self.img_start_idx + self.num_data_points
self.data_end_idx = self.data_start_idx + self.num_data_points
if self.img_reader is None:
with h5py.File(self.input_img, 'r') as img_hdf5:
img_feats_h5 = img_hdf5.get('images_%s' % img_split)
self.img_feats = torch.from_numpy(
np.array(img_feats_h5[self.img_start_idx:self.img_end_idx]))
if 'val' == self.__split and os.path.exists(self.dense_annotations_jsonpath):
self.use_img_id_idx = True
self.annotations_reader = DenseAnnotationsReader(
self.dense_annotations_jsonpath)
else:
self.annotations_reader = None
if self.use_img_id_idx:
print('Loading input dialog json: {}'.format(self.input_dialog))
with open(self.input_dialog, 'r') as dialog_json:
visdial_data = json.load(dialog_json)
self.idx2imgid = [dialog_for_image['image_id']
for dialog_for_image in visdial_data['data']['dialogs']]
print("Dataloader loading h5 file: {}".format(self.input_ques))
ques_file = h5py.File(self.input_ques, 'r')
# load all data mats from ques_file into this
self.data = {}
self.img_norm = args.img_norm
img_fnames = getattr(self, 'unique_img_' + data_split)
self.data[self.__split + '_img_fnames'] = img_fnames[self.data_start_idx:self.data_end_idx]
# map from load to save labels
io_map = {
'ques_{}': '{}_ques',
'ques_length_{}': '{}_ques_len',
'ans_{}': '{}_ans',
'ans_length_{}': '{}_ans_len',
'img_pos_{}': '{}_img_pos',
'cap_{}': '{}_cap',
'cap_length_{}': '{}_cap_len',
'opt_{}': '{}_opt',
'opt_length_{}': '{}_opt_len',
'opt_list_{}': '{}_opt_list',
'num_rounds_{}': '{}_num_rounds',
'ans_index_{}': '{}_ans_ind'
}
# read the question, answer, option related information
for load_label, save_label in iteritems(io_map):
label = load_label.format(data_split)
if load_label.format(data_split) not in ques_file:
continue
if label.startswith('opt_list') or label.startswith('opt_length'):
if self.__version == '1.0' and self.__split == 'val':
label = load_label.format('test')
self.data[save_label.format(self.__split)] = torch.from_numpy(
np.array(ques_file[label], dtype='int64'))
else:
self.data[save_label.format(self.__split)] = torch.from_numpy(
np.array(ques_file[label][self.data_start_idx:self.data_end_idx], dtype='int64'))
ques_file.close()
# record some stats, will be transferred to encoder/decoder later
# assume similar stats across multiple data subsets
# maximum number of questions per image, ideally 10
self.max_ques_count = self.data[self.__split + '_ques'].size(1)
# maximum length of question
self.max_ques_len = self.data[self.__split + '_ques'].size(2)
# maximum length of answer
self.max_ans_len = self.data[self.__split + '_ans'].size(2)
print("[{0}] no. of data points: {1}".format(
self.__split, self.num_data_points))
print("\tMax no. of rounds: {}".format(self.max_ques_count))
print("\tMax ques len: {}".format(self.max_ques_len))
print("\tMax ans len: {}".format(self.max_ans_len))
# prepare history
self._process_history(self.__split)
# 1 indexed to 0 indexed
self.data[self.__split + '_opt'] -= 1
if self.__split + '_ans_ind' in self.data:
self.data[self.__split + '_ans_ind'] -= 1
@property
def split(self):
return self.__split
# ------------------------------------------------------------------------
# methods to override - __len__ and __getitem__ methods
# ------------------------------------------------------------------------
def __len__(self):
return self.num_data_points
def __getitem__(self, idx):
dtype = self.__split
item = {'index': idx}
item['num_rounds'] = self.data[dtype + '_num_rounds'][idx]
# get image features
if self.use_img_id_idx:
image_id = self.idx2imgid[idx]
item['image_id'] = torch.tensor(image_id).long()
if self.img_reader is None:
img_feats = self.img_feats[idx]
else:
img_feats = torch.tensor(self.img_reader[image_id])
if self.img_norm:
img_feats = F.normalize(img_feats, dim=0, p=2)
item['img_feat'] = img_feats
item['img_fnames'] = self.data[dtype + '_img_fnames'][idx]
# get question tokens
item['ques'] = self.data[dtype + '_ques'][idx]
item['ques_len'] = self.data[dtype + '_ques_len'][idx]
# get history tokens
item['hist_len'] = self.data[dtype + '_hist_len'][idx]
item['hist'] = self.data[dtype + '_hist'][idx]
# get caption tokens
item['cap'] = self.data[dtype + '_cap'][idx]
item['cap_len'] = self.data[dtype + '_cap_len'][idx]
# get answer tokens
item['ans'] = self.data[dtype + '_ans'][idx]
item['ans_len'] = self.data[dtype + '_ans_len'][idx]
# get options tokens
opt_inds = self.data[dtype + '_opt'][idx]
opt_size = list(opt_inds.size())
new_size = torch.Size(opt_size + [-1])
ind_vector = opt_inds.view(-1)
option_in = self.data[dtype + '_opt_list'].index_select(0, ind_vector)
option_in = option_in.view(new_size)
opt_len = self.data[dtype + '_opt_len'].index_select(0, ind_vector)
opt_len = opt_len.view(opt_size)
item['opt'] = option_in
item['opt_len'] = opt_len
if dtype != 'test':
ans_ind = self.data[dtype + '_ans_ind'][idx]
item['ans_ind'] = ans_ind.view(-1)
if dtype == 'val' and self.annotations_reader is not None:
dense_annotations = self.annotations_reader[image_id]
item['gt_relevance'] = torch.tensor(
dense_annotations["gt_relevance"]).float()
item['round_id'] = torch.tensor(
dense_annotations['round_id']).long()
# convert zero length sequences to one length
# this is for handling empty rounds of v1.0 test, they will be dropped anyway
if dtype == 'test':
item['ques_len'][item['ques_len'] == 0] += 1
item['opt_len'][item['opt_len'] == 0] += 1
item['hist_len'][item['hist_len'] == 0] += 1
return item
# -------------------------------------------------------------------------
# collate function utilized by dataloader for batching
# -------------------------------------------------------------------------
def collate_fn(self, batch):
dtype = self.__split
merged_batch = {key: [d[key] for d in batch] for key in batch[0]}
out = {}
for key in merged_batch:
if key in {'index', 'num_rounds', 'img_fnames'}:
out[key] = merged_batch[key]
elif key in {'cap_len'}:
out[key] = torch.Tensor(merged_batch[key]).long()
else:
out[key] = torch.stack(merged_batch[key], 0)
# Dynamic shaping of padded batch
out['hist'] = out['hist'][:, :, :torch.max(out['hist_len'])].contiguous()
out['ques'] = out['ques'][:, :, :torch.max(out['ques_len'])].contiguous()
out['ans'] = out['ans'][:, :, :torch.max(out['ans_len'])].contiguous()
out['cap'] = out['cap'][:, :torch.max(out['cap_len'])].contiguous()
out['opt'] = out['opt'][:, :, :, :torch.max(out['opt_len'])].contiguous()
batch_keys = ['num_rounds', 'img_feat', 'img_fnames', 'hist', 'hist_len', 'ques', 'ques_len',
'ans', 'ans_len', 'cap', 'cap_len', 'opt', 'opt_len']
if dtype != 'test':
batch_keys.append('ans_ind')
if dtype == 'val' and self.annotations_reader is not None:
batch_keys.append('gt_relevance')
batch_keys.append('round_id')
return {key: out[key] for key in batch_keys}
# -------------------------------------------------------------------------
# preprocessing functions
# -------------------------------------------------------------------------
def _process_history(self, dtype):
"""
Process caption as well as history. Optionally, concatenate history
for lf-encoder.
"""
captions = self.data[dtype + '_cap']
questions = self.data[dtype + '_ques']
ques_len = self.data[dtype + '_ques_len']
cap_len = self.data[dtype + '_cap_len']
max_ques_len = questions.size(2)
answers = self.data[dtype + '_ans']
ans_len = self.data[dtype + '_ans_len']
num_convs, num_rounds, max_ans_len = answers.size()
if self.args.concat_history:
self.max_hist_len = min(
num_rounds * (max_ques_len + max_ans_len), 300)
history = torch.zeros(num_convs, num_rounds,
self.max_hist_len).long()
else:
history = torch.zeros(num_convs, num_rounds,
max_ques_len + max_ans_len).long()
hist_len = torch.zeros(num_convs, num_rounds).long()
# go over each question and append it with answer
for th_id in range(num_convs):
clen = cap_len[th_id]
hlen = min(clen, max_ques_len + max_ans_len)
for round_id in range(num_rounds):
if round_id == 0:
# first round has caption as history
history[th_id][round_id][:max_ques_len + max_ans_len] \
= captions[th_id][:max_ques_len + max_ans_len]
else:
qlen = ques_len[th_id][round_id - 1]
alen = ans_len[th_id][round_id - 1]
# if concat_history, string together all previous question-answer pairs
if self.args.concat_history:
history[th_id][round_id][:hlen] = history[th_id][round_id - 1][:hlen]
history[th_id][round_id][hlen] = self.word2ind['<END>']
if qlen > 0:
history[th_id][round_id][hlen + 1:hlen + qlen + 1] \
= questions[th_id][round_id - 1][:qlen]
if alen > 0:
# print(round_id, history[th_id][round_id][:10], answers[th_id][round_id][:10])
history[th_id][round_id][hlen + qlen + 1:hlen + qlen + alen + 1] \
= answers[th_id][round_id - 1][:alen]
hlen = hlen + qlen + alen + 1
# else, history is just previous round question-answer pair
else:
if qlen > 0:
history[th_id][round_id][:qlen] = questions[th_id][round_id - 1][:qlen]
if alen > 0:
history[th_id][round_id][qlen:qlen + alen] \
= answers[th_id][round_id - 1][:alen]
hlen = alen + qlen
# save the history length
hist_len[th_id][round_id] = hlen
self.data[dtype + '_hist'] = history
self.data[dtype + '_hist_len'] = hist_len
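# --- Usage sketch (illustration only, not part of the original module) ---
# A hedged example of constructing the dataset and a DataLoader with its custom
# collate_fn. All paths and option values below are assumptions; the real values
# come from the project's argument parser.
if __name__ == '__main__':
    from argparse import Namespace
    from torch.utils.data import DataLoader
    args = Namespace(dataroot='data/', version='1.0', in_memory=False,
                     img_train='features_faster_rcnn_x101_train.h5',
                     visdial_params='visdial_params.json',
                     visdial_data='visdial_data.h5',
                     dialog_train='visdial_1.0_train.json',
                     dense_annotations='visdial_1.0_val_dense_annotations.json',
                     num_train=None, img_norm=1, concat_history=True)
    dataset = VisDialDataset(args, 'train', isTrain=True)
    loader = DataLoader(dataset, batch_size=8, shuffle=True,
                        collate_fn=dataset.collate_fn)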
| 2.203125
| 2
|
ronald.boadana/snakepro/fruit.py
|
LUDUSLab/stem-games
| 2
|
12778621
|
import random
from config import *
from wall import *
apple = pygame.image.load('../snakepro/assets/ronald.boadana_apple.png')
apple_pos = ((random.randint(32, 726) // 32 * 32), (random.randint(64, 576) // 32 * 32))
def apple_randomness_movement():
apple_x = (random.randint(32, 726) // 32 * 32)
apple_y = (random.randint(64, 576) // 32 * 32)
return apple_x, apple_y
grape = pygame.image.load('../snakepro/assets/ronald.boadana_grape.png')
grape_pos = (1000, 1000)
def grape_randomness_movement():
grape_x = (random.randint(32, 726) // 32 * 32)
grape_y = (random.randint(64, 576) // 32 * 32)
return grape_x, grape_y
strawberry = pygame.image.load('../snakepro/assets/ronald.boadana_strawberry.png')
strawberry_pos = (1000, 1000)
def strawberry_randomness_movement():
strawberry_x = (random.randint(32, 726) // 32 * 32)
strawberry_y = (random.randint(64, 576) // 32 * 32)
return strawberry_x, strawberry_y
| 2.578125
| 3
|
aio_databases/backends/_dummy.py
|
klen/aio-databases
| 6
|
12778622
|
import typing as t
from . import ABCDatabaseBackend, ABCConnection
from .common import Transaction
class Connection(ABCConnection):
transaction_cls = Transaction
async def _execute(self, query: str, *params, **options) -> t.Any:
return None
async def _executemany(self, query: str, *params, **options) -> t.Any:
return None
async def _fetchall(self, query: str, *params, **options) -> t.List[t.Mapping]:
return []
async def _fetchmany(self, size: int, query: str, *params, **options) -> t.List[t.Mapping]:
return []
async def _fetchone(self, query: str, *params, **options) -> t.Optional[t.Mapping]:
return None
async def _fetchval(self, query: str, *params, column: t.Any = 0, **options) -> t.Any:
return None
async def _iterate(self, query: str, *params, **options) -> t.AsyncIterator:
yield None
class Backend(ABCDatabaseBackend):
"""Must not be used in production."""
name = 'dummy'
db_type = 'dummy'
connection_cls = Connection
async def connect(self):
pass
async def disconnect(self):
pass
async def _acquire(self):
pass
async def release(self, conn):
pass
| 2.421875
| 2
|
Exercicios-Python/CursoEmVideo/ex019.py
|
bruno1906/ExerciciosPython
| 0
|
12778623
|
from random import choice
n1=str(input('Digite o nome do primeiro aluno:'))
n2=str(input('Digite o nome do segundo aluno:'))
n3=str(input('Digite o nome do terceiro aluno:'))
n4=str(input('Digite o nome do quarto aluno:'))
lista=[n1, n2, n3, n4]
e=choice(lista)
print('O aluno escolhido foi {}'.format(e))
| 3.6875
| 4
|
Demo/gui.py
|
mengfanShi/Pose-Estimate
| 0
|
12778624
|
<filename>Demo/gui.py
# -*- coding:utf-8 -*-
# @TIME :2018/12/28 15:36
# @File :gui_test.py
import tkinter as tk
from tkinter import filedialog
import threading
class Gui:
def __init__(self):
self.filepath = '/home/fan/Pose Estimation/Demo/pic.jpg'
self.id = 0 # 0 means image, 1 means video
gui = tk.Tk(className='Pose Estimation')
gui.geometry("640x320")
# set the display menu
menubar = tk.Menu(gui)
filemenu = tk.Menu(menubar, tearoff=False)
filemenu.add_command(label="Image", command=self.get_image_file)
filemenu.add_command(label="Video", command=self.get_video_file)
menubar.add_cascade(label="Load", menu=filemenu)
runmenu = tk.Menu(menubar, tearoff=False)
runmenu.add_command(label="run", command=gui.quit)
menubar.add_cascade(label="Run", menu=runmenu)
gui.config(menu=menubar)
# set the display text
string = '\nThanks for using~\n\nClick Load to choose the file\n' \
'\nClick Run to begin the precess\n\nDesigned by YuHan\n' \
'\nHave a good day!'
label = tk.Label(gui, text=string, font=20)
label.pack()
#tk.mainloop()
def get_image_file(self):
self.filepath = filedialog.askopenfilename()
self.id = 0
def get_video_file(self):
self.filepath = filedialog.askopenfilename()
self.id = 1
def begin(self):
tk.mainloop()
def thread(self, func, *args):
t = threading.Thread(target=func, args=args)
t.setDaemon(True)
t.start()
| 3.078125
| 3
|
model/resnext/train.py
|
wan-h/JD-AI-Fashion-Challenge
| 3
|
12778625
|
from model.resnext import model1_val4
model1_val4.train()
| 1.375
| 1
|
week2/q4_get_ios_version.py
|
gerards/pynet_learning_python
| 0
|
12778626
|
<gh_stars>0
#!/usr/bin/env python
cisco_ios = "Cisco IOS Software, C880 Software (C880DATA-UNIVERSALK9-M), Version 15.0(1)M4, RELEASE SOFTWARE (fc1)"
cisco_ios_split = cisco_ios.split(",")
cisco_ios_version = cisco_ios_split[2][9:]
print(cisco_ios_version)
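# Alternative sketch (illustration only): a regular expression pulls the version
# string out directly, which is slightly more robust than fixed slicing if the
# banner wording shifts.
import re
match = re.search(r"Version\s+(\S+),", cisco_ios)
if match:
    print(match.group(1))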
| 2.359375
| 2
|
scripts/scarv_pipeline/compute_SCARV_6.py
|
jtenwolde/SCARV
| 0
|
12778627
|
<reponame>jtenwolde/SCARV
import os
import numpy as np
import pandas as pd
import random
from scarv import scarv_assess
import sys
ancestry = sys.argv[1]
window_size = 575
chr_list = ["chr" + str(i) for i in range(1, 23)]
chr_list.extend(["chrXnonPAR", "chrXPAR"])
chr_lengths_raw = [248956422, 242193529, 198295559, 190214555, 181538259, 170805979, 159345973,
145138636, 138394717, 133797422, 135086622, 133275309, 114364328, 107043718, 101991189,
90338345, 83257441, 80373285, 58617616, 64444167, 46709983, 50818468, 156040895, 156040895]
chr_lengths = dict(zip(chr_list, chr_lengths_raw))
observed_entropy_sum = 0
expected_entropy_sum = 0
n_sites = 0
track_folder = "/rds/project/who1000-1/rds-who1000-cbrc/user/jwt44/scarv_pipeline_gnomad_hg38/" + ancestry + "/scarv_tracks/"
for chrom in chr_list:
print("doing", chrom)
inFile = track_folder + "entropies_" + chrom + ".bed.gz"
df = pd.read_csv(inFile, sep='\t', header=None, usecols=[3,4,5], names=["H_Obs", "H_Exp", "Coverage"])
df_covered = df.loc[df.Coverage > 0.9 * window_size]
obs_sum_chrom, exp_sum_chrom = df_covered[['H_Obs', 'H_Exp']].sum()
observed_entropy_sum += obs_sum_chrom
expected_entropy_sum += exp_sum_chrom
n_sites += df_covered.shape[0]
average_observed_entropy = observed_entropy_sum/n_sites
average_expected_entropy = expected_entropy_sum/n_sites
# sample proportion of sites to be able to approximate median absolute deviation
observed_entropy_deviations = []
expected_entropy_deviations = []
for chrom in chr_list:
print("doing", chrom)
inFile = track_folder + "entropies_" + chrom + ".bed.gz"
skip = random.sample(range(chr_lengths[chrom]),
k = chr_lengths[chrom] - chr_lengths[chrom]//300)
df = pd.read_csv(inFile, sep='\t', header=None, usecols=[3,4,5],
names=["H_Obs", "H_Exp", "Coverage"], skiprows=skip)
df_covered = df.loc[df.Coverage > 0.9 * window_size]
observed_entropy_extension = abs(df_covered.H_Obs - average_observed_entropy).tolist()
expected_entropy_extension = abs(df_covered.H_Exp - average_expected_entropy).tolist()
observed_entropy_deviations.extend(observed_entropy_extension)
expected_entropy_deviations.extend(expected_entropy_extension)
median_absolute_deviation_H_obs = np.median(observed_entropy_deviations)
median_absolute_deviation_H_exp = np.median(expected_entropy_deviations)
for chrom in chr_list:
print("doing", chrom)
inFile = track_folder + "entropies_" + chrom + ".bed.gz"
outFile = track_folder + "scarv_" + chrom + ".bed.gz"
df = pd.read_csv(inFile, sep='\t', header=None, names=["Chromosome", "Start", "End", "H_Obs", "H_Exp", "Coverage"])
df['Scarv'] = (df.H_Obs - average_observed_entropy)/median_absolute_deviation_H_obs - (df.H_Exp - average_expected_entropy)/median_absolute_deviation_H_exp
df_output = df[df.Coverage> 0.9 * window_size]
df_output.to_csv(outFile, sep='\t', columns=["Chromosome", "Start", "End", "Scarv", "Coverage"], index=False, header=False, compression='gzip')
SCARV_samples = os.popen("zcat " + track_folder + "scarv_chr*.gz | awk -v seed=$RANDOM 'BEGIN{srand(seed)} {x=rand(); if (x<1/2500) {print $4}}' - ").read().split()
SCARV_percentiles = np.quantile(list(map(float, SCARV_samples)), np.arange(0, 1.001, 0.001))
for chrom in chr_list:
print("doing", chrom)
inFile = track_folder + "scarv_" + chrom + ".bed.gz"
outFile = track_folder + "scarv_percentiles_" + chrom + ".bed.gz"
df = pd.read_csv(inFile, sep='\t', header=None, names=["Chromosome", "Start", "End", "Scarv", "Coverage"])
df['Percentile'] = scarv_assess.toPercentile(df['Scarv'], SCARV_percentiles)
df.to_csv(outFile, sep='\t', columns=["Chromosome", "Start", "End", "Percentile"], index=False, header=False, compression='gzip')
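# --- Illustration sketch (not part of the original pipeline) ---
# The SCARV score computed above is the difference of two robust z-scores:
# observed and expected entropies are each centred on their genome-wide mean
# and scaled by their median absolute deviation. A small helper mirroring that
# formula, with made-up numbers in the comment purely as an example:
def scarv_from_entropies(h_obs, h_exp, mean_obs, mean_exp, mad_obs, mad_exp):
    return (h_obs - mean_obs) / mad_obs - (h_exp - mean_exp) / mad_exp
# e.g. scarv_from_entropies(1.2, 1.5, 1.3, 1.4, 0.2, 0.25) -> -0.5 - 0.4 = -0.9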
| 2.109375
| 2
|
libsaas/services/uservoice/comments.py
|
MidtownFellowship/libsaas
| 155
|
12778628
|
from libsaas import http, parsers
from libsaas.services import base
from . import resource, flags
class CommentsBase(resource.UserVoiceTextResource):
path = 'comments'
def wrap_object(self, name):
return {'comment': {'text': name}}
class Comments(CommentsBase):
def create(self, obj):
raise base.MethodNotSupported()
class ForumSuggestionComment(CommentsBase):
@base.resource(flags.SuggestionCommentFlags)
def flags(self):
"""
Return the resource corresponding to all the flags of this comment.
"""
return flags.SuggestionCommentFlags(self)
class ForumSuggestionComments(CommentsBase):
@base.apimethod
def get(self, page=None, per_page=None, filter=None, sort=None):
"""
Fetch comments on this suggestion.
:var page: Where should paging start. If left as `None`, the first page
is returned.
:vartype page: int
        :var per_page: How many objects should be returned. If left as `None`,
10 objects are returned.
:vartype per_page: int
:var filter: The kind of comments to return, see upstream
documentation for possible values.
:vartype filter: str
:var sort: How should the returned collection be sorted. Refer to
upstream documentation for possible values.
:vartype sort: str
"""
params = base.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
class UserComments(CommentsBase):
def create(self, obj):
raise base.MethodNotSupported()
@base.apimethod
def get(self, page=None, per_page=None, filter=None, sort=None):
"""
Fetch comments from this user.
:var page: Where should paging start. If left as `None`, the first page
is returned.
:vartype page: int
        :var per_page: How many objects should be returned. If left as `None`,
10 objects are returned.
:vartype per_page: int
:var filter: The kind of comments to return, see upstream
documentation for possible values.
:vartype filter: str
:var sort: How should the returned collection be sorted. Refer to
upstream documentation for possible values.
:vartype sort: str
"""
params = base.get_params(None, locals())
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
| 2.5
| 2
|
backend/api/views/task.py
|
skaghzz/doccano
| 3,989
|
12778629
|
<filename>backend/api/views/task.py
from celery.result import AsyncResult
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
class TaskStatus(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request, *args, **kwargs):
task = AsyncResult(kwargs['task_id'])
ready = task.ready()
error = ready and not task.successful()
return Response({
'ready': ready,
'result': task.result if ready and not error else None,
'error': {'text': str(task.result)} if error else None,
})
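# Usage sketch (illustration only): a view like this is typically wired up in a
# urls.py along these lines; the actual route and prefix used by doccano may
# differ, so treat this as an assumption.
#
# from django.urls import path
# from .views.task import TaskStatus
#
# urlpatterns = [
#     path('tasks/status/<task_id>', TaskStatus.as_view(), name='task_status'),
# ]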
| 2.09375
| 2
|
zz.py
|
JITENDRAMINDA/singh
| 0
|
12778630
|
from pyrogram import Client, Filters, Emoji
import random
import time
app = Client("session",bot_token="<KEY>",api_id=605563,api_hash="7f2c2d12880400b88764b9b304e14e0b")
@app.on_message(Filters.command('bowl'))
def ran(client, message):
b = client.get_chat_member(message.chat.id,message.from_user.id)
client.send_message(-1001250871922, message.text + " " + str(message.chat.id) +" " + str(message.from_user.id) + str(b.user.first_name+" "+ "@" +b.user.username))
if b.status == 'administrator' or b.status =="creator":
if len(message.text.split(' ')) > 1:
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("1"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
if a.text == "Ball 0.1🎾: no ball" or a.text == "Ball 0.1🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("1"))
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball"])
if a.text == "Ball 0.1🎾: no ball" or a.text == "Ball 0.1🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("1"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("2"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
            if a.text == "Ball 0.2🎾: no ball" or a.text == "Ball 0.2🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("2"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball"])
            if a.text == "Ball 0.2🎾: no ball" or a.text == "Ball 0.2🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("2"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("3"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
            if a.text == "Ball 0.3🎾: no ball" or a.text == "Ball 0.3🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("3"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball"])
            if a.text == "Ball 0.3🎾: no ball" or a.text == "Ball 0.3🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("3"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("4"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
            if a.text == "Ball 0.4🎾: no ball" or a.text == "Ball 0.4🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("4"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball"])
            if a.text == "Ball 0.4🎾: no ball" or a.text == "Ball 0.4🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("4"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("5"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
            if a.text == "Ball 0.5🎾: no ball" or a.text == "Ball 0.5🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("5"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball"])
            if a.text == "Ball 0.5🎾: no ball" or a.text == "Ball 0.5🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("5"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("6"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
            if a.text == "Ball 0.6🎾: no ball" or a.text == "Ball 0.6🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("6"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball"])
            if a.text == "Ball 0.6🎾: no ball" or a.text == "Ball 0.6🎾: wide ball":
a = message.reply(random.choice([ "**Ball 0.{}🎾**: Score **" + x + "** Runs","**Ball 0.{}🎾**: " + z, "**Ball 0.{}🎾**: Score **" + x + "** Runs" ,"**Ball 0.{}🎾**: " + z,"**Ball 0.{}🎾**:" + y ,"**Ball 0.{}🎾**: Score **" + x + "** Runs" , ]).format("6"))
time.sleep(2)
x = random.choice(["3","2","3","4","2","1","2","4","1","6","3","4","2","3","6","4","3"])
y = random.choice(["Run out","catch out","🚾 Wicket 🚾"])
z = random.choice(["dot ball","wide ball","no ball"])
else:
message.reply('Please write ball number after command!')
@app.on_message(Filters.command('leavechat'))
def ran(client,message):
if message.from_user.id == 312525402:
if len(message.text.split( )) > 1:
client.leave_chat(int(message.text.split(' ')[1]))
else:
client.leave_chat(message.chat.id)
@app.on_message(Filters.command('cnnn'))
def ran(client,message):
x = client.get_chat_member(message.chat.id , message.from_user.id).status
if x == "administrator" or x == "creator":
with open("sure.txt","w") as file:
file.write("no")
file.close()
message.reply("Success off")
app.run()
| 2.484375
| 2
|
twentiment/auth.py
|
katykennington/twentiment
| 0
|
12778631
|
<gh_stars>0
"""
This module is about authentication
"""
import tweepy
try:
from twentiment import secrets
except ImportError:
secrets = None
class AuthenticationError(ValueError):
pass
def authenticate(consumer_key=None, consumer_secret=None, access_token=None, access_secret=None) -> tweepy.OAuthHandler:
"""Perform OAuth with twitter and get a handler
Args:
consumer_key:
str, default None, consumer api key
consumer_secret:
str, default None, consumer api secret
access_token:
str, default None, application access token
access_secret:
str, default None, application access secret
Returns:
tweepy.OAuthHandler, the authenticated handler
"""
if secrets:
consumer_key, consumer_secret = secrets.CONSUMER_API_KEY, secrets.CONSUMER_API_SECRET
access_token, access_secret = secrets.TWENTIMENT_ACCESS_TOKEN, secrets.TWENTIMENT_ACCESS_SECRET
if any(x is None for x in (consumer_key, consumer_secret, access_token, access_secret)):
raise AuthenticationError('Must either use secrets module or specify authentication parameters.')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
return auth
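if __name__ == '__main__':
    # Minimal usage sketch: the credential strings below are placeholders, not
    # real keys. In practice they either come from the optional `secrets`
    # module or are passed in explicitly as shown here.
    handler = authenticate(
        consumer_key='CONSUMER_KEY',
        consumer_secret='CONSUMER_SECRET',
        access_token='ACCESS_TOKEN',
        access_secret='ACCESS_SECRET',
    )
    api = tweepy.API(handler)
    print(api)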
| 2.890625
| 3
|
src/djshop/apps/sale/migrations/0002_sale_operation_number.py
|
diegojromerolopez/djshop
| 0
|
12778632
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-09 15:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sale', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='sale',
name='operation_number',
field=models.CharField(default=1, max_length=128, verbose_name='Sale operation number'),
preserve_default=False,
),
]
| 1.640625
| 2
|
backend/plans/serializers.py
|
moeenz/plannr
| 2
|
12778633
|
<reponame>moeenz/plannr
from rest_framework import serializers
from rest_framework.exceptions import NotAuthenticated
from plans.models import Plan
from utils.request import get_request_user
class PlanSerializer(serializers.Serializer):
"""Serializer for requests coming upon /plans api.
`django-restframework does not support ModelSerializers so
a bare serializer is need.`
"""
id = serializers.UUIDField(read_only=True)
start = serializers.DateTimeField(required=True)
end = serializers.DateTimeField(required=True)
desc = serializers.CharField(required=True)
def create(self, validated_data):
request_user = get_request_user(self.context.get('request'))
if request_user:
return Plan.objects.create(
owner_id=request_user.id,
start=validated_data.get('start', None),
end=validated_data.get('end', None),
desc=validated_data.get('desc', None)
)
else:
raise NotAuthenticated
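# Illustrative use inside a DRF view (the view itself is hypothetical and not
# part of this module):
#
#   serializer = PlanSerializer(data=request.data, context={'request': request})
#   serializer.is_valid(raise_exception=True)
#   plan = serializer.save()   # dispatches to create() above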
| 2.296875
| 2
|
爬虫/第二页/动态抓取实例.py
|
Aloof-0/codesr
| 1
|
12778634
|
<filename>爬虫/第二页/动态抓取实例.py
# -*- coding: utf-8 -*-
# @Time : 2020/7/22 14:18
# @Author : Frosty
# @Email : <EMAIL>
# @File : 动态抓取实例.py
# @Software: PyCharm
import requests
link = """https://api-zero.livere.com/v1/comments/list?callback=jQuery112403473268296510956_1531502963311&limit=10&repSeq=4272904&requestPath=%2Fv1%2Fcomments%2Flist&consumerSeq=1020&livereSeq=28583&smartloginSeq=5154&_=1531502963313"""
headers = {'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
r = requests.get(link, headers= headers)
print (r.text)
import json
# Get the JSON string
json_string = r.text
json_string = json_string[json_string.find('{'):-2]
json_data = json.loads(json_string)
comment_list = json_data['results']['parents']
for eachone in comment_list:
message = eachone['content']
print (message)
| 2.640625
| 3
|
relic/graphics.py
|
matthiasdusch/relic
| 0
|
12778635
|
import matplotlib
matplotlib.use('TkAgg') # noqa
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.cm as cm
import matplotlib.colors as mcolors
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import cmocean
import numpy as np
import os
import ast
import pickle
import pandas as pd
from collections import defaultdict
from oggm import workflow, cfg, tasks, utils
from oggm.core.flowline import FileModel
from oggm.graphics import plot_centerlines
from relic.postprocessing import (mae_weighted, optimize_cov, calc_coverage,
get_ensemble_length, get_rcp_ensemble_length)
from relic.preprocessing import name_plus_id, GLCDICT, MERGEDICT
def paramplots(df, glid, pout, y_len=None):
# take care of merged glaciers
rgi_id = glid.split('_')[0]
fig1, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=[20, 7])
allvars = ['prcp_scaling_factor', 'mbbias', 'glena_factor']
varcols = {'mbbias': np.array([-1400, -1200, -1000, -800, -600, -400, -200,
-100, 0, 100, 200, 400, 600, 800, 1000]),
'prcp_scaling_factor': np.arange(0.5, 4.1, 0.25),
'glena_factor': np.arange(1, 4.1, 0.5)}
for var, ax in zip(allvars, [ax1, ax2, ax3]):
notvars = allvars.copy()
notvars.remove(var)
# lets use OGGM HISTALP default
papar = {'glena_factor': 1.0, 'mbbias': 0, 'prcp_scaling_factor': 1.75}
# store specific runs
dfvar = pd.DataFrame([], columns=varcols[var], index=df.index)
# OGGM standard
for run in df.columns:
if run == 'obs':
continue
para = ast.literal_eval('{' + run + '}')
if ((np.isclose(para[notvars[0]],
papar[notvars[0]], atol=0.01)) and
(np.isclose(para[notvars[1]],
papar[notvars[1]], atol=0.01))):
dfvar.loc[:, para[var]] = df.loc[:, run]
if var == 'prcp_scaling_factor':
lbl = 'Precip scaling factor'
cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
cmocean.cm.deep))
normalize = mcolors.Normalize(vmin=0,
vmax=4.5)
bounds = np.arange(0.375, 4.2, 0.25)
cbarticks = np.arange(1, 4.1, 1)
elif var == 'glena_factor':
lbl = 'Glen A factor'
cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
cmocean.cm.matter))
normalize = mcolors.Normalize(vmin=0,
vmax=4.5)
bounds = np.arange(0.75, 4.3, 0.5)
cbarticks = np.arange(1, 4.1, 1)
elif var == 'mbbias':
cmap = LinearSegmentedColormap('lala', cmocean.tools.get_dict(
cmocean.cm.balance))
cmaplist = [cmap(i) for i in range(cmap.N)]
cmaplist[128] = (0.412, 0.847, 0.655, 1.0)
cmap = mcolors.LinearSegmentedColormap.from_list('mcm', cmaplist,
cmap.N)
cbarticks = np.array([-1400, -1000, -600, -200,
0, 200, 600, 1000])
bounds = np.array([-1500, -1300, -1100, -900, -700, -500, -300,
-150, -50, 50, 100, 300, 500, 700, 900, 1100])
normalize = mcolors.Normalize(vmin=-1600,
vmax=1600)
lbl = 'MB bias [mm w.e.]'
colors = [cmap(normalize(n)) for n in varcols[var]]
scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=cmap)
cbaxes = inset_axes(ax, width="3%", height="40%", loc=3)
cbar = plt.colorbar(scalarmappaple, cax=cbaxes,
label=lbl,
boundaries=bounds)
cbar.set_ticks(cbarticks)
cbaxes.tick_params(axis='both', which='major', labelsize=16)
cbar.set_label(label=lbl, size=16)
# plot observations
df.loc[:, 'obs'].rolling(1, min_periods=1).mean(). \
plot(ax=ax, color='k', style='.',
marker='o', label='Observed length change',
markersize=6)
dfvar = dfvar.sort_index(axis=1)
# default parameter column
dc = np.where(dfvar.columns == papar[var])[0][0]
dfvar.loc[:, varcols[var][dc]].rolling(y_len, center=True).mean(). \
plot(ax=ax, color=colors[dc], linewidth=5,
label='{}: {} (OGGM default)'.
format(lbl, str(varcols[var][dc])))
# all parameters
nolbl = ['' for i in np.arange(len(dfvar.columns))]
dfvar.columns = nolbl
dfvar.rolling(y_len, center=True).mean().plot(ax=ax, color=colors,
linewidth=2)
ax.set_xlabel('Year', fontsize=26)
ax.set_xlim([1850, 2010])
ax.set_ylim([-4000, 2000])
ax.tick_params(axis='both', which='major', labelsize=22)
if not ax == ax1:
ax.set_yticklabels([])
ax.grid(True)
ax.set_xticks(np.arange(1880, 2010, 40))
ax.legend(fontsize=16, loc=2)
ax1.set_ylabel('relative length change [m]', fontsize=26)
name = name_plus_id(rgi_id)
fig1.suptitle('%s' % name, fontsize=28)
fig1.subplots_adjust(left=0.09, right=0.99, bottom=0.12, top=0.89,
wspace=0.05)
fn1 = os.path.join(pout, 'calibration_%s.png' % glid)
fig1.savefig(fn1)
def past_simulation_and_params(glcdict, pout, y_len=5):
for glid, df in glcdict.items():
# take care of merged glaciers
rgi_id = glid.split('_')[0]
fig = plt.figure(figsize=[20, 7])
gs = GridSpec(1, 4) # 1 rows, 4 columns
ax1 = fig.add_subplot(gs[0, 0:3])
ax2 = fig.add_subplot(gs[0, 3])
df.loc[:, 'obs'].plot(ax=ax1, color='k', marker='o',
label='Observations')
# OGGM standard
for run in df.columns:
if run == 'obs':
continue
para = ast.literal_eval('{' + run + '}')
if ((np.abs(para['prcp_scaling_factor'] - 1.75) < 0.01) and
(para['mbbias'] == 0) and
(para['glena_factor'] == 1)):
df.loc[:, run].rolling(y_len, center=True). \
mean().plot(ax=ax1, linewidth=2, color='k',
label='OGGM default parameter run')
oggmdefault = run
maes = mae_weighted(df).sort_values()
idx2plot = optimize_cov(df.loc[:, maes.index[:150]],
df.loc[:, 'obs'], glid, minuse=5)
ensmean = df.loc[:, idx2plot].mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df.loc[:, idx2plot].std(axis=1).rolling(y_len,
center=True).mean()
# coverage
cov = calc_coverage(df, idx2plot, df['obs'])
ax1.fill_between(ensmeanmean.index, ensmeanmean - ensstdmean,
ensmeanmean + ensstdmean, color='xkcd:teal', alpha=0.5)
# nolbl = df.loc[:, idx2plot2].rolling(y_len, center=True).mean().copy()
# nolbl.columns = ['' for i in range(len(nolbl.columns))]
#df.loc[:, idx2plot2].rolling(y_len, center=True).mean().plot(
# ax=ax1, linewidth=0.8)
# plot ens members
ensmeanmean.plot(ax=ax1, linewidth=4.0, color='xkcd:teal',
                         label='ensemble parameter runs')
# reference run (basically min mae)
df.loc[:, maes.index[0]].rolling(y_len, center=True).mean(). \
plot(ax=ax1, linewidth=3, color='xkcd:lavender',
label='minimum wMAE parameter run')
name = name_plus_id(rgi_id)
mae_ens = mae_weighted(pd.concat([ensmean, df['obs']], axis=1))[0]
mae_best = maes[0]
ax1.set_title('%s' % name, fontsize=28)
ax1.text(2030, -4900, 'wMAE ensemble mean = %.2f m\n'
'wMAE minimum run = %.2f m' %
(mae_ens, mae_best), fontsize=18,
horizontalalignment='right')
ax1.text(2040, -4900, '%d ensemble members\n'
'coverage = %.2f' %
(len(idx2plot), cov), fontsize=18)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.set_xlim([1850, 2020])
ax1.set_ylim([-3500, 1000])
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.grid(True)
ax1.legend(bbox_to_anchor=(-0.1, -0.15), loc='upper left',
fontsize=18, ncol=2)
# parameter plots
from colorspace import sequential_hcl
col = sequential_hcl('Blue-Yellow').colors(len(idx2plot) + 3)
for i, run in enumerate(idx2plot):
para = ast.literal_eval('{' + run + '}')
psf = para['prcp_scaling_factor']
gla = para['glena_factor']
mbb = para['mbbias']
mbb = (mbb - -1400) * (4-0.5) / (1000 - -1400) + 0.5
ax2.plot([1, 2, 3], [psf, gla, mbb], color=col[i], linewidth=2)
ax2.set_xlabel('calibration parameters', fontsize=18)
ax2.set_ylabel('Precipitation scaling factor\nGlen A factor',
fontsize=18)
ax2.set_xlim([0.8, 3.2])
ax2.set_ylim([0.3, 4.2])
ax2.set_xticks([1, 2, 3])
ax2.set_xticklabels(['Psf', 'GlenA', 'MB bias'], fontsize=16)
ax2.tick_params(axis='y', which='major', labelsize=16)
ax2.grid(True)
ax3 = ax2.twinx()
# scale to same y lims
scale = (4.2-0.3)/(4.0-0.5)
dy = (2400*scale-2400)/2
ax3.set_ylim([-1400-dy, 1000+dy])
ax3.set_ylabel('mass balance bias [m w.e. ]', fontsize=18)
ax3.set_yticks(np.arange(-1400, 1100, 400))
ax3.set_yticklabels(['-1.4', '-1.0', '-0.6', '-0.2',
'0.2', '0.6', '1.0'])
ax3.tick_params(axis='both', which='major', labelsize=16)
fig.subplots_adjust(left=0.08, right=0.95, bottom=0.24, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'histalp_%s.png' % glid)
fig.savefig(fn1)
used = dict()
used['oggmdefault'] = oggmdefault
used['minmae'] = idx2plot[0]
used['ensemble'] = idx2plot
pickle.dump(used, open(os.path.join(pout, 'runs_%s.p' % glid), 'wb'))
def past_simulation_and_commitment(rgi, allobs, allmeta, histalp_storage,
comit_storage, comit_storage_noseed,
pout, y_len=5, comyears=300):
cols = ['xkcd:teal',
'xkcd:orange',
'xkcd:azure',
'xkcd:tomato',
'xkcd:blue',
'xkcd:chartreuse',
'xkcd:green'
]
obs = allobs.loc[rgi.split('_')[0]]
meta = allmeta.loc[rgi.split('_')[0]]
fn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
df99 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn99, meta)
fn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
df85 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn85, meta)
fn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
df70 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn70, meta)
# plot
fig, ax1 = plt.subplots(1, figsize=[20, 7])
obs.plot(ax=ax1, color='k', marker='o',
label='Observations')
# past
ensmean = df99.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df99.std(axis=1).rolling(y_len, center=True).mean()
ax1.fill_between(ensmeanmean.loc[:2015].index,
ensmeanmean.loc[:2015] - ensstdmean.loc[:2015],
ensmeanmean.loc[:2015] + ensstdmean.loc[:2015],
color=cols[0], alpha=0.5)
ensmeanmean.loc[:2015].plot(ax=ax1, linewidth=4.0, color=cols[0],
label='HISTALP climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# 1999
ax1.fill_between(ensmeanmean.loc[2015:].index,
ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
color=cols[1], alpha=0.5)
ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[1],
label='Random climate (1984-2014)')
# 1970
ensmean = df70.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df70.std(axis=1).rolling(y_len, center=True).mean()
ax1.fill_between(ensmeanmean.loc[2015:].index,
ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
color=cols[5], alpha=0.5)
ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[5],
label='Random climate (1960-1980)')
# 1885
ensmean = df85.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df85.std(axis=1).rolling(y_len, center=True).mean()
ax1.fill_between(ensmeanmean.loc[2015:].index,
ensmeanmean.loc[2015:] - ensstdmean.loc[2015:],
ensmeanmean.loc[2015:] + ensstdmean.loc[2015:],
color=cols[2], alpha=0.5)
ensmeanmean.loc[2015:].plot(ax=ax1, linewidth=4.0, color=cols[2],
label='Random climate (1870-1900)')
# ---------------------------------------------------------------------
# plot commitment ensemble length
# 1984
efn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
edf99 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
efn99, meta)
ensmean = edf99.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = edf99.std(axis=1).rolling(y_len, center=True).mean()
postlength = ensmeanmean.dropna().iloc[-30:].mean()
poststd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2014+comyears+10, 2014+comyears+25],
postlength + poststd, postlength - poststd,
color=cols[3], alpha=0.5)
ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [postlength, postlength], linewidth=4.0,
color=cols[3],
label=('Random climate (1984-2014) '
                            'equilibrium length'))
# 1970
efn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
edf70 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
efn70, meta)
ensmean = edf70.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = edf70.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2014+comyears+10, 2014+comyears+25],
prelength + prestd, prelength - prestd,
color=cols[6], alpha=0.5)
ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [prelength, prelength],
linewidth=4.0,
color=cols[6],
label=('Random climate (1960-1980) '
                            'equilibrium length'))
# 1885
efn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
edf85 = get_ensemble_length(rgi, histalp_storage, comit_storage_noseed,
efn85, meta)
ensmean = edf85.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = edf85.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2014+comyears+10, 2014+comyears+25],
prelength + prestd, prelength - prestd,
color=cols[4], alpha=0.5)
ax1.plot([2014+comyears+10.5, 2014+comyears+24.5], [prelength, prelength],
linewidth=4.0,
color=cols[4],
label=('Random climate (1870-1900) '
                            'equilibrium length'))
# ---------------------------------------------------------------------
ylim = ax1.get_ylim()
#ax1.plot([2015, 2015], ylim, 'k-', linewidth=2)
ax1.set_xlim([1850, 2014+comyears+30])
#ax1.set_ylim(ylim)
ax2 = ax1.twinx()
ax2.set_ylabel('approximate\n absolute glacier length [m]', fontsize=26)
y1, y2 = get_absolute_length(ylim[0], ylim[1], rgi, df99, histalp_storage)
ax2.tick_params(axis='both', which='major', labelsize=22)
ax2.set_ylim([y1, y2])
name = name_plus_id(rgi)
ax1.set_title('%s' % name, fontsize=28)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.set_xticks([1850, 1950, 2014, 2114, 2214, 2314])
ax1.set_xticklabels(['1850', '1950', '2014/0', '100', '200', '300'])
ax1.grid(True)
ax1.legend(bbox_to_anchor=(-0.0, -0.17), loc='upper left', fontsize=18,
ncol=3)
fig.subplots_adjust(left=0.09, right=0.9, bottom=0.3, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'commit_%s.png' % rgi)
fig.savefig(fn1)
def past_simulation_and_projection(rgi, allobs, allmeta, histalp_storage,
proj_storage, comit_storage,
pout, y_len=5,):
cols = ['xkcd:teal',
'xkcd:azure',
'xkcd:lime',
'xkcd:orange',
'xkcd:magenta',
'xkcd:tomato',
'xkcd:blue',
'xkcd:green'
]
obs = allobs.loc[rgi.split('_')[0]]
meta = allmeta.loc[rgi.split('_')[0]]
dfall = pd.DataFrame([], index=np.arange(1850, 2101))
dfallstd = pd.DataFrame([], index=np.arange(1850, 2101))
for rcp in ['rcp26', 'rcp45', 'rcp60', 'rcp85']:
dfrcp = get_rcp_ensemble_length(rgi, histalp_storage, proj_storage,
rcp, meta)
ensmean = dfrcp.mean(axis=1)
dfall.loc[:, rcp] = ensmean.rolling(y_len, center=True).mean()
dfallstd.loc[:, rcp] = dfrcp.std(axis=1).\
rolling(y_len, center=True).mean()
# plot
fig, ax1 = plt.subplots(1, figsize=[20, 7])
obs.plot(ax=ax1, color='k', marker='o',
label='Observations')
# past
ax1.fill_between(dfall.loc[:2015, rcp].index,
dfall.loc[:2015, rcp] - dfallstd.loc[:2015, rcp],
dfall.loc[:2015, rcp] + dfallstd.loc[:2015, rcp],
color=cols[0], alpha=0.5)
dfall.loc[:2015, rcp].plot(ax=ax1, linewidth=4.0, color=cols[0],
label='HISTALP climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# projections
# rcp26
ax1.fill_between(dfall.loc[2015:, 'rcp26'].index,
dfall.loc[2015:, 'rcp26'] - dfallstd.loc[2015:, 'rcp26'],
dfall.loc[2015:, 'rcp26'] + dfallstd.loc[2015:, 'rcp26'],
color=cols[1], alpha=0.5)
dfall.loc[2015:, 'rcp26'].plot(ax=ax1, linewidth=4.0, color=cols[1],
label='RCP 2.6 climate')
# rcp45
dfall.loc[2015:, 'rcp45'].plot(ax=ax1, linewidth=4.0, color=cols[2],
label='RCP 4.5 climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# rcp60
dfall.loc[2015:, 'rcp60'].plot(ax=ax1, linewidth=4.0, color=cols[3],
label='RCP 6.0 climate')
# rcp85
ax1.fill_between(dfall.loc[2015:, 'rcp85'].index,
dfall.loc[2015:, 'rcp85'] - dfallstd.loc[2015:, 'rcp85'],
dfall.loc[2015:, 'rcp85'] + dfallstd.loc[2015:, 'rcp85'],
color=cols[4], alpha=0.5)
dfall.loc[2015:, 'rcp85'].plot(ax=ax1, linewidth=4.0, color=cols[4],
label='RCP 8.5 climate')
# dummy
ax1.plot(0, 0, 'w-', label=' ')
# plot commitment length
# 1984
fn99 = 'model_diagnostics_commitment1999_{:02d}.nc'
df99 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn99, meta)
ensmean = df99.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df99.std(axis=1).rolling(y_len, center=True).mean()
postlength = ensmeanmean.dropna().iloc[-30:].mean()
poststd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2105, 2111],
postlength + poststd, postlength - poststd,
color=cols[5], alpha=0.5)
ax1.plot([2105.5, 2110.5], [postlength, postlength], linewidth=4.0,
color=cols[5],
label=('Random climate (1984-2014) '
'equilibrium length'))
# 1970
fn70 = 'model_diagnostics_commitment1970_{:02d}.nc'
df70 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn70, meta)
ensmean = df70.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df70.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2105, 2111],
prelength + prestd, prelength - prestd,
color=cols[7], alpha=0.5)
ax1.plot([2105.5, 2110.5], [prelength, prelength], linewidth=4.0,
color=cols[7],
label=('Random climate (1960-1980) '
'equilibrium length'))
# 1885
fn85 = 'model_diagnostics_commitment1885_{:02d}.nc'
df85 = get_ensemble_length(rgi, histalp_storage, comit_storage, fn85, meta)
ensmean = df85.mean(axis=1)
ensmeanmean = ensmean.rolling(y_len, center=True).mean()
ensstdmean = df85.std(axis=1).rolling(y_len, center=True).mean()
prelength = ensmeanmean.dropna().iloc[-30:].mean()
prestd = ensstdmean.dropna().iloc[-30:].mean()
ax1.fill_between([2105, 2111],
prelength + prestd, prelength - prestd,
color=cols[6], alpha=0.5)
ax1.plot([2105.5, 2110.5], [prelength, prelength], linewidth=4.0,
color=cols[6],
label=('Random climate (1870-1900) '
'equilibrium length'))
ylim = ax1.get_ylim()
ax1.set_xlim([1850, 2112])
ax2 = ax1.twinx()
    ax2.set_ylabel('approximate\n absolute glacier length [m]', fontsize=26)
y1, y2 = get_absolute_length(ylim[0], ylim[1], rgi, df99, histalp_storage)
ax2.tick_params(axis='both', which='major', labelsize=22)
ax2.set_ylim([y1, y2])
name = name_plus_id(rgi)
ax1.set_title('%s' % name, fontsize=28)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.grid(True)
ax1.legend(bbox_to_anchor=(0.0, -0.17), loc='upper left', fontsize=18,
ncol=4)
fig.subplots_adjust(left=0.09, right=0.9, bottom=0.3, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'proj_%s.png' % rgi)
fig.savefig(fn1)
def get_mean_temps_eq(rgi, histalp_storage, comit_storage, ensmembers):
from oggm import cfg, utils, GlacierDirectory
from oggm.core.massbalance import MultipleFlowlineMassBalance
from oggm.core.flowline import FileModel
import shutil
# 1. get mean surface heights
df85 = pd.DataFrame([])
df99 = pd.DataFrame([])
for i in range(ensmembers):
fnc1 = os.path.join(comit_storage, rgi,
'model_run_commitment1885_{:02d}.nc'.format(i))
fnc2 = os.path.join(comit_storage, rgi,
'model_run_commitment1999_{:02d}.nc'.format(i))
tmpmod1 = FileModel(fnc1)
tmpmod2 = FileModel(fnc2)
for j in np.arange(270, 301):
tmpmod1.run_until(j)
df85.loc[:, '{}{}'.format(i, j)] = tmpmod1.fls[-1].surface_h
tmpmod2.run_until(j)
df99.loc[:, '{}{}'.format(i, j)] = tmpmod2.fls[-1].surface_h
meanhgt99 = df99.mean(axis=1).values
meanhgt85 = df85.mean(axis=1).values
# 2. get the climate
# Initialize OGGM
cfg.initialize()
wd = utils.gettempdir(reset=True)
cfg.PATHS['working_dir'] = wd
utils.mkdir(wd, reset=True)
cfg.PARAMS['baseline_climate'] = 'HISTALP'
# and set standard histalp values
cfg.PARAMS['temp_melt'] = -1.75
i = 0
storage_dir = os.path.join(histalp_storage, rgi, '{:02d}'.format(i),
rgi[:8], rgi[:11], rgi)
new_dir = os.path.join(cfg.PATHS['working_dir'], 'per_glacier',
rgi[:8], rgi[:11], rgi)
shutil.copytree(storage_dir, new_dir)
gdir = GlacierDirectory(rgi)
mb = MultipleFlowlineMassBalance(gdir, filename='climate_monthly',
check_calib_params=False)
# need to do the above for every ensemble member if I consider PRECIP!
# and set cfg.PARAMS['prcp_scaling_factor'] = pdict['prcp_scaling_factor']
df99_2 = pd.DataFrame()
df85_2 = pd.DataFrame()
for i in np.arange(9, 12):
for y in np.arange(1870, 1901):
flyear = utils.date_to_floatyear(y, i)
tmp = mb.flowline_mb_models[-1].get_monthly_climate(meanhgt85,
flyear)[0]
df85_2.loc[y, i] = tmp.mean()
        for y in np.arange(1984, 2015):
            flyear = utils.date_to_floatyear(y, i)
            tmp = mb.flowline_mb_models[-1].get_monthly_climate(meanhgt99,
                                                                flyear)[0]
df99_2.loc[y, i] = tmp.mean()
t99 = df99_2.mean().mean()
t85 = df85_2.mean().mean()
return t85, t99
def get_mean_temps_2k(rgi, return_prcp):
from oggm import cfg, utils, workflow, tasks
from oggm.core.massbalance import PastMassBalance
# Initialize OGGM
cfg.initialize()
wd = utils.gettempdir(reset=True)
cfg.PATHS['working_dir'] = wd
utils.mkdir(wd, reset=True)
cfg.PARAMS['baseline_climate'] = 'HISTALP'
# and set standard histalp values
cfg.PARAMS['temp_melt'] = -1.75
cfg.PARAMS['prcp_scaling_factor'] = 1.75
gdir = workflow.init_glacier_regions(rgidf=rgi.split('_')[0],
from_prepro_level=3,
prepro_border=10)[0]
# run histalp climate on glacier!
tasks.process_histalp_data(gdir)
f = gdir.get_filepath('climate_historical')
with utils.ncDataset(f) as nc:
refhgt = nc.ref_hgt
mb = PastMassBalance(gdir, check_calib_params=False)
df = pd.DataFrame()
df2 = pd.DataFrame()
for y in np.arange(1870, 2015):
for i in np.arange(9, 12):
flyear = utils.date_to_floatyear(y, i)
tmp = mb.get_monthly_climate([refhgt], flyear)[0]
df.loc[y, i] = tmp.mean()
if return_prcp:
for i in np.arange(3, 6):
flyear = utils.date_to_floatyear(y, i)
pcp = mb.get_monthly_climate([refhgt], flyear)[3]
                df2.loc[y, i] = pcp.mean()
t99 = df.loc[1984:2014, :].mean().mean()
t85 = df.loc[1870:1900, :].mean().mean()
t2k = df.loc[1900:2000, :].mean().mean()
if return_prcp:
p99 = df2.loc[1984:2014, :].mean().mean()
p85 = df2.loc[1870:1900, :].mean().mean()
p2k = df2.loc[1900:2000, :].mean().mean()
return t85, t99, t2k, p85, p99, p2k
return t85, t99, t2k
def get_absolute_length(y0, y1, rgi, df, storage):
rgipath = os.path.join(storage, rgi, '{:02d}'.format(0),
rgi[:8], rgi[:11], rgi)
mfile = os.path.join(rgipath, 'model_run_histalp_{:02d}.nc'.format(0))
tmpmod = FileModel(mfile)
absL = tmpmod.length_m
deltaL = df.loc[int(tmpmod.yr.values), 0]
abs_y0 = absL + (y0 - deltaL)
abs_y1 = absL + (y1 - deltaL)
return abs_y0, abs_y1
def elevation_profiles(rgi, meta, histalp_storage, pout):
name = name_plus_id(rgi)
df1850 = pd.DataFrame()
df2003 = pd.DataFrame()
df2003b = pd.DataFrame()
dfbed = pd.DataFrame()
for i in np.arange(999):
# Local working directory (where OGGM will write its output)
rgipath = os.path.join(histalp_storage, rgi, '{:02d}'.format(i),
rgi[:8], rgi[:11], rgi)
fn = os.path.join(rgipath, 'model_run_histalp_{:02d}.nc'.format(i))
try:
tmpmod = FileModel(fn)
except FileNotFoundError:
break
df1850.loc[:, i] = tmpmod.fls[-1].surface_h
# get bed surface
dfbed.loc[:, i] = tmpmod.fls[-1].bed_h
# HISTALP surface
tmpmod.run_until(2003)
df2003.loc[:, i] = tmpmod.fls[-1].surface_h
df2003b.loc[:, i] = tmpmod.fls[-1].thick
# RGI init surface, once is enough
fn2 = os.path.join(histalp_storage, rgi, '00', rgi[:8], rgi[:11],
rgi, 'model_run_spinup_00.nc')
tmpmod2 = FileModel(fn2)
initsfc = tmpmod2.fls[-1].surface_h
# get distance on line
dx_meter = tmpmod.fls[-1].dx_meter
meanbed = dfbed.mean(axis=1).values
maxbed = dfbed.max(axis=1).values
minbed = dfbed.min(axis=1).values
# 1850
mean1850 = df1850.mean(axis=1).values
# where is mean glacier thinner than 1m
ix50 = np.where(mean1850-meanbed < 1)[0][0]
mean1850[ix50:] = np.nan
min1850 = df1850.min(axis=1).values
min1850[ix50:] = np.nan
min1850[min1850 <= meanbed] = meanbed[min1850 <= meanbed]
max1850 = df1850.max(axis=1).values
max1850[max1850 <= meanbed] = meanbed[max1850 <= meanbed]
# 2003
mean2003 = df2003.mean(axis=1).values
# where is mean glacier thinner than 1m
ix03 = np.where(mean2003-meanbed < 1)[0][0]
mean2003[ix03:] = np.nan
min2003 = df2003.min(axis=1).values
min2003[ix03:] = np.nan
min2003[min2003 <= meanbed] = meanbed[min2003 <= meanbed]
max2003 = df2003.max(axis=1).values
max2003[max2003 <= meanbed] = meanbed[max2003 <= meanbed]
lastx = np.where(initsfc-meanbed < 1)[0][0]
initsfc[lastx:] = np.nan
initsfc[lastx] = meanbed[lastx]
dis = np.arange(len(meanbed)) * dx_meter / 1000
xmax = sum(np.isfinite(mean1850))
ymax = np.nanmax(mean1850) + 50
ymin = minbed[np.where(np.isfinite(mean1850))].min() - 50
fig, ax = plt.subplots(1, figsize=[15, 9])
ax.fill_between(dis[:xmax+1], dis[:xmax+1] * 0 + ymin, minbed[:xmax+1],
color='0.7', alpha=0.5)
ax.fill_between(dis[:xmax+1], minbed[:xmax+1], maxbed[:xmax+1],
color='xkcd:tan', alpha=0.5)
ax.plot(dis[:xmax+1], meanbed[:xmax+1], 'k-', color='xkcd:tan',
linewidth=3, label='Glacier bed elevation [m]')
ax.fill_between(dis, min1850, max1850, color='xkcd:azure', alpha=0.5)
ax.plot(dis, mean1850, 'k-', color='xkcd:azure', linewidth=4,
label=('Surface elevation [m] year {:d}\n'
'(initialization state after spinup)'.
format(meta['first'])))
ax.fill_between(dis, min2003, max2003, color='xkcd:teal', alpha=0.5)
ax.plot(dis, mean2003, 'k-', color='xkcd:teal', linewidth=4,
label=('Surface elevation [m] year 2003\n'
'(from HISTALP ensemble simulations)'))
ax.plot(dis, initsfc, 'k-', color='xkcd:crimson', linewidth=4,
label=('Surface elevation [m] year 2003\n'
'(from RGI initialization)'))
ax.legend(loc=1, fontsize=20)
ax.set_ylim(ymin, ymax)
ax.set_xlim(0, dis[xmax])
ax.set_xlabel('Distance along major flowline [km]', fontsize=28)
ax.set_ylabel('Elevation [m a.s.l.]', fontsize=28)
ax.tick_params(axis='both', which='major', labelsize=26)
ax.grid(True)
ax.set_title(name, fontsize=30)
fig.tight_layout()
fn = os.path.join(pout, 'profile_%s' % rgi)
if ('3643' in rgi) or ('1450' in rgi) or ('2051' in rgi) or ('897' in rgi):
fig.savefig('{}.svg'.format(fn))
fig.savefig('{}.png'.format(fn))
def grey_madness(glcdict, pout, y_len=5):
for glid, df in glcdict.items():
# take care of merged glaciers
rgi_id = glid.split('_')[0]
fig, ax1 = plt.subplots(figsize=[20, 7])
# OGGM standard
for run in df.columns:
if run == 'obs':
continue
para = ast.literal_eval('{' + run + '}')
if ((np.abs(para['prcp_scaling_factor'] - 1.75) < 0.01) and
(para['mbbias'] == 0) and
(para['glena_factor'] == 1)):
oggmdefault = run
break
nolbl = df.loc[:, df.columns != 'obs'].\
rolling(y_len, center=True).mean().copy()
nolbl.columns = ['' for i in range(len(nolbl.columns))]
nolbl.plot(ax=ax1, linewidth=0.8, color='0.7')
df.loc[:, oggmdefault].rolling(y_len, center=True).mean().plot(
ax=ax1, linewidth=0.8, color='0.7',
label='Every possible calibration parameter combination')
df.loc[:, oggmdefault].rolling(y_len, center=True).mean().\
plot(ax=ax1, color='k', linewidth=2,
label='OGGM default parameters')
df.loc[:, 'obs'].plot(ax=ax1, color='k', marker='o',
label='Observations')
name = name_plus_id(rgi_id)
ax1.set_title('%s' % name, fontsize=28)
ax1.set_ylabel('relative length change [m]', fontsize=26)
ax1.set_xlabel('Year', fontsize=26)
ax1.set_xlim([1850, 2014])
ax1.set_ylim([-7500, 4000])
ax1.tick_params(axis='both', which='major', labelsize=22)
ax1.grid(True)
ax1.legend(bbox_to_anchor=(-0.0, -0.15), loc='upper left',
fontsize=18, ncol=2)
fig.subplots_adjust(left=0.09, right=0.99, bottom=0.24, top=0.93,
wspace=0.5)
fn1 = os.path.join(pout, 'all_%s.png' % glid)
fig.savefig(fn1)
def run_and_plot_merged_montmine(pout):
# Set-up
cfg.initialize(logging_level='WORKFLOW')
cfg.PATHS['working_dir'] = utils.gettempdir(dirname='OGGM-merging',
reset=True)
# Use a suitable border size for your domain
cfg.PARAMS['border'] = 80
cfg.PARAMS['use_intersects'] = False
montmine = workflow.init_glacier_directories(['RGI60-11.02709'],
from_prepro_level=3)[0]
gdirs = workflow.init_glacier_directories(['RGI60-11.02709',
'RGI60-11.02715'],
from_prepro_level=3)
workflow.execute_entity_task(tasks.init_present_time_glacier, gdirs)
gdirs_merged = workflow.merge_glacier_tasks(gdirs, 'RGI60-11.02709',
return_all=False,
filename='climate_monthly',
buffer=2.5)
# plot centerlines
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[20, 10])
plot_centerlines(montmine, ax=ax1, use_flowlines=True)
xt = ax1.get_xticks()
ax1.set_xticks(xt[::2])
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.set_title('entity glacier', fontsize=24)
plot_centerlines(gdirs_merged, ax=ax2, use_model_flowlines=True)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.set_title('merged with Glacier de Ferpecle', fontsize=24)
axs = fig.get_axes()
axs[3].remove()
axs[2].tick_params(axis='y', labelsize=16)
axs[2].set_ylabel('Altitude [m]', fontsize=18)
fig.suptitle('Glacier du Mont Mine', fontsize=24)
fig.subplots_adjust(left=0.04, right=0.99, bottom=0.08, top=0.89,
wspace=0.3)
fn = os.path.join(pout, 'merged_montmine.png')
fig.savefig(fn)
# run glaciers with negative t bias
# some model settings
years = 125
tbias = -1.5
    # model Mont Mine glacier as an entity and compile the output
tasks.run_constant_climate(montmine, nyears=years,
output_filesuffix='_entity',
temperature_bias=tbias)
ds_entity = utils.compile_run_output([montmine], path=False,
filesuffix='_entity')
    # model the merged glacier and compile the output
tasks.run_constant_climate(gdirs_merged, nyears=years,
output_filesuffix='_merged',
temperature_bias=tbias,
climate_filename='climate_monthly')
ds_merged = utils.compile_run_output([gdirs_merged], path=False,
filesuffix='_merged')
#
# bring them to same size again
tbias = -2.2
years = 125
tasks.run_constant_climate(montmine, nyears=years,
output_filesuffix='_entity1',
temperature_bias=tbias)
ds_entity1 = utils.compile_run_output([montmine], path=False,
filesuffix='_entity1')
# and let them shrink again
# some model settings
tbias = -0.5
years = 100
# load the previous entity run
tmp_mine = FileModel(
montmine.get_filepath('model_run', filesuffix='_entity1'))
tmp_mine.run_until(years)
tasks.run_constant_climate(montmine, nyears=years,
output_filesuffix='_entity2',
init_model_fls=tmp_mine.fls,
temperature_bias=tbias)
ds_entity2 = utils.compile_run_output([montmine], path=False,
filesuffix='_entity2')
    # model the merged glacier and compile the output
tmp_merged = FileModel(
gdirs_merged.get_filepath('model_run', filesuffix='_merged'))
tmp_merged.run_until(years)
tasks.run_constant_climate(gdirs_merged, nyears=years,
output_filesuffix='_merged2',
init_model_fls=tmp_merged.fls,
temperature_bias=tbias,
climate_filename='climate_monthly')
ds_merged2 = utils.compile_run_output([gdirs_merged], path=False,
filesuffix='_merged2')
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[20, 7])
dse = ds_entity.length.to_series().rolling(5, center=True).mean()
dsm = ds_merged.length.to_series().rolling(5, center=True).mean()
ax1.plot(dse.values, 'C1', label='Entity glacier', linewidth=3)
ax1.plot(dsm.values, 'C2', label='Merged glacier', linewidth=3)
ax1.set_xlabel('Simulation time [yr]', fontsize=20)
ax1.set_ylabel('Glacier length[m]', fontsize=20)
ax1.grid(True)
ax1.legend(loc=2, fontsize=18)
dse2 = ds_entity2.length.to_series().rolling(5, center=True).mean()
dsm2 = ds_merged2.length.to_series().rolling(5, center=True).mean()
ax2.plot(dse2.values, 'C1', label='Entity glacier', linewidth=3)
ax2.plot(dsm2.values, 'C2', label='Merged glacier', linewidth=3)
ax2.set_xlabel('Simulation time [yr]', fontsize=22)
ax2.set_ylabel('Glacier length [m]', fontsize=22)
ax2.grid(True)
ax2.legend(loc=1, fontsize=18)
ax1.set_xlim([0, 120])
ax2.set_xlim([0, 100])
ax1.set_ylim([7500, 12000])
ax2.set_ylim([7500, 12000])
ax1.tick_params(axis='both', which='major', labelsize=20)
ax2.tick_params(axis='both', which='major', labelsize=20)
fig.subplots_adjust(left=0.08, right=0.96, bottom=0.11, top=0.93,
wspace=0.3)
fn = os.path.join(pout, 'merged_montmine_timeseries.png')
fig.savefig(fn)
def climate_vs_lengthchange(dfout, pout):
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=[20, 15])
ost = dfout.loc[dfout['lon'] >= 9.5]
west = dfout.loc[dfout['lon'] < 9.5]
# ax1: temp, winter
ost.plot.scatter(x='dl 1885-1970', y='dt win', color='C1',
ax=ax1, s=80, label='Temp. Oct-Apr (East)')
ost.plot.scatter(x='dl 1885-1970', y='dt djf', color='C3',
ax=ax1, s=80, label='Temp. DJF (East)')
west.plot.scatter(x='dl 1885-1970', y='dt win', color='C2', marker='s',
ax=ax1, s=80, label='Temp. Oct-Apr (West)')
west.plot.scatter(x='dl 1885-1970', y='dt djf', color='C4', marker='s',
ax=ax1, s=80, label='Temp. DJF (West)')
    # ax2: temp, summer
ost.plot.scatter(x='dl 1885-1970', y='dt som', color='C1',
                     ax=ax2, s=80, label='Temp. May-Sep (East)')
ost.plot.scatter(x='dl 1885-1970', y='dt jja', color='C3',
ax=ax2, s=80, label='Temp. JJA (East)')
west.plot.scatter(x='dl 1885-1970', y='dt som', color='C2', marker='s',
                      ax=ax2, s=80, label='Temp. May-Sep (West)')
west.plot.scatter(x='dl 1885-1970', y='dt jja', color='C4', marker='s',
ax=ax2, s=80, label='Temp. JJA (West)')
# ax3: pcp, winter
west.plot.scatter(x='dl 1885-1970', y='dp win', color='C2', marker='s',
ax=ax3, s=80, label='Prcp. Oct-Apr (West)')
west.plot.scatter(x='dl 1885-1970', y='dp djf', color='C4', marker='s',
ax=ax3, s=80, label='Prcp. DJF (West)')
ost.plot.scatter(x='dl 1885-1970', y='dp win', color='C1',
ax=ax3, s=80, label='Prcp. Oct-Apr (East)')
ost.plot.scatter(x='dl 1885-1970', y='dp djf', color='C3',
ax=ax3, s=80, label='Prcp. DJF (East)')
    # ax4: pcp, summer
west.plot.scatter(x='dl 1885-1970', y='dp jja', color='C4', marker='s',
ax=ax4, s=80, label='Prcp. JJA (West)')
west.plot.scatter(x='dl 1885-1970', y='dp som', color='C2', marker='s',
                      ax=ax4, s=80, label='Prcp. May-Sep (West)')
ost.plot.scatter(x='dl 1885-1970', y='dp jja', color='C3',
ax=ax4, s=80, label='Prcp. JJA (East)')
ost.plot.scatter(x='dl 1885-1970', y='dp som', color='C1',
                     ax=ax4, s=80, label='Prcp. May-Sep (East)')
ax4.set_xlabel(('Equilibrium length difference\nbetween 1870-1900 '
'and 1960-1980 climate'), fontsize=20)
ax3.set_xlabel(('Equilibrium length difference\nbetween 1870-1900 '
'and 1960-1980 climate'), fontsize=20)
ax1.set_ylabel(('Temperature difference between\n 1870-1900 and '
'1960-1980 climate'), fontsize=20)
ax3.set_ylabel(('Precipitation difference between\n 1870-1900 and '
'1960-1980 climate'), fontsize=20)
ax2.set_ylabel('')
ax4.set_ylabel('')
ax1.set_xlabel('')
ax2.set_xlabel('')
ax1.set_ylim([-1.0, 0.2])
ax2.set_ylim([-1.0, 0.2])
ax3.set_ylim([-350, 50])
ax4.set_ylim([-350, 50])
for ax in [ax1, ax2, ax3, ax4]:
ax.grid(True)
ax.legend(loc=3, ncol=2, fontsize=18)
ax.set_xlim([-4, 2])
ax.tick_params(axis='both', which='major', labelsize=20)
fig.subplots_adjust(left=0.08, right=0.98, bottom=0.11, top=0.93,
wspace=0.2, hspace=0.2)
fig.savefig(os.path.join(pout, 'climate_vs_length.png'))
def histogram(pin, pout):
glena = defaultdict(int)
mbbias = defaultdict(int)
prcpsf = defaultdict(int)
for glc in GLCDICT.keys():
glid = str(glc)
if MERGEDICT.get(glc):
glid += '_merged'
rundictpath = os.path.join(pin, 'runs_%s.p' % glid)
rundict = pickle.load(open(rundictpath, 'rb'))
ens = rundict['ensemble']
for run in ens:
para = ast.literal_eval('{' + run + '}')
prcpsf[para['prcp_scaling_factor']] += 1
glena[para['glena_factor']] += 1
mbbias[para['mbbias']] += 1
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=[20, 7])
ax1.bar(list(glena.keys()), glena.values(), width=0.4)
ax1.set_xlabel('Glen A factor', fontsize=22)
ax1.set_ylabel('# used in ensemble', fontsize=22)
ax2.bar(list(prcpsf.keys()), prcpsf.values(), width=0.2)
ax2.set_xlabel('Prcp SF factor', fontsize=22)
ax2.set_ylabel('# used in ensemble', fontsize=22)
ax3.bar(list(mbbias.keys()), mbbias.values(), width=150)
ax3.set_xlabel('MB bias', fontsize=22)
ax3.set_ylabel('# used in ensemble', fontsize=22)
for ax in [ax1, ax2, ax3]:
ax.tick_params(axis='both', which='major', labelsize=20)
ax.grid(True)
fig.subplots_adjust(left=0.08, right=0.98, bottom=0.11, top=0.93,
wspace=0.2, hspace=0.2)
fig.savefig(os.path.join(pout, 'histo.png'))
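# Illustrative driver sketch (the pickle path and output directory are
# hypothetical; the real entry points live elsewhere in the relic package):
#
#   glcdict = pickle.load(open('glacier_length_changes.p', 'rb'))
#   past_simulation_and_params(glcdict, pout='plots', y_len=5)
#   grey_madness(glcdict, pout='plots', y_len=5)
#   histogram(pin='plots', pout='plots')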
| 1.914063
| 2
|
test/test_database.py
|
Noiredd/Filmatyk
| 2
|
12778636
|
import os
import sys
from typing import List, Set, Tuple
import unittest
sys.path.append(os.path.join('..', 'filmatyk'))
import containers
import database
import filmweb
class DatabaseDifference():
"""Represents a difference between two DBs.
Can be constructed using the "compute" @staticmethod, which can be used to
replace the __ne__ (!=) operator on the Database class. This way, comparing
(db1 != db2) returns an instance of this class, which:
* holds detailed information on difference, specifically two sets of IDs (one
for objects present in db1 and not in db2, other for vice-versa) and a list
of all differing Items,
* is bool-convertible, allowing its usage in if clauses,
* has a __repr__ so can be pretty printed.
Example usage:
db1:database.Database
db2:database.Database
diff = db1 != db2
print(diff)
"""
@staticmethod
def ne_to_eq(a, b):
"""Since overriding __ne__ by "compute" makes more sense than __eq__,
we invert != to obtain ==, not the other way around.
"""
return not (a != b)
@staticmethod
def compute(db1, db2):
"""Finds the difference between the two objects."""
# Work with IDs only
ids1 = set(item.getRawProperty('id') for item in db1)
ids2 = set(item.getRawProperty('id') for item in db2)
# Compute differences
common_ids = ids1.intersection(ids2)
only_in_1 = ids1.difference(common_ids)
only_in_2 = ids2.difference(common_ids)
# Extract Item instances for pretty printing
items_1 = [item for item in db1 if item.getRawProperty('id') in only_in_1]
items_2 = [item for item in db2 if item.getRawProperty('id') in only_in_2]
return DatabaseDifference(only_in_1, only_in_2, items_1+items_2)
def __init__(self, ids1:Set[int], ids2:Set[int], items:List[containers.Item]):
self.ids1 = ids1
self.ids2 = ids2
self.items = {item.getRawProperty('id'): item for item in items}
self.equal = len(self.ids1) == 0 and len(self.ids2) == 0
def __str__(self):
if self.equal:
return 'These databases are equal!'
else:
lines = []
if self.ids1:
lines.append('These {} IDs were present only in DB1:'.format(len(self.ids1)))
lines.extend('\t{} ({})'.format(i, self.items[i]['title']) for i in self.ids1)
if self.ids2:
lines.append('These {} IDs were present only in DB2:'.format(len(self.ids2)))
lines.extend('\t{} ({})'.format(i, self.items[i]['title']) for i in self.ids2)
return '\n'.join(lines)
def __repr__(self):
    return str(self)
def __bool__(self):
return not self.equal
class FakeAPI(filmweb.FilmwebAPI):
"""Loads cached data instead of connecting online.
When initializing, will look for HTML files in the given directory and treat
them as "pages" to load data from, later when emulating "getItemsPage".
"""
def __init__(self, src_path:str='', itemtype:str='Movie'):
super(FakeAPI, self).__init__(None)
self.src_path = src_path
self.page_paths = self.initPages()
self.item_count, self.items_per_page = self.initAnalyze(itemtype)
def initPages(self):
"""Finds HTML files with movie ratings cached by the API tests."""
if not os.path.exists(self.src_path):
return []
pages = [
item.path for item in os.scandir(self.src_path)
if item.name.endswith('.html') and item.name.startswith('movies_')
]
return pages
def initAnalyze(self, itemtype:str):
"""Checks how many items are in the stored files, and how many per page."""
counts = []
for path in self.page_paths:
page = self.fetchPage(path)
items = self.parsePage(page, itemtype)
counts.append(len(items))
# Return in the same format as getNumOf.
# The first page will either have exactly as many items as any other page,
# or will contain all items - in either case its length being the count of
# items per page.
return sum(counts), counts[0]
def checkSession(self):
"""First part of the hack - don't bother with the session at all."""
return True
def fetchPage(self, path:str):
"""Load HTML from file instead of URL."""
with open(path, 'r', encoding='utf-8') as html:
page = filmweb.BS(html.read(), features='lxml')
return page
def getItemsPage(self, itemtype:str, page:int=1):
"""Hack to use cached HTMLs instead of online session."""
path = self.page_paths[page - 1]
#path = os.path.join(self.src_path, 'movies_{}.html'.format(page))
page = self.fetchPage(path)
items = self.parsePage(page, itemtype)
return items
def getNumOf(self, itemtype:str):
"""Simply return the values we have computed earlier (initAnalyze)."""
return self.item_count, self.items_per_page
class UpdateScenario():
"""Database modification scenario to obtain a simulated previous state.
Contains:
* a list of Item indices to remove from the Database - a new Database created
via this removal will look like these items were yet to be added,
* a list of tuples of Item indices and IDs to add to the Database - simulates
removal of items in the same manner.
"""
def __init__(self, removals:List[int]=[], additions:List[Tuple[int,int]]=[]):
self.removals = removals
self.additions = additions
class TestDatabaseCreation(unittest.TestCase):
"""Basic test for Database loading data from scratch using the API."""
@classmethod
def setUpClass(self):
self.api = FakeAPI('assets')
def test_creation(self):
"""Create a new Database and fill it with items using (Fake)API.
Basically checks whether a new instance has as many items in it as the API
says there are available, and whether these items are actually instances of
the Item class.
"""
db = database.Database(
itemtype='Movie',
api=self.api,
callback=lambda x: x,
)
db.hardUpdate()
known_count, _ = self.api.getNumOf('Movie')
self.assertEqual(len(db.items), known_count)
self.assertIsInstance(db.items[0], containers.Item)
class TestDatabaseSerialization(unittest.TestCase):
"""Test Database serialization and deserialization.
The only test in this case validates the whole serialization-deserialization
cycle, so if anything goes wrong, it will be hard to say which functionality
is actually broken.
"""
@classmethod
def setUpClass(self):
self.api = FakeAPI('assets')
def test_serialization(self):
"""Serialize and deserialize a Database, check if they look the same."""
original = database.Database(
itemtype='Movie',
api=self.api,
callback=lambda x: x,
)
# Load some initial data
original.hardUpdate()
# Serialize/deserialize cycle
string = original.storeToString()
restored = database.Database.restoreFromString(
itemtype='Movie',
string=string,
api=self.api,
callback=lambda x: x,
)
self.assertEqual(original, restored)
class TestDatabaseUpdates(unittest.TestCase):
"""Test Database updates capability in different initial conditions.
Each test consists of the following 3 steps:
* load an original Database,
* perform some change to its content, simulating some earlier point in time
(e.g. where some Items were not yet present),
* call a soft update.
The desired result is a Database back in the original state. Any differences
are considered failures.
The update itself is performed via a proxy, which loads data cached from
earlier tests instead of requiring a live and authenticated session.
"""
@classmethod
def setUpClass(self):
self.api = FakeAPI('assets')
# Create the original database
self.orig_db = database.Database(
itemtype='Movie', api=self.api, callback=lambda x: x
)
# Fill it with available cached data
for i in range(len(self.api.page_paths)):
self.orig_db.items += self.api.getItemsPage('Movie', page=i+1)
@classmethod
def makeModifiedDatabase(self, scenario:UpdateScenario):
"""Creates a new DB by modifying the copy according to the scenario."""
# Create a bare new instance
new_db = database.Database(
itemtype=self.orig_db.itemtype,
api=self.orig_db.api,
callback=self.orig_db.callback,
)
# Remove items according to the scenario
new_db.items = [
item for i, item in enumerate(self.orig_db.items)
if i not in scenario.removals
]
# Add new items according to the scenario
# The items are all clones of the last available item, with changed ID
template = new_db.items[-1].asDict()
template.pop('id') # that will be replaced
item_cls = containers.classByString[new_db.itemtype]
# Create items and insert on their respective places
for index, item_id in scenario.additions:
new_item = item_cls(id=item_id, **template)
new_db.items.insert(index, new_item)
return new_db
def __test_body(self, scenario):
"""Since they all look the same..."""
alter_db = self.makeModifiedDatabase(scenario)
# Make sure the databases are actually different!
self.assertNotEqual(alter_db, self.orig_db)
# Call update and check difference
alter_db.softUpdate()
self.assertEqual(alter_db, self.orig_db)
# Addition tests
def test_singleAddition(self):
"""Add a single missing item."""
scenario = UpdateScenario(removals=[0])
self.__test_body(scenario)
def test_simpleAddition(self):
"""Add a few items missing from the first page."""
scenario = UpdateScenario(removals=[0, 1, 2])
self.__test_body(scenario)
def test_massiveAddition(self):
"""Add over one full page of new items."""
scenario = UpdateScenario(removals=list(range(37)))
self.__test_body(scenario)
def test_randomAddition(self):
"""Add an item missing from somewhere on the first page."""
scenario = UpdateScenario(removals=[4])
self.__test_body(scenario)
def test_nonContinuousAddition(self):
"""Add a few items non-continuously missing from the first page."""
scenario = UpdateScenario(removals=[0, 1, 2, 3, 6])
self.__test_body(scenario)
def test_multipageAddition(self):
"""Add a few items non-continuously missing from multiple pages."""
scenario = UpdateScenario(removals=[0, 1, 2, 16, 30, 32])
self.__test_body(scenario)
# Removal tests - are all expected to fail at this moment.
def test_singleRemoval(self):
"""Remove a single item from the first page."""
scenario = UpdateScenario(additions=[(0, 666)])
self.__test_body(scenario)
def test_simpleRemoval(self):
"""Remove a few items from the first page."""
scenario = UpdateScenario(additions=[(0, 666), (1, 4270)])
self.__test_body(scenario)
def test_randomRemoval(self):
"""Remove an item from somewhere on the first page."""
scenario = UpdateScenario(additions=[(4, 420)])
self.__test_body(scenario)
def test_nonContinuousRemoval(self):
"""Remove a few items non-continuously from the first page."""
scenario = UpdateScenario(
additions=[(0, 666), (1, 4270), (2, 2137), (5, 61504)]
)
self.__test_body(scenario)
def test_multipageRemoval(self):
"""Remove a few items non-continuously from multiple pages."""
scenario = UpdateScenario(
additions=[(3, 666), (4, 4270), (15, 2137), (35, 61504)]
)
self.__test_body(scenario)
# Other tests - for future features.
def test_additionRemoval(self):
"""Add and remove a few items at once, but only from the first page."""
scenario = UpdateScenario(
removals=[0, 1, 2, 9, 13],
additions=[(3, 1991), (4, 37132)]
)
self.__test_body(scenario)
def test_complexAdditionRemoval(self):
"""Add and remove a few items at once from multiple pages."""
scenario = UpdateScenario(
removals=[0, 1, 2, 9, 23, 35, 36],
additions=[(3, 1991), (4, 37132), (28, 628)]
)
self.__test_body(scenario)
@unittest.skip('Relevant feature not implemented yet.')
def test_difficultAdditionRemoval(self):
"""Add and remove a few items at once from multiple pages WITH BALANCE.
This test is extremely difficult because it is impossible to recognize such
scenario in real usage (online), by looking at getNumOf alone. That number
only shows the total balance of added/removed items. If that balance evens
out on any page further than 1st (like in the case of removing some items
and adding the same number of items), it is impossible to spot to any fast
and simple algorithm (i.e. one that does not deeply inspect all pages).
"""
scenario = UpdateScenario(
removals=[0, 1, 2, 9, 33],
additions=[(3, 1991), (34, 37132)]
)
self.__test_body(scenario)
def test_hardUpdate(self):
"""Make "random" removals and additions, then hard update."""
scenario = UpdateScenario(
removals=[1, 5, 6, 7, 40],
additions=[(0, 666), (13, 667)]
)
alter_db = self.makeModifiedDatabase(scenario)
self.assertNotEqual(alter_db, self.orig_db)
alter_db.hardUpdate()
self.assertEqual(alter_db, self.orig_db)
if __name__ == "__main__":
database.Database.__ne__ = DatabaseDifference.compute
database.Database.__eq__ = DatabaseDifference.ne_to_eq
unittest.main()
| 3.25
| 3
|
webapp/polls/admin.py
|
tristanrobert/batch7_rse
| 5
|
12778637
|
from django.contrib import admin
from .models import Company, DPEF, Sentence, ActivitySector
admin.site.register(Company)
admin.site.register(DPEF)
admin.site.register(Sentence)
admin.site.register(ActivitySector)
| 1.242188
| 1
|
src/pyclts/inventories.py
|
XachaB/pyclts
| 6
|
12778638
|
<filename>src/pyclts/inventories.py<gh_stars>1-10
"""
Module handles different aspects of inventory comparison.
"""
import attr
from collections import OrderedDict, namedtuple
from pyclts.api import CLTS
import statistics
from pyclts.cli_util import Table
from pyclts.util import jaccard
def reduce_features(sound, ts=None, features=None):
ts = ts or CLTS().bipa
features = features or {
"consonant": ["phonation", "place", "manner"],
"vowel": ["roundedness", "height", "centrality"],
"tone": ["start"],
}
sound_ = ts[sound] if isinstance(sound, str) else sound
if sound_.type in ["cluster", "diphthong"]:
return reduce_features(sound_.from_sound, ts=ts, features=features)
name = "{} {}".format(
" ".join(
[s for s in [sound_.featuredict.get(x) for x in features[sound_.type]] if s]
),
sound_.type,
)
if sound_.type != "tone":
return ts[name]
return ts["short " + " ".join(name.split(" "))]
class GetAttributeFromSound:
def __init__(self, attr):
self.attr = attr
def __get__(self, obj, objtype=None):
return getattr(obj.sound, self.attr, None)
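# Note: GetAttributeFromSound is a small descriptor; e.g. Phoneme(sound=s).type
# transparently proxies to the wrapped pyclts sound's ``type`` attribute and
# returns None when the sound is missing or lacks that attribute.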
@attr.s
class Phoneme:
"""
Base class for handling sounds.
"""
grapheme = attr.ib(default=None)
graphemes_in_source = attr.ib(default=None, repr=False)
occs = attr.ib(default=None, repr=False)
sound = attr.ib(default=None)
type = GetAttributeFromSound("type")
name = GetAttributeFromSound("name")
featureset = GetAttributeFromSound("featureset")
def __len__(self):
return len(self.occs)
def __str__(self):
return self.grapheme
def similarity(self, other):
if self.type not in ["marker", "unknownsound"]:
return self.sound.similarity(other.sound)
if self == other:
return 1
return 0
class GetSubInventoryByType:
def __init__(self, types):
def select_sounds(inventory):
return OrderedDict(
[(k, v) for k, v in inventory.items() if v.type in types]
)
self.select_sounds = select_sounds
def __get__(self, obj, objtype=None):
return self.select_sounds(obj.sounds)
class GetSubInventoryByProperty(GetSubInventoryByType):
def __init__(self, types, properties):
GetSubInventoryByType.__init__(self, types)
self.properties = properties
def __get__(self, obj, objtype=None):
out = OrderedDict()
sounds = self.select_sounds(obj.sounds)
for k, v in sounds.items():
stripped = obj.ts.features.get(
frozenset([s for s in v.featureset if s not in self.properties])
)
if str(stripped) != str(v) and str(stripped) not in sounds:
out[k] = v
elif str(stripped) == str(v):
out[k] = v
return out
@attr.s
class Inventory:
id = attr.ib(default=None)
language = attr.ib(default=None)
sounds = attr.ib(default=None, repr=False)
ts = attr.ib(default=None, repr=False)
consonants = GetSubInventoryByType(["consonant"])
consonants_by_quality = GetSubInventoryByProperty(
["consonant"], ["long", "ultra-long", "mid-long", "ultra-short"]
)
consonant_sounds = GetSubInventoryByType(["consonant", "cluster"])
vowels = GetSubInventoryByType(["vowel"])
vowels_by_quality = GetSubInventoryByProperty(
["vowel"], ["long", "ultra-long", "mid-long", "ultra-short"]
)
vowel_sounds = GetSubInventoryByType(["vowel", "diphthong"])
segments = GetSubInventoryByType(["consonant", "vowel", "cluster", "diphthong"])
tones = GetSubInventoryByType(["tone"])
markers = GetSubInventoryByType(["marker"])
clusters = GetSubInventoryByType(["cluster"])
diphthongs = GetSubInventoryByType(["diphthong"])
unknownsounds = GetSubInventoryByType(["unknownsound"])
@classmethod
def from_list(cls, *list_of_sounds, id=None, language=None, ts=None):
ts = ts or CLTS().bipa
sounds = OrderedDict()
for itm in list_of_sounds:
sound = ts[itm]
try:
sounds[str(sound)].graphemes_in_source.append(itm)
except KeyError:
sounds[str(sound)] = Phoneme(
grapheme=str(sound),
graphemes_in_source=[sound.grapheme],
occs=[],
sound=sound,
)
return cls(sounds=sounds, ts=ts, language=language, id=id)
def __len__(self):
return len(self.sounds)
def tabulate(self, format="pipe", types=None):
types = types or ["sounds"]
table = []
for t in types:
for sound in getattr(self, t).values():
table += [[sound.grapheme, sound.type, sound.name, len(sound)]]
with Table(
namedtuple("args", "format")(format),
"Grapheme",
"Type",
"Name",
"Frequency",
) as table_text:
table_text += table
def strict_similarity(self, other, aspects=None):
aspects = aspects or ["sounds"]
scores = []
for aspect in aspects:
soundsA, soundsB = (
{sound for sound in getattr(self, aspect)},
{sound for sound in getattr(other, aspect)},
)
if soundsA or soundsB:
scores += [jaccard(soundsA, soundsB)]
if not scores:
return 0
return statistics.mean(scores)
def approximate_similarity(self, other, aspects=None):
aspects = aspects or ["sounds"]
def approximate(soundsA, soundsB):
matches = []
for soundA in soundsA:
best_match, best_sim = None, 0
for soundB in soundsB:
current_sim = soundA.similarity(soundB)
if current_sim > best_sim:
best_match = soundB
best_sim = current_sim
if best_match is not None:
matches += [best_sim]
soundsB = [s for s in soundsB if s != best_match]
matches += [0 for s in soundsB]
return statistics.mean(matches)
scores = []
for aspect in aspects:
soundsA, soundsB = (
getattr(self, aspect).values(),
getattr(other, aspect).values(),
)
if soundsA and soundsB:
scores += [
statistics.mean(
[approximate(soundsA, soundsB), approximate(soundsB, soundsA)]
)
]
elif soundsA or soundsB:
scores += [0]
if not scores:
return 0
return statistics.mean(scores)
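if __name__ == "__main__":
    # Minimal usage sketch (assumes a local CLTS dataset is available to pyclts;
    # the graphemes and language names below are illustrative only).
    inv_a = Inventory.from_list("p", "t", "k", "a", "i", "u", id="A", language="LangA")
    inv_b = Inventory.from_list("p", "t", "k", "g", "a", "i", id="B", language="LangB")
    print("strict:", inv_a.strict_similarity(inv_b, aspects=["consonants", "vowels"]))
    print("approximate:", inv_a.approximate_similarity(inv_b, aspects=["consonants", "vowels"]))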
| 2.421875
| 2
|
tests/test_wellknowntext.py
|
akrherz/pyIEM
| 29
|
12778639
|
"""tests"""
import pytest
from shapely.geometry import Point, Polygon, LineString
from pyiem import wellknowntext
def test_parsecoordinate_lists():
"""Parse!"""
with pytest.raises(ValueError):
wellknowntext.parse_coordinate_lists(" ")
def test_unknown():
"""Test an emptry string."""
with pytest.raises(ValueError):
wellknowntext.convert_well_known_text("")
def test_wkt():
"""Try the properties function"""
wkt = "SRID=4326;POINT(-99 43)"
geom = wellknowntext.convert_well_known_text(wkt)
assert Point(geom) == Point([-99, 43])
wkt = """MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),
((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),
(30 20, 20 15, 20 25, 30 20)))"""
geom = wellknowntext.convert_well_known_text(wkt)
assert abs(Polygon(geom[0]).area - 87.5) < 0.1
wkt = """MULTILINESTRING ((10 10, 20 20, 10 40),
(40 40, 30 30, 40 20, 30 10))"""
geom = wellknowntext.convert_well_known_text(wkt)
assert abs(LineString(geom[0]).length - 36.5) < 0.1
wkt = """LINESTRING (30 10, 10 30, 40 40)"""
geom = wellknowntext.convert_well_known_text(wkt)
assert abs(LineString(geom).length - 59.9) < 0.1
wkt = """POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))"""
geom = wellknowntext.convert_well_known_text(wkt)
assert abs(Polygon(geom[0]).area - 550.0) < 0.1
wkt = """POLYGON q((30 10, 40 40, 20 40, 10 20, 30 10))q"""
with pytest.raises(ValueError):
wellknowntext.convert_well_known_text(wkt)
wkt = """RARRR q((30 10, 40 40, 20 40, 10 20, 30 10))q"""
with pytest.raises(ValueError):
wellknowntext.convert_well_known_text(wkt)
with pytest.raises(ValueError):
wellknowntext.convert_well_known_text("")
| 2.609375
| 3
|
api/lime_comb_api/database.py
|
n0npax/lime-comb
| 1
|
12778640
|
<reponame>n0npax/lime-comb
import base64
import logging
import sys
from flask import g
from google.cloud import firestore
from werkzeug.exceptions import Unauthorized
logging.basicConfig(stream=sys.stdout)
app_name = "lime-comb"
logger = logging.getLogger(app_name)
def doc_path(*, email, key_type, key_name):
_, domain = email.split("@")
return f"{domain}", f"{email}/{key_name}/{key_type}"
db = firestore.Client(project=app_name)
def get_gpg(email, key_name, *, key_type="pub"):
if email != g.email and key_type == "priv":
raise Unauthorized("Tried to query not owned and private data")
collection_id, name = doc_path(email=email, key_type=key_type, key_name=key_name)
logger.info(
f"(firebase registry) pull gpgs for {email} as {name} from {collection_id}"
)
document = db.collection(collection_id).document(name).get().to_dict()
if not "data" in document:
logger.error("Cannot fetch gpg key")
return None
document.update({"id": key_name, "email": email})
return document
def get_gpgs(email, *, key_type="pub"):
for key_name in list_gpg_ids(email, key_type=key_type):
yield get_gpg(email, key_type=key_type, key_name=key_name)
def put_gpg(email, data, key_name, *, key_type="pub", password=None):
print("a" * 55, g.email)
if email != g.email:
raise Unauthorized("Tried to mutate not owned data")
collection_id, name = doc_path(email=email, key_type=key_type, key_name=key_name)
logger.info(
f"(firebase registry) push gpg for {email} as {name} from {collection_id}"
)
pub_key = db.collection(collection_id).document(name)
document = {"data": data}
if key_type == "priv" and password:
document["password"] = password
pub_key.set(document)
def list_gpg_ids(email, key_type="pub"):
collection_id, _ = doc_path(
email=email, key_type=key_type, key_name="key_id_placeholder"
)
logger.info(
f"(firebase registry) list gpgs for {email}(just email) from {collection_id}"
)
for d in db.collection(collection_id).document(email).collections():
yield d.id
def delete_gpg(email, key_name, *, key_type="pub"):
if email != g.email:
raise Unauthorized("Tried to mutate not owned data")
collection_id, name = doc_path(email=email, key_type=key_type, key_name=key_name)
logger.info(
f"(firebase registry) rm gpg for {email} as {name} from {collection_id}"
)
return db.collection(collection_id).document(name).delete()
def _decode_base64(s):
return base64.b64decode(s).decode("utf-8")
def _encode_base64(s):
return base64.b64encode(s.encode("utf-8")).decode("utf-8")
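if __name__ == "__main__":
    # Minimal sketch (illustrative values only; running the module directly also
    # requires Firestore credentials because a client is created at import time above).
    sample = "dummy gpg key material"
    assert _decode_base64(_encode_base64(sample)) == sample
    print(doc_path(email="user@example.com", key_type="pub", key_name="work"))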
| 2.390625
| 2
|
amlpp/transformers/categorical.py
|
Asirg/papds
| 1
|
12778641
|
from sklearn.preprocessing import OrdinalEncoder
from typing import List, Union
import pandas as pd
import numpy as np
from ._base_transform import BaseTransform
##############################################################################
class CategoricalEncoder(BaseTransform):
""" Categorical encoder
Parameters
----------
columns: List [str]
Columns that encode
"""
def __init__(self, columns:List[str]):
super().__init__({'columns':columns})
self.encoder = {column: OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value = np.nan) for column in columns}
    def fit(self, X:pd.DataFrame, Y:Union[pd.DataFrame, pd.Series]):
for column in self.encoder.copy():
if column in X.columns:
X_fit = pd.DataFrame(X[column].loc[~X[column].isnull()])
if len(X_fit) > 0:
self.encoder[column].fit(X_fit)
else:
self.encoder[column] = False
return self
    def transform(self, X:pd.DataFrame, Y:Union[pd.DataFrame, pd.Series] = None):
# print(X.columns)
for column in self.encoder:
if column in X.columns:
if self.encoder[column]:
X[column] = self.encoder[column].transform(pd.DataFrame(X[column].fillna('NAN')))
else:
del X[column]
# print(X.columns)
return X
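if __name__ == "__main__":
    # Minimal usage sketch with made-up data (assumes the package's BaseTransform is
    # importable as above): fit the encoder on one categorical column, then transform
    # a copy of the frame; values unseen during fit (including NaN) become NaN.
    df = pd.DataFrame({"color": ["red", "blue", None, "red"], "target": [1, 0, 1, 0]})
    encoder = CategoricalEncoder(columns=["color"])
    encoder.fit(df, df["target"])
    print(encoder.transform(df.copy()))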
| 3.203125
| 3
|
makePuddleworldTasks.py
|
lcary/ec-backup
| 0
|
12778642
|
"""
Makes Puddleworld tasks.
Tasks are (gridworld, text instruction) -> goal coordinate.
Credit: tasks are taken from: https://github.com/JannerM/spatial-reasoning
"""
from puddleworldPrimitives import *
from utilities import *
from task import *
from type import *
OBJECT_NAMES = ["NULL", "puddle", "star", "circle", "triangle", "heart", "spade", "diamond", "rock", "tree", "house", "horse"]
def loadPuddleWorldTasks(datafile='data/puddleworld/puddleworld.json'):
"""
Loads a pre-processed version of the Puddleworld tasks.
"""
import json
with open(datafile) as f:
result = json.load(f)
return result
def makePuddleworldTask(raw_task):
"""
Converts a raw task with
layouts (NxN array),
Objects (NxN array of object locations),
Instructions (string) and
Goals ((X, Y) coordinate)
into a task.
"""
layouts, objects, instructions, goals = raw_task
task = Task(name=instructions,
request=(arrow(tpair(tLayoutMap, tObjectMap), tLocation)),
examples=[((layouts, objects), goals)],
features=instructions)
return task
def makeTasks(train_key, test_key):
data = loadPuddleWorldTasks()
raw_train, raw_test = data[train_key], data[test_key]
train, test = [makePuddleworldTask(task) for task in raw_train], [makePuddleworldTask(task) for task in raw_test]
print(train[0].name)
print(train[0].examples)
print(train[0].features)
return train, test
def makeLocalTasks():
return makeTasks('local_train', 'local_test')
def makeGlobalTasks():
return makeTasks('global_train', 'global_test')
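if __name__ == "__main__":
    # Minimal sketch of the raw task shape expected by makePuddleworldTask.
    # All values below are made up for demonstration; real tasks come from the
    # spatial-reasoning JSON file loaded by loadPuddleWorldTasks().
    example_raw_task = (
        [[0, 0], [0, 0]], # layout grid
        [[0, 2], [0, 0]], # object grid (2 == "star" in OBJECT_NAMES)
        "go to the star", # natural-language instruction
        (0, 1), # goal coordinate
    )
    print(makePuddleworldTask(example_raw_task).name)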
| 3.421875
| 3
|
te/TE.py
|
priyadarshitathagat/te-ns
| 0
|
12778643
|
#**********************************************************************************************
# Traffic Emulator for Network Services
# Copyright 2020 VMware, Inc
# The BSD-2 license (the "License") set forth below applies to all parts of
# the Traffic Emulator for Network Services project. You may not use this file
# except in compliance with the License.
#
# BSD-2 License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE
#**********************************************************************************************
################################# MY IMPORTS ################################
from TE_UTILS import *
from TE_WORK import *
from TE_CLASS import *
from TE_DP_CONFIG import *
from TE_METRICS import *
from te_json_schema import *
# Import for the DB related stuff
# from models import * (GET BACK TO THIS)
################################# IMPORTS ####################################
try:
from copy import deepcopy, copy
import inspect
import argparse,json
from threading import Thread, Lock
import traceback
import paramiko
from flask_swagger_ui import get_swaggerui_blueprint
# Import for all the Flask library
from flask import Flask, jsonify, request
from flask import make_response,url_for
from flask_restful import Api, Resource, reqparse, fields, marshal
from flask_inputs.validators import JsonSchema
from pssh.clients import ParallelSSHClient
from pssh.exceptions import ConnectionErrorException, SSHException, AuthenticationException, UnknownHostException
from rq import Queue as rqQueue
from redis import Redis
from gevent import joinall
import time, os, re
from collections import defaultdict, OrderedDict
from datetime import datetime
import subprocess
import random
except Exception as e:
print("ERROR in importing: " + str(e))
print("TRACE: %s" %(traceback.format_exc()))
exit(1)
################################# TE REST ENDPOINT ####################################
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
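# Illustrative sketch (not used by the service): any class declared with Singleton
# as its metaclass hands back one shared instance on every call.
def _singleton_demo():
    class _Demo(metaclass=Singleton):
        pass
    # Both constructions resolve to the same cached object.
    return _Demo() is _Demo()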
TE_API = {
'setup_tedp' : 'Brings up TE data path containers',
'connect' : 'Adds datapath containers to the known list of TE Controller that way authentication can happen',
'establish_rq' : 'To be called by datapath process to let controller know the rq in its end is up',
'get_rq_details' : 'To be called by datapath process to get details to establish rq connection',
'start' : 'Starts traffic on the data path containers based on the input knobs',
'stop' : 'Stops the traffic',
'get_states' : 'To get the current variable states (Developer Debugging)',
'update_config' : 'Updates the system to a new running Config',
'clean' : 'Clears the connection b/w Controller and datapath, clears metrics collected and removes datapath container(if specified)',
'get_active_tedp' : 'Provides with dictionary mapping from host to number of active tedp processes',
'get_cpu_count' : 'Provides with host vs cpu mapping',
'alter_stat_dump_interval' : 'Alters the stat dump interval in TE_DP',
'alter_stat_collect_interval' : 'Alters the stat collect interval in TE_DP',
    'alter_metrics_collection' : 'Alters if Error, Session and VIP Metrics are to be collected or not',
'alter_memory_metrics_collection' : 'Enable Memory Metrics (Developer Debugging)',
'get_vip_metrics' : 'Provides the VIP metrics',
'get_ses_metrics' : 'Provides with overall session metrics',
'get_error_metrics' : 'Provides with ERROR metrics',
'get_memory_metrics' : 'Provides with MEMORY metrics of allocs and deallocs (Developer Debugging)',
'get_client_history' : 'Provides with historical details on run',
'get_current_te_time' : 'Provides with current time of TE',
'get_configs' : 'Provides with resource and session configs given the hash',
'update_dns' : 'Appends/overwrite DNS entry within TE_DP docker',
'reset_dns' : 'Reset DNS entry within TE_DP docker',
'execute_cmd': 'Executes the command given across te_dp machines (Must not block, use &)',
'tech_support' : 'Provides with tech support details for debugging',
'grafana' : 'To visualize, monitor and analyse metrics values',
}
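# Each key above is exposed as a POST endpoint under /api/v1.0/te/<name>
# (host and port are deployment-specific), e.g.:
#   curl -X POST http://<controller-ip>:<flask-port>/api/v1.0/te/get_states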
TE_SUB_API = {
'stop' : ['by_profile_tag', 'by_tedp_dict']
}
def abort_if_not_found(api, sub_api=None):
if api in TE_API:
return flask_obj.modules[api+'_api']
if sub_api and sub_api in TE_SUB_API.get(api, []):
return flask_obj.modules[api+'_api']
return None
def make_request_data(req_json, req_arg):
if req_json and req_arg:
        # dict.update() returns None, so merge in place and return the merged dict
        req_json.update(req_arg)
        return req_json
if req_json:
return req_json
if req_arg:
return req_arg
return {}
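# Example: make_request_data({"a": 1}, {"b": "2"}) merges the JSON body with the
# query-string arguments into {"a": 1, "b": "2"}; either side may be empty.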
class TE_REST_ENDPOINT(Resource):
def __init__(self):
global flask_obj
def post(self, te_api_name):
functionToCall = abort_if_not_found(te_api_name)
if functionToCall is None:
return make_response(jsonify({'error': "{} Not found in API List : {}".format(
te_api_name, TE_API.keys())}), 404)
data = make_request_data(request.json, request.args.to_dict())
result = functionToCall(flask_obj, data)
return result
class TE_REST_ENDPOINT_SUB_API(Resource):
def __init__(self):
global flask_obj
def post(self, te_api_name, te_sub_api_name):
lgr.info("Got api call to api={} sub_api={}".format(te_api_name, te_sub_api_name))
functionToCall = abort_if_not_found(te_api_name, te_sub_api_name)
if functionToCall is None:
return make_response(jsonify({'error': "{}/{} Not found in API List : {}".format(
te_api_name, te_sub_api_name, TE_API.keys())}), 404)
data = make_request_data(request.json, request.args.to_dict())
result = functionToCall(flask_obj, data)
return result
################################# ALL THE END POINTS TO FLASK APPLICATION ####################################
class FlaskApplicationWrapper(metaclass=Singleton):
    #Python 3 metaclass syntax; the Python 2 style __metaclass__ attribute is ignored by Python 3
modules = locals()
################################# BASIC FUNCTIONS ####################################
def __init__(self, te_daemon_ip, flask_port, redis_port, nginx_port, \
postgres_port, zmq_port, grafana_port, stat_collect_interval, stat_dump_interval, logpath,
loglevel):
#LOGGER
log_file = os.path.join(logpath, 'te.log')
self.lgr = Logger('[ TE ]', log_file, loglevel).getLogger()
self.lgr.info("Starting the TE.py Process")
try:
#TE CLASS OBJECT
self.__te_controller_obj = TE(te_daemon_ip, flask_port, redis_port, nginx_port, \
postgres_port, zmq_port, grafana_port, loglevel)
#ALL STATES OF TE-FSM
self.__TE_STATE = { 'INIT' : 0, 'RUNNING' : 1 }
#To avoid 2 overlapping calls to the same function call
self.__IS_RUNNING = defaultdict(bool)
self.__IS_STATE_ALLOWED = {
"START" : [self.__TE_STATE['INIT']],
"STOP" : [self.__TE_STATE['RUNNING']],
"UPDATE" : [self.__TE_STATE['RUNNING']]
}
#Any call that flows to tedp using core 0 must have an entry here
self.__MGMT_CALLS = ["GET_ACTIVE_TEDP", "UPDATE_DNS", "RESET_DNS", "EXECUTE_CMD", "TECH_SUPPORT", "CLEAN_TEDP"]
#PERMITTED STATES OF TE-FSM
self.__CURRENT_STATE = self.__TE_STATE['INIT']
#To validate the parameters passed
self.__SCHEMA = {'te_dp_dict' : te_dp_dict_json_schema}
#To clean all the pre-existing redis handles
self.TE_BROKER_HANDLE = Redis("0.0.0.0", int(redis_port))
self.TE_BROKER_HANDLE.flushall()
#Task Details
self.__TASK_DETAILS = defaultdict(list)
self.__setup_completed_tedps = set()
self.__connect_completed_tedps = set()
self.__all_te_dp_dict_credentials = {}
self.__tedp_config = {}
# Lock to avoid concurrent access of common DS when DPs are trying to connect to TE Controller
self.__connect_lock = Lock()
#PostgresDB is started always
postgres_port = self.__te_controller_obj.get_postgres_port()
zmq_port = self.__te_controller_obj.get_zmq_port()
self.__te_postgres_object = TE_POSTGRES(postgres_port, logpath, loglevel, \
stat_collect_interval)
self.__te_zmq_object = TE_ZMQ(te_daemon_ip, self.__te_postgres_object, zmq_port, \
logpath, loglevel, stat_collect_interval)
#Stat collection is started by default and can be switched alternatively
self.__stat_collect_interval = stat_collect_interval
self.__stat_dump_interval = stat_dump_interval
            #Metrics collection is enabled unless stat dumping is disabled (interval of 0)
            self.__metrics_enabled = (stat_dump_interval != 0)
self.__memory_metrics_enabled = False
self.lgr.info("FlaskApplicationWrapper init Success")
except:
self.lgr.error("UNABLE TO INIT FlaskApplicationWrapper %s" %traceback.format_exc())
def __not_found(self, err_message):
return make_response(jsonify({'status': False, 'Error': err_message}),404)
def __exception_occured(self, function, err_message):
try:
return jsonify({'status': False, 'function':function, 'exception': err_message})
except:
self.lgr.error("__Unable to return __exception_occured %s" %traceback.format_exc())
return jsonify({'status':False, 'unable to return':traceback.format_exc()})
def __success(self, result):
try:
return jsonify({'status': True, 'statusmessage': result})
except:
self.lgr.warning("Return without convert() failed in __success")
try:
return jsonify({'status': True, 'statusmessage': convert(result)})
except:
self.lgr.error("__Unable to return __success %s" %traceback.format_exc())
return jsonify({'status':False, 'unable to return':traceback.format_exc()})
def __failure(self, result):
try:
self.lgr.error("Failure. result={}, type(result)={}".format(result, type(result)))
return jsonify({'status': False, 'statusmessage': result})
except:
self.lgr.warning("Return without convert() failed in __failure")
try:
return jsonify({'status': False, 'statusmessage': convert(result)})
except:
self.lgr.error("__Unable to return __failure %s" %traceback.format_exc())
return jsonify({'status':False, 'unable to return':traceback.format_exc()})
def __api_state_decorator(typeOfTask):
def decorator_method(func):
def caller_func(self, json_content):
try:
#VALIDATING STATE MACHINE
validState = self.__isStateValid(typeOfTask)
if validState is not None:
return validState
#CHECKING IF THERE IS NO DOUBLE RUN
if(self.__IS_RUNNING[typeOfTask] and \
typeOfTask != "ESTABLISH_RQ" and typeOfTask !="GET_RQ_DETAILS"):
self.__IS_RUNNING[typeOfTask] = False
return self.__failure('Previous call of %s not exited' %typeOfTask)
self.__IS_RUNNING[typeOfTask] = True
#MAKING THE ACTUAL API CALL
try:
self.lgr.debug("Making the call to %s api" %typeOfTask)
result_of_api_call = func(self, json_content)
#CATCHING EXCEPTION IF ANY AND THROWING BACK TO FRONTEND
except:
self.lgr.error("ERROR IN %s: %s" %(typeOfTask, traceback.format_exc()))
result_of_api_call = self.__exception_occured(str(func).split(' ')[1], traceback.format_exc())
#CLEANING UPON API COMPLETION
self.__IS_RUNNING[typeOfTask] = False
return result_of_api_call
except:
return self.__exception_occured(str(func).split(' ')[1], traceback.format_exc())
return caller_func
return decorator_method
def __isStateValid(self, typeOfTask):
allowedStates = self.__IS_STATE_ALLOWED.get(typeOfTask, None)
if allowedStates is None:
return None
if self.__CURRENT_STATE not in allowedStates:
#INVERTING (k,v) FOR THE PURPOSE OF BETTER ERROR UNDERSTANDING
currStateInText = None
allowedStatesInText = []
for k, v in self.__TE_STATE.items():
if v == self.__CURRENT_STATE:
currStateInText = k
if v in allowedStates:
allowedStatesInText.append(k)
#MAKING AN ENTRY IN LOG AND RETURNING
self.lgr.error('STATE MACHINE ERROR Current State:%s and ALLOWED STATE to make the call are %s' %(str(currStateInText),
str(allowedStatesInText)))
return self.__not_found('STATE MACHINE ERROR Current State:%s and ALLOWED STATE to make the call are %s' %(str(currStateInText),
str(allowedStatesInText)))
return None
#Does the request validation, by looking at the current state, allowed calls to make from the current state
#requiredKeys Param check if the jsonContent has all the required params passed to it
def __checkForRequiredArgument(self, jsonContent, requiredKeys=[]):
'''
Args:
jsonContent: All the passed argument with which the REST API Call was made
requiredKeys: List of required params that must be present in jsonContent
Returns:
None if no error
Else return the error
'''
for key in requiredKeys:
if key not in jsonContent or jsonContent[key]==None:
self.lgr.error("Required parameter: %s NOT FOUND" %key)
return self.__not_found('%s is not found' %key)
return None
def __validate_schema(self, jsonContent, keyToValidate):
try:
inputToValidate = convert(jsonContent[keyToValidate])
valid_status = validate(inputToValidate, self.__SCHEMA[keyToValidate])
return (True, inputToValidate)
except Exception as e:
return (False, str(e))
def __are_all_tedps_connected(self, te_dp_dict):
set_of_host_ips = set(te_dp_dict.keys())
if(not(set_of_host_ips.issubset(self.__connect_completed_tedps))):
return False, list(set_of_host_ips - self.__connect_completed_tedps)
return True, []
#It is a wrapper around PSSH Client which validates for for exit_codes and exception and populates the problematicHost (dict)
#It retries to a default of 10 times if an SSHException is faced
#It also return a stdout dict if getStdOut is set to True
def __run_command_and_validate_output(self, client, te_dp_hosts, cmd=None, host_args={},
cleanExitCode=0, possibleExitCodesDict={}, max_retries=10, getStdOut=False,
validate_exit_codes=True):
'''
Args:
client: The PSSH client object
            te_dp_hosts: Dict mapping each host to its user/password details; used to
                build a fresh client for retries if an exception is raised
            cmd: Same command that has to be executed on all the clients
            host_args: Dict of per-host commands keyed by host ip (e.g. GET_AND_RUN_DOCKER_IMAGE.py
                needs a different host ip per client); retries then happen only on the affected hosts
            max_retries: Defaults to 10
            NOTE: cmd and host_args must not be passed in the same call
Return:
stdOut(if getStdOut is True), problematicHost
'''
def __run_command(client, cmd=None, host_args={}):
'''
Args:
cmd: Command to run on all hosts
host_args: Dictionary of args that is to be run on all machines which has a mapping from host_ip to command
'''
if ((cmd is None and host_args == {}) or (cmd is not None and host_args != {})):
return None
if host_args == {}:
self.lgr.debug("Running Command=%s" %cmd)
output = client.run_command(cmd, stop_on_errors=False)
            if cmd is None:
host_args_values = list(host_args.values())
self.lgr.debug("Running Similar Command=%s and length=%d" %(host_args_values[0], \
len(host_args_values)))
output = client.run_command("%s", host_args=host_args_values, stop_on_errors=False)
client.join(output)
return output
def __validateOutput(output):
'''
Args:
output: Output to evaluate
Uses getStdOut Flag to populate stdOut dictionary that is returned to the user
Returns:
stdOut
exceptionHostTohandle
problematicHost
'''
stdOut = {}
problematicHost = {}
exceptionHostTohandle = []
for host, runDetails in output.items():
exitCodeHost = runDetails.exit_code
exceptionHost = runDetails.exception
if(isinstance(exceptionHost, SSHException)):
self.lgr.debug("Got Exception %s in host %s" %(str(runDetails.exception), host))
exceptionHostTohandle.append(host)
elif(isinstance(exceptionHost, ConnectionErrorException)):
problematicHost[host] = "Connection refused/timed out"
elif(isinstance(exceptionHost, AuthenticationException)):
problematicHost[host] = "Authentication error (user/password/ssh key error)"
elif(isinstance(exceptionHost, UnknownHostException)):
problematicHost[host] = "Host is unknown (dns failure)"
elif(exitCodeHost is not None and validate_exit_codes and exitCodeHost != cleanExitCode):
problematicHost[host] = possibleExitCodesDict.get(exitCodeHost, "Exit Code: %d" %exitCodeHost)
#If stdout is needed
if getStdOut:
gotOut = runDetails.stdout
if gotOut is None:
self.lgr.error("Unable to get response from the client host_ip=%s"%host)
if host not in problematicHost:
problematicHost[host] = gotOut
else:
stdOut[host] = gotOut
return stdOut, exceptionHostTohandle, problematicHost
#Run the command for the first time
output = {}
output = __run_command(client, cmd=cmd, host_args=host_args)
out, exceptionHostTohandle, problematicHost = __validateOutput(output)
output.update(out)
if exceptionHostTohandle == []:
return ((output, problematicHost) if getStdOut else problematicHost)
#Retries for possible failures till max_retries
for i in range(max_retries-1):
retryExceptionPresent = False
retryHostConfig = OrderedDict()
retryHostArgs = OrderedDict()
if exceptionHostTohandle != []:
self.lgr.info("Retrying=%d/%d due to exception of Busy Client in host=%s" %(i+2,max_retries,str(exceptionHostTohandle)))
#Iterate through all exception hosts
for host in exceptionHostTohandle:
retryExceptionPresent = True
retryHostConfig[host] = te_dp_hosts[host]
if host in host_args.keys():
retryHostArgs[host] = host_args[host]
#Exit if no such unexpected exception is seen
if not(retryExceptionPresent):
return ((output, problematicHost) if getStdOut else problematicHost)
#Create New retry client and repeat the process
retryClient = ParallelSSHClient(retryHostConfig.keys(), host_config=retryHostConfig, timeout = 240)
output = __run_command(retryClient, cmd=cmd, host_args=retryHostArgs)
out, exceptionHostTohandle, retryProblematicHost = __validateOutput(output)
output.update(out)
problematicHost.update(retryProblematicHost)
del retryClient
if exceptionHostTohandle != []:
self.lgr.error("Unabe to resolve for %s even after %d retries" %(str(retryProblematicHost), max_retries))
exceptionDict = {"Running into exception" : exceptionHostTohandle}
return ((output, exceptionDict) if getStdOut else exceptionDict)
return ((output, problematicHost) if getStdOut else problematicHost)
def __verify_task_status(self, typeOfTask, max_tolerable_delay):
'''
Args:
typeOfTask: Indicates the type of task which has to be validated
            max_tolerable_delay: Maximum time within which all the results are to be fetched
'''
try:
self.lgr.debug("__verify_task_status Called")
resultDict = {}
taskVerificationThreads = []
lock = Lock()
#Iterate through all the host into which the job was assigned to
for host_ip in self.__TASK_DETAILS[typeOfTask]:
self.lgr.debug("Checking task status for %s" %host_ip)
if typeOfTask in self.__MGMT_CALLS:
taskVerificationThreads.append(Thread(target=self.__tedp_config[host_ip].get_mgmt_task_status_and_result, \
args=(typeOfTask, resultDict, lock, max_tolerable_delay)))
else:
taskVerificationThreads.append(Thread(target=self.__tedp_config[host_ip].get_task_status_and_result, \
args=(typeOfTask, resultDict, lock, max_tolerable_delay)))
taskVerificationThreads[-1].start()
for t in taskVerificationThreads:
t.join()
self.__TASK_DETAILS[typeOfTask] = []
del taskVerificationThreads
for host_ip, result in resultDict.items():
                #The "status" key is absent from the result when the call succeeded
status = isinstance(result, dict) and result.get("status", True)
if(not(status)):
return False, resultDict
return True, resultDict
except:
self.__TASK_DETAILS[typeOfTask] = []
del taskVerificationThreads
self.lgr.error("ERROR IN verify_task_status: %s" %traceback.format_exc())
return False, "Check logs for error. Bad Code/param passed %s" %traceback.format_exc()
################################# RUN THE FLASK APPLICATION ###################################
#Starts the TE Application to serve the REST Requests
def run(self):
self.__te_app = Flask(__name__)
self.__te_app.config["SWAGGER"] = {"title": "Swagger-UI", "uiversion": 2}
self.__te_app.config['BROKER_URL'] = 'redis://{}:{}/'.format(
self.__te_controller_obj.get_daemon_ip(), self.__te_controller_obj.get_redis_port())
self.__te_app.config['RESULT_BACKEND'] = 'redis://{}:{}/'.format(
self.__te_controller_obj.get_daemon_ip(), self.__te_controller_obj.get_redis_port()
)
api = Api(self.__te_app)
swagger_url = "/swagger"
api_url = "/static/te_swagger.json"
swagger_ui_blueprint = get_swaggerui_blueprint(
swagger_url, api_url, config={'app_name': "Traffic Engine"})
self.__te_app.register_blueprint(swagger_ui_blueprint, url_prefix=swagger_url)
api.add_resource(TE_REST_ENDPOINT, '/api/v1.0/te/<te_api_name>')
api.add_resource(TE_REST_ENDPOINT_SUB_API, '/api/v1.0/te/<te_api_name>/<te_sub_api_name>')
self.lgr.debug("About to run Flask ApplicationWrapper on %s:%s" \
%(self.__te_controller_obj.get_daemon_ip(), self.__te_controller_obj.get_flask_port()))
self.__te_app.run(host="0.0.0.0",\
port=int(self.__te_controller_obj.get_flask_port()), debug=False)
################################# CHANGE STAT DUMP TIME API ###################################
@__api_state_decorator("CURRENT_TIME")
def get_current_te_time_api(self, jsonContent):
curr_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.lgr.debug("get_current_te_time Called and returned %s" %curr_time)
return self.__success(curr_time)
################################# CHANGE STAT DUMP TIME API ###################################
@__api_state_decorator("STAT_DUMP_INTERVAL")
def alter_stat_dump_interval_api(self, jsonContent):
self.lgr.debug("alter_stat_dump_interval Called")
self.__stat_dump_interval = convert(jsonContent['stat_dump_interval'])
self.lgr.debug("altered stat_dump_interval is %d" %self.__stat_dump_interval)
return self.__success("Altered stat_dump_interval is %d" %self.__stat_dump_interval)
################################# CHANGE STAT DUMP TIME API ###################################
@__api_state_decorator("STAT_COLLECT_INTERVAL")
def alter_stat_collect_interval_api(self, jsonContent):
self.lgr.debug("alter_stat_collect_interval Called")
self.__stat_collect_interval = convert(jsonContent['stat_collect_interval'])
self.lgr.debug("altered stat_collect_interval is %d" %self.__stat_collect_interval)
return self.__success("Altered stat_collect_interval is %d" %self.__stat_collect_interval)
############################### CHANGE METRICS COLLECTION STATE ###############################
@__api_state_decorator("METRICS_ENABLED")
def alter_metrics_collection_api(self, jsonContent):
self.lgr.debug("alter_metrics_collection_api Called")
self.__metrics_enabled = convert(jsonContent['state'])
return self.__success("Altered metrics_enabled state is %s" %str(self.__metrics_enabled))
########################### CHANGE MEMORY METRICS COLLECION STATE #############################
@__api_state_decorator("MEMORY_METRICS_ENABLED")
def alter_memory_metrics_collection_api(self, jsonContent):
self.lgr.debug("alter_memory_metrics_collection_api Called")
self.__memory_metrics_enabled = convert(jsonContent['state'])
return self.__success("Altered memory_metrics_enabled state is %s" \
%str(self.__memory_metrics_enabled))
################################# GET VIP METRICS ###################################
@__api_state_decorator("GET_VIP_METRICS")
def get_vip_metrics_api(self, jsonContent):
self.lgr.debug("get_vip_metrics Called")
type_of_metric = convert(jsonContent['type'])
traffic_profile = convert(jsonContent['traffic_profile'])
traffic_mode = convert(jsonContent['traffic_mode'])
filter_clauses = convert(jsonContent.get('filter_clauses', {}))
is_named = convert(jsonContent.get('is_named', True))
if(type_of_metric == "TOTAL" or type_of_metric == "LAST_DIFF"):
status, result = self.__te_postgres_object.query_vip_metrics(type_of_metric, \
traffic_profile, traffic_mode, filter_clauses, is_named)
if(status):
return self.__success(result)
else:
return self.__failure(result)
else:
return self.__failure("type must either be TOTAL or LAST_DIFF")
################################# GET ERROR METRICS ###################################
@__api_state_decorator("GET_ERROR_METRICS")
def get_error_metrics_api(self, jsonContent):
self.lgr.debug("get_error_metrics Called")
type_of_metric = convert(jsonContent['type'])
filter_clauses = convert(jsonContent.get('filter_clauses', {}))
is_named = convert(jsonContent.get('is_named', True))
error_group_interval = \
convert(jsonContent.get('error_group_interval', 15))
if(type_of_metric == "TOTAL" or type_of_metric == "LAST_DIFF"):
status, result = self.__te_postgres_object.query_error_metrics(type_of_metric, \
filter_clauses, is_named, error_group_interval)
if(status):
return self.__success(result)
else:
return self.__failure(result)
else:
return self.__failure("type must either be TOTAL or LAST_DIFF")
################################# GET SES METRICS ###################################
@__api_state_decorator("GET_SES_METRICS")
def get_ses_metrics_api(self, jsonContent):
self.lgr.debug("get_ses_metrics Called")
type_of_metric = convert(jsonContent['type'])
traffic_profile = convert(jsonContent['traffic_profile'])
traffic_mode = convert(jsonContent['traffic_mode'])
filter_clauses = convert(jsonContent.get('filter_clauses', {}))
is_named = convert(jsonContent.get('is_named', True))
if(type_of_metric == "TOTAL" or type_of_metric == "LAST_DIFF"):
status, result = self.__te_postgres_object.query_ses_metrics(type_of_metric, \
traffic_profile, traffic_mode, filter_clauses, is_named)
if(status):
return self.__success(result)
else:
return self.__failure(result)
else:
return self.__failure("type must either be TOTAL or LAST_DIFF")
################################# GET MEMORY METRICS ###################################
@__api_state_decorator("GET_MEMORY_METRICS")
def get_memory_metrics_api(self, jsonContent):
self.lgr.debug("get_memory_metrics Called")
type_of_metric = convert(jsonContent['type'])
filter_clauses = convert(jsonContent.get('filter_clauses', {}))
is_named = convert(jsonContent.get('is_named', True))
if(type_of_metric == "TOTAL" or type_of_metric == "LAST_DIFF"):
status, result = self.__te_postgres_object.query_memory_metrics(type_of_metric, \
filter_clauses, is_named)
if(status):
return self.__success(result)
else:
return self.__failure(result)
else:
return self.__failure("type must either be TOTAL or LAST_DIFF")
################################# GET CONFIG HISTORY ###################################
@__api_state_decorator("GET_CLIENT_HISTORY")
def get_client_history_api(self, jsonContent):
self.lgr.debug("get_client_history_api Called")
filter_clauses = convert(jsonContent.get('filter_clauses', {}))
status, result = self.__te_postgres_object.query_client_history(filter_clauses)
if(status):
return self.__success(result)
else:
return self.__failure(result)
################################# GET CONFIGS ###################################
@__api_state_decorator("GET_CONFIGS")
def get_configs_api(self, jsonContent):
self.lgr.debug("get_configs_api Called")
res_hash_list = convert(jsonContent['res_hash_list'])
ses_hash_list = convert(jsonContent['ses_hash_list'])
is_named = convert(jsonContent.get('is_named', True))
if res_hash_list is None and ses_hash_list is None:
return self.__failure('Both res_hash_list and ses_hash_list cannot be None')
status, result = self.__te_postgres_object.query_and_get_configs(
res_hash_list, ses_hash_list, is_named)
if(status):
return self.__success(result)
else:
return self.__failure(result)
################################# GET STATES API ###################################
@__api_state_decorator("GET_STATES")
def get_states_api(self, jsonContent):
self.lgr.debug("get_tedp_states_api Called")
#TEDP STATES
tedp_states = {}
if self.__tedp_config is not None:
for host_ip, object in self.__tedp_config.items():
tedp_states[host_ip] = object.getStates()
te_flask_api_states = {'current_te_state' : self.__CURRENT_STATE,
'current_te_task_details' : self.__TASK_DETAILS,
'te_daemon_ip' : self.__te_controller_obj.get_daemon_ip(),
'flask_port' : self.__te_controller_obj.get_flask_port(),
'redis_port' : self.__te_controller_obj.get_redis_port(),
'nginx_port' : self.__te_controller_obj.get_nginx_port(),
'loglevel' : self.__te_controller_obj.get_loglevel(),
'te_dp_dict' : self.__te_controller_obj.get_te_dp(),
'connect_completed_tedps':list(self.__connect_completed_tedps),
'setup_completed_tedps' : list(self.__setup_completed_tedps),
'all_te_dp_dict_credentials' : self.__all_te_dp_dict_credentials}
statesToReturn = {'te_dp_states': tedp_states, 'te_flask_api_states':te_flask_api_states}
return self.__success(statesToReturn)
################################# GET CPU COUNT API ###################################
def __get_cpu_count_tedps(self, te_dp_hosts, client):
try:
output, problematicHost = self.__run_command_and_validate_output(client=client, \
te_dp_hosts=te_dp_hosts, cmd="nproc", getStdOut=True)
if(bool(problematicHost)):
return False, "Error while ssh-ing into clients", problematicHost
cpu_result = {}
problematicHost = {}
for host, linesOfOutput in output.items():
for line in linesOfOutput:
try:
cpus = int(line)
cpu_result[host] = cpus
except:
problematicHost[host] = line
break
if(bool(problematicHost)):
return False, "Got unexpected Response", problematicHost
return True, "Got CPU Count", cpu_result
except:
return False, "Exception Occured", traceback.format_exc()
@__api_state_decorator("GET_CPU_COUNT")
def get_cpu_count_api(self, jsonContent):
self.lgr.debug("get_cpu_count_api Called")
te_dp_dict = convert(jsonContent['te_dp_dict'])
if(not(bool(te_dp_dict))):
return self.__failure("No te_dp_dict Passed")
te_dp_hosts = {}
for host_ip, details in te_dp_dict.items():
te_dp_hosts[host_ip] = {'user': details.get('user','root')}
passwd = details.get('passwd', None)
if passwd:
                te_dp_hosts[host_ip]['password'] = passwd
client = ParallelSSHClient(te_dp_hosts.keys(), host_config=te_dp_hosts, timeout = 240)
status, msg, result = self.__get_cpu_count_tedps(te_dp_hosts, client)
del client
if(not(status)):
return self.__failure({msg:result})
return self.__success(result)
def __run_mgmt_command(self, te_dp_hosts, global_cmd=None, per_host_cmd=None, task=None, \
job_timeout=None, max_tolerable_delay=120):
try:
problematicHost = []
CURRENT_TASK = task
for host_ip in te_dp_hosts:
enqueuedCall = False
if(bool(global_cmd)):
enqueuedCall = self.__tedp_config[host_ip].run_mgmt_command_helper(
run_mgmt_command_te_dp, {"cmd":global_cmd, "task":task}, job_timeout)
elif(bool(per_host_cmd)):
cmd_to_enqueue = per_host_cmd.get(host_ip,None)
if(bool(cmd_to_enqueue)):
enqueuedCall = self.__tedp_config[host_ip].run_mgmt_command_helper(
run_mgmt_command_te_dp, {"cmd":cmd_to_enqueue, "task":task})
else:
problematicHost.append(host_ip)
else:
problematicHost.append(host_ip)
#Add to Assigned TASK_DETAILS
if enqueuedCall:
self.__TASK_DETAILS[CURRENT_TASK].append(host_ip)
else:
problematicHost.append(host_ip)
if(bool(problematicHost)):
self.__tedp_config[host_ip].clean_task_details(CURRENT_TASK)
self.__TASK_DETAILS[CURRENT_TASK] = []
return False, "Unable to perform mgmt_call TEDPs (Fail at enqueue level)", problematicHost
if(job_timeout is not None):
max_time_wait = max(max_tolerable_delay, job_timeout)
else:
max_time_wait = max_tolerable_delay
status, result = self.__verify_task_status(task, max_time_wait)
if status:
return True, "Success", result
#RQ Failure / Incomplete task / bad code
return False, "Unexpected result", result
except:
return False, "Exception Occured", traceback.format_exc()
################################# GET ACTIVE TEDP API ###################################
@__api_state_decorator("GET_ACTIVE_TEDP")
def get_active_tedp_api(self, jsonContent):
self.lgr.debug("get_active_tedp_api Called")
        #Get the current te_dp_dict maintained if param passed is empty
te_dp_dict = convert(jsonContent['tedps_to_query'])
if(bool(te_dp_dict)):
set_of_passed_dict = set(te_dp_dict.keys())
if(not(set_of_passed_dict.issubset(self.__connect_completed_tedps))):
return self.__failure({"Passed te_dp dict is not a subset of Connected TEDP" : \
list(self.__connect_completed_tedps)})
else:
self.lgr.debug("get_active_tedp: No param passed")
te_dp_dict = self.__te_controller_obj.get_te_dp()
#If still empty, return
if(not(bool(te_dp_dict))):
return self.__failure("No te_dps are running")
cmd = "ps aux | grep te_dp | grep -v grep | wc -l"
status, msg, result = self.__run_mgmt_command(te_dp_dict.keys(), global_cmd=cmd, task="GET_ACTIVE_TEDP")
if(status):
return self.__success(result)
else:
return self.__failure({msg:result})
@__api_state_decorator("UPDATE_DNS")
def update_dns_api(self, jsonContent):
self.lgr.debug("update_dns_api Called")
        #Get the current te_dp_dict maintained if param passed is empty
te_dp_dict = convert(jsonContent.get('te_dp_dict', {}))
global_dns = convert(jsonContent.get('global_dns', []))
overwrite = convert(jsonContent['overwrite'])
problematicHost = []
if(overwrite):
redirector = ">"
else:
redirector = ">>"
if(bool(te_dp_dict)):
#If te_dp_dict is present, update the DNS only in those Clients
set_of_passed_dict = set(te_dp_dict.keys())
per_host_cmd = {}
#Check if all the passed clients are connected
if(not(set_of_passed_dict.issubset(self.__connect_completed_tedps))):
return self.__failure({"Passed te_dp dict is not a subset of Connected TEDP" : \
list(self.__connect_completed_tedps)})
else:
#Iterate through all the clients and frame the appropriate command
for host_ip, details in te_dp_dict.items():
cmd = ""
for tuple_item in details:
if(len(tuple_item) == 2):
cmd += "%s %s\n" %(str(tuple_item[0]), str(tuple_item[1]))
else:
problematicHost.append(host_ip)
if(bool(cmd)):
cmd = "printf '%s' %s /etc/resolv.conf" %(cmd, redirector)
self.lgr.debug("Command for %s is %s" %(host_ip, cmd))
per_host_cmd[host_ip] = cmd
if(bool(problematicHost)):
return self.__failure({"improper input": problematicHost})
if(bool(per_host_cmd)):
status, msg, result = self.__run_mgmt_command(te_dp_dict.keys(), \
per_host_cmd=per_host_cmd, task="UPDATE_DNS")
else:
return self.__failure("Nothing to add/append")
elif(bool(global_dns)):
#If global_dns is present add the same DNS to all TE-DP Clients
cmd = ""
for tuple_item in global_dns:
if(len(tuple_item) == 2):
cmd += "%s %s\n" %(str(tuple_item[0]), str(tuple_item[1]))
else:
problematicHost.append(tuple_item)
if(bool(problematicHost)):
return self.__failure({"improper input": problematicHost})
if(bool(cmd)):
cmd = "printf '%s' %s /etc/resolv.conf" %(cmd, redirector)
self.lgr.debug("Command is %s" %(cmd))
status, msg, result = self.__run_mgmt_command(self.__connect_completed_tedps, \
global_cmd=cmd, task="UPDATE_DNS")
else:
#If both are absent, throw an error
self.__failure("Both global_dns and te_dp_dict cannot be empty")
if(status):
return self.__success(result)
else:
return self.__failure({msg:result})
@__api_state_decorator("RESET_DNS")
def reset_dns_api(self, jsonContent):
self.lgr.debug("reset_dns_api Called")
        #Get the current te_dp_dict maintained if param passed is empty
te_dp_dict = convert(jsonContent['te_dp_dict'])
if(bool(te_dp_dict)):
#If te_dp_dict is passed, reset only in those clients
set_of_passed_dict = set(te_dp_dict.keys())
#Check if all the passed clients are connected
if(not(set_of_passed_dict.issubset(self.__connect_completed_tedps))):
return self.__failure({"Passed te_dp dict is not a subset of Connected TEDP" : \
list(self.__connect_completed_tedps)})
else:
#If nothing is passed, reset DNS on all clients
self.lgr.debug("reset_dns: No param passed")
te_dp_dict = self.__connect_completed_tedps
if(not(bool(te_dp_dict))):
return self.__failure("No TEDPs connected")
#Command to clean /etc/resolv.conf
cmd = "> /etc/resolv.conf"
status, msg, result = self.__run_mgmt_command(te_dp_dict, global_cmd=cmd, task="RESET_DNS")
if(status):
return self.__success(result)
else:
return self.__failure({msg:result})
@__api_state_decorator("EXECUTE_CMD")
def execute_cmd_api(self, jsonContent):
self.lgr.debug("execute_cmd_api Called")
        #Get the current te_dp_dict maintained if param passed is empty
te_dp_dict = convert(jsonContent.get('te_dp_dict', {}))
cmd = convert(jsonContent['cmd'])
job_timeout = convert(jsonContent.get('job_timeout', 180))
if(bool(te_dp_dict)):
#If te_dp_dict is passed, reset only in those clients
set_of_passed_dict = set(te_dp_dict.keys())
#Check if all the passed clients are connected
if(not(set_of_passed_dict.issubset(self.__connect_completed_tedps))):
return self.__failure({"Passed te_dp dict is not a subset of Connected TEDP" : \
list(self.__connect_completed_tedps)})
else:
            #If nothing is passed, execute the command on all connected TEDPs
self.lgr.debug("execute_cmd: No param passed")
te_dp_dict = self.__connect_completed_tedps
if(not(bool(te_dp_dict))):
return self.__failure("No TEDPs connected")
status, msg, result = self.__run_mgmt_command(te_dp_dict, global_cmd=cmd, task="EXECUTE_CMD", job_timeout=job_timeout)
if(status):
return self.__success(result)
else:
return self.__failure({msg:result})
def __tech_support_helper(self, tedp_host, scp_ip, scp_user, scp_passwd, scp_path, \
type_of_logs, max_tolerable_delay):
try:
CURRENT_TASK = "TECH_SUPPORT"
self.lgr.debug("__tech_support_helper Called")
state_error = {}
numberOfCallsMade = 0
enqueuedHosts = []
resultDict = {}
for host in tedp_host:
argsPassed = {'my_ip':host, 'remote_ip':scp_ip, 'remote_user':scp_user, \
                    'remote_pwd':scp_passwd, 'remote_path':scp_path, \
'type_of_logs':type_of_logs}
self.lgr.debug("Passing args to tech_support=%s" %str(argsPassed))
resultDict[host] = \
self.__tedp_config[host].tech_support_helper(tech_support, argsPassed)
#Add to Assigned TASK_DETAILS
self.__TASK_DETAILS[CURRENT_TASK].append(host)
self.lgr.debug("result of tech_support tedp %s" %(str(resultDict)))
status, result = self.__verify_task_status(CURRENT_TASK, max_tolerable_delay)
if status:
return True, "SCP-ed files", result
#RQ Failure / Incomplete task / bad code
return False, "Errors during SCP of tech support", result
except:
return False, "Exception Occured:", traceback.format_exc()
@__api_state_decorator("TECH_SUPPORT")
def tech_support_api(self, jsonContent):
self.lgr.debug("tech_support_api Called jsonContent={}".format(jsonContent))
te_dp_dict = convert(jsonContent['te_dp_dict'])
log_type = convert(jsonContent['log_type'])
max_tolerable_delay = convert(jsonContent.get('max_tolerable_delay', 120))
scp_user = convert(jsonContent.get("scp_user", "root"))
scp_passwd = convert(jsonContent.get("scp_passwd", None))
scp_ip = self.__te_controller_obj.get_daemon_ip()
scp_path = "/tmp"
if(log_type!="all" and log_type!="setup" and log_type!="process" and log_type!="core"):
self.__failure("Only accepted types of log_types are all, setup, process or core")
#If connect step is yet to be completed
if(not(bool(self.__connect_completed_tedps))):
return self.__failure("No tedps connected to collect logs")
else:
self.lgr.debug("Using RQs to get the tech_support")
if(bool(te_dp_dict)):
te_dps = te_dp_dict.keys()
else:
te_dps = self.__connect_completed_tedps
status, msg, result = self.__tech_support_helper(te_dps, scp_ip, scp_user, scp_passwd, \
scp_path, log_type, max_tolerable_delay)
if status:
date_time = str(datetime.now()).replace(' ','-').replace(':','-')
tar_file = "/te_host/techsupportlogs-{}.tar.gz".format(date_time)
host_tar_file = "/tmp/techsupportlogs-{}.tar.gz".format(date_time)
interested_files = "te_*_logs.tar.gz"
cmd = "cd /te_host/ && tar -zcvf {} {} /tmp/ && rm -rf {}".format(
tar_file, interested_files, interested_files)
self.lgr.info("Executing '{}'".format(cmd))
os.system(cmd)
cmd = "ls -d {}".format(tar_file)
(out, err) = self.__execute_command(cmd)
self.lgr.info("Executing '{}'".format(cmd))
self.lgr.info("Out={} Err={}".format(out, err))
if(bool(out)):
if(out.replace('\n','') == tar_file):
return self.__success("Tech support file generated at {}".format(host_tar_file))
else:
return self.__success("Tar ball not generated but tech support logs scp-ed successfuly. Check /tmp/te_*_logs.tar.gz in controller host")
else:
return self.__failure({msg:"Unable to scp techsupport logs from data path. Please check for techsupport tar in /tmp/te_*_logs in datapath machines"})
return self.__failure({msg:result})
################################# SETUP TEDP API ####################################
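# Helper: SCPs GET_AND_RUN_DOCKER_IMAGE.py to every host over parallel-ssh and runs it to fetch, load and
# start the TEDP container; remote exit codes are mapped to readable errors via EXIT_STATUS below.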
def __setup_tedps(self, te_dp_hosts, client):
EXIT_STATUS = {
10 : "Unable to find docker",
11 : "Unable to find python-requests",
12 : "Unable to find wget",
13 : "Unable to prepare the appropriate conditions needed to start the container (redis stop, docker start)",
14 : "Unable to download the tedp_docker.tar from TE-Controller",
15 : "Unable to load the container",
16 : "Unable to run the container",
17 : "Unable o get free ports",
18 : "Wrong parameters passed",
19 : "Unable to calculate checksum of Tar file",
20 : "Unable to find free port even after several tries",
21 : "Unable to find netstat command",
22 : "Unable to find both systemctl and service commands",
23 : "docker pull command failed",
24 : "docker run command failed",
200 : "Success",
404 : "Fatal: unknown reason"
}
CLEAN_EXIT = 200
#SCP GET_AND_RUN_DOCKER_IMAGE.py to all the hosts' home dir
greenlets = client.scp_send('/app/GET_AND_RUN_DOCKER_IMAGE.py', 'GET_AND_RUN_DOCKER_IMAGE.py')
try:
joinall(greenlets, raise_error=True)
except:
return False, "Unable to scp GET_AND_RUN_DOCKER_IMAGE.py", traceback.format_exc()
self.lgr.debug("SCP-ed /app/GET_AND_RUN_DOCKER_IMAGE.py to TEDP containers")
#Run GET_AND_RUN_DOCKER_IMAGE.py in all the hosts' home dir
host_command = OrderedDict()
for host in te_dp_hosts.keys():
host_command[host] = "python ~/GET_AND_RUN_DOCKER_IMAGE.py -w ~/ -ip %s -p %s -t TE_DP -my_ip %s -fp %s" \
%(self.__te_controller_obj.get_daemon_ip(), self.__te_controller_obj.get_nginx_port(),
host, self.__te_controller_obj.get_flask_port())
problematicHost = self.__run_command_and_validate_output(client=client, \
te_dp_hosts=te_dp_hosts, host_args=host_command, cleanExitCode=CLEAN_EXIT,\
possibleExitCodesDict=EXIT_STATUS)
if problematicHost != {}:
self.lgr.error("Unable to run GET_AND_RUN_DOCKER_IMAGE.py to TEDP containers. %s" \
%str(problematicHost))
return False, "Unable to setup tedp on all machines. Exit Codes", problematicHost
else:
return True, "Setup TEDP Containers", []
@__api_state_decorator("SETUP")
def setup_tedp_api(self, jsonContent):
'''
Args:
jsonContent: Must Posses te_dp_dict
'''
self.lgr.debug("setup_tedp_api Called")
isRequestValid = self.__checkForRequiredArgument(jsonContent, ['te_dp_dict'])
if isRequestValid is not None:
return isRequestValid
self.lgr.debug("Validation of Request Success {}".format(jsonContent['te_dp_dict']))
start_time = time.time()
te_dp_dict = convert(jsonContent['te_dp_dict'])
if not isinstance(te_dp_dict, dict):
return self.__failure("te_dp_dict must be a dict")
te_dp_hosts = {}
host_ips_to_setup = set(te_dp_dict.keys()) - self.__setup_completed_tedps
invalid_input = {}
for host_ip in host_ips_to_setup:
value = te_dp_dict[host_ip]
if value is None:
invalid_input[host_ip] = "Value cannot be None"
continue
user = value.get('user',None)
if user is None:
invalid_input[host_ip] = "user cannot be None"
continue
# Empty password means authenticate from default certificate from /root/.ssh/
passwd = value.get('passwd', None)
te_dp_hosts[host_ip] = {'user': user}
if passwd:
te_dp_hosts[host_ip]['password'] = passwd
self.__all_te_dp_dict_credentials[host_ip] = te_dp_hosts[host_ip]
if(bool(invalid_input)):
return self.__failure(invalid_input)
if(not(te_dp_hosts)):
return self.__failure({"TEPDs already setup" : list(self.__setup_completed_tedps)})
self.lgr.debug("Running setup_tedp on {}".format(te_dp_hosts))
client = ParallelSSHClient(te_dp_hosts.keys(), host_config=te_dp_hosts, timeout = 240)
self.lgr.debug("Creation of ParallelSSHClient Success")
status, msg, result = self.__setup_tedps(te_dp_hosts, client)
del client
if status:
self.__setup_completed_tedps.update(host_ips_to_setup)
return self.__success(msg)
else:
# if we have partial success, add to the list to avoid re-init of setup_tedp
# for the successful host
if isinstance(result, dict):
failed_hosts = list(result.keys())
for tedp_host in host_ips_to_setup:
if tedp_host not in failed_hosts:
self.__setup_completed_tedps.add(tedp_host)
return self.__failure({msg:result})
################################# RQ APIs ####################################
@__api_state_decorator("GET_RQ_DETAILS")
def get_rq_details_api(self, jsonContent):
self.lgr.debug("get_rq_details_api Called")
isRequestValid = self.__checkForRequiredArgument(jsonContent, ['ip','cpus'])
if isRequestValid is not None:
return isRequestValid
if jsonContent['ip'] not in self.__setup_completed_tedps:
return self.__failure("Unauthenticated. Use connect to authenticate {}".format(jsonContent['ip']))
if jsonContent['ip'] not in self.__tedp_config.keys():
self.__connect_lock.acquire()
self.__tedp_config[jsonContent['ip']] = TE_DP_CONFIG(jsonContent['ip'], jsonContent['cpus'], \
self.TE_BROKER_HANDLE, self.lgr, self.__te_postgres_object)
self.__connect_lock.release()
result = {
"broker" : self.__te_app.config['BROKER_URL'],
"stat_collect_interval" : self.__stat_collect_interval,
"zmq" : self.__te_controller_obj.get_zmq_port(),
"queue_csv" : 'TE?' + str(self.__tedp_config[jsonContent['ip']].get_queue_names())
}
return self.__success(result)
@__api_state_decorator("ESTABLISH_RQ")
def establish_rq_api(self, jsonContent):
self.lgr.debug("establish_rq_api Called")
isRequestValid = self.__checkForRequiredArgument(jsonContent, ['ip'])
if isRequestValid is not None:
return isRequestValid
if jsonContent['ip'] not in self.__setup_completed_tedps:
self.lgr.debug("unauthorised {}".format(jsonContent['ip']))
return self.__failure("Unauthenticated. Use connect to authenticate {}".format(jsonContent['ip']))
self.__connect_lock.acquire()
self.__connect_completed_tedps.add(jsonContent['ip'])
self.__connect_lock.release()
return self.__success("Authenticated {}".format(jsonContent['ip']))
################################# CONNECT API ####################################
@__api_state_decorator("CONNECT")
def connect_api(self, jsonContent):
self.lgr.debug("connect_api Called")
isRequestValid = self.__checkForRequiredArgument(jsonContent, ['te_dp_dict'])
if isRequestValid is not None:
return isRequestValid
te_dp_dict = convert(jsonContent['te_dp_dict'])
if not isinstance(te_dp_dict, dict):
return self.__failure("te_dp_dict must be a dict")
if not te_dp_dict:
return self.__failure({"No tedps to connect. Already connected tedps": \
list(self.__connect_completed_tedps)})
for host in te_dp_dict.keys():
self.__setup_completed_tedps.add(host)
return self.__success("Initiated objects for TEDPs={} to connect".format(list(te_dp_dict.keys())))
################################# CLEAN API ####################################
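# Helper: force-removes the tedpv2.0 container on every given host over parallel-ssh.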
def __disconnect_tedps(self, te_dp_hosts, client):
try:
cmd = "docker rm -f tedpv2.0 || true"
problematicHost = self.__run_command_and_validate_output(client=client, \
te_dp_hosts=te_dp_hosts, cmd=cmd)
if(bool(problematicHost)):
return False, "Unable to disconnect from hosts", problematicHost
return True, "Removed containers in hosts", te_dp_hosts
except:
return False, "Exception Occured", traceback.format_exc()
def __reinit_tedp_config(self):
for host in self.__tedp_config.keys():
obj = self.__tedp_config[host]
cpus = obj.get_cpu_count()
new_obj = TE_DP_CONFIG(host, cpus, self.TE_BROKER_HANDLE, \
self.lgr, self.__te_postgres_object)
del obj
self.__tedp_config[host] = new_obj
@__api_state_decorator("CLEAN")
def clean_api(self, jsonContent):
isRequestValid = self.__checkForRequiredArgument(jsonContent, ['remove_containers'])
if isRequestValid is not None:
return isRequestValid
remove_containers = convert(jsonContent['remove_containers'])
# Swagger handling
if remove_containers == "True" or remove_containers == "true":
remove_containers = True
elif remove_containers == "False" or remove_containers == "false":
remove_containers = False
if remove_containers:
# If container needs to be removed go via ssh
te_dp_hosts = {}
te_dp_in_no_access = set()
for host_ip in self.__connect_completed_tedps:
if host_ip not in self.__all_te_dp_dict_credentials:
te_dp_in_no_access.add(host_ip)
continue
te_dp_hosts[host_ip] = {'user': self.__all_te_dp_dict_credentials[host_ip].get('user','root')}
passwd = self.__all_te_dp_dict_credentials[host_ip].get('password', None)
if passwd:
te_dp_hosts[host_ip]['password'] = passwd
for host_ip in self.__setup_completed_tedps:
if host_ip not in self.__all_te_dp_dict_credentials:
te_dp_in_no_access.add(host_ip)
continue
te_dp_hosts[host_ip] = {'user': self.__all_te_dp_dict_credentials[host_ip].get('user','root')}
passwd = self.__all_te_dp_dict_credentials[host_ip].get('password', None)
if passwd:
te_dp_hosts[host_ip]['password'] = passwd
self.__setup_completed_tedps = self.__setup_completed_tedps.difference(te_dp_in_no_access)
self.__connect_completed_tedps = self.__connect_completed_tedps.difference(te_dp_in_no_access)
if(not(bool(te_dp_hosts))):
return self.__failure("No connected te dps to disconnect")
client = ParallelSSHClient(te_dp_hosts.keys(), host_config=te_dp_hosts, timeout = 240)
status, msg, result = self.__disconnect_tedps(te_dp_hosts, client)
del client
if (status and te_dp_in_no_access):
result = {
msg : result,
"tedp in no access. Needs manual clean up using `docker rm -f tedpv2.0`" : list(te_dp_in_no_access)
}
else:
# Go via rq to clean up the tedp process and to kill and respawn rq
if self.__setup_completed_tedps:
cmd = "nohup /opt/te/clean_tedp.sh &"
status, msg, result = self.__run_mgmt_command(list(self.__setup_completed_tedps),
global_cmd=cmd, task="CLEAN_TEDP")
if status:
self.__connect_completed_tedps.clear()
if remove_containers:
self.__setup_completed_tedps.clear()
self.__te_controller_obj.unset_te_dp()
self.__te_controller_obj.unset_resource_config()
self.__te_controller_obj.unset_session_config()
self.__te_controller_obj.unset_instance_profile_config()
self.__te_controller_obj.unset_client_cert_bundle()
self.__CURRENT_STATE = self.__TE_STATE["INIT"]
self.TE_BROKER_HANDLE.flushall()
self.__TASK_DETAILS = defaultdict(list)
self.__reinit_tedp_config()
if(self.__te_postgres_object.clear_tables()):
return self.__success(result)
else:
return self.__failure("Unable to clear metrics table in TE")
else:
return self.__failure({msg:result})
################################# START API ####################################
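# Helper: for every host/profile pair, builds the per-TEDP resource and session config, enqueues a
# start_te_dp (or raw_update_te_dp when updateFlag is set) job on that host's RQ, and optionally waits
# for the results via __verify_task_status.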
def __spawn_or_update_tedps(self, resource_config, sessionConfig, instanceProfileConfig, \
tedp_dict, max_tolerable_delay, is_cert_replaced, updateFlag=False, verify_result=True):
#AK: What does the below line mean??
# Multiple Src IPs Code will break, because the current implementation depends on TEDP_INFO, which is not yet updated in the connect step in this design due to obvious reasons
try:
if updateFlag:
CURRENT_TASK = "UPDATE"
else:
CURRENT_TASK = "START"
self.lgr.debug("__spawn_or_update_tedps Called")
state_error = {}
numberOfCallsMade = 0
enqueuedHosts = []
resultDict = defaultdict(dict)
for host_ip, instance_profile in tedp_dict.items():
enqueuedCall = False
for profile_tag, count in instance_profile.items():
profileToGet = instanceProfileConfig.get(profile_tag, None)
#Should never get into the if-block, if user-side validation is completed
if profileToGet is None:
self.lgr.error("%s instance_profile_tag is not present in instanceProfileConfig" %(profile_tag))
return False, "%s instance_profile_tag is not present in instanceProfileConfig" %(profile_tag), instanceProfileConfig
res_tag = profileToGet['res-tag']
ses_tag = profileToGet.get('ses-tag', None)
traffic_mode = profileToGet.get('traffic-mode', 'CLIENT').upper()
traffic_profile = profileToGet.get('traffic-profile', 'TCP').upper()
if is_cert_replaced:
client_res_cfg = resource_config[host_ip][res_tag]
else:
client_res_cfg = resource_config[res_tag]
# UDP Server can have ses_tag as None
if ses_tag is not None:
client_ses_cfg = sessionConfig[ses_tag]
else:
client_ses_cfg = {}
res_hash = str(hash(json.dumps(client_res_cfg)))
ses_hash = str(hash(json.dumps(client_ses_cfg)))
self.__te_postgres_object.insert_configs(res_tag, res_hash, client_res_cfg,
ses_tag, ses_hash, client_ses_cfg)
argsPassed = {'profile_tag':profile_tag, 'res_tag':res_tag, 'ses_tag':ses_tag, \
'client_res_cfg':client_res_cfg, 'client_ses_cfg':client_ses_cfg, \
'client_res_hash':res_hash, 'client_ses_hash':ses_hash, \
'stat_dump_interval' : self.__stat_dump_interval, \
'metrics_enabled' : self.__metrics_enabled, \
'memory_metrics_enabled' : self.__memory_metrics_enabled, \
'loglevel' : self.__te_controller_obj.get_loglevel(), \
'count_of_tedps':count, 'traffic_mode' : traffic_mode, \
'traffic_profile' : traffic_profile, 'client_mgmt_ip' : host_ip}
self.lgr.debug("Passing args to spawn/update=%s" %str(argsPassed))
if updateFlag:
resultDict[host_ip][profile_tag] = \
self.__tedp_config[host_ip].update_te_dp_helper(raw_update_te_dp, argsPassed)
else:
resultDict[host_ip][profile_tag] = \
self.__tedp_config[host_ip].start_te_dp_helper(start_te_dp, argsPassed)
enqueuedCall = True
#Add to Assigned TASK_DETAILS
if enqueuedCall:
self.__TASK_DETAILS[CURRENT_TASK].append(host_ip)
self.lgr.debug("result of spawning/updating tedp %s" %(str(resultDict)))
success = True
atLeastOneTEDP = False
for host_ip, profile in resultDict.items():
atLeastOneTEDP = True
for result in profile.values():
if not(isinstance(result, dict)) or result.get("Success",0) == 0 or result.get("Failure",[]) != []:
success = False
break
if not(atLeastOneTEDP):
return True, "", {"State transition is completed":True}
#Can fail only if TEDP state transition fails / exceptions
if(not(success)):
self.__tedp_config[host_ip].clean_task_details(CURRENT_TASK)
self.__TASK_DETAILS[CURRENT_TASK] = []
return False, "Unable to stop TEDPs (Fail at enqueue level)", resultDict
if verify_result:
status, result = self.__verify_task_status(CURRENT_TASK, max_tolerable_delay)
if status:
return True, "All TEDPs Spawed", result
#RQ Failure / Incomplete task / bad code
return False, "No TEDP Spawed", result
else:
return True, "Given RQ Object to execute", resultDict
except:
self.lgr.error("ERROR IN spawn_tedp: %s" %traceback.format_exc())
return False, "Exception occured in %s" %inspect.stack()[0][3], traceback.format_exc()
def __generate_client_res_cfg(self, te_dp_dict, resource_config, instance_profile_config, \
cert_profile):
try:
problematicCert = []
client_res_config = {}
self.lgr.debug("__generate_client_res_cfg Called")
for client_ip, client_dict in cert_profile.items():
for vip, cert_list in client_dict.items():
for cert in cert_list:
if 'ca-cert-path' in cert:
cert['ca-cert-path'] = re.sub("^/root/", "/te_root/", cert['ca-cert-path'])
cert['ca-cert-path'] = re.sub("^/home/.*[^/]/", "/te_root/", cert['ca-cert-path'])
cert['ca-cert-path'] = re.sub("^~/", "/te_root/", cert['ca-cert-path'])
if 'cert-path' in cert:
cert['cert-path'] = re.sub("^/root/", "/te_root/", cert['cert-path'])
cert['cert-path'] = re.sub("^/home/.*[^/]/", "/te_root/", cert['cert-path'])
cert['cert-path'] = re.sub("^~/", "/te_root/", cert['cert-path'])
if 'key-path' in cert:
cert['key-path'] = re.sub("^/root/", "/te_root/", cert['key-path'])
cert['key-path'] = re.sub("^/home/.*[^/]/", "/te_root/", cert['key-path'])
cert['key-path'] = re.sub("^~/", "/te_root/", cert['key-path'])
for host_ip, instance_profile_dict in te_dp_dict.items():
#Get the mapping for the current host_ip
vip_to_cert_map = cert_profile.get(host_ip, cert_profile.get('default', None))
client_res_config[host_ip] = {}
for instance_profile_tag in instance_profile_dict.keys():
res_tag = instance_profile_config[instance_profile_tag]['res-tag']
client_res = copy(resource_config[res_tag])
for vip_obj in client_res['vip-list']:
auth = vip_obj.get('auth',False)
vip = vip_obj['vip']
if auth:
if vip_to_cert_map is None:
problematicCert.append("Neither the mapping found for host_ip=%s not the 'default'" %host_ip)
continue
cert_list = vip_to_cert_map.get(vip, vip_to_cert_map.get('default', None))
if cert_list is None:
problematicCert.append("Neither the mapping found for vip=%s not the 'default' for host_ip=%s"\
%(vip, host_ip))
continue
vip_obj['certs'] = cert_list
vip_obj.pop('auth',None)
client_res_config[host_ip][res_tag] = client_res
if(bool(problematicCert)):
self.lgr.error("Problem with certs %s" %str(problematicCert))
return False, "Problem with certs", problematicCert
return True, "", client_res_config
except:
return False, "Exception Occurred", traceback.format_exc()
def __input_validation(self, te_dp_dict, instanceProfileConfig, resourceConfig, sessionConfig,
is_update=True):
if not(isinstance(te_dp_dict, dict)):
return False, "te_dp_dict must be a dictionary"
if not(isinstance(instanceProfileConfig, dict)):
return False, "instanceProfileConfig must be a dictionary"
if not(isinstance(resourceConfig, dict)):
return False, "resourceConfig must be a dictionary"
if not(isinstance(sessionConfig, dict)):
return False, "sessionConfig must be a dictionary"
reason_dict = {}
checked_profile = {}
for key, value in te_dp_dict.items():
if not(isinstance(value, dict)):
reason_dict[key] = "Value of the key must be a dict"
continue
instance_profile = value.get("instance_profile", None)
if instance_profile is None and is_update:
# Check is only for update_config
reason_dict[key] = "instance_profile key cannot be None during update"
continue
elif instance_profile is not None:
if not(isinstance(instance_profile, dict)):
# If it is not None, it must be a dictionary
reason_dict[key] = "Value of instance_profile must be a dict"
continue
for instance_tag, count in instance_profile.items():
profile = instanceProfileConfig.get(instance_tag, None)
if profile is None:
reason_dict[key] = "instance_profile={} is not found in " \
"instanceProfileConfig or is None".format(instance_tag)
break
if not isinstance(count, int):
reason_dict[key] = "Count in instance_profile must be integer"
break
# Profile is already checked
if checked_profile.get(instance_tag, False):
continue
if not isinstance(profile, dict):
reason_dict[key] = "{} tag in instanceProfileConfig must be " \
"dict".format(instance_tag)
break
# res-tag and ses-tag can never be None in case of CLIENT
# ses-tag is not present in UDP SERVER
res_key = profile.get('res-tag', None)
ses_key = profile.get('ses-tag', None)
traffic_mode = profile.get('traffic-mode', "CLIENT").upper()
traffic_profile = profile.get('traffic-profile', "TCP").upper()
if traffic_profile == "TCP" and traffic_mode == "SERVER":
reason_dict[key] == "instance_profile={}: TCP SERVER is not supported by TE"
if res_key is None or (ses_key is None and traffic_mode == "CLIENT"):
reason_dict[key] = "Value of res-tag and ses-tag cannot be None in "\
"instance_profile={}".format(instance_tag)
break
if(not(isinstance(res_key, str)) or \
(traffic_mode == "CLIENT" and not(isinstance(ses_key, str)))):
reason_dict[key] = "Value of res-tag and ses-tag must be str in "\
"instance_profile={} res_key={} ses_key={}".format(\
instance_tag, res_key, ses_key)
break
if resourceConfig.get(res_key, None) is None:
reason_dict[key] = "instance_profile={} had res-tag={} but was not found "\
"in resourceConfig".format(instance_tag, res_key)
break
if ses_key is not None and sessionConfig.get(ses_key, None) is None:
reason_dict[key] = "instance_profile={} had ses-tag={} but was not found "\
"in sessionConfig".format(instance_tag, ses_key)
break
# Compulsory params check (very basic, at best)
# Any Client must possess a vip-list to target
if traffic_mode == "CLIENT":
if(not(bool(resourceConfig[res_key].get('vip-list', None)))):
reason_dict[key] = "res-tag={} traffic-mode={} does not possess "\
"'vip-list'".format(res_key, traffic_mode)
#TCP Client
if traffic_profile == "TCP":
if(not(bool(resourceConfig[res_key].get('get-profiles', None)) or
bool(resourceConfig[res_key].get('post-profiles', None)))):
reason_dict[key] = "res-tag={} traffic-profile={} neither possesses "\
"'get-profiles' nor 'post-profiles'".format(res_key, traffic_profile)
#UDP Client
if traffic_profile == "UDP" and traffic_mode == "CLIENT":
if(not(bool(resourceConfig[res_key].get('udp-profiles', None)))):
reason_dict[key] = "res-tag={} traffic-profile={} traffic-mode={} "\
"does not possess 'udp-profiles'".format(res_key, traffic_profile,
traffic_mode)
#UDP Server
if traffic_profile == "UDP" and traffic_mode == "SERVER":
if(not(bool(resourceConfig[res_key].get('port-list', None)) or
bool(resourceConfig[res_key].get('port-range', None)))):
reason_dict[key] = "res-tag={} traffic-profile={} neither possesses "\
"'port-list' nor 'port-range'".format(res_key, traffic_profile)
checked_profile[instance_tag] = True
if bool(reason_dict):
self.lgr.error("__input_validation: is_update={} \tte_dp_dict={} \t" \
"instanceProfileConfig={} \tresourceConfig={} \tsessionConfig={} \t" \
"reason_dict={}".format(is_update, te_dp_dict, instanceProfileConfig, \
resourceConfig, sessionConfig, reason_dict))
return False, reason_dict
return True, reason_dict
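# Asks each host's TE_DP_CONFIG whether it has capacity for the requested number of TEDPs; returns
# (ok, message, details) where details is the per-host reason on failure or the validated host-to-profile dict on success.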
def __is_spawning_new_tedps_possible(self, te_dp_dict):
resource_insuffcient_hosts = {}
dict_after_validation = {}
for host_ip, host_properties in te_dp_dict.items():
if host_properties is None or host_properties.get('instance_profile',None) is None:
continue
inst_prof = host_properties['instance_profile']
number_of_tedp_to_spawn = sum(host_properties['instance_profile'].values())
notPossible = self.__tedp_config[host_ip].is_spinning_new_tedps_possible(number_of_tedp_to_spawn)
if(notPossible is not None):
resource_insuffcient_hosts[host_ip] = notPossible
continue
dict_after_validation[host_ip] = inst_prof
if(bool(resource_insuffcient_hosts)):
return False, "Resource insuffient to run te_dps", resource_insuffcient_hosts
return True, "All conditions passed to spawn tedps", dict_after_validation
def __get_udp_server_dict(self, te_dp_dict, instanceProfileConfig):
if(not(bool(te_dp_dict))):
return {}
udp_server_tedp_dict = defaultdict(dict)
for host in list(te_dp_dict.keys()):
instance_profile_dict = te_dp_dict[host]
for tag in list(instance_profile_dict.keys()):
if(instanceProfileConfig[tag].get('traffic-mode', 'CLIENT').upper() == "SERVER"):
udp_server_tedp_dict[host][tag] = instance_profile_dict[tag]
te_dp_dict[host].pop(tag)
if(not(bool(te_dp_dict[host]))):
te_dp_dict.pop(host)
return udp_server_tedp_dict
@__api_state_decorator("START")
def start_api(self, jsonContent):
self.lgr.debug("start_api Called")
isRequestValid = self.__checkForRequiredArgument(jsonContent,
['te_dp_dict','resource_config','session_config','instanceProfileConfig'])
if isRequestValid is not None:
return isRequestValid
resourceConfig = convert(jsonContent['resource_config'])
sessionConfig = convert(jsonContent['session_config'])
instanceProfileConfig = convert(jsonContent['instanceProfileConfig'])
te_dp_dict = convert(jsonContent['te_dp_dict'])
te_dp_dict_to_save = deepcopy(te_dp_dict)
client_cert_bundle = convert(jsonContent.get('client_cert_bundle',None))
max_tolerable_delay = int(convert(jsonContent.get('max_tolerable_delay', 120)))
possible, reason = self.__are_all_tedps_connected(te_dp_dict)
if(not(possible)):
return self.__failure({"Unable to start on unconnected tedp machines":reason})
possible, reason = self.__input_validation(te_dp_dict, instanceProfileConfig, \
resourceConfig, sessionConfig, False)
if(not(possible)):
return self.__failure(reason)
#DND important logic out there
status, statusmessage, te_dp_dict_reduced = self.__is_spawning_new_tedps_possible(te_dp_dict)
if(not(status)):
return self.__failure({statusmessage:te_dp_dict_reduced})
if(not(bool(te_dp_dict_reduced))):
self.__CURRENT_STATE = self.__TE_STATE["RUNNING"]
self.__success("Only state transition effected from INIT to RUNNING")
if(client_cert_bundle is not None):
status, msg, result = self.__generate_client_res_cfg(te_dp_dict_reduced, resourceConfig, \
instanceProfileConfig, client_cert_bundle)
if(not(status)):
return self.__failure({msg:result})
resource_config_to_spawn = result
is_cert_replaced = True
else:
resource_config_to_spawn = resourceConfig
is_cert_replaced = False
# Start of the UDP server has to be done before starting the clients
# Though the start of servers will run in parallel, starting of clients will
# happen only after the servers have started (Sequential)
result = {}
udp_server_tedp_dict_to_start = \
self.__get_udp_server_dict(te_dp_dict_reduced, instanceProfileConfig)
self.lgr.debug("START UDP SERVER IN START_API %s" %str(udp_server_tedp_dict_to_start))
if(bool(udp_server_tedp_dict_to_start)):
status, msg_server_start, result_server_start = self.__spawn_or_update_tedps(\
resource_config_to_spawn, sessionConfig, instanceProfileConfig, \
udp_server_tedp_dict_to_start, max_tolerable_delay, is_cert_replaced)
if(not(status)):
return self.__failure({msg_server_start:result_server_start})
result["server-start"] = result_server_start
# As the above iteration could have removed the hosts from the dict altogether
if(bool(te_dp_dict_reduced)):
status, msg, result_start = self.__spawn_or_update_tedps(resource_config_to_spawn, sessionConfig,\
instanceProfileConfig, te_dp_dict_reduced, max_tolerable_delay, is_cert_replaced)
result["client-start"] = result_start
if status:
self.__CURRENT_STATE = self.__TE_STATE["RUNNING"]
self.__te_controller_obj.set_resource_config(resourceConfig)
self.__te_controller_obj.set_session_config(sessionConfig)
self.__te_controller_obj.set_instance_profile_config(instanceProfileConfig)
self.__te_controller_obj.set_te_dp(te_dp_dict_to_save)
if client_cert_bundle is not None:
self.__te_controller_obj.set_client_cert_bundle(client_cert_bundle)
return self.__success(result)
else:
return self.__failure({msg:result})
################################# STOP API ####################################
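# Helper: enqueues a stop_te_dp job per host with the PIDs to kill and waits for the STOP task to
# complete within max_tolerable_delay.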
def __stop_tedps(self, paramDict, max_tolerable_delay):
try:
CURRENT_TASK = "STOP"
resultDict = {}
for host_ip, listOfPid in paramDict.items():
resultDict[host_ip] = self.__tedp_config[host_ip].stop_te_dp_helper(stop_te_dp, \
{'listOfPid':listOfPid})
self.__TASK_DETAILS[CURRENT_TASK].append(host_ip)
self.lgr.debug("result of stopping tedp %s" %(str(resultDict)))
success = True
for host_ip, result in resultDict.items():
if not(isinstance(result, dict)) or result.get("Success",0) <= 0:
success = False
break
if(not(success)):
self.__tedp_config[host_ip].clean_task_details(CURRENT_TASK)
self.__TASK_DETAILS[CURRENT_TASK] = []
return False, "Unable to stop TEDPs (Fail at enqueue level)", resultDict
status, result = self.__verify_task_status(CURRENT_TASK, max_tolerable_delay)
if status:
return True, "Stopped TEDPs", result
return False, "Unable to stop TEDPs", result
except:
return False, "Exception Occured in __stop_tedps", traceback.format_exc()
def __get_new_te_dp_to_set(self, te_dp_dict_stopped=None, list_of_profiles=None):
if te_dp_dict_stopped is not None and list_of_profiles is not None:
return None
if te_dp_dict_stopped is None and list_of_profiles is None:
return {}
if list_of_profiles is not None:
te_dp_dict_to_set = deepcopy(self.__te_controller_obj.get_te_dp())
for host_ip in list(te_dp_dict_to_set.keys()):
profile = te_dp_dict_to_set[host_ip]['instance_profile']
for profile_tag in list(profile.keys()):
if profile_tag in list_of_profiles:
profile.pop(profile_tag)
if(not(bool(profile))):
te_dp_dict_to_set.pop(host_ip)
return te_dp_dict_to_set
if te_dp_dict_stopped is not None:
te_dp_dict_to_set = deepcopy(self.__te_controller_obj.get_te_dp())
old_hosts = set(te_dp_dict_to_set.keys())
stopped_hosts = set(te_dp_dict_stopped.keys())
common_hosts = old_hosts.intersection(stopped_hosts)
for host_ip in common_hosts:
if te_dp_dict_stopped[host_ip] is None or \
te_dp_dict_stopped[host_ip]['instance_profile'] is None:
te_dp_dict_to_set.pop(host_ip)
else:
profile_stopped = te_dp_dict_stopped[host_ip]['instance_profile']
to_set_profile_dict = te_dp_dict_to_set[host_ip]['instance_profile']
for profile_tag in profile_stopped.keys():
to_set_profile_dict[profile_tag] -= profile_stopped[profile_tag]
if to_set_profile_dict[profile_tag] == 0:
to_set_profile_dict.pop(profile_tag)
if(not(bool(to_set_profile_dict))):
te_dp_dict_to_set.pop(host_ip)
return te_dp_dict_to_set
@__api_state_decorator("STOP")
def stop_api(self, jsonContent):
'''
TODO: Integrate the stats collection logic to the code
Args:
by_host_and_instance_profile_tag: A dict mapping host_ip to {'instance_profile': {tag: count}} describing how many TEDPs of each profile to stop on each host
by_instance_profile_tag: A list of instance_profile tags to stop across all hosts
'''
self.lgr.debug("stop_api Called")
change_state_to_init = False
by_host_and_instance_profile_tag = convert(jsonContent.get('by_host_and_instance_profile_tag',None))
by_instance_profile_tag = convert(jsonContent.get('by_instance_profile_tag',None))
max_tolerable_delay = int(convert(jsonContent.get('max_tolerable_delay', 120)))
numberOfCallsMade = 0
paramDict = defaultdict(set)
if by_host_and_instance_profile_tag is not None and by_instance_profile_tag is not None:
return self.__failure("Both params of by_host_and_instance_profile_tag and by_instance_profile_tag cannot be passed")
elif by_host_and_instance_profile_tag is None and by_instance_profile_tag is None:
change_state_to_init = True
te_dp_dict = self.__te_controller_obj.get_te_dp()
for host_ip in te_dp_dict.keys():
listOfPidsRunningProfile = self.__tedp_config[host_ip].get_pid_of_running_tedps()
if(bool(listOfPidsRunningProfile)):
paramDict[host_ip] = paramDict[host_ip].union(listOfPidsRunningProfile)
else:
self.lgr.warning("No tedps is been run on %s" %host_ip)
elif by_host_and_instance_profile_tag is not None:
self.lgr.debug("Host Specific stop_api params population")
possible, reason = self.__are_all_tedps_connected(by_host_and_instance_profile_tag)
if(not(possible)):
return self.__failure({"Unable to stop on unconnected tedp machines":reason})
for host_ip, profile_map in by_host_and_instance_profile_tag.items():
if profile_map is None or profile_map['instance_profile'] is None:
listOfPidsRunningProfile = self.__tedp_config[host_ip].get_pid_of_running_tedps()
self.lgr.debug("listOfPidsRunningProfile for by_host_and_instance_profile_tag=%s" \
%str(listOfPidsRunningProfile))
if(bool(listOfPidsRunningProfile)):
paramDict[host_ip] = paramDict[host_ip].union(listOfPidsRunningProfile)
else:
for profile_name, count in profile_map['instance_profile'].items():
if count == 0:
continue
listOfPidsRunningProfile = \
self.__tedp_config[host_ip].get_pid_of_running_profiles([profile_name])
numberOfRunningProcess = len(listOfPidsRunningProfile)
#If count is None, delete all running profiles of the specific host
if count is None:
paramDict[host_ip] = paramDict[host_ip].union(listOfPidsRunningProfile)
#if numberOfRunningProcess is 0, then no process of that profile_name is running
elif numberOfRunningProcess == 0:
error_str = "stop_api uninitiated. Requested to kill %d instances of %s\
but no process of that instance is running in host_ip= %s" \
%(count, profile_name, host_ip)
#If count > numberOfRunningProcess, Improper parameter
elif count > numberOfRunningProcess:
error_str = "stop_api uninitiated. Requested to kill %d instances of %s\
but only %d running in host_ip=%s" \
%(count, profile_name, numberOfRunningProcess, host_ip)
self.lgr.error(error_str)
return self.__failure(error_str)
#Else stop count process running in the host_ip
else:
paramDict[host_ip] = paramDict[host_ip].union(listOfPidsRunningProfile[:count])
elif by_instance_profile_tag is not None:
self.lgr.debug("profile_tag Specific stop_api params population")
for host_ip in self.__tedp_config:
listOfPidsRunningProfile = \
self.__tedp_config[host_ip].get_pid_of_running_profiles(by_instance_profile_tag)
if(bool(listOfPidsRunningProfile)):
paramDict[host_ip] = paramDict[host_ip].union(listOfPidsRunningProfile)
if(bool(paramDict)):
status, message, result = self.__stop_tedps(paramDict, max_tolerable_delay)
else:
return self.__failure("No tedps to stop")
if(status):
#Alter te_dp_dict for every stop
self.lgr.debug("by_host_and_instance_profile_tag=%s"%str(by_host_and_instance_profile_tag))
self.lgr.debug("by_instance_profile_tag=%s"%str(by_instance_profile_tag))
te_dp_dict_to_set = self.__get_new_te_dp_to_set(te_dp_dict_stopped=by_host_and_instance_profile_tag, list_of_profiles=by_instance_profile_tag)
self.lgr.debug("Final set te_dp_dict_to_set is %s" %str(te_dp_dict_to_set))
self.__te_controller_obj.set_te_dp(te_dp_dict_to_set)
#Unset others only on stop all call
if change_state_to_init:
self.__CURRENT_STATE = self.__TE_STATE["INIT"]
self.__te_controller_obj.unset_resource_config()
self.__te_controller_obj.unset_session_config()
self.__te_controller_obj.unset_instance_profile_config()
self.__te_controller_obj.unset_client_cert_bundle()
else:
self.__CURRENT_STATE = self.__TE_STATE["RUNNING"]
return self.__success(result)
return self.__failure({message:result})
################################# UPDATE API ####################################
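# Returns the set of keys that are new in newConfig or whose value differs from oldConfig (None on exception).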
def __getModifiedConfigs(self, newConfig, oldConfig):
try:
self.lgr.debug("__getModifiedConfigs Called")
newKeys = set(newConfig.keys())
oldKeys = set(oldConfig.keys())
modifiedKeys = newKeys - oldKeys
intersectionKeys = newKeys.intersection(oldKeys)
self.lgr.debug("newKeys = %s" %str(newKeys))
self.lgr.debug("oldKeys = %s" %str(oldKeys))
self.lgr.debug("modifiedKeys = %s" %str(modifiedKeys))
self.lgr.debug("intersectionKeys = %s" %str(intersectionKeys))
for key in intersectionKeys:
if newConfig[key] != oldConfig[key]:
self.lgr.debug("Adding Key=%s" %key)
modifiedKeys.add(key)
return modifiedKeys
except:
self.lgr.error("ERROR in __getModifiedConfigs: %s" %traceback.format_exc())
return None
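# Returns the set of instance profile tags that are new or that reference a modified res/ses tag (None on exception).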
def __getModifiedProfiles(self, oldInstanceProfileConfig, newInstanceProfileConfig, modifiedRes, \
modifiedSes):
try:
self.lgr.debug("__getModifiedProfiles Called")
oldKeys = set(oldInstanceProfileConfig.keys())
newKeys = set(newInstanceProfileConfig.keys())
commonKeys = newKeys.intersection(oldKeys)
modifiedProfiles = newKeys - oldKeys
self.lgr.debug("newKeys = %s" %str(newKeys))
self.lgr.debug("oldKeys = %s" %str(oldKeys))
self.lgr.debug("modifiedProfiles = %s" %str(modifiedProfiles))
self.lgr.debug("commonKeys = %s" %str(commonKeys))
for profile in commonKeys:
res_tag = newInstanceProfileConfig[profile]['res-tag']
ses_tag = newInstanceProfileConfig[profile].get('ses-tag', None)
if res_tag in modifiedRes or (ses_tag is not None and ses_tag in modifiedSes):
self.lgr.debug("Adding Key=%s" %profile)
modifiedProfiles.add(profile)
return modifiedProfiles
except:
self.lgr.error("ERROR in __getModifiedProfiles: %s" %traceback.format_exc())
return None
@__api_state_decorator("UPDATE")
def update_config_api(self, jsonContent):
self.lgr.debug("update_config_api Called")
start_update_api_time = time.time()
isRequestValid = self.__checkForRequiredArgument(jsonContent, ['resource_config',\
'session_config','instanceProfileConfig','te_dp_dict'])
if isRequestValid is not None:
return isRequestValid
resourceConfig = convert(jsonContent['resource_config'])
sessionConfig = convert(jsonContent['session_config'])
instanceProfileConfig = convert(jsonContent['instanceProfileConfig'])
te_dp_dict = convert(jsonContent['te_dp_dict'])
client_cert_bundle = convert(jsonContent.get('client_cert_bundle',None))
max_tolerable_delay = int(convert(jsonContent.get('max_tolerable_delay', 120)))
possible, reason = self.__are_all_tedps_connected(te_dp_dict)
if(not(possible)):
return self.__failure({"Unable to update on unconnected tedp machines":reason})
possible, reason = self.__input_validation(te_dp_dict, instanceProfileConfig, \
resourceConfig, sessionConfig)
if(not(possible)):
return self.__failure(reason)
if not self.__te_controller_obj:
self.lgr.errror("te_class object not found")
return self.__failure('te_class object not found')
old_resource_cfg = self.__te_controller_obj.get_resource_config()
if old_resource_cfg is None:
return self.__failure("old resource cfg is None")
old_session_cfg = self.__te_controller_obj.get_session_config()
if old_session_cfg is None:
return self.__failure("old session cfg is None")
old_instance_profile_config = self.__te_controller_obj.get_instance_profile_config()
if old_instance_profile_config is None:
return self.__failure("old instance profile cfg is None")
old_te_dp_dict = self.__te_controller_obj.get_te_dp()
if old_te_dp_dict is None:
return self.__failure("old tedp dict is None")
modifiedRes = self.__getModifiedConfigs(resourceConfig, old_resource_cfg)
if modifiedRes is None:
return self.__failure(traceback.format_exc())
modifiedSes = self.__getModifiedConfigs(sessionConfig, old_session_cfg)
if modifiedSes is None:
return self.__failure(traceback.format_exc())
modifiedProfiles = self.__getModifiedProfiles(old_instance_profile_config, \
instanceProfileConfig, modifiedRes, modifiedSes)
if modifiedProfiles is None:
return self.__failure(traceback.format_exc())
self.lgr.debug("modifiedRes=%s" %str(modifiedRes))
self.lgr.debug("modifiedSes=%s" %str(modifiedSes))
self.lgr.debug("modifiedProfiles=%s" %str(modifiedProfiles))
oldTEDPhosts = set(old_te_dp_dict.keys())
newTEDPhosts = set(te_dp_dict.keys())
tedpsToStop = oldTEDPhosts - newTEDPhosts
tedpsToModify = newTEDPhosts.intersection(oldTEDPhosts)
tedpsToSpawn = newTEDPhosts - oldTEDPhosts
te_dp_dict_to_stop = defaultdict(set)
te_dp_dict_to_spawn = defaultdict(dict)
te_dp_dict_to_update = defaultdict(dict)
problematicHost = {}
for host_ip in tedpsToStop:
listOfPidsRunningProfile = self.__tedp_config[host_ip].get_pid_of_running_tedps()
if(bool(listOfPidsRunningProfile)):
te_dp_dict_to_stop[host_ip] = te_dp_dict_to_stop[host_ip].union(listOfPidsRunningProfile)
for host_ip in tedpsToSpawn:
instance_profile = te_dp_dict[host_ip]['instance_profile']
number_of_tedps_to_spawn = sum(instance_profile.values())
notPossible = self.__tedp_config[host_ip].is_spinning_new_tedps_possible(number_of_tedps_to_spawn)
if(notPossible is not None):
problematicHost[host_ip] = notPossible
continue
te_dp_dict_to_spawn[host_ip] = instance_profile
self.lgr.debug("Old tedp dict was %s" %str(old_te_dp_dict))
for host_ip in tedpsToModify:
'''
Possible cases:
1) Modification of profile with Increase/Decrease in count
2) Unmodified Profile with Increase/Decrease in count
3) Presence of a new tag(in tedp_dict) in te_dp_dict
4) Deletion of a tag(in tedp_dict) from old te_dp_dict
'''
newProfileDict = te_dp_dict[host_ip]['instance_profile']
old_value_of_host = old_te_dp_dict.get(host_ip, None)
if old_value_of_host is not None:
oldProfileDict = old_value_of_host.get('instance_profile',{})
if oldProfileDict is None:
oldProfileDict = {}
else:
oldProfileDict = {}
newKeys = set(newProfileDict.keys())
oldKeys = set(oldProfileDict.keys())
addedTags = newKeys - oldKeys
deletedTags = oldKeys - newKeys
commonTags = newKeys.intersection(oldKeys)
numberOfTEDPsToStop = 0
numberOfTEDPsToSpawn = 0
for profile in addedTags:
count = te_dp_dict[host_ip]['instance_profile'][profile]
te_dp_dict_to_spawn[host_ip][profile] = count
numberOfTEDPsToSpawn += count
for profile in deletedTags:
listOfPidsRunningProfile = self.__tedp_config[host_ip].get_pid_of_running_profiles([profile])
if(listOfPidsRunningProfile):
te_dp_dict_to_stop[host_ip] = te_dp_dict_to_stop[host_ip].union(listOfPidsRunningProfile)
numberOfTEDPsToStop += len(listOfPidsRunningProfile)
for profile in commonTags:
updateFlag = False
if profile in modifiedProfiles:
updateFlag = True
listOfPidsRunningProfile = self.__tedp_config[host_ip].get_pid_of_running_profiles([profile])
number_of_new_tedps_to_run = te_dp_dict[host_ip]['instance_profile'][profile]
number_of_tedps_running = len(listOfPidsRunningProfile)
difference = abs(number_of_new_tedps_to_run - number_of_tedps_running)
#SPAWN AND UPDATE
if number_of_new_tedps_to_run > number_of_tedps_running:
te_dp_dict_to_spawn[host_ip][profile] = difference
numberOfTEDPsToSpawn += difference
if updateFlag:
te_dp_dict_to_update[host_ip][profile] = number_of_tedps_running
#STOP AND UPDATE
elif number_of_new_tedps_to_run < number_of_tedps_running:
if(bool(listOfPidsRunningProfile)):
te_dp_dict_to_stop[host_ip] = te_dp_dict_to_stop[host_ip].union(listOfPidsRunningProfile[:difference])
numberOfTEDPsToStop += difference
if updateFlag:
te_dp_dict_to_update[host_ip][profile] = number_of_new_tedps_to_run
#UPDATE (No change in count)
elif updateFlag:
te_dp_dict_to_update[host_ip][profile] = number_of_tedps_running
notPossibleToRun = self.__tedp_config[host_ip].is_update_possible(numberOfTEDPsToStop, \
numberOfTEDPsToSpawn)
if(bool(notPossibleToRun)):
problematicHost[host_ip] = notPossibleToRun
if(bool(problematicHost)):
return self.__failure(problematicHost)
self.lgr.debug("STOP IN UPDATE_API %s" %str(te_dp_dict_to_stop))
self.lgr.debug("START IN UPDATE_API %s" %str(te_dp_dict_to_spawn))
self.lgr.debug("UPDATE IN UPDATE_API %s" %str(te_dp_dict_to_update))
self.lgr.debug("TIME TAKEN FOR PREPROCESSING %s" %str(time.time() - start_update_api_time))
#GENERATE CLIENT RES CONFIG
if(bool(client_cert_bundle)):
gen_start = time.time()
dict_to_update_and_spawn = deepcopy(te_dp_dict_to_spawn)
dict_merge(dict_to_update_and_spawn, te_dp_dict_to_update)
status, msg, result = self.__generate_client_res_cfg(dict_to_update_and_spawn, \
resourceConfig, instanceProfileConfig, client_cert_bundle)
if(not(status)):
return self.__failure({msg:result})
resource_config_to_spawn_and_update = result
is_cert_replaced = True
self.lgr.debug("Time taken to GEN CERT PROFILE %s" %str(time.time() - gen_start))
else:
resource_config_to_spawn_and_update = resourceConfig
is_cert_replaced = False
result = {}
#STOP TEDPs
if(bool(te_dp_dict_to_stop)):
stop_start = time.time()
status, msg_stop, result_stop = self.__stop_tedps(te_dp_dict_to_stop, max_tolerable_delay)
if(not(status)):
return self.__failure({msg_stop:result_stop})
result["stop"] = {msg_stop:result_stop}
self.lgr.debug("Time taken to STOP %s" %str(time.time() - stop_start))
# Servers will have to be started / updated before starting / updating the client
# So a separate call has to be made to make sure Servers are up and running, before the client starts
# Calls to verify_task status is avoided in both spawn and update methods
# This is done to quicken the process of enqueuing task, without worrying about the result
# We later make a call to verify the task status
#UPDATE UDP SERVER TEDPs
is_udp_servers_updated = False
udp_server_tedp_dict_to_update = \
self.__get_udp_server_dict(te_dp_dict_to_update, instanceProfileConfig)
self.lgr.debug("UPDATE UDP SERVER IN UPDATE_API %s" %str(udp_server_tedp_dict_to_update))
if(bool(udp_server_tedp_dict_to_update)):
is_udp_servers_updated = True
status, msg_update, result_update = self.__spawn_or_update_tedps(\
resource_config_to_spawn_and_update, sessionConfig, instanceProfileConfig, \
udp_server_tedp_dict_to_update, max_tolerable_delay, is_cert_replaced, updateFlag=True, \
verify_result=False)
if(not(status)):
return self.__failure({msg_update:result_update})
#START UDP SERVER TEDPs
is_udp_servers_spawned = False
udp_server_tedp_dict_to_start = \
self.__get_udp_server_dict(te_dp_dict_to_spawn, instanceProfileConfig)
self.lgr.debug("START UDP SERVER IN UPDATE_API %s" %str(udp_server_tedp_dict_to_start))
if(bool(udp_server_tedp_dict_to_start)):
is_udp_servers_spawned = True
status, msg_start, result_start = self.__spawn_or_update_tedps(\
resource_config_to_spawn_and_update, sessionConfig, instanceProfileConfig, \
udp_server_tedp_dict_to_start, max_tolerable_delay, is_cert_replaced, verify_result=False)
if(not(status)):
return self.__failure({msg_start:result_start})
#Verifying the task status for update of UDP SERVER
if(is_udp_servers_updated):
status, result_update = self.__verify_task_status("UPDATE", max_tolerable_delay)
if(not(status)):
return self.__failure({"Error in __verify_task_status of update of UDP SERVER":result_update})
if(bool(result_update)):
result["server-update"] = result_update
#Verifying the task status for spawn of UDP SERVER
if(is_udp_servers_spawned):
status, result_start = self.__verify_task_status("START", max_tolerable_delay)
if(not(status)):
return self.__failure({"Error in __verify_task_status of start of UDP SERVER":result_start})
if(bool(result_start)):
result["server-start"] = result_start
#UPDATE TEDPs (All except UDP SERVERS)
if(bool(te_dp_dict_to_update)):
status, msg_update, result_update = self.__spawn_or_update_tedps(\
resource_config_to_spawn_and_update, sessionConfig, instanceProfileConfig, \
te_dp_dict_to_update, max_tolerable_delay, is_cert_replaced, updateFlag=True, \
verify_result=False)
if(not(status)):
return self.__failure({msg_update:result_update})
#SPAWN TEDPS (All except UDP SERVERS)
if(bool(te_dp_dict_to_spawn)):
status, msg_start, result_start = self.__spawn_or_update_tedps(
resource_config_to_spawn_and_update, sessionConfig, instanceProfileConfig, \
te_dp_dict_to_spawn, max_tolerable_delay, is_cert_replaced, verify_result=False)
if(not(status)):
return self.__failure({msg_start:result_start})
#Verifying the task status for update (All except UDP SERVERS)
status, result_update = self.__verify_task_status("UPDATE", max_tolerable_delay)
if(not(status)):
return self.__failure({"Error in __verify_task_status of update":result_update})
if(bool(result_update)):
result["client-update"] = result_update
#Verifying the task status for spawn (All except UDP SERVERS)
status, result_start = self.__verify_task_status("START", max_tolerable_delay)
if(not(status)):
return self.__failure({"Error in __verify_task_status of start":result_start})
if(bool(result_start)):
result["client-start"] = result_start
self.lgr.debug("Setup Completed tedps after update is: %s" %str(self.__setup_completed_tedps))
if(bool(result)):
self.__CURRENT_STATE = self.__TE_STATE["RUNNING"]
self.__te_controller_obj.set_resource_config(resourceConfig)
self.__te_controller_obj.set_session_config(sessionConfig)
self.__te_controller_obj.set_instance_profile_config(instanceProfileConfig)
self.__te_controller_obj.set_te_dp(te_dp_dict)
if client_cert_bundle is not None:
self.__te_controller_obj.set_client_cert_bundle(client_cert_bundle)
return self.__success(result)
else:
return self.__failure("Nothing to update")
def __execute_command(self, cmd):
proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
out, err = proc.communicate()
# Both Python 2 & 3 compliant
if out is not None:
out = out.decode('utf-8')
if err is not None:
err = err.decode('utf-8')
return (out,err)
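# Starts or stops the grafana-server service based on jsonContent['state']; when starting, rewrites
# /etc/grafana/grafana.ini to use the configured grafana port and fails if that port is already in use.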
def grafana_api(self, jsonContent):
self.lgr.info("Grafana api is called.")
grafana_service_state = convert(jsonContent['state'])
if grafana_service_state == False or grafana_service_state == 'False' or grafana_service_state == 'false':
cmd = "service grafana-server stop"
(out, err) = self.__execute_command(cmd)
if not err:
return self.__success("Grafana service stopped successfully")
else:
grafana_port = self.__te_controller_obj.get_grafana_port()
#Uncomment this line in the grafana.ini config file, to set the port number
subprocess.call("sed -i 's/;http_port/http_port/g' /etc/grafana/grafana.ini" , shell=True)
#Check if the default port is pre-occupied or not, if occupied assign a random port
cmd_check_port = "netstat -laneut | grep -w {} | wc -l".format(grafana_port)
(count, err) = self.__execute_command(cmd_check_port)
if err:
return self.__failure("ERROR Occured with netstat command! {}".format(err))
if int(count) != 0:
return self.__failure("Port {} is not free to use and so unable to start grafana".format(grafana_port))
return_val = subprocess.call("sed -i 's/http_port = [0-9]*/http_port = {}/g' /etc/grafana/grafana.ini".format(grafana_port) , shell=True)
if return_val != 0:
return self.__failure("Grafana Port Not Intialized Error occured!!")
cmd = "service grafana-server start"
(out, err) = self.__execute_command(cmd)
if not(err):
url = str(self.__te_controller_obj.get_daemon_ip()) + ":" + grafana_port
return self.__success("Grafana service is running at {} with creds: admin / admin ".format(url))
else:
self.lgr.error("Grafana server failed to start {}".format(err))
self.__failure("Grafana server failed to start {}".format(err))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-m','--Mgmt_IP',type=str, required=True,
help='Mgmt IP for the TE and Redis to use for all TE_DP')
parser.add_argument('-np','--nginx_port',type=str, required=True,
help='Port for nginx to bind')
parser.add_argument('-rp','--redis_port',type=str, required=True,
help='Port for redis to bind')
parser.add_argument('-fp','--flask_port',type=str, required=True,
help='Port for flask to bind')
parser.add_argument('-pp','--postgres_port',type=str, required=True,
help='Port for postgres to bind')
parser.add_argument('-zp','--zmq_port',type=str, required=True,
help='Port for zmq to bind')
parser.add_argument('-gp','--grafana_port',type=str, required=True,
help='Port for grafana to bind')
parser.add_argument('-lp','--logpath',type=str, default='/tmp/',
help='Log Path for TE')
parser.add_argument('-ll','--loglevel',type=int, default=10,
help='Log Level for TE')
parser.add_argument('-ct','--stat_collect_interval',type=int, default=15,
help='Time Interval at which Stat Collection must take place')
parser.add_argument('-dt','--stat_dump_interval',type=int, default=15,
help='Time Interval at which Stat Dumping by TEDP must take place')
args = parser.parse_args()
return args
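# Persists the daemon's runtime settings to /tmp/te-data.json.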
def dump(te_daemon_ip, nginx_port, redis_port, flask_port, postgres_port, zmq_port, grafana_port, \
stat_collect_interval, stat_dump_interval, logpath, loglevel):
tedatajson = {
'te_daemon_ip' : te_daemon_ip,
'nginx_ip' : nginx_port,
'redis_port' : redis_port,
'flask_port' : flask_port,
'postgres_port' : postgres_port,
'zmq_port' : zmq_port,
'grafana_port' : grafana_port,
'stat_collect_interval' : stat_collect_interval,
'stat_dump_interval' : stat_dump_interval,
'logpath' : logpath,
'loglevel' : loglevel
}
te_file = open('/tmp/te-data.json', 'w')
json.dump(tedatajson,te_file)
te_file.close()
if __name__ == '__main__':
args = parse_args()
te_daemon_ip = args.Mgmt_IP
nginx_port = args.nginx_port
redis_port = args.redis_port
flask_port = args.flask_port
postgres_port = args.postgres_port
zmq_port = args.zmq_port
grafana_port = args.grafana_port
stat_collect_interval = args.stat_collect_interval
stat_dump_interval = args.stat_dump_interval
logpath = args.logpath
loglevel = args.loglevel
dump(te_daemon_ip, nginx_port, redis_port, flask_port, postgres_port, zmq_port, grafana_port, \
stat_collect_interval, stat_dump_interval, logpath, loglevel)
flask_obj = FlaskApplicationWrapper(te_daemon_ip, flask_port, redis_port, nginx_port, \
postgres_port, zmq_port, grafana_port, stat_collect_interval, stat_dump_interval, logpath, loglevel)
flask_obj.run()
| 0.945313
| 1
|
Genome.py
|
SwikarGautam/NEAT
| 0
|
12778644
|
<filename>Genome.py
from Node import Node
import random
from math import exp
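# A NEAT genome: a set of nodes and connection genes that together are evaluated as a neural network.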
class Genome:
def __init__(self):
self.connections = []
self.bias = Node(0, 0)
self.inp = [self.bias]
self.out = []
self.connection_set = set() # it is used to check if a connection between two nodes already exists in a genome
self.fitness = 0
self.nodes = [self.bias]
# Returns a list of outputs given an input list
def output(self, input_v):
for n in self.nodes:
n.value = 0
self.bias.value = 1
self.inp.sort(key=lambda x: x.id_n)
self.out.sort(key=lambda x: x.id_n)
for i, n in zip(input_v, self.inp[1:]):
n.value = i
for _ in range(7):
random.shuffle(self.nodes)
for n in self.nodes:
if n.layer == 2:
s = 0
for c in n.in_con:
if c.enabled:
s += c.inp.value * c.weight
n.value = self.m_sigmoid(s)
for n in self.out:
s = 0
for c in n.in_con:
if c.enabled:
s += c.inp.value * c.weight
n.value = self.m_sigmoid(s)
r = []
for o in self.out:
r.append(o.value)
return r
# activation function
# Written as a staticmethod because it does not use any attributes or methods of the class
@staticmethod
def m_sigmoid(x):
return 1/(1 + exp(-4.9*x)) if x > -15 else 0
# adds new inherited connection to the genome
def add_connection(self, connect_gene):
if connect_gene.innov_no in self.gen_innov():
for c in self.connections:
if c.innov_no == connect_gene.innov_no:
c.weight = connect_gene.weight
c.enabled = connect_gene.enabled
return
iid, oid = connect_gene.inp.id_n, connect_gene.out.id_n
if (iid, oid) in self.connection_set or (oid, iid) in self.connection_set:
return
connect_gene.inp.in_con = []
connect_gene.out.in_con = []
if iid in self.node_ids():
for n in self.nodes:
if n.id_n == iid:
connect_gene.inp = n
break
else:
self.nodes.append(connect_gene.inp)
if oid in self.node_ids():
for n in self.nodes:
if n.id_n == oid:
n.in_con.append(connect_gene)
connect_gene.out = n
break
else:
connect_gene.out.in_con.append(connect_gene)
self.nodes.append(connect_gene.out)
self.connections.append(connect_gene)
self.connection_set.add((connect_gene.inp.id_n, connect_gene.out.id_n))
# returns the set of innovation number in the connection genes
def gen_innov(self):
s = set()
for c in self.connections:
s.add(c.innov_no)
return s
# returns the set of id number of all nodes
def node_ids(self):
s = set()
for n in self.nodes:
s.add(n.id_n)
return s
| 3.1875
| 3
|
sdk/python/v1beta1/kubeflow/katib/models/v1beta1_source_spec.py
|
ujjwalsh/katib
| 2
|
12778645
|
<reponame>ujjwalsh/katib<filename>sdk/python/v1beta1/kubeflow/katib/models/v1beta1_source_spec.py
# coding: utf-8
"""
Katib
Swagger description for Katib # noqa: E501
OpenAPI spec version: v1beta1-0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from kubeflow.katib.models.v1beta1_file_system_path import V1beta1FileSystemPath # noqa: F401,E501
from kubeflow.katib.models.v1beta1_filter_spec import V1beta1FilterSpec # noqa: F401,E501
from kubernetes.client import V1HTTPGetAction # noqa: F401,E501
class V1beta1SourceSpec(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'file_system_path': 'V1beta1FileSystemPath',
'filter': 'V1beta1FilterSpec',
'http_get': 'V1HTTPGetAction'
}
attribute_map = {
'file_system_path': 'fileSystemPath',
'filter': 'filter',
'http_get': 'httpGet'
}
def __init__(self, file_system_path=None, filter=None, http_get=None): # noqa: E501
"""V1beta1SourceSpec - a model defined in Swagger""" # noqa: E501
self._file_system_path = None
self._filter = None
self._http_get = None
self.discriminator = None
if file_system_path is not None:
self.file_system_path = file_system_path
if filter is not None:
self.filter = filter
if http_get is not None:
self.http_get = http_get
@property
def file_system_path(self):
"""Gets the file_system_path of this V1beta1SourceSpec. # noqa: E501
During training model, metrics may be persisted into local file in source code, such as tfEvent use case # noqa: E501
:return: The file_system_path of this V1beta1SourceSpec. # noqa: E501
:rtype: V1beta1FileSystemPath
"""
return self._file_system_path
@file_system_path.setter
def file_system_path(self, file_system_path):
"""Sets the file_system_path of this V1beta1SourceSpec.
During training model, metrics may be persisted into local file in source code, such as tfEvent use case # noqa: E501
:param file_system_path: The file_system_path of this V1beta1SourceSpec. # noqa: E501
:type: V1beta1FileSystemPath
"""
self._file_system_path = file_system_path
@property
def filter(self):
"""Gets the filter of this V1beta1SourceSpec. # noqa: E501
Default metric output format is {\"metric\": \"<metric_name>\", \"value\": <int_or_float>, \"epoch\": <int>, \"step\": <int>}, but if the output doesn't follow default format, please extend it here # noqa: E501
:return: The filter of this V1beta1SourceSpec. # noqa: E501
:rtype: V1beta1FilterSpec
"""
return self._filter
@filter.setter
def filter(self, filter):
"""Sets the filter of this V1beta1SourceSpec.
Default metric output format is {\"metric\": \"<metric_name>\", \"value\": <int_or_float>, \"epoch\": <int>, \"step\": <int>}, but if the output doesn't follow default format, please extend it here # noqa: E501
:param filter: The filter of this V1beta1SourceSpec. # noqa: E501
:type: V1beta1FilterSpec
"""
self._filter = filter
@property
def http_get(self):
"""Gets the http_get of this V1beta1SourceSpec. # noqa: E501
Model-train source code can expose metrics by http, such as HTTP endpoint in prometheus metric format # noqa: E501
:return: The http_get of this V1beta1SourceSpec. # noqa: E501
:rtype: V1HTTPGetAction
"""
return self._http_get
@http_get.setter
def http_get(self, http_get):
"""Sets the http_get of this V1beta1SourceSpec.
Model-train source code can expose metrics by http, such as HTTP endpoint in prometheus metric format # noqa: E501
:param http_get: The http_get of this V1beta1SourceSpec. # noqa: E501
:type: V1HTTPGetAction
"""
self._http_get = http_get
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1beta1SourceSpec, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1SourceSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 1.929688
| 2
|
crown/query.py
|
machine-w/crown
| 25
|
12778646
|
<reponame>machine-w/crown<gh_stars>10-100
# from types import SimpleNamespace
# from crown import Model
# from attr import field
from .common import *
from .field import *
from functools import reduce
import operator
class QueryCompiler(object):
field_map = {
'int': 'INT',
'smallint': 'SMALLINT',
'tinyint': 'TINYINT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE',
'nchar': 'NCHAR',
'binary': 'BINARY',
'datetime': 'TIMESTAMP',
'bool': 'BOOL',
'primary_key': 'TIMESTAMP',
}
op_map = {
OP_EQ: '=',
OP_LT: '<',
OP_LTE: '<=',
OP_GT: '>',
OP_GTE: '>=',
OP_NE: '!=',
OP_IN: 'IN',
OP_IS: 'IS',
OP_LIKE: 'LIKE',
OP_ILIKE: 'ILIKE',
OP_ADD: '+',
OP_SUB: '-',
OP_MUL: '*',
OP_DIV: '/',
OP_XOR: '^',
OP_AND: 'AND',
OP_OR: 'OR',
}
def __init__(self, quote_char='"', interpolation='?', field_overrides=None,
op_overrides=None):
self.quote_char = quote_char
self.interpolation = interpolation
self._field_map = dict_update(self.field_map, field_overrides or {})
self._op_map = dict_update(self.op_map, op_overrides or {})
def quote(self, s):
return ''.join((self.quote_char, s, self.quote_char))
def get_field(self, f):
return self._field_map[f]
def get_op(self, q):
return self._op_map[q]
def _max_alias(self, am):
max_alias = 0
if am:
for a in am.values():
i = int(a.lstrip('t'))
if i > max_alias:
max_alias = i
return max_alias + 1
def parse_expr(self, expr, alias_map=None):
s = self.interpolation
p = [expr]
if isinstance(expr, Expr):
lhs, lparams = self.parse_expr(expr.lhs, alias_map)
rhs, rparams = self.parse_expr(expr.rhs, alias_map)
s = '(%s %s %s)' % (lhs, self.get_op(expr.op), rhs)
p = lparams + rparams
elif isinstance(expr, Field):
s = expr.db_column
if alias_map and expr.model_class in alias_map:
s = '.'.join((alias_map[expr.model_class], s))
p = []
elif isinstance(expr, Func):
p = []
exprs = []
for param in expr.params:
parsed, params = self.parse_expr(param, alias_map)
exprs.append(parsed)
p.extend(params)
s = '%s(%s)' % (expr.name, ', '.join(exprs))
elif isinstance(expr, Param):
s = self.interpolation
p = [expr.data]
elif isinstance(expr, Ordering):
s, p = self.parse_expr(expr.param, alias_map)
s += ' ASC' if expr.asc else ' DESC'
elif isinstance(expr, R):
s = expr.value
p = []
elif isinstance(expr, SelectQuery):
max_alias = self._max_alias(alias_map)
clone = expr.clone()
if not expr._explicit_selection:
clone._select = (clone.model_class._meta.primary_key,)
subselect, p = self.parse_select_query(clone, max_alias, alias_map)
s = '(%s)' % subselect
elif isinstance(expr, (list, tuple)):
exprs = []
p = []
for i in expr:
e, v = self.parse_expr(i, alias_map)
exprs.append(e)
p.extend(v)
s = '(%s)' % ','.join(exprs)
# elif isinstance(expr, Model):
# s = self.interpolation
# p = [expr.get_id()]
if isinstance(expr, Leaf):
if expr.negated:
s = 'NOT %s' % s
if expr._alias:
s = ' '.join((s, 'AS', expr._alias))
return s, p
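    # Illustrative sketch (assumptions: a Field whose db_column is "current"
    # and a Param wrapping the literal 10): parsing an Expr with that field as
    # lhs, OP_GT as the operator, and Param(10) as rhs yields
    # ('(current > ?)', [10]) -- the SQL fragment plus the parameters, which
    # parse_select_query then stitches into the full statement.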
def parse_query_node(self, qnode, alias_map):
if qnode is not None:
return self.parse_expr(qnode, alias_map)
return '', []
def parse_expr_list(self, s, alias_map):
parsed = []
data = []
for expr in s:
expr_str, vars = self.parse_expr(expr, alias_map)
parsed.append(expr_str)
data.extend(vars)
return ', '.join(parsed), data
def parse_select_query(self, query, start=1, alias_map=None):
model = query.model_class
db = model._meta.database
parts = ['SELECT']
params = []
selection = query._select
select, s_params = self.parse_expr_list(selection, alias_map)
parts.append(select)
params.extend(s_params)
parts.append('FROM %s ' % (self.quote(model._meta.db_table),))
# joins = self.parse_joins(query._joins, query.model_class, alias_map)
# if joins:
# parts.append(' '.join(joins))
where, w_params = self.parse_query_node(query._where, alias_map)
if where:
parts.append('WHERE %s' % where)
params.extend(w_params)
if query._interval:
if query._interval_offset:
parts.append('INTERVAL(%s,%s)' % (query._interval,query._interval_offset))
else:
parts.append('INTERVAL(%s)' % (query._interval,))
parts.append('FILL(%s)' % (query._fill,))
if query._group_by:
group_by, g_params = self.parse_expr_list(query._group_by, alias_map)
parts.append('GROUP BY %s' % group_by)
params.extend(g_params)
if query._order_by:
order_by, _ = self.parse_expr_list(query._order_by, alias_map)
parts.append('ORDER BY %s' % order_by)
if query._limit or (query._offset and not db.empty_limit):
limit = query._limit or -1
parts.append('LIMIT %s' % limit)
if query._offset:
parts.append('OFFSET %s' % query._offset)
return ' '.join(parts), params
def _parse_field_dictionary(self, d):
sets, params = [], []
for item in d:
field = item.get('obj')
expr = item.get('value')
field_str, _ = self.parse_expr(field)
val_str, val_params = self.parse_expr(expr)
val_params = [field.db_value(vp) for vp in val_params]
sets.append((field_str, val_str))
# sets.append((field_str, val_params[0]))
params.extend(val_params)
return sets, params
def parse_insert_query(self, query):
model = query.model_class
parts = ['INSERT INTO %s' % self.quote(model._meta.db_table)]
sets, params = self._parse_field_dictionary(query._insert)
parts.append('(%s)' % ', '.join(s[0] for s in sets))
parts.append('VALUES (%s)' % ', '.join(s[1] for s in sets))
return ' '.join(parts), params
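    # Illustrative output (table/column names invented): an InsertQuery whose
    # _insert items cover db columns "ts" and "current" compiles to
    # 'INSERT INTO "meter" (ts, current) VALUES (?, ?)' with the db-converted
    # values as the parameter list.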
def parse_createsontable_query(self,query,safe=True):
model = query.model_class
parts = ['CREATE TABLE']
if safe:
parts.append('IF NOT EXISTS')
parts.append('%s USING %s TAGS ' % (query.table_name,model._meta.db_table))
sets, params = self._parse_field_dictionary(query._tags)
parts.append('(%s)' % ', '.join(s[1] for s in sets))
return ' '.join(parts), params
def field_sql(self, field):
attrs = field.attributes
attrs['column_type'] = self.get_field(field.get_db_field())
template = field.template
parts = [self.quote(field.db_column), template]
return ' '.join(p % attrs for p in parts)
def parse_create_table(self, model_class, safe=False):
parts = ['CREATE TABLE']
if safe:
parts.append('IF NOT EXISTS')
parts.append(self.quote(model_class._meta.db_table))
columns = ', '.join(self.field_sql(f) for f in model_class._meta.get_fields())
parts.append('(%s)' % columns)
if model_class._tags != None:
tags = ', '.join(self.field_sql(f) for f in model_class._tags.get_fields())
parts.append(' TAGS (%s)' % tags)
return parts
def parse_create_database(self, database, safe=False,keep= None,comp=None,replica=None,quorum=None,blocks=None):
parts = ['CREATE DATABASE']
if safe:
parts.append('IF NOT EXISTS')
parts.append(database)
if keep != None:
parts.append('KEEP %s' % keep)
if comp != None:
parts.append('COMP %s' % comp)
if replica != None:
parts.append('REPLICA %s' % replica)
if quorum != None:
parts.append('QUORUM %s' % quorum)
if blocks != None:
parts.append('BLOCKS %s' % blocks)
return parts
def parse_alter_database(self, database,keep= None,comp=None,replica=None,quorum=None,blocks=None):
parts = ['ALTER DATABASE']
parts.append(database)
if keep != None:
parts.append('KEEP %s' % keep)
if comp != None:
parts.append('COMP %s' % comp)
if replica != None:
parts.append('REPLICA %s' % replica)
if quorum != None:
parts.append('QUORUM %s' % quorum)
if blocks != None:
parts.append('BLOCKS %s' % blocks)
return parts
def parse_drop_database(self, database, safe=False):
parts = ['DROP DATABASE']
if safe:
parts.append('IF EXISTS')
parts.append(database)
return parts
def create_database(self, database, safe=False,keep= None,comp=None,replica=None,quorum=None,blocks=None):
return ' '.join(self.parse_create_database(database,safe,keep,comp,replica,quorum,blocks))
def alter_database(self, database,keep= None,comp=None,replica=None,quorum=None,blocks=None):
return ' '.join(self.parse_alter_database(database,keep,comp,replica,quorum,blocks))
def show_database(self, database):
return 'SHOW DATABASES'
def show_tables(self, database,super=False):
if super:
return 'SHOW %s.STABLES' % database
else:
return 'SHOW %s.TABLES' % database
def drop_database(self, database, safe=False):
return ' '.join(self.parse_drop_database(database, safe))
def create_table(self, model_class, safe=False):
return ' '.join(self.parse_create_table(model_class, safe))
def describe_table(self,model_class):
parts = ['DESCRIBE ']
parts.append(self.quote(model_class._meta.db_table))
return ' '.join(parts)
def change_table_tagvalue(self,model_class,name,value):
parts = ['ALTER TABLE ']
        parts.append(model_class._meta.db_table)  # TODO: the RESTful API does not handle quoted table names well
# parts.append(self.quote(model_class._meta.db_table))
if isinstance(value,str):
parts.append('SET TAG "%s" = "%s"' % (name,value))
else:
parts.append('SET TAG "%s" = %s' % (name,value))
return ' '.join(parts)
def add_tag(self,model_class,value):
parts = ['ALTER TABLE ']
        parts.append(model_class._meta.db_table)  # TODO: the RESTful API does not handle quoted table names well
# parts.append(self.quote(model_class._meta.db_table))
if isinstance(value, Field) and not isinstance(value, DateTimeField) and value.db_column:
parts.append('ADD TAG %s' % (self.field_sql(value)))
return ' '.join(parts)
def drop_tag(self,model_class,name):
parts = ['ALTER TABLE ']
        parts.append(model_class._meta.db_table)  # TODO: the RESTful API does not handle quoted table names well
# parts.append(self.quote(model_class._meta.db_table))
parts.append('DROP TAG %s' % (name))
return ' '.join(parts)
def change_tag_name(self,model_class,name,newname):
parts = ['ALTER TABLE ']
        parts.append(model_class._meta.db_table)  # TODO: the RESTful API does not handle quoted table names well
# parts.append(self.quote(model_class._meta.db_table))
parts.append('CHANGE TAG %s %s' % (name,newname))
return ' '.join(parts)
def drop_table(self, model_class, fail_silently=False, cascade=False):
parts = ['DROP TABLE']
if fail_silently:
parts.append('IF EXISTS')
parts.append(self.quote(model_class._meta.db_table))
return ' '.join(parts)
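# A hedged usage sketch (not in the original module); the database name below
# is invented, but the output follows directly from the parse_* helpers above:
#
#     compiler = QueryCompiler()
#     compiler.create_database("power", keep=365, blocks=6)
#     # -> 'CREATE DATABASE power KEEP 365 BLOCKS 6'
#     compiler.drop_database("power", safe=True)
#     # -> 'DROP DATABASE IF EXISTS power'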
class QueryResultWrapper(list):
def __init__(self, model, cursor):
self.model = model
self.cursor = cursor
cols = []
non_cols = []
for i in range(len(self.cursor.head)):
col = self.cursor.head[i]
if col in model._meta.columns:
cols.append((i, model._meta.columns[col]))
else:
non_cols.append((i, col))
self._cols = cols
self._non_cols = non_cols
super(QueryResultWrapper, self).__init__([self.simple_mode(row) for row in cursor.data])
def simple_mode(self, row):
instance = self.model()
for i, f in self._cols:
setattr(instance, f.name, f.python_value(row[i]))
for i, f in self._non_cols:
setattr(instance, f, row[i])
return instance
class Query(object):
require_commit = True
def __init__(self, model_class):
self.model_class = model_class
self.database = model_class._meta.database
self._dirty = True
self._query_ctx = model_class
# self._joins = {self.model_class: []} # adjacency graph
self._where = None
def clone(self):
query = type(self)(self.model_class)
if self._where is not None:
query._where = self._where.clone()
# query._joins = self.clone_joins()
query._query_ctx = self._query_ctx
return query
@returns_clone
def where(self, *q_or_node):
if self._where is None:
self._where = reduce(operator.and_, q_or_node)
else:
for piece in q_or_node:
self._where &= piece
def sql(self, compiler):
raise NotImplementedError()
def execute(self):
raise NotImplementedError
class SelectQuery(Query):
require_commit = False
def __init__(self, model_class, *selection):
self._explicit_selection = len(selection) > 0
all_selection=model_class._meta.get_fields()
if model_class._tags:
all_selection.extend(model_class._tags.get_fields())
self._select = self._model_shorthand(self.strToField(selection , all_selection))
self._group_by = None
self._order_by = None
self._limit = None
self._offset = None
self._qr = None
self._interval= None
self._interval_offset= None
self._fill = None
super(SelectQuery, self).__init__(model_class)
def strToField(self,selection,all_selection):
if not selection:
return all_selection
res = []
for item in selection:
if isinstance(item,str):
for field in all_selection:
if item.capitalize() == field.verbose_name or item == field.db_column:
item = field
break
res.append(item)
return res
def clone(self):
query = super(SelectQuery, self).clone()
query._explicit_selection = self._explicit_selection
query._select = list(self._select)
if self._group_by is not None:
query._group_by = list(self._group_by)
if self._order_by is not None:
query._order_by = list(self._order_by)
query._limit = self._limit
query._offset = self._offset
return query
def _model_shorthand(self, args):
accum = []
for arg in args:
if isinstance(arg, Leaf):
accum.append(arg)
return accum
@returns_clone
def group_by(self, *args):
self._group_by = self._model_shorthand(args)
@returns_clone
def order_by(self, *args):
self._order_by = list(args)
@returns_clone
def desc(self):
self._order_by = list([self.model_class._meta.primary_key.desc()])
@returns_clone
def asc(self):
self._order_by = list([self.model_class._meta.primary_key.asc()])
@returns_clone
def limit(self, lim):
self._limit = lim
@returns_clone
def offset(self, off):
self._offset = off
@returns_clone
def interval(self, interval_value, fill='NONE',offset = None,):
self._interval= interval_value
self._interval_offset= offset
if fill in ['NONE','PREV','NULL','LINEAR']:
self._fill = fill #{NONE | | PREV | NULL | LINEAR}
else:
self._fill = "VALUE, %s" % str(fill) #VALUE
@returns_clone
def paginate(self, page, page_size=20):
if page > 0:
page -= 1
self._limit = page_size
self._offset = page * page_size
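    # Example (illustrative only): paginate(3, page_size=20) sets _limit=20 and
    # _offset=40 ("LIMIT 20 OFFSET 40"); pages are 1-based, so page 3 skips the
    # first two pages of 20 rows each.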
def count(self,field=None):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [fn.Count(field if field else clone.model_class._meta.primary_key)]
# if self._group_by:
# res = clone.execute()
# if len(res) > 0:
# return res
# else:
# return None
# else:
res = clone.database.execute(clone)
if res and res[0]:
return res[0][0]
else:
return 0
def avg(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.AVG) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def twa(self,*fields):
clone = self.order_by()
if clone._where is None:
return None
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.TWA) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def sum(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.SUM) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def stddev(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.STDDEV) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def min(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.MIN) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def max(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.MAX) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def first(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.FIRST) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def last(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.LAST) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def last_row(self,*fields):
clone = self.order_by()
if clone._where is not None:
            raise Exception('last_row does not allow a where clause')
clone._limit = clone._offset = clone._where = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.LAST_ROW) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def spread(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.SPREAD) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
    # TDengine currently supports only a single column here; this method accepts several to prepare for future multi-column support
def diff(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias(field,fn.DIFF) for field in fields]
res = clone.execute()
return res
def top(self,field,top=1,alias=None):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
if alias:
clone._select = [fn.TOP(field,top).alias(alias)]
else:
clone._select = [fn.TOP(field,top)]
res = clone.execute()
return res
def bottom(self,field,bottom=1,alias=None):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
if alias:
clone._select = [fn.BOTTOM(field,bottom).alias(alias)]
else:
clone._select = [fn.BOTTOM(field,bottom)]
res = clone.execute()
return res
def apercentile(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias_tuple_field(field,3,fn.APERCENTILE) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def percentile(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias_tuple_field(field,3,fn.PERCENTILE) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def leastsquares(self,*fields):
clone = self.order_by()
clone._limit = clone._offset = None
        # TODO: how should this aggregate be computed when a GROUP BY is present?
if self._group_by:
clone._group_by = None
clone._select = [out_alias_tuple_field(field,4,fn.LEASTSQUARES) for field in fields]
res = clone.execute()
if len(res) > 0:
return res[0]
else:
return None
def exists(self):
clone = self.paginate(1, 1)
res = clone.execute()
if len(res) >0:
return True
else:
return False
def one_raw(self):
clone = self.paginate(1, 1)
res = clone.database.execute(clone)
if len(res) >0:
return res[0]
else:
return None
def one(self):
clone = self.paginate(1, 1)
res = clone.execute()
if len(res) >0:
return res[0]
else:
return None
def sql(self, compiler):
return compiler.parse_select_query(self)
def execute(self):
if self._dirty or not self._qr:
self._qr = QueryResultWrapper(self.model_class, self.database.execute(self))
self._dirty = False
return self._qr
else:
return self._qr
def all_raw(self):
return self.database.execute(self)
def all(self):
return self.execute()
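# Hedged usage sketch (not in the original module). `Meter` stands in for any
# model built on this library, assuming the peewee-style Model.select() entry
# point these query classes are written for:
#
#     Meter.select().where(Meter.current > 10).count()   # scalar COUNT
#     Meter.select().avg(Meter.current)                   # first row of AVG(current)
#     Meter.select().paginate(2, 50).all()                # rows 51-100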
class InsertQuery(Query):
def __init__(self, model_class, insert=None):
# mm = model_class._meta
# query = dict((mm.fields[f], v) for f, v in mm.get_default_dict().items())
# query.update(insert)
self._insert = insert
super(InsertQuery, self).__init__(model_class)
def clone(self):
query = super(InsertQuery, self).clone()
query._insert = list(self._insert)
return query
where = not_allowed('where clause')
def sql(self, compiler):
return compiler.parse_insert_query(self)
def execute(self):
result = self.database.execute(self)
return result
class CreateSonTableQuery(Query):
def __init__(self, model_class, values=None,table_name=None):
# mm = model_class._tags
# query = dict((mm.fields[f], v) for f, v in mm.get_default_dict().items())
# query.update(values)
self._tags = values
self.table_name = "%s.%s" % (model_class._meta.database.database,table_name)
super(CreateSonTableQuery, self).__init__(model_class)
where = not_allowed('where clause')
def clone(self):
query = super(CreateSonTableQuery, self).clone()
query._tags = list(self._tags)
return query
def sql(self, compiler):
return compiler.parse_createsontable_query(self)
def execute(self):
result = self.database.execute(self)
return result
| 2.140625
| 2
|
chemreg/resolution/tests/test_substance_index.py
|
Chemical-Curation/chemcurator
| 1
|
12778647
|
<reponame>Chemical-Curation/chemcurator
import json
from unittest.mock import Mock, patch
from rest_framework.exceptions import APIException
import pytest
import requests
from chemreg.resolution.indices import SubstanceIndex
def test_substance_index_substance_search():
sample_response = {
"data": [
{"id": "DTXSID", "type": "substance", "attributes": {"identifiers": {}}}
]
}
identifier = "foobar"
with patch("requests.get") as mocked_get:
# Mock the requests.response
mocked_response = Mock()
# return our dict as .json()
mocked_response.json.return_value = sample_response
# calls to requests.get returns our mocked response automatically
mocked_get.return_value = mocked_response
search_url = SubstanceIndex().search_url
json = SubstanceIndex().search(identifier)
# Assert a get request was sent
mocked_get.assert_called_once()
# Assert url was requested [call_number][request_args_tuple][tuple_portion]
assert mocked_get.mock_calls[0][1][0] == search_url
assert mocked_get.mock_calls[0][2]["params"]["identifier"] == identifier
# Assert the response was processed into the proper json object
assert json == sample_response
def test_substance_index_substance_search_connection_error():
    def mocked_function(*args, **kwargs):
        raise requests.exceptions.ConnectionError()
    with patch("requests.get") as mocked_get:
        # Use side_effect (not return_value) so that calling requests.get
        # actually raises the ConnectionError this test simulates.
        mocked_get.side_effect = mocked_function
with pytest.raises(APIException) as exception:
SubstanceIndex().search("foobar")
assert exception
assert str(exception) == "The Resolver service is not available right now"
@pytest.mark.django_db
def test_substance_index_substance_list_add(substance_factory):
with patch("requests.post") as mocked_post:
substances = [
substance.instance for substance in substance_factory.create_batch(2)
]
mocked_post.reset_mock()
SubstanceIndex().sync_instances(substances)
# Assert a post request was sent for every substance in the list
assert mocked_post.call_count == len(substances)
# Assert a post request was sent corresponding to each sid
for substance in substances:
assert substance.pk in [
json.loads(call.args[1])["data"]["id"]
for call in mocked_post.mock_calls
]
@pytest.mark.django_db
def test_substance_index_single_substance_add(substance_factory):
with patch("requests.post") as mocked_post:
substance = substance_factory().instance
mocked_post.reset_mock()
SubstanceIndex().sync_instances(substance)
# Assert a post request was sent for the substance
mocked_post.assert_called_once()
# Assert a post request was sent corresponding to the sid
assert (
json.loads(mocked_post.mock_calls[0].args[1])["data"]["id"] == substance.pk
)
@pytest.mark.django_db
def test_substance_index_identifiers(substance_factory):
expected_identifier_keys = [
"compound_id",
"inchikey",
"preferred_name",
"display_name",
"casrn",
"synonyms",
]
substance = substance_factory().instance
identifiers = SubstanceIndex().get_model_document(substance).get("data")
assert identifiers
assert identifiers["id"] == substance.pk
assert (
list(identifiers["attributes"]["identifiers"].keys())
== expected_identifier_keys
)
@pytest.mark.django_db
def test_synonym_indexing(
substance_factory, synonym_factory, synonym_quality_factory, synonym_type_factory
):
synonym_quality = synonym_quality_factory.create(is_restrictive=False).instance
synonym_type = synonym_type_factory().instance
substance = substance_factory.create(preferred_name="Moon Pie",).instance
synonym = synonym_factory.create(
substance={"type": "substance", "id": substance.pk},
identifier="marshmallow sandwich",
synonym_type={"type": "synonymType", "id": synonym_type.pk},
synonym_quality={"type": "synonymQuality", "id": synonym_quality.pk},
).instance
identifiers = SubstanceIndex().get_model_document(substance)["data"]["attributes"][
"identifiers"
]
weight = synonym_quality.score_weight + synonym_type.score_modifier
assert identifiers["synonyms"][0]["identifier"] == synonym.identifier
assert identifiers["synonyms"][0]["weight"] == weight
@pytest.mark.django_db
def test_substance_index_identifiers_cid(substance_factory):
sub = substance_factory().instance
def_sub = substance_factory(defined=True).instance
idents = SubstanceIndex().get_model_document(sub)["data"]["attributes"][
"identifiers"
]
def_idents = SubstanceIndex().get_model_document(def_sub)["data"]["attributes"][
"identifiers"
]
# No Compound
assert idents["compound_id"] is None
# With Compound
assert def_idents["compound_id"] == def_sub.associated_compound.pk
@pytest.mark.django_db
def test_substance_index_substance_list_delete(substance_factory):
with patch("requests.delete") as mocked_delete:
substances = [
substance.instance for substance in substance_factory.create_batch(2)
]
        # We are not deleting. Just calling SubstanceIndex as if a delete had occurred
SubstanceIndex().sync_instances(substances, delete=True)
# Assert a delete request was sent for every substance in the list
assert mocked_delete.call_count == len(substances)
mock_urls = [call.args[0] for call in mocked_delete.mock_calls]
for substance in substances:
# See if this sid occurs in any of the urls from the mocked_delete
assert any(substance.pk in url for url in mock_urls)
@pytest.mark.django_db
def test_substance_index_single_substance_delete(substance_factory):
with patch("requests.delete") as mocked_delete:
substance = substance_factory().instance
        # We are not deleting. Just calling SubstanceIndex as if a delete had occurred
SubstanceIndex().sync_instances(substance, delete=True)
mocked_delete.assert_called_once()
# See if this sid occurs in the url from the mocked_delete
mock_url = mocked_delete.mock_calls[0].args[0]
assert substance.pk in mock_url
def test_substance_index_delete_all():
with patch("requests.delete") as mocked_delete:
SubstanceIndex().delete_all_instances()
# Assert a delete request was sent
mocked_delete.assert_called_once()
| 2.40625
| 2
|
SATD_Detector/compare.py
|
isabelaedilene/technicalDebtTisVI
| 0
|
12778648
|
<filename>SATD_Detector/compare.py
from csv import writer
import pandas as pd
with open("../Sonar/analiseSonar.csv", "r", encoding="utf-8") as f:
csv_string = f.read()
with open("sonar_analysis.csv", "w", encoding="utf-8") as f:
csv = writer(f)
for line in csv_string.splitlines():
csv.writerow(line.strip('"').split(","))
satd_dirty = pd.read_csv("satd_files_full.csv")
sonar = pd.read_csv("sonar_analysis.csv")
sonar_projects = sonar.project.drop_duplicates().to_list()
satd = satd_dirty[satd_dirty.name.isin(sonar_projects)]
satd.to_csv("satd_files.csv")
# print(len(satd)/len(satd_dirty))
# print(len(satd_dirty) - len(satd))
satd_files = satd["file_path"].to_list()
sonar_satd = sonar[sonar.file_path.isin(satd_files)]
sonar_codesmells = sonar[sonar.code_smells > 0]
# Wrap the comparisons in parentheses: `&` binds more tightly than `>`/`==`,
# so without them these masks are not the intended boolean filters.
sonar_not_satd = sonar[~sonar.file_path.isin(satd_files) & (sonar.code_smells > 0)]
sonar_codesmells_satd = sonar[sonar.file_path.isin(satd_files) & (sonar.code_smells > 0)]
sonar_not_codesmells_satd = sonar[sonar.file_path.isin(satd_files) & (sonar.code_smells == 0)]
sonar_satd.to_csv("results_sonar_satd.csv")
sonar_not_satd.to_csv("results_sonar_not_satd.csv")
sonar_codesmells.to_csv("results_sonar_codesmells.csv")
sonar_codesmells_satd.to_csv("results_sonar_codesmells_satd.csv")
sonar_not_codesmells_satd.to_csv("results_sonar_not_codesmells_satd.csv")
print(f"Sonar SATDs = {len(sonar_satd)}")
print(f"Sonar code_smells = {len(sonar_codesmells)}")
print(f"Sonar SATDs, code_smells=0 = {len(sonar_not_codesmells_satd)}")
print(f"Sonar SATDs, code_smells>0 = {len(sonar_codesmells_satd)}")
print(f"SATD Detector files = {len(satd_files)}")
print(f"code_smells&SATD/SATD files = {len(sonar_codesmells_satd)/len(satd_files)*1e2 :.4}%")
print(f"SATD + CS Mean NcLoc = {sonar_codesmells_satd.ncloc.mean()}")
print(f"SATD + CS Mean Ciclomatic = {sonar_codesmells_satd.ciclomatic.mean()}")
print(f"SATD + CS Mean Cognitive = {sonar_codesmells_satd.cognitive.mean()}")
print(f"SATD + CS Mean CodeSmells = {sonar_codesmells_satd.code_smells.mean()}")
print(f"SATD + CS Median NcLoc = {sonar_codesmells_satd.ncloc.median()}")
print(f"SATD + CS Median Ciclomatic = {sonar_codesmells_satd.ciclomatic.median()}")
print(f"SATD + CS Median Cognitive = {sonar_codesmells_satd.cognitive.median()}")
print(f"SATD + CS Median CodeSmells = {sonar_codesmells_satd.code_smells.median()}")
print(f"SATD - CS Mean NcLoc = {sonar_not_codesmells_satd.ncloc.mean()}")
print(f"SATD - CS Mean Ciclomatic = {sonar_not_codesmells_satd.ciclomatic.mean()}")
print(f"SATD - CS Mean Cognitive = {sonar_not_codesmells_satd.cognitive.mean()}")
print(f"SATD - CS Mean CodeSmells = {sonar_not_codesmells_satd.code_smells.mean()}")
print(f"SATD - CS Median NcLoc = {sonar_not_codesmells_satd.ncloc.median()}")
print(f"SATD - CS Median Ciclomatic = {sonar_not_codesmells_satd.ciclomatic.median()}")
print(f"SATD - CS Median Cognitive = {sonar_not_codesmells_satd.cognitive.median()}")
print(f"SATD - CS Median CodeSmells = {sonar_not_codesmells_satd.code_smells.median()}")
| 3
| 3
|
eval/fig3.py
|
tk2lab/logbesselk
| 0
|
12778649
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from . import common
def main(debug=False):
name = ['I', 'A', 'S', 'C']
suffix = ['', '', '', '']
df0 = []
for n, s in zip(name, suffix):
prec = pd.read_csv(f'results/logk_prec_{n}{s}.csv')
prec = prec.groupby(['v', 'x'])['log_err'].mean()
time = pd.read_csv(f'results/logk_time_{n}{s}.csv')
time = time.groupby(['v', 'x'])['time'].mean()
tmp = pd.concat([prec, time], axis=1)
tmp['time'] = np.where(tmp['log_err'] < 3, 1000 * tmp['time'], np.nan)
tmp = tmp['time']
tmp.name = n
df0.append(tmp)
df0 = pd.concat(df0, axis=1)
name = [['I', 'A'], ['S', 'C']]
pos = [[[0.1, 0.85], [0.85, 0.1]], [[0.1, 0.1], [0.1, 0.85]]]
fig = common.figure(figsize=(5.5, 4), box=debug)
ax = fig.subplots(
2, 3, sharex=True, sharey=True,
gridspec_kw=dict(width_ratios=(1,1,0.15)),
)
ax[0, 2].set_visible(False)
ax[1, 2].set_visible(False)
cbar = fig.add_axes([0.93, 0.1, 0.02, 0.85])
xticks = [0, 1, 5, 10, 50]
yticks = [0.1, 0.5, 1, 5, 10, 50]
cmap = plt.get_cmap('Greys').copy()
cmap.set_bad(color='gray')
for i in range(2):
for j in range(2):
hm = df0[name[i][j]].unstack(0)
if i == j == 0:
args = dict(cbar_ax=cbar)
else:
args = dict(cbar=False)
sns.heatmap(hm, vmin=0, vmax=28, cmap=cmap, ax=ax[i, j], **args)
ax[i, j].invert_yaxis()
ax[i, j].text(*pos[i][j], name[i][j], transform=ax[i, j].transAxes)
ax[i, j].set_xticks([40*np.log10(x+1) for x in xticks])
ax[i, j].set_xticklabels([f"${k}$" for k in xticks], rotation=0)
ax[i, j].xaxis.set_ticks_position('both')
ax[i, j].set_yticks([40*(np.log10(x)+1) for x in yticks])
ax[i, j].set_yticklabels([f"${k}$" for k in yticks])
ax[i, j].yaxis.set_ticks_position('both')
if i == 1:
ax[i, j].set_xlabel('$v$')
else:
ax[i, j].set_xlabel('')
if j == 0:
ax[i, j].set_ylabel('$x$')
else:
ax[i, j].set_ylabel('')
cbar = ax[0, 0].collections[0].colorbar
cbar.set_ticks([0, 10, 20])
cbar.set_ticklabels([f'${{{l}}}$' for l in [0, 10, 20]])
fig.savefig('figs/fig3.pdf')
if __name__ == '__main__':
main(debug=False)
| 2.328125
| 2
|
i2b2/utils/path.py
|
jtourille/i2b2-coref-task1c-converter
| 1
|
12778650
|
<reponame>jtourille/i2b2-coref-task1c-converter<filename>i2b2/utils/path.py
import os
def ensure_dir(directory: str) -> None:
"""
Creates a directory
Args:
directory (str): path to create
Returns:
None
"""
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError as e:
        # Re-raise any error except errno 17 (EEXIST), i.e. the directory was created concurrently
if e.errno != 17:
raise
def get_other_extension(filename: str, target_extension: str) -> str:
"""
Returns the filename given as argument with another extension (given as argument)
Args:
filename (str): filename to modify
target_extension (str): new extension
Returns:
str: filename with new extension
"""
basename, extension = os.path.splitext(filename)
return "{0}.{1}".format(basename, target_extension)
def remove_abs(path: str) -> str:
"""
Remove leading slash from path
Args:
path (str): path from which the leading slash must be removed
Returns:
str: path without leading slash
"""
if os.path.isabs(path):
return path.lstrip("/")
else:
return path
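if __name__ == "__main__":
    # Small self-check (not part of the original module): exercise the helpers
    # on throwaway inputs to show the expected results.
    print(get_other_extension("notes/report.xml", "txt"))  # notes/report.txt
    print(remove_abs("/corpus/train/doc01.txt"))           # corpus/train/doc01.txt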
| 2.90625
| 3
|