Dataset schema (one record per source file; nullable columns marked "nullable"):

- hexsha: string (length 40)
- size: int64 (4 to 1.02M)
- ext: string (8 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 4 to 209)
- max_stars_repo_name: string (length 5 to 121)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 4 to 209)
- max_issues_repo_name: string (length 5 to 121)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 4 to 209)
- max_forks_repo_name: string (length 5 to 121)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 4 to 1.02M)
- avg_line_length: float64 (1.07 to 66.1k)
- max_line_length: int64 (4 to 266k)
- alphanum_fraction: float64 (0.01 to 1)
d221a20f328570df4486c769b0b3faa56183c596
| 3,357
|
py
|
Python
|
RCJ_pcms_base/scripts/QrCode.py
|
FablabHome/PCMS_home_robotics
|
21202fb73811edfcbdfde204ba33fb8bd4360d4b
|
[
"MIT"
] | 1
|
2021-09-23T09:42:32.000Z
|
2021-09-23T09:42:32.000Z
|
RCJ_pcms_base/scripts/QrCode.py
|
FablabHome/PCMS_home_robotics
|
21202fb73811edfcbdfde204ba33fb8bd4360d4b
|
[
"MIT"
] | null | null | null |
RCJ_pcms_base/scripts/QrCode.py
|
FablabHome/PCMS_home_robotics
|
21202fb73811edfcbdfde204ba33fb8bd4360d4b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import time
import cv2
import numpy as np
import rospy
from cv_bridge import CvBridge
from pyzbar import pyzbar
from sensor_msgs.msg import CompressedImage
from std_msgs.msg import String
rospy.init_node('qr_code')
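# Explanatory note (added): unique_count_app (below) flattens the image into a
# list of pixels and returns the single most frequent colour; e.g. a 2x2 BGR
# patch with three black pixels and one white pixel yields array([0, 0, 0]).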
def unique_count_app(a):
colors, count = np.unique(a.reshape(-1,a.shape[-1]), axis=0, return_counts=True)
return colors[count.argmax()]
def callback(img: CompressedImage):
global bridge, srcframe
srcframe = bridge.compressed_imgmsg_to_cv2(img)
rospy.set_param('~lock', True)
pub = rospy.Publisher(
'~status',
String,
queue_size=1
)
speaker_pub = rospy.Publisher(
'/speaker/say',
String,
queue_size=1
)
rospy.Subscriber(
'/top_camera/rgb/image_raw/compressed',
CompressedImage,
callback,
queue_size=1
)
def read_barcodes(frame):
barcodes = pyzbar.decode(frame)
crop_img2 = None
for barcode in barcodes:
x, y, w, h = barcode.rect
barcode_info = barcode.data.decode('utf-8')
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
crop_img2 = frame[y:y + h, x:x + w]
font = cv2.FONT_HERSHEY_DUPLEX
return frame, crop_img2
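# Explanatory note (added): detect_color (below) averages the cropped code
# region per channel and treats the crop as valid only when its mean red
# component is below its mean green component (OpenCV images are BGR, so
# index -1 is the red channel and index 1 is the green channel).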
def detect_color(img):
try:
valid = True
height, width, _ = img.shape
avg_color_per_row = np.average(img, axis=0)
avg_colors = np.average(avg_color_per_row, axis=0)
int_averages = np.array(avg_colors, dtype=np.uint8)
print(f'int_averages: {int_averages}')
if int_averages[-1] >= int_averages[1]:
valid = False
return valid
except ZeroDivisionError:
return None
srcframe = None
bridge = CvBridge()
def main():
while not rospy.is_shutdown():
if srcframe is None:
continue
frame = srcframe.copy()
img_y, img_x, c = frame.shape
x = int(img_x / 2 - 160)
y = int(img_y / 2 - 160)
w = 320
h = 320
crop_img = frame[y:y + h, x:x + w]
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 3)
crop_img, crop_img2 = read_barcodes(crop_img)
frame[y:y + h, x:x + w] = crop_img
cv2.imshow('Barcode/QR code', frame)
if cv2.waitKey(1) & 0xFF == 27:
break
if crop_img2 is None:
continue
valid = detect_color(crop_img2)
rospy.loginfo(unique_count_app(crop_img2))
if valid:
cv2.putText(frame, "Valid", (int(img_y / 2 - 60), int(img_y / 2 + 200)), cv2.FONT_HERSHEY_SIMPLEX, 3,
(0, 255, 0), 5, cv2.LINE_AA, False)
cv2.imshow('Barcode/QR code', frame)
cv2.waitKey(3)
time.sleep(3)
pub.publish("Valid")
else:
cv2.putText(frame, "Invalid", (int(img_y / 2 - 60), int(img_y / 2 + 200)), cv2.FONT_HERSHEY_SIMPLEX,
3,
(0, 0, 255), 5, cv2.LINE_AA, False)
pub.publish('Invalid')
cv2.imshow('Barcode/QR code', frame)
cv2.waitKey(3)
time.sleep(3)
if not rospy.get_param('~lock'):
if not valid:
speaker_pub.publish('Mister, Your health code is invalid')
else:
speaker_pub.publish('Your health code is valid')
#
if __name__ == '__main__':
main()
| 25.625954
| 113
| 0.579386
|
b842c5e20c0185531d6b724ef71046f2b36c1421
| 3,946
|
py
|
Python
|
deeptrack/sequences.py
|
BenjaminMidtvedt/DeepTrack-2.0
|
64245f31e63011fb48d38f211134774bbce28cf4
|
[
"MIT"
] | 1
|
2022-01-16T14:51:07.000Z
|
2022-01-16T14:51:07.000Z
|
deeptrack/sequences.py
|
BenjaminMidtvedt/DeepTrack-2.0
|
64245f31e63011fb48d38f211134774bbce28cf4
|
[
"MIT"
] | null | null | null |
deeptrack/sequences.py
|
BenjaminMidtvedt/DeepTrack-2.0
|
64245f31e63011fb48d38f211134774bbce28cf4
|
[
"MIT"
] | null | null | null |
"""Features and tools for resolving sequences of images.
Classes
-------
Sequence
Resolves a feature as a sequence.
Functions
---------
Sequential
Converts a feature to be resolved as a sequence.
"""
from .features import Feature
from .properties import SequentialProperty
from .types import PropertyLike
class Sequence(Feature):
"""Resolves a feature as a sequence.
The input feature is resolved `sequence_length` times, with the kwarg
    arguments `sequence_length` and `sequence_step` passed to all properties
of the feature set.
Parameters
----------
feature : Feature
The feature to resolve as a sequence.
sequence_length : int
The number of times to resolve the feature.
Attributes
----------
feature : Feature
The feature to resolve as a sequence.
"""
__distributed__ = False
def __init__(
self, feature: Feature, sequence_length: PropertyLike[int] = 1, **kwargs
):
self.feature = feature
super().__init__(sequence_length=sequence_length, **kwargs)
# Require update
# self.update()
def get(self, input_list, sequence_length=None, **kwargs):
outputs = input_list or []
for sequence_step in range(sequence_length):
propagate_sequential_data(
self.feature,
sequence_step=sequence_step,
sequence_length=sequence_length,
)
out = self.feature()
outputs.append(out)
return outputs
def Sequential(feature: Feature, **kwargs):
"""Converts a feature to be resolved as a sequence.
Should be called on individual features, not combinations of features. All
    keyword arguments will be treated as sequential properties and will be
passed to the parent feature.
If a property from the keyword argument already exists on the feature, the
    existing property will be used to initialize the passed property (that is,
it will be used for the first timestep).
Parameters
----------
feature : Feature
Feature to make sequential.
kwargs
Keyword arguments to pass on as sequential properties of `feature`.
"""
for property_name in kwargs.keys():
if property_name in feature.properties:
# Insert property with initialized value
feature.properties[property_name] = SequentialProperty(
feature.properties[property_name], **feature.properties
)
else:
# insert empty property
feature.properties[property_name] = SequentialProperty()
feature.properties.add_dependency(feature.properties[property_name])
feature.properties[property_name].add_child(feature.properties)
for property_name, sampling_rule in kwargs.items():
prop = feature.properties[property_name]
all_kwargs = dict(
previous_value=prop.previous_value,
previous_values=prop.previous_values,
sequence_length=prop.sequence_length,
sequence_step=prop.sequence_step,
)
for key, val in feature.properties.items():
if key == property_name:
continue
if isinstance(val, SequentialProperty):
all_kwargs[key] = val
all_kwargs["previous_" + key] = val.previous_values
else:
all_kwargs[key] = val
if not prop.initialization:
prop.initialization = prop.create_action(sampling_rule, **all_kwargs)
prop.current = prop.create_action(sampling_rule, **all_kwargs)
return feature
def propagate_sequential_data(X, **kwargs):
for dep in X.recurse_dependencies():
if isinstance(dep, SequentialProperty):
for key, value in kwargs.items():
if hasattr(dep, key):
getattr(dep, key).set_value(value)
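# Minimal usage sketch (added; not part of the original module). `MyFeature`
# and its `position` property are hypothetical stand-ins for a concrete
# DeepTrack feature. Sequential() wraps `position` in a SequentialProperty
# whose sampling rule may read `previous_value`, `sequence_step` and
# `sequence_length`; Sequence() then resolves the wrapped feature once per
# timestep, collecting one output per step:
#
#   feature = Sequential(
#       MyFeature(position=(0.0, 0.0)),
#       position=lambda previous_value, sequence_step: (
#           previous_value[0] + sequence_step,
#           previous_value[1],
#       ),
#   )
#   frames = Sequence(feature, sequence_length=10)  # resolve to get 10 outputs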
| 29.22963
| 81
| 0.64369
|
79f8732c92f509772bfae9066ba9f8555164e21c
| 113
|
py
|
Python
|
biclustering/__init__.py
|
lucasbrunialti/biclustering-experiments
|
30e51e23b0c3d91939bf7ec49c47d3035e6ecb57
|
[
"BSD-2-Clause"
] | 3
|
2017-11-21T08:21:32.000Z
|
2020-03-10T14:57:06.000Z
|
biclustering/__init__.py
|
lucasbrunialti/biclustering-experiments
|
30e51e23b0c3d91939bf7ec49c47d3035e6ecb57
|
[
"BSD-2-Clause"
] | null | null | null |
biclustering/__init__.py
|
lucasbrunialti/biclustering-experiments
|
30e51e23b0c3d91939bf7ec49c47d3035e6ecb57
|
[
"BSD-2-Clause"
] | 4
|
2017-01-18T18:10:37.000Z
|
2021-12-15T02:23:15.000Z
|
from biclustering import Bicluster, MSR, DeltaBiclustering
__all__ = ['Bicluster', 'MSR', 'DeltaBiclustering']
| 22.6
| 58
| 0.769912
|
0ddb9a5c1a677b74b8d70bf2bc7a8b2fc3800271
| 11,961
|
py
|
Python
|
modelling/models/input_variation_hydro_zone_model.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
modelling/models/input_variation_hydro_zone_model.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
modelling/models/input_variation_hydro_zone_model.py
|
bcgov-c/wally
|
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
from numpy.core.numeric import allclose
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import math
import csv
import itertools
from ast import literal_eval
from sklearn.metrics import mean_squared_error, r2_score
from pathlib import Path
import matplotlib.backends.backend_pdf
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import pyplot
from operator import attrgetter
import pickle
from joblib import dump, load
# Model Settings
OUTPUT_DIR = 'Sept_9_2021'
TESTING_SIZE = 0.3
TEST_ALL_DATA = False
FOLDS = 50
DEPENDANT_VARIABLE = '7Q10-S'
NORMALIZE_LOWFLOW = True
ALLOCATED_THRESHOLD = 2
NET_THRESHOLD = 0
# ZONES = ["all_data"]
# ZONES = ["25", "26", "27", "all_data"]
ZONES = ["all_data"]
ALL_ZONES = False
directory = '../data/4_training/sept2_2021'
output_directory_base = "./output/" + OUTPUT_DIR
zone_scores = {}
count = 0
inputs_list = ["average_slope","annual_precipitation","glacial_coverage","potential_evapotranspiration","median_elevation","solar_exposure"]
all_combinations = []
for r in range(len(inputs_list) + 1):
combinations_object = itertools.combinations(inputs_list, r)
combinations_list = list(combinations_object)
all_combinations += combinations_list
# limit input list size
all_combinations = [x for x in all_combinations if len(x)<=5]
print('total combinations: ', len(all_combinations))
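# Note (added): with 6 candidate inputs, range(len(inputs_list) + 1) yields all
# 2**6 = 64 subsets, including the empty one; dropping subsets longer than 5
# removes only the full 6-input set, so 63 combinations are printed here. The
# empty subset is skipped later in the loop because, after the dependent
# variable is appended, it has length 1.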
def filter_data(df):
return df[(df["percent_allocated_mad"] <= ALLOCATED_THRESHOLD) | (df["percent_allocated_mad_abs"] == NET_THRESHOLD)]
# 1. Find best performing inputs for each zone
for filename in sorted(os.listdir(directory)):
if filename.endswith(".csv"):
zone_name = filename.split('.')[0]
# limits zones calculated if constant has zone numbers in it
if zone_name in ZONES or ALL_ZONES:
print("Starting Zone:", zone_name)
else:
continue
model = LinearRegression()
zone_df = pd.read_csv(os.path.join(directory, filename))
zone_df = filter_data(zone_df)
zone_df = zone_df.dropna(subset=[DEPENDANT_VARIABLE]) # drop NaNs
print(zone_df)
# Divide 7Q10-S by MAD
if DEPENDANT_VARIABLE == '7Q10-S' and NORMALIZE_LOWFLOW:
print('NORMALIZING LOWFLOW BY DIVIDING BY MAD')
zone_df['7Q10-S'] = zone_df['7Q10-S'] / ((zone_df['mean'] / 1000) * zone_df['drainage_area'])
print("ZONE DATA ROWS COUNT:", zone_df.shape)
all_combo_stats = []
local_dir = output_directory_base + "/zone_" + str(zone_name) + "/"
Path(local_dir).mkdir(parents=True, exist_ok=True)
for inputs in all_combinations:
count += 1
inputs = list(inputs) + [DEPENDANT_VARIABLE]
# print(inputs)
if len(inputs) < 2:
continue
features_df = zone_df[inputs]
X = features_df.dropna(subset=[DEPENDANT_VARIABLE]) # drop NaNs
            y = X.get(DEPENDANT_VARIABLE) # dependent variable
            X = X.drop([DEPENDANT_VARIABLE], axis=1) # independent variables
if len(X.index) <=1:
continue
best_r2 = 0
min_r2 = 1
all_r2 = []
for i in range(0, FOLDS):
if TEST_ALL_DATA:
model.fit(X, y)
X_test = X
y_test = y
else:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TESTING_SIZE)
model.fit(X_train, y_train)
# model_score = model.score(X_test, y_test)
r2 = model.score(X, y)
y_pred = model.predict(X_test)
correlation_matrix = np.corrcoef(y_test.to_numpy(), y_pred)
correlation_xy = correlation_matrix[0,1]
r2 = correlation_xy**2
# print(r2)
if r2 > best_r2:
best_r2 = r2
if r2 < min_r2:
min_r2 = r2
all_r2.append(r2)
combo_stats = {
"best_r2": best_r2,
"min_r2": min_r2,
"avg_r2": sum(all_r2) / len(all_r2),
"columns": list(X.columns.values)
}
print(combo_stats)
all_combo_stats.append(combo_stats)
# Find best performing combo based on three factors
highest_min_r2_combo = {'best_r2': 0, 'min_r2': 0, 'avg_r2': 0, 'columns': []}
highest_best_r2_combo = {'best_r2': 0, 'min_r2': 0, 'avg_r2': 0, 'columns': []}
highest_avg_r2_combo = {'best_r2': 0, 'min_r2': 0, 'avg_r2': 0, 'columns': []}
for combo in all_combo_stats:
if combo["best_r2"] == 0 or combo["min_r2"] == 1 or combo["avg_r2"] is math.nan:
continue
if combo["min_r2"] > highest_min_r2_combo["min_r2"]:
highest_min_r2_combo = combo
if combo["best_r2"] > highest_best_r2_combo["best_r2"]:
highest_best_r2_combo = combo
if combo["avg_r2"] > highest_avg_r2_combo["avg_r2"]:
highest_avg_r2_combo = combo
print("highest_min_r2_combo", highest_min_r2_combo)
score = highest_min_r2_combo["min_r2"]
best_inputs = highest_min_r2_combo["columns"]
zone_scores[zone_name] = {
"score": score,
"best_inputs": best_inputs
}
# output all variation results file
local_file_path = local_dir + "zone_" + str(zone_name) + "_input_variation_results.csv"
df = pd.DataFrame(all_combo_stats)
df.to_csv(local_file_path, index=False)
print('ZONE:', zone_name, 'SCORE:', score, 'INPUTS:', best_inputs)
else:
continue
# 2. Run iterations on each zones best input set and output the summary
for attr, value in zone_scores.items():
zone_name = 'zone_' + str(attr)
zone_df = pd.read_csv(os.path.join(directory, str(attr) + '.csv'))
zone_df = filter_data(zone_df)
# Divide 7Q10-S by MAD
if DEPENDANT_VARIABLE == '7Q10-S' and NORMALIZE_LOWFLOW:
print('NORMALIZING LOWFLOW BY DIVIDING BY MAD')
zone_df['7Q10-S'] = zone_df['7Q10-S'] / ((zone_df['mean'] / 1000) * zone_df['drainage_area'])
print("2nd Round Size:", zone_df.shape)
inputs = value["best_inputs"] + [DEPENDANT_VARIABLE]
model = LinearRegression()
features_df = zone_df[inputs + ['station_number']]
X = features_df.dropna(subset=[DEPENDANT_VARIABLE]) # drop NaNs
    y = X.get(DEPENDANT_VARIABLE) # dependent variable
    X = X.drop([DEPENDANT_VARIABLE], axis=1) # independent variables
if len(X.index) <=1:
continue
fold_counter = 1
all_models = []
best_model = None
fold_dir = local_dir + 'fold_data/'
Path(fold_dir).mkdir(parents=True, exist_ok=True)
for i in range(0, FOLDS):
# Train Model
if TEST_ALL_DATA:
model.fit(X, y)
X_test = X
y_test = y
else:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TESTING_SIZE)
model.fit(X_train.drop(['station_number'], axis=1), y_train)
y_pred = model.predict(X_test.drop(['station_number'], axis=1))
# r2 = r2_score(y_test.to_numpy(), y_pred)
r2_test = model.score(X_test.drop(['station_number'], axis=1), y_test)
correlation_matrix = np.corrcoef(y_test.to_numpy(), y_pred)
correlation_xy = correlation_matrix[0,1]
r2 = correlation_xy**2
print("")
print("r2_test", r2_test, r2)
print(fold_counter, r2)
print(y_test.to_numpy())
print(type(y_test.to_numpy()))
print(y_pred)
print(type(y_pred))
print("")
rmse = mean_squared_error(y_test, y_pred, squared=False)
X_test['y_test'] = y_test
X_test['y_pred'] = y_pred
X_test['r2'] = r2
X_test['rmse'] = rmse
fold_file_path = fold_dir + str(fold_counter) + '.csv'
X_test_df = pd.DataFrame(X_test)
try:
print(fold_file_path)
print(X_test_df)
X_test_df.to_csv(fold_file_path, index=False)
print("successfully exported fold data")
except Exception as e:
print("Error exporting fold data", e)
feat_import = model.coef_
intercept = model.intercept_
output_dir = output_directory_base + "/zone_" + str(attr) + "/"
# Report Output
Path(output_dir).mkdir(parents=True, exist_ok=True)
pdf = matplotlib.backends.backend_pdf.PdfPages(output_dir + "fold_" + str(fold_counter) + "_stats.pdf")
size = (10, 6)
marker_size = 9
# Pred MAR vs Real MAR
fig_pred, ax1 = plt.subplots(figsize=size)
ax1 = sns.regplot(y_pred, y_test, fit_reg=True, truncate=True)
fig_pred.suptitle("Predicted MAR vs Real MAR")
xlim = ax1.get_xlim()[1]
ylim = ax1.get_ylim()[1]
max_size = max(xlim, ylim)
ax1.set_xlabel('Pred MAR')
ax1.set_ylabel('Real MAR')
lx = np.linspace(0,max_size/2,100)
ly = lx
ax1.plot(lx, ly, ':')
ax1.legend()
pdf.savefig( fig_pred )
# Covariance of Features
corr = X.corr()
corr.style.background_gradient(cmap='coolwarm')
fig_cov, ax = plt.subplots(figsize=size)
ax.set_xticks(range(len(corr.columns)))
ax.set_yticks(range(len(corr.columns)))
fig_cov.suptitle("Feature Correlations")
ax = sns.heatmap(
corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
)
pdf.savefig( fig_cov )
pdf.close()
model_test = {
"model": model,
"r2": round(r2,4),
"rmse": round(rmse,6),
"gain": feat_import,
"fold": fold_counter
}
all_models.append(model_test)
if best_model is None:
best_model = model_test
elif rmse < best_model['rmse']:
best_model = model_test
fold_counter += 1
# Create summary reports
model = best_model['model']
rmse = best_model['rmse']
r2 = best_model['r2']
model_output_dir =output_directory_base + "/" + str(zone_name) + "/" + zone_name + '.joblib'
dump(model, model_output_dir)
r2s = []
rmses = []
foldsArr = []
for item in all_models:
# if item["r2"] > 0:
r2s.append(item["r2"])
rmses.append(item["rmse"])
foldsArr.append(item["fold"])
fig1, (ax1, ax2, ax3) = pyplot.subplots(3, 1, figsize=(10,20))
fig1.subplots_adjust(hspace=0.4)
ax1.plot(r2s, rmses, 'o')
ax1.set_xlabel("R2")
ax1.set_ylabel("RMSE")
ax1.title.set_text('RMSE vs R2')
xrng = range(1, FOLDS + 1)
ax2.plot(xrng, r2s, 'o')
avg = sum(r2s) / len(r2s)
ax2.plot(xrng, [avg] * len(xrng), ':')
ax2.set_xlabel("FOLDS")
ax2.set_ylabel("R2")
ax2.title.set_text('Average R2')
ax2.set_ylim([min(avg - 0.05, -1),1])
ax3.plot(xrng, rmses, 'o')
avg = sum(rmses) / len(rmses)
ax3.plot(xrng, [avg] * len(xrng), ':')
ax3.set_xlabel("FOLDS")
ax3.set_ylabel("RMSE")
ax3.title.set_text('Average RMSE')
for i, fold in enumerate(foldsArr):
ax1.annotate(str(fold), (r2s[i], rmses[i]))
# pyplot.show()
local_img_path = output_dir + zone_name + "_summary.png"
fig1.savefig(local_img_path, bbox_inches='tight')
print("zone", zone_name)
print("r2 score", r2)
rmse_95_p = rmse * 2
print("RMSE 95%", rmse_95_p)
| 33.317549
| 140
| 0.596188
|
3df36d90ba99b3345b191890bb0ea532728f189f
| 1,491
|
py
|
Python
|
autoscaler/__init__.py
|
vanvalenlab/kiosk-autoscaler
|
770cc8ec47927549e64c032b4df81732235bdbf9
|
[
"Apache-2.0"
] | null | null | null |
autoscaler/__init__.py
|
vanvalenlab/kiosk-autoscaler
|
770cc8ec47927549e64c032b4df81732235bdbf9
|
[
"Apache-2.0"
] | 13
|
2018-10-29T19:54:17.000Z
|
2020-04-13T20:38:23.000Z
|
autoscaler/__init__.py
|
vanvalenlab/kiosk-autoscaler
|
770cc8ec47927549e64c032b4df81732235bdbf9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2020 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/kiosk-autoscaler/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from autoscaler import redis
from autoscaler.autoscaler import Autoscaler
del absolute_import
del division
del print_function
| 40.297297
| 78
| 0.751844
|
115d769e78e5d0d9a0e60c0eee72c88e16f1dfd6
| 4,657
|
py
|
Python
|
Merge-NMAP-XML-Files.py
|
osvaldohdzm/Port-Services-Scan-Tool
|
7ecad2d3299fd916eef93b45fbebcd475b3135af
|
[
"MIT"
] | null | null | null |
Merge-NMAP-XML-Files.py
|
osvaldohdzm/Port-Services-Scan-Tool
|
7ecad2d3299fd916eef93b45fbebcd475b3135af
|
[
"MIT"
] | null | null | null |
Merge-NMAP-XML-Files.py
|
osvaldohdzm/Port-Services-Scan-Tool
|
7ecad2d3299fd916eef93b45fbebcd475b3135af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
####################################################################################
#
# nMapMerge.py
#
# Description
# Combine nMap xml files into one XML
#
# Example
# python nMapMerge.py -f nmap_scan.xml
# python nMapMerge.py -f ./nMap/
#
# Author:
# Hue B. Solutions LLC, CBHue
#
#
####################################################################################
import os
import re
import time
import logging
import xml.etree.ElementTree as ET
from argparse import ArgumentParser
from xml.etree.ElementTree import ParseError
def merge_nMap(xmlFile,mf):
HOSTS = 0
with open(mf, mode = 'a', encoding='utf-8') as mergFile:
with open(xmlFile) as f:
try:
nMapXML = ET.parse(f)
for host in nMapXML.findall('host'):
HOSTS = HOSTS + 1
cHost = ET.tostring(host, encoding='unicode', method='xml')
mergFile.write(cHost)
mergFile.flush()
            except ParseError:
                print("Error parsing nmap XML file:", xmlFile)
return HOSTS
def addHeader(f):
nMap_Header = '<?xml version="1.0" encoding="UTF-8"?>'
nMap_Header += '<!DOCTYPE nmaprun>'
nMap_Header += '<?xml-stylesheet href="file:///usr/share/nmap/nmap.xsl" type="text/xsl"?>'
nMap_Header += '<!-- Nmap Merged with nMapMergER.py https://github.com/CBHue/nMapMergER -->'
nMap_Header += '<nmaprun scanner="nmap" args="nmap -iL hostList.txt" start="1" startstr="https://github.com/CBHue/nMapMerge/nMapMerge.py" version="7.70" xmloutputversion="1.04">'
nMap_Header += '<scaninfo type="syn" protocol="tcp" numservices="1" services="1"/>'
nMap_Header += '<verbose level="0"/>'
nMap_Header += '<debugging level="0"/>'
mFile = open(f, "w")
mFile.write(nMap_Header)
mFile.close()
def addFooter(f, h):
nMap_Footer = '<runstats><finished time="1" timestr="Wed Sep 0 00:00:00 0000" elapsed="0" summary="Nmap done at Wed Sep 0 00:00:00 0000; ' + str(h) + ' IP address scanned in 0.0 seconds" exit="success"/>'
nMap_Footer += '</runstats>'
nMap_Footer += '</nmaprun>'
mFile = open(f, "a")
mFile.write(nMap_Footer)
mFile.close()
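# Illustrative note (added): the merged document produced by addHeader(), the
# appended <host> blocks from merge_nMap(), and addFooter() has roughly this
# shape:
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <!DOCTYPE nmaprun>
#   <?xml-stylesheet ...?>
#   <nmaprun ...>
#     <scaninfo .../> <verbose .../> <debugging .../>
#     <host>...</host>          <!-- one block per host, from every input file -->
#     <runstats><finished .../></runstats>
#   </nmaprun>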
def htmlER(mergeFile):
import os
cmd = '/usr/bin/xsltproc'
if os.path.isfile(cmd):
out = re.sub(r'.xml', '.html', mergeFile)
cmd = cmd + " -o " + out + " " + mergeFile
os.system(cmd)
print ("Output HTML File:", os.path.abspath(out))
else:
        print(cmd, "does not exist")
def checkDatabase():
mycursor = mydb.cursor()
mycursor.execute("CREATE DATABASE mydatabase")
#
# If you want to use this as a module you need to pass a set of nmap xmls
#
# nmapSET = set()
# nmapSET.add('/nmap-Dir/nmap_10.10.10.10.xml')
#
# Then call the main function passing the set:
# main_nMapMerger(nmapSET)
#
def main_nMapMerger(xmlSet):
HOSTS = 0
    # Check to ensure we have work to do
if not xmlSet:
print("No XML files were found ... No work to do")
exit()
# Create the Merged filename
from datetime import datetime
dtNow = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
dt = re.sub(r"\s+", '-', str(dtNow))
dt = re.sub(r":", '-', str(dt))
mergeFile = "nmap-merged-" + dt + ".xml"
# Add Header to mergefile
addHeader(mergeFile)
for xml in xmlSet:
if xml.endswith('.xml'):
logging.debug("Parsing: %r", xml)
H = merge_nMap(xml,mergeFile)
HOSTS = HOSTS + H
# Add Footer to mergefile
addFooter(mergeFile, HOSTS)
print('')
print ("Output XML File:", os.path.abspath(mergeFile))
# Convert merged XML to html
htmlER(mergeFile)
if __name__ == "__main__":
import sys
if sys.version_info <= (3, 0):
sys.stdout.write("This script requires Python 3.x\n")
sys.exit(1)
parser = ArgumentParser()
parser.add_argument("-f", "--file", dest="filename", help="parse FILE", metavar="FILE")
parser.add_argument("-d", "--dir", dest="directory", help="Parse all xml in directory", metavar="DIR")
parser.add_argument("-q", "--quiet", dest="verbose", action="store_false", default=True, help="don't print status messages to stdout")
args = parser.parse_args()
s = set()
if args.verbose:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
print('Debug On')
if args.filename is not None:
f = args.filename
if f.endswith('.xml'):
logging.debug("Adding: %r", f)
s.add(f)
elif args.directory is not None:
if os.path.isdir(args.directory):
path = args.directory
for f in os.listdir(path):
# For now we assume xml is nMap
if f.endswith('.xml'):
fullname = os.path.join(path, f)
logging.debug("Adding: %r", fullname)
s.add(fullname)
else:
            logging.warning("Not a directory: %r", args.directory)
else :
print ("usage issues =(")
parser.print_help()
exit()
# Pass set of xml files to main
main_nMapMerger(s)
| 27.886228
| 208
| 0.640971
|
208c707f0053622260b0ec4cb1722fe83c94b57f
| 2,581
|
py
|
Python
|
Scripts/GenerateLayerOntology.py
|
allemangD/cell-locator
|
0bf11d853ade1a0cf82526ca2063618524559ad0
|
[
"Apache-2.0"
] | 12
|
2018-07-11T20:29:53.000Z
|
2021-11-20T11:15:16.000Z
|
Scripts/GenerateLayerOntology.py
|
allemangD/cell-locator
|
0bf11d853ade1a0cf82526ca2063618524559ad0
|
[
"Apache-2.0"
] | 173
|
2018-06-27T12:17:17.000Z
|
2022-03-15T16:14:48.000Z
|
Scripts/GenerateLayerOntology.py
|
allemangD/cell-locator
|
0bf11d853ade1a0cf82526ca2063618524559ad0
|
[
"Apache-2.0"
] | 7
|
2018-08-23T20:56:34.000Z
|
2021-08-05T08:38:22.000Z
|
"""Generate layer ontology.
Prerequisites: pip install allensdk
Initial version written by David Feng <davidf@alleninstitute.org>
See https://gist.github.com/dyf/056095756b15a6b76dfb28558c4633da
"""
import argparse
import sys
try:
from allensdk.api.queries.rma_api import RmaApi
import allensdk.core.json_utilities as ju
except ImportError:
raise SystemExit(
"allensdk not available: "
"consider installing it running 'pip install allensdk'"
)
def generate_layer_ontology(output):
all_structs = []
root = RmaApi().model_query("Structure", criteria="[graph_id$eq1],[acronym$eqgrey]")[0]
all_structs.append(root)
layers = [ { 'id': 900000000, 'acronym': 'Isocortex1', 'name': 'Isocortex layer 1', 'color_hex_triplet': '7fc97f' },
{ 'id': 900000001, 'acronym': 'Isocortex2/3', 'name': 'Isocortex layer 2/3', 'color_hex_triplet': 'beaed4' },
{ 'id': 900000002, 'acronym': 'Isocortex4', 'name': 'Isocortex layer 4', 'color_hex_triplet': 'fdc086' },
{ 'id': 900000003, 'acronym': 'Isocortex5', 'name': 'Isocortex layer 5', 'color_hex_triplet': 'ffff99' },
{ 'id': 900000004, 'acronym': 'Isocortex6a', 'name': 'Isocortex layer 6a', 'color_hex_triplet': '386cb0' },
{ 'id': 900000005, 'acronym': 'Isocortex6b', 'name': 'Isocortex layer 6b', 'color_hex_triplet': 'f0027f' } ]
all_structs += layers
for layer in layers:
layer['structure_id_path'] = '/%d/%d/' % (root['id'], layer['id'])
layer['parent_structure_id'] = root['id']
structs = RmaApi().model_query("Structure", criteria="structure_sets[name$eq'%s']"%layer['name'])
for struct in structs:
struct['structure_id_path'] = '/%d/%d/%d/' % (root['id'], layer['id'], struct['id'])
struct['color_hex_triplet'] = layer['color_hex_triplet']
struct['parent_structure_id'] = layer['id']
all_structs += structs
# Generate structure similar to the one returned by the http://api.brain-map.org/api/v2/data/Structure/query.json
content = {
"msg": all_structs,
"num_rows": len(all_structs),
"start_row": 0,
"success": True,
"total_rows": len(all_structs)
}
ju.write(output, content)
def main(argv):
parser = argparse.ArgumentParser(
description='Generate layer ontology.'
)
parser.add_argument(
'--output',
metavar='/path/to/file.json',
required=True,
help='Path to the output json file'
)
args = parser.parse_args(argv)
generate_layer_ontology(args.output)
if __name__ == '__main__':
main(sys.argv[1:])
| 32.670886
| 122
| 0.655947
|
6d4d373b4c8d8334d333a6a789ca7b6fe43036f0
| 2,462
|
py
|
Python
|
setup.py
|
clytwynec/pa11ycrawler
|
fc672d4524463bc050ade4c7c97801c0d5bf8c9e
|
[
"Apache-2.0"
] | 20
|
2016-05-18T01:13:05.000Z
|
2019-09-30T14:34:53.000Z
|
setup.py
|
clytwynec/pa11ycrawler
|
fc672d4524463bc050ade4c7c97801c0d5bf8c9e
|
[
"Apache-2.0"
] | 57
|
2016-04-01T15:43:39.000Z
|
2019-03-20T21:44:11.000Z
|
setup.py
|
clytwynec/pa11ycrawler
|
fc672d4524463bc050ade4c7c97801c0d5bf8c9e
|
[
"Apache-2.0"
] | 15
|
2016-08-10T19:47:18.000Z
|
2020-02-24T04:21:30.000Z
|
from setuptools import setup
VERSION = '1.7.3'
DESCRIPTION = 'A Scrapy spider for a11y auditing Open edX installations.'
LONG_DESCRIPTION = """pa11ycrawler is a Scrapy spider that runs a Pa11y check
on every page of an Open edX installation,
to audit it for accessibility purposes."""
def is_requirement(line):
line = line.strip()
# Skip blank lines, comments, and editable installs
return not (
line == '' or
line.startswith('--') or
line.startswith('-r') or
line.startswith('#') or
line.startswith('-e') or
line.startswith('git+')
)
def get_requirements(path):
with open(path) as f:
lines = f.readlines()
return [l.strip() for l in lines if is_requirement(l)]
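# Illustrative note (added): for a hypothetical requirements file containing
#   scrapy==1.5
#   # a comment
#   -e git+https://github.com/example/foo.git#egg=foo
# only "scrapy==1.5" survives the is_requirement() filter above.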
setup(
name='pa11ycrawler',
version=VERSION,
author='edX',
author_email='oscm@edx.org',
url='https://github.com/edx/pa11ycrawler',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
package_data={
'pa11ycrawler': [
'templates/*.*',
'templates/assets/js/*.*',
'templates/assets/css/*.*',
]
},
packages=[
'pa11ycrawler',
'pa11ycrawler.pipelines',
'pa11ycrawler.spiders',
'pa11ycrawler.commands',
],
install_requires=get_requirements("requirements.txt"),
tests_require=get_requirements("dev-requirements.txt"),
license="Apache-2.0",
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance'],
entry_points={
'console_scripts': [
'pa11ycrawler-html=pa11ycrawler.html:main',
]
}
)
| 33.27027
| 79
| 0.582859
|
bd237a66b7c7f88908eb6d7b73ac261fc2151108
| 1,703
|
py
|
Python
|
requests/packages/urllib3/__init__.py
|
devavenkata/sublimeapex
|
a4a3d7559b58e4d7b1e5e766731a3d30fce6b2ec
|
[
"MIT"
] | 6
|
2015-01-20T08:26:53.000Z
|
2019-01-16T00:09:22.000Z
|
requests/packages/urllib3/__init__.py
|
devavenkata/sublimeapex
|
a4a3d7559b58e4d7b1e5e766731a3d30fce6b2ec
|
[
"MIT"
] | 7
|
2015-01-03T14:06:32.000Z
|
2015-05-07T01:40:04.000Z
|
requests/packages/urllib3/__init__.py
|
devavenkata/sublimeapex
|
a4a3d7559b58e4d7b1e5e766731a3d30fce6b2ec
|
[
"MIT"
] | 17
|
2015-06-09T13:01:39.000Z
|
2021-03-17T03:10:20.000Z
|
# urllib3/__init__.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.8.2'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util import make_headers, get_host, Timeout
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added an stderr logging handler to logger: %s' % __name__)
return handler
# ... Clean up.
del NullHandler
| 28.864407
| 84
| 0.741632
|
8242d655b3181f6dbb86f2f0a38f4a4cc995659a
| 1,722
|
py
|
Python
|
lib/googlecloudsdk/command_lib/functions/v1/delete/command.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/googlecloudsdk/command_lib/functions/v1/delete/command.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/functions/v1/delete/command.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file provides the implementation of the `functions delete` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.functions.v1 import exceptions
from googlecloudsdk.api_lib.functions.v1 import operations
from googlecloudsdk.api_lib.functions.v1 import util
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
def Run(args):
"""Delete a Google Cloud Function."""
client = util.GetApiClientInstance()
messages = client.MESSAGES_MODULE
function_ref = args.CONCEPTS.name.Parse()
function__url = function_ref.RelativeName()
prompt_message = 'Resource [{0}] will be deleted.'.format(function__url)
if not console_io.PromptContinue(message=prompt_message):
raise exceptions.FunctionsError('Deletion aborted by user.')
op = client.projects_locations_functions.Delete(
messages.CloudfunctionsProjectsLocationsFunctionsDeleteRequest(
name=function__url))
operations.Wait(op, messages, client)
log.DeletedResource(function__url)
| 41
| 78
| 0.784553
|
e6d2b7ad2abd6b80bdb20a12996b2ab9cb380fe6
| 494
|
py
|
Python
|
Utils/Logger.py
|
FullMetalNicky/PitchNet
|
9987398e304238ed67c588a4166ec0cc1801f805
|
[
"MIT"
] | null | null | null |
Utils/Logger.py
|
FullMetalNicky/PitchNet
|
9987398e304238ed67c588a4166ec0cc1801f805
|
[
"MIT"
] | null | null | null |
Utils/Logger.py
|
FullMetalNicky/PitchNet
|
9987398e304238ed67c588a4166ec0cc1801f805
|
[
"MIT"
] | null | null | null |
import logging
def LogProgram():
logging.basicConfig(level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
filename="log.txt",
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
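# Hedged usage sketch (added; not part of the original file): running the
# module directly configures file + console logging and emits one test record.
if __name__ == "__main__":
    LogProgram()
    logging.info("Logger initialised; writing to log.txt and to the console")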
| 32.933333
| 75
| 0.55668
|
bf9a835022cf1b5840b3784a239a3bb1c6f2c15d
| 26
|
py
|
Python
|
spoapy/__init__.py
|
tprodanov/spoa
|
3a09bc65a08915a1a3956ef74b5b7cc4391cd08c
|
[
"MIT"
] | null | null | null |
spoapy/__init__.py
|
tprodanov/spoa
|
3a09bc65a08915a1a3956ef74b5b7cc4391cd08c
|
[
"MIT"
] | null | null | null |
spoapy/__init__.py
|
tprodanov/spoa
|
3a09bc65a08915a1a3956ef74b5b7cc4391cd08c
|
[
"MIT"
] | null | null | null |
from spoapy.core import *
| 13
| 25
| 0.769231
|
c8e815d0966934bcc798babcd0eb42e3ccdce51e
| 3,229
|
py
|
Python
|
assignments/assignment2/model.py
|
nikitakogut/dlcourse_ai
|
bd5826c04331864e0c37c84cf33674438d3f9b01
|
[
"MIT"
] | 1
|
2019-03-27T09:18:47.000Z
|
2019-03-27T09:18:47.000Z
|
assignments/assignment2/model.py
|
nikitakogut/dlcourse_ai
|
bd5826c04331864e0c37c84cf33674438d3f9b01
|
[
"MIT"
] | null | null | null |
assignments/assignment2/model.py
|
nikitakogut/dlcourse_ai
|
bd5826c04331864e0c37c84cf33674438d3f9b01
|
[
"MIT"
] | null | null | null |
import numpy as np
from layers import FullyConnectedLayer, ReLULayer, softmax, softmax_with_cross_entropy, l2_regularization
class TwoLayerNet:
""" Neural network with two fully connected layers """
def __init__(self, n_input, n_output, hidden_layer_size, reg):
"""
Initializes the neural network
Arguments:
n_input, int - dimension of the model input
n_output, int - number of classes to predict
hidden_layer_size, int - number of neurons in the hidden layer
reg, float - L2 regularization strength
"""
self.reg = reg
# TODO Create necessary layers
self.layers = [FullyConnectedLayer(n_input, hidden_layer_size), ReLULayer(),
FullyConnectedLayer(hidden_layer_size, n_output)]
def compute_loss_and_gradients(self, X, y):
"""
Computes total loss and updates parameter gradients
on a batch of training examples
Arguments:
X, np array (batch_size, input_features) - input data
y, np array of int (batch_size) - classes
"""
# Before running forward and backward pass through the model,
# clear parameter gradients aggregated from the previous pass
# TODO Set parameter gradient to zeros
# Hint: using self.params() might be useful!
for layer in self.layers:
if {'W', 'B'} <= set(layer.params()):
layer.W.grad = np.zeros(layer.W.value.shape)
layer.B.grad = np.zeros(layer.B.value.shape)
# TODO Compute loss and fill param gradients
# by running forward and backward passes through the model
forward_val = X
for layer in self.layers:
forward_val = layer.forward(forward_val)
loss, backward_val = softmax_with_cross_entropy(forward_val, y)
for layer in self.layers[::-1]:
backward_val = layer.backward(backward_val)
# After that, implement l2 regularization on all params
# Hint: self.params() is useful again!
for layer in self.layers:
for param_name, param in layer.params().items():
loss_reg, grad_reg = l2_regularization(param.value, self.reg)
loss += loss_reg
param.grad += grad_reg
return loss
def predict(self, X):
"""
Produces classifier predictions on the set
Arguments:
X, np array (test_samples, num_features)
Returns:
y_pred, np.array of int (test_samples)
"""
# TODO: Implement predict
# Hint: some of the code of the compute_loss_and_gradients
# can be reused
        pred = np.zeros(X.shape[0], dtype=int)
forward_val = X
for layer in self.layers:
forward_val = layer.forward(forward_val)
pred = np.argmax(softmax(forward_val), axis=1)
return pred
def params(self):
result = {}
# TODO Implement aggregating all of the params
for ind, layer in enumerate(self.layers):
for param in layer.params().items():
            result['layer_' + str(ind//2 + 1) + '_' + param[0]] = param[1]
return result
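# Minimal usage sketch (added; not part of the original assignment file, and
# the layer sizes are illustrative):
#
#   model = TwoLayerNet(n_input=784, n_output=10, hidden_layer_size=100, reg=1e-3)
#   loss = model.compute_loss_and_gradients(X_batch, y_batch)  # fills param grads
#   preds = model.predict(X_batch)                              # array of class ids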
| 37.988235
| 105
| 0.613503
|
df3d01211cf05279a83f1d059ec14e3bd3fd7b70
| 2,206
|
py
|
Python
|
utest/namespace/test_library_cache.py
|
crylearner/RIDE3X
|
767f45b0c908f18ecc7473208def8dc7489f43b0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2017-08-20T14:46:02.000Z
|
2017-08-20T14:46:02.000Z
|
utest/namespace/test_library_cache.py
|
crylearner/RIDE3X
|
767f45b0c908f18ecc7473208def8dc7489f43b0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
utest/namespace/test_library_cache.py
|
crylearner/RIDE3X
|
767f45b0c908f18ecc7473208def8dc7489f43b0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import unittest
import sys
import os
from robotide.spec.librarymanager import LibraryManager
from threading import Thread
from robotide.namespace.cache import LibraryCache
from resources import DATAPATH
sys.path.append(os.path.join(DATAPATH, 'libs'))
class TestLibraryCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._library_manager = LibraryManager(':memory:')
cls._library_manager.start()
cls._library_manager.create_database()
@classmethod
def tearDownClass(cls):
cls._library_manager.stop()
cls._library_manager = None
def test_auto_importing_libraries(self):
cache = self._create_cache_with_auto_imports('TestLib')
self._assert_keyword_in_keywords(cache.get_default_keywords(), 'Testlib Keyword')
def test_auto_importing_libraries_with_arguments(self):
cache = self._create_cache_with_auto_imports('ArgLib|foo')
self._assert_keyword_in_keywords(cache.get_default_keywords(), 'Get Mandatory')
def test_importing_library_with_dictionary_arg(self):
LibraryCache({}, lambda:0, self._library_manager)._get_library('ArgLib', [{'moi':'hoi'}, []])
def test_importing_from_two_threads(self):
cache = self._create_cache_with_auto_imports('TestLib')
self._thread_results = []
def check_test_lib_keyword():
cache.get_default_keywords()
self._thread_results.append('ok')
t1 = Thread(target=check_test_lib_keyword)
t2 = Thread(target=check_test_lib_keyword)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(['ok', 'ok'], self._thread_results)
def _create_cache_with_auto_imports(self, auto_import):
settings = {'auto imports': [auto_import]}
return LibraryCache(settings, lambda:0, self._library_manager)
def _assert_keyword_in_keywords(self, keywords, name):
for kw in keywords:
if kw.name == name:
return
raise AssertionError('Keyword %s not found in default keywords' % name)
if __name__ == "__main__":
unittest.main()
| 33.938462
| 102
| 0.67815
|
d614d4d47eb4f0d9ced64a90806b7112c12038c7
| 2,396
|
py
|
Python
|
etutils/viz/scatter3.py
|
erdogant/etutils
|
69be71d2b2a0e7536b9244eb90edd689a6d1148c
|
[
"MIT"
] | null | null | null |
etutils/viz/scatter3.py
|
erdogant/etutils
|
69be71d2b2a0e7536b9244eb90edd689a6d1148c
|
[
"MIT"
] | null | null | null |
etutils/viz/scatter3.py
|
erdogant/etutils
|
69be71d2b2a0e7536b9244eb90edd689a6d1148c
|
[
"MIT"
] | null | null | null |
""" This function creates a 3d scatterplot
from etutils.viz.scatter3 import scatter3
A= scatter3(x,y,z <optional>)
INPUT:
data: numpy array
x
y
z
OPTIONAL
verbose: Integer [0..5] if verbose >= DEBUG: print('debug message')
0: (default)
1: ERROR
2: WARN
3: INFO
4: DEBUG
OUTPUT
output
DESCRIPTION
Plot 3d scatterplot
EXAMPLE
from etutils.viz.scatter3 import scatter3
data='hallo'
A = scatter3(data,verbose=1)
SEE ALSO
scatter
"""
#--------------------------------------------------------------------------
# Name : scatter3.py
# Version : 1.0
# Author : E.Taskesen
# Contact : erdogant@gmail.com
# Date : April. 2019
#--------------------------------------------------------------------------
#%% Libraries
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import etutils  # provides etutils.ismember(), used below in checktype()
#%% Main
def scatter3(x, y, z, dtypes=[None,None,None], xlabel='X-axis', ylabel='Y-axis', zlabel='Z-axis', colors='b', verbose=3):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x_num = checktype(x, dtypes[0])
y_num = checktype(y, dtypes[1])
z_num = checktype(z, dtypes[2])
if not 'str' in str(type(colors)):
colors = checktype(colors, dtypes=None)
# Plot the values
ax.scatter(x_num, y_num, z_num, c=colors, marker='.')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel)
plt.show()
return
#%% Check type
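# Explanatory note (added): checktype (below) guesses the dtype when none is
# given. It coerces the data with pd.to_numeric and, if 10% or more of the
# values come back NaN, treats the column as categorical and label-encodes it;
# otherwise it casts the values to float.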
def checktype(data, dtypes=None):
if isinstance(dtypes, type(None)):
score = np.isnan(pd.to_numeric(data,errors='coerce')).sum()/len(data)
if score>=0.1:
dtypes='cat'
else:
dtypes='num'
if dtypes=='num':
if 'pandas' in str(type(data)):
out = pd.to_numeric(data,errors='coerce').values
if 'numpy' in str(type(data)):
out = data.astype(float)
if dtypes=='cat':
data = data.astype(str)
if 'numpy' in str(type(data)):
_,out = etutils.ismember(data, np.unique(data))
if 'pandas' in str(type(data)):
data = data.astype('category')
out = data.cat.codes.values
return(out)
| 24.44898
| 121
| 0.514608
|
b03b2c3be68f7feecfa85afe6e10c72be599c55c
| 3,260
|
py
|
Python
|
tests/ocd_backend/transformers/test_database_transformer.py
|
openstate/open-raadsinformatie
|
bb5cb306d794251334942f34e573a204f52db61b
|
[
"MIT"
] | 23
|
2015-10-28T09:02:41.000Z
|
2021-12-15T08:40:41.000Z
|
tests/ocd_backend/transformers/test_database_transformer.py
|
openstate/open-raadsinformatie
|
bb5cb306d794251334942f34e573a204f52db61b
|
[
"MIT"
] | 326
|
2015-11-03T12:59:48.000Z
|
2022-03-11T23:18:14.000Z
|
tests/ocd_backend/transformers/test_database_transformer.py
|
openstate/open-raadsinformatie
|
bb5cb306d794251334942f34e573a204f52db61b
|
[
"MIT"
] | 10
|
2016-02-05T08:43:07.000Z
|
2022-03-09T10:04:32.000Z
|
import os
import json
from unittest import TestCase
from ocd_backend.models.model import Model
from ocd_backend.transformers.database import database_item
class DatabaseTransformerTestCase(TestCase):
def setUp(self):
self.PWD = os.path.dirname(__file__)
self.transformer_class = database_item
self.transformer_class.get_supplier = self.mock_get_supplier
self.untested_models = set()
@staticmethod
def mock_get_supplier(ori_id):
if ori_id == 35138:
return 'allmanak'
else:
return 'notubiz'
def test_transformed_object_properties_equal(self):
"""
Tests whether the properties of the transformed resource are equal to the properties in the JSON
dump. Note that date and datetime properties have been removed from the JSON because they are not
JSON serializable.
"""
with open(os.path.join(self.PWD, '../test_dumps/database_extracted_meeting.json'), 'r') as f:
extracted_resource, extracted_subresources = json.loads(f.read())
args = ('object', (extracted_resource, extracted_subresources), '612019', 'source_item_dummy')
kwargs = {'source_definition': {
'cleanup': 'ocd_backend.tasks.cleanup_elasticsearch',
'key': 'groningen',
}
}
transformed_meeting = self.transformer_class.apply(args, kwargs).get()
# Test properties of the main resource
extracted_properties = {_prop['predicate']: _prop['value'] for _prop in extracted_resource['properties']
if _prop['predicate'] != 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'}
self.assertEqual(len(transformed_meeting.values), len(extracted_properties))
for _property in transformed_meeting.values.items():
if isinstance(_property[1], Model):
self.untested_models.add(_property[1])
else:
self.assertTrue(transformed_meeting.definition(_property[0]).absolute_uri() in extracted_properties)
self.assertTrue(extracted_properties[transformed_meeting.definition(_property[0]).absolute_uri()] ==
_property[1])
# Test properties of subresources
for subresource in self.untested_models:
extracted_subresource_properties = extracted_subresources[subresource.ori_identifier.rsplit('/')[-1]][0]['properties']
extracted_properties = {_prop['predicate']: _prop['value'] for _prop in extracted_subresource_properties
if _prop['predicate'] != 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'}
self.assertEqual(len(subresource.values), len(extracted_properties))
for _property in subresource.values.items():
if isinstance(_property[1], Model):
self.untested_models.add(_property[1])
else:
self.assertTrue(subresource.definition(_property[0]).absolute_uri() in extracted_properties)
self.assertTrue(extracted_properties[subresource.definition(_property[0]).absolute_uri()] ==
_property[1])
| 50.9375
| 130
| 0.645092
|
7e703e1301b77456a3c86d21c5923f6f04a26290
| 699
|
py
|
Python
|
gofile2/errors.py
|
Itz-fork/Gofile2
|
14065c6fd5d9ef82baf2fcd099d80c43cee9a564
|
[
"MIT"
] | 9
|
2021-09-19T15:47:05.000Z
|
2021-12-10T05:22:44.000Z
|
gofile2/errors.py
|
Itz-fork/Gofile2
|
14065c6fd5d9ef82baf2fcd099d80c43cee9a564
|
[
"MIT"
] | null | null | null |
gofile2/errors.py
|
Itz-fork/Gofile2
|
14065c6fd5d9ef82baf2fcd099d80c43cee9a564
|
[
"MIT"
] | null | null | null |
# Original Author: Codec04
# Re-built by Itz-fork
# Project: Gofile2
import requests
class InvalidToken(Exception):
pass
class JobFailed(Exception):
pass
class ResponseError(Exception):
pass
class InvalidPath(Exception):
pass
# Function to check if token is valid or not (using request lib as this is just a sync function)
def is_valid_token(url, token):
get_account_resp = requests.get(
url=f"{url}getAccountDetails?token={token}&allDetails=true").json()
if get_account_resp["status"] == "error-wrongToken":
raise InvalidToken(
"Invalid Gofile Token, Get your Gofile token from --> https://gofile.io/myProfile")
else:
pass
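# Illustrative call (added; the base URL is an assumption, not taken from this
# file):
#
#   is_valid_token("https://api.gofile.io/", "your-gofile-token")
#
# raises InvalidToken when the API responds with status "error-wrongToken",
# and returns None otherwise.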
| 21.181818
| 96
| 0.695279
|
bc6185ab34afcb1afb878944eb9c1ba67938533b
| 1,659
|
py
|
Python
|
setup.py
|
mattsanchez/sqlalchemy-sparksql
|
122f27c8ae7709a5d6df6bbba1194884c59bbb81
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
mattsanchez/sqlalchemy-sparksql
|
122f27c8ae7709a5d6df6bbba1194884c59bbb81
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
mattsanchez/sqlalchemy-sparksql
|
122f27c8ae7709a5d6df6bbba1194884c59bbb81
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sqlalchemy_sparksql
import sys
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
with open('README.md') as readme:
long_description = readme.read()
setup(
name="sqlalchemy-sparksql",
version=sqlalchemy_sparksql.__version__,
description="SparkSQL Driver for SQLAlchemy",
long_description=long_description,
url='https://github.com/matthsanchez/sqlalchemy-sparksql',
author="Matt Sanchez",
author_email="matt at c12.com",
license="Apache License, Version 2.0",
packages=['sqlalchemy_sparksql'],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Database :: Front-Ends",
],
install_requires=[
'sqlalchemy>=0.12.0',
'pyhive[hive]',
],
tests_require=[
'mock>=1.0.0',
'pytest',
'pytest-cov',
'requests>=1.0.0',
'sasl>=0.2.1',
'sqlalchemy>=0.12.0',
'thrift>=0.10.0',
],
cmdclass={'test': PyTest},
package_data={
'': ['*.rst'],
},
entry_points={
'sqlalchemy.dialects': [
'sparksql = sqlalchemy_sparksql.sparksql:SparkSqlDialect',
],
}
)
| 26.333333
| 70
| 0.615431
|
7aec0cc19c3157a64f6a4be580841c82830021a6
| 664
|
py
|
Python
|
koku/cost_models/migrations/0003_auto_20210615_2011.py
|
rubik-ai/koku
|
3255d1c217b7b6685cb2e130bf4e025946e76fac
|
[
"Apache-2.0"
] | 157
|
2018-04-30T16:27:53.000Z
|
2022-03-31T08:17:21.000Z
|
koku/cost_models/migrations/0003_auto_20210615_2011.py
|
rubik-ai/koku
|
3255d1c217b7b6685cb2e130bf4e025946e76fac
|
[
"Apache-2.0"
] | 3,250
|
2018-04-26T14:14:25.000Z
|
2022-03-31T23:49:15.000Z
|
koku/cost_models/migrations/0003_auto_20210615_2011.py
|
rubik-ai/koku
|
3255d1c217b7b6685cb2e130bf4e025946e76fac
|
[
"Apache-2.0"
] | 65
|
2018-05-10T14:11:50.000Z
|
2022-03-18T19:22:58.000Z
|
# Generated by Django 3.1.12 on 2021-06-15 20:11
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("cost_models", "0002_auto_20210318_1514")]
operations = [
migrations.AddField(
model_name="costmodel",
name="distribution",
field=models.TextField(choices=[("memory", "memory"), ("cpu", "cpu")], default="cpu"),
),
migrations.AddField(
model_name="costmodelaudit",
name="distribution",
field=models.TextField(choices=[("memory", "memory"), ("cpu", "cpu")], default="cpu"),
),
]
| 30.181818
| 98
| 0.597892
|
728938a32a07383a8aeec68d57f3163806ca0f0c
| 1,721
|
py
|
Python
|
python_modules/libraries/dagster-k8s/setup.py
|
joeyfreund/dagster
|
e551ff4bbb2c42b497a3e1c28cfb51fd5f2b1c21
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-k8s/setup.py
|
joeyfreund/dagster
|
e551ff4bbb2c42b497a3e1c28cfb51fd5f2b1c21
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-k8s/setup.py
|
joeyfreund/dagster
|
e551ff4bbb2c42b497a3e1c28cfb51fd5f2b1c21
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import sys
from setuptools import find_packages, setup
def get_version(name):
version = {}
with open('dagster_k8s/version.py') as fp:
exec(fp.read(), version) # pylint: disable=W0122
if name == 'dagster-k8s':
return version['__version__']
elif name == 'dagster-k8s-nightly':
return version['__nightly__']
else:
raise Exception('Shouldn\'t be here: bad package name {name}'.format(name=name))
parser = argparse.ArgumentParser()
parser.add_argument('--nightly', action='store_true')
def _do_setup(name='dagster-k8s'):
setup(
name=name,
version=get_version(name),
author='Elementl',
license='Apache-2.0',
description='A Dagster integration for k8s',
url='https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-k8s',
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
packages=find_packages(exclude=['test']),
install_requires=[
'dagster',
'dagster_graphql',
'kubernetes',
# RSA 4.1+ is incompatible with py2.7
'rsa<=4.0; python_version<"3"',
],
tests_require=[],
zip_safe=False,
)
if __name__ == '__main__':
parsed, unparsed = parser.parse_known_args()
sys.argv = [sys.argv[0]] + unparsed
if parsed.nightly:
_do_setup('dagster-k8s-nightly')
else:
_do_setup('dagster-k8s')
| 29.169492
| 101
| 0.598489
|
42cbad91992189bb16a322caba1e7ef2e17423ef
| 511
|
py
|
Python
|
var/spack/repos/builtin.mock/packages/extendee/package.py
|
whitfin/spack
|
aabd2be31a511d0e00c1017f7311a421659319d9
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3
|
2019-06-27T13:26:50.000Z
|
2019-07-01T16:24:54.000Z
|
var/spack/repos/builtin.mock/packages/extendee/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75
|
2016-07-27T11:43:00.000Z
|
2020-12-08T15:56:53.000Z
|
var/spack/repos/builtin.mock/packages/extendee/package.py
|
openbiox/spack
|
bb6ec7fb40c14b37e094a860e3625af53f633174
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8
|
2015-10-16T13:51:49.000Z
|
2021-10-18T13:58:03.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Extendee(Package):
"""A package with extensions"""
homepage = "http://www.example.com"
url = "http://www.example.com/extendee-1.0.tar.gz"
extendable = True
version('1.0', 'hash-extendee-1.0')
def install(self, spec, prefix):
mkdirp(prefix.bin)
| 24.333333
| 73
| 0.679061
|
0fde83b717479801193cd7a93ef17b7567607eb9
| 1,003
|
py
|
Python
|
code/main.py
|
abdullahkhawer/ak-encoder-decoder
|
7553476d40d8629d24fd58541d31ea767381cf6a
|
[
"Apache-2.0"
] | 8
|
2019-07-04T11:13:55.000Z
|
2021-06-28T13:19:32.000Z
|
code/main.py
|
abdullahkhawer/ak-encoder-decoder
|
7553476d40d8629d24fd58541d31ea767381cf6a
|
[
"Apache-2.0"
] | null | null | null |
code/main.py
|
abdullahkhawer/ak-encoder-decoder
|
7553476d40d8629d24fd58541d31ea767381cf6a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from AKEncoderDecoder import AKEncoderDecoder
import os
import sys
pass_key = os.getenv("AK_ENCODER_DECODER_PASS_KEY", " ")
pass_key = '+'.join(str(ord(c)) for c in pass_key)
pass_key = eval(pass_key)/2
if __name__ == "__main__":
if len(sys.argv) != 3:
print("ERROR: Invalid Number of Arguments Passed.")
exit(1)
code_mode = int(sys.argv[1])
input_str = sys.argv[2]
AKEncoderDecoder_OBJ = AKEncoderDecoder()
print("Input String: " + input_str)
if code_mode == 0:
encoded_str = AKEncoderDecoder_OBJ.ak_encoder(input_str, int(pass_key))
print("Encoded String: \"" + encoded_str + "\"")
#decoded_str = AKEncoderDecoder_OBJ.ak_decoder(encoded_str, int(pass_key))
#print("Decoded String: " + decoded_str)
elif code_mode == 1:
decoded_str = AKEncoderDecoder_OBJ.ak_decoder(input_str, int(pass_key))
print("Decoded String: " + decoded_str)
else:
print("ERROR: Invalid Code Mode.")
| 30.393939
| 82
| 0.666999
|
e3af27aec21d81cbfbc6c63da67a642852630a5d
| 1,070
|
py
|
Python
|
mmdet/models/utils/__init__.py
|
yan-roo/LambdaNetworks-Mask_RCNN
|
dbd2e3171c69c262a7ed6583f79bdaf329128a2a
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/utils/__init__.py
|
yan-roo/LambdaNetworks-Mask_RCNN
|
dbd2e3171c69c262a7ed6583f79bdaf329128a2a
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/utils/__init__.py
|
yan-roo/LambdaNetworks-Mask_RCNN
|
dbd2e3171c69c262a7ed6583f79bdaf329128a2a
|
[
"Apache-2.0"
] | null | null | null |
from .builder import build_linear_layer, build_transformer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .lambda_layer import LambdaBlock, LambdaLayer
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, Transformer)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'LambdaBlock', 'LambdaLayer'
]
| 48.636364
| 78
| 0.776636
|
e6033cc0547a0f36d507969825b911bb255aeb53
| 2,909
|
py
|
Python
|
GUIPackage1/datavisualize/datavisualize6/main_window.py
|
cybernetor066/test1DesktopAppPySide2
|
1f6f42bcdb4b0ee39a76c0e2d206c5957cc9ff24
|
[
"PSF-2.0"
] | null | null | null |
GUIPackage1/datavisualize/datavisualize6/main_window.py
|
cybernetor066/test1DesktopAppPySide2
|
1f6f42bcdb4b0ee39a76c0e2d206c5957cc9ff24
|
[
"PSF-2.0"
] | null | null | null |
GUIPackage1/datavisualize/datavisualize6/main_window.py
|
cybernetor066/test1DesktopAppPySide2
|
1f6f42bcdb4b0ee39a76c0e2d206c5957cc9ff24
|
[
"PSF-2.0"
] | null | null | null |
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing/
##
## This file is part of the Qt for Python examples of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of The Qt Company Ltd nor the names of its
## contributors may be used to endorse or promote products derived
## from this software without specific prior written permission.
##
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PySide2.QtCore import Slot, qApp
from PySide2.QtWidgets import QMainWindow, QAction
class MainWindow(QMainWindow):
def __init__(self, widget):
QMainWindow.__init__(self)
self.setWindowTitle("Eartquakes information")
# Menu
self.menu = self.menuBar()
self.file_menu = self.menu.addMenu("File")
# Exit QAction
exit_action = QAction("Exit", self)
exit_action.setShortcut("Ctrl+Q")
exit_action.triggered.connect(self.exit_app)
self.file_menu.addAction(exit_action)
# Status Bar
self.status = self.statusBar()
self.status.showMessage("Data loaded and plotted")
# Window dimensions
geometry = qApp.desktop().availableGeometry(self)
self.setFixedSize(geometry.width() * 0.8, geometry.height() * 0.7)
self.setCentralWidget(widget)
@Slot()
def exit_app(self, checked):
sys.exit()
| 40.402778
| 77
| 0.676521
|
f036c65900d0a9bf7e71ab916a8aad398a96e0f2
| 999
|
py
|
Python
|
myproject/urls.py
|
ritwickjha99/onlineclass
|
1d0ea759c33ae4204d1ecaa5078b50ec9f3851ca
|
[
"Apache-2.0"
] | null | null | null |
myproject/urls.py
|
ritwickjha99/onlineclass
|
1d0ea759c33ae4204d1ecaa5078b50ec9f3851ca
|
[
"Apache-2.0"
] | null | null | null |
myproject/urls.py
|
ritwickjha99/onlineclass
|
1d0ea759c33ae4204d1ecaa5078b50ec9f3851ca
|
[
"Apache-2.0"
] | null | null | null |
"""myproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('onlinecourse/', include('onlinecourse.urls')),
path("",include('onlinecourse.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 38.423077
| 77
| 0.724725
|
c7b5a52827f8b09c77b3cde18696f16ed7dca6a3
| 4,363
|
py
|
Python
|
toontown/toon/DistributedNPCToonBase.py
|
journeyfan/toontown-journey
|
7a4db507e5c1c38a014fc65588086d9655aaa5b4
|
[
"MIT"
] | 1
|
2020-09-27T22:12:47.000Z
|
2020-09-27T22:12:47.000Z
|
toontown/toon/DistributedNPCToonBase.py
|
journeyfan/toontown-journey
|
7a4db507e5c1c38a014fc65588086d9655aaa5b4
|
[
"MIT"
] | null | null | null |
toontown/toon/DistributedNPCToonBase.py
|
journeyfan/toontown-journey
|
7a4db507e5c1c38a014fc65588086d9655aaa5b4
|
[
"MIT"
] | 2
|
2020-09-26T20:37:18.000Z
|
2020-11-15T20:55:33.000Z
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import ClockDelta
from direct.distributed import DistributedObject
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import *
import random
from . import DistributedToon
from . import NPCToons
from toontown.nametag import NametagGlobals
from toontown.quest import QuestChoiceGui
from toontown.quest import QuestParser
from toontown.quest import Quests
from toontown.toonbase import ToontownGlobals
class DistributedNPCToonBase(DistributedToon.DistributedToon):
def __init__(self, cr):
try:
self.DistributedNPCToon_initialized
except:
self.DistributedNPCToon_initialized = 1
DistributedToon.DistributedToon.__init__(self, cr)
self.__initCollisions()
self.setPickable(0)
self.setPlayerType(NametagGlobals.CCNonPlayer)
def disable(self):
self.ignore('enter' + self.cSphereNode.getName())
DistributedToon.DistributedToon.disable(self)
def delete(self):
try:
self.DistributedNPCToon_deleted
except:
self.DistributedNPCToon_deleted = 1
self.__deleteCollisions()
DistributedToon.DistributedToon.delete(self)
def generate(self):
DistributedToon.DistributedToon.generate(self)
self.cSphereNode.setName(self.uniqueName('NPCToon'))
self.detectAvatars()
self.setParent(ToontownGlobals.SPRender)
self.startLookAround()
def generateToon(self):
self.setLODs()
self.generateToonLegs()
self.generateToonHead()
self.generateToonTorso()
self.generateToonColor()
self.parentToonParts()
self.rescaleToon()
self.resetHeight()
self.rightHands = []
self.leftHands = []
self.headParts = []
self.hipsParts = []
self.torsoParts = []
self.legsParts = []
self.__bookActors = []
self.__holeActors = []
def announceGenerate(self):
self.initToonState()
DistributedToon.DistributedToon.announceGenerate(self)
def initToonState(self):
self.setAnimState('neutral', 0.9, None, None)
npcOrigin = render.find('**/npc_origin_' + str(self.posIndex))
if not npcOrigin.isEmpty():
self.reparentTo(npcOrigin)
self.initPos()
def initPos(self):
self.clearMat()
def wantsSmoothing(self):
return 0
def detectAvatars(self):
self.accept('enter' + self.cSphereNode.getName(), self.handleCollisionSphereEnter)
def ignoreAvatars(self):
self.ignore('enter' + self.cSphereNode.getName())
def getCollSphereRadius(self):
return 3.25
def __initCollisions(self):
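        # Note: despite the 'cSphere' naming, the collision solid is actually a CollisionTube.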
self.cSphere = CollisionTube(0.0, 1.0, 0.0, 0.0, 1.0, 5.0, self.getCollSphereRadius())
self.cSphere.setTangible(0)
self.cSphereNode = CollisionNode('cSphereNode')
self.cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.attachNewNode(self.cSphereNode)
self.cSphereNodePath.hide()
self.cSphereNode.setCollideMask(ToontownGlobals.WallBitmask)
def __deleteCollisions(self):
del self.cSphere
del self.cSphereNode
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
def handleCollisionSphereEnter(self, collEntry):
pass
def setupAvatars(self, av):
self.ignoreAvatars()
av.headsUp(self, 0, 0, 0)
self.headsUp(av, 0, 0, 0)
av.stopLookAround()
av.lerpLookAt(Point3(-0.5, 4, 0), time=0.5)
self.stopLookAround()
self.lerpLookAt(Point3(av.getPos(self)), time=0.5)
def b_setPageNumber(self, paragraph, pageNumber):
self.setPageNumber(paragraph, pageNumber)
self.d_setPageNumber(paragraph, pageNumber)
def d_setPageNumber(self, paragraph, pageNumber):
timestamp = ClockDelta.globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setPageNumber', [paragraph, pageNumber, timestamp])
def freeAvatar(self):
base.localAvatar.posCamera(0, 0)
base.cr.playGame.getPlace().setState('walk')
def setPositionIndex(self, posIndex):
self.posIndex = posIndex
| 32.559701
| 94
| 0.673161
|
cd609493d26c3581cb9dc00cfdee3a72f669db42
| 15,229
|
py
|
Python
|
test/IECoreScene/MixSmoothSkinningWeightsOpTest.py
|
bradleyhenke/cortex
|
f8245cc6c9464b1de9e6c6e57068248198e63de0
|
[
"BSD-3-Clause"
] | 386
|
2015-01-02T11:10:43.000Z
|
2022-03-10T15:12:20.000Z
|
test/IECoreScene/MixSmoothSkinningWeightsOpTest.py
|
bradleyhenke/cortex
|
f8245cc6c9464b1de9e6c6e57068248198e63de0
|
[
"BSD-3-Clause"
] | 484
|
2015-01-09T18:28:06.000Z
|
2022-03-31T16:02:04.000Z
|
test/IECoreScene/MixSmoothSkinningWeightsOpTest.py
|
bradleyhenke/cortex
|
f8245cc6c9464b1de9e6c6e57068248198e63de0
|
[
"BSD-3-Clause"
] | 99
|
2015-01-28T23:18:04.000Z
|
2022-03-27T00:59:39.000Z
|
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
import random
import imath
import IECore
import IECoreScene
class MixSmoothSkinningWeightsOpTest( unittest.TestCase ) :
def createSSD( self, weights ) :
names = IECore.StringVectorData( [ 'jointA', 'jointB', 'jointC' ] )
poses = IECore.M44fVectorData( [imath.M44f(1),imath.M44f(2),imath.M44f(3)] )
offsets = IECore.IntVectorData( [0, 2, 5, 6] )
counts = IECore.IntVectorData( [2, 3, 1, 2] )
indices = IECore.IntVectorData( [0, 1, 0, 1, 2, 1, 1, 2] )
ssd = IECoreScene.SmoothSkinningData( names, poses, offsets, counts, indices, weights )
return ssd
def original( self ) :
weights = IECore.FloatVectorData( [0.7, 0.7, 0.2, 0.6, 0.2, 0.1, 1.2, 0.8] )
return self.createSSD( weights )
def toMix( self ) :
weights = IECore.FloatVectorData( [0.5, 0.5, 0.25, 0.75, 0.2, 1.0, 0.6, 0.4] )
return self.createSSD( weights )
def mixed50_50_50( self ) :
weights = IECore.FloatVectorData( [0.6, 0.6, 0.225, 0.675, 0.2, 0.55, 0.9, 0.6] )
return self.createSSD( weights )
def mixed75_75_75( self ) :
weights = IECore.FloatVectorData( [0.65, 0.65, 0.2125, 0.6375, 0.2, 0.325, 1.05, 0.7] )
return self.createSSD( weights )
def mixed40_60_80( self ) :
weights = IECore.FloatVectorData( [0.58, 0.62, 0.23, 0.66, 0.2, 0.46, 0.96, 0.72] )
return self.createSSD( weights )
def mixed0_50_100( self ) :
weights = IECore.FloatVectorData( [0.5, 0.6, 0.25, 0.675, 0.2, 0.55, 0.9, 0.8] )
return self.createSSD( weights )
def testTypes( self ) :
""" Test MixSmoothSkinningWeightsOp types"""
ssd = self.original()
op = IECoreScene.MixSmoothSkinningWeightsOp()
self.assertEqual( type(op), IECoreScene.MixSmoothSkinningWeightsOp )
self.assertEqual( op.typeId(), IECoreScene.TypeId.MixSmoothSkinningWeightsOp )
op.parameters()['input'].setValue( IECore.IntData(1) )
self.assertRaises( RuntimeError, op.operate )
def testSelfMixing( self ) :
""" Test MixSmoothSkinningWeightsOp by mixing with itself"""
ssd = self.original()
op = IECoreScene.MixSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['skinningDataToMix'].setValue( ssd )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0.5, 0.5, 0.5 ] ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertEqual( result, ssd )
def testMix50_50_50( self ) :
""" Test MixSmoothSkinningWeightsOp with a 50-50 split between all weights"""
ssd = self.original()
op = IECoreScene.MixSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['skinningDataToMix'].setValue( self.toMix() )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0.5, 0.5, 0.5 ] ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
mixed = self.mixed50_50_50()
self.assertEqual( result.influenceNames(), mixed.influenceNames() )
self.assertEqual( result.influencePose(), mixed.influencePose() )
self.assertEqual( result.pointIndexOffsets(), mixed.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), mixed.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), mixed.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
mixedWeights = mixed.pointInfluenceWeights()
for i in range( 0, result.pointInfluenceWeights().size() ) :
self.assertAlmostEqual( resultWeights[i], mixedWeights[i], 6 )
def testMix75_75_75( self ) :
""" Test MixSmoothSkinningWeightsOp with a 75-25 split between all weights"""
ssd = self.original()
op = IECoreScene.MixSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['skinningDataToMix'].setValue( self.toMix() )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0.75, 0.75, 0.75 ] ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
mixed = self.mixed75_75_75()
self.assertEqual( result.influenceNames(), mixed.influenceNames() )
self.assertEqual( result.influencePose(), mixed.influencePose() )
self.assertEqual( result.pointIndexOffsets(), mixed.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), mixed.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), mixed.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
mixedWeights = mixed.pointInfluenceWeights()
for i in range( 0, result.pointInfluenceWeights().size() ) :
self.assertAlmostEqual( resultWeights[i], mixedWeights[i], 6 )
def testMix40_60_80( self ) :
""" Test MixSmoothSkinningWeightsOp with a mixed split between all weights"""
ssd = self.original()
op = IECoreScene.MixSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['skinningDataToMix'].setValue( self.toMix() )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0.4, 0.6, 0.8 ] ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
mixed = self.mixed40_60_80()
self.assertEqual( result.influenceNames(), mixed.influenceNames() )
self.assertEqual( result.influencePose(), mixed.influencePose() )
self.assertEqual( result.pointIndexOffsets(), mixed.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), mixed.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), mixed.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
mixedWeights = mixed.pointInfluenceWeights()
for i in range( 0, result.pointInfluenceWeights().size() ) :
self.assertAlmostEqual( resultWeights[i], mixedWeights[i], 6 )
def testLockedInput( self ) :
""" Test MixSmoothSkinningWeightsOp with locked input weights"""
ssd = self.original()
op = IECoreScene.MixSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['skinningDataToMix'].setValue( self.toMix() )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 1, 1, 1 ] ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
mixedWeights = ssd.pointInfluenceWeights()
for i in range( 0, result.pointInfluenceWeights().size() ) :
self.assertAlmostEqual( resultWeights[i], mixedWeights[i], 6 )
def testLockedMixingData( self ) :
""" Test MixSmoothSkinningWeightsOp with locked mixing weights"""
ssd = self.original()
op = IECoreScene.MixSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['skinningDataToMix'].setValue( self.toMix() )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0, 0, 0 ] ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
mixed = self.toMix()
self.assertEqual( result.influenceNames(), mixed.influenceNames() )
self.assertEqual( result.influencePose(), mixed.influencePose() )
self.assertEqual( result.pointIndexOffsets(), mixed.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), mixed.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), mixed.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
mixedWeights = mixed.pointInfluenceWeights()
for i in range( 0, result.pointInfluenceWeights().size() ) :
self.assertAlmostEqual( resultWeights[i], mixedWeights[i], 6 )
def testMix0_50_100( self ) :
""" Test MixSmoothSkinningWeightsOp with some mixed and some locked weights"""
ssd = self.original()
op = IECoreScene.MixSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
op.parameters()['skinningDataToMix'].setValue( self.toMix() )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0, 0.5, 1 ] ) )
result = op.operate()
self.assertEqual( result.influenceNames(), ssd.influenceNames() )
self.assertEqual( result.influencePose(), ssd.influencePose() )
self.assertEqual( result.pointIndexOffsets(), ssd.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), ssd.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), ssd.pointInfluenceIndices() )
self.assertNotEqual( result.pointInfluenceWeights(), ssd.pointInfluenceWeights() )
self.assertNotEqual( result, ssd )
mixed = self.mixed0_50_100()
self.assertEqual( result.influenceNames(), mixed.influenceNames() )
self.assertEqual( result.influencePose(), mixed.influencePose() )
self.assertEqual( result.pointIndexOffsets(), mixed.pointIndexOffsets() )
self.assertEqual( result.pointInfluenceCounts(), mixed.pointInfluenceCounts() )
self.assertEqual( result.pointInfluenceIndices(), mixed.pointInfluenceIndices() )
resultWeights = result.pointInfluenceWeights()
mixedWeights = mixed.pointInfluenceWeights()
for i in range( 0, result.pointInfluenceWeights().size() ) :
self.assertAlmostEqual( resultWeights[i], mixedWeights[i], 6 )
def testErrorStates( self ) :
""" Test MixSmoothSkinningWeightsOp with the various error states"""
ssd = self.original()
op = IECoreScene.MixSmoothSkinningWeightsOp()
op.parameters()['input'].setValue( ssd )
# no data to mix
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0.5, 0.5, 0.5 ] ) )
self.assertRaises( RuntimeError, op.operate )
# wrong number of mixing weights
op.parameters()['skinningDataToMix'].setValue( self.toMix() )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0.5, 0.5 ] ) )
self.assertRaises( RuntimeError, op.operate )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0.5, 0.5, 0.5, 0.5 ] ) )
self.assertRaises( RuntimeError, op.operate )
# wrong number of influences
bad = IECoreScene.SmoothSkinningData( IECore.StringVectorData( [ 'jointA', 'jointB' ] ), ssd.influencePose(), ssd.pointIndexOffsets(), ssd.pointInfluenceCounts(), ssd.pointInfluenceIndices(), ssd.pointInfluenceWeights() )
op.parameters()['skinningDataToMix'].setValue( bad )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0.5, 0.5, 0.5 ] ) )
self.assertRaises( RuntimeError, op.operate )
bad = IECoreScene.SmoothSkinningData( IECore.StringVectorData( [ 'jointA', 'jointB', 'jointC', 'jointD' ] ), ssd.influencePose(), ssd.pointIndexOffsets(), ssd.pointInfluenceCounts(), ssd.pointInfluenceIndices(), ssd.pointInfluenceWeights() )
op.parameters()['skinningDataToMix'].setValue( bad )
self.assertRaises( RuntimeError, op.operate )
# wrong number of points
bad = IECoreScene.SmoothSkinningData( ssd.influenceNames(), ssd.influencePose(), IECore.IntVectorData( [0, 2, 5, 6, 8] ), ssd.pointInfluenceCounts(), ssd.pointInfluenceIndices(), ssd.pointInfluenceWeights() )
op.parameters()['skinningDataToMix'].setValue( bad )
op.parameters()['mixingWeights'].setValue( IECore.FloatVectorData( [ 0.5, 0.5, 0.5 ] ) )
self.assertRaises( RuntimeError, op.operate )
if __name__ == "__main__":
unittest.main()
| 46.288754
| 243
| 0.73406
|
814eb30d2e20c51f9ed9b3a43a6c0bcc5dd63bdb
| 10,837
|
py
|
Python
|
tests/test_crawl.py
|
nfunato/scrapy
|
9d3ded5f2202e1b933e3f38671b114bb0ea238ce
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_crawl.py
|
nfunato/scrapy
|
9d3ded5f2202e1b933e3f38671b114bb0ea238ce
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_crawl.py
|
nfunato/scrapy
|
9d3ded5f2202e1b933e3f38671b114bb0ea238ce
|
[
"BSD-3-Clause"
] | 1
|
2019-07-17T09:23:13.000Z
|
2019-07-17T09:23:13.000Z
|
import json
import socket
import logging
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.http import Request
from scrapy.crawler import CrawlerRunner
from tests import mock
from tests.spiders import FollowAllSpider, DelaySpider, SimpleSpider, \
BrokenStartRequestsSpider, SingleRequestSpider, DuplicateStartRequestsSpider
from tests.mockserver import MockServer
class CrawlTestCase(TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self.runner = CrawlerRunner()
def tearDown(self):
self.mockserver.__exit__(None, None, None)
@defer.inlineCallbacks
def test_follow_all(self):
crawler = self.runner.create_crawler(FollowAllSpider)
yield crawler.crawl()
self.assertEqual(len(crawler.spider.urls_visited), 11) # 10 + start_url
@defer.inlineCallbacks
def test_delay(self):
# short to long delays
yield self._test_delay(0.2, False)
yield self._test_delay(1, False)
# randoms
yield self._test_delay(0.2, True)
yield self._test_delay(1, True)
@defer.inlineCallbacks
def _test_delay(self, delay, randomize):
settings = {"DOWNLOAD_DELAY": delay, 'RANDOMIZE_DOWNLOAD_DELAY': randomize}
crawler = CrawlerRunner(settings).create_crawler(FollowAllSpider)
yield crawler.crawl(maxlatency=delay * 2)
t = crawler.spider.times
totaltime = t[-1] - t[0]
avgd = totaltime / (len(t) - 1)
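        # RANDOMIZE_DOWNLOAD_DELAY spreads waits over roughly 0.5x-1.5x the configured
        # delay, hence the wider tolerance when randomization is enabled.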
tolerance = 0.6 if randomize else 0.2
self.assertTrue(avgd > delay * (1 - tolerance),
"download delay too small: %s" % avgd)
@defer.inlineCallbacks
def test_timeout_success(self):
crawler = self.runner.create_crawler(DelaySpider)
yield crawler.crawl(n=0.5)
self.assertTrue(crawler.spider.t1 > 0)
self.assertTrue(crawler.spider.t2 > 0)
self.assertTrue(crawler.spider.t2 > crawler.spider.t1)
@defer.inlineCallbacks
def test_timeout_failure(self):
crawler = CrawlerRunner({"DOWNLOAD_TIMEOUT": 0.35}).create_crawler(DelaySpider)
yield crawler.crawl(n=0.5)
self.assertTrue(crawler.spider.t1 > 0)
self.assertTrue(crawler.spider.t2 == 0)
self.assertTrue(crawler.spider.t2_err > 0)
self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)
# server hangs after receiving response headers
yield crawler.crawl(n=0.5, b=1)
self.assertTrue(crawler.spider.t1 > 0)
self.assertTrue(crawler.spider.t2 == 0)
self.assertTrue(crawler.spider.t2_err > 0)
self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)
@defer.inlineCallbacks
def test_retry_503(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/status?n=503")
self._assert_retried(l)
@defer.inlineCallbacks
def test_retry_conn_failed(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:65432/status?n=503")
self._assert_retried(l)
@defer.inlineCallbacks
def test_retry_dns_error(self):
with mock.patch('socket.gethostbyname',
side_effect=socket.gaierror(-5, 'No address associated with hostname')):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://example.com/")
self._assert_retried(l)
@defer.inlineCallbacks
def test_start_requests_bug_before_yield(self):
with LogCapture('scrapy', level=logging.ERROR) as l:
crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
yield crawler.crawl(fail_before_yield=1)
self.assertEqual(len(l.records), 1)
record = l.records[0]
self.assertIsNotNone(record.exc_info)
self.assertIs(record.exc_info[0], ZeroDivisionError)
@defer.inlineCallbacks
def test_start_requests_bug_yielding(self):
with LogCapture('scrapy', level=logging.ERROR) as l:
crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
yield crawler.crawl(fail_yielding=1)
self.assertEqual(len(l.records), 1)
record = l.records[0]
self.assertIsNotNone(record.exc_info)
self.assertIs(record.exc_info[0], ZeroDivisionError)
@defer.inlineCallbacks
def test_start_requests_lazyness(self):
settings = {"CONCURRENT_REQUESTS": 1}
crawler = CrawlerRunner(settings).create_crawler(BrokenStartRequestsSpider)
yield crawler.crawl()
#self.assertTrue(False, crawler.spider.seedsseen)
#self.assertTrue(crawler.spider.seedsseen.index(None) < crawler.spider.seedsseen.index(99),
# crawler.spider.seedsseen)
@defer.inlineCallbacks
def test_start_requests_dupes(self):
settings = {"CONCURRENT_REQUESTS": 1}
crawler = CrawlerRunner(settings).create_crawler(DuplicateStartRequestsSpider)
yield crawler.crawl(dont_filter=True, distinct_urls=2, dupe_factor=3)
self.assertEqual(crawler.spider.visited, 6)
yield crawler.crawl(dont_filter=False, distinct_urls=3, dupe_factor=4)
self.assertEqual(crawler.spider.visited, 3)
@defer.inlineCallbacks
def test_unbounded_response(self):
# Completeness of responses without Content-Length or Transfer-Encoding
        # cannot be determined; we treat them as valid but flag them as "partial"
from six.moves.urllib.parse import urlencode
query = urlencode({'raw': '''\
HTTP/1.1 200 OK
Server: Apache-Coyote/1.1
X-Powered-By: Servlet 2.4; JBoss-4.2.3.GA (build: SVNTag=JBoss_4_2_3_GA date=200807181417)/JBossWeb-2.0
Set-Cookie: JSESSIONID=08515F572832D0E659FD2B0D8031D75F; Path=/
Pragma: no-cache
Expires: Thu, 01 Jan 1970 00:00:00 GMT
Cache-Control: no-cache
Cache-Control: no-store
Content-Type: text/html;charset=UTF-8
Content-Language: en
Date: Tue, 27 Aug 2013 13:05:05 GMT
Connection: close
foo body
with multiples lines
'''})
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/raw?{0}".format(query))
self.assertEqual(str(l).count("Got response 200"), 1)
@defer.inlineCallbacks
def test_retry_conn_lost(self):
# connection lost after receiving data
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/drop?abort=0")
self._assert_retried(l)
@defer.inlineCallbacks
def test_retry_conn_aborted(self):
# connection lost before receiving data
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("http://localhost:8998/drop?abort=1")
self._assert_retried(l)
def _assert_retried(self, log):
self.assertEqual(str(log).count("Retrying"), 2)
self.assertEqual(str(log).count("Gave up retrying"), 1)
@defer.inlineCallbacks
def test_referer_header(self):
"""Referer header is set by RefererMiddleware unless it is already set"""
req0 = Request('http://localhost:8998/echo?headers=1&body=0', dont_filter=1)
req1 = req0.replace()
req2 = req0.replace(headers={'Referer': None})
req3 = req0.replace(headers={'Referer': 'http://example.com'})
req0.meta['next'] = req1
req1.meta['next'] = req2
req2.meta['next'] = req3
crawler = self.runner.create_crawler(SingleRequestSpider)
yield crawler.crawl(seed=req0)
# basic asserts in case of weird communication errors
self.assertIn('responses', crawler.spider.meta)
self.assertNotIn('failures', crawler.spider.meta)
# start requests doesn't set Referer header
        echo0 = json.loads(crawler.spider.meta['responses'][0].body)
self.assertNotIn('Referer', echo0['headers'])
# following request sets Referer to start request url
echo1 = json.loads(crawler.spider.meta['responses'][1].body)
self.assertEqual(echo1['headers'].get('Referer'), [req0.url])
# next request avoids Referer header
echo2 = json.loads(crawler.spider.meta['responses'][2].body)
self.assertNotIn('Referer', echo2['headers'])
# last request explicitly sets a Referer header
echo3 = json.loads(crawler.spider.meta['responses'][3].body)
self.assertEqual(echo3['headers'].get('Referer'), ['http://example.com'])
@defer.inlineCallbacks
def test_engine_status(self):
from scrapy.utils.engine import get_engine_status
est = []
def cb(response):
est.append(get_engine_status(crawler.engine))
crawler = self.runner.create_crawler(SingleRequestSpider)
yield crawler.crawl(seed='http://localhost:8998/', callback_func=cb)
self.assertEqual(len(est), 1, est)
s = dict(est[0])
self.assertEqual(s['engine.spider.name'], crawler.spider.name)
self.assertEqual(s['len(engine.scraper.slot.active)'], 1)
@defer.inlineCallbacks
def test_graceful_crawl_error_handling(self):
"""
Test whether errors happening anywhere in Crawler.crawl() are properly
reported (and not somehow swallowed) after a graceful engine shutdown.
The errors should not come from within Scrapy's core but from within
spiders/middlewares/etc., e.g. raised in Spider.start_requests(),
SpiderMiddleware.process_start_requests(), etc.
"""
class TestError(Exception):
pass
class FaultySpider(SimpleSpider):
def start_requests(self):
raise TestError
crawler = self.runner.create_crawler(FaultySpider)
yield self.assertFailure(crawler.crawl(), TestError)
self.assertFalse(crawler.crawling)
@defer.inlineCallbacks
def test_crawlerrunner_accepts_crawler(self):
crawler = self.runner.create_crawler(SimpleSpider)
with LogCapture() as log:
yield self.runner.crawl(crawler, "http://localhost:8998/status?n=200")
self.assertIn("Got response 200", str(log))
@defer.inlineCallbacks
def test_crawl_multiple(self):
self.runner.crawl(SimpleSpider, "http://localhost:8998/status?n=200")
self.runner.crawl(SimpleSpider, "http://localhost:8998/status?n=503")
with LogCapture() as log:
yield self.runner.join()
self._assert_retried(log)
self.assertIn("Got response 200", str(log))
| 40.286245
| 103
| 0.676202
|
566494acdee128481d5770bf51f3c335ece9b5d1
| 2,058
|
py
|
Python
|
bot/exts/utils/bookmark.py
|
dhzdhd/Obsidian-Python
|
9ab047aeb96bf79f60f7c268c3252528cbb992a7
|
[
"MIT"
] | null | null | null |
bot/exts/utils/bookmark.py
|
dhzdhd/Obsidian-Python
|
9ab047aeb96bf79f60f7c268c3252528cbb992a7
|
[
"MIT"
] | null | null | null |
bot/exts/utils/bookmark.py
|
dhzdhd/Obsidian-Python
|
9ab047aeb96bf79f60f7c268c3252528cbb992a7
|
[
"MIT"
] | null | null | null |
import datetime
import discord
from discord.ext import commands
from bot.utils.embed import SuccessEmbed
# Add embed support
class Bookmark(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
self._dict = {}
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload) -> None:
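        # Reacting with 📩 DMs the stored bookmark embed to the user; ❌ deletes the bookmark message.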
if payload.user_id != self.bot.user.id:
if payload.emoji == discord.PartialEmoji(name="📩"):
await payload.member.send(embed=self._dict[int(payload.message_id)])
elif payload.emoji == discord.PartialEmoji(name="❌"):
channel = await self.bot.fetch_channel(payload.channel_id)
message = await channel.fetch_message(payload.message_id)
await message.delete()
@commands.command(name="bookmark", aliases=("bm",))
async def bookmark(self, ctx: commands.Context, message_id: int = None) -> None:
await ctx.message.delete()
if message_id is None:
if ctx.message.reference is None:
return
message = await ctx.fetch_message(ctx.message.reference.message_id)
else:
message = await ctx.fetch_message(message_id)
dm_embed = discord.Embed(
title="Bookmarked message",
description=f"Author: **{message.author.name}**\nContents:\n{message.content}",
colour=discord.Colour.blue(),
timestamp=datetime.datetime.utcnow()
).set_footer(text=f"Invoked by {ctx.author.name}", icon_url=ctx.author.avatar_url)
reaction_embed = SuccessEmbed(
description=f"**Message bookmarked!**\nLink to message: \n{message.jump_url}",
author=ctx.author
)
msg = await ctx.send(embed=reaction_embed, delete_after=3600)
await msg.add_reaction("📩")
await msg.add_reaction("❌")
self._dict.update({int(msg.id): dm_embed})
await ctx.author.send(embed=dm_embed)
def setup(bot: commands.Bot) -> None:
bot.add_cog(Bookmark(bot))
| 35.482759
| 91
| 0.633139
|
5bb6946987b63b884e909f8782b091741b5a5567
| 1,073
|
py
|
Python
|
tests/v2/test_relationship_to_role.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
tests/v2/test_relationship_to_role.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
tests/v2/test_relationship_to_role.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import absolute_import
import sys
import unittest
import datadog_api_client.v2
try:
from datadog_api_client.v2.model import relationship_to_role_data
except ImportError:
relationship_to_role_data = sys.modules[
'datadog_api_client.v2.model.relationship_to_role_data']
from datadog_api_client.v2.model.relationship_to_role import RelationshipToRole
class TestRelationshipToRole(unittest.TestCase):
"""RelationshipToRole unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRelationshipToRole(self):
"""Test RelationshipToRole"""
# FIXME: construct object with mandatory attributes with example values
# model = RelationshipToRole() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.512821
| 108
| 0.746505
|
3966e2da2fc2c58724cecd98f3d12609bf25c843
| 1,269
|
py
|
Python
|
src/followbot/scripts/follower_color_filter.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
src/followbot/scripts/follower_color_filter.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
src/followbot/scripts/follower_color_filter.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
#! /usr/bin/env python
import rospy, cv2, cv_bridge, numpy
from sensor_msgs.msg import Image
class Follower:
# Constructor
def __init__(self):
# Instantiate a CvBridge object
self.bridge = cv_bridge.CvBridge()
# Create a CV window
cv2.namedWindow("window", 1)
# Create a Subscriber to /camera/rgb/image_raw
self.image_sub = rospy.Subscriber("camera/rgb/image_raw", Image, self.image_callback)
# Callback function
def image_callback(self, msg):
# Get the image using the CV bridge
image = self.bridge.imgmsg_to_cv2(msg)
# Get the HSV representation of the obtained RGB image
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# Define HSV boundaries for the filter (filter the yellow line)
lower_yellow = numpy.array([ 50, 50, 170])
upper_yellow = numpy.array([255, 255, 190])
        # Generate a binary HSV image only with the pixels within the filter boundaries
mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
# Apply the mask to the original image
masked = cv2.bitwise_and(image, image, mask=mask)
# Show image in opened window
cv2.imshow("window", mask)
# Wait 3 ms
cv2.waitKey(3)
if __name__ == '__main__':
# Initialize
rospy.init_node('follower')
follower = Follower()
# Pass control to ROS
rospy.spin()
| 25.897959
| 87
| 0.723404
|
ee77237cfafda8535d47a90a63b3e73d28c5a59e
| 42
|
py
|
Python
|
demon/__main__.py
|
lapidshay/DEMON
|
c7eeeb22be279c9dd9a6555bae06702f369021ac
|
[
"BSD-2-Clause"
] | 30
|
2017-02-24T08:56:57.000Z
|
2022-03-29T08:52:07.000Z
|
demon/__main__.py
|
KDDComplexNetworkAnalysis/DEMON
|
c32d80198a90c4d6c2165e34cffd79315aa764e7
|
[
"BSD-2-Clause"
] | 139
|
2017-08-24T08:40:50.000Z
|
2022-03-24T01:42:47.000Z
|
demon/__main__.py
|
KDDComplexNetworkAnalysis/DEMON
|
c32d80198a90c4d6c2165e34cffd79315aa764e7
|
[
"BSD-2-Clause"
] | 18
|
2016-06-08T21:39:46.000Z
|
2021-08-07T20:04:02.000Z
|
from demon.alg.Demon import main
main()
| 8.4
| 32
| 0.738095
|
3aa823b76fbb9b08519de64730ed5b8cc52abe43
| 24,288
|
py
|
Python
|
radio/views.py
|
hayden-t/trunk-player
|
26e597afe5f967eed36ae31d2bf2297efcf58ab5
|
[
"MIT"
] | null | null | null |
radio/views.py
|
hayden-t/trunk-player
|
26e597afe5f967eed36ae31d2bf2297efcf58ab5
|
[
"MIT"
] | null | null | null |
radio/views.py
|
hayden-t/trunk-player
|
26e597afe5f967eed36ae31d2bf2297efcf58ab5
|
[
"MIT"
] | null | null | null |
#import functools
import ssl
import sys
import re
import json
import pytz
from itertools import chain
from django.shortcuts import render, redirect
from django.http import Http404
from django.views.generic import ListView
from django.db.models import Q
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.contrib.auth import authenticate, login
from django.conf import settings
from django.views.generic import ListView, UpdateView
from django.views.generic.detail import DetailView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.exceptions import ImproperlyConfigured
from .models import *
from rest_framework import viewsets, generics
from .serializers import TransmissionSerializer, TalkGroupSerializer, ScanListSerializer, MenuScanListSerializer, MenuTalkGroupListSerializer, MessageSerializer
from datetime import datetime, timedelta
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import mail_admins, send_mail
from allauth.account.models import EmailAddress as allauth_emailaddress
from pprint import pprint
from django.contrib import messages
import logging
from .forms import *
logger = logging.getLogger(__name__)
def check_anonymous(decorator):
"""
    Decorator used to see if we allow anonymous access
"""
anonymous = getattr(settings, 'ALLOW_ANONYMOUS', True)
return decorator if not anonymous else lambda x: x
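# Hypothetical usage: @check_anonymous(login_required) enforces login only when
# ALLOW_ANONYMOUS is disabled in settings.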
@login_required
def userScanList(request):
template = 'radio/userscanlist.html'
if request.method == "POST":
form = UserScanForm(request.POST)
if form.is_valid():
print('Form Valid')
name = form.cleaned_data['name']
tgs = form.cleaned_data['talkgroups']
print('Form Data [{}] [{}]'.format(name, tgs))
sl = ScanList()
sl.created_by = request.user
sl.name = name
sl.description = name
sl.save()
sl.talkgroups.add(*tgs)
return redirect('user_profile')
else:
print('Form not Valid')
else:
form = UserScanForm()
return render(request, template, {'form': form})
@login_required
def userProfile(request):
template = 'radio/profile.html'
if request.method == "POST":
form = UserForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
return redirect('user_profile')
else:
profile_form = UserForm(instance=request.user)
profile = Profile.objects.get(user=request.user)
scan_lists = ScanList.objects.filter(created_by=request.user)
return render(request, template, {'profile_form': profile_form, 'profile': profile, 'scan_lists': scan_lists} )
def agencyList(request):
template = 'radio/agency_list.html'
query_data = Agency.objects.exclude(short='_DEF_').order_by('name')
return render(request, template, {'agency': query_data})
def cityListView(request):
template = 'radio/city_list.html'
query_data = City.objects.filter(visible=True)
return render(request, template, {'cities': query_data})
def cityDetailView(request, slug):
template = 'radio/city_detail.html'
query_data = City.objects.get(slug=slug)
return render(request, template, {'object': query_data})
def TransDetailView(request, slug):
template = 'radio/transmission_detail.html'
status = 'Good'
try:
query_data = Transmission.objects.filter(slug=slug)
if not query_data:
raise Http404
except Transmission.DoesNotExist:
raise Http404
query_data2 = limit_transmission_history(request, query_data)
if not query_data2 and not query_data[0].incident_set.filter(public=True):
query_data[0].audio_file = None
status = 'Expired'
restricted, new_query = restrict_talkgroups(request, query_data)
if not new_query:
raise Http404
return render(request, template, {'object': query_data[0], 'status': status})
def transDownloadView(request, slug):
import requests
try:
query_data = Transmission.objects.filter(slug=slug)
if not query_data:
raise Http404
except Transmission.DoesNotExist:
raise Http404
query_data2 = limit_transmission_history(request, query_data)
if not query_data2: raise Http404 # Just raise 404 if its too old
restricted, new_query = restrict_talkgroups(request, query_data)
if not new_query: raise Http404
trans = new_query[0]
if trans.audio_file_type == 'm4a':
audio_type = 'audio/m4a'
else:
audio_type = 'audio/mp3'
response = HttpResponse(content_type=audio_type)
start_time = timezone.localtime(trans.start_datetime).strftime('%Y%m%d_%H%M%S')
filename = '{}_{}.{}'.format(start_time, trans.talkgroup_info.slug, trans.audio_file_type)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
url = 'https:{}{}.{}'.format(trans.audio_url, trans.audio_file, trans.audio_file_type)
if trans.audio_url[:2] != '//':
url = 'http:'
if request.is_secure():
url = 'https:'
url += '//{}/{}{}.{}'.format(request.get_host(), trans.audio_url, trans.audio_file, trans.audio_file_type)
data = requests.get(url, verify=False)
response.write(data.content)
return response
class TransmissionViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Transmission.objects.none()
serializer_class = TransmissionSerializer
def get_serializer_context(self):
return {'request': self.request}
class ScanListViewSet(viewsets.ModelViewSet):
queryset = ScanList.objects.all().prefetch_related('talkgroups')
serializer_class = ScanListSerializer
class TalkGroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
# queryset = TalkGroup.objects.filter(public=True)
serializer_class = TalkGroupSerializer
base_name = 'TalkGroup'
def get_queryset(self):
if settings.ACCESS_TG_RESTRICT:
tg = allowed_tg_list(self.request.user)
else:
tg = TalkGroup.objects.filter(public=True)
return tg
class TransmissionView(ListView):
model = Transmission
paginate_by = 50
def ScanListFilter(request, filter_val):
template = 'radio/transmission.html'
return render(request, template, {'filter_data': filter_val, 'api_url': '/api_v1/ScanList'})
def TalkGroupFilterNew(request, filter_val):
template = 'radio/transmission_play.html'
return render(request, template, {'filter_data': filter_val})
def TalkGroupFilterjq(request, filter_val):
template = 'radio/transmission_list_jq.html'
return TalkGroupFilterBase(request, filter_val, template)
def TalkGroupFilter(request, filter_val):
template = 'radio/transmission_list.html'
return TalkGroupFilterBase(request, filter_val, template)
# Open to anyone
def Generic(request, page_name):
template = 'radio/generic.html'
query_data = WebHtml.objects.get(name=page_name)
return render(request, template, {'html_object': query_data})
def get_user_profile(user):
if user.is_authenticated:
user_profile = Profile.objects.get(user=user)
else:
try:
anon_user = User.objects.get(username='ANONYMOUS_USER')
except User.DoesNotExist:
raise ImproperlyConfigured('ANONYMOUS_USER is missing from User table, was "./manage.py migrations" not run?')
user_profile = Profile.objects.get(user=anon_user)
return user_profile
def get_history_allow(user):
user_profile = get_user_profile(user)
if user_profile:
history_minutes = user_profile.plan.history
else:
history_minutes = settings.ANONYMOUS_TIME
return history_minutes
def limit_transmission_history(request, query_data):
history_minutes = get_history_allow(request.user)
if history_minutes > 0:
time_threshold = timezone.now() - timedelta(minutes=history_minutes)
query_data = query_data.filter(start_datetime__gt=time_threshold)
return query_data
def limit_transmission_history_six_months(request, query_data):
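    # Fixed six-month window: 259200 minutes = 180 days.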
history_minutes = 259200
time_threshold = timezone.now() - timedelta(minutes=history_minutes)
query_data = query_data.filter(start_datetime__gt=time_threshold)
return query_data
def allowed_tg_list(user):
user_profile = get_user_profile(user)
tg_list = None
for group in user_profile.talkgroup_access.all():
if tg_list is None:
tg_list = group.talkgroups.all()
else:
tg_list = tg_list | group.talkgroups.all()
if tg_list:
tg_list = tg_list.distinct()
else:
# Set blank talkgroup queryset
tg_list = TalkGroup.objects.none()
return tg_list
def restrict_talkgroups(request, query_data):
''' Checks to make sure the user can view
each of the talkgroups in the query_data
returns ( was_restricted, new query_data )
'''
if not settings.ACCESS_TG_RESTRICT:
return False, query_data
tg_list = allowed_tg_list(request.user)
query_data = query_data.filter(talkgroup_info__in=tg_list)
return None, query_data
def TalkGroupFilterBase(request, filter_val, template):
try:
tg = TalkGroup.objects.get(alpha_tag__startswith=filter_val)
except TalkGroup.DoesNotExist:
raise Http404
try:
query_data = Transmission.objects.filter(talkgroup_info=tg).prefetch_related('units')
        #query_data = limit_transmission_history(request, query_data)
        query_data = limit_transmission_history_six_months(request, query_data)
        restricted, query_data = restrict_talkgroups(request, query_data)
except Transmission.DoesNotExist:
raise Http404
return render(request, template, {'object_list': query_data, 'filter_data': filter_val})
class ScanViewSet(generics.ListAPIView):
serializer_class = TransmissionSerializer
def get_queryset(self):
scanlist = self.kwargs['filter_val']
try:
sl = ScanList.objects.get(slug__iexact=scanlist)
except ScanList.DoesNotExist:
if scanlist == 'default':
tg = TalkGroup.objects.all()
else:
print("Scan list does not match")
raise
else:
tg = sl.talkgroups.all()
rc_data = Transmission.objects.filter(talkgroup_info__in=tg).prefetch_related('units').prefetch_related('talkgroup_info')
#rc_data = limit_transmission_history(self.request, rc_data)
rc_data = limit_transmission_history_six_months(self.request, rc_data)
restricted, rc_data = restrict_talkgroups(self.request, rc_data)
return rc_data
class IncViewSet(generics.ListAPIView):
serializer_class = TransmissionSerializer
def get_queryset(self):
inc = self.kwargs['filter_val']
try:
if self.request.user.is_staff:
rc_data = Incident.objects.get(slug__iexact=inc).transmissions.all()
else:
rc_data = Incident.objects.get(slug__iexact=inc, public=True).transmissions.all()
except Incident.DoesNotExist:
print("Incident does not exist")
raise
restricted, rc_data = restrict_talkgroups(self.request, rc_data)
return rc_data
class MessagePopUpViewSet(generics.ListAPIView):
serializer_class = MessageSerializer
def get_queryset(self):
return MessagePopUp.objects.filter(active=True)
class TalkGroupFilterViewSet(generics.ListAPIView):
serializer_class = TransmissionSerializer
def get_queryset(self):
tg_var = self.kwargs['filter_val']
        search_tgs = re.split(r'[\+]', tg_var)
q = Q()
for stg in search_tgs:
q |= Q(common_name__iexact=stg)
q |= Q(slug__iexact=stg)
tg = TalkGroup.objects.filter(q)
rc_data = Transmission.objects.filter(talkgroup_info__in=tg).prefetch_related('units')
#rc_data = limit_transmission_history(self.request, rc_data)
rc_data = limit_transmission_history_six_months(self.request, rc_data)
restricted, rc_data = restrict_talkgroups(self.request, rc_data)
return rc_data
class UnitFilterViewSet(generics.ListAPIView):
serializer_class = TransmissionSerializer
def get_queryset(self):
unit_var = self.kwargs['filter_val']
        search_unit = re.split(r'[\+]', unit_var)
q = Q()
for s_unit in search_unit:
q |= Q(slug__iexact=s_unit)
units = Unit.objects.filter(q)
rc_data = Transmission.objects.filter(units__in=units).filter(talkgroup_info__public=True).prefetch_related('units').distinct()
#rc_data = limit_transmission_history(self.request, rc_data)
rc_data = limit_transmission_history_six_months(self.request, rc_data)
restricted, rc_data = restrict_talkgroups(self.request, rc_data)
return rc_data
class TalkGroupList(ListView):
model = TalkGroup
context_object_name = 'talkgroups'
template_name = 'radio/talkgroup_list.html'
#queryset = TalkGroup.objects.filter(public=True)
def get_queryset(self):
if settings.ACCESS_TG_RESTRICT:
tg = allowed_tg_list(self.request.user)
else:
tg = TalkGroup.objects.filter(public=True)
if self.request.GET.get('recent', None):
tg = tg.order_by('-recent_usage', '-last_transmission')
return tg
@login_required
@csrf_protect
def upgrade(request):
if request.method == 'POST':
form = PaymentForm(request.POST)
if not form.is_valid():
return render(
request,
'registration/upgrade.html',
{'form': form},
)
try:
plan = form.cleaned_data.get('plan_type')
card_name = form.cleaned_data.get('cardholder_name')
stripe_cust = None
logger.error('Change plan to {} for customer {} Card Name {}'.format(plan, stripe_cust, card_name))
stripe_info = None
except stripe.InvalidRequestError as e:
messages.error(request, "Error with stripe {}".format(e))
logger.error("Error with stripe {}".format(e))
return render(
request,
'registration/upgrade.html',
{'form': form},
)
except stripe.CardError as e:
messages.error(request, "<b>Error</b> Sorry there was an error with processing your card:<br>{}".format(e))
logger.error("Error with stripe user card{}".format(e))
return render(
request,
'registration/upgrade.html',
{'form': form},
)
print('------ STRIPE DEBUG -----')
pprint(stripe_info, sys.stderr)
return render(
request,
'registration/upgrade_complete.html',
)
else:
form = PaymentForm()
return render(
request,
'registration/upgrade.html',
{'form': form, },
)
@csrf_protect
def register(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
user = User.objects.create_user(
username=form.cleaned_data['username'],
password=form.cleaned_data['password1'],
email=form.cleaned_data['email']
)
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
new_user = authenticate(username=username, password=password)
if new_user is not None:
if new_user.is_active:
#stripe_actions.customers.create(user=new_user)
login(request, new_user)
return HttpResponseRedirect('/scan/default/')
else:
# this would be weird to get here
return HttpResponseRedirect('/register/success/')
else:
return HttpResponseRedirect('/register/success/')
else:
form = RegistrationForm()
return render(
request,
'registration/register.html',
{ 'form': form },
)
def register_success(request):
return render(
request,
'registration/success.html', {},
)
class MenuScanListViewSet(viewsets.ModelViewSet):
serializer_class = MenuScanListSerializer
queryset = MenuScanList.objects.all()
class MenuTalkGroupListViewSet(viewsets.ModelViewSet):
serializer_class = MenuTalkGroupListSerializer
queryset = MenuTalkGroupList.objects.all()
class UnitUpdateView(PermissionRequiredMixin, UpdateView):
model = Unit
form_class = UnitEditForm
success_url = '/unitupdategood/'
permission_required = ('radio.change_unit')
def form_valid(self, form):
try:
update_unit_email = SiteOption.objects.get(name='SEND_ADMIN_EMAIL_ON_UNIT_NAME')
if update_unit_email.value_boolean_or_string() == True:
                unit = form.save()
                send_mail(
                    'Unit ID Change',
                    'User {} updated unit ID {} Now {}'.format(self.request.user, unit.dec_id, unit.description),
settings.SERVER_EMAIL,
[ mail for name, mail in settings.ADMINS],
fail_silently=False,
)
except SiteOption.DoesNotExist:
pass
return super().form_valid(form)
def ScanDetailsList(request, name):
template = 'radio/scandetaillist.html'
scanlist = None
try:
scanlist = ScanList.objects.get(name=name)
except ScanList.DoesNotExist:
if name == 'default':
query_data = TalkGroup.objects.all()
else:
raise Http404
if scanlist:
query_data = scanlist.talkgroups.all()
return render(request, template, {'object_list': query_data, 'scanlist': scanlist, 'request': request})
@login_required
@csrf_protect
def cancel_plan(request):
template = 'radio/cancel.html'
if request.method == 'POST':
msg = 'User {} ({}) wants to cancel'.format(request.user.username, request.user.pk)
mail_admins('Cancel Subscription', msg )
return render(request, template, {'complete': True})
else:
return render(request, template, {'complete': False})
@csrf_protect
def plans(request):
token = None
has_verified_email = False
plans = None
default_plan = None
if request.method == 'POST':
template = 'radio/subscribed.html'
token = request.POST.get('stripeToken')
plan = request.POST.get('plan')
# See if this user already has a stripe account
try:
stripe_cust = None
except ObjectDoesNotExist:
#stripe_actions.customers.create(user=request.user)
stripe_cust = None
try:
stripe_info = None #stripe_actions.subscriptions.create(customer=stripe_cust, plan=plan, token=request.POST.get('stripeToken'))
except Exception as e: #stripe.CardError as e:
template = 'radio/charge_failed.html'
logger.error("Error with stripe user card{}".format(e))
return render(request, template, {'error_msg': e })
for t in request.POST:
logger.error("{} {}".format(t, request.POST[t]))
else:
template = 'radio/plans.html'
plans = StripePlanMatrix.objects.filter(order__lt=99).filter(active=True)
default_plan = Plan.objects.get(pk=Plan.DEFAULT_PK)
# Check if users email address is verified
if request.user.is_authenticated:
verified_email = allauth_emailaddress.objects.filter(user=request.user, primary=True, verified=True)
if verified_email:
has_verified_email = True
return render(request, template, {'token': token, 'verified_email': has_verified_email, 'plans': plans, 'default_plan': default_plan} )
def incident(request, inc_slug):
template = 'radio/player_main.html'
try:
if request.user.is_staff:
inc = Incident.objects.get(slug=inc_slug)
else:
inc = Incident.objects.get(slug=inc_slug, public=True)
except Incident.DoesNotExist:
raise Http404
return render(request, template, {'inc':inc})
@csrf_exempt
def import_transmission(request):
if request.method == "POST":
settings_auth_token = getattr(settings, 'ADD_TRANS_AUTH_TOKEN', None)
        if settings_auth_token == '7cf5857c61284':  # Check if the default token is still set
return HttpResponse('Unauthorized, default ADD_TRANS_AUTH_TOKEN still set.', status=401)
body_unicode = request.body.decode('utf-8')
request_data = json.loads(body_unicode)
auth_token = request_data.get('auth_token')
if auth_token != settings_auth_token:
return HttpResponse('Unauthorized, check auth_token', status=401)
# System
system_name = request_data.get('system')
if system_name is None:
return HttpResponse('system is missing', status=400)
system, created = System.objects.get_or_create(name=system_name)
# Source
source_name = request_data.get('source')
if source_name is None:
return HttpResponse('source is missing', status=400)
source, created = Source.objects.get_or_create(description=source_name)
# TalkGroup
tg_dec = request_data.get('talkgroup')
if tg_dec is None:
return HttpResponse('talkgroup is missing', status=400)
try:
tg = TalkGroup.objects.get(dec_id=tg_dec, system=system)
except TalkGroup.DoesNotExist:
name = '#{}'.format(tg_dec)
tg = TalkGroup.objects.create(dec_id=tg_dec, system=system, alpha_tag=name, description='TalkGroup {}'.format(name))
# Transmission start
epoc_ts = request_data.get('start_time')
start_dt = datetime.fromtimestamp(int(epoc_ts), pytz.UTC)
epoc_end_ts = request_data.get('stop_time')
end_dt = datetime.fromtimestamp(int(epoc_end_ts), pytz.UTC)
play_length = epoc_end_ts - epoc_ts
audio_filename = request_data.get('audio_filename')
audio_file_url_path = request_data.get('audio_file_url_path')
        freq = request_data.get('freq')  # This should be deprecated
audio_file_type = request_data.get('audio_file_type')
audio_file_play_length = request_data.get('audio_file_play_length', play_length)
has_audio = request_data.get('has_audio', True)
t = Transmission( start_datetime = start_dt,
end_datetime = end_dt,
audio_file = audio_filename,
talkgroup = tg_dec,
talkgroup_info = tg,
freq = int(float(freq)),
emergency = False,
source = source,
system = system,
audio_file_url_path = audio_file_url_path,
audio_file_type = audio_file_type,
play_length = audio_file_play_length,
has_audio = has_audio,
)
t.save()
# Units
count = 0
for unit in request_data.get('srcList'):
try:
trans_unit = unit['src']
except TypeError:
trans_unit = unit
            u, created = Unit.objects.get_or_create(dec_id=trans_unit, system=t.system)
            TranmissionUnit.objects.create(transmission=t, unit=u, order=count)
            count += 1
return HttpResponse("Transmission added [{}]".format(t.pk))
else:
return HttpResponse(status=405)
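# Hypothetical example payload for the import_transmission endpoint above,
# illustrating the fields the view reads from the POSTed JSON body. All values
# (auth_token, system name, file names, ids) are placeholders, not real data.
def _example_import_transmission_payload():
    return {
        'auth_token': 'REPLACE_WITH_ADD_TRANS_AUTH_TOKEN',
        'system': 'Example P25 System',
        'source': 'site-1 receiver',
        'talkgroup': 1001,                      # decimal talkgroup id
        'start_time': 1609459200,               # epoch seconds
        'stop_time': 1609459230,
        'audio_filename': '1609459200_1001.mp3',
        'audio_file_url_path': '/audio/1609459200_1001.mp3',
        'audio_file_type': 'mp3',
        'freq': '853062500',
        'srcList': [{'src': 5551001}, {'src': 5551002}],
    }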
| 35.77025
| 160
| 0.65905
|
0a22aec433b1b8addcebae728212cc6b36bb8f1c
| 688
|
py
|
Python
|
setup.py
|
parasKumarSahu/Knolml-Analysis-Package
|
40fd3589ce045fe662c0fb3869464fc906a7d3ce
|
[
"MIT"
] | null | null | null |
setup.py
|
parasKumarSahu/Knolml-Analysis-Package
|
40fd3589ce045fe662c0fb3869464fc906a7d3ce
|
[
"MIT"
] | null | null | null |
setup.py
|
parasKumarSahu/Knolml-Analysis-Package
|
40fd3589ce045fe662c0fb3869464fc906a7d3ce
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="kml-analysis-parasKumarSahu",
version="0.1.1",
author="Paras Kumar",
author_email="paraskumardavhehal1@gmail.com",
description="Wikipedia Analysis Toolkit",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/parasKumarSahu/Knolml-Analysis-Package",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 31.272727
| 68
| 0.681686
|
d42d1754a1a90dd23bf5fb5f9c37a26c9dda6e6c
| 1,935
|
py
|
Python
|
exercises/arduino_projects/plant_watering_system/data_plotter.py
|
aliaksandr-klimovich/sandbox
|
adab6b18f3d78a891c38609abc71b5d7a751919c
|
[
"Unlicense"
] | 1
|
2020-12-25T18:21:18.000Z
|
2020-12-25T18:21:18.000Z
|
exercises/arduino_projects/plant_watering_system/data_plotter.py
|
aliaksandr-klimovich/sandbox
|
adab6b18f3d78a891c38609abc71b5d7a751919c
|
[
"Unlicense"
] | 1
|
2021-02-11T15:05:59.000Z
|
2021-02-11T15:05:59.000Z
|
exercises/arduino_projects/plant_watering_system/data_plotter.py
|
aliaksandr-klimovich/sandbox
|
adab6b18f3d78a891c38609abc71b5d7a751919c
|
[
"Unlicense"
] | null | null | null |
import os
import webbrowser
from collections import namedtuple
from datetime import timedelta, datetime
from threading import Thread
from data_saver import DB
from logger import get_logger
PLOT_FILE_NAME = 'plot.svg'
PLOT_SIZE = namedtuple('PLOT_SIZE', 'width,height')(width=800, height=600)
PIPE_NAME = 'tmp_pipe'
ROLL_BACK_TIME = timedelta(days=1)
log = get_logger(__name__)
def gnuplot():
cmd = (
f'exec 0>/dev/null 1>/dev/null 2>/dev/null 3<{PIPE_NAME};'
f'gnuplot -e "'
f'set terminal svg size {PLOT_SIZE.width},{PLOT_SIZE.height};'
f'set output \'{PLOT_FILE_NAME}\';'
f'set datafile separator comma;'
f'set xdata time;'
f'set timefmt \'%Y-%m-%dT%H:%M:%S\';'
f'set format x \'%d/%H:%M\';'
f'unset key;'
f'plot \'<&3\' using 1:2;";'
)
log.debug(cmd)
os.system(cmd)
def pipe_writer(data):
log.debug(f'pipe_name = {PIPE_NAME}')
fd = os.open(PIPE_NAME, os.O_WRONLY)
log.debug(f'fd = {fd}')
for x, y in data:
assert isinstance(x, datetime)
timestamp = x.isoformat(timespec='seconds')
os.write(fd, bytes(f'{timestamp},{y}\n', 'utf-8'))
os.close(fd)
def main():
if not os.path.exists(PIPE_NAME):
log.info(f'Pipe does not exit, create pipe {PIPE_NAME}')
os.mkfifo(PIPE_NAME)
db = DB()
data = db.get_data(ROLL_BACK_TIME)
db.disconnect()
thread_gnuplot = Thread(target=gnuplot)
thread_pipe_writer = Thread(target=pipe_writer, args=(data,))
thread_gnuplot.start()
thread_pipe_writer.start()
thread_gnuplot.join()
thread_pipe_writer.join()
cwd = os.getcwd()
log.debug(cwd)
log.info(f'Open svg file {PLOT_FILE_NAME}')
webbrowser.open(f'file://{cwd}/{PLOT_FILE_NAME}')
if os.path.exists(PIPE_NAME):
log.info(f'Remove pipe {PIPE_NAME}')
os.remove(PIPE_NAME)
if __name__ == '__main__':
main()
| 25.460526
| 74
| 0.634625
|
6c4b4a52848185bdc10cda12f47c4907f59a0324
| 15,220
|
py
|
Python
|
eqa_nav/tools/train_im_room.py
|
sawravchy/MT-EQA
|
9a5483cc29ed6ee8d00590e28264743c6bcbe7ad
|
[
"BSD-3-Clause"
] | 22
|
2019-06-10T22:50:39.000Z
|
2021-11-06T15:55:21.000Z
|
eqa_nav/tools/train_im_room.py
|
sawravchy/MT-EQA
|
9a5483cc29ed6ee8d00590e28264743c6bcbe7ad
|
[
"BSD-3-Clause"
] | 1
|
2019-07-15T08:14:58.000Z
|
2019-07-18T07:26:20.000Z
|
eqa_nav/tools/train_im_room.py
|
sawravchy/MT-EQA
|
9a5483cc29ed6ee8d00590e28264743c6bcbe7ad
|
[
"BSD-3-Clause"
] | 5
|
2019-06-10T23:37:13.000Z
|
2021-11-06T15:55:14.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import h5py
import time
import argparse
import random
import numpy as np
import os, sys, json
import os.path as osp
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import _init_paths
from nav.loaders.nav_imitation_loader import NavImitationDataset
from nav.models.crits import SeqModelCriterion, MaskedMSELoss
from nav.models.navigator import Navigator
import nav.models.utils as model_utils
def evaluate(val_dataset, model, nll_crit, mse_crit, opt):
# set mode
model.eval()
# predict
predictions = []
overall_nll = 0
overall_teacher_forcing_acc, overall_teacher_forcing_cnt = 0, 0
overall_mse = 0
Nav_nll = {'object': 0, 'room': 0}
Nav_cnt = {'object': 0, 'room': 0}
Nav_teacher_forcing_acc = {'object': 0, 'room': 0}
Nav_teacher_forcing_cnt = {'object': 0, 'room': 0}
for ix in range(len(val_dataset)):
# data = {qid, path_ix, house, id, type, phrase, phrase_emb, ego_feats, next_feats, res_feats,
# action_inputs, action_outputs, action_masks, ego_imgs}
data = val_dataset[ix]
ego_feats = torch.from_numpy(data['ego_feats']).cuda().unsqueeze(0) # (1, L, 3200)
phrase_embs = torch.from_numpy(data['phrase_emb']).cuda().unsqueeze(0) # (1, 300)
action_inputs = torch.from_numpy(data['action_inputs']).cuda().unsqueeze(0) # (1, L)
action_outputs = torch.from_numpy(data['action_outputs']).cuda().unsqueeze(0) # (1, L)
action_masks = torch.from_numpy(data['action_masks']).cuda().unsqueeze(0) # (1, L)
# forward
logprobs, _, pred_feats, _ = model(ego_feats, phrase_embs, action_inputs) # (1, L, #actions), (1, L, 3200)
nll_loss = nll_crit(logprobs, action_outputs, action_masks)
nll_loss = nll_loss.item()
mse_loss = 0
if opt['use_next']:
next_feats = torch.from_numpy(data['next_feats']).cuda().unsqueeze(0) # (1, L, 3200)
mse_loss = mse_crit(pred_feats, next_feats, action_masks)
mse_loss = mse_loss.item()
if opt['use_residual']:
res_feats = torch.from_numpy(data['res_feats']).cuda().unsqueeze(0) # (1, L, 3200)
mse_loss = mse_crit(pred_feats, res_feats, action_masks)
mse_loss = mse_loss.item()
pred_acts = logprobs[0].argmax(1) # (L, )
# entry
entry = {}
entry['qid'] = data['qid']
entry['house'] = data['house']
entry['id'] = data['id']
entry['type'] = data['type']
entry['path_ix'] = data['path_ix']
entry['pred_acts'] = pred_acts.tolist() # list of L actions
entry['pred_acts_probs'] = torch.exp(logprobs[0]).tolist() # (L, #actions)
entry['gd_acts'] = action_outputs[0].tolist() # list of L actions
entry['nll_loss'] = nll_loss
entry['mse_loss'] = mse_loss
# accumulate
predictions.append(entry)
Nav_nll[data['type']] += nll_loss
Nav_cnt[data['type']] += 1
acc, cnt = 0, 0
for pa, ga in zip(entry['pred_acts'], entry['gd_acts']):
if pa == ga:
acc += 1
cnt += 1
if ga == 3:
break
Nav_teacher_forcing_acc[data['type']] += acc
Nav_teacher_forcing_cnt[data['type']] += cnt
overall_nll += nll_loss
overall_mse += mse_loss
overall_teacher_forcing_acc += acc
overall_teacher_forcing_cnt += cnt
# print
if ix % 10 == 0:
print('(%s/%s)qid[%s], id[%s], type[%s], nll_loss=%.3f, mse_loss=%.3f' % \
(ix+1, len(val_dataset), entry['qid'], entry['id'], entry['type'], nll_loss, mse_loss))
# summarize
overall_nll /= len(val_dataset)
overall_mse /= len(val_dataset)
overall_teacher_forcing_acc /= overall_teacher_forcing_cnt
for _type in ['object', 'room']:
Nav_nll[_type] /= (Nav_cnt[_type]+1e-5)
Nav_teacher_forcing_acc[_type] /= (Nav_teacher_forcing_cnt[_type]+1e-5)
# return
return predictions, overall_nll, overall_teacher_forcing_acc, overall_mse, Nav_nll, Nav_teacher_forcing_acc
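# Small illustrative helper (not called by the training code) that mirrors how
# evaluate() counts teacher-forcing accuracy: predictions are compared step by
# step and counting stops once the ground-truth STOP action (id 3 above) is hit.
def _example_teacher_forcing_acc(pred_acts, gd_acts, stop_action=3):
    acc, cnt = 0, 0
    for pa, ga in zip(pred_acts, gd_acts):
        if pa == ga:
            acc += 1
        cnt += 1
        if ga == stop_action:
            break
    return acc / max(cnt, 1)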
def main(args):
# make output directory
if args.checkpoint_dir is None:
args.checkpoint_dir = 'output/nav_room'
if not osp.isdir(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
# set random seed
random.seed(args.seed)
    np.random.seed(args.seed)
torch.manual_seed(args.seed)
# set up loaders
train_loader_kwargs = {
'data_json': args.data_json,
'data_h5': args.data_h5,
'path_feats_dir': args.path_feats_dir,
'path_images_dir': args.path_images_dir,
'split': 'train',
'max_seq_length': args.max_seq_length,
'requires_imgs': False,
'nav_types': ['room'],
'question_types': ['all'],
}
train_dataset = NavImitationDataset(**train_loader_kwargs)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers)
val_loader_kwargs = {
'data_json': args.data_json,
'data_h5': args.data_h5,
'path_feats_dir': args.path_feats_dir,
'path_images_dir': args.path_images_dir,
'split': 'val',
'max_seq_length': args.max_seq_length,
'requires_imgs': False,
'nav_types': ['room'],
'question_types': ['all'],
}
val_dataset = NavImitationDataset(**val_loader_kwargs)
# set up models
opt = vars(args)
opt['act_to_ix'] = train_dataset.act_to_ix
opt['num_actions'] = len(opt['act_to_ix'])
model = Navigator(opt)
model.cuda()
print('navigator set up.')
# set up criterions
nll_crit = SeqModelCriterion().cuda()
mse_crit = MaskedMSELoss().cuda()
# set up optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,
betas=(args.optim_alpha, args.optim_beta), eps=args.optim_epsilon,
weight_decay=args.weight_decay)
# resume from checkpoint
infos = {}
iters = infos.get('iters', 0)
epoch = infos.get('epoch', 0)
val_nll_history = infos.get('val_nll_history', {})
val_mse_history = infos.get('val_mse_history', {})
val_teacher_forcing_acc_history = infos.get('val_teacher_forcing_acc_history', {})
val_nav_object_nll_history = infos.get('val_nav_object_nll_history', {})
val_nav_object_teacher_forcing_acc_history = infos.get('val_nav_object_teacher_forcing_acc_history', {})
val_nav_room_nll_history = infos.get('val_nav_room_nll_history', {})
val_nav_room_teacher_forcing_acc_history = infos.get('val_nav_room_teacher_forcing_acc_history', {})
loss_history = infos.get('loss_history', {})
nll_loss_history = infos.get('nll_loss_history', {})
mse_loss_history = infos.get('mse_loss_history', {})
lr = infos.get('lr', args.learning_rate)
best_val_score, best_val_acc, best_predictions = None, None, None
# start training
while iters <= args.max_iters:
print('Starting epoch %d' % epoch)
# reset seq_length
if args.use_curriculum:
# assume we need 4 epochs to get full seq_length
            seq_length = min((args.max_seq_length // 4) * (epoch + 1), args.max_seq_length)
train_dataset.reset_seq_length(seq_length)
else:
seq_length = args.max_seq_length
# train
for batch in train_loader:
# set mode
model.train()
# zero gradient
optimizer.zero_grad()
# batch = {qid, path_ix, house, id, type, phrase, phrase_emb, ego_feats, next_feats, res_feats,
# action_inputs, action_outputs, action_masks, ego_imgs}
ego_feats = batch['ego_feats'].cuda() # (n, L, 3200)
phrase_embs = batch['phrase_emb'].cuda() # (n, 300)
action_inputs = batch['action_inputs'].cuda() # (n, L)
action_outputs = batch['action_outputs'].cuda() # (n, L)
action_masks = batch['action_masks'].cuda() # (n, L)
# forward
# - logprobs (n, L, #actions)
# - output_feats (n, L, rnn_size)
# - pred_feats (n, L, 3200) or None
logprobs, _, pred_feats, _ = model(ego_feats, phrase_embs, action_inputs)
nll_loss = nll_crit(logprobs, action_outputs, action_masks)
mse_loss = 0
if args.use_next:
next_feats = batch['next_feats'].cuda() # (n, L, 3200)
mse_loss = mse_crit(pred_feats, next_feats, action_masks)
if args.use_residual:
res_feats = batch['res_feats'].cuda() # (n, L, 3200)
mse_loss = mse_crit(pred_feats, res_feats, action_masks)
loss = nll_loss + args.mse_weight * mse_loss
# backward
loss.backward()
model_utils.clip_gradient(optimizer, args.grad_clip)
optimizer.step()
# training log
if iters % args.losses_log_every == 0:
loss_history[iters] = loss.item()
nll_loss_history[iters] = nll_loss.item()
mse_loss_history[iters] = mse_loss.item() if (args.use_next or args.use_residual) else 0
print('iters[%s]epoch[%s], train_loss=%.3f (nll_loss=%.3f, mse_loss=%.3f) lr=%.2E, cur_seq_length=%s' % \
(iters, epoch, loss_history[iters], nll_loss_history[iters], mse_loss_history[iters], lr, train_loader.dataset.cur_seq_length))
# decay learning rate
if args.learning_rate_decay_start > 0 and iters > args.learning_rate_decay_start:
frac = (iters - args.learning_rate_decay_start) / args.learning_rate_decay_every
decay_factor = 0.1 ** frac
lr = args.learning_rate * decay_factor
model_utils.set_lr(optimizer, lr)
# evaluate
if iters % args.save_checkpoint_every == 0:
print('Checking validation ...')
predictions, overall_nll, overall_teacher_forcing_acc, overall_mse, Nav_nll, Nav_teacher_forcing_acc = \
evaluate(val_dataset, model, nll_crit, mse_crit, opt)
val_nll_history[iters] = overall_nll
val_teacher_forcing_acc_history[iters] = overall_teacher_forcing_acc
val_mse_history[iters] = overall_mse
val_nav_object_nll_history[iters] = Nav_nll['object']
val_nav_object_teacher_forcing_acc_history[iters] = Nav_teacher_forcing_acc['object']
val_nav_room_nll_history[iters] = Nav_nll['room']
val_nav_room_teacher_forcing_acc_history[iters] = Nav_teacher_forcing_acc['room']
# save model if best
# consider all three accuracy, perhaps a better weighting is needed.
current_score = -overall_nll
if best_val_score is None or current_score > best_val_score:
best_val_score = current_score
best_val_acc = overall_teacher_forcing_acc
best_predictions = predictions
checkpoint_path = osp.join(args.checkpoint_dir, '%s.pth' % args.id)
checkpoint = {}
checkpoint['model_state'] = model.state_dict()
checkpoint['opt'] = vars(args)
torch.save(checkpoint, checkpoint_path)
print('model saved to %s.' % checkpoint_path)
# write to json report
infos['iters'] = iters
infos['epoch'] = epoch
infos['loss_history'] = loss_history
infos['nll_loss_history'] = nll_loss_history
infos['mse_loss_history'] = mse_loss_history
infos['val_nll_history'] = val_nll_history
infos['val_teacher_forcing_acc_history'] = val_teacher_forcing_acc_history
infos['val_mse_history'] = val_mse_history
infos['val_nav_object_nll_history'] = val_nav_object_nll_history
infos['val_nav_object_teacher_forcing_acc_history'] = val_nav_object_teacher_forcing_acc_history
infos['val_nav_room_nll_history'] = val_nav_room_nll_history
infos['val_nav_room_teacher_forcing_acc_history'] = val_nav_room_teacher_forcing_acc_history
infos['best_val_score'] = best_val_score
infos['best_val_acc'] = best_val_acc
infos['best_predictions'] = predictions if best_predictions is None else best_predictions
infos['opt'] = vars(args)
infos['act_to_ix'] = train_dataset.act_to_ix
infos_json = osp.join(args.checkpoint_dir, '%s.json' % args.id)
with open(infos_json, 'w') as f:
json.dump(infos, f)
print('infos saved to %s.' % infos_json)
# update iters
iters += 1
# update epoch
epoch += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Data input settings
parser.add_argument('--data_json', type=str, default='cache/prepro/imitation/data.json')
parser.add_argument('--data_h5', type=str, default='cache/prepro/data.h5')
parser.add_argument('--path_feats_dir', type=str, default='cache/path_feats')
parser.add_argument('--path_images_dir', type=str, default='cache/path_images')
parser.add_argument('--checkpoint_dir', type=str, default='output/nav_room')
parser.add_argument('--num_workers', type=int, default=4)
parser.add_argument('--seed', type=int, default=24)
parser.add_argument('--start_from', type=str, default=None)
# Navigator settings
parser.add_argument('--max_seq_length', type=int, default=100, help='max_seq_length')
parser.add_argument('--rnn_type', type=str, default='lstm')
parser.add_argument('--rnn_size', type=int, default=256)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--rnn_dropout', type=float, default=0.1)
parser.add_argument('--fc_dropout', type=float, default=0.0)
parser.add_argument('--seq_dropout', type=float, default=0.0)
parser.add_argument('--fc_dim', type=int, default=64)
parser.add_argument('--act_dim', type=int, default=64)
parser.add_argument('--use_action', dest='use_action', action='store_true', help='if input previous action')
    parser.add_argument('--use_residual', dest='use_residual', action='store_true', help='if predict the residual feature')
parser.add_argument('--use_next', dest='use_next', action='store_true', help='if predict next image feature')
parser.add_argument('--use_curriculum', dest='use_curriculum', action='store_true', help='if use curriculum')
# Output settings
parser.add_argument('--id', type=str, default='im0')
    parser.add_argument('--save_checkpoint_every', type=int, default=2000, help='how often to save a model checkpoint')
parser.add_argument('--losses_log_every', type=int, default=25)
# Optimizer
parser.add_argument('--mse_weight', type=float, default=1.0)
parser.add_argument('--max_iters', type=int, default=20000, help='max number of iterations to run')
parser.add_argument('--batch_size', type=int, default=40, help='batch size in number of questions per batch')
parser.add_argument('--grad_clip', type=float, default=0.1, help='clip gradients at this value')
parser.add_argument('--learning_rate', type=float, default=4e-4, help='learning rate')
parser.add_argument('--learning_rate_decay_start', type=int, default=5000, help='at what iters to start decaying learning rate')
    parser.add_argument('--learning_rate_decay_every', type=int, default=5000, help='every how many iters thereafter to drop LR by a factor of 10')
parser.add_argument('--optim_alpha', type=float, default=0.8, help='alpha for adam')
parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
parser.add_argument('--optim_epsilon', type=float, default=1e-8, help='epsilon that goes into denominator for smoothing')
parser.add_argument('--weight_decay', type=float, default=1e-3, help='weight decay for l2 regularization')
args = parser.parse_args()
main(args)
| 44.764706
| 137
| 0.693627
|
6db703564f019e77a5652dc6cc38b331bb877065
| 1,219
|
py
|
Python
|
examples/pipeline/src/gen-version-header.py
|
dan65prc/conduit
|
6f73416c8b5526a84a21415d5079bb2b61772144
|
[
"BSD-3-Clause"
] | 5
|
2018-08-01T02:52:11.000Z
|
2020-09-19T08:12:07.000Z
|
examples/pipeline/src/gen-version-header.py
|
dan65prc/conduit
|
6f73416c8b5526a84a21415d5079bb2b61772144
|
[
"BSD-3-Clause"
] | null | null | null |
examples/pipeline/src/gen-version-header.py
|
dan65prc/conduit
|
6f73416c8b5526a84a21415d5079bb2b61772144
|
[
"BSD-3-Clause"
] | 1
|
2021-12-02T21:05:35.000Z
|
2021-12-02T21:05:35.000Z
|
#!/usr/bin/env python
import sys
import os
import re
import subprocess
from pprint import pprint
checkout_version = subprocess.Popen('git rev-parse HEAD', shell=True, stdout=subprocess.PIPE).stdout.read()
checkout_diff = subprocess.Popen('git diff -w', shell=True, stdout=subprocess.PIPE).stdout.read()[:256]
variables = {}
variables['checkout'] = '"{0}"'.format(checkout_version.decode('utf-8').strip())
# order on the replacements is important, replace original backslash first.
variables['diff'] = '"\\n\\"{0}\\"\\n"'.format(checkout_diff.decode('utf-8').strip().replace('\\', '\\\\').replace('"', '\\"').replace('\r', '').replace('\n', '\\n'))
variables['build_path'] = '"{0}"'.format(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../').replace('\\', '/'))
if not os.path.exists(os.path.dirname(sys.argv[1])):
os.makedirs(os.path.dirname(sys.argv[1]))
pprint(variables)
outfile = open(sys.argv[1], 'w+')
outfile.write('''
#ifndef CONDUIT_PROJECT_VERSION_H_
#define CONDUIT_PROJECT_VERSION_H_
#define CONDUIT_PROJECT_VERSION {checkout}
#define CONDUIT_PROJECT_BUILD_PATH {build_path}
#define CONDUIT_PROJECT_DIFF {diff}
#endif
'''.format(**variables))
| 32.945946
| 167
| 0.684988
|
5d3da0de37ce7616effa12dcef365c94ef6bdd6f
| 2,898
|
py
|
Python
|
model/model_ss.py
|
gicsaw/ConDo2
|
77bd4efdd5b6b72ac119f7ae9b217ef114476d21
|
[
"MIT"
] | null | null | null |
model/model_ss.py
|
gicsaw/ConDo2
|
77bd4efdd5b6b72ac119f7ae9b217ef114476d21
|
[
"MIT"
] | null | null | null |
model/model_ss.py
|
gicsaw/ConDo2
|
77bd4efdd5b6b72ac119f7ae9b217ef114476d21
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import torch
import torch.nn as nn
class ResidualBlock(nn.Module):
def __init__(self, hidden_dim, kernel_size, dilation, bn_momentum):
super(ResidualBlock, self).__init__()
padding = (kernel_size - 1) * dilation // 2
hidden_dim2 = hidden_dim*2
self.conv1 = nn.Conv1d(in_channels=hidden_dim,
out_channels=hidden_dim2,
kernel_size=kernel_size,
padding=padding,
dilation=dilation)
self.norm1 = nn.BatchNorm1d(hidden_dim2, momentum=bn_momentum)
self.conv2 = nn.Conv1d(in_channels=hidden_dim2,
out_channels=hidden_dim,
kernel_size=kernel_size,
padding=padding,
dilation=dilation)
self.norm2 = nn.BatchNorm1d(hidden_dim, momentum=bn_momentum)
def forward(self, X0):
X = self.conv1(X0)
X = torch.relu(X)
X1 = self.norm1(X)
X = self.conv2(X1)
X = torch.relu(X)
X2 = self.norm2(X)
Xout = X2 + X0
return Xout
class Network(nn.Module):
def __init__(self, para):
super(Network, self).__init__()
self.input_dim = para['input_dim']
self.output_dim = para['output_dim']
self.bn_momentum = para['bn_momentum']
self.hidden_dim = para['hidden_dim']
self.kernel_size = para['kernel_size']
self.dilation = para['dilation']
self.num_layers = para['num_layers']
self.padding = (self.kernel_size - 1) * self.dilation // 2
self.conv_input = nn.Conv1d(in_channels=self.input_dim,
out_channels=self.hidden_dim,
kernel_size=5, padding=2, dilation=1)
self.norm_input = nn.BatchNorm1d(
self.hidden_dim, momentum=self.bn_momentum)
resblocks = []
for i in range(0, self.num_layers):
resblocks += [ResidualBlock(self.hidden_dim, self.kernel_size,
self.dilation, self.bn_momentum)]
self.resblocks = nn.ModuleList(resblocks)
self.conv_o = nn.Conv1d(in_channels=self.hidden_dim,
out_channels=self.output_dim,
kernel_size=self.kernel_size,
padding=self.padding,
dilation=self.dilation)
def forward(self, X0):
X = X0.permute(0, 2, 1)
X = self.conv_input(X)
X = torch.relu(X)
X = self.norm_input(X)
for i in range(0, self.num_layers):
res_block = self.resblocks[i]
X = res_block(X)
out = self.conv_o(X)
out = out.permute(0, 2, 1)
return out
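# Minimal smoke test with assumed shapes (not part of the original training
# code): the network consumes (batch, length, input_dim) tensors and returns
# (batch, length, output_dim) per-position predictions. The dimensions below
# are placeholders.
def _example_forward():
    para = {
        'input_dim': 20,     # assumed, e.g. one-hot residues
        'output_dim': 3,     # assumed, e.g. 3-state secondary structure
        'bn_momentum': 0.1,
        'hidden_dim': 32,
        'kernel_size': 3,
        'dilation': 1,
        'num_layers': 2,
    }
    model = Network(para)
    x = torch.randn(4, 100, para['input_dim'])   # 4 sequences of length 100
    out = model(x)
    return out.shape                             # torch.Size([4, 100, 3])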
| 34.5
| 74
| 0.534852
|
649eecdc665b8e745e7c0d92a5697b533cf4a141
| 1,692
|
py
|
Python
|
helpers.py
|
ririw/kaggle-bimbo-pymc3
|
fbf016751e2459b9fa6c8d058aad9c75fca57731
|
[
"MIT"
] | null | null | null |
helpers.py
|
ririw/kaggle-bimbo-pymc3
|
fbf016751e2459b9fa6c8d058aad9c75fca57731
|
[
"MIT"
] | null | null | null |
helpers.py
|
ririw/kaggle-bimbo-pymc3
|
fbf016751e2459b9fa6c8d058aad9c75fca57731
|
[
"MIT"
] | null | null | null |
"""
This script translates the data from `csv` format to a `sqlite` db while fixing the field types and renaming the columns.
The idea is to define some directives and let `odo` take care of the rest ;-). The script will register the `train` and `test` data
into tables with the same respective name.
"""
import dask.dataframe as dd
from dask.diagnostics import ProgressBar
from odo import odo, dshape
def prepare_database():
# Train set
df = dd.read_csv("./train.csv")
col_map = dict(zip(df.columns, ["week_num","agency_id","channel_id","route_id","client_id","product_id",
"sales_unit","sales_peso","returns_unit","returns_peso","adjusted_demand"]))
ds = dshape("var * {week_num:int64,agency_id:int64,channel_id:int64,route_id:int64,client_id:int64,product_id:int64,\
sales_unit:int64,sales_peso:float64,returns_unit:int64,returns_peso:float64, adjusted_demand:int64}")
df = df.rename(columns=col_map)
print("translating the train set...")
with ProgressBar():
odo(df, "sqlite:///data.sqlite3::train", dshape=ds) # the dirty part
# Test set
df = dd.read_csv("./test.csv", usecols=range(1,7)) # discard the `id` (first) column
col_map = dict(zip(df.columns, ["week_num","agency_id","channel_id","route_id","client_id","product_id"]))
ds = dshape("var * {week_num:int64,agency_id:int64,channel_id:int64,route_id:int64,client_id:int64,product_id:int64}")
df = df.rename(columns=col_map)
print("translating the test set...")
with ProgressBar():
odo(df, "sqlite:///data.sqlite3::test", dshape=ds)
prepare_database()
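# Illustrative sanity check (not required by the translation step): read a few
# rows back from the generated sqlite database with the standard library; the
# table and column names follow the mapping defined above.
def peek_train_rows(limit=5):
    import sqlite3
    conn = sqlite3.connect('data.sqlite3')
    try:
        cur = conn.execute(
            'SELECT week_num, client_id, product_id, adjusted_demand '
            'FROM train LIMIT ?', (limit,))
        return cur.fetchall()
    finally:
        conn.close()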
| 43.384615
| 132
| 0.677305
|
51d72e57aa3e0f2aa1920bf143dd3a56c427a5a8
| 1,298
|
py
|
Python
|
video_util.py
|
woo1/youtube-ffmpeg
|
bef663d34c9a51b4a93454d3a8137fbec08a0d16
|
[
"MIT"
] | null | null | null |
video_util.py
|
woo1/youtube-ffmpeg
|
bef663d34c9a51b4a93454d3a8137fbec08a0d16
|
[
"MIT"
] | null | null | null |
video_util.py
|
woo1/youtube-ffmpeg
|
bef663d34c9a51b4a93454d3a8137fbec08a0d16
|
[
"MIT"
] | null | null | null |
import os
import cv2
def mosaic(img, rect, mosaic_rate=15):
x, y, xmax, ymax = rect
# if x < 0:
# xmax = xmax - x
# x = 0
# if y < 0:
# ymax = ymax - x
# y = 0
if x < 0 or y < 0 or xmax < 0 or ymax < 0:
return img
w = xmax-x
h = ymax-y
face_img = img[y:ymax, x:xmax]
min_side = min(w, h)
if min_side <= mosaic_rate:
        mosaic_rate = max(1, min_side // 2)  # keep the divisor at least 1 for tiny regions
face_img = cv2.resize(face_img, (w // mosaic_rate, h // mosaic_rate))
face_img = cv2.resize(face_img, (w, h), interpolation=cv2.INTER_AREA)
try:
img[y:ymax, x:xmax] = face_img
except Exception as e:
print('w', w, 'h', h, 'rect', rect, 'mosaic_rate', mosaic_rate)
print(e)
return img
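# Example usage of mosaic() with made-up coordinates: pixelate a rectangle of
# an image read from disk and write the result back out. Paths are placeholders.
def _example_mosaic(in_path='input.jpg', out_path='mosaic.jpg'):
    img = cv2.imread(in_path)
    if img is None:
        raise FileNotFoundError(in_path)
    img = mosaic(img, (50, 40, 210, 200), mosaic_rate=15)  # rect is (x, y, xmax, ymax)
    cv2.imwrite(out_path, img)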
def video2img(vdo_path, img_dir, fps, tot_seconds):
cmd = 'ffmpeg -t '+str(tot_seconds)+' -i "'+vdo_path+'" -r '+str(fps)+' -qscale:v 2 -f image2 '+img_dir+'/img_%d.jpg'
os.system(cmd)
import pytube
def download_youtube(url, sav_path):
yt = pytube.YouTube(url)
vids = yt.streams.all()
for i in range(len(vids)):
print(i, '. ', vids[i])
vnum = int(input('Select video number to download : '))
vids[vnum].download(sav_path)
if __name__ == '__main__':
download_youtube('YOUTUBE_URL', 'SAVE_DIR')
| 25.96
| 121
| 0.577042
|
53e9cf0fc42e46ed436d8dddb9e9b5efa9ee89bf
| 1,307
|
py
|
Python
|
releasetools.py
|
lihonglinglory/test
|
d38cb70ada6946abe711d5e5b8d3fac95d48ed9b
|
[
"FTL"
] | null | null | null |
releasetools.py
|
lihonglinglory/test
|
d38cb70ada6946abe711d5e5b8d3fac95d48ed9b
|
[
"FTL"
] | null | null | null |
releasetools.py
|
lihonglinglory/test
|
d38cb70ada6946abe711d5e5b8d3fac95d48ed9b
|
[
"FTL"
] | 1
|
2019-04-27T05:20:48.000Z
|
2019-04-27T05:20:48.000Z
|
# Copyright (C) 2009 The Android Open Source Project
# Copyright (c) 2011, The Linux Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import common
import re
def FullOTA_Assertions(info):
AddTrustZoneAssertion(info, info.input_zip)
return
def IncrementalOTA_Assertions(info):
AddTrustZoneAssertion(info, info.target_zip)
return
def AddTrustZoneAssertion(info, input_zip):
android_info = info.input_zip.read("OTA/android-info.txt")
m = re.search(r'require\s+version-trustzone\s*=\s*(\S+)', android_info)
if m:
versions = m.group(1).split('|')
if len(versions) and '*' not in versions:
cmd = 'assert(platina.verify_trustzone(' + ','.join(['"%s"' % tz for tz in versions]) + ') == "1");'
info.script.AppendExtra(cmd)
return
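# Illustrative only: for an android-info.txt line such as
#   require version-trustzone=TZ.BF.2.0-1.0|TZ.BF.2.0-1.1
# AddTrustZoneAssertion() appends an updater-script assertion over those
# versions. The helper below reproduces just the string-building step for a
# sample value; the version strings are made up.
def _example_trustzone_assert(android_info='require version-trustzone=TZ.BF.2.0-1.0|TZ.BF.2.0-1.1'):
    m = re.search(r'require\s+version-trustzone\s*=\s*(\S+)', android_info)
    versions = m.group(1).split('|')
    return 'assert(platina.verify_trustzone(' + ','.join(['"%s"' % tz for tz in versions]) + ') == "1");'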
| 35.324324
| 106
| 0.729916
|
3fa3191830bb8fdf61b47fb64cba18ddcc8f7324
| 6,128
|
py
|
Python
|
genericadmin/admin.py
|
whatisjasongoldstein/django-genericadmin
|
77b246890f3813fcc44f28ded7f4cb36aa261e65
|
[
"MIT"
] | null | null | null |
genericadmin/admin.py
|
whatisjasongoldstein/django-genericadmin
|
77b246890f3813fcc44f28ded7f4cb36aa261e65
|
[
"MIT"
] | null | null | null |
genericadmin/admin.py
|
whatisjasongoldstein/django-genericadmin
|
77b246890f3813fcc44f28ded7f4cb36aa261e65
|
[
"MIT"
] | null | null | null |
import json
from functools import update_wrapper
from django.contrib import admin
from django.conf.urls import url
from django.conf import settings
from django.contrib.contenttypes.admin import GenericTabularInline, GenericStackedInline
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
from django.utils.text import capfirst
from django.contrib.admin.widgets import url_params_from_lookup_dict
from django.http import HttpResponse, HttpResponseNotAllowed, Http404
try:
from django.contrib.admin.views.main import IS_POPUP_VAR
except ImportError:
from django.contrib.admin.options import IS_POPUP_VAR
JS_PATH = getattr(settings, 'GENERICADMIN_JS', 'genericadmin/js/')
class BaseGenericModelAdmin(object):
class Media:
js = ()
content_type_lookups = {}
generic_fk_fields = []
content_type_blacklist = []
content_type_whitelist = []
def __init__(self, model, admin_site):
try:
media = list(self.Media.js)
        except (AttributeError, TypeError):
media = []
media.append(JS_PATH + 'genericadmin.js')
self.Media.js = tuple(media)
super(BaseGenericModelAdmin, self).__init__(model, admin_site)
def get_generic_field_list(self, request, prefix=''):
if hasattr(self, 'ct_field') and hasattr(self, 'ct_fk_field'):
exclude = [self.ct_field, self.ct_fk_field]
else:
exclude = []
field_list = []
if hasattr(self, 'generic_fk_fields') and self.generic_fk_fields:
for fields in self.generic_fk_fields:
if fields['ct_field'] not in exclude and \
fields['fk_field'] not in exclude:
fields['inline'] = prefix != ''
fields['prefix'] = prefix
field_list.append(fields)
else:
for field in self.model._meta.virtual_fields:
if isinstance(field, GenericForeignKey) and \
field.ct_field not in exclude and field.fk_field not in exclude:
field_list.append({
'ct_field': field.ct_field,
'fk_field': field.fk_field,
'inline': prefix != '',
'prefix': prefix,
})
if hasattr(self, 'inlines') and len(self.inlines) > 0:
for FormSet, inline in zip(self.get_formsets(request), self.get_inline_instances(request)):
prefix = FormSet.get_default_prefix()
field_list = field_list + inline.get_generic_field_list(request, prefix)
return field_list
def get_urls(self):
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
custom_urls = [
url(r'^obj-data/$', wrap(self.generic_lookup), name='admin_genericadmin_obj_lookup'),
url(r'^genericadmin-init/$', wrap(self.genericadmin_js_init), name='admin_genericadmin_init'),
]
return custom_urls + super(BaseGenericModelAdmin, self).get_urls()
def genericadmin_js_init(self, request):
if request.method == 'GET':
obj_dict = {}
for c in ContentType.objects.all():
val = force_text('%s/%s' % (c.app_label, c.model))
params = self.content_type_lookups.get('%s.%s' % (c.app_label, c.model), {})
params = url_params_from_lookup_dict(params)
if self.content_type_whitelist:
if val in self.content_type_whitelist:
obj_dict[c.id] = (val, params)
elif val not in self.content_type_blacklist:
obj_dict[c.id] = (val, params)
data = {
'url_array': obj_dict,
'fields': self.get_generic_field_list(request),
'popup_var': IS_POPUP_VAR,
}
resp = json.dumps(data, ensure_ascii=False)
return HttpResponse(resp, content_type='application/json')
return HttpResponseNotAllowed(['GET'])
def generic_lookup(self, request):
if request.method != 'GET':
return HttpResponseNotAllowed(['GET'])
if 'content_type' in request.GET and 'object_id' in request.GET:
content_type_id = request.GET['content_type']
object_id = request.GET['object_id']
obj_dict = {
'content_type_id': content_type_id,
'object_id': object_id,
}
content_type = ContentType.objects.get(pk=content_type_id)
obj_dict["content_type_text"] = capfirst(force_text(content_type))
try:
obj = content_type.get_object_for_this_type(pk=object_id)
obj_dict["object_text"] = capfirst(force_text(obj))
except ObjectDoesNotExist:
raise Http404
resp = json.dumps(obj_dict, ensure_ascii=False)
else:
resp = ''
return HttpResponse(resp, content_type='application/json')
class GenericAdminModelAdmin(BaseGenericModelAdmin, admin.ModelAdmin):
"""Model admin for generic relations. """
class GenericTabularInline(BaseGenericModelAdmin, GenericTabularInline):
"""Model admin for generic tabular inlines. """
class GenericStackedInline(BaseGenericModelAdmin, GenericStackedInline):
"""Model admin for generic stacked inlines. """
class TabularInlineWithGeneric(BaseGenericModelAdmin, admin.TabularInline):
""""Normal tabular inline with a generic relation"""
class StackedInlineWithGeneric(BaseGenericModelAdmin, admin.StackedInline):
""""Normal stacked inline with a generic relation"""
| 39.535484
| 106
| 0.616678
|
94f6d714c8aca6d43dca0eb4767f8d0b81d9b12d
| 9,007
|
py
|
Python
|
musicscore/musictree/treeinstruments.py
|
alexgorji/music_score
|
b4176da52295361f3436826903485c5cb8054c5e
|
[
"MIT"
] | 2
|
2020-06-22T13:33:28.000Z
|
2020-12-30T15:09:00.000Z
|
musicscore/musictree/treeinstruments.py
|
alexgorji/music_score
|
b4176da52295361f3436826903485c5cb8054c5e
|
[
"MIT"
] | 37
|
2020-02-18T12:15:00.000Z
|
2021-12-13T20:01:14.000Z
|
musicscore/musictree/treeinstruments.py
|
alexgorji/music_score
|
b4176da52295361f3436826903485c5cb8054c5e
|
[
"MIT"
] | null | null | null |
import uuid
from musicscore.musictree.midi import G, D, A, E, C, MidiNote, Midi, B, F, midi_to_frequency, frequency_to_midi
from musicscore.musictree.treeclef import TreeClef, TREBLE_CLEF, ALTO_CLEF, BASS_CLEF
from musicscore.musicxml.types.complextypes.midiinstrument import ComplexTypeMidiInstrument
from musicscore.musicxml.types.complextypes.scorepart import PartName, PartAbbreviation
class TreeInstrument(ComplexTypeMidiInstrument):
_TAG = 'midi-instrument'
def __init__(self, name, number_of_staves=None, abbreviation=None, number=None, *args, **kwargs):
super().__init__(tag=self._TAG, id_='inst' + str(uuid.uuid4()), *args, **kwargs)
self._part_name = PartName(name=name)
self._part_abbreviation = PartAbbreviation()
self._number_of_staves = None
self._standard_clefs = None
self._number = None
self.number_of_staves = number_of_staves
self.abbreviation = abbreviation
self.number = number
# public properties
@property
def abbreviation(self):
return self._part_abbreviation.value
@abbreviation.setter
def abbreviation(self, val):
self._part_abbreviation.value = val
@property
def name(self):
return self._part_name.name
@name.setter
def name(self, val):
self._part_name.name = val
@property
def number(self):
return self._number
@number.setter
def number(self, val):
if self._number is not None:
raise AttributeError('number can only be set once')
if val is not None and not isinstance(val, int):
raise TypeError('number.value must be of type int not{}'.format(type(val)))
self._number = val
if self._number is not None:
self.name += ' ' + str(self._number)
self.abbreviation += ' ' + str(self._number)
@property
def part_name(self):
return self._part_name
@property
def part_abbreviation(self):
return self._part_abbreviation
@property
def standard_clefs(self):
return self._standard_clefs
@standard_clefs.setter
def standard_clefs(self, vals):
if not hasattr(vals, '__iter__'):
vals = [vals]
for index, val in enumerate(vals):
if not isinstance(val, TreeClef):
raise TypeError('standard_clef.value must be of type TreeClef not{}'.format(type(val)))
vals[index] = val.__deepcopy__()
if len(vals) > 1:
for index, val in enumerate(vals):
val.number = index + 1
self._standard_clefs = vals
@property
def number_of_staves(self):
return self._number_of_staves
@number_of_staves.setter
def number_of_staves(self, val):
if val is not None and not isinstance(val, int):
raise TypeError('number_of_staves.value must be of type int not{}'.format(type(val)))
self._number_of_staves = val
# strings
class String(object):
def __init__(self, number, tuning, *args, **kwargs):
super().__init__(*args, **kwargs)
self._tuning = None
self.number = number
self.tuning = tuning
@property
def tuning(self):
return self._tuning
@tuning.setter
def tuning(self, val):
if not isinstance(val, Midi):
raise TypeError('tuning.value must be of type Midi not{}'.format(type(val)))
self._tuning = val
def get_step(self, number):
step = self.tuning.__deepcopy__()
step.transpose(number)
return step
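# Illustrative helper (not used by the library): assuming Midi.transpose moves
# the pitch by semitones, an open G3 string answers get_step(7) with D4, a
# perfect fifth above its tuning.
def _example_string_step():
    g_string = String(4, G(3))
    return g_string.get_step(7)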
class StringInstrument(TreeInstrument):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.strings = {}
class Violin(StringInstrument):
def __init__(self, number=None, *args, **kwargs):
super().__init__(name='Violin', abbreviation='vln.', number=number, *args, **kwargs)
self.strings = {4: String(4, G(3)),
3: String(3, D(4)),
2: String(2, A(4)),
1: String(1, E(5))
}
self.standard_clefs = TREBLE_CLEF
class Viola(StringInstrument):
def __init__(self, number=None, *args, **kwargs):
super().__init__(name='Viola', abbreviation='vla.', number=number, *args, **kwargs)
self.strings = {4: String(4, C(3)),
3: String(3, G(3)),
2: String(2, D(4)),
1: String(1, A(4))
}
self.standard_clefs = ALTO_CLEF
class Cello(StringInstrument):
def __init__(self, number=None, *args, **kwargs):
super().__init__(name='Cello', abbreviation='vc.', number=number, *args, **kwargs)
self.strings = {4: String(4, C(2)),
3: String(3, G(2)),
2: String(2, D(3)),
1: String(1, A(3))
}
self.standard_clefs = BASS_CLEF
class ViolaDamore(StringInstrument):
def __init__(self, number=None, *args, **kwargs):
super().__init__(name='Viola d\'amore\n430', abbreviation='vla.', number=number, *args, **kwargs)
# skordatura
self.strings = {1: String(1, B(4)),
2: String(2, B(4)),
3: String(3, F(4, '#')),
4: String(4, C(4)),
5: String(5, G(3)),
6: String(6, D(3)),
7: String(7, A(2))
}
# keyboards
class KeyboardInstrument(TreeInstrument):
def __init__(self, number_of_staves=2, *args, **kwargs):
super().__init__(number_of_staves=number_of_staves, *args, **kwargs)
self.standard_clefs = [TREBLE_CLEF, BASS_CLEF]
class Accordion(KeyboardInstrument):
def __init__(self, number=None, *args, **kwargs):
super().__init__(name='Accordion', abbreviation='acc.', number=number, *args, **kwargs)
class Piano(KeyboardInstrument):
def __init__(self, *args, **kwargs):
super().__init__(name='Piano', abbreviation='pno.', *args, **kwargs)
# brass
class NaturalInstrument(TreeInstrument):
def __init__(self, key, a4=440, *args, **kwargs):
super().__init__(*args, **kwargs)
self._a4 = None
self._key = None
self._transposition = None
self.a4 = a4
self.key = key
@property
def a4(self):
return self._a4
@a4.setter
def a4(self, val):
        try:
            float(val)
        except (TypeError, ValueError):
            raise TypeError('a4.value must be a number, not {}'.format(type(val)))
self._a4 = val
@property
def key(self):
return self._key
@key.setter
def key(self, val):
if not isinstance(val, MidiNote):
raise TypeError('key.value must be of type MidiNote not{}'.format(type(val)))
self._key = val
@property
def transposition(self):
return self._transposition
@transposition.setter
def transposition(self, val):
self._transposition = val
def get_fundamental_frequency(self):
return midi_to_frequency(self.key, self.a4)
def get_partial_midi_value(self, number):
        if not isinstance(number, int):
            raise TypeError('number must be of type int not {}'.format(type(number)))
        if number <= 0:
            raise ValueError('number must be a positive partial index')
return frequency_to_midi(self.get_fundamental_frequency() * number, self.a4)
class Horn(TreeInstrument):
def __init__(self, number=None, *args, **kwargs):
super().__init__(name='Horn', abbreviation='hrn.', number=number, *args, **kwargs)
class NaturalHorn(NaturalInstrument):
def __init__(self, key=E(1, 'b'), a4=430, *args, **kwargs):
super().__init__(name='Horn in E♭\n430', abbreviation='hrn.', key=key, a4=a4, *args, **kwargs)
self.transposition = 9
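# Illustrative sketch: list the MIDI values of the first few partials of the
# natural horn defined above. Exact values depend on midi_to_frequency /
# frequency_to_midi and on the instrument's a4 reference (430 Hz here).
def _example_horn_partials(n=6):
    horn = NaturalHorn()
    return [horn.get_partial_midi_value(k) for k in range(1, n + 1)]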
# percussion
class Percussion(TreeInstrument):
def __init__(self, number=None, *args, **kwargs):
super().__init__(name='Percussion', abbreviation='perc.', number=number, *args, **kwargs)
self.tamtam = TamTam()
self.cymbal_1 = Cymbal(1)
self.cymbal_2 = Cymbal(2)
self.cymbal_3 = Cymbal(3)
self.cymbal_4 = Cymbal(4)
self.cymbal_5 = Cymbal(5)
class TamTam(TreeInstrument):
def __init__(self, number=None, *args, **kwargs):
super().__init__(name='Tam-tam', abbreviation='Tam-t.', number=number, *args, **kwargs)
self.midi = B(3)
self.midi.notehead = 'x'
class Cymbal(TreeInstrument):
midis = {1: E(4), 2: G(4), 3: B(4), 4: D(5), 5: F(5)}
def __init__(self, number=1, *args, **kwargs):
super().__init__(name='cymbal-' + str(number), abbreviation='cym-' + str(number), number=number, *args,
**kwargs)
self.midi = self.midis[self.number]
self.midi.notehead = 'x'
# voice
class Voice(TreeInstrument):
def __init__(self, *args, **kwargs):
super().__init__(name='voice', abbreviation='v.', *args, **kwargs)
| 31.383275
| 111
| 0.597091
|
255c49363a15c3f3eb8e77eff2b8dcf39564880f
| 3,852
|
py
|
Python
|
open_spiel/python/egt/visualization_test.py
|
alexminnaar/open_spiel
|
c17a390f8a007ccc309f76cb0cfa29f06dc5d2c9
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:51:03.000Z
|
2019-12-19T01:51:03.000Z
|
open_spiel/python/egt/visualization_test.py
|
alexminnaar/open_spiel
|
c17a390f8a007ccc309f76cb0cfa29f06dc5d2c9
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/egt/visualization_test.py
|
alexminnaar/open_spiel
|
c17a390f8a007ccc309f76cb0cfa29f06dc5d2c9
|
[
"Apache-2.0"
] | 1
|
2020-12-25T03:02:31.000Z
|
2020-12-25T03:02:31.000Z
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.egt.visualization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from absl.testing import absltest
# pylint: disable=g-import-not-at-top
try:
from matplotlib.figure import Figure
from matplotlib.quiver import Quiver
from matplotlib.streamplot import StreamplotSet
except ImportError as e:
logging.info("If your tests failed with the error 'ImportError: No module "
"named functools_lru_cache', this is a known bug in matplotlib "
"and there is a workaround (run sudo apt install "
"python-backports.functools-lru-cache. See: "
"https://github.com/matplotlib/matplotlib/issues/9344.")
raise ImportError(str(e))
import numpy as np
from open_spiel.python.egt import dynamics
from open_spiel.python.egt import utils
from open_spiel.python.egt import visualization
import pyspiel
def _build_dynamics2x2():
"""Build multi-population dynamics."""
game = pyspiel.load_game("matrix_pd")
payoff_tensor = utils.nfg_to_ndarray(game)
return dynamics.MultiPopulationDynamics(payoff_tensor, dynamics.replicator)
def _build_dynamics3x3():
"""Build single-population dynamics."""
game = pyspiel.load_game("matrix_rps")
payoff_tensor = utils.nfg_to_ndarray(game)
return dynamics.SinglePopulationDynamics(payoff_tensor, dynamics.replicator)
def _identity_dynamics(x):
"""Returns same input as output."""
return x
class VisualizationTest(absltest.TestCase):
def test_meshgrid(self):
n = 10
payoff_tensor = np.ones(shape=(2, 2, 2))
identity = lambda x, f: x
allzero = lambda x, f: np.zeros(x.shape)
dyn = dynamics.MultiPopulationDynamics(payoff_tensor, (identity, allzero))
x, y, u, v = visualization._eval_dynamics_2x2_grid(dyn, n)
np.testing.assert_allclose(x, u)
np.testing.assert_allclose(v, np.zeros(shape=(n, n)))
dyn = dynamics.MultiPopulationDynamics(payoff_tensor, (allzero, identity))
x, y, u, v = visualization._eval_dynamics_2x2_grid(dyn, n)
np.testing.assert_allclose(u, np.zeros(shape=(n, n)))
np.testing.assert_allclose(y, v)
def test_quiver2x2(self):
"""Test 2x2 quiver plot."""
dyn = _build_dynamics2x2()
fig = Figure(figsize=(4, 4))
ax = fig.add_subplot(111, projection="2x2")
res = ax.quiver(dyn)
self.assertIsInstance(res, Quiver)
def test_streamplot2x2(self):
"""Test 2x2 quiver plot."""
dyn = _build_dynamics2x2()
fig = Figure(figsize=(4, 4))
ax = fig.add_subplot(111, projection="2x2")
res = ax.streamplot(dyn)
self.assertIsInstance(res, StreamplotSet)
def test_quiver3x3(self):
"""Test 3x3 quiver plot."""
dyn = _build_dynamics3x3()
fig = Figure(figsize=(4, 4))
ax = fig.add_subplot(111, projection="3x3")
res = ax.quiver(dyn)
self.assertIsInstance(res, Quiver)
def test_streamplot3x3(self):
"""Test 3x3 quiver plot."""
dyn = _build_dynamics3x3()
fig = Figure(figsize=(4, 4))
ax = fig.add_subplot(111, projection="3x3")
res = ax.streamplot(dyn)
self.assertIsInstance(res, visualization.SimplexStreamMask)
if __name__ == "__main__":
absltest.main()
| 33.206897
| 79
| 0.722222
|
322ba076a735a23cbb55ce4e7f4d2ddf5097fbb5
| 45,724
|
py
|
Python
|
pyAudioAnalysis/audioSegmentation.py
|
elastic255/pyAudioAnalysis
|
90cf0ad36194ae73e6c3624a4a84bcbb9f8ed857
|
[
"Apache-2.0"
] | null | null | null |
pyAudioAnalysis/audioSegmentation.py
|
elastic255/pyAudioAnalysis
|
90cf0ad36194ae73e6c3624a4a84bcbb9f8ed857
|
[
"Apache-2.0"
] | null | null | null |
pyAudioAnalysis/audioSegmentation.py
|
elastic255/pyAudioAnalysis
|
90cf0ad36194ae73e6c3624a4a84bcbb9f8ed857
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import numpy
import sklearn.cluster
import scipy
import os
from pyAudioAnalysis import audioFeatureExtraction as aF
from pyAudioAnalysis import audioTrainTest as aT
from pyAudioAnalysis import audioBasicIO
from scipy.spatial import distance
import matplotlib.pyplot as plt
import sklearn.discriminant_analysis
import csv
import os.path
import sklearn
import sklearn.cluster
import hmmlearn.hmm
import pickle as cPickle
import glob
""" General utility functions """
def smoothMovingAvg(inputSignal, windowLen=11):
windowLen = int(windowLen)
if inputSignal.ndim != 1:
raise ValueError("")
if inputSignal.size < windowLen:
raise ValueError("Input vector needs to be bigger than window size.")
if windowLen < 3:
return inputSignal
s = numpy.r_[2*inputSignal[0] - inputSignal[windowLen-1::-1],
inputSignal, 2*inputSignal[-1]-inputSignal[-1:-windowLen:-1]]
w = numpy.ones(windowLen, 'd')
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[windowLen:-windowLen+1]
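# Illustrative example: smoothing a noisy ramp with an 11-sample moving average
# returns a signal of the same length as the input (the reflected padding is
# trimmed off before returning).
def _example_smoothing():
    x = numpy.linspace(0, 1, 200) + 0.05 * numpy.random.randn(200)
    y = smoothMovingAvg(x, windowLen=11)
    return x.shape == y.shape   # True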
def selfSimilarityMatrix(featureVectors):
'''
This function computes the self-similarity matrix for a sequence
of feature vectors.
ARGUMENTS:
- featureVectors: a numpy matrix (nDims x nVectors) whose i-th column
corresponds to the i-th feature vector
RETURNS:
- S: the self-similarity matrix (nVectors x nVectors)
'''
[nDims, nVectors] = featureVectors.shape
[featureVectors2, MEAN, STD] = aT.normalizeFeatures([featureVectors.T])
featureVectors2 = featureVectors2[0].T
S = 1.0 - distance.squareform(distance.pdist(featureVectors2.T, 'cosine'))
return S
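# Tiny illustrative check with random data: the self-similarity matrix is
# square with shape (nVectors, nVectors) and has ones on its diagonal, since
# every frame is maximally similar to itself.
def _example_self_similarity():
    feats = numpy.random.rand(10, 50)   # 10 feature dimensions, 50 frames
    S = selfSimilarityMatrix(feats)
    return S.shape                      # (50, 50)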
def flags2segs(flags, window):
'''
ARGUMENTS:
- flags: a sequence of class flags (per time window)
- window: window duration (in seconds)
RETURNS:
- segs: a sequence of segment's limits: segs[i,0] is start and
segs[i,1] are start and end point of segment i
- classes: a sequence of class flags: class[i] is the class ID of
the i-th segment
'''
preFlag = 0
cur_flag = 0
n_segs = 0
cur_val = flags[cur_flag]
segsList = []
classes = []
while (cur_flag < len(flags) - 1):
stop = 0
preFlag = cur_flag
preVal = cur_val
while (stop == 0):
cur_flag = cur_flag + 1
tempVal = flags[cur_flag]
            if (tempVal != cur_val) or (cur_flag == len(flags) - 1):  # stop
n_segs = n_segs + 1
stop = 1
cur_seg = cur_val
cur_val = flags[cur_flag]
segsList.append((cur_flag * window))
classes.append(preVal)
segs = numpy.zeros((len(segsList), 2))
for i in range(len(segsList)):
if i > 0:
segs[i, 0] = segsList[i-1]
segs[i, 1] = segsList[i]
return (segs, classes)
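# Illustrative example: four 1-second windows of "speech" followed by two of
# "music" collapse into two segments. Note that the final boundary lands on the
# index of the last window (5.0 here), following the loop above.
def _example_flags2segs():
    flags = ['speech', 'speech', 'speech', 'speech', 'music', 'music']
    segs, classes = flags2segs(flags, 1.0)
    # segs    -> [[0., 4.], [4., 5.]]
    # classes -> ['speech', 'music']
    return segs, classes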
def segs2flags(seg_start, seg_end, seg_label, win_size):
'''
This function converts segment endpoints and respective segment
labels to fix-sized class labels.
ARGUMENTS:
- seg_start: segment start points (in seconds)
- seg_end: segment endpoints (in seconds)
- seg_label: segment labels
- win_size: fix-sized window (in seconds)
RETURNS:
- flags: numpy array of class indices
- class_names: list of classnames (strings)
'''
flags = []
class_names = list(set(seg_label))
curPos = win_size / 2.0
while curPos < seg_end[-1]:
for i in range(len(seg_start)):
if curPos > seg_start[i] and curPos <= seg_end[i]:
break
flags.append(class_names.index(seg_label[i]))
curPos += win_size
return numpy.array(flags), class_names
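# Illustrative example: two labelled segments sampled with a 1-second window
# yield one class index per window plus the list of class names (the order of
# class_names comes from a set, so it is not guaranteed).
def _example_segs2flags():
    flags, class_names = segs2flags([0.0, 4.0], [4.0, 6.0],
                                    ['speech', 'music'], 1.0)
    return flags, class_names   # 6 window labels, 2 class names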
def computePreRec(cm, class_names):
'''
This function computes the precision, recall and f1 measures,
given a confusion matrix
'''
n_classes = cm.shape[0]
if len(class_names) != n_classes:
print("Error in computePreRec! Confusion matrix and class_names "
"list must be of the same size!")
return
precision = []
recall = []
f1 = []
for i, c in enumerate(class_names):
precision.append(cm[i,i] / numpy.sum(cm[:,i]))
recall.append(cm[i,i] / numpy.sum(cm[i,:]))
f1.append( 2 * precision[-1] * recall[-1] / (precision[-1] + recall[-1]))
return recall, precision, f1
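# Worked example (illustrative numbers): for this 2x2 confusion matrix the
# first class has precision 8/10 and recall 8/9, the second has precision 6/7
# and recall 6/8; f1 is the harmonic mean of each pair.
def _example_pre_rec():
    cm = numpy.array([[8.0, 1.0],
                      [2.0, 6.0]])
    return computePreRec(cm, ['speech', 'music'])   # (recall, precision, f1)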
def readSegmentGT(gt_file):
'''
This function reads a segmentation ground truth file, following a simple CSV format with the following columns:
<segment start>,<segment end>,<class label>
ARGUMENTS:
- gt_file: the path of the CSV segment file
RETURNS:
- seg_start: a numpy array of segments' start positions
- seg_end: a numpy array of segments' ending positions
- seg_label: a list of respective class labels (strings)
'''
f = open(gt_file, 'rt')
reader = csv.reader(f, delimiter=',')
seg_start = []
seg_end = []
seg_label = []
for row in reader:
if len(row) == 3:
seg_start.append(float(row[0]))
seg_end.append(float(row[1]))
#if row[2]!="other":
# seg_label.append((row[2]))
#else:
# seg_label.append("silence")
seg_label.append((row[2]))
return numpy.array(seg_start), numpy.array(seg_end), seg_label
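# Example of the expected ground-truth CSV format (content is made up): each
# row is <segment start seconds>,<segment end seconds>,<label>.
def _example_write_segment_gt(path='example_gt.csv'):
    rows = ['0.0,4.2,speech', '4.2,7.0,music', '7.0,9.5,speech']
    with open(path, 'wt') as f:
        f.write('\n'.join(rows) + '\n')
    return readSegmentGT(path)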
def plotSegmentationResults(flags_ind, flags_ind_gt, class_names, mt_step, ONLY_EVALUATE=False):
'''
This function plots statistics on the classification-segmentation results produced either by the fix-sized supervised method or the HMM method.
It also computes the overall accuracy achieved by the respective method if ground-truth is available.
'''
flags = [class_names[int(f)] for f in flags_ind]
(segs, classes) = flags2segs(flags, mt_step)
min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0])
if min_len > 0:
accuracy = numpy.sum(flags_ind[0:min_len] ==
flags_ind_gt[0:min_len]) / float(min_len)
else:
accuracy = -1
if not ONLY_EVALUATE:
duration = segs[-1, 1]
s_percentages = numpy.zeros((len(class_names), 1))
percentages = numpy.zeros((len(class_names), 1))
av_durations = numpy.zeros((len(class_names), 1))
for iSeg in range(segs.shape[0]):
s_percentages[class_names.index(classes[iSeg])] += \
(segs[iSeg, 1]-segs[iSeg, 0])
for i in range(s_percentages.shape[0]):
percentages[i] = 100.0 * s_percentages[i] / duration
S = sum(1 for c in classes if c == class_names[i])
if S > 0:
av_durations[i] = s_percentages[i] / S
else:
av_durations[i] = 0.0
for i in range(percentages.shape[0]):
print(class_names[i], percentages[i], av_durations[i])
font = {'size': 10}
plt.rc('font', **font)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.set_yticks(numpy.array(range(len(class_names))))
ax1.axis((0, duration, -1, len(class_names)))
ax1.set_yticklabels(class_names)
ax1.plot(numpy.array(range(len(flags_ind))) * mt_step +
mt_step / 2.0, flags_ind)
if flags_ind_gt.shape[0] > 0:
ax1.plot(numpy.array(range(len(flags_ind_gt))) * mt_step +
mt_step / 2.0, flags_ind_gt + 0.05, '--r')
plt.xlabel("time (seconds)")
if accuracy >= 0:
plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
ax2 = fig.add_subplot(223)
plt.title("Classes percentage durations")
ax2.axis((0, len(class_names) + 1, 0, 100))
ax2.set_xticks(numpy.array(range(len(class_names) + 1)))
ax2.set_xticklabels([" "] + class_names)
ax2.bar(numpy.array(range(len(class_names))) + 0.5, percentages)
ax3 = fig.add_subplot(224)
plt.title("Segment average duration per class")
ax3.axis((0, len(class_names)+1, 0, av_durations.max()))
ax3.set_xticks(numpy.array(range(len(class_names) + 1)))
ax3.set_xticklabels([" "] + class_names)
ax3.bar(numpy.array(range(len(class_names))) + 0.5, av_durations)
fig.tight_layout()
plt.show()
return accuracy
def evaluateSpeakerDiarization(flags, flags_gt):
min_len = min(flags.shape[0], flags_gt.shape[0])
flags = flags[0:min_len]
flags_gt = flags_gt[0:min_len]
u_flags = numpy.unique(flags)
u_flags_gt = numpy.unique(flags_gt)
    # compute contingency table:
c_matrix = numpy.zeros((u_flags.shape[0], u_flags_gt.shape[0]))
for i in range(min_len):
c_matrix[int(numpy.nonzero(u_flags == flags[i])[0]),
int(numpy.nonzero(u_flags_gt == flags_gt[i])[0])] += 1.0
Nc, Ns = c_matrix.shape
N_s = numpy.sum(c_matrix, axis=0)
N_c = numpy.sum(c_matrix, axis=1)
N = numpy.sum(c_matrix)
purity_clust = numpy.zeros((Nc, ))
purity_speak = numpy.zeros((Ns, ))
# compute cluster purity:
for i in range(Nc):
purity_clust[i] = numpy.max((c_matrix[i, :])) / (N_c[i])
for j in range(Ns):
purity_speak[j] = numpy.max((c_matrix[:, j])) / (N_s[j])
purity_cluster_m = numpy.sum(purity_clust * N_c) / N
purity_speaker_m = numpy.sum(purity_speak * N_s) / N
return purity_cluster_m, purity_speaker_m
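# Illustrative only: if the estimated cluster labels match the ground-truth labels
# window-for-window (up to a renaming of cluster IDs), both purities equal 1.0, e.g.
#   evaluateSpeakerDiarization(numpy.array([0, 0, 1, 1]), numpy.array([5, 5, 7, 7]))
#   -> (1.0, 1.0)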
def trainHMM_computeStatistics(features, labels):
'''
This function computes the statistics used to train an HMM joint segmentation-classification model
using a sequence of sequential features and respective labels
ARGUMENTS:
- features: a numpy matrix of feature vectors (numOfDimensions x n_wins)
- labels: a numpy array of class indices (n_wins x 1)
RETURNS:
- start_prob: matrix of prior class probabilities (n_classes x 1)
- transmat: transition matrix (n_classes x n_classes)
- means: means matrix (numOfDimensions x 1)
- cov: deviation matrix (numOfDimensions x 1)
'''
u_labels = numpy.unique(labels)
n_comps = len(u_labels)
n_feats = features.shape[0]
if features.shape[1] < labels.shape[0]:
print("trainHMM warning: number of short-term feature vectors "
"must be greater or equal to the labels length!")
labels = labels[0:features.shape[1]]
# compute prior probabilities:
start_prob = numpy.zeros((n_comps,))
for i, u in enumerate(u_labels):
start_prob[i] = numpy.count_nonzero(labels == u)
# normalize prior probabilities
start_prob = start_prob / start_prob.sum()
# compute transition matrix:
transmat = numpy.zeros((n_comps, n_comps))
for i in range(labels.shape[0]-1):
transmat[int(labels[i]), int(labels[i + 1])] += 1
# normalize rows of transition matrix:
for i in range(n_comps):
transmat[i, :] /= transmat[i, :].sum()
means = numpy.zeros((n_comps, n_feats))
for i in range(n_comps):
means[i, :] = numpy.matrix(features[:,
numpy.nonzero(labels ==
u_labels[i])[0]].mean(axis=1))
cov = numpy.zeros((n_comps, n_feats))
for i in range(n_comps):
#cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==u_labels[i])[0]]) # use this lines if HMM using full gaussian distributions are to be used!
cov[i, :] = numpy.std(features[:, numpy.nonzero(labels ==
u_labels[i])[0]],
axis=1)
return start_prob, transmat, means, cov
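# Illustrative only: a tiny synthetic example of the statistics computed above.
#   features = numpy.random.rand(2, 6)            # 2 dimensions x 6 windows
#   labels = numpy.array([0, 0, 0, 1, 1, 1])
#   start_prob, transmat, means, cov = trainHMM_computeStatistics(features, labels)
#   -> start_prob ~ [0.5, 0.5]; transmat ~ [[2/3, 1/3], [0, 1]] (5 observed transitions)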
def trainHMM_fromFile(wav_file, gt_file, hmm_model_name, mt_win, mt_step):
'''
This function trains a HMM model for segmentation-classification using a single annotated audio file
ARGUMENTS:
- wav_file: the path of the audio filename
- gt_file: the path of the ground truth filename
                         (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row)
- hmm_model_name: the name of the HMM model to be stored
- mt_win: mid-term window size
- mt_step: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- class_names: a list of class_names
After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file
'''
[seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step)
[fs, x] = audioBasicIO.readAudioFile(wav_file)
[F, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs,
round(fs * 0.050), round(fs * 0.050))
start_prob, transmat, means, cov = trainHMM_computeStatistics(F, flags)
hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
hmm.startprob_ = start_prob
hmm.transmat_ = transmat
hmm.means_ = means
hmm.covars_ = cov
fo = open(hmm_model_name, "wb")
cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(class_names, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
return hmm, class_names
def trainHMM_fromDir(dirPath, hmm_model_name, mt_win, mt_step):
'''
    This function trains an HMM model for segmentation-classification using
    a directory where WAV files and .segments (ground-truth) files are stored
    ARGUMENTS:
     - dirPath:        the path of the data directory
- hmm_model_name: the name of the HMM model to be stored
- mt_win: mid-term window size
- mt_step: mid-term window step
RETURNS:
- hmm: an object to the resulting HMM
- class_names: a list of class_names
After training, hmm, class_names, along with the mt_win
and mt_step values are stored in the hmm_model_name file
'''
flags_all = numpy.array([])
classes_all = []
for i, f in enumerate(glob.glob(dirPath + os.sep + '*.wav')):
# for each WAV file
wav_file = f
gt_file = f.replace('.wav', '.segments')
if not os.path.isfile(gt_file):
continue
[seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step)
for c in class_names:
# update class names:
if c not in classes_all:
classes_all.append(c)
[fs, x] = audioBasicIO.readAudioFile(wav_file)
[F, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs,
mt_step * fs, round(fs * 0.050),
round(fs * 0.050))
lenF = F.shape[1]
lenL = len(flags)
min_sm = min(lenF, lenL)
F = F[:, 0:min_sm]
flags = flags[0:min_sm]
flagsNew = []
for j, fl in enumerate(flags): # append features and labels
flagsNew.append(classes_all.index(class_names[flags[j]]))
flags_all = numpy.append(flags_all, numpy.array(flagsNew))
if i == 0:
f_all = F
else:
f_all = numpy.concatenate((f_all, F), axis=1)
start_prob, transmat, means, cov = trainHMM_computeStatistics(f_all, flags_all) # compute HMM statistics
hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag") # train HMM
hmm.startprob_ = start_prob
hmm.transmat_ = transmat
hmm.means_ = means
hmm.covars_ = cov
fo = open(hmm_model_name, "wb") # save HMM model
cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classes_all, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
return hmm, classes_all
def hmmSegmentation(wav_file_name, hmm_model_name, plot_res=False,
gt_file_name=""):
[fs, x] = audioBasicIO.readAudioFile(wav_file_name)
try:
fo = open(hmm_model_name, "rb")
except IOError:
print("didn't find file")
return
    try:
        hmm = cPickle.load(fo)
        classes_all = cPickle.load(fo)
        mt_win = cPickle.load(fo)
        mt_step = cPickle.load(fo)
    except Exception:
        # the model file is corrupt or incomplete: close it and give up
        fo.close()
        return
    fo.close()
[Features, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs,
round(fs * 0.050),
round(fs * 0.050))
flags_ind = hmm.predict(Features.T) # apply model
if os.path.isfile(gt_file_name):
[seg_start, seg_end, seg_labs] = readSegmentGT(gt_file_name)
flags_gt, class_names_gt = segs2flags(seg_start, seg_end, seg_labs,
mt_step)
flagsGTNew = []
for j, fl in enumerate(flags_gt):
# "align" labels with GT
if class_names_gt[flags_gt[j]] in classes_all:
flagsGTNew.append(classes_all.index(class_names_gt[flags_gt[j]]))
else:
flagsGTNew.append(-1)
cm = numpy.zeros((len(classes_all), len(classes_all)))
flags_ind_gt = numpy.array(flagsGTNew)
for i in range(min(flags_ind.shape[0], flags_ind_gt.shape[0])):
cm[int(flags_ind_gt[i]),int(flags_ind[i])] += 1
else:
flags_ind_gt = numpy.array([])
acc = plotSegmentationResults(flags_ind, flags_ind_gt, classes_all,
mt_step, not plot_res)
if acc >= 0:
print("Overall Accuracy: {0:.2f}".format(acc))
return (flags_ind, class_names_gt, acc, cm)
else:
return (flags_ind, classes_all, -1, -1)
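# Illustrative only (the file names below are hypothetical):
#   flags_ind, classes, acc, cm = hmmSegmentation("example.wav", "hmmModel",
#                                                 plot_res=True, gt_file_name="example.segments")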
def mtFileClassification(input_file, model_name, model_type,
plot_results=False, gt_file=""):
'''
This function performs mid-term classification of an audio stream.
Towards this end, supervised knowledge is used, i.e. a pre-trained classifier.
ARGUMENTS:
- input_file: path of the input WAV file
- model_name: name of the classification model
- model_type: svm or knn depending on the classifier type
- plot_results: True if results are to be plotted using
matplotlib along with a set of statistics
RETURNS:
- segs: a sequence of segment's endpoints: segs[i] is the
endpoint of the i-th segment (in seconds)
- classes: a sequence of class flags: class[i] is the
class ID of the i-th segment
'''
if not os.path.isfile(model_name):
print("mtFileClassificationError: input model_type not found!")
return (-1, -1, -1, -1)
# Load classifier:
if model_type == "knn":
[classifier, MEAN, STD, class_names, mt_win, mt_step, st_win, st_step, compute_beat] = \
aT.load_model_knn(model_name)
else:
[classifier, MEAN, STD, class_names, mt_win, mt_step, st_win, st_step,
compute_beat] = aT.load_model(model_name)
if compute_beat:
print("Model " + model_name + " contains long-term music features "
"(beat etc) and cannot be used in "
"segmentation")
return (-1, -1, -1, -1)
[fs, x] = audioBasicIO.readAudioFile(input_file) # load input file
if fs == -1: # could not read file
return (-1, -1, -1, -1)
x = audioBasicIO.stereo2mono(x) # convert stereo (if) to mono
duration = len(x) / fs
# mid-term feature extraction:
[mt_feats, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs,
mt_step * fs,
round(fs * st_win),
round(fs * st_step))
flags = []
Ps = []
flags_ind = []
for i in range(mt_feats.shape[1]): # for each feature vector (i.e. for each fix-sized segment):
cur_fv = (mt_feats[:, i] - MEAN) / STD # normalize current feature vector
[res, P] = aT.classifierWrapper(classifier, model_type, cur_fv) # classify vector
flags_ind.append(res)
flags.append(class_names[int(res)]) # update class label matrix
Ps.append(numpy.max(P)) # update probability matrix
flags_ind = numpy.array(flags_ind)
# 1-window smoothing
for i in range(1, len(flags_ind) - 1):
if flags_ind[i-1] == flags_ind[i + 1]:
flags_ind[i] = flags_ind[i + 1]
# convert fix-sized flags to segments and classes
(segs, classes) = flags2segs(flags, mt_step)
segs[-1] = len(x) / float(fs)
    # Load ground-truth:
if os.path.isfile(gt_file):
[seg_start_gt, seg_end_gt, seg_l_gt] = readSegmentGT(gt_file)
flags_gt, class_names_gt = segs2flags(seg_start_gt, seg_end_gt, seg_l_gt, mt_step)
flags_ind_gt = []
for j, fl in enumerate(flags_gt):
# "align" labels with GT
if class_names_gt[flags_gt[j]] in class_names:
flags_ind_gt.append(class_names.index(class_names_gt[flags_gt[j]]))
else:
flags_ind_gt.append(-1)
flags_ind_gt = numpy.array(flags_ind_gt)
cm = numpy.zeros((len(class_names_gt), len(class_names_gt)))
for i in range(min(flags_ind.shape[0], flags_ind_gt.shape[0])):
cm[int(flags_ind_gt[i]),int(flags_ind[i])] += 1
else:
cm = []
flags_ind_gt = numpy.array([])
acc = plotSegmentationResults(flags_ind, flags_ind_gt,
class_names, mt_step, not plot_results)
if acc >= 0:
print("Overall Accuracy: {0:.3f}".format(acc) )
return (flags_ind, class_names_gt, acc, cm)
else:
return (flags_ind, class_names, acc, cm)
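# Illustrative only (the WAV path and model name are hypothetical):
#   flags_ind, classes, acc, cm = mtFileClassification("example.wav", "svmSMtemp", "svm",
#                                                      plot_results=True, gt_file="example.segments")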
def evaluateSegmentationClassificationDir(dir_name, model_name, method_name):
flags_all = numpy.array([])
classes_all = []
accuracies = []
for i, f in enumerate(glob.glob(dir_name + os.sep + '*.wav')): # for each WAV file
wav_file = f
print(wav_file)
        gt_file = f.replace('.wav', '.segments')         # path of the corresponding annotation file
if method_name.lower() in ["svm", "svm_rbf", "knn",
"randomforest","gradientboosting",
"extratrees"]:
flags_ind, class_names, acc, cm_t = \
mtFileClassification(wav_file, model_name, method_name,
False, gt_file)
else:
flags_ind, class_names, acc, cm_t = hmmSegmentation(wav_file,
model_name,
False, gt_file)
if acc > -1:
if i==0:
cm = numpy.copy(cm_t)
else:
cm = cm + cm_t
accuracies.append(acc)
print(cm_t, class_names)
print(cm)
[rec, pre, f1] = computePreRec(cm_t, class_names)
cm = cm / numpy.sum(cm)
[rec, pre, f1] = computePreRec(cm, class_names)
print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
print("Average Accuracy: {0:.1f}".format(100.0*numpy.array(accuracies).mean()))
print("Average recall: {0:.1f}".format(100.0*numpy.array(rec).mean()))
print("Average precision: {0:.1f}".format(100.0*numpy.array(pre).mean()))
print("Average f1: {0:.1f}".format(100.0*numpy.array(f1).mean()))
print("Median Accuracy: {0:.1f}".format(100.0*numpy.median(numpy.array(accuracies))))
print("Min Accuracy: {0:.1f}".format(100.0*numpy.array(accuracies).min()))
print("Max Accuracy: {0:.1f}".format(100.0*numpy.array(accuracies).max()))
def silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False):
'''
Event Detection (silence removal)
ARGUMENTS:
- x: the input audio signal
- fs: sampling freq
- st_win, st_step: window size and step in seconds
         - smoothWindow:     (optional) smoothing window (in seconds)
         - weight:           (optional) weight factor (0 < weight < 1); the higher, the more strict
         - plot:             (optional) True if results are to be plotted
RETURNS:
         - seg_limits:    list of segment limits in seconds (e.g. [[0.1, 0.9], [1.4, 3.0]] means that
                          the resulting segments are (0.1 - 0.9) and (1.4 - 3.0) seconds)
'''
if weight >= 1:
weight = 0.99
if weight <= 0:
weight = 0.01
# Step 1: feature extraction
x = audioBasicIO.stereo2mono(x)
st_feats, _ = aF.stFeatureExtraction(x, fs, st_win * fs,
st_step * fs)
# Step 2: train binary svm classifier of low vs high energy frames
# keep only the energy short-term sequence (2nd feature)
st_energy = st_feats[1, :]
en = numpy.sort(st_energy)
# number of 10% of the total short-term windows
l1 = int(len(en) / 10)
# compute "lower" 10% energy threshold
t1 = numpy.mean(en[0:l1]) + 0.000000000000001
# compute "higher" 10% energy threshold
t2 = numpy.mean(en[-l1:-1]) + 0.000000000000001
# get all features that correspond to low energy
class1 = st_feats[:, numpy.where(st_energy <= t1)[0]]
# get all features that correspond to high energy
class2 = st_feats[:, numpy.where(st_energy >= t2)[0]]
# form the binary classification task and ...
faets_s = [class1.T, class2.T]
# normalize and train the respective svm probabilistic model
# (ONSET vs SILENCE)
[faets_s_norm, means_s, stds_s] = aT.normalizeFeatures(faets_s)
svm = aT.trainSVM(faets_s_norm, 1.0)
# Step 3: compute onset probability based on the trained svm
prob_on_set = []
for i in range(st_feats.shape[1]):
# for each frame
cur_fv = (st_feats[:, i] - means_s) / stds_s
# get svm probability (that it belongs to the ONSET class)
prob_on_set.append(svm.predict_proba(cur_fv.reshape(1,-1))[0][1])
prob_on_set = numpy.array(prob_on_set)
# smooth probability:
prob_on_set = smoothMovingAvg(prob_on_set, smoothWindow / st_step)
# Step 4A: detect onset frame indices:
prog_on_set_sort = numpy.sort(prob_on_set)
# find probability Threshold as a weighted average
# of top 10% and lower 10% of the values
Nt = int(prog_on_set_sort.shape[0] / 10)
T = (numpy.mean((1 - weight) * prog_on_set_sort[0:Nt]) +
weight * numpy.mean(prog_on_set_sort[-Nt::]))
max_idx = numpy.where(prob_on_set > T)[0]
# get the indices of the frames that satisfy the thresholding
i = 0
time_clusters = []
seg_limits = []
# Step 4B: group frame indices to onset segments
while i < len(max_idx):
# for each of the detected onset indices
cur_cluster = [max_idx[i]]
if i == len(max_idx)-1:
break
while max_idx[i+1] - cur_cluster[-1] <= 2:
cur_cluster.append(max_idx[i+1])
i += 1
if i == len(max_idx)-1:
break
i += 1
time_clusters.append(cur_cluster)
seg_limits.append([cur_cluster[0] * st_step,
cur_cluster[-1] * st_step])
# Step 5: Post process: remove very small segments:
min_dur = 0.2
seg_limits_2 = []
for s in seg_limits:
if s[1] - s[0] > min_dur:
seg_limits_2.append(s)
seg_limits = seg_limits_2
if plot:
timeX = numpy.arange(0, x.shape[0] / float(fs), 1.0 / fs)
plt.subplot(2, 1, 1)
plt.plot(timeX, x)
for s in seg_limits:
plt.axvline(x=s[0])
plt.axvline(x=s[1])
plt.subplot(2, 1, 2)
plt.plot(numpy.arange(0, prob_on_set.shape[0] * st_step, st_step),
prob_on_set)
plt.title('Signal')
for s in seg_limits:
plt.axvline(x=s[0])
plt.axvline(x=s[1])
plt.title('svm Probability')
plt.show()
return seg_limits
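# Illustrative only (the WAV path is hypothetical):
#   [fs, x] = audioBasicIO.readAudioFile("example.wav")
#   segments = silenceRemoval(x, fs, 0.020, 0.020, smoothWindow=1.0, weight=0.3, plot=True)
#   -> e.g. [[0.5, 2.2], [3.1, 4.8]] : the detected non-silent intervals in seconds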
def speakerDiarization(filename, n_speakers, mt_size=2.0, mt_step=0.2,
st_win=0.05, lda_dim=35, plot_res=False):
'''
ARGUMENTS:
- filename: the name of the WAV file to be analyzed
        - n_speakers:       the number of speakers (clusters) in the recording (<=0 for unknown)
- mt_size (opt) mid-term window size
- mt_step (opt) mid-term window step
- st_win (opt) short-term window size
- lda_dim (opt) LDA dimension (0 for no LDA)
        - plot_res (opt)    0 for not plotting the results, 1 for plotting
'''
[fs, x, audio_file] = audioBasicIO.readAudioFile(filename)
x = audioBasicIO.stereo2mono(x)
duration = len(x) / fs
[classifier_1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "knnSpeakerAll"))
[classifier_2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "knnSpeakerFemaleMale"))
[mt_feats, st_feats, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs,
mt_step * fs,
round(fs * st_win),
round(fs*st_win * 0.5))
MidTermFeatures2 = numpy.zeros((mt_feats.shape[0] + len(classNames1) +
len(classNames2), mt_feats.shape[1]))
for i in range(mt_feats.shape[1]):
cur_f1 = (mt_feats[:, i] - MEAN1) / STD1
cur_f2 = (mt_feats[:, i] - MEAN2) / STD2
[res, P1] = aT.classifierWrapper(classifier_1, "knn", cur_f1)
[res, P2] = aT.classifierWrapper(classifier_2, "knn", cur_f2)
MidTermFeatures2[0:mt_feats.shape[0], i] = mt_feats[:, i]
MidTermFeatures2[mt_feats.shape[0]:mt_feats.shape[0]+len(classNames1), i] = P1 + 0.0001
MidTermFeatures2[mt_feats.shape[0] + len(classNames1)::, i] = P2 + 0.0001
mt_feats = MidTermFeatures2 # TODO
iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]
mt_feats = mt_feats[iFeaturesSelect, :]
(mt_feats_norm, MEAN, STD) = aT.normalizeFeatures([mt_feats.T])
mt_feats_norm = mt_feats_norm[0].T
n_wins = mt_feats.shape[1]
# remove outliers:
dist_all = numpy.sum(distance.squareform(distance.pdist(mt_feats_norm.T)),
axis=0)
m_dist_all = numpy.mean(dist_all)
i_non_outliers = numpy.nonzero(dist_all < 1.2 * m_dist_all)[0]
# TODO: Combine energy threshold for outlier removal:
#EnergyMin = numpy.min(mt_feats[1,:])
#EnergyMean = numpy.mean(mt_feats[1,:])
#Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
#i_non_outliers = numpy.nonzero(mt_feats[1,:] > Thres)[0]
#print i_non_outliers
perOutLier = (100.0 * (n_wins - i_non_outliers.shape[0])) / n_wins
mt_feats_norm_or = mt_feats_norm
mt_feats_norm = mt_feats_norm[:, i_non_outliers]
# LDA dimensionality reduction:
if lda_dim > 0:
#[mt_feats_to_red, _, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs, st_win * fs, round(fs*st_win), round(fs*st_win));
# extract mid-term features with minimum step:
mt_win_ratio = int(round(mt_size / st_win))
mt_step_ratio = int(round(st_win / st_win))
mt_feats_to_red = []
num_of_features = len(st_feats)
num_of_stats = 2
#for i in range(num_of_stats * num_of_features + 1):
for i in range(num_of_stats * num_of_features):
mt_feats_to_red.append([])
for i in range(num_of_features): # for each of the short-term features:
curPos = 0
N = len(st_feats[i])
while (curPos < N):
N1 = curPos
N2 = curPos + mt_win_ratio
if N2 > N:
N2 = N
curStFeatures = st_feats[i][N1:N2]
mt_feats_to_red[i].append(numpy.mean(curStFeatures))
mt_feats_to_red[i+num_of_features].append(numpy.std(curStFeatures))
curPos += mt_step_ratio
mt_feats_to_red = numpy.array(mt_feats_to_red)
mt_feats_to_red_2 = numpy.zeros((mt_feats_to_red.shape[0] +
len(classNames1) + len(classNames2),
mt_feats_to_red.shape[1]))
for i in range(mt_feats_to_red.shape[1]):
cur_f1 = (mt_feats_to_red[:, i] - MEAN1) / STD1
cur_f2 = (mt_feats_to_red[:, i] - MEAN2) / STD2
[res, P1] = aT.classifierWrapper(classifier_1, "knn", cur_f1)
[res, P2] = aT.classifierWrapper(classifier_2, "knn", cur_f2)
mt_feats_to_red_2[0:mt_feats_to_red.shape[0], i] = mt_feats_to_red[:, i]
mt_feats_to_red_2[mt_feats_to_red.shape[0]:mt_feats_to_red.shape[0] + len(classNames1), i] = P1 + 0.0001
mt_feats_to_red_2[mt_feats_to_red.shape[0]+len(classNames1)::, i] = P2 + 0.0001
mt_feats_to_red = mt_feats_to_red_2
mt_feats_to_red = mt_feats_to_red[iFeaturesSelect, :]
#mt_feats_to_red += numpy.random.rand(mt_feats_to_red.shape[0], mt_feats_to_red.shape[1]) * 0.0000010
(mt_feats_to_red, MEAN, STD) = aT.normalizeFeatures([mt_feats_to_red.T])
mt_feats_to_red = mt_feats_to_red[0].T
#dist_all = numpy.sum(distance.squareform(distance.pdist(mt_feats_to_red.T)), axis=0)
#m_dist_all = numpy.mean(dist_all)
#iNonOutLiers2 = numpy.nonzero(dist_all < 3.0*m_dist_all)[0]
#mt_feats_to_red = mt_feats_to_red[:, iNonOutLiers2]
Labels = numpy.zeros((mt_feats_to_red.shape[1], ));
LDAstep = 1.0
LDAstepRatio = LDAstep / st_win
#print LDAstep, LDAstepRatio
for i in range(Labels.shape[0]):
Labels[i] = int(i*st_win/LDAstepRatio);
clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=lda_dim)
clf.fit(mt_feats_to_red.T, Labels)
mt_feats_norm = (clf.transform(mt_feats_norm.T)).T
if n_speakers <= 0:
s_range = range(2, 10)
else:
s_range = [n_speakers]
clsAll = []
sil_all = []
centersAll = []
for iSpeakers in s_range:
k_means = sklearn.cluster.KMeans(n_clusters=iSpeakers)
k_means.fit(mt_feats_norm.T)
cls = k_means.labels_
means = k_means.cluster_centers_
# Y = distance.squareform(distance.pdist(mt_feats_norm.T))
clsAll.append(cls)
centersAll.append(means)
sil_1 = []; sil_2 = []
for c in range(iSpeakers):
# for each speaker (i.e. for each extracted cluster)
clust_per_cent = numpy.nonzero(cls == c)[0].shape[0] / \
float(len(cls))
if clust_per_cent < 0.020:
sil_1.append(0.0)
sil_2.append(0.0)
else:
# get subset of feature vectors
mt_feats_norm_temp = mt_feats_norm[:, cls==c]
# compute average distance between samples
# that belong to the cluster (a values)
Yt = distance.pdist(mt_feats_norm_temp.T)
sil_1.append(numpy.mean(Yt)*clust_per_cent)
silBs = []
for c2 in range(iSpeakers):
# compute distances from samples of other clusters
if c2 != c:
clust_per_cent_2 = numpy.nonzero(cls == c2)[0].shape[0] /\
float(len(cls))
MidTermFeaturesNormTemp2 = mt_feats_norm[:, cls == c2]
Yt = distance.cdist(mt_feats_norm_temp.T,
MidTermFeaturesNormTemp2.T)
silBs.append(numpy.mean(Yt)*(clust_per_cent
+ clust_per_cent_2)/2.0)
silBs = numpy.array(silBs)
# ... and keep the minimum value (i.e.
# the distance from the "nearest" cluster)
sil_2.append(min(silBs))
sil_1 = numpy.array(sil_1);
sil_2 = numpy.array(sil_2);
sil = []
for c in range(iSpeakers):
# for each cluster (speaker) compute silhouette
sil.append( ( sil_2[c] - sil_1[c]) / (max(sil_2[c],
sil_1[c]) + 0.00001))
            # keep the AVERAGE SILHOUETTE
sil_all.append(numpy.mean(sil))
imax = numpy.argmax(sil_all)
# optimal number of clusters
nSpeakersFinal = s_range[imax]
# generate the final set of cluster labels
# (important: need to retrieve the outlier windows:
# this is achieved by giving them the value of their
# nearest non-outlier window)
cls = numpy.zeros((n_wins,))
for i in range(n_wins):
j = numpy.argmin(numpy.abs(i-i_non_outliers))
cls[i] = clsAll[imax][j]
# Post-process method 1: hmm smoothing
for i in range(1):
# hmm training
start_prob, transmat, means, cov = \
trainHMM_computeStatistics(mt_feats_norm_or, cls)
hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
hmm.startprob_ = start_prob
hmm.transmat_ = transmat
hmm.means_ = means; hmm.covars_ = cov
cls = hmm.predict(mt_feats_norm_or.T)
# Post-process method 2: median filtering:
cls = scipy.signal.medfilt(cls, 13)
cls = scipy.signal.medfilt(cls, 11)
sil = sil_all[imax]
class_names = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)];
# load ground-truth if available
gt_file = filename.replace('.wav', '.segments')
    # if ground-truth exists
if os.path.isfile(gt_file):
[seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
flags_gt, class_names_gt = segs2flags(seg_start, seg_end, seg_labs, mt_step)
plot_res = False
time_stamps = numpy.array(range(len(cls)))*mt_step+mt_step/2.0
if plot_res:
fig = plt.figure()
if n_speakers > 0:
ax1 = fig.add_subplot(111)
else:
ax1 = fig.add_subplot(211)
ax1.set_yticks(numpy.array(range(len(class_names))))
ax1.axis((0, duration, -1, len(class_names)))
ax1.set_yticklabels(class_names)
ax1.plot(numpy.array(range(len(cls)))*mt_step+mt_step/2.0, cls)
if os.path.isfile(gt_file):
if plot_res:
ax1.plot(numpy.array(range(len(flags_gt))) *
mt_step + mt_step / 2.0, flags_gt, 'r')
purity_cluster_m, purity_speaker_m = \
evaluateSpeakerDiarization(cls, flags_gt)
print("{0:.1f}\t{1:.1f}".format(100 * purity_cluster_m,
100 * purity_speaker_m))
if plot_res:
plt.title("Cluster purity: {0:.1f}% - "
"Speaker purity: {1:.1f}%".format(100 * purity_cluster_m,
100 * purity_speaker_m))
if plot_res:
plt.xlabel("time (seconds)")
#print s_range, sil_all
if n_speakers<=0:
plt.subplot(212)
plt.plot(s_range, sil_all)
plt.xlabel("number of clusters");
plt.ylabel("average clustering's sillouette");
plt.show()
from itertools import groupby
i = 0
base_output_filename = filename.split('/')[-1]
for speaker, group in groupby(cls):
groupList = list(group)
init = int(time_stamps[i]*1000)
i = i + len(groupList)
end = int(time_stamps[i]*1000) if i < len(cls) else len(audio_file)
segment = audio_file[init:end]
segment.export(base_output_filename + '-speaker' + str(int(speaker)) + '-time-' + str(init) + '-' + str(end) + '.wav', format='wav')
print(speaker, init, end)
return cls
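# Illustrative only (the WAV path is hypothetical):
#   cls = speakerDiarization("example.wav", n_speakers=4, plot_res=True)
#   -> one cluster (speaker) index per mid-term window; speaker-specific WAV chunks
#      are also exported next to the script as a side effect of the loop above.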
def convertTimeStamp(timeStamp):
import datetime
return str(datetime.timedelta(seconds=timeStamp))
def speakerDiarizationEvaluateScript(folder_name, ldas):
'''
This function prints the cluster purity and speaker purity for
each WAV file stored in a provided directory (.SEGMENT files
are needed as ground-truth)
ARGUMENTS:
- folder_name: the full path of the folder where the WAV and
SEGMENT (ground-truth) files are stored
- ldas: a list of LDA dimensions (0 for no LDA)
'''
types = ('*.wav', )
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(folder_name, files)))
wavFilesList = sorted(wavFilesList)
# get number of unique speakers per file (from ground-truth)
N = []
for wav_file in wavFilesList:
gt_file = wav_file.replace('.wav', '.segments');
if os.path.isfile(gt_file):
[seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
N.append(len(list(set(seg_labs))))
else:
N.append(-1)
for l in ldas:
print("LDA = {0:d}".format(l))
for i, wav_file in enumerate(wavFilesList):
speakerDiarization(wav_file, N[i], 2.0, 0.2, 0.05, l, plot_res=False)
        print("")
def musicThumbnailing(x, fs, short_term_size=1.0, short_term_step=0.5,
thumb_size=10.0, limit_1 = 0, limit_2 = 1):
'''
This function detects instances of the most representative part of a
music recording, also called "music thumbnails".
A technique similar to the one proposed in [1], however a wider set of
audio features is used instead of chroma features.
In particular the following steps are followed:
     - Extract short-term audio features. Typical short-term window size: 1 second
     - Compute the self-similarity matrix, i.e. all pairwise similarities between feature vectors
     - Apply a diagonal mask as a moving average filter on the values of the self-similarity matrix.
       The size of the mask is equal to the desired thumbnail length.
     - Find the position of the maximum value of the new (filtered) self-similarity matrix.
       The audio segments that correspond to the diagonal around that position are the selected thumbnails.
ARGUMENTS:
- x: input signal
- fs: sampling frequency
- short_term_size: window size (in seconds)
- short_term_step: window step (in seconds)
     - thumb_size:    desired thumbnail size (in seconds)
RETURNS:
- A1: beginning of 1st thumbnail (in seconds)
- A2: ending of 1st thumbnail (in seconds)
- B1: beginning of 2nd thumbnail (in seconds)
- B2: ending of 2nd thumbnail (in seconds)
USAGE EXAMPLE:
import audioFeatureExtraction as aF
[fs, x] = basicIO.readAudioFile(input_file)
[A1, A2, B1, B2] = musicThumbnailing(x, fs)
[1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing
of popular music using chroma-based representations.
Multimedia, IEEE Transactions on, 7(1), 96-104.
'''
x = audioBasicIO.stereo2mono(x);
# feature extraction:
st_feats, _ = aF.stFeatureExtraction(x, fs, fs * short_term_size,
fs * short_term_step)
# self-similarity matrix
S = selfSimilarityMatrix(st_feats)
# moving filter:
M = int(round(thumb_size / short_term_step))
B = numpy.eye(M,M)
S = scipy.signal.convolve2d(S, B, 'valid')
# post-processing (remove main diagonal elements)
min_sm = numpy.min(S)
for i in range(S.shape[0]):
for j in range(S.shape[1]):
if abs(i-j) < 5.0 / short_term_step or i > j:
S[i,j] = min_sm;
# find max position:
S[0:int(limit_1 * S.shape[0]), :] = min_sm
S[:, 0:int(limit_1 * S.shape[0])] = min_sm
S[int(limit_2 * S.shape[0])::, :] = min_sm
S[:, int(limit_2 * S.shape[0])::] = min_sm
maxVal = numpy.max(S)
[I, J] = numpy.unravel_index(S.argmax(), S.shape)
#plt.imshow(S)
#plt.show()
# expand:
i1 = I
i2 = I
j1 = J
j2 = J
while i2-i1<M:
if i1 <=0 or j1<=0 or i2 >= S.shape[0]-2 or j2 >= S.shape[1]-2:
break
if S[i1-1, j1-1] > S[i2 + 1, j2 + 1]:
i1 -= 1
j1 -= 1
else:
i2 += 1
j2 += 1
return short_term_step * i1, short_term_step * i2, \
short_term_step * j1, short_term_step * j2, S
| 40.42794 | 203 | 0.587219 |
3db336886ae97fe4bdf97f98dbe18559bee8bc97 | 20,550 | py | Python | flux_combined_high_binding/model_691.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | ["MIT"] | null | null | null | flux_combined_high_binding/model_691.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | ["MIT"] | null | null | null | flux_combined_high_binding/model_691.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | ["MIT"] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 80000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 190000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2df, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1dr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2xf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1xr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
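# Illustrative only (not part of the exported model): a minimal simulation sketch,
# assuming PySB's ScipyOdeSimulator is available in the environment.
#   from pysb.simulator import ScipyOdeSimulator
#   import numpy as np
#   tspan = np.linspace(0, 20000, 100)
#   result = ScipyOdeSimulator(model, tspan=tspan).run()
#   print(result.observables['ParpC_obs'][-1])   # cleaved PARP at the final time point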
| 95.138889 | 798 | 0.804136 |
7fd87affb47a7320002a226362a75ebc201a96a5 | 2,347 | py | Python | main.py | nikeee/poor-mans-floyd | 77d429ffc662394c454c5b776022775958d22a14 | ["Unlicense"] | null | null | null | main.py | nikeee/poor-mans-floyd | 77d429ffc662394c454c5b776022775958d22a14 | ["Unlicense"] | null | null | null | main.py | nikeee/poor-mans-floyd | 77d429ffc662394c454c5b776022775958d22a14 | ["Unlicense"] | null | null | null |
#!/usr/bin/env python3
from typing import Optional
class Node:
value: int
next: Optional['Node']
def __init__(self, value: int):
self.value = value
def __str__(self):
return f'<{self.value}>'
def advance(self) -> Optional['Node']:
return self.next
def advance_by_two(self):
node = self.advance()
return node.advance() if node is not None else None
graph = Node(1) # 1
graph.next = Node(2) # 1 -> 2
graph.next.next = Node(3) # 1 -> 2 -> 3
graph.next.next.next = Node(4) # 1 -> 2 -> 3 -> 4
graph.next.next.next.next = Node(5) # 1 -> 2 -> 3 -> 4 -> 5
graph.next.next.next.next.next = graph.next.next # 1 -> 2 -> 3 -> 4 -> 5 -> 3
def find_cycle_set(graph: Node) -> Optional[Node]:
"""
Implements an O(n) space and O(n) time complexity algorithm for finding the start of a cycle.
Just notes down which nodes were visited.
"""
visited = set()
current_node = graph
while current_node is not None and current_node not in visited:
visited.add(current_node)
current_node = current_node.next
return current_node
def find_cycle_floyd(graph: Node) -> Optional[Node]:
"""
Implements an O(1) space and O(n) time complexity algorithm for finding the start of a cycle.
Also called Floyd algorithm, shown in this video: https://www.youtube.com/watch?v=pKO9UjSeLew
"""
## TODO: This can be removed
if graph.next is graph:
return graph
    # Let tortoise and hare advance from their current positions until they meet.
    # (Advancing from `graph` on every iteration would never move past the first
    # two nodes, so the loop would not terminate.)
    tortoise = graph
    hare = graph
    while True:
        tortoise = tortoise.advance()
        hare = hare.advance_by_two()
        if tortoise is None or hare is None:
            return None
        if hare is tortoise:
            break
    assert hare is tortoise
# both met at node hare/tortoise
# Now put hare back to start and let him run as slow as the tortoise
# Where they meet is the point of the cycle start
hare = graph
while hare is not tortoise:
hare = hare.advance()
tortoise = tortoise.advance()
if tortoise is None or hare is None:
return None
return hare
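# For the example graph built above (1 -> 2 -> 3 -> 4 -> 5 -> 3), both detection
# functions are expected to return the node where the cycle starts, i.e. <3>.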
def main():
c = find_cycle_set(graph)
print(f'Node where the cycle started (set): {c}')
c = find_cycle_floyd(graph)
print(f'Node where the cycle started (floyd): {c}')
if __name__ == "__main__":
main()
| 25.236559 | 97 | 0.624627 |
88b4def2415c21032dcb9d41dd729f6f90094679 | 10,923 | py | Python | libuavcan/dsdl_compiler/libuavcan_dsdl_compiler/__init__.py | tridge/uavcan_old | 6dd432c9742c22e1dd1638c7f91cf937e4bdb2f1 | ["MIT"] | null | null | null | libuavcan/dsdl_compiler/libuavcan_dsdl_compiler/__init__.py | tridge/uavcan_old | 6dd432c9742c22e1dd1638c7f91cf937e4bdb2f1 | ["MIT"] | null | null | null | libuavcan/dsdl_compiler/libuavcan_dsdl_compiler/__init__.py | tridge/uavcan_old | 6dd432c9742c22e1dd1638c7f91cf937e4bdb2f1 | ["MIT"] | null | null | null |
#
# UAVCAN DSDL compiler for libuavcan
#
# Copyright (C) 2014 Pavel Kirienko <pavel.kirienko@gmail.com>
#
'''
This module implements the core functionality of the UAVCAN DSDL compiler for libuavcan.
Supported Python versions: 3.2+, 2.7.
It accepts a list of root namespaces and produces the set of C++ header files for libuavcan.
It is based on the DSDL parsing package from pyuavcan.
'''
from __future__ import division, absolute_import, print_function, unicode_literals
import sys, os, logging, errno, re
from .pyratemp import Template
from pyuavcan import dsdl
# Python 2.7 compatibility
try:
str = unicode
except NameError:
pass
OUTPUT_FILE_EXTENSION = 'hpp'
OUTPUT_FILE_PERMISSIONS = 0o444 # Read only for all
TEMPLATE_FILENAME = os.path.join(os.path.dirname(__file__), 'data_type_template.tmpl')
__all__ = ['run', 'logger', 'DsdlCompilerException']
class DsdlCompilerException(Exception):
pass
logger = logging.getLogger(__name__)
def run(source_dirs, include_dirs, output_dir):
'''
This function takes a list of root namespace directories (containing DSDL definition files to parse), a
possibly empty list of search directories (containing DSDL definition files that can be referenced from the types
that are going to be parsed), and the output directory path (possibly nonexistent) where the generated C++
header files will be stored.
    Note that this module features lazy writes: if an output file already exists and its content is not going
    to change, it will not be overwritten. This avoids unnecessary recompilation of dependent object
    files.
Args:
source_dirs List of root namespace directories to parse.
        include_dirs    List of root namespace directories with referenced types (possibly empty). This list is
                        automatically extended with source_dirs.
        output_dir      Output directory path. Will be created if it doesn't exist.
'''
assert isinstance(source_dirs, list)
assert isinstance(include_dirs, list)
output_dir = str(output_dir)
types = run_parser(source_dirs, include_dirs + source_dirs)
if not types:
die('No type definitions were found')
logger.info('%d types total', len(types))
run_generator(types, output_dir)
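# Usage sketch for run() (the directory names below are illustrative placeholders,
# not taken from any particular project layout):
#
#   run(source_dirs=['dsdl/uavcan'],
#       include_dirs=[],
#       output_dir='include/dsdlc_generated')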
# -----------------
def pretty_filename(filename):
try:
a = os.path.abspath(filename)
r = os.path.relpath(filename)
return a if '..' in r else r
except ValueError:
return filename
def type_output_filename(t):
assert t.category == t.CATEGORY_COMPOUND
return t.full_name.replace('.', os.path.sep) + '.' + OUTPUT_FILE_EXTENSION
def makedirs(path):
try:
try:
os.makedirs(path, exist_ok=True) # May throw "File exists" when executed as root, which is wrong
except TypeError:
os.makedirs(path) # Python 2.7 compatibility
except OSError as ex:
if ex.errno != errno.EEXIST: # http://stackoverflow.com/questions/12468022
raise
def die(text):
raise DsdlCompilerException(str(text))
def run_parser(source_dirs, search_dirs):
try:
types = dsdl.parse_namespaces(source_dirs, search_dirs)
except dsdl.DsdlException as ex:
logger.info('Parser failure', exc_info=True)
die(ex)
return types
def run_generator(types, dest_dir):
try:
template_expander = make_template_expander(TEMPLATE_FILENAME)
dest_dir = os.path.abspath(dest_dir) # Removing '..'
makedirs(dest_dir)
for t in types:
logger.info('Generating type %s', t.full_name)
filename = os.path.join(dest_dir, type_output_filename(t))
text = generate_one_type(template_expander, t)
write_generated_data(filename, text)
except Exception as ex:
logger.info('Generator failure', exc_info=True)
die(ex)
def write_generated_data(filename, data):
dirname = os.path.dirname(filename)
makedirs(dirname)
# Lazy update - file will not be rewritten if its content is not going to change
if os.path.exists(filename):
with open(filename) as f:
existing_data = f.read()
if data == existing_data:
logger.info('Up to date [%s]', pretty_filename(filename))
return
logger.info('Rewriting [%s]', pretty_filename(filename))
os.remove(filename)
else:
logger.info('Creating [%s]', pretty_filename(filename))
# Full rewrite
with open(filename, 'w') as f:
f.write(data)
try:
os.chmod(filename, OUTPUT_FILE_PERMISSIONS)
except (OSError, IOError) as ex:
logger.warning('Failed to set permissions for %s: %s', pretty_filename(filename), ex)
def type_to_cpp_type(t):
if t.category == t.CATEGORY_PRIMITIVE:
cast_mode = {
t.CAST_MODE_SATURATED: '::uavcan::CastModeSaturate',
t.CAST_MODE_TRUNCATED: '::uavcan::CastModeTruncate',
}[t.cast_mode]
if t.kind == t.KIND_FLOAT:
return '::uavcan::FloatSpec< %d, %s >' % (t.bitlen, cast_mode)
else:
signedness = {
t.KIND_BOOLEAN: '::uavcan::SignednessUnsigned',
t.KIND_UNSIGNED_INT: '::uavcan::SignednessUnsigned',
t.KIND_SIGNED_INT: '::uavcan::SignednessSigned',
}[t.kind]
return '::uavcan::IntegerSpec< %d, %s, %s >' % (t.bitlen, signedness, cast_mode)
elif t.category == t.CATEGORY_ARRAY:
value_type = type_to_cpp_type(t.value_type)
mode = {
t.MODE_STATIC: '::uavcan::ArrayModeStatic',
t.MODE_DYNAMIC: '::uavcan::ArrayModeDynamic',
}[t.mode]
return '::uavcan::Array< %s, %s, %d >' % (value_type, mode, t.max_size)
elif t.category == t.CATEGORY_COMPOUND:
return '::' + t.full_name.replace('.', '::')
else:
raise DsdlCompilerException('Unknown type category: %s' % t.category)
def generate_one_type(template_expander, t):
t.short_name = t.full_name.split('.')[-1]
t.cpp_type_name = t.short_name + '_'
t.cpp_full_type_name = '::' + t.full_name.replace('.', '::')
# Dependencies (no duplicates)
def fields_includes(fields):
def detect_include(t):
if t.category == t.CATEGORY_COMPOUND:
return type_output_filename(t)
if t.category == t.CATEGORY_ARRAY:
return detect_include(t.value_type)
return list(sorted(set(filter(None, [detect_include(x.type) for x in fields]))))
if t.kind == t.KIND_MESSAGE:
t.cpp_includes = fields_includes(t.fields)
else:
t.cpp_includes = fields_includes(t.request_fields + t.response_fields)
t.cpp_namespace_components = t.full_name.split('.')[:-1]
t.has_default_dtid = t.default_dtid is not None
# Attribute types
def inject_cpp_types(attributes):
for a in attributes:
a.cpp_type = type_to_cpp_type(a.type)
if t.kind == t.KIND_MESSAGE:
inject_cpp_types(t.fields)
inject_cpp_types(t.constants)
t.all_attributes = t.fields + t.constants
else:
inject_cpp_types(t.request_fields)
inject_cpp_types(t.request_constants)
inject_cpp_types(t.response_fields)
inject_cpp_types(t.response_constants)
t.all_attributes = t.request_fields + t.request_constants + t.response_fields + t.response_constants
# Constant properties
def inject_constant_info(constants):
for c in constants:
if c.type.kind == c.type.KIND_FLOAT:
float(c.string_value) # Making sure that this is a valid float literal
c.cpp_value = c.string_value
else:
int(c.string_value) # Making sure that this is a valid integer literal
c.cpp_value = c.string_value
if c.type.kind == c.type.KIND_UNSIGNED_INT:
c.cpp_value += 'U'
if t.kind == t.KIND_MESSAGE:
inject_constant_info(t.constants)
else:
inject_constant_info(t.request_constants)
inject_constant_info(t.response_constants)
# Data type kind
t.cpp_kind = {
t.KIND_MESSAGE: '::uavcan::DataTypeKindMessage',
t.KIND_SERVICE: '::uavcan::DataTypeKindService',
}[t.kind]
# Generation
text = template_expander(t=t) # t for Type
text = '\n'.join(x.rstrip() for x in text.splitlines())
text = text.replace('\n\n\n\n\n', '\n\n').replace('\n\n\n\n', '\n\n').replace('\n\n\n', '\n\n')
text = text.replace('{\n\n ', '{\n ')
return text
def make_template_expander(filename):
'''
Templating is based on pyratemp (http://www.simple-is-better.org/template/pyratemp.html).
The pyratemp's syntax is rather verbose and not so human friendly, so we define some
custom extensions to make it easier to read and write.
The resulting syntax somewhat resembles Mako (which was used earlier instead of pyratemp):
Substitution:
${expression}
Line joining through backslash (replaced with a single space):
${foo(bar(very_long_arument=42, \
second_line=72))}
Blocks:
% for a in range(10):
% if a == 5:
${foo()}
% endif
% endfor
The extended syntax is converted into pyratemp's through regexp substitution.
'''
with open(filename) as f:
template_text = f.read()
# Backslash-newline elimination
template_text = re.sub(r'\\\r{0,1}\n\ *', r' ', template_text)
# Substitution syntax transformation: ${foo} ==> $!foo!$
template_text = re.sub(r'([^\$]{0,1})\$\{([^\}]+)\}', r'\1$!\2!$', template_text)
# Flow control expression transformation: % foo: ==> <!--(foo)-->
template_text = re.sub(r'(?m)^(\ *)\%\ *([^\:]+?):{0,1}$', r'\1<!--(\2)-->', template_text)
# Block termination transformation: <!--(endfoo)--> ==> <!--(end)-->
template_text = re.sub(r'\<\!--\(end[a-z]+\)--\>', r'<!--(end)-->', template_text)
# Pyratemp workaround.
# The problem is that if there's no empty line after a macro declaration, first line will be doubly indented.
# Workaround:
# 1. Remove trailing comments
# 2. Add a newline after each macro declaration
template_text = re.sub(r'\ *\#\!.*', '', template_text)
template_text = re.sub(r'(\<\!--\(macro\ [a-zA-Z0-9_]+\)--\>.*?)', r'\1\n', template_text)
# Preprocessed text output for debugging
# with open(filename + '.d', 'w') as f:
# f.write(template_text)
template = Template(template_text)
def expand(**args):
# This function adds one indentation level (4 spaces); it will be used from the template
args['indent'] = lambda text, idnt = ' ': idnt + text.replace('\n', '\n' + idnt)
return template(**args)
return expand
| 38.059233
| 117
| 0.639568
|
bbed86d03a3eb78165bf4d4950688e61dc8b5dcf
| 3,056
|
py
|
Python
|
kuri_objects_mapping/scripts/objects_mapping.py
|
kucars/kuri_mbzirc_challenge_3
|
9942aae773eb4d32971b43223e4fea1554c1c8c8
|
[
"BSD-3-Clause"
] | 4
|
2019-03-02T12:55:51.000Z
|
2019-07-23T08:45:17.000Z
|
kuri_objects_mapping/scripts/objects_mapping.py
|
kucars/kuri_mbzirc_challenge_3
|
9942aae773eb4d32971b43223e4fea1554c1c8c8
|
[
"BSD-3-Clause"
] | 2
|
2019-07-23T08:40:18.000Z
|
2019-07-23T13:22:18.000Z
|
kuri_objects_mapping/scripts/objects_mapping.py
|
kucars/kuri_mbzirc_challenge_3
|
9942aae773eb4d32971b43223e4fea1554c1c8c8
|
[
"BSD-3-Clause"
] | 2
|
2018-06-08T01:40:13.000Z
|
2019-07-23T11:24:22.000Z
|
#! /usr/bin/env python
#Copyright (c) 2016, Buti Al Delail
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#* Neither the name of kuri_mbzirc_challenge_3 nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import thread
import threading
import time
import mavros
import actionlib
from math import *
from mavros.utils import *
from mavros import setpoint as SP
from tf.transformations import quaternion_from_euler
from nav_msgs.msg import *
from kuri_msgs.msg import *
from mapping_action_server import MappingServer
class ObjectsMapping:
def __init__(self):
self.objects = []
self.objects_map = ObjectsMap()
client = actionlib.SimpleActionClient('TrackingAction', TrackingAction)
print "Waiting for tracker server"
client.wait_for_server()
goal = TrackingGoal()
goal.uav_id = 3
client.send_goal(goal)
print "Waiting for result"
client.wait_for_result()
print "Result:",client.get_result()
self.sub = rospy.Subscriber("TrackingAction/feedback",Object, self.callback)
def callback(self, actionServer):
        print 'Mapping: Receiving Tracked Objects --deprecated', actionServer
#for obj in objects.feedback.tracked_objects.objects:
## TODO: Check and process objects
# self.objects.append(obj)
#self.objects.append(actionServer.feedback.new_object)
#self.objects_map.objects = self.objects;
#self.objects_map.map = OccupancyGrid()
# if self.actionServer.hasGoal:
# self.actionServer.update(self.objects_map)
# else:
# self.actionServer.objects_map = self.objects_map
| 40.210526
| 84
| 0.737565
|
7240a9e72ee2d0e19528397ff80a5fbf07f69619
| 5,420
|
py
|
Python
|
features/eolearn/tests/test_radiometric_normalization.py
|
chorng/eo-learn
|
a1a3c6fa5568d398f5e43f5ad5aecdfeb05e8d3c
|
[
"MIT"
] | null | null | null |
features/eolearn/tests/test_radiometric_normalization.py
|
chorng/eo-learn
|
a1a3c6fa5568d398f5e43f5ad5aecdfeb05e8d3c
|
[
"MIT"
] | null | null | null |
features/eolearn/tests/test_radiometric_normalization.py
|
chorng/eo-learn
|
a1a3c6fa5568d398f5e43f5ad5aecdfeb05e8d3c
|
[
"MIT"
] | null | null | null |
"""
Credits:
Copyright (c) 2018-2019 Johannes Schmid (GeoVille)
Copyright (c) 2017-2022 Matej Aleksandrov, Matic Lubej, Devis Peressutti, Žiga Lukšič (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
from datetime import datetime
import copy
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from pytest import approx
from eolearn.core import FeatureType
from eolearn.core.eodata_io import FeatureIO
from eolearn.mask import MaskFeatureTask
from eolearn.features import (
ReferenceScenesTask,
BlueCompositingTask,
HOTCompositingTask,
MaxNDVICompositingTask,
MaxNDWICompositingTask,
MaxRatioCompositingTask,
HistogramMatchingTask,
)
@pytest.fixture(name="eopatch")
def eopatch_fixture(example_eopatch):
np.random.seed(0)
example_eopatch.mask["SCL"] = np.random.randint(0, 11, example_eopatch.data["BANDS-S2-L1C"].shape, np.uint8)
blue = BlueCompositingTask(
(FeatureType.DATA, "REFERENCE_SCENES"),
(FeatureType.DATA_TIMELESS, "REFERENCE_COMPOSITE"),
blue_idx=0,
interpolation="geoville",
)
blue.execute(example_eopatch)
return example_eopatch
DATA_TEST_FEATURE = FeatureType.DATA, "TEST"
DATA_TIMELESS_TEST_FEATURE = FeatureType.DATA_TIMELESS, "TEST"
@pytest.mark.parametrize(
"task, test_feature, expected_min, expected_max, expected_mean, expected_median",
(
[
MaskFeatureTask(
(FeatureType.DATA, "BANDS-S2-L1C", "TEST"),
(FeatureType.MASK, "SCL"),
mask_values=[0, 1, 2, 3, 8, 9, 10, 11],
),
DATA_TEST_FEATURE,
0.0002,
1.4244,
0.21167801,
0.142,
],
[
ReferenceScenesTask(
(FeatureType.DATA, "BANDS-S2-L1C", "TEST"), (FeatureType.SCALAR, "CLOUD_COVERAGE"), max_scene_number=5
),
DATA_TEST_FEATURE,
0.0005,
0.5318,
0.16823094,
0.1404,
],
[
BlueCompositingTask(
(FeatureType.DATA, "REFERENCE_SCENES"),
(FeatureType.DATA_TIMELESS, "TEST"),
blue_idx=0,
interpolation="geoville",
),
DATA_TIMELESS_TEST_FEATURE,
0.0005,
0.5075,
0.11658352,
0.0833,
],
[
HOTCompositingTask(
(FeatureType.DATA, "REFERENCE_SCENES"),
(FeatureType.DATA_TIMELESS, "TEST"),
blue_idx=0,
red_idx=2,
interpolation="geoville",
),
DATA_TIMELESS_TEST_FEATURE,
0.0005,
0.5075,
0.117758796,
0.0846,
],
[
MaxNDVICompositingTask(
(FeatureType.DATA, "REFERENCE_SCENES"),
(FeatureType.DATA_TIMELESS, "TEST"),
red_idx=2,
nir_idx=7,
interpolation="geoville",
),
DATA_TIMELESS_TEST_FEATURE,
0.0005,
0.5075,
0.13430128,
0.0941,
],
[
MaxNDWICompositingTask(
(FeatureType.DATA, "REFERENCE_SCENES"),
(FeatureType.DATA_TIMELESS, "TEST"),
nir_idx=6,
swir1_idx=8,
interpolation="geoville",
),
DATA_TIMELESS_TEST_FEATURE,
0.0005,
0.5318,
0.2580135,
0.2888,
],
[
MaxRatioCompositingTask(
(FeatureType.DATA, "REFERENCE_SCENES"),
(FeatureType.DATA_TIMELESS, "TEST"),
blue_idx=0,
nir_idx=6,
swir1_idx=8,
interpolation="geoville",
),
DATA_TIMELESS_TEST_FEATURE,
0.0006,
0.5075,
0.13513365,
0.0958,
],
[
HistogramMatchingTask(
(FeatureType.DATA, "BANDS-S2-L1C", "TEST"), (FeatureType.DATA_TIMELESS, "REFERENCE_COMPOSITE")
),
DATA_TEST_FEATURE,
-0.049050678,
0.68174845,
0.1165936,
0.08370649,
],
),
)
def test_haralick(eopatch, task, test_feature, expected_min, expected_max, expected_mean, expected_median):
initial_patch = copy.deepcopy(eopatch)
eopatch = task.execute(eopatch)
# Test that no other features were modified
for feature, value in initial_patch.data.items():
if isinstance(value, FeatureIO):
value = value.load()
assert_array_equal(value, eopatch.data[feature], err_msg=f"EOPatch data feature '{feature}' has changed")
assert isinstance(eopatch.timestamp, list), "Expected a list of timestamps"
assert isinstance(eopatch.timestamp[0], datetime), "Expected timestamps of type datetime.datetime"
delta = 1e-3
result = eopatch[test_feature]
assert np.nanmin(result) == approx(expected_min, abs=delta)
assert np.nanmax(result) == approx(expected_max, abs=delta)
assert np.nanmean(result) == approx(expected_mean, abs=delta)
assert np.nanmedian(result) == approx(expected_median, abs=delta)
| 30.971429
| 118
| 0.569557
|
f6daec85fffe1ba9b290ed0c873b2ee245a7a6ef
| 2,846
|
py
|
Python
|
torchsr/datasets/reds.py
|
mgm52/torchSR
|
7d9fb3f2f95b58fa3bb0e9ab00dec4c1a2265ac2
|
[
"MIT"
] | 73
|
2021-04-24T20:50:47.000Z
|
2022-03-30T09:36:35.000Z
|
torchsr/datasets/reds.py
|
DefTruth/torchSR
|
115c9177cfbe170524e281fc1c2f82af3349bb6a
|
[
"MIT"
] | 7
|
2021-05-18T09:57:43.000Z
|
2022-03-21T09:02:09.000Z
|
torchsr/datasets/reds.py
|
DefTruth/torchSR
|
115c9177cfbe170524e281fc1c2f82af3349bb6a
|
[
"MIT"
] | 7
|
2021-04-26T23:12:42.000Z
|
2022-03-04T22:50:50.000Z
|
import os
from typing import Callable, List, Optional, Tuple, Union
from .common import FolderByDir, pil_loader
class REDS(FolderByDir):
"""`REDS <https://seungjunnah.github.io/Datasets/reds>` Superresolution Dataset
Args:
root (string): Root directory for the dataset.
scale (int, optional): The upsampling ratio: 2, 3, 4 or 8.
track (str, optional): The downscaling method: bicubic, unknown, real_mild,
real_difficult, real_wild.
split (string, optional): The dataset split, supports ``train``, ``val`` or 'test'.
transform (callable, optional): A function/transform that takes in several PIL images
and returns a transformed version. It is not a torchvision transform!
loader (callable, optional): A function to load an image given its path.
download (boolean, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
predecode (boolean, optional): If true, decompress the image files to disk
preload (boolean, optional): If true, load all images in memory
"""
urls = [
]
track_dirs = {
('hr', 'train', 1) : os.path.join('train', 'train_sharp')
, ('blur', 'train', 1) : os.path.join('train', 'train_blur')
, ('blur_comp', 'train', 1) : os.path.join('train', 'train_blur_comp')
, ('bicubic', 'train', 4) : os.path.join('train', 'train_sharp_bicubic')
, ('blur_bicubic', 'train', 4) : os.path.join('train', 'train_blur_bicubic')
, ('hr', 'val', 1) : os.path.join('val', 'val_sharp')
, ('blur', 'val', 1) : os.path.join('val', 'val_blur')
, ('blur_comp', 'val', 1) : os.path.join('val', 'val_blur_comp')
, ('bicubic', 'val', 4) : os.path.join('val', 'val_sharp_bicubic')
, ('blur_bicubic', 'val', 4) : os.path.join('val', 'val_blur_bicubic')
, ('blur', 'test', 1) : os.path.join('test', 'test_blur')
, ('blur_comp', 'test', 1) : os.path.join('test', 'test_blur_comp')
, ('bicubic', 'test', 4) : os.path.join('test', 'test_sharp_bicubic')
, ('blur_bicubic', 'test', 4) : os.path.join('test', 'test_blur_bicubic')
}
def __init__(
self,
root: str,
scale: Optional[int] = None,
track: Union[str, List[str]] = 'bicubic',
split: str = 'train',
transform: Optional[Callable] = None,
loader: Callable = pil_loader,
download: bool = False,
predecode: bool = False,
preload: bool = False):
super(REDS, self).__init__(os.path.join(root, 'REDS'),
scale, track, split, transform,
loader, download, predecode, preload)
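# Usage sketch (the root path and options below are illustrative placeholders):
#
#   dataset = REDS('./data', scale=4, track='bicubic', split='train', download=False)
#   lr, hr = dataset[0]  # assuming FolderByDir yields (low-res, high-res) image pairs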
| 47.433333
| 93
| 0.587491
|
5992050ca04656ac4747d4061661f90452004628
| 2,173
|
py
|
Python
|
ibis/backends/pandas/tests/execution/test_structs.py
|
matthewmturner/ibis
|
9360bf9878e78c06cadd6733abd04bf98ee0a090
|
[
"Apache-2.0"
] | null | null | null |
ibis/backends/pandas/tests/execution/test_structs.py
|
matthewmturner/ibis
|
9360bf9878e78c06cadd6733abd04bf98ee0a090
|
[
"Apache-2.0"
] | null | null | null |
ibis/backends/pandas/tests/execution/test_structs.py
|
matthewmturner/ibis
|
9360bf9878e78c06cadd6733abd04bf98ee0a090
|
[
"Apache-2.0"
] | null | null | null |
from collections import OrderedDict
import pandas as pd
import pandas.util.testing as tm
import pytest
import ibis
import ibis.expr.datatypes as dt
from ... import connect, execute
@pytest.fixture(scope="module")
def value():
return OrderedDict([("fruit", "pear"), ("weight", 0)])
@pytest.fixture(scope="module")
def struct_client(value):
df = pd.DataFrame(
{
"s": [
OrderedDict([("fruit", "apple"), ("weight", None)]),
value,
OrderedDict([("fruit", "pear"), ("weight", 1)]),
],
"key": list("aab"),
"value": [1, 2, 3],
}
)
return connect({"t": df})
@pytest.fixture
def struct_table(struct_client):
return struct_client.table(
"t",
schema={
"s": dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
},
)
def test_struct_field_literal(value):
struct = ibis.literal(value)
assert struct.type() == dt.Struct.from_tuples(
[("fruit", dt.string), ("weight", dt.int8)]
)
expr = struct.fruit
result = execute(expr)
assert result == "pear"
expr = struct.weight
result = execute(expr)
assert result == 0
def test_struct_field_series(struct_table):
t = struct_table
expr = t.s.fruit
result = expr.execute()
expected = pd.Series(["apple", "pear", "pear"], name="fruit")
tm.assert_series_equal(result, expected)
def test_struct_field_series_group_by_key(struct_table):
t = struct_table
expr = t.groupby(t.s.fruit).aggregate(total=t.value.sum())
result = expr.execute()
expected = pd.DataFrame(
[("apple", 1), ("pear", 5)], columns=["fruit", "total"]
)
tm.assert_frame_equal(result, expected)
def test_struct_field_series_group_by_value(struct_table):
t = struct_table
expr = t.groupby(t.key).aggregate(total=t.s.weight.sum())
result = expr.execute()
# these are floats because we have a NULL value in the input data
expected = pd.DataFrame([("a", 0.0), ("b", 1.0)], columns=["key", "total"])
tm.assert_frame_equal(result, expected)
| 25.267442
| 79
| 0.599632
|
d387b82fad2600089705a20bc3763b97bad8dd17
| 5,947
|
py
|
Python
|
Projects/3_Adversarial Search/my_custom_player.py
|
kdcarlsen/artificial-intelligence
|
6124dcc5c2caa6b4284dd287a75714cc2b52b0c1
|
[
"MIT"
] | null | null | null |
Projects/3_Adversarial Search/my_custom_player.py
|
kdcarlsen/artificial-intelligence
|
6124dcc5c2caa6b4284dd287a75714cc2b52b0c1
|
[
"MIT"
] | null | null | null |
Projects/3_Adversarial Search/my_custom_player.py
|
kdcarlsen/artificial-intelligence
|
6124dcc5c2caa6b4284dd287a75714cc2b52b0c1
|
[
"MIT"
] | null | null | null |
from sample_players import DataPlayer
# from algorithms import algorithms
# from evaluationFuncs import evaluationFuncs
_WIDTH = 11
_HEIGHT = 9
_SIZE = (_WIDTH + 2) * _HEIGHT - 2
class CustomPlayer(DataPlayer):
""" Implement your own agent to play knight's Isolation
The get_action() method is the only required method for this project.
You can modify the interface for get_action by adding named parameters
with default values, but the function MUST remain compatible with the
default interface.
**********************************************************************
NOTES:
- The test cases will NOT be run on a machine with GPU access, nor be
suitable for using any other machine learning techniques.
- You can pass state forward to your agent on the next turn by assigning
any pickleable object to the self.context attribute.
**********************************************************************
"""
def minimax(self, state, depth, evalFunc, playerId):
def min_value(state, depth, evalFunc, playerId):
if state.terminal_test(): return state.utility(playerId)
if depth <= 0: return evalFunc(state, playerId)
value = float("inf")
for action in state.actions():
value = min(value, max_value(state.result(action), depth - 1, evalFunc, playerId))
return value
def max_value(state, depth, evalFunc, playerId):
if state.terminal_test(): return state.utility(playerId)
if depth <= 0: return evalFunc(state, playerId)
value = float("-inf")
for action in state.actions():
value = max(value, min_value(state.result(action), depth - 1, evalFunc, playerId))
return value
return max(state.actions(), key=lambda x: min_value(state.result(x), depth - 1, evalFunc, playerId))
def move_difference(self, gameState, playerId):
player_loc = gameState.locs[playerId]
        opp_loc = gameState.locs[1 - playerId]
player_libs = gameState.liberties(player_loc)
opp_libs = gameState.liberties(opp_loc)
return len(player_libs) - len(opp_libs)
def centeredness(self, gameState, playerId):
# Goal: Maximize self-centeredness while preferring opponents at the edge
# Reasoning: In the middle of the board you are more flexible then at the edges
player_loc = gameState.locs[playerId]
        opp_loc = gameState.locs[1 - playerId]
#Process player location
player_xy = self.computeXy(player_loc)
x_centeredness = 5-abs(5-player_xy[0])
y_centeredness = 4-abs(4-player_xy[1])
player_centeredness = x_centeredness + y_centeredness
#Process opponent location
opp_xy = self.computeXy(opp_loc)
opp_x_cent = 5-abs(5-opp_xy[0])
opp_y_cent = 4-abs(4-opp_xy[1])
opp_centeredness = opp_x_cent + opp_y_cent
return player_centeredness - opp_centeredness
def centeredness_and_libs(self, gameState, playerId):
center_score = self.centeredness(gameState, playerId)
liberties_score = self.move_difference(gameState,playerId)
# Do some kind of fancy weighting
return liberties_score*2 + center_score*1
def computeXy(self, ind):
return (ind % (_WIDTH + 2), ind // (_WIDTH + 2))
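    # Worked example of the centeredness heuristic (illustrative; with _WIDTH = 11
    # each board row spans _WIDTH + 2 = 13 cells):
    #   computeXy(30) -> (30 % 13, 30 // 13) = (4, 2)
    #   x_centeredness = 5 - |5 - 4| = 4, y_centeredness = 4 - |4 - 2| = 2
    #   player_centeredness = 4 + 2 = 6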
def get_action(self, state):
""" Employ an adversarial search technique to choose an action
        available in the current state.
This method must call self.queue.put(ACTION) at least once, and may
call it as many times as you want; the caller will be responsible
for cutting off the function after the search time limit has expired.
See RandomPlayer and GreedyPlayer in sample_players for more examples.
**********************************************************************
NOTE:
- The caller is responsible for cutting off search, so calling
get_action() from your own code will create an infinite loop!
Refer to (and use!) the Isolation.play() function to run games.
**********************************************************************
"""
# TODO: Replace the example implementation below with your own search
# method by combining techniques from lecture
#
# EXAMPLE: choose a random move without any search--this function MUST
# call self.queue.put(ACTION) at least once before time expires
# (the timer is automatically managed for you)
#import random
#self.queue.put(random.choice(state.actions()))
import random
SEARCH_DEPTH_MAX = 4
EVAL_FUNC = self.centeredness_and_libs
### BASELINE, NO ITERATIVE DEEPENING ###
# randomly select a move as player 1 or 2 on an empty board, otherwise
# return the optimal minimax move at a fixed search depth of 3 plies
if state.ply_count < 2:
self.queue.put(random.choice(state.actions()))
else:
bestMove = self.minimax(state=state, depth=SEARCH_DEPTH_MAX, evalFunc=EVAL_FUNC, playerId=self.player_id)
self.queue.put(bestMove)
### ITERATIVE DEEPENING AND SMART START SELECTION INCLUDED ###
# if state.ply_count < 2:
# self.queue.put(random.choice(state.actions()))
# # self.context = list()
# else:
# best_move = None
# # self.context.append(0)
# for depth in range(1, SEARCH_DEPTH_MAX+1):
# best_move = algorithms.minimax(state, depth=depth, evalFunc=EVAL_FUNC, playerId=self.player_id)
# self.queue.put(best_move)
# # self.context = depth
# # print("Average Depth Searched: " + str(sum(self.context)/len(self.context)))
# # print("Max Depth Searched: " + str(max(self.context)))
| 41.880282
| 117
| 0.624685
|
a6c25e1b5039c978603c7e689d1cf937f52c7e91
| 1,631
|
py
|
Python
|
plugins/vmray/komand_vmray/actions/submit_file/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46
|
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/vmray/komand_vmray/actions/submit_file/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386
|
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/vmray/komand_vmray/actions/submit_file/action.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43
|
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
import komand
from .schema import SubmitFileInput, SubmitFileOutput
# Custom imports below
import base64
class SubmitFile(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="submit_file",
description="Submit file for analysis",
input=SubmitFileInput(),
output=SubmitFileOutput(),
)
def run(self, params={}):
file_ = params.get("file", None)
file_name = file_.get("filename")
optional_params = params.get("optional_params")
analyzer_mode = params.get("analyzer_mode")
if analyzer_mode != "default":
optional_params["analyzer_mode"] = analyzer_mode
try:
file_bytes = base64.b64decode(file_.get("content"))
        except Exception:
raise Exception("Error decoding base64, contents of the file must be encoded with base64!")
mime_types, check_pass = self.connection.api.check_filetype(file_bytes)
if check_pass:
self.logger.info(f"File types {mime_types} found for file {file_name} and are supported by VMRay")
resp = self.connection.api.submit_file(file_name, file_bytes, optional_params)
clean_data = komand.helper.clean(resp)
return {"results": clean_data}
else:
self.logger.error(f"File types, not supported by VMRay: {mime_types}")
self.logger.error(f"Here is a list of supported file types {self.connection.api.SUPPORTED_FILETYPES}")
return {"results": {"errors": [{"files": f"File types found are not supported by VMRay {mime_types}"}]}}
| 39.780488
| 116
| 0.646229
|
54eee5cd28339664107001236657aab8deb34e50
| 3,455
|
py
|
Python
|
optimisation_studies/overvoltage/5_plot_performance.py
|
sstcam/sstcam-simulation
|
3fb67ba64329c201d3995971e5f377c5ec71b18e
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T23:26:36.000Z
|
2019-12-23T23:26:36.000Z
|
optimisation_studies/overvoltage/5_plot_performance.py
|
cta-chec/sstCASSIM
|
75bb863675991f1a36b7d430f9253ae09416f33e
|
[
"BSD-3-Clause"
] | 6
|
2020-09-18T10:59:41.000Z
|
2022-03-15T11:01:49.000Z
|
optimisation_studies/overvoltage/5_plot_performance.py
|
cta-chec/sstCASSIM
|
75bb863675991f1a36b7d430f9253ae09416f33e
|
[
"BSD-3-Clause"
] | 2
|
2020-04-14T08:01:01.000Z
|
2021-11-30T12:11:17.000Z
|
import numpy as np
import pandas as pd
from CHECLabPy.plotting.setup import Plotter
from CHECLabPy.plotting.resolutions import ChargeResolutionPlotter
from scipy.interpolate import RegularGridInterpolator
from IPython import embed
# Requirements
CRREQ_2PE = ChargeResolutionPlotter.requirement(np.array([2]))[0]
CRREQ_20PE = ChargeResolutionPlotter.requirement(np.array([20]))[0]
CRREQ_200PE = ChargeResolutionPlotter.requirement(np.array([200]))[0]
CRREQ_2000PE = ChargeResolutionPlotter.requirement(np.array([2000]))[0]
CRREQ_2PE_50PDE = ChargeResolutionPlotter.requirement(np.array([2*0.5]))[0]
CRREQ_20PE_50PDE = ChargeResolutionPlotter.requirement(np.array([20*0.5]))[0]
CRREQ_200PE_50PDE = ChargeResolutionPlotter.requirement(np.array([200*0.5]))[0]
CRREQ_2000PE_50PDE = ChargeResolutionPlotter.requirement(np.array([2000*0.5]))[0]
MINIMGAMP_GAMMA_25PDE = 250 * 0.25
MINIMGAMP_PROTON_25PDE = 480 * 0.25
MINIMGAMP_GAMMA_50PDE = 250 * 0.5
MINIMGAMP_PROTON_50PDE = 480 * 0.5
class MIAContourPlot(Plotter):
def __init__(self, interpolator, talk):
self.interpolator = interpolator
super().__init__(talk=talk)
def plot_opct_vs_nsb(self, opct: np.ndarray, nsb: np.ndarray, mv_per_pe: float):
xg, yg, zg = np.meshgrid(opct, nsb, mv_per_pe, indexing='ij')
opct = xg.ravel()
nsb = yg.ravel()
mv_per_pe = zg.ravel()
mia = self.interpolator(opct, nsb, mv_per_pe)
c = self.ax.tricontourf(opct, nsb, mia, 15)
self.ax.set_xlabel("OCT")
self.ax.set_ylabel("NSB (MHz)")
cb = self.fig.colorbar(c, ax=self.ax, label="Minimum Image Amplitude")
def plot_mv_per_pe_vs_nsb(self, mv_per_pe: np.ndarray, nsb: np.ndarray, opct: float):
xg, yg, zg = np.meshgrid(opct, nsb, mv_per_pe, indexing='ij')
opct = xg.ravel()
nsb = yg.ravel()
mv_per_pe = zg.ravel()
mia = self.interpolator(opct, nsb, mv_per_pe)
c = self.ax.tricontourf(mv_per_pe, nsb, mia, 15)
self.ax.set_xlabel("mV per p.e.")
self.ax.set_ylabel("NSB (MHz)")
cb = self.fig.colorbar(c, ax=self.ax, label="Minimum Image Amplitude")
class MinimumImageAmplitudeInterpolator:
def __init__(self, df):
xcol, ycol, zcol, vcol = "opct", "nsb_rate", "mv_per_pe", "minimum_image_amplitude"
df = df.sort_values(by=[xcol, ycol, zcol])
xvals = df[xcol].unique()
yvals = df[ycol].unique()
zvals = df[zcol].unique()
vvals = df[vcol].values.reshape(len(xvals), len(yvals), len(zvals))
self.f = RegularGridInterpolator((xvals, yvals, zvals), vvals)
def __call__(self, opct, nsb, mv_per_pe):
pts = np.column_stack([opct, nsb, mv_per_pe])
return self.f(pts)
def main():
with pd.HDFStore("performance.h5", mode='r') as store:
df_ = store['data']
df_g = df_.loc[df_['shower_primary_id'] == 0]
df_p = df_.loc[df_['shower_primary_id'] == 101]
talk = True
interpolator = MinimumImageAmplitudeInterpolator(df_g)
opct = np.linspace(0, 0.5, 100)
nsb = np.linspace(0, 50, 100)
mv_per_pe = np.linspace(0.4, 4, 100)
p_2d = MIAContourPlot(interpolator, talk=talk)
p_2d.plot_opct_vs_nsb(opct, nsb, 4)
p_2d.save("mia_opct_vs_nsb.pdf")
p_2d = MIAContourPlot(interpolator, talk=talk)
p_2d.plot_mv_per_pe_vs_nsb(mv_per_pe, nsb, 0.08)
p_2d.save("mia_mVperpe_vs_nsb.pdf")
if __name__ == '__main__':
main()
| 35.989583
| 91
| 0.678726
|
184269e428d5bb9b19887e50f34f9af0b34a9510
| 621
|
py
|
Python
|
d2go/modeling/modeldef/fbnet_modeldef_registry.py
|
wenliangzhao2018/d2go
|
a9dce74e5caf4c2260371a1abb603e3d5f14d763
|
[
"Apache-2.0"
] | 687
|
2021-03-03T07:50:15.000Z
|
2022-03-25T19:31:57.000Z
|
d2go/modeling/modeldef/fbnet_modeldef_registry.py
|
wenliangzhao2018/d2go
|
a9dce74e5caf4c2260371a1abb603e3d5f14d763
|
[
"Apache-2.0"
] | 193
|
2021-03-03T17:15:57.000Z
|
2022-03-31T03:13:47.000Z
|
d2go/modeling/modeldef/fbnet_modeldef_registry.py
|
wenliangzhao2018/d2go
|
a9dce74e5caf4c2260371a1abb603e3d5f14d763
|
[
"Apache-2.0"
] | 90
|
2021-03-03T16:08:36.000Z
|
2022-03-30T23:42:19.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
class FBNetV2ModelArch(object):
_MODEL_ARCH = {}
@staticmethod
def add(name, arch):
assert (
name not in FBNetV2ModelArch._MODEL_ARCH
), "Arch name '{}' is already existed".format(name)
FBNetV2ModelArch._MODEL_ARCH[name] = arch
@staticmethod
def add_archs(archs):
for name, arch in archs.items():
FBNetV2ModelArch.add(name, arch)
@staticmethod
def get(name):
return copy.deepcopy(FBNetV2ModelArch._MODEL_ARCH[name])
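# Usage sketch (the arch name and dictionary contents are placeholders, not real
# model definitions):
#
#   FBNetV2ModelArch.add("example_arch", {"trunk": [], "head": []})
#   arch = FBNetV2ModelArch.get("example_arch")  # returns a deep copy of the arch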
| 23.884615
| 70
| 0.652174
|
f2821019a7cb7b0da8792e13299db6d724cb7e6e
| 757
|
py
|
Python
|
test_app.py
|
hypervectorio/ci-for-data-science-tutorial
|
8d8fb52ef5bbd13f5e5f9f6f77056f95fbeb8674
|
[
"MIT"
] | 2
|
2021-08-31T14:34:24.000Z
|
2021-11-12T09:14:11.000Z
|
test_app.py
|
hypervectorio/ci-for-data-science-tutorial
|
8d8fb52ef5bbd13f5e5f9f6f77056f95fbeb8674
|
[
"MIT"
] | 2
|
2021-06-08T13:29:22.000Z
|
2021-07-07T13:02:20.000Z
|
test_app.py
|
hypervectorio/ci-for-data-science-tutorial
|
8d8fb52ef5bbd13f5e5f9f6f77056f95fbeb8674
|
[
"MIT"
] | 1
|
2021-08-31T14:34:29.000Z
|
2021-08-31T14:34:29.000Z
|
import hypervector
import pytest
from app import get_prediction
hypervector.API_KEY = "YOUR_API_KEY"
@pytest.fixture
def hypervector_fixture():
definition = hypervector.Definition.get("YOUR_DEFINITION_UUID")
ensemble = definition.ensembles[0]
hypervectors = ensemble.hypervectors()
benchmark = ensemble.benchmarks[0]
return hypervectors, benchmark
def test_single_prediction():
test_case = [0, 0, 0, 0]
result = get_prediction(test_case)['prediction']
assert result == [1]
def test_bulk_prediction(hypervector_fixture):
hypervectors, benchmark = hypervector_fixture
results = get_prediction(hypervectors)['prediction']
assertion = benchmark.assert_equal(results)
assert assertion['asserted'] is True
| 25.233333
| 67
| 0.754293
|
370a47dcb0753f25224f60bedb244fcb21d0553a
| 273
|
py
|
Python
|
src/quanguru/extensions/__init__.py
|
AngsarM/QuanGuru
|
5db6105f843bbc78c2d5b1547e32d494fbe10b8d
|
[
"BSD-3-Clause"
] | 9
|
2021-05-23T06:30:45.000Z
|
2021-12-27T13:33:54.000Z
|
src/quanguru/extensions/__init__.py
|
cahitkargi/QuanGuru
|
9b5c94465cd58bc32f6ff845f29dfdec7e0f9075
|
[
"BSD-3-Clause"
] | 26
|
2022-03-18T02:40:54.000Z
|
2022-03-25T07:00:25.000Z
|
src/quanguru/extensions/__init__.py
|
cahitkargi/QuanGuru
|
9b5c94465cd58bc32f6ff845f29dfdec7e0f9075
|
[
"BSD-3-Clause"
] | 5
|
2021-05-23T06:30:24.000Z
|
2022-02-04T02:40:08.000Z
|
r"""
Contains certain extensions, such as saving methods.
.. currentmodule:: quanguru.extensions
Modules
-------
.. autosummary::
    saveReadH5
    saveReadCSV
    _helpers
"""
from ._helpers import *
from .saveReadH5 import *
from .saveReadCSV import *
| 15.166667
| 56
| 0.630037
|
8c9ea19996ef2989fbc8f3a1e8074ddf67a8b6d9
| 8,480
|
py
|
Python
|
fab/depends.py
|
zhester/fab
|
a453e6f02092af11848693137a45d975c7c4dca4
|
[
"BSD-2-Clause"
] | null | null | null |
fab/depends.py
|
zhester/fab
|
a453e6f02092af11848693137a45d975c7c4dca4
|
[
"BSD-2-Clause"
] | null | null | null |
fab/depends.py
|
zhester/fab
|
a453e6f02092af11848693137a45d975c7c4dca4
|
[
"BSD-2-Clause"
] | null | null | null |
#=============================================================================
#
# Dependency Discovery Tool
#
#=============================================================================
"""
Dependency Discovery Tool
=========================
Allows programs to determine dependency chains based on project-specific
rules. Most commonly, dependency relationships are useful for software build
systems, but can also be used for content management, testing, or other
automated production systems.
Usage Examples
--------------
import depends
# Global configuration interface.
depends.setup( paths = [ 'includes' ] )
# Create a dependency scanner.
# Note: Directory of a source file is always scanned.
# Note: The scanning type and rules are usually detected automatically.
scanner = depends.scanner( 'path/to/sourcefile.c' )
# Local configuration interface.
scanner.setup( paths = [ 'path/to/includes' ] )
# Iterator supported to retrieve all dependencies.
# Note: Each dependency contains complete path information.
for dep in scanner:
print( 'Dependency:', dep )
#=============================================================================
class MyRule( Rule ):
'''
Example of a customized dependency scanning and detection rule.
'''
# File extensions used to determine what files should match this rule.
extensions = ( 'ext1', 'ext2' )
# Patterns used to determine dependencies based on file content.
extracts = ( r'!import\s+['"]([^'"]+)['"]', )
# Patterns used to determine what files should match this rule.
names = ( r'special_\S+\.ext$', )
#=========================================================================
def check_source( self, path, match = None ):
'''
Called each time a matching source file is found.
Note: If the `extracts` property can comprehensively determine
dependencies for a rule, this method does not need to be defined.
@param path The complete path to the matched source file
@param match A possible match object from file name matching
@return An iterable object that enumerates all dependencies
'''
return ()
# Add the custom rule to the dependency scanning system.
depends.addrule( MyRule )
"""
import os
import re
__version__ = '0.0.0'
#=============================================================================
class Rule( object ):
"""
Models a dependency rule.
"""
# Patterns used to determine what files should match this rule.
names = ()
# File extensions used to determine what files should match this rule.
extensions = ()
# Patterns used to determine dependencies based on file content.
extracts = ()
#=========================================================================
def __init__( self ):
"""
Initializes a Rule object.
"""
# The result of the most recent name pattern match test.
self.match = None
#=========================================================================
def check_source( self, path, match = None ):
"""
Called each time a matching source file is found.
Note: If the `extracts` property can comprehensively determine
dependencies for a rule, this method does not need to be defined.
@param path The complete path to the matched source file
@param match A possible match object from file name matching
@return An iterable object that enumerates all dependencies
"""
### ZIH
raise NotImplementedError()
#=========================================================================
def match_name( self, path ):
"""
Tests a file name to see if it matches this rule.
@param path A complete path to the file name to test
@return True for a matching file name
"""
# Patterns based on file name extensions.
patterns = [ r'.+\.' + ext + '$' for ext in self.extensions ]
# Special file name patterns.
patterns += list( self.names )
# Scan all patterns against this file name.
for pattern in patterns:
match = re.match( pattern, path )
if match is not None:
self.match = match
return True
# File name did not match any patterns.
self.match = None
return False
#=============================================================================
class CRule( Rule ):
"""
C code dependency rule.
"""
extensions = ( 'c', 'h' )
extracts = ( r'#include\s*["]([^"]+)["]', )
#=============================================================================
class Scanner( object ):
"""
Provides an interface for defining dependency scanning techniques.
"""
#=========================================================================
def __init__( self, path ):
"""
Initializes a Scanner object.
@param path The path to the source file to scan for dependencies
"""
# Path to file being scanned for dependencies.
self._path = path
        # Per-scanner configuration starts with a copy of the global configuration.
        # Lists are copied too so per-scanner changes do not leak into the module config.
        self._conf = { key : ( list( value ) if isinstance( value, list ) else value )
                       for key, value in _conf.items() }
# Directory to this file.
path_dir = os.path.dirname( self._path )
# Make sure current path is in list.
if path_dir not in self._conf[ 'paths' ]:
self._conf[ 'paths' ].insert( 0, path_dir )
#=========================================================================
def __iter__( self ):
"""
Provides iterable object support.
@return An iterable object that yields all dependencies for the
current scanning context
"""
### ZIH
raise NotImplementedError()
#=========================================================================
def setup( self, **kwargs ):
"""
Scanner configuration function.
@param kwargs Keyword arguments specify configuration data
paths : List of paths to append to path list
"""
_setup_dict( self._conf, **kwargs )
#=============================================================================
# Module Variables
# Module-level configuration.
_conf = {
'paths' : [],
'rules' : []
}
#=============================================================================
# Module Interface Functions
#=============================================================================
def addrule( rule ):
"""
Adds a rule to the list of scanning/detection rules.
@param rule The rule to add to the list of scanning/detection rules
"""
_setup_dict( _conf, rules = [ rule ] )
#=============================================================================
def scanner( path ):
"""
Creates a dependency scanning object for a given source file name.
ZIH
"""
# Test for an assumed rule list.
if len( _conf[ 'rules' ] ) == 0:
_conf[ 'rules' ] = [ CRule ]
# Create the dependency scanner for the requested file name.
return Scanner( path )
#=============================================================================
def setup( **kwargs ):
"""
Module-level configuration function.
ZIH
"""
_setup_dict( _conf, **kwargs )
#=============================================================================
# Module Private Functions
#=============================================================================
def _setup_dict( conf, **kwargs ):
"""
Provides normalized dictionary configuration management.
@param conf The target configuration dictionary
@param kwargs Keyword arguments used to update the configuration
"""
# Append-mode lists.
applists = ( 'paths', 'rules' )
# Existing append-mode lists.
oldlists = {}
# Save any lists that need to have items appended to them.
for alist in applists:
if alist in kwargs:
oldlists[ alist ] = conf[ alist ]
# Update all other config values.
conf.update( kwargs )
# Restore the saved lists.
for alist in oldlists:
conf[ alist ] = oldlists[ alist ]
# Append any new entries to the list.
for newitem in kwargs[ alist ]:
if newitem not in conf[ alist ]:
conf[ alist ].append( newitem )
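# Behaviour sketch (illustrative): list-type settings accumulate across calls
# instead of being replaced, so repeated configuration only appends new entries.
#
#   setup( paths = [ 'includes' ] )
#   setup( paths = [ 'vendor/includes' ] )   # _conf[ 'paths' ] now holds both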
| 28.843537
| 78
| 0.505425
|
04108873a40aa7e8ea1c9710a18ab1de40f44f67
| 644
|
py
|
Python
|
disco/extensions/upgrade_simulation/upgrade_inputs.py
|
NREL/disco
|
19afa1c397c6c24e37222f6cbf027eb88833beda
|
[
"BSD-3-Clause"
] | 2
|
2022-03-11T20:04:34.000Z
|
2022-03-14T22:25:29.000Z
|
disco/extensions/upgrade_simulation/upgrade_inputs.py
|
NREL/disco
|
19afa1c397c6c24e37222f6cbf027eb88833beda
|
[
"BSD-3-Clause"
] | 4
|
2022-03-11T17:48:50.000Z
|
2022-03-17T21:39:47.000Z
|
disco/extensions/upgrade_simulation/upgrade_inputs.py
|
NREL/disco
|
19afa1c397c6c24e37222f6cbf027eb88833beda
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from jade.utils.utils import load_data
from disco.distribution.distribution_inputs import DistributionInputs
from disco.extensions.upgrade_simulation.upgrade_parameters import UpgradeParameters
class UpgradeInputs(DistributionInputs):
def __init__(self, base_directory):
super().__init__(base_directory)
def _parse_config_files(self):
filename = os.path.join(self._base, self._CONFIG_FILE)
data = load_data(filename)
for job_data in data:
job = UpgradeParameters(**job_data)
assert job.name not in self._parameters
self._parameters[job.name] = job
| 30.666667
| 84
| 0.726708
|
9d3c6120bbb57c741faa7b645dc3bd84ba58e98c
| 2,101
|
py
|
Python
|
tests/run_sanity_checks.py
|
alvarobartt/serving-tensorflow-models
|
e70a4ea191cf5e888277b8bb72f3934931e270c8
|
[
"MIT"
] | 42
|
2021-02-14T18:52:25.000Z
|
2022-02-08T16:04:09.000Z
|
tests/run_sanity_checks.py
|
alvarobartt/serving-tensorflow-models
|
e70a4ea191cf5e888277b8bb72f3934931e270c8
|
[
"MIT"
] | 1
|
2022-01-25T10:52:40.000Z
|
2022-01-25T11:04:24.000Z
|
tests/run_sanity_checks.py
|
alvarobartt/serving-tensorflow-models
|
e70a4ea191cf5e888277b8bb72f3934931e270c8
|
[
"MIT"
] | 9
|
2021-03-08T16:16:59.000Z
|
2022-02-26T17:38:26.000Z
|
# Copyright 2021 Alvaro Bartolome, alvarobartt @ GitHub
# See LICENSE for details.
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Mapping of ids to labels (The Simpsons characters)
MAPPING = {
0: "abraham_grampa_simpson", 1: "apu_nahasapeemapetilon", 2: "barney_gumble", 3: "bart_simpson",
4: "carl_carlson", 5: "charles_montgomery_burns", 6: "chief_wiggum", 7: "comic_book_guy",
8: "disco_stu", 9: "edna_krabappel", 10: "groundskeeper_willie", 11: "homer_simpson",
12: "kent_brockman", 13: "krusty_the_clown", 14: "lenny_leonard", 15: "lisa_simpson",
16: "maggie_simpson", 17: "marge_simpson", 18: "martin_prince", 19: "mayor_quimby",
20: "milhouse_van_houten", 21: "moe_szyslak", 22: "ned_flanders", 23: "nelson_muntz",
24: "patty_bouvier", 25: "principal_skinner", 26: "professor_john_frink", 27: "ralph_wiggum",
28: "selma_bouvier", 29: "sideshow_bob", 30: "snake_jailbird", 31: "waylon_smithers"
}
def run_sanity_checks():
model = tf.keras.models.load_model("simpsonsnet/1")
    model.summary()
eval_datagen = ImageDataGenerator(rescale=1./255.)
eval_generator = eval_datagen.flow_from_directory(
directory="evaluation", class_mode='categorical', target_size=(224, 224),
batch_size=16, shuffle=False
)
loss, accuracy = model.evaluate(eval_generator)
with open("results.txt", "w") as f:
f.write(pd.DataFrame([{'accuracy': accuracy, 'loss': loss}]).to_markdown())
predictions = model.predict(eval_generator)
predictions = np.argmax(predictions, axis=1)
ground_truth = eval_generator.classes
conf_mat = tf.math.confusion_matrix(ground_truth, predictions)
conf_mat = pd.DataFrame(conf_mat.numpy(), index=list(MAPPING.values()), columns=list(MAPPING.values()))
plt.figure(figsize=(12,8))
sns.heatmap(conf_mat, annot=True)
plt.tight_layout()
plt.savefig("confusion_matrix.png")
if __name__ == "__main__":
run_sanity_checks()
| 36.224138
| 107
| 0.713946
|
22f9d667725e3c09af5c57744206ffc1923d44f4
| 1,553
|
py
|
Python
|
walle/config/settings.py
|
flying1020/walle-web
|
c7655a3a258c05dbcb3ba362780864b7d4bc221c
|
[
"Apache-2.0"
] | null | null | null |
walle/config/settings.py
|
flying1020/walle-web
|
c7655a3a258c05dbcb3ba362780864b7d4bc221c
|
[
"Apache-2.0"
] | null | null | null |
walle/config/settings.py
|
flying1020/walle-web
|
c7655a3a258c05dbcb3ba362780864b7d4bc221c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Application configuration."""
import os
from datetime import timedelta
class Config(object):
"""Base configuration."""
VERSION = '2.0.0'
SECRET_KEY = os.environ.get('WALLE_SECRET', 'secret-key')
APP_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
WTF_CSRF_ENABLED = False
DEBUG_TB_ENABLED = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
# Can be "memcached", "redis", etc.
CACHE_TYPE = 'simple'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
LOGIN_DISABLED = False
    # How long the session is kept.
PERMANENT_SESSION_LIFETIME = timedelta(days=1)
    # Deployment path of the front-end project
FE_PATH = os.path.abspath(PROJECT_ROOT + '/fe/') + '/'
AVATAR_PATH = '/avatar/'
UPLOAD_AVATAR = FE_PATH + AVATAR_PATH
    # Mail configuration
MAIL_SERVER = 'smtp.exmail.qq.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
MAIL_DEFAULT_SENDER = 'service@walle-web.io'
MAIL_USERNAME = 'service@walle-web.io'
MAIL_PASSWORD = 'Ki9y&3U82'
    # Logging
LOG_PATH = os.path.join(PROJECT_ROOT, 'logs')
LOG_PATH_ERROR = os.path.join(LOG_PATH, 'error.log')
LOG_PATH_INFO = os.path.join(LOG_PATH, 'info.log')
LOG_FILE_MAX_BYTES = 100 * 1024 * 1024
    # Keep 10 rotated log files
LOG_FILE_BACKUP_COUNT = 10
LOG_FORMAT = "%(asctime)s %(thread)d %(message)s"
    # Login cookie: avoids forcing re-login after the browser is closed
COOKIE_ENABLE = False
| 28.236364
| 81
| 0.676755
|
7d6fb64f97c29064d4daa2ab8f3596281ece8aa1
| 2,572
|
py
|
Python
|
app/old/former_init.py
|
SimonSkade/labplaner
|
5ca73237713faa0d423374fdcf0bf1185ebc11f8
|
[
"Apache-2.0"
] | 1
|
2021-06-06T17:56:44.000Z
|
2021-06-06T17:56:44.000Z
|
app/old/former_init.py
|
SimonSkade/labplaner
|
5ca73237713faa0d423374fdcf0bf1185ebc11f8
|
[
"Apache-2.0"
] | null | null | null |
app/old/former_init.py
|
SimonSkade/labplaner
|
5ca73237713faa0d423374fdcf0bf1185ebc11f8
|
[
"Apache-2.0"
] | null | null | null |
import os
from flask import Flask, render_template, request, redirect, url_for, flash, g
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_marshmallow import Marshmallow
from werkzeug.exceptions import NotFound
app = Flask(__name__)
config = os.environ.get('LAB_CONFIG', default='config/dev.cfg')
app.config.from_pyfile(os.path.abspath(config))
app.secret_key = app.secret_key.encode()
db = SQLAlchemy(app)
ma = Marshmallow(app)
migrate = Migrate(app, db)
from app.models.user import Session, User
from app.utils import after_this_request, requires_auth
@app.errorhandler(404)
def not_found(error):
return NotFound()
@app.after_request
def call_after_request_callbacks(response):
for callback in getattr(g, 'after_request_callbacks', ()):
callback(response)
return response
@app.before_request
def auth_middleware():
sid = request.cookies.get('sid', default='')
if sid:
session_result = Session.verify(sid)
if session_result:
g.session = session_result
else:
_session = Session()
db.session.add(_session)
db.session.commit()
g.session = _session
else:
_session = Session()
db.session.add(_session)
db.session.commit()
g.session = _session
if g.session.authenticated:
g.user = User.query.get(g.session.user_id)
@after_this_request
def set_cookie(response):
response.set_cookie('sid', g.session.get_string_cookie(),
httponly=True, expires=g.session.expires)
from app.blueprints.api.v1 import api
from app.blueprints.api.v1 import user
from app.blueprints.api.v1 import ag as ag_api
from app.blueprints.api.v1 import event as event_api
from app.blueprints.api.v1 import date as date_api
from app.blueprints import auth
from app.blueprints import ag
from app.blueprints import cal
from app.blueprints import pizza
app.register_blueprint(api.bp, url_prefix='/api/v1')
app.register_blueprint(user.bp, url_prefix='/api/v1/user')
app.register_blueprint(ag_api.bp, url_prefix='/api/v1/ag')
app.register_blueprint(event_api.bp, url_prefix='/api/v1/event')
app.register_blueprint(date_api.bp, url_prefix='/api/v1/date')
app.register_blueprint(auth.bp, url_prefix='/auth')
app.register_blueprint(ag.bp, url_prefix='/ag')
app.register_blueprint(cal.bp, url_prefix='/cal')
app.register_blueprint(pizza.bp, url_prefix='/pizza')
@app.route('/')
@requires_auth()
def index():
return render_template('index.html', title='Dashboard')
| 30.987952
| 78
| 0.726672
|
0f261fc0e6326e61e6c0a48cea07527f041c307b
| 13,961
|
py
|
Python
|
cryptography/stream_cipher.py
|
Ale-Cas/MasterDataAnalytics
|
fcbdbbb534c97c9796231e4b20747988bcdeced8
|
[
"MIT"
] | null | null | null |
cryptography/stream_cipher.py
|
Ale-Cas/MasterDataAnalytics
|
fcbdbbb534c97c9796231e4b20747988bcdeced8
|
[
"MIT"
] | null | null | null |
cryptography/stream_cipher.py
|
Ale-Cas/MasterDataAnalytics
|
fcbdbbb534c97c9796231e4b20747988bcdeced8
|
[
"MIT"
] | null | null | null |
"""
@author: Alessio Castrica
@date: 22/02/2022
Python implementation of the A5/1 cipher.
I followed the paper https://www.rocq.inria.fr/secret/Anne.Canteaut/encyclopedia.pdf
without implementing either the warm-up phase or the irregular clocking.
The keystream is generated by XORing the key bit with the feedback bits of the 3 LFSRs.
If the binary representation of the message is longer than the sum of frame_number and secrecy_key,
the XOR for the excess bits is computed only over the feedback bits of the 3 LFSRs.
"""
from abc import ABC, abstractmethod
import os
import re
from typing import Dict, List, Optional, Union
import random
def validate_binary_list(
list_of_numbers: List[int],
) -> bool:
"""
Takes a list of bits and returns true if it's made only of 0s and 1s.
Parameters
----------
list_of_numbers: List[int]
A list of 0s and 1s.
Returns
-------
A boolean value, true if the list is made only of 0s and 1s.
Otherwise it raises value errors.
"""
validation = False
if isinstance(list_of_numbers, List):
for index, value in enumerate(list_of_numbers):
if isinstance(value, int) and (value == 1 or value == 0):
validation = True
else:
raise ValueError(
"All values in the list must be 1s or 0s, "
+ f"while at index {index} the value is {value}."
)
else:
raise ValueError("The argument must be a list.")
return validation
def text_to_bits(
text: str,
encoding: str = "utf-8",
errors: str = "surrogatepass",
) -> List[int]:
"""
    Takes a string and returns its binary representation.
Parameters
----------
text: str
Any string.
Returns
-------
A list of 0s and 1s.
"""
bits = bin(int.from_bytes(text.encode(encoding, errors), "big"))[2:]
bits_list = []
for bit in bits.zfill(8 * ((len(bits) + 7) // 8)):
bits_list.append(int(bit))
return bits_list
def text_from_bits(
bits_list: List[int],
encoding: str = "utf-8",
errors: str = "surrogatepass",
) -> str:
"""
    Takes a list of bits and returns the decoded text message.
Parameters
----------
bits_list: List[int]
A list of 0s and 1s.
Returns
-------
A string.
"""
assert validate_binary_list(bits_list)
string_list_bits = [str(bit) for bit in bits_list]
str_of_bits = "".join(string_list_bits)
n = int(str_of_bits, 2)
return n.to_bytes((n.bit_length() + 7) // 8, "big").decode(encoding, errors) or "\0"
def binary_list_to_string(
bits_list: List[int],
) -> str:
"""
    Takes a list of bits and returns them joined as a string of 0s and 1s.
Parameters
----------
bits_list: List[int]
A list of 0s and 1s.
Returns
-------
A string with 0s and 1s.
"""
assert validate_binary_list(bits_list)
return "".join([str(bit) for bit in bits_list])
class LinearFeedbackShiftRegisters:
"""
Linear Feedback Shift Registers (LFSRs) are the basic components of many
running-key generators for stream cipher applications,
because they are appropriate to hardware implementation and
they produce sequences with good statistical properties.
LFSR refers to a feedback shift register with a linear feedback function.
Parameters
----------
length: int
The length of the LSFR object.
initial_state: Optional[List[int]] = None
The initial state of the LSFR object.
taps: Optional[List[int]]
The taps of the LSFR object.
"""
def __init__(
self,
length: int,
initial_state: Optional[List[int]] = None,
taps: Optional[List[int]] = None,
) -> None:
self.length = length
if initial_state is None or validate_binary_list(initial_state):
self._initial_state = initial_state
self._taps = taps
self._state = None
@property
def initial_state(self) -> List[int]:
if self._initial_state is None:
self._initial_state = [random.randint(0, 1) for _ in range(self.length)]
return self._initial_state
@initial_state.setter
def initial_state(self, init_st: List[int]) -> None:
if isinstance(init_st, List):
for _ in init_st:
if isinstance(_, int) and (_ == 1 or _ == 0):
self._initial_state = init_st
else:
raise ValueError(
"All values in the initial state list must be 1s or 0s,"
+ f"while {_} the value is not."
)
else:
raise ValueError("The initial state must be a list.")
assert (
len(self._initial_state) == self.length
), f"The initial state must have the same length as the overall {self.__class__.__name__} object."
@property
def taps(self) -> List[int]:
"""Indeces of states that we take for the update."""
if self._taps is None:
self._taps = []
return self._taps
@taps.setter
def taps(self, new_taps: List[int]) -> None:
if isinstance(new_taps, List):
for _ in new_taps:
if isinstance(_, int):
self._taps = new_taps
else:
raise ValueError(
"All values in the taps list must be integers,"
+ f"while {_} is not."
)
else:
raise ValueError("Taps must be a list.")
@property
def state(self) -> List[int]:
if self._state is None:
self._state = self.initial_state
return self._state
@state.setter
def state(self, new_state: List[int]):
if validate_binary_list(new_state):
self._state = new_state
assert (
len(self._state) == self.length
), f"The state must have the same length as the overall {self.__class__.__name__} object."
def update(self, n_cicles: int = 1) -> Dict[int, int]:
"""
        Update the state of the LFSR object n_cicles times.
Parameters
----------
n_cicles: int
Number of times the user wants to update the state.
Returns
-------
feedback_bits: Dict[int, int]
The feedback bit at each iteration.
The key is the number of the iteration while the value is the feedback bit.
"""
        self.states_in_time = {0: list(self.state)}  # store a copy, not a reference
feedback_bits: Dict[int, int] = {}
for cicle in range(n_cicles):
# insert sum of bits
feedback_bit = sum([self.state[i] for i in self.taps]) % 2
self.state.insert(0, feedback_bit)
# remove last bit
self.state.pop()
            self.states_in_time[cicle + 1] = list(self.state)  # snapshot the current state
feedback_bits[cicle] = feedback_bit
return feedback_bits
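# Illustrative sketch of a single update step on a hypothetical 4-bit register
# (values assumed, chosen only to make the arithmetic easy to follow):
#   >>> lfsr = LinearFeedbackShiftRegisters(length=4, taps=[2, 3],
#   ...                                     initial_state=[1, 0, 0, 1])
#   >>> lfsr.update(1)   # feedback bit = (state[2] + state[3]) % 2 = (0 + 1) % 2 = 1
#   {0: 1}
#   >>> lfsr.state       # the feedback bit is shifted in at the front, last bit dropped
#   [1, 1, 0, 0]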
class StreamCipher(ABC):
"""
Abstract class that represents a stream cipher.
A stream cipher is a symmetric cipher which operates with a time-varying transformation on
individual plaintext digits.
"""
@abstractmethod
def encrypt(self, plaintext: Union[List[int], str]) -> List[int]:
pass
@abstractmethod
def decrypt(self, ciphertext: List[int]) -> List[Union[int, str]]:
pass
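# Sketch of the property every cipher built this way relies on (bit values
# assumed): encryption and decryption are the same XOR with the keystream,
# because (p ^ k) ^ k == p for any bits p and k.
#   plaintext bit p = 1, keystream bit k = 0  ->  ciphertext bit c = 1 ^ 0 = 1
#   decryption:         c ^ k = 1 ^ 0 = 1     ->  the original plaintext bit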
class A5_1(StreamCipher):
"""
A5/1 is the symmetric cipher used for encrypting over-the-air
transmissions in the GSM standard.
Parameters
----------
secrecy_key: List[int]
A user defined key, default is a random 64-bit key.
frame_number: List[int]
A public frame number, default is a random 22-bit key.
"""
    def __init__(
        self,
        secrecy_key: Optional[List[int]] = None,
        frame_number: Optional[List[int]] = None,
    ) -> None:
        super().__init__()
        # Mutable defaults would be evaluated only once at definition time,
        # so fresh random values are drawn here for every new instance.
        if secrecy_key is None:
            secrecy_key = [random.randint(0, 1) for _ in range(64)]
        if frame_number is None:
            frame_number = [random.randint(0, 1) for _ in range(22)]
        if validate_binary_list(secrecy_key) and len(secrecy_key) == 64:
            self.secrecy_key = secrecy_key
        else:
            raise ValueError("The key must be a 64-bit list of 1s and 0s")
        if validate_binary_list(frame_number) and len(frame_number) == 22:
            self.frame_number = frame_number
        else:
            raise ValueError("The frame number must be a 22-bit list of 1s and 0s")
        # Seed the three registers from disjoint slices of the 64-bit key
        # (19 + 22 + 23 = 64 bits).
        lsfr1 = LinearFeedbackShiftRegisters(
            length=19, taps=[13, 16, 17, 18], initial_state=self.secrecy_key[0:19]
        )
        lsfr2 = LinearFeedbackShiftRegisters(
            length=22, taps=[20, 21], initial_state=self.secrecy_key[19:41]
        )
        lsfr3 = LinearFeedbackShiftRegisters(
            length=23, taps=[7, 20, 21, 22], initial_state=self.secrecy_key[41:64]
        )
self.set_of_lsfrs = {lsfr1, lsfr2, lsfr3}
def get_key_from_user_input(self) -> None:
"""Get secrecy key from user input in the terminal."""
user_key = ""
        while not re.fullmatch(r"[01]{64}", user_key):
            user_key = str(input("Please enter a 64-bit key: "))
        self.secrecy_key = [int(bit) for bit in user_key]
def generate_keystream(self, binary_messsage_represenation: List[int]) -> List[int]:
"""
        The keystream is generated by XORing each key bit K_t with the feedback bit of each LFSR.
"""
        # TODO: needs review!
generator_initial_state = self.secrecy_key + self.frame_number
lsfr_feedback_bits = {}
for index, lsfr in enumerate(self.set_of_lsfrs):
lsfr_feedback_bits[index] = list(
lsfr.update(n_cicles=len(binary_messsage_represenation)).values()
)
keystream = []
if len(binary_messsage_represenation) <= len(generator_initial_state):
for bit in range(len(binary_messsage_represenation)):
keystream.append(
generator_initial_state[bit]
^ lsfr_feedback_bits[0][bit]
^ lsfr_feedback_bits[1][bit]
^ lsfr_feedback_bits[2][bit]
)
else:
for bit in range(len(generator_initial_state)):
keystream.append(
generator_initial_state[bit]
^ lsfr_feedback_bits[0][bit]
^ lsfr_feedback_bits[1][bit]
^ lsfr_feedback_bits[2][bit]
)
for bit in range(
len(binary_messsage_represenation) - len(generator_initial_state)
):
keystream.append(
lsfr_feedback_bits[0][bit]
^ lsfr_feedback_bits[1][bit]
^ lsfr_feedback_bits[2][bit]
)
self.keystream = keystream
return self.keystream
def encrypt(self, plaintext: Union[List[int], str]) -> List[int]:
if isinstance(plaintext, str):
binary_representation = text_to_bits(plaintext)
elif isinstance(plaintext, list):
if validate_binary_list(plaintext):
binary_representation = plaintext
else:
raise ValueError("Plaintext must be a string or a list of 0s and 1s.")
self.keystream = self.generate_keystream(binary_representation)
ciphertext = []
for bit in range(len(binary_representation)):
ciphertext.append(self.keystream[bit] ^ binary_representation[bit])
return ciphertext
def decrypt(self, ciphertext: List[int]) -> List[Union[int, str]]:
plaintext = []
for bit in range(len(ciphertext)):
plaintext.append(self.keystream[bit] ^ ciphertext[bit])
return text_from_bits(plaintext)
if __name__ == "__main__":
    ## UNCOMMENT THESE LINES IF YOU WANT TO TEST THE IMPLEMENTATION OF THE LFSR ##
# print("\033[1mSingle LSFR implementation\033[0m:")
# lsfr = LinearFeedbackShiftRegisters(
# length=19,
# taps=[13, 16, 17, 18],
# )
# print(f"The initial state of the LSFR is: \n{lsfr.state} ")
# print(f"The taps (0-based indexing) of the LSFR are: \n{lsfr.taps}\n ")
# n_updates = 3
# print(
# f"The feedback bits of the LSFR when updating {n_updates} times are: \n{lsfr.update(n_updates)}\n "
# )
# print(f"The state after the update of the LSFR is: \n{lsfr.state}\n ")
# print(f"The states along time of the LSFR are: \n{lsfr.states_in_time}\n ")
    os.system("cls" if os.name == "nt" else "clear")
print("\033[1mA5/1 implementation\033[0m\n")
a5_1 = A5_1()
    ## UNCOMMENT THESE LINES IF YOU WANT TO PROVIDE A CUSTOM 64-BIT KEY ##
# print(
# f"Default random 64-bit secrecy key: {binary_list_to_string(a5_1.secrecy_key)}"
# )
# print("The user can provide a custom key.")
# a5_1.get_key_from_user_input()
# print("If provided the secrecy key of the object will be updated:")
# print(a5_1.secrecy_key)
    # The message can be entered either in the variable below or at the terminal prompt
message = ""
while len(message) == 0:
message = str(input("Please enter a message: "))
print(f"Message: {message}")
print(f"Message in bits: \n{text_to_bits(message)}")
print(f"Length of message in bits: {len(text_to_bits(message))}")
print("\nKeystream:")
print(a5_1.generate_keystream(text_to_bits(message)))
print("Length: " + str(len(a5_1.generate_keystream(text_to_bits(message)))))
enc_message = a5_1.encrypt(plaintext=str(message))
print(f"\n\033[1mEncrypted message\033[0m: \n{enc_message}")
print(f"Length encrypted message: \n{len(enc_message)}")
dec_message = a5_1.decrypt(ciphertext=enc_message)
print(f"\n\033[1mDecrypted message\033[0m: \n{dec_message}")
assert dec_message == message
| 34.728856
| 109
| 0.601461
|
5612ed503513f0f00da0327e7ab1ccb23a16aa99
| 7,063
|
py
|
Python
|
app/api_1_0/users.py
|
ToonoW/flashy
|
565f5348be8f7f7369346be06ed64e2c4f403a1d
|
[
"MIT"
] | null | null | null |
app/api_1_0/users.py
|
ToonoW/flashy
|
565f5348be8f7f7369346be06ed64e2c4f403a1d
|
[
"MIT"
] | null | null | null |
app/api_1_0/users.py
|
ToonoW/flashy
|
565f5348be8f7f7369346be06ed64e2c4f403a1d
|
[
"MIT"
] | null | null | null |
from flask import jsonify, request, current_app, url_for
from flask_login import current_user, login_required
from .decorators import permission_required
from . import api
from ..models import User, Post, Permission
from .. import db
from .authentication import verify_password
@api.route('/users/<int:id>')
def get_user(id):
user = User.query.get_or_404(id)
return jsonify(user.to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
        prev = url_for('api.get_user_posts', id=id, page=page-1, _external=True)
next = None
if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
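# Illustrative only (host and values assumed, not taken from this project's
# configuration): the endpoint above returns JSON shaped roughly like
#   {
#     "posts": [ ...serialized posts... ],
#     "prev": null,
#     "next": "http://example.com/api/v1.0/users/1/posts/?page=2",
#     "count": 42
#   }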
@api.route('/users/<int:id>/timeline/')
def get_user_followed_posts(id):
user = User.query.get_or_404(id)
page = request.args.get('page', 1, type=int)
pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
prev = None
if pagination.has_prev:
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1, _external=True)
next = None
if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1, _external=True)
return jsonify({
'posts': [post.to_json() for post in posts],
'prev': prev,
'next': next,
'count': pagination.total
})
@api.route('/user/<int:id>/setsex/<string:sex>')
def set_sex(id, sex):
user = User.query.filter(User.id == id).first()
if user is not None and (sex in ['male', 'female', 'secret']):
user.sex = sex
db.session.commit()
return jsonify({
'status': 1,
'msg': 'modified success'
})
return jsonify({
'status': 0,
'msg': 'please check your data'
})
@api.route('/user/<int:id>/setusername/', methods=['POST'])
def set_username(id):
body = request.json
username = body.get('username')
user = User.query.filter(User.id == id).first()
if user is not None and username is not None:
user.username = username
db.session.commit()
return jsonify({
'status': 1,
'msg': 'modified success'
})
return jsonify({
'status': 0,
'msg': 'please check your data'
})
@api.route('/user/<int:id>/setaboutme/', methods=['POST'])
def set_aboutme(id):
body = request.json
about_me = body.get('about_me')
user = User.query.filter(User.id == id).first()
if user is not None and about_me is not None:
user.about_me = about_me
db.session.commit()
return jsonify({
'status': 1,
'msg': 'modified success'
})
return jsonify({
'status': 0,
'msg': 'please check your data'
})
@api.route('/user/<int:id>/setavatar/', methods=['POST', 'GET'])
def avatar_upload(id):
from ..main.forms import UploadAvatarForm_forAPI
form = UploadAvatarForm_forAPI()
if request.method == 'GET':
from flask import render_template
return render_template('upload_avatar.html', id=id)
user = User.query.filter(User.id == id).first()
image = form.image.data
if user is not None and image is not None:
        # Save the image, using an MD5 hash of the user's email as the filename
import os, hashlib
m1 = hashlib.md5()
m1.update(user.email.encode('utf-8'))
(name, ext) = os.path.splitext(form.image.data.filename)
filename = m1.hexdigest() + ext
abspath = os.path.abspath('app/static/avatar')
filepath = os.path.join(abspath, filename)
image.save(filepath)
        # Write the new avatar URL to the database
user.avatar_url = '/static/avatar/' + filename
db.session.commit()
return jsonify({
'status': 1,
'msg': 'upload avatar success',
'avatar_url': user.avatar_url
})
return jsonify({
'status': 0,
'msg': 'fail to upload avatar'
})
@api.route('/user/<int:id>/setbirthday/<int:birthday>')
def set_birthday(id, birthday):
user = User.query.filter(User.id == id).first()
    if user is not None and birthday > 0:
        user.birthday = birthday
db.session.commit()
return jsonify({
'status': 1,
'msg': 'set birthday success'
})
return jsonify({
'status': 0,
'msg': 'please check your data'
})
@api.route('/follow/<int:id>/<int:id2>')
def follow_user(id, id2):
current_user = User.query.filter(User.id == id2).first()
user = User.query.filter(User.id == id).first()
if user is not None:
current_user.follow(user)
return jsonify({
"status": 1,
"msg": "follow success"
})
return jsonify({
"status": 0,
"msg": "follow fail, can't find user"
})
@api.route('/unfollow/<int:id>/<int:id2>')
def unfollow_user(id, id2):
current_user = User.query.filter(User.id == id2).first()
user = User.query.filter(User.id == id).first()
if user is not None:
current_user.unfollow(user)
return jsonify({
"status": 1,
"msg": "unfollow success"
})
return jsonify({
"status": 0,
"msg": "follow fail, can't find user"
})
# Check whether the follow relationship exists
@api.route('/check_follow/<int:id>/<int:id2>')
def check_follow(id, id2):
current_user = User.query.filter(User.id == id2).first()
user = User.query.filter(User.id == id).first()
if user is not None:
follow_status = current_user.is_following(user)
return jsonify({
"status": 1,
"msg": "check success",
"result": follow_status
})
return jsonify({
"status": 0,
"msg": "check fail, can't find user"
})
# Get the follow list with pagination
@api.route('/followers/<int:id>')
def followers(id):
user = User.query.filter_by(id=id).first()
if user is None:
return jsonify({
'status': 0,
'msg': "can't find user"
})
followeds = user.followed.all()
follower = []
for f in followeds:
try:
u = User.query.filter(User.id == f.followed_id).first()
info_dic = {'id': u.id,
'username': u.username,
'avatar_url': u.avatar_url,
'about_me': u.about_me
}
follower.append(info_dic)
        except Exception:
            # Skip entries whose user record can no longer be found
            pass
return jsonify({
'status': 1,
'msg': 'pull followers list success',
'results': follower
})
| 28.479839
| 78
| 0.5778
|
8218ba54c46df3f37419aa6fc32e6ae83c5dc62d
| 3,257
|
py
|
Python
|
scripts/data_congress_legislators.py
|
aclu-national/elections-api
|
8af28a2dcc9507004f28e8ec98aa9e9fee566eaa
|
[
"MIT"
] | 28
|
2018-11-14T21:07:10.000Z
|
2022-03-09T04:55:30.000Z
|
scripts/data_congress_legislators.py
|
aclu-national/elections-api
|
8af28a2dcc9507004f28e8ec98aa9e9fee566eaa
|
[
"MIT"
] | 31
|
2019-01-24T20:57:18.000Z
|
2021-12-08T15:20:44.000Z
|
scripts/data_congress_legislators.py
|
aclu-national/elections-api
|
8af28a2dcc9507004f28e8ec98aa9e9fee566eaa
|
[
"MIT"
] | 3
|
2019-04-19T15:39:19.000Z
|
2021-09-17T23:39:58.000Z
|
#!/usr/bin/env python
import os, re, sys, yaml, json, unicodedata
import data_index
script = os.path.realpath(sys.argv[0])
scripts_dir = os.path.dirname(script)
root_dir = os.path.dirname(scripts_dir)
# Build lookup of existing legislators to avoid duplicate records
existing_legislator_lookup = {}
index = data_index.get_index('elections-api')
for path in index['lookup'] :
if ('congress_legislators' in path) :
abs_path = "%s/data/%s" % (root_dir, path)
with open(abs_path, 'r') as f:
legislator = json.load(f)
bioguide_id = legislator['id']['bioguide']
existing_legislator_lookup[bioguide_id] = index['lookup'][path]
source_path = "%s/sources/congress_legislators/legislators-current.yaml" % root_dir
print("Loading %s" % source_path)
file = open(source_path, "r")
data = yaml.safe_load(file)
legislator_lookup = {}
legislator_list = []
for legislator in data:
id = legislator["id"]["bioguide"]
legislator_lookup[id] = legislator
source_path = "%s/sources/congress_legislators/legislators-social-media.yaml" % root_dir
print("Loading %s" % source_path)
file = open(source_path, "r")
data = yaml.safe_load(file)
for legislator in data:
id = legislator["id"]["bioguide"]
legislator_lookup[id]["social"] = legislator["social"]
def strip_accents(s):
if (type(s) == str):
s = s.decode('utf-8')
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def get_filename(legislator):
fname = strip_accents(legislator["name"]["first"])
lname = strip_accents(legislator["name"]["last"])
name = "%s-%s" % (fname, lname)
slug = re.sub('[^A-Za-z]+', '-', name).lower()
state = legislator["terms"][0]["state"].lower()
filename = "congress_legislator_%s_%s.json" % (state, slug)
return filename
def get_url_slug(legislator):
lname = strip_accents(legislator["name"]["last"])
fname = strip_accents(legislator["name"]["first"])
name = "%s-%s" % (fname, lname)
name = re.sub('[^A-Za-z]+', '-', name).lower()
state = legislator["terms"][0]["state"].lower()
return "%s-%s" % (state, name)
def sort_legislators(a, b):
a_filename = get_filename(a)
b_filename = get_filename(b)
if a_filename > b_filename:
return 1
else:
return -1
legislator_list.sort(sort_legislators)
# TODO: sort the list of keys instead?
for bioguide_id in legislator_lookup:
legislator = legislator_lookup[bioguide_id]
if (bioguide_id in existing_legislator_lookup):
path_index = 1
path = existing_legislator_lookup[bioguide_id][path_index]
else:
filename = get_filename(legislator)
state = legislator["terms"][0]["state"].lower()
path = "congress_legislators/%s/%s" % (state, filename)
fname = legislator["name"]["first"]
lname = legislator["name"]["last"]
name = "%s %s" % (fname, lname)
abs_path = "%s/data/%s" % (root_dir, path)
aclu_id = data_index.get_id('elections-api', 'congress_legislator', path, name)
legislator["id"]["aclu_id"] = aclu_id
legislator["url_slug"] = get_url_slug(legislator)
print("Saving %s" % path)
dirname = os.path.dirname(abs_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(abs_path, 'w') as outfile:
json.dump(legislator, outfile, indent=2, sort_keys=True)
print("Saving index")
data_index.save_index('elections-api')
print("Done")
| 29.342342
| 88
| 0.706171
|
be11ed0f98b8eb0dbef70620ff48369e9cafc4f5
| 888
|
py
|
Python
|
carts/migrations/0001_initial.py
|
RafaAlkhamry2020/django
|
f7a5b7c34890e08d753dc0dba0aaf85d06c7cc0b
|
[
"MIT"
] | null | null | null |
carts/migrations/0001_initial.py
|
RafaAlkhamry2020/django
|
f7a5b7c34890e08d753dc0dba0aaf85d06c7cc0b
|
[
"MIT"
] | 8
|
2021-03-30T13:48:27.000Z
|
2022-03-12T00:40:39.000Z
|
carts/migrations/0001_initial.py
|
RafaAlkhamry2020/django
|
f7a5b7c34890e08d753dc0dba0aaf85d06c7cc0b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-07-16 18:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0002_product_image'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('updateed_at', models.DateTimeField(auto_now=True)),
('items', models.ManyToManyField(to='products.product')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='cart', to=settings.AUTH_USER_MODEL)),
],
),
]
| 31.714286
| 142
| 0.638514
|
fb1186aacea091b10139be4a9662cdaa45a024d7
| 11,246
|
py
|
Python
|
diofant/tensor/array/ndim_array.py
|
diofant/omg
|
72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/tensor/array/ndim_array.py
|
diofant/omg
|
72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/tensor/array/ndim_array.py
|
diofant/omg
|
72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2
|
[
"BSD-3-Clause"
] | null | null | null |
import collections
from ...core import Expr, Integer
from ...core.sympify import sympify
from ...logic import true
from ...matrices import MatrixBase
from ...printing.defaults import DefaultPrinting
from ..indexed import Indexed
class NDimArray(DefaultPrinting):
"""N-dim array.
Examples
========
Create an N-dim array of zeros:
>>> a = MutableDenseNDimArray.zeros(2, 3, 4)
>>> a
[[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
Create an N-dim array from a list;
>>> a = MutableDenseNDimArray([[2, 3], [4, 5]])
>>> a
[[2, 3], [4, 5]]
>>> b = MutableDenseNDimArray([[[1, 2], [3, 4], [5, 6]],
... [[7, 8], [9, 10], [11, 12]]])
>>> b
[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]]
Create an N-dim array from a flat list with dimension shape:
>>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3))
>>> a
[[1, 2, 3], [4, 5, 6]]
Create an N-dim array from a matrix:
>>> a = Matrix([[1, 2], [3, 4]])
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> b = MutableDenseNDimArray(a)
>>> b
[[1, 2], [3, 4]]
Arithmetic operations on N-dim arrays
>>> a = MutableDenseNDimArray([1, 1, 1, 1], (2, 2))
>>> b = MutableDenseNDimArray([4, 4, 4, 4], (2, 2))
>>> c = a + b
>>> c
[[5, 5], [5, 5]]
>>> a - b
[[-3, -3], [-3, -3]]
"""
def _parse_index(self, index):
if isinstance(index, (int, Integer)):
if index >= self._loop_size:
raise ValueError('index out of range')
return index
if len(index) != self._rank:
raise ValueError('Wrong number of array axes')
real_index = 0
# check if input index can exist in current indexing
for i in range(self._rank):
if index[i] >= self.shape[i]:
raise ValueError('Index ' + str(index) + ' out of border')
real_index = real_index*self.shape[i] + index[i]
return real_index
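    # Illustrative note (shape and index assumed): for an array of shape (2, 3)
    # the multi-index (1, 2) is flattened row-major as (0*2 + 1)*3 + 2 = 5,
    # i.e. _parse_index((1, 2)) returns 5 on such an array.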
def _get_tuple_index(self, integer_index):
index = []
for sh in reversed(self.shape):
index.append(integer_index % sh)
integer_index //= sh
index.reverse()
return tuple(index)
def _check_symbolic_index(self, index):
# Check if any index is symbolic:
tuple_index = (index if isinstance(index, tuple) else (index,))
if any((isinstance(i, Expr) and (not i.is_number)) for i in tuple_index):
for i, nth_dim in zip(tuple_index, self.shape):
i = sympify(i)
if ((i < 0) is true) or ((i >= nth_dim) is true):
raise ValueError('index out of range')
return Indexed(self, *tuple_index)
def _setter_iterable_check(self, value):
if isinstance(value, (collections.abc.Iterable, MatrixBase, NDimArray)):
raise NotImplementedError
@classmethod
def _scan_iterable_shape(cls, iterable):
def f(pointer):
if not isinstance(pointer, collections.abc.Iterable):
return [pointer], ()
result = []
elems, shapes = zip(*[f(i) for i in pointer])
if len(set(shapes)) != 1:
raise ValueError('could not determine shape unambiguously')
for i in elems:
result.extend(i)
return result, (len(shapes),)+shapes[0]
return f(iterable)
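    # Illustrative note (input assumed): _scan_iterable_shape([[1, 2], [3, 4]])
    # returns ([1, 2, 3, 4], (2, 2)), i.e. the flattened elements plus the
    # detected shape, and raises ValueError for ragged input such as [[1, 2], [3]].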
@classmethod
def _handle_ndarray_creation_inputs(cls, iterable=None, shape=None, **kwargs):
if shape is None and iterable is None:
shape = ()
iterable = ()
# Construction from another `NDimArray`:
elif shape is None and isinstance(iterable, NDimArray):
shape = iterable.shape
iterable = list(iterable)
# Construct N-dim array from an iterable (numpy arrays included):
elif shape is None and isinstance(iterable, collections.abc.Iterable):
iterable, shape = cls._scan_iterable_shape(iterable)
# Construct N-dim array from a Matrix:
elif shape is None and isinstance(iterable, MatrixBase):
shape = iterable.shape
# Construct NDimArray(iterable, shape)
elif shape is not None:
pass
else:
raise TypeError('Data type not understood')
if isinstance(shape, (int, Integer)):
shape = shape,
shape = tuple(shape)
if any(not isinstance(dim, (int, Integer)) for dim in shape):
raise TypeError('Shape should contain integers only.')
if isinstance(iterable, collections.abc.Mapping):
for k in list(iterable):
if not isinstance(k, collections.abc.Sequence):
continue
new_key = 0
for i, idx in enumerate(k):
new_key = new_key * shape[i] + idx
iterable[new_key] = iterable[k]
del iterable[k]
return shape, iterable
def __len__(self):
"""Overload common function len(). Returns number of elements in array.
Examples
========
>>> a = MutableDenseNDimArray.zeros(3, 3)
>>> a
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]
>>> len(a)
9
"""
return self._loop_size
@property
def shape(self):
"""
Returns array shape (dimension).
Examples
========
>>> a = MutableDenseNDimArray.zeros(3, 3)
>>> a.shape
(3, 3)
"""
return self._shape
def rank(self):
"""
Returns rank of array.
Examples
========
>>> a = MutableDenseNDimArray.zeros(3, 4, 5, 6, 3)
>>> a.rank()
5
"""
return self._rank
def diff(self, *args, **kwargs):
"""
Calculate the derivative of each element in the array.
Examples
========
>>> M = ImmutableDenseNDimArray([[x, y], [1, x*y]])
>>> M.diff(x)
[[1, 0], [0, y]]
"""
# pylint: disable=not-an-iterable
return type(self)((x.diff(*args, **kwargs) for x in self), self.shape)
def applyfunc(self, f):
"""Apply a function to each element of the N-dim array.
Examples
========
>>> m = ImmutableDenseNDimArray([i*2+j for i in range(2)
... for j in range(2)], (2, 2))
>>> m
[[0, 1], [2, 3]]
>>> m.applyfunc(lambda i: 2*i)
[[0, 2], [4, 6]]
"""
return type(self)(map(f, self), self.shape)
def __str__(self):
"""Returns string, allows to use standard functions print() and str().
Examples
========
>>> a = MutableDenseNDimArray.zeros(2, 2)
>>> a
[[0, 0], [0, 0]]
"""
def f(sh, shape_left, i, j):
# pylint: disable=unsubscriptable-object
if len(shape_left) == 1:
return '['+', '.join([str(self[e]) for e in range(i, j)])+']'
sh //= shape_left[0]
return '[' + ', '.join([f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh) for e in range(shape_left[0])]) + ']' # + '\n'*len(shape_left)
return f(self._loop_size, self.shape, 0, self._loop_size)
def tolist(self):
"""
        Convert the array into a (possibly nested) list.
Examples
========
>>> a = MutableDenseNDimArray([1, 2, 3, 4], (2, 2))
>>> a
[[1, 2], [3, 4]]
>>> b = a.tolist()
>>> b
[[1, 2], [3, 4]]
"""
def f(sh, shape_left, i, j):
# pylint: disable=unsubscriptable-object
if len(shape_left) == 1:
return [self[e] for e in range(i, j)]
result = []
sh //= shape_left[0]
for e in range(shape_left[0]):
result.append(f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh))
return result
return f(self._loop_size, self.shape, 0, self._loop_size)
def __add__(self, other):
if not isinstance(other, NDimArray):
raise TypeError(str(other))
if self.shape != other.shape:
raise ValueError('array shape mismatch')
result_list = [i + j for i, j in zip(self, other)]
return type(self)(result_list, self.shape)
def __sub__(self, other):
if not isinstance(other, NDimArray):
raise TypeError(str(other))
if self.shape != other.shape:
raise ValueError('array shape mismatch')
result_list = [i - j for i, j in zip(self, other)]
return type(self)(result_list, self.shape)
def __mul__(self, other):
if isinstance(other, (collections.abc.Iterable, NDimArray, MatrixBase)):
raise ValueError('scalar expected, use tensorproduct(...) for tensorial product')
other = sympify(other)
result_list = [i*other for i in self] # pylint: disable=not-an-iterable
return type(self)(result_list, self.shape)
def __rmul__(self, other):
if isinstance(other, (collections.abc.Iterable, NDimArray, MatrixBase)):
raise ValueError('scalar expected, use tensorproduct(...) for tensorial product')
other = sympify(other)
result_list = [other*i for i in self] # pylint: disable=not-an-iterable
return type(self)(result_list, self.shape)
def __truediv__(self, other):
if isinstance(other, (collections.abc.Iterable, NDimArray, MatrixBase)):
raise ValueError('scalar expected')
other = sympify(other)
result_list = [i/other for i in self] # pylint: disable=not-an-iterable
return type(self)(result_list, self.shape)
def __rtruediv__(self, other):
return NotImplemented
def __eq__(self, other):
"""
Compare NDimArray instances.
Instances equal if they have same shape and data.
Examples
========
>>> a = MutableDenseNDimArray.zeros(2, 3)
>>> b = MutableDenseNDimArray.zeros(2, 3)
>>> a == b
True
>>> c = a.reshape(3, 2)
>>> c == b
False
>>> a[0, 0] = 1
>>> b[0, 0] = 2
>>> a == b
False
"""
if not isinstance(other, NDimArray):
return False
return (self.shape == other.shape) and (list(self) == list(other))
def _eval_transpose(self):
from .arrayop import permutedims
if self.rank() != 2:
raise ValueError('array rank not 2')
return permutedims(self, (1, 0))
def transpose(self):
return self._eval_transpose()
def _eval_conjugate(self):
return self.func([i.conjugate() for i in self], self.shape) # pylint: disable=not-an-iterable
def conjugate(self):
return self._eval_conjugate()
def _eval_adjoint(self):
return self.transpose().conjugate()
def adjoint(self):
return self._eval_adjoint()
class ImmutableNDimArray(NDimArray, Expr):
"""An immutable version of the N-dim array."""
_op_priority = 11.0
| 28.984536
| 141
| 0.533256
|
bd75dfe22ca10f655ffde16866d4f2c3b12b6e9f
| 489
|
py
|
Python
|
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/_version.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2022-02-18T01:17:27.000Z
|
2022-02-18T01:17:27.000Z
|
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/_version.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/_version.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
VERSION = "13.0.0b1"
| 48.9
| 94
| 0.529652
|
cdfa51015480acd661f3e2524e13710458207bb8
| 5,860
|
py
|
Python
|
homeassistant/components/mqtt/lock.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 1
|
2021-03-23T07:20:03.000Z
|
2021-03-23T07:20:03.000Z
|
homeassistant/components/mqtt/lock.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 60
|
2020-08-03T07:32:56.000Z
|
2022-03-31T06:02:07.000Z
|
homeassistant/components/mqtt/lock.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 4
|
2017-01-10T04:17:33.000Z
|
2021-09-02T16:37:24.000Z
|
"""Support for MQTT locks."""
import functools
import voluptuous as vol
from homeassistant.components import lock
from homeassistant.components.lock import LockEntity
from homeassistant.const import CONF_NAME, CONF_OPTIMISTIC, CONF_VALUE_TEMPLATE
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
DOMAIN,
PLATFORMS,
subscription,
)
from .. import mqtt
from .debug_info import log_messages
from .mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity, async_setup_entry_helper
CONF_PAYLOAD_LOCK = "payload_lock"
CONF_PAYLOAD_UNLOCK = "payload_unlock"
CONF_STATE_LOCKED = "state_locked"
CONF_STATE_UNLOCKED = "state_unlocked"
DEFAULT_NAME = "MQTT Lock"
DEFAULT_OPTIMISTIC = False
DEFAULT_PAYLOAD_LOCK = "LOCK"
DEFAULT_PAYLOAD_UNLOCK = "UNLOCK"
DEFAULT_STATE_LOCKED = "LOCKED"
DEFAULT_STATE_UNLOCKED = "UNLOCKED"
PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_PAYLOAD_LOCK, default=DEFAULT_PAYLOAD_LOCK): cv.string,
vol.Optional(CONF_PAYLOAD_UNLOCK, default=DEFAULT_PAYLOAD_UNLOCK): cv.string,
vol.Optional(CONF_STATE_LOCKED, default=DEFAULT_STATE_LOCKED): cv.string,
vol.Optional(CONF_STATE_UNLOCKED, default=DEFAULT_STATE_UNLOCKED): cv.string,
}
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
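# A minimal illustrative configuration.yaml entry this schema would accept
# (topics and name are assumed example values, not defaults):
#   lock:
#     - platform: mqtt
#       name: "Front Door"
#       state_topic: "home/frontdoor/lock/state"
#       command_topic: "home/frontdoor/lock/set"
#       payload_lock: "LOCK"
#       payload_unlock: "UNLOCK"
#       state_locked: "LOCKED"
#       state_unlocked: "UNLOCKED"
#       optimistic: false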
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT lock panel through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, async_add_entities, config)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT lock dynamically through MQTT discovery."""
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, lock.DOMAIN, setup, PLATFORM_SCHEMA)
async def _async_setup_entity(
hass, async_add_entities, config, config_entry=None, discovery_data=None
):
"""Set up the MQTT Lock platform."""
async_add_entities([MqttLock(hass, config, config_entry, discovery_data)])
class MqttLock(MqttEntity, LockEntity):
"""Representation of a lock that can be toggled using MQTT."""
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the lock."""
self._state = False
self._optimistic = False
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return PLATFORM_SCHEMA
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._optimistic = config[CONF_OPTIMISTIC]
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = self.hass
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def message_received(msg):
"""Handle new MQTT messages."""
payload = msg.payload
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
payload = value_template.async_render_with_possible_json_value(payload)
if payload == self._config[CONF_STATE_LOCKED]:
self._state = True
elif payload == self._config[CONF_STATE_UNLOCKED]:
self._state = False
self.async_write_ha_state()
if self._config.get(CONF_STATE_TOPIC) is None:
# Force into optimistic mode.
self._optimistic = True
else:
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config.get(CONF_STATE_TOPIC),
"msg_callback": message_received,
"qos": self._config[CONF_QOS],
}
},
)
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
async def async_lock(self, **kwargs):
"""Lock the device.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass,
self._config[CONF_COMMAND_TOPIC],
self._config[CONF_PAYLOAD_LOCK],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
# Optimistically assume that the lock has changed state.
self._state = True
self.async_write_ha_state()
async def async_unlock(self, **kwargs):
"""Unlock the device.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass,
self._config[CONF_COMMAND_TOPIC],
self._config[CONF_PAYLOAD_UNLOCK],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
# Optimistically assume that the lock has changed state.
self._state = False
self.async_write_ha_state()
| 33.485714
| 88
| 0.66843
|
6a43438b1488464f970c2766390391cf27ee0bc1
| 15,212
|
py
|
Python
|
neutron/services/metering/drivers/iptables/iptables_driver.py
|
NeCTAR-RC/neutron
|
acf78cc3c88aff638180819419a65145a9a79695
|
[
"Apache-2.0"
] | 5
|
2015-10-20T07:56:53.000Z
|
2017-12-31T22:39:15.000Z
|
neutron/services/metering/drivers/iptables/iptables_driver.py
|
NeCTAR-RC/neutron
|
acf78cc3c88aff638180819419a65145a9a79695
|
[
"Apache-2.0"
] | null | null | null |
neutron/services/metering/drivers/iptables/iptables_driver.py
|
NeCTAR-RC/neutron
|
acf78cc3c88aff638180819419a65145a9a79695
|
[
"Apache-2.0"
] | 3
|
2015-05-08T22:36:28.000Z
|
2015-10-24T21:25:35.000Z
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils
import six
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.common import constants as constants
from neutron.common import ipv6_utils
from neutron.i18n import _LE, _LI
from neutron.services.metering.drivers import abstract_driver
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qrouter-'
WRAP_NAME = 'neutron-meter'
EXTERNAL_DEV_PREFIX = 'qg-'
TOP_CHAIN = WRAP_NAME + "-FORWARD"
RULE = '-r-'
LABEL = '-l-'
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
cfg.CONF.register_opts(interface.OPTS)
class IptablesManagerTransaction(object):
__transactions = {}
def __init__(self, im):
self.im = im
transaction = self.__transactions.get(im, 0)
transaction += 1
self.__transactions[im] = transaction
def __enter__(self):
return self.im
def __exit__(self, type, value, traceback):
transaction = self.__transactions.get(self.im)
if transaction == 1:
self.im.apply()
del self.__transactions[self.im]
else:
transaction -= 1
self.__transactions[self.im] = transaction
class RouterWithMetering(object):
def __init__(self, conf, router):
self.conf = conf
self.id = router['id']
self.router = router
self.ns_name = NS_PREFIX + self.id if conf.use_namespaces else None
self.iptables_manager = iptables_manager.IptablesManager(
namespace=self.ns_name,
binary_name=WRAP_NAME,
use_ipv6=ipv6_utils.is_enabled())
self.metering_labels = {}
class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver):
def __init__(self, plugin, conf):
self.plugin = plugin
self.conf = conf or cfg.CONF
self.routers = {}
if not self.conf.interface_driver:
raise SystemExit(_('An interface driver must be specified'))
LOG.info(_LI("Loading interface driver %s"),
self.conf.interface_driver)
self.driver = importutils.import_object(self.conf.interface_driver,
self.conf)
def _update_router(self, router):
r = self.routers.get(router['id'],
RouterWithMetering(self.conf, router))
r.router = router
self.routers[r.id] = r
return r
@log_helpers.log_method_call
def update_routers(self, context, routers):
# disassociate removed routers
router_ids = set(router['id'] for router in routers)
for router_id, rm in six.iteritems(self.routers):
if router_id not in router_ids:
self._process_disassociate_metering_label(rm.router)
for router in routers:
old_gw_port_id = None
old_rm = self.routers.get(router['id'])
if old_rm:
old_gw_port_id = old_rm.router['gw_port_id']
gw_port_id = router['gw_port_id']
if gw_port_id != old_gw_port_id:
if old_rm:
with IptablesManagerTransaction(old_rm.iptables_manager):
self._process_disassociate_metering_label(router)
if gw_port_id:
self._process_associate_metering_label(router)
elif gw_port_id:
self._process_associate_metering_label(router)
@log_helpers.log_method_call
def remove_router(self, context, router_id):
if router_id in self.routers:
del self.routers[router_id]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def _process_metering_label_rules(self, rm, rules, label_chain,
rules_chain):
im = rm.iptables_manager
ext_dev = self.get_external_device_name(rm.router['gw_port_id'])
if not ext_dev:
return
for rule in rules:
self._add_rule_to_chain(ext_dev, rule, im,
label_chain, rules_chain)
def _process_metering_label_rule_add(self, rm, rule, ext_dev,
label_chain, rules_chain):
im = rm.iptables_manager
self._add_rule_to_chain(ext_dev, rule, im, label_chain, rules_chain)
def _process_metering_label_rule_delete(self, rm, rule, ext_dev,
label_chain, rules_chain):
im = rm.iptables_manager
self._remove_rule_from_chain(ext_dev, rule, im,
label_chain, rules_chain)
def _add_rule_to_chain(self, ext_dev, rule, im,
label_chain, rules_chain):
ipt_rule = self._prepare_rule(ext_dev, rule, label_chain)
if rule['excluded']:
im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
wrap=False, top=True)
else:
im.ipv4['filter'].add_rule(rules_chain, ipt_rule,
wrap=False, top=False)
def _remove_rule_from_chain(self, ext_dev, rule, im,
label_chain, rules_chain):
ipt_rule = self._prepare_rule(ext_dev, rule, label_chain)
if rule['excluded']:
im.ipv4['filter'].remove_rule(rules_chain, ipt_rule,
wrap=False, top=True)
else:
im.ipv4['filter'].remove_rule(rules_chain, ipt_rule,
wrap=False, top=False)
def _prepare_rule(self, ext_dev, rule, label_chain):
remote_ip = rule['remote_ip_prefix']
if rule['direction'] == 'egress':
dir_opt = '-o %s -s %s' % (ext_dev, remote_ip)
else:
dir_opt = '-i %s -d %s' % (ext_dev, remote_ip)
if rule['excluded']:
ipt_rule = '%s -j RETURN' % dir_opt
else:
ipt_rule = '%s -j %s' % (dir_opt, label_chain)
return ipt_rule
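    # Illustrative note (device, prefix and chain names assumed): for an
    # egress, non-excluded rule with remote_ip_prefix 10.0.0.0/24 on device
    # qg-abc123 and label chain neutron-meter-l-xyz, _prepare_rule returns
    #   '-o qg-abc123 -s 10.0.0.0/24 -j neutron-meter-l-xyz'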
def _process_associate_metering_label(self, router):
self._update_router(router)
rm = self.routers.get(router['id'])
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_chain(label_chain,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_chain(rules_chain,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_rule(TOP_CHAIN, '-j ' +
rules_chain,
wrap=False)
rm.iptables_manager.ipv4['filter'].add_rule(label_chain,
'',
wrap=False)
rules = label.get('rules')
if rules:
self._process_metering_label_rules(rm, rules,
label_chain,
rules_chain)
rm.metering_labels[label_id] = label
def _process_disassociate_metering_label(self, router):
rm = self.routers.get(router['id'])
if not rm:
return
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
if label_id not in rm.metering_labels:
continue
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].remove_chain(label_chain,
wrap=False)
rm.iptables_manager.ipv4['filter'].remove_chain(rules_chain,
wrap=False)
del rm.metering_labels[label_id]
@log_helpers.log_method_call
def add_metering_label(self, context, routers):
for router in routers:
self._process_associate_metering_label(router)
@log_helpers.log_method_call
def add_metering_label_rule(self, context, routers):
for router in routers:
self._add_metering_label_rule(router)
@log_helpers.log_method_call
def remove_metering_label_rule(self, context, routers):
for router in routers:
self._remove_metering_label_rule(router)
@log_helpers.log_method_call
def update_metering_label_rules(self, context, routers):
for router in routers:
self._update_metering_label_rules(router)
def _add_metering_label_rule(self, router):
self._process_metering_rule_action(router, 'create')
def _remove_metering_label_rule(self, router):
self._process_metering_rule_action(router, 'delete')
def _process_metering_rule_action(self, router, action):
rm = self.routers.get(router['id'])
if not rm:
return
ext_dev = self.get_external_device_name(rm.router['gw_port_id'])
if not ext_dev:
return
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rule = label.get('rule')
if rule:
if action == 'create':
self._process_metering_label_rule_add(rm, rule,
ext_dev,
label_chain,
rules_chain)
elif action == 'delete':
self._process_metering_label_rule_delete(rm, rule,
ext_dev,
label_chain,
rules_chain)
def _update_metering_label_rules(self, router):
rm = self.routers.get(router['id'])
if not rm:
return
with IptablesManagerTransaction(rm.iptables_manager):
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
label_chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL + label_id,
wrap=False)
rules_chain = iptables_manager.get_chain_name(WRAP_NAME +
RULE + label_id,
wrap=False)
rm.iptables_manager.ipv4['filter'].empty_chain(rules_chain,
wrap=False)
rules = label.get('rules')
if rules:
self._process_metering_label_rules(rm, rules,
label_chain,
rules_chain)
@log_helpers.log_method_call
def remove_metering_label(self, context, routers):
for router in routers:
self._process_disassociate_metering_label(router)
@log_helpers.log_method_call
def get_traffic_counters(self, context, routers):
accs = {}
for router in routers:
rm = self.routers.get(router['id'])
if not rm:
continue
for label_id, label in rm.metering_labels.items():
try:
chain = iptables_manager.get_chain_name(WRAP_NAME +
LABEL +
label_id,
wrap=False)
chain_acc = rm.iptables_manager.get_traffic_counters(
chain, wrap=False, zero=True)
except RuntimeError:
LOG.exception(_LE('Failed to get traffic counters, '
'router: %s'), router)
continue
if not chain_acc:
continue
acc = accs.get(label_id, {'pkts': 0, 'bytes': 0})
acc['pkts'] += chain_acc['pkts']
acc['bytes'] += chain_acc['bytes']
accs[label_id] = acc
return accs
| 41.002695
| 79
| 0.522548
|
6f470000566de2292d447debe0a81c004c319943
| 548
|
py
|
Python
|
Kraven/urls.py
|
nhaines/Kraven
|
3bfc091aefe8d00a91f5b993b25e7d6329db4aa2
|
[
"Apache-2.0"
] | null | null | null |
Kraven/urls.py
|
nhaines/Kraven
|
3bfc091aefe8d00a91f5b993b25e7d6329db4aa2
|
[
"Apache-2.0"
] | null | null | null |
Kraven/urls.py
|
nhaines/Kraven
|
3bfc091aefe8d00a91f5b993b25e7d6329db4aa2
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'Kraven.views.home', name='home'),
# url(r'^Kraven/', include('Kraven.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| 30.444444
| 71
| 0.686131
|
1d1f1e3bf9aa2fc994be565b206ec315d675dab9
| 3,640
|
py
|
Python
|
detectron2/modeling/backbone/vgg16.py
|
mbsariyildiz/detectron2
|
1353101f05349c2b54079e896e88cb05ab939475
|
[
"Apache-2.0"
] | 4
|
2019-12-19T06:30:20.000Z
|
2022-03-09T10:08:32.000Z
|
detectron2/modeling/backbone/vgg16.py
|
mbsariyildiz/detectron2
|
1353101f05349c2b54079e896e88cb05ab939475
|
[
"Apache-2.0"
] | null | null | null |
detectron2/modeling/backbone/vgg16.py
|
mbsariyildiz/detectron2
|
1353101f05349c2b54079e896e88cb05ab939475
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import math
from .backbone import Backbone
from .build import BACKBONE_REGISTRY
from detectron2.layers import ShapeSpec
from detectron2.layers import FrozenBatchNorm2d
__all__ = [ 'VGG16', 'build_vgg16_backbone']
class VGG16(Backbone):
def __init__(self, features, sobel):
super().__init__()
self.features = features
self.d_ft = 512
self._out_features = ["features"]
self._out_feature_strides = {"features": 16}
self._out_feature_channels = {"features": 512}
self._initialize_weights()
if sobel:
grayscale = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0)
grayscale.weight.data.fill_(1.0 / 3.0)
grayscale.bias.data.zero_()
sobel_filter = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1)
sobel_filter.weight.data[0,0].copy_(
torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
)
sobel_filter.weight.data[1,0].copy_(
torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
)
sobel_filter.bias.data.zero_()
self.sobel = nn.Sequential(grayscale, sobel_filter)
for p in self.sobel.parameters():
p.requires_grad = False
else:
self.sobel = None
def forward(self, x):
if self.sobel:
x = self.sobel(x)
x = self.features(x)
return { "features": x }
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def make_layers(input_dim, batch_norm):
layers = []
in_channels = input_dim
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, "M"]
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
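# Illustrative note (input size assumed): with a 3-channel 224x224 input and
# batch_norm=False, make_layers(3, False) stacks the standard VGG16 conv blocks
# with five 2x2 max-pools, so VGG16(...).forward(x)["features"] is a
# 512-channel map of spatial size 7x7.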
@BACKBONE_REGISTRY.register()
def build_vgg16_backbone(cfg, input_shape):
"""
Creates a VGG16 instance from the config.
Returns:
VGG16: a :class:`VGG16` instance.
"""
sobel = cfg.MODEL.BACKBONE.SOBEL
bn = cfg.MODEL.BACKBONE.BN
    dim = 2 + int(not sobel)  # 2 input channels after Sobel filtering, otherwise 3 (RGB)
model = VGG16(make_layers(dim, bn), sobel)
freeze_bn = cfg.MODEL.BACKBONE.FREEZE_BN
if freeze_bn:
model = FrozenBatchNorm2d.convert_frozen_batchnorm(model)
freeze = cfg.MODEL.BACKBONE.FREEZE_AT > 0
if freeze:
for p in model.parameters():
p.requires_grad = False
return model
| 32.212389
| 99
| 0.583242
|
2bceeff51e2d74c6a189803f27b3089aa587ffbc
| 4,685
|
py
|
Python
|
otp/chat/ChatInputTyped.py
|
MasterLoopyBM/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | 1
|
2020-02-07T18:15:12.000Z
|
2020-02-07T18:15:12.000Z
|
otp/chat/ChatInputTyped.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | null | null | null |
otp/chat/ChatInputTyped.py
|
TrueBlueDogemon/Toontown
|
ebed7fc3f2ef06a529cf02eda7ab46361aceef9d
|
[
"MIT"
] | 2
|
2020-11-08T03:38:35.000Z
|
2021-09-02T07:03:47.000Z
|
from direct.gui.DirectGui import *
from direct.showbase import DirectObject
from pandac.PandaModules import *
import sys
from otp.otpbase import OTPGlobals
from otp.otpbase import OTPLocalizer
from toontown.chat.ChatGlobals import *
class ChatInputTyped(DirectObject.DirectObject):
def __init__(self, mainEntry = 0):
self.whisperName = None
self.whisperId = None
self.toPlayer = 0
self.mainEntry = mainEntry
wantHistory = 0
if __dev__:
wantHistory = 1
self.wantHistory = base.config.GetBool('want-chat-history', wantHistory)
self.history = ['']
self.historySize = base.config.GetInt('chat-history-size', 10)
self.historyIndex = 0
return
def typeCallback(self, extraArgs):
self.activate()
def delete(self):
self.ignore('arrow_up-up')
self.ignore('arrow_down-up')
self.chatFrame.destroy()
del self.chatFrame
del self.chatButton
del self.cancelButton
del self.chatEntry
del self.whisperLabel
del self.chatMgr
def show(self, whisperId = None, toPlayer = 0):
self.toPlayer = toPlayer
self.whisperId = whisperId
self.whisperName = None
if self.whisperId:
self.whisperName = base.talkAssistant.findName(whisperId, toPlayer)
if hasattr(self, 'whisperPos'):
self.chatFrame.setPos(self.whisperPos)
self.whisperLabel['text'] = OTPLocalizer.ChatInputWhisperLabel % self.whisperName
self.whisperLabel.show()
else:
if hasattr(self, 'normalPos'):
self.chatFrame.setPos(self.normalPos)
self.whisperLabel.hide()
self.chatEntry['focus'] = 1
self.chatEntry.set('')
self.chatFrame.show()
self.chatEntry.show()
self.cancelButton.show()
self.typedChatButton.hide()
self.typedChatBar.hide()
if self.wantHistory:
self.accept('arrow_up-up', self.getPrevHistory)
self.accept('arrow_down-up', self.getNextHistory)
return
def hide(self):
self.chatEntry.set('')
self.chatEntry['focus'] = 0
self.chatFrame.hide()
self.chatEntry.hide()
self.cancelButton.hide()
self.typedChatButton.show()
self.typedChatBar.show()
self.ignore('arrow_up-up')
self.ignore('arrow_down-up')
def activate(self):
self.chatEntry.set('')
self.chatEntry['focus'] = 1
self.chatFrame.show()
self.chatEntry.show()
self.cancelButton.show()
self.typedChatButton.hide()
self.typedChatBar.hide()
if self.whisperId:
if self.toPlayer:
if not base.talkAssistant.checkWhisperTypedChatPlayer(self.whisperId):
messenger.send('Chat-Failed player typed chat test')
self.deactivate()
elif not base.talkAssistant.checkWhisperTypedChatAvatar(self.whisperId):
messenger.send('Chat-Failed avatar typed chat test')
self.deactivate()
elif not base.talkAssistant.checkOpenTypedChat():
messenger.send('Chat-Failed open typed chat test')
self.deactivate()
def deactivate(self):
self.chatEntry.set('')
self.chatEntry['focus'] = 0
self.chatFrame.show()
self.chatEntry.hide()
self.cancelButton.hide()
self.typedChatButton.show()
self.typedChatBar.show()
def sendChat(self, text):
self.deactivate()
if text:
if self.toPlayer:
if self.whisperId:
pass
elif self.whisperId:
pass
else:
base.talkAssistant.sendOpenTalk(text)
if self.wantHistory:
self.addToHistory(text)
self.chatEntry.set('')
def chatOverflow(self, overflowText):
self.sendChat(self.chatEntry.get())
def cancelButtonPressed(self):
self.chatEntry.set('')
self.deactivate()
def chatButtonPressed(self):
self.sendChat(self.chatEntry.get())
def addToHistory(self, text):
self.history = [text] + self.history[:self.historySize - 1]
self.historyIndex = 0
def getPrevHistory(self):
self.chatEntry.set(self.history[self.historyIndex])
self.historyIndex += 1
self.historyIndex %= len(self.history)
def getNextHistory(self):
self.chatEntry.set(self.history[self.historyIndex])
self.historyIndex -= 1
self.historyIndex %= len(self.history)
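# Illustrative note (history contents assumed): after sending 'hello' and then
# 'world', self.history is ['world', 'hello', ''] and repeated arrow-up presses
# cycle the entry through 'world', 'hello', '' and back to 'world', because the
# index wraps with `% len(self.history)`.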
| 32.762238
| 93
| 0.60683
|
6dd6b99ca3f23dab3041fbd0d9fe8d62084390f0
| 911
|
py
|
Python
|
python_pipe_test.py
|
darth-cheney/jtf-lib
|
8430e6590c30db67c157159be016d6972121de26
|
[
"MIT"
] | null | null | null |
python_pipe_test.py
|
darth-cheney/jtf-lib
|
8430e6590c30db67c157159be016d6972121de26
|
[
"MIT"
] | null | null | null |
python_pipe_test.py
|
darth-cheney/jtf-lib
|
8430e6590c30db67c157159be016d6972121de26
|
[
"MIT"
] | null | null | null |
import subprocess, sys, asyncio, requests, json
if 'win32' in sys.platform:
# Windows specific event-loop policy & cmd
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
async def command():
'''
'''
command = ['jtf', 'get', 'cdli', '-p', 'import.txt']
#command = ['jtf', 'get', 'cdli', '-a', '-p', 'import.txt']
proc = await asyncio.create_subprocess_shell(
' '.join(command),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
    while True:
        data = await proc.stdout.readline()
        if not data:
            # EOF: the subprocess exited without ever reporting the endpoint
            raise RuntimeError('jtf exited before the endpoint was set')
        line = data.decode('utf-8').rstrip()
        if 'Endpoint set' in line:
            return await fetch_data()
async def fetch_data():
'''
'''
while True:
try:
return json.loads(requests.get("http://localhost:9000/").text)
except:
print("Unexpected error:", sys.exc_info()[0])
raise
result = asyncio.run(command())
print(result)
| 26.028571
| 75
| 0.641054
|
c01d09f141c95d2d673f6778836456ebec075f3f
| 8,772
|
py
|
Python
|
storyscript/ErrorCodes.py
|
marqov/storyscript
|
4b99ff904305109cfb4310cb504127e2649ba44a
|
[
"MIT"
] | null | null | null |
storyscript/ErrorCodes.py
|
marqov/storyscript
|
4b99ff904305109cfb4310cb504127e2649ba44a
|
[
"MIT"
] | null | null | null |
storyscript/ErrorCodes.py
|
marqov/storyscript
|
4b99ff904305109cfb4310cb504127e2649ba44a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
class ErrorCodes:
unidentified_error = ('E0001', '')
service_name = ('E0002', "A service name can't contain `.`")
arguments_noservice = ('E0003',
'You have defined an argument, but not a service')
return_outside = ('E0004', '`return` is allowed only inside functions')
variables_backslash = ('E0005', "A variable name can't contain `/`")
variables_dash = ('E0006', "A variable name can't contain `-`")
assignment_incomplete = ('E0007', 'Missing value after `=`')
function_misspell = ('E0008', 'You have misspelt `function`')
import_misspell = ('E0009', 'You have misspelt `import`')
import_misspell_as = ('E0010',
'You have misspelt `as` in an import statement')
import_unquoted_file = ('E0011', 'The imported filename must be in quotes')
string_opening_quote = ('E0012', 'Missing opening quote for string')
string_closing_quote = ('E0013', 'Missing closing quote for string')
list_trailing_comma = ('E0014', 'Trailing comma in list')
list_opening_bracket = ('E0015', 'Missing opening bracket for list')
list_closing_bracket = ('E0016', 'Missing closing bracket for list')
object_opening_bracket = ('E0017', 'Missing opening bracket for object')
object_closing_bracket = ('E0018', 'Missing closing bracket for object')
service_argument_colon = ('E0019', 'Missing colon in service argument')
reserved_keyword = ('E0020', '`{keyword}` is a reserved keyword')
future_reserved_keyword = ('E0030',
'`{keyword}` is reserved for future use')
arguments_nomutation = (
'E0039',
'You have defined a chained mutation, but not a mutation')
compiler_error_no_operator = (
'E0040', 'Invalid operator `{operator}` provided.')
invalid_character = ('E0041', '`{character}` is not allowed here')
unexpected_token = ('E0043',
'`{token}` is not allowed here. Allowed: {allowed}')
break_outside = ('E0044', '`break` is allowed only inside loops')
unnecessary_colon = (
'E0045',
'There is an unnecessary colon at the end of the line')
block_expected_after = ('E0045',
'An indented block is required to follow here')
block_expected_before = ('E0046',
'An indented block is required to be before here')
file_not_found = ('E0047',
'File `{path}` not found at `{abspath}`')
function_call_invalid_path = ('E0049',
'Functions can only be called by name')
function_call_no_inline_expression = (
'E0050', 'Service output can not be called as a function')
when_no_output_parent = (
'E0051', 'No service parent has been found.')
service_without_command = (
'E0052', 'Service calls require a command.')
unexpected_end_of_line = (
'E0053', 'Unexpected end of line. Expected: {allowed}.')
arguments_expected = (
'E0054', 'Arguments need to be declared with `key:value`')
first_option_more_stories = (
'E0055',
        'The option `--first`/`-f` can only be used if one story is compiled.')
expected_end_of_line = (
'E0056', 'Expected end of line instead of `{token}`.')
string_templates_no_assignment = (
'E0057', 'Only expressions are allowed inside string templates')
path_name_internal = (
'E0058', "Path names can't start with double underscore")
string_templates_nested = (
'E0059', "String templates can't be nested")
string_templates_empty = (
'E0060', "String templates can't be empty")
path_name_invalid_char = (
'E0061', 'Invalid path name: `{path}`. '
"Path names can't contain `{token}`")
return_required = ('E0062', 'All paths of a function need to return')
assignment_inline_expression = (
'E0063', "Can't assign to inline expressions.")
foreach_output_required = (
'E0064', 'Foreach blocks require an output (e.g. `as item`)')
nested_service_block = ('E0065', 'Nested service blocks are not allowed')
nested_when_block = ('E0066', 'Nested when blocks are not allowed')
time_value_inconsistent_week = (
'E0067', 'Time value inconsistency: `w` must be the first time unit')
time_value_inconsistent = (
'E0068',
        'Time value inconsistency: `{current}` must be before `{prev}`')
time_value_duplicate = (
'E0069',
'Time value duplication: `{time_type}` must only occur once')
string_templates_unclosed = (
'E0070', 'Unclosed string template. Did you forget a `}}`?')
string_templates_unopened = (
'E0071',
('Unopened string template. Did you forget a `{{` or '
'wanted to escape with `\\}}`?'))
object_destructoring_invalid_path = (
'E0072', 'Objects can only be destructored into variable names.')
object_destructoring_no_variables = (
'E0073', 'Objects destructoring requires variable names.')
unicode_decode_error = (
'E0074', 'Unicode decode error: {reason}.')
expected_closing_parenthesis = (
'E0075', 'Expected closing parenthesis: {cp}')
expected_closing_block = (
'E0076',
'Unexpected end of line. Maybe close a `}}` or `]` expression?')
indentation_error = (
'E0077', 'Invalid indentation detected. Did you mix tabs and spaces?')
type_assignment_different = (
'E0100', "Can't assign `{source}` to `{target}`")
var_not_defined = (
'E0101', 'Variable `{name}` has not been defined.')
return_type_differs = (
'E0102',
"`{source}` can't be implicitly converted to expected "
'return type `{target}`.')
type_operation_incompatible = (
'E0103',
'`{op}` between `{left}` and `{right}` is not supported.'
)
type_index_incompatible = (
'E0104',
"`{left}` can't be indexed with `{right}`"
)
foreach_output_children = (
'E0105',
'`foreach` can only have one or two outputs'
)
foreach_iterable_required = (
'E0106',
'`foreach` requires an iterable type, but `{target}` is not'
)
output_type_only_one = (
'E0107',
'Only one output is allowed for `{target}` blocks.'
)
output_unique = (
'E0108', 'Service output `{name}` must be unique. Use `as outputName`')
service_no_inline_output = (
'E0109', "Inline service calls can't define an output")
function_without_output_return = (
'E0110',
('Function has no return output defined. '
'Only `return` is allowed.'))
function_redeclaration = (
'E0111', 'Function `{name}` has already been declared')
function_not_found = (
'E0112', 'Function `{name}` has not been declared')
function_arg_required = (
'E0113', '{fn_type} `{name}` requires argument `{arg}`')
function_arg_invalid = (
'E0114', '{fn_type} `{name}` does not accept argument `{arg}`')
function_arg_type_mismatch = (
'E0115',
'{fn_type} `{name}` requires argument `{arg_name}` to be of '
'`{target}`, not `{source}`')
assignment_type_none = (
'E0116', 'Assignments with the type `None` are not allowed')
mutation_invalid_name = (
'E0117', 'Invalid mutation `{name}`')
arg_name_required = (
'E0118', '{fn_type} `{name}` requires arguments to be named')
mutation_nested = (
'E0119', "Mutations can't have nested blocks.")
mutation_output = (
'E0120', "Mutations can't have outputs.")
mutation_overload_mismatch = (
'E0121', 'Multiple mutation overloads for `{name}` found:'
'{overloads}\n'
'but none matches.')
type_operation_boolean_incompatible = (
'E0122',
"`{val}` can't be converted to `boolean`"
)
type_operation_cmp_incompatible = (
'E0123',
"`{left}` can't be compared with `{right}`"
)
type_operation_equal_incompatible = (
'E0124',
'Equality comparison not supported between `{left}` and `{right}`.'
)
type_key_not_hashable = (
'E0125',
"`{key}` is not hashable and can't be used as an object key."
)
@staticmethod
def is_error(error_name):
"""
Checks whether a given error name is a valid error.
"""
if isinstance(error_name, str):
if hasattr(ErrorCodes, error_name):
return True
@staticmethod
def get_error(error_name):
"""
Retrieve the error object for a valid error name.
"""
return getattr(ErrorCodes, error_name)
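# A hedged usage sketch (not part of the original module): callers can
# validate an error name before looking up its (code, message) tuple.
#
#   if ErrorCodes.is_error('service_name'):
#       code, message = ErrorCodes.get_error('service_name')
#       # code == 'E0002', message == "A service name can't contain `.`"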
| 43
| 79
| 0.614569
|
ed9cf9f6c28123b6378673d088d26f4a71fee268
| 1,664
|
py
|
Python
|
tests/reducers/test_class_weighted_reducer.py
|
kvzhao/pytorch-metric-learning
|
9c8a94bd1a906317d5834f26d8a94e59d578b825
|
[
"MIT"
] | 2
|
2020-08-11T03:42:15.000Z
|
2022-01-11T07:25:30.000Z
|
tests/reducers/test_class_weighted_reducer.py
|
FadouaKhm/pytorch-metric-learning
|
9eb792bcfc1616b599e6ee457514e3cb3a7235dd
|
[
"MIT"
] | null | null | null |
tests/reducers/test_class_weighted_reducer.py
|
FadouaKhm/pytorch-metric-learning
|
9eb792bcfc1616b599e6ee457514e3cb3a7235dd
|
[
"MIT"
] | 1
|
2021-03-15T04:24:52.000Z
|
2021-03-15T04:24:52.000Z
|
import unittest
import torch
from pytorch_metric_learning.reducers import ClassWeightedReducer
class TestClassWeightedReducer(unittest.TestCase):
def test_class_weighted_reducer(self):
class_weights = torch.tensor([1, 0.9, 1, 0.1, 0, 0, 0, 0, 0, 0])
reducer = ClassWeightedReducer(class_weights)
batch_size = 100
num_classes = 10
embedding_size = 64
embeddings = torch.randn(batch_size, embedding_size)
labels = torch.randint(0,num_classes,(batch_size,))
pair_indices = (torch.randint(0,batch_size,(batch_size,)), torch.randint(0,batch_size,(batch_size,)))
triplet_indices = pair_indices + (torch.randint(0,batch_size,(batch_size,)),)
losses = torch.randn(batch_size)
for indices, reduction_type in [(torch.arange(batch_size), "element"),
(pair_indices, "pos_pair"),
(pair_indices, "neg_pair"),
(triplet_indices, "triplet")]:
loss_dict = {"loss": {"losses": losses, "indices": indices, "reduction_type": reduction_type}}
output = reducer(loss_dict, embeddings, labels)
correct_output = 0
for i in range(len(losses)):
if reduction_type == "element":
batch_idx = indices[i]
else:
batch_idx = indices[0][i]
class_label = labels[batch_idx]
correct_output += losses[i]*class_weights[class_label]
correct_output /= len(losses)
self.assertTrue(torch.isclose(output,correct_output))
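# Stated as a formula (a restatement of the reference loop above, not part of
# the original test): for every reduction type the reducer is expected to
# return
#
#   output = (1 / N) * sum_i losses[i] * class_weights[labels[anchor(i)]]
#
# where anchor(i) is the element index itself for "element" reductions and the
# first index of the pair or triplet otherwise.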
| 50.424242
| 109
| 0.594351
|
c93c51d35d5b8ddd26ad70d489a459583c331a74
| 88,234
|
py
|
Python
|
statsmodels/tsa/regime_switching/markov_switching.py
|
larsoner/statsmodels
|
e0b772ed95880e58fd0c089c04ab01eb393c2485
|
[
"BSD-3-Clause"
] | 1
|
2017-11-13T17:13:04.000Z
|
2017-11-13T17:13:04.000Z
|
statsmodels/tsa/regime_switching/markov_switching.py
|
bert9bert/statsmodels
|
898ddfc483c45bb0f8e5156dd8506abda84c9b63
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/tsa/regime_switching/markov_switching.py
|
bert9bert/statsmodels
|
898ddfc483c45bb0f8e5156dd8506abda84c9b63
|
[
"BSD-3-Clause"
] | 2
|
2018-05-22T11:32:30.000Z
|
2018-11-17T13:58:01.000Z
|
"""
Markov switching models
Author: Chad Fulton
License: BSD-3
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import pandas as pd
from statsmodels.compat.collections import OrderedDict
from scipy.misc import logsumexp
from statsmodels.base.data import PandasData
import statsmodels.tsa.base.tsa_model as tsbase
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tools.tools import Bunch
from statsmodels.tools.numdiff import approx_fprime_cs, approx_hess_cs
from statsmodels.tools.decorators import cache_readonly, resettable_cache
from statsmodels.tools.eval_measures import aic, bic, hqic
from statsmodels.tools.tools import pinv_extended
from statsmodels.tools.sm_exceptions import EstimationWarning
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.statespace.tools import find_best_blas_type
from statsmodels.tsa.regime_switching._hamilton_filter import (
shamilton_filter, dhamilton_filter, chamilton_filter, zhamilton_filter)
from statsmodels.tsa.regime_switching._kim_smoother import (
skim_smoother, dkim_smoother, ckim_smoother, zkim_smoother)
prefix_hamilton_filter_map = {
's': shamilton_filter, 'd': dhamilton_filter,
'c': chamilton_filter, 'z': zhamilton_filter
}
prefix_kim_smoother_map = {
's': skim_smoother, 'd': dkim_smoother,
'c': ckim_smoother, 'z': zkim_smoother
}
def _prepare_exog(exog):
k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
k_exog = exog.shape[1]
return k_exog, exog
def _logistic(x):
"""
Note that this is not a vectorized function
"""
x = np.array(x)
# np.exp(x) / (1 + np.exp(x))
if x.ndim == 0:
y = np.reshape(x, (1, 1, 1))
# np.exp(x[i]) / (1 + np.sum(np.exp(x[:])))
elif x.ndim == 1:
y = np.reshape(x, (len(x), 1, 1))
# np.exp(x[i,t]) / (1 + np.sum(np.exp(x[:,t])))
elif x.ndim == 2:
y = np.reshape(x, (x.shape[0], 1, x.shape[1]))
# np.exp(x[i,j,t]) / (1 + np.sum(np.exp(x[:,j,t])))
elif x.ndim == 3:
y = x
else:
raise NotImplementedError
tmp = np.c_[np.zeros((y.shape[-1], y.shape[1], 1)), y.T].T
evaluated = np.reshape(np.exp(y - logsumexp(tmp, axis=0)), x.shape)
return evaluated
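# Worked examples for the transformation above (values follow directly from
# the code and are shown only for reference): a scalar input reduces to the
# ordinary logistic function, while a length-k vector is treated as the k
# non-reference categories of a multinomial logit with an implicit zero for
# the reference category.
#
#   _logistic(0.)         # exp(0) / (1 + exp(0))          -> 0.5
#   _logistic([0., 0.])   # exp(0) / (1 + exp(0) + exp(0)) -> [1/3, 1/3]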
def _partials_logistic(x):
"""
Note that this is not a vectorized function
"""
tmp = _logistic(x)
# k
if tmp.ndim == 0:
return tmp - tmp**2
# k x k
elif tmp.ndim == 1:
partials = np.diag(tmp - tmp**2)
# k x k x t
elif tmp.ndim == 2:
partials = [np.diag(tmp[:, t] - tmp[:, t]**2)
for t in range(tmp.shape[1])]
shape = tmp.shape[1], tmp.shape[0], tmp.shape[0]
partials = np.concatenate(partials).reshape(shape).transpose((1, 2, 0))
# k x k x j x t
else:
partials = [[np.diag(tmp[:, j, t] - tmp[:, j, t]**2)
for t in range(tmp.shape[2])]
for j in range(tmp.shape[1])]
shape = tmp.shape[1], tmp.shape[2], tmp.shape[0], tmp.shape[0]
partials = np.concatenate(partials).reshape(shape).transpose(
(2, 3, 0, 1))
for i in range(tmp.shape[0]):
for j in range(i):
partials[i, j, ...] = -tmp[i, ...] * tmp[j, ...]
partials[j, i, ...] = partials[i, j, ...]
return partials
def py_hamilton_filter(initial_probabilities, regime_transition,
conditional_likelihoods):
"""
Hamilton filter using pure Python
Parameters
----------
initial_probabilities : array
Array of initial probabilities, shaped (k_regimes,).
regime_transition : array
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
conditional_likelihoods : array
Array of likelihoods conditional on the last `order+1` regimes,
shaped (k_regimes,)*(order + 1) + (nobs,).
Returns
-------
filtered_marginal_probabilities : array
Array containing Pr[S_t=s_t | Y_t] - the probability of being in each
regime conditional on time t information. Shaped (k_regimes, nobs).
predicted_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
joint_likelihoods : array
        Array of likelihoods conditional on time t information, shaped (nobs,).
filtered_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
"""
# Dimensions
k_regimes = len(initial_probabilities)
nobs = conditional_likelihoods.shape[-1]
order = conditional_likelihoods.ndim - 2
dtype = conditional_likelihoods.dtype
# Storage
# Pr[S_t = s_t | Y_t]
filtered_marginal_probabilities = (
np.zeros((k_regimes, nobs), dtype=dtype))
# Pr[S_t = s_t, ... S_{t-r} = s_{t-r} | Y_{t-1}]
predicted_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
# f(y_t | Y_{t-1})
joint_likelihoods = np.zeros((nobs,), dtype)
# Pr[S_t = s_t, ... S_{t-r} = s_{t-r} | Y_t]
filtered_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs + 1,), dtype=dtype)
# Initial probabilities
filtered_marginal_probabilities[:, 0] = initial_probabilities
tmp = np.copy(initial_probabilities)
shape = (k_regimes, k_regimes)
for i in range(order):
tmp = np.reshape(regime_transition[..., i], shape + (1,) * i) * tmp
filtered_joint_probabilities[..., 0] = tmp
# Reshape regime_transition so we can use broadcasting
shape = (k_regimes, k_regimes)
shape += (1,) * (order-1)
shape += (regime_transition.shape[-1],)
regime_transition = np.reshape(regime_transition, shape)
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] > 1:
regime_transition = regime_transition[..., order:]
# Hamilton filter iterations
transition_t = 0
for t in range(nobs):
if regime_transition.shape[-1] > 1:
transition_t = t
# S_t, S_{t-1}, ..., S_{t-r} | t-1, stored at zero-indexed location t
predicted_joint_probabilities[..., t] = (
# S_t | S_{t-1}
regime_transition[..., transition_t] *
# S_{t-1}, S_{t-2}, ..., S_{t-r} | t-1
filtered_joint_probabilities[..., t].sum(axis=-1))
# f(y_t, S_t, ..., S_{t-r} | t-1)
tmp = (conditional_likelihoods[..., t] *
predicted_joint_probabilities[..., t])
# f(y_t | t-1)
joint_likelihoods[t] = np.sum(tmp)
# S_t, S_{t-1}, ..., S_{t-r} | t, stored at index t+1
filtered_joint_probabilities[..., t+1] = (
tmp / joint_likelihoods[t])
# S_t | t
filtered_marginal_probabilities = filtered_joint_probabilities[..., 1:]
for i in range(1, filtered_marginal_probabilities.ndim - 1):
filtered_marginal_probabilities = np.sum(
filtered_marginal_probabilities, axis=-2)
return (filtered_marginal_probabilities, predicted_joint_probabilities,
joint_likelihoods, filtered_joint_probabilities[..., 1:])
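# A minimal sketch (not from the original module) of calling the pure-Python
# filter directly, assuming a 2-regime model whose likelihood depends on the
# current and previous regime (order = 1); the transition matrix, initial
# distribution and likelihood values are illustrative only.
#
#   import numpy as np
#   nobs = 50
#   P = np.array([[0.9, 0.2],
#                 [0.1, 0.8]])[:, :, None]   # left-stochastic, shape (2, 2, 1)
#   init = np.array([2. / 3, 1. / 3])        # Pr[S_0 = i]
#   cond_lik = np.random.uniform(0.1, 1.0, size=(2, 2, nobs))
#   filtered, predicted, joint_lik, filtered_joint = py_hamilton_filter(
#       init, P, cond_lik)
#   # filtered has shape (2, nobs) and holds Pr[S_t = s | Y_t]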
def cy_hamilton_filter(initial_probabilities, regime_transition,
conditional_likelihoods):
"""
Hamilton filter using Cython inner loop
Parameters
----------
initial_probabilities : array
Array of initial probabilities, shaped (k_regimes,).
regime_transition : array
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
conditional_likelihoods : array
Array of likelihoods conditional on the last `order+1` regimes,
shaped (k_regimes,)*(order + 1) + (nobs,).
Returns
-------
filtered_marginal_probabilities : array
Array containing Pr[S_t=s_t | Y_t] - the probability of being in each
regime conditional on time t information. Shaped (k_regimes, nobs).
predicted_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
joint_likelihoods : array
        Array of likelihoods conditional on time t information, shaped (nobs,).
filtered_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
"""
# Dimensions
k_regimes = len(initial_probabilities)
nobs = conditional_likelihoods.shape[-1]
order = conditional_likelihoods.ndim - 2
dtype = conditional_likelihoods.dtype
# Storage
# Pr[S_t = s_t | Y_t]
filtered_marginal_probabilities = (
np.zeros((k_regimes, nobs), dtype=dtype))
# Pr[S_t = s_t, ... S_{t-r} = s_{t-r} | Y_{t-1}]
# Has k_regimes^(order+1) elements
predicted_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
# f(y_t | Y_{t-1})
joint_likelihoods = np.zeros((nobs,), dtype)
# Pr[S_t = s_t, ... S_{t-r+1} = s_{t-r+1} | Y_t]
# Has k_regimes^order elements
filtered_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs + 1,), dtype=dtype)
# Initial probabilities
filtered_marginal_probabilities[:, 0] = initial_probabilities
tmp = np.copy(initial_probabilities)
shape = (k_regimes, k_regimes)
transition_t = 0
for i in range(order):
if regime_transition.shape[-1] > 1:
transition_t = i
tmp = np.reshape(regime_transition[..., transition_t],
shape + (1,) * i) * tmp
filtered_joint_probabilities[..., 0] = tmp
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] > 1:
regime_transition = regime_transition[..., order:]
# Run Cython filter iterations
prefix, dtype, _ = find_best_blas_type((
regime_transition, conditional_likelihoods, joint_likelihoods,
predicted_joint_probabilities, filtered_joint_probabilities))
func = prefix_hamilton_filter_map[prefix]
func(nobs, k_regimes, order, regime_transition,
conditional_likelihoods.reshape(k_regimes**(order+1), nobs),
joint_likelihoods,
predicted_joint_probabilities.reshape(k_regimes**(order+1), nobs),
filtered_joint_probabilities.reshape(k_regimes**(order+1), nobs+1))
# S_t | t
filtered_marginal_probabilities = filtered_joint_probabilities[..., 1:]
for i in range(1, filtered_marginal_probabilities.ndim - 1):
filtered_marginal_probabilities = np.sum(
filtered_marginal_probabilities, axis=-2)
return (filtered_marginal_probabilities, predicted_joint_probabilities,
joint_likelihoods, filtered_joint_probabilities[..., 1:])
def py_kim_smoother(regime_transition, predicted_joint_probabilities,
filtered_joint_probabilities):
"""
Kim smoother using pure Python
Parameters
----------
regime_transition : array
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
predicted_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
filtered_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
Returns
-------
smoothed_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_T] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on all information.
Shaped (k_regimes,) * (order + 1) + (nobs,).
smoothed_marginal_probabilities : array
Array containing Pr[S_t=s_t | Y_T] - the probability of being in each
regime conditional on all information. Shaped (k_regimes, nobs).
"""
# Dimensions
k_regimes = filtered_joint_probabilities.shape[0]
nobs = filtered_joint_probabilities.shape[-1]
order = filtered_joint_probabilities.ndim - 2
dtype = filtered_joint_probabilities.dtype
# Storage
smoothed_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
smoothed_marginal_probabilities = np.zeros((k_regimes, nobs), dtype=dtype)
# S_T, S_{T-1}, ..., S_{T-r} | T
smoothed_joint_probabilities[..., -1] = (
filtered_joint_probabilities[..., -1])
# Reshape transition so we can use broadcasting
shape = (k_regimes, k_regimes)
shape += (1,) * (order)
shape += (regime_transition.shape[-1],)
regime_transition = np.reshape(regime_transition, shape)
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] == nobs + order:
regime_transition = regime_transition[..., order:]
# Kim smoother iterations
transition_t = 0
for t in range(nobs - 2, -1, -1):
if regime_transition.shape[-1] > 1:
transition_t = t + 1
# S_{t+1}, S_t, ..., S_{t-r+1} | t
# x = predicted_joint_probabilities[..., t]
x = (filtered_joint_probabilities[..., t] *
regime_transition[..., transition_t])
# S_{t+1}, S_t, ..., S_{t-r+2} | T / S_{t+1}, S_t, ..., S_{t-r+2} | t
y = (smoothed_joint_probabilities[..., t+1] /
predicted_joint_probabilities[..., t+1])
# S_t, S_{t-1}, ..., S_{t-r+1} | T
smoothed_joint_probabilities[..., t] = (x * y[..., None]).sum(axis=0)
# Get smoothed marginal probabilities S_t | T by integrating out
# S_{t-k+1}, S_{t-k+2}, ..., S_{t-1}
smoothed_marginal_probabilities = smoothed_joint_probabilities
for i in range(1, smoothed_marginal_probabilities.ndim - 1):
smoothed_marginal_probabilities = np.sum(
smoothed_marginal_probabilities, axis=-2)
return smoothed_joint_probabilities, smoothed_marginal_probabilities
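# Continuing the hedged filter sketch above: the smoother reuses the filter
# output to produce full-sample (smoothed) probabilities.
#
#   smoothed_joint, smoothed_marginal = py_kim_smoother(
#       P, predicted, filtered_joint)
#   # smoothed_marginal has shape (2, nobs) and holds Pr[S_t = s | Y_T]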
def cy_kim_smoother(regime_transition, predicted_joint_probabilities,
filtered_joint_probabilities):
"""
Kim smoother using Cython inner loop
Parameters
----------
regime_transition : array
Matrix of regime transition probabilities, shaped either
(k_regimes, k_regimes, 1) or if there are time-varying transition
probabilities (k_regimes, k_regimes, nobs).
predicted_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t-1}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t-1
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
filtered_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_{t}] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on time t
information. Shaped (k_regimes,) * (order + 1) + (nobs,).
Returns
-------
smoothed_joint_probabilities : array
Array containing Pr[S_t=s_t, ..., S_{t-order}=s_{t-order} | Y_T] -
the joint probability of the current and previous `order` periods
being in each combination of regimes conditional on all information.
Shaped (k_regimes,) * (order + 1) + (nobs,).
smoothed_marginal_probabilities : array
Array containing Pr[S_t=s_t | Y_T] - the probability of being in each
regime conditional on all information. Shaped (k_regimes, nobs).
"""
# Dimensions
k_regimes = filtered_joint_probabilities.shape[0]
nobs = filtered_joint_probabilities.shape[-1]
order = filtered_joint_probabilities.ndim - 2
dtype = filtered_joint_probabilities.dtype
# Storage
smoothed_joint_probabilities = np.zeros(
(k_regimes,) * (order + 1) + (nobs,), dtype=dtype)
# Get appropriate subset of transition matrix
if regime_transition.shape[-1] == nobs + order:
regime_transition = regime_transition[..., order:]
# Run Cython smoother iterations
prefix, dtype, _ = find_best_blas_type((
regime_transition, predicted_joint_probabilities,
filtered_joint_probabilities))
func = prefix_kim_smoother_map[prefix]
func(nobs, k_regimes, order, regime_transition,
predicted_joint_probabilities.reshape(k_regimes**(order+1), nobs),
filtered_joint_probabilities.reshape(k_regimes**(order+1), nobs),
smoothed_joint_probabilities.reshape(k_regimes**(order+1), nobs))
# Get smoothed marginal probabilities S_t | T by integrating out
# S_{t-k+1}, S_{t-k+2}, ..., S_{t-1}
smoothed_marginal_probabilities = smoothed_joint_probabilities
for i in range(1, smoothed_marginal_probabilities.ndim - 1):
smoothed_marginal_probabilities = np.sum(
smoothed_marginal_probabilities, axis=-2)
return smoothed_joint_probabilities, smoothed_marginal_probabilities
class MarkovSwitchingParams(object):
"""
Class to hold parameters in Markov switching models
Parameters
----------
k_regimes : int
The number of regimes between which parameters may switch.
Notes
-----
The purpose is to allow selecting parameter indexes / slices based on
parameter type, regime number, or both.
Parameters are lexicographically ordered in the following way:
1. Named type string (e.g. "autoregressive")
2. Number (e.g. the first autoregressive parameter, then the second)
3. Regime (if applicable)
Parameter blocks are set using dictionary setter notation where the key
is the named type string and the value is a list of boolean values
indicating whether a given parameter is switching or not.
For example, consider the following code:
parameters = MarkovSwitchingParams(k_regimes=2)
parameters['regime_transition'] = [1,1]
parameters['exog'] = [0, 1]
    This implies the model has 7 parameters: 4 "regime_transition"-related
    parameters (2 parameters that each switch according to regimes) and 3
    "exog"-related parameters (1 parameter that does not switch, and 1 that
    does).
The order of parameters is then:
1. The first "regime_transition" parameter, regime 0
2. The first "regime_transition" parameter, regime 1
    3. The second "regime_transition" parameter, regime 0
4. The second "regime_transition" parameter, regime 1
5. The first "exog" parameter
6. The second "exog" parameter, regime 0
7. The second "exog" parameter, regime 1
Retrieving indexes / slices is done through dictionary getter notation.
There are three options for the dictionary key:
- Regime number (zero-indexed)
- Named type string (e.g. "autoregressive")
- Regime number and named type string
In the above example, consider the following getters:
    >>> parameters[0]
    array([0, 2, 4, 5])
    >>> parameters[1]
    array([1, 3, 4, 6])
    >>> parameters['exog']
    slice(4, 7, None)
    >>> parameters[0, 'exog']
    [4, 5]
    >>> parameters[1, 'exog']
    [4, 6]
    Notice that in the last two examples, both lists of indexes include 4.
    That's because that is the index of the non-switching first "exog"
    parameter, which should be selected regardless of the regime.
In addition to the getter, the `k_parameters` attribute is an OrderedDict
with the named type strings as the keys. It can be used to get the total
number of parameters of each type:
>>> parameters.k_parameters['regime_transition']
4
>>> parameters.k_parameters['exog']
3
"""
def __init__(self, k_regimes):
self.k_regimes = k_regimes
self.k_params = 0
self.k_parameters = OrderedDict()
self.switching = OrderedDict()
self.slices_purpose = OrderedDict()
self.relative_index_regime_purpose = [
OrderedDict() for i in range(self.k_regimes)]
self.index_regime_purpose = [
OrderedDict() for i in range(self.k_regimes)]
self.index_regime = [[] for i in range(self.k_regimes)]
def __getitem__(self, key):
_type = type(key)
# Get a slice for a block of parameters by purpose
if _type is str:
return self.slices_purpose[key]
# Get a slice for a block of parameters by regime
elif _type is int:
return self.index_regime[key]
elif _type is tuple:
if not len(key) == 2:
raise IndexError('Invalid index')
if type(key[1]) == str and type(key[0]) == int:
return self.index_regime_purpose[key[0]][key[1]]
elif type(key[0]) == str and type(key[1]) == int:
return self.index_regime_purpose[key[1]][key[0]]
else:
raise IndexError('Invalid index')
else:
raise IndexError('Invalid index')
def __setitem__(self, key, value):
_type = type(key)
if _type is str:
value = np.array(value, dtype=bool, ndmin=1)
k_params = self.k_params
self.k_parameters[key] = (
value.size + np.sum(value) * (self.k_regimes - 1))
self.k_params += self.k_parameters[key]
self.switching[key] = value
self.slices_purpose[key] = np.s_[k_params:self.k_params]
for j in range(self.k_regimes):
self.relative_index_regime_purpose[j][key] = []
self.index_regime_purpose[j][key] = []
offset = 0
for i in range(value.size):
switching = value[i]
for j in range(self.k_regimes):
# Non-switching parameters
if not switching:
self.relative_index_regime_purpose[j][key].append(
offset)
# Switching parameters
else:
self.relative_index_regime_purpose[j][key].append(
offset + j)
offset += 1 if not switching else self.k_regimes
for j in range(self.k_regimes):
offset = 0
indices = []
for k, v in self.relative_index_regime_purpose[j].items():
v = (np.r_[v] + offset).tolist()
self.index_regime_purpose[j][k] = v
indices.append(v)
offset += self.k_parameters[k]
self.index_regime[j] = np.concatenate(indices).astype(int)
else:
raise IndexError('Invalid index')
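# A hedged verification sketch (not part of the original module) reproducing
# the docstring example above:
#
#   parameters = MarkovSwitchingParams(k_regimes=2)
#   parameters['regime_transition'] = [1, 1]
#   parameters['exog'] = [0, 1]
#   parameters.k_params        # 7
#   parameters[0]              # array([0, 2, 4, 5])
#   parameters[1, 'exog']      # [4, 6]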
class MarkovSwitching(tsbase.TimeSeriesModel):
"""
First-order k-regime Markov switching model
Parameters
----------
endog : array_like
The endogenous variable.
k_regimes : integer
The number of regimes.
order : integer, optional
The order of the model describes the dependence of the likelihood on
previous regimes. This depends on the model in question and should be
set appropriately by subclasses.
exog_tvtp : array_like, optional
Array of exogenous or lagged variables to use in calculating
time-varying transition probabilities (TVTP). TVTP is only used if this
variable is provided. If an intercept is desired, a column of ones must
be explicitly included in this array.
Notes
-----
This model is new and API stability is not guaranteed, although changes
will be made in a backwards compatible way if possible.
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
def __init__(self, endog, k_regimes, order=0, exog_tvtp=None, exog=None,
dates=None, freq=None, missing='none'):
# Properties
self.k_regimes = k_regimes
self.tvtp = exog_tvtp is not None
# The order of the model may be overridden in subclasses
self.order = order
# Exogenous data
# TODO add checks for exog_tvtp consistent shape and indices
self.k_tvtp, self.exog_tvtp = _prepare_exog(exog_tvtp)
# Initialize the base model
super(MarkovSwitching, self).__init__(endog, exog, dates=dates,
freq=freq, missing=missing)
# Dimensions
self.nobs = self.endog.shape[0]
# Sanity checks
if self.endog.ndim > 1 and self.endog.shape[1] > 1:
raise ValueError('Must have univariate endogenous data.')
if self.k_regimes < 2:
raise ValueError('Markov switching models must have at least two'
' regimes.')
if not(self.exog_tvtp is None or self.exog_tvtp.shape[0] == self.nobs):
raise ValueError('Time-varying transition probabilities exogenous'
' array must have the same number of observations'
' as the endogenous array.')
# Parameters
self.parameters = MarkovSwitchingParams(self.k_regimes)
k_transition = self.k_regimes - 1
if self.tvtp:
k_transition *= self.k_tvtp
self.parameters['regime_transition'] = [1] * k_transition
# Internal model properties: default is steady-state initialization
self._initialization = 'steady-state'
self._initial_probabilities = None
@property
def k_params(self):
"""
(int) Number of parameters in the model
"""
return self.parameters.k_params
def initialize_steady_state(self):
"""
Set initialization of regime probabilities to be steady-state values
Notes
-----
Only valid if there are not time-varying transition probabilities.
"""
if self.tvtp:
raise ValueError('Cannot use steady-state initialization when'
' the regime transition matrix is time-varying.')
self._initialization = 'steady-state'
self._initial_probabilities = None
def initialize_known(self, probabilities, tol=1e-8):
"""
Set initialization of regime probabilities to use known values
"""
self._initialization = 'known'
probabilities = np.array(probabilities, ndmin=1)
if not probabilities.shape == (self.k_regimes,):
raise ValueError('Initial probabilities must be a vector of shape'
' (k_regimes,).')
if not np.abs(np.sum(probabilities) - 1) < tol:
raise ValueError('Initial probabilities vector must sum to one.')
self._initial_probabilities = probabilities
def initial_probabilities(self, params, regime_transition=None):
"""
Retrieve initial probabilities
"""
params = np.array(params, ndmin=1)
if self._initialization == 'steady-state':
if regime_transition is None:
regime_transition = self.regime_transition_matrix(params)
if regime_transition.ndim == 3:
regime_transition = regime_transition[..., 0]
m = regime_transition.shape[0]
A = np.c_[(np.eye(m) - regime_transition).T, np.ones(m)].T
try:
probabilities = np.linalg.pinv(A)[:, -1]
except np.linalg.LinAlgError:
raise RuntimeError('Steady-state probabilities could not be'
' constructed.')
elif self._initialization == 'known':
probabilities = self._initial_probabilities
else:
raise RuntimeError('Invalid initialization method selected.')
return probabilities
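    # Worked example of the steady-state initialization above (illustrative
    # values, not part of the original module): for a constant left-stochastic
    # transition matrix
    #
    #   P = [[0.9, 0.2],
    #        [0.1, 0.8]]
    #
    # the stationary distribution solves pi = P @ pi together with
    # sum(pi) = 1, giving pi = [2/3, 1/3]; the pseudo-inverse of the augmented
    # system built above recovers the same vector.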
def _regime_transition_matrix_tvtp(self, params, exog_tvtp=None):
if exog_tvtp is None:
exog_tvtp = self.exog_tvtp
nobs = len(exog_tvtp)
regime_transition_matrix = np.zeros(
(self.k_regimes, self.k_regimes, nobs),
dtype=np.promote_types(np.float64, params.dtype))
# Compute the predicted values from the regression
for i in range(self.k_regimes):
coeffs = params[self.parameters[i, 'regime_transition']]
regime_transition_matrix[:-1, i, :] = np.dot(
exog_tvtp,
np.reshape(coeffs, (self.k_regimes-1, self.k_tvtp)).T).T
# Perform the logistic transformation
tmp = np.c_[np.zeros((nobs, self.k_regimes, 1)),
regime_transition_matrix[:-1, :, :].T].T
regime_transition_matrix[:-1, :, :] = np.exp(
regime_transition_matrix[:-1, :, :] - logsumexp(tmp, axis=0))
# Compute the last column of the transition matrix
regime_transition_matrix[-1, :, :] = (
1 - np.sum(regime_transition_matrix[:-1, :, :], axis=0))
return regime_transition_matrix
def regime_transition_matrix(self, params, exog_tvtp=None):
"""
Construct the left-stochastic transition matrix
Notes
-----
This matrix will either be shaped (k_regimes, k_regimes, 1) or if there
are time-varying transition probabilities, it will be shaped
(k_regimes, k_regimes, nobs).
The (i,j)th element of this matrix is the probability of transitioning
from regime j to regime i; thus the previous regime is represented in a
column and the next regime is represented by a row.
It is left-stochastic, meaning that each column sums to one (because
it is certain that from one regime (j) you will transition to *some
other regime*).
"""
params = np.array(params, ndmin=1)
if not self.tvtp:
regime_transition_matrix = np.zeros(
(self.k_regimes, self.k_regimes, 1),
dtype=np.promote_types(np.float64, params.dtype))
regime_transition_matrix[:-1, :, 0] = np.reshape(
params[self.parameters['regime_transition']],
(self.k_regimes-1, self.k_regimes))
regime_transition_matrix[-1, :, 0] = (
1 - np.sum(regime_transition_matrix[:-1, :, 0], axis=0))
else:
regime_transition_matrix = (
self._regime_transition_matrix_tvtp(params, exog_tvtp))
return regime_transition_matrix
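    # Worked example of the non-TVTP construction above (illustrative values):
    # for k_regimes = 2 the "regime_transition" block of the parameter vector
    # holds [p[0->0], p[1->0]], so a block equal to [0.9, 0.2] yields the
    # left-stochastic matrix
    #
    #   [[0.9, 0.2],
    #    [0.1, 0.8]]
    #
    # whose (i, j) element is the probability of moving from regime j to
    # regime i; the last row is filled in as one minus the column sums.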
def predict(self, params, start=None, end=None, probabilities=None,
conditional=False):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
params : array
Parameters at which to form predictions
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
probabilities : string or array_like, optional
Specifies the weighting probabilities used in constructing the
prediction as a weighted average. If a string, can be 'predicted',
'filtered', or 'smoothed'. Otherwise can be an array of
probabilities to use. Default is smoothed.
conditional: boolean or int, optional
Whether or not to return predictions conditional on current or
past regimes. If False, returns a single vector of weighted
predictions. If True or 1, returns predictions conditional on the
current regime. For larger integers, returns predictions
conditional on the current regime and some number of past regimes.
Returns
-------
predict : array
            Array of in-sample predictions and / or out-of-sample
forecasts.
"""
if start is None:
start = self._index[0]
# Handle start, end
start, end, out_of_sample, prediction_index = (
self._get_prediction_index(start, end))
if out_of_sample > 0:
raise NotImplementedError
# Perform in-sample prediction
predict = self.predict_conditional(params)
squeezed = np.squeeze(predict)
# Check if we need to do weighted averaging
if squeezed.ndim - 1 > conditional:
# Determine in-sample weighting probabilities
if probabilities is None or probabilities == 'smoothed':
results = self.smooth(params, return_raw=True)
probabilities = results.smoothed_joint_probabilities
elif probabilities == 'filtered':
results = self.filter(params, return_raw=True)
probabilities = results.filtered_joint_probabilities
elif probabilities == 'predicted':
results = self.filter(params, return_raw=True)
probabilities = results.predicted_joint_probabilities
# Compute weighted average
predict = (predict * probabilities)
for i in range(predict.ndim - 1 - int(conditional)):
predict = np.sum(predict, axis=-2)
else:
predict = squeezed
return predict[start:end + out_of_sample + 1]
def predict_conditional(self, params):
"""
In-sample prediction, conditional on the current, and possibly past,
regimes
Parameters
----------
params : array_like
Array of parameters at which to perform prediction.
Returns
-------
predict : array_like
Array of predictions conditional on current, and possibly past,
regimes
"""
raise NotImplementedError
def _conditional_likelihoods(self, params):
"""
Compute likelihoods conditional on the current period's regime (and
the last self.order periods' regimes if self.order > 0).
Must be implemented in subclasses.
"""
raise NotImplementedError
def _filter(self, params, regime_transition=None):
# Get the regime transition matrix if not provided
if regime_transition is None:
regime_transition = self.regime_transition_matrix(params)
# Get the initial probabilities
initial_probabilities = self.initial_probabilities(
params, regime_transition)
# Compute the conditional likelihoods
conditional_likelihoods = self._conditional_likelihoods(params)
# Apply the filter
return ((regime_transition, initial_probabilities,
conditional_likelihoods) +
cy_hamilton_filter(initial_probabilities, regime_transition,
conditional_likelihoods))
def filter(self, params, transformed=True, cov_type=None, cov_kwds=None,
return_raw=False, results_class=None,
results_wrapper_class=None):
"""
Apply the Hamilton filter
Parameters
----------
params : array_like
Array of parameters at which to perform filtering.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
cov_type : str, optional
See `fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
See `fit` for a description of required keywords for alternative
covariance estimators
        return_raw : boolean, optional
Whether or not to return only the raw Hamilton filter output or a
full results object. Default is to return a full results object.
results_class : type, optional
A results class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
results_wrapper_class : type, optional
A results wrapper class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
Returns
-------
MarkovSwitchingResults
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
# Save the parameter names
self.data.param_names = self.param_names
# Get the result
names = ['regime_transition', 'initial_probabilities',
'conditional_likelihoods', 'filtered_marginal_probabilities',
'predicted_joint_probabilities', 'joint_likelihoods',
'filtered_joint_probabilities']
result = HamiltonFilterResults(
self, Bunch(**dict(zip(names, self._filter(params)))))
# Wrap in a results object
if not return_raw:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
if cov_kwds is not None:
result_kwargs['cov_kwds'] = cov_kwds
if results_class is None:
results_class = MarkovSwitchingResults
if results_wrapper_class is None:
results_wrapper_class = MarkovSwitchingResultsWrapper
result = results_wrapper_class(
results_class(self, params, result, **result_kwargs)
)
return result
def _smooth(self, params, filtered_marginal_probabilities,
predicted_joint_probabilities,
filtered_joint_probabilities, regime_transition=None):
# Get the regime transition matrix
if regime_transition is None:
regime_transition = self.regime_transition_matrix(params)
# Apply the smoother
return cy_kim_smoother(regime_transition,
predicted_joint_probabilities,
filtered_joint_probabilities)
def smooth(self, params, transformed=True, cov_type=None, cov_kwds=None,
return_raw=False, results_class=None,
results_wrapper_class=None):
"""
Apply the Kim smoother and Hamilton filter
Parameters
----------
params : array_like
Array of parameters at which to perform filtering.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
cov_type : str, optional
See `fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
See `fit` for a description of required keywords for alternative
covariance estimators
        return_raw : boolean, optional
Whether or not to return only the raw Hamilton filter output or a
full results object. Default is to return a full results object.
results_class : type, optional
A results class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
results_wrapper_class : type, optional
A results wrapper class to instantiate rather than
`MarkovSwitchingResults`. Usually only used internally by
subclasses.
Returns
-------
MarkovSwitchingResults
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
# Save the parameter names
self.data.param_names = self.param_names
# Hamilton filter
names = ['regime_transition', 'initial_probabilities',
'conditional_likelihoods', 'filtered_marginal_probabilities',
'predicted_joint_probabilities', 'joint_likelihoods',
'filtered_joint_probabilities']
result = Bunch(**dict(zip(names, self._filter(params))))
# Kim smoother
out = self._smooth(params, result.filtered_marginal_probabilities,
result.predicted_joint_probabilities,
result.filtered_joint_probabilities)
result['smoothed_joint_probabilities'] = out[0]
result['smoothed_marginal_probabilities'] = out[1]
result = KimSmootherResults(self, result)
# Wrap in a results object
if not return_raw:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
if cov_kwds is not None:
result_kwargs['cov_kwds'] = cov_kwds
if results_class is None:
results_class = MarkovSwitchingResults
if results_wrapper_class is None:
results_wrapper_class = MarkovSwitchingResultsWrapper
result = results_wrapper_class(
results_class(self, params, result, **result_kwargs)
)
return result
def loglikeobs(self, params, transformed=True):
"""
Loglikelihood evaluation for each period
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
results = self._filter(params)
return np.log(results[5])
def loglike(self, params, transformed=True):
"""
Loglikelihood evaluation
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
return np.sum(self.loglikeobs(params, transformed))
def score(self, params, transformed=True):
"""
Compute the score function at params.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
return approx_fprime_cs(params, self.loglike, args=(transformed,))
def score_obs(self, params, transformed=True):
"""
Compute the score per observation, evaluated at params
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
return approx_fprime_cs(params, self.loglikeobs, args=(transformed,))
def hessian(self, params, transformed=True):
"""
Hessian matrix of the likelihood function, evaluated at the given
parameters
Parameters
----------
params : array_like
Array of parameters at which to evaluate the Hessian
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
"""
params = np.array(params, ndmin=1)
return approx_hess_cs(params, self.loglike)
def fit(self, start_params=None, transformed=True, cov_type='approx',
cov_kwds=None, method='bfgs', maxiter=100, full_output=1, disp=0,
callback=None, return_params=False, em_iter=5, search_reps=0,
search_iter=5, search_scale=1., **kwargs):
"""
Fits the model by maximum likelihood via Hamilton filter.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : boolean, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The type of covariance matrix estimator to use. Can be one of
'approx', 'opg', 'robust', or 'none'. Default is 'approx'.
cov_kwds : dict or None, optional
Keywords for alternative covariance estimators
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
maxiter : int, optional
The maximum number of iterations to perform.
full_output : boolean, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : boolean, optional
Set to True to print convergence messages.
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
return_params : boolean, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
em_iter : int, optional
Number of initial EM iteration steps used to improve starting
parameters.
search_reps : int, optional
Number of randomly drawn search parameters that are drawn around
`start_params` to try and improve starting parameters. Default is
0.
search_iter : int, optional
Number of initial EM iteration steps used to improve each of the
search parameter repetitions.
search_scale : float or array, optional.
Scale of variates for random start parameter search.
**kwargs
Additional keyword arguments to pass to the optimizer.
Returns
-------
MarkovSwitchingResults
"""
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
# Random search for better start parameters
if search_reps > 0:
start_params = self._start_params_search(
search_reps, start_params=start_params,
transformed=transformed, em_iter=search_iter,
scale=search_scale)
transformed = True
# Get better start params through EM algorithm
if em_iter and not self.tvtp:
start_params = self._fit_em(start_params, transformed=transformed,
maxiter=em_iter, tolerance=0,
return_params=True)
transformed = True
if transformed:
start_params = self.untransform_params(start_params)
# Maximum likelihood estimation by scoring
fargs = (False,)
mlefit = super(MarkovSwitching, self).fit(start_params, method=method,
fargs=fargs,
maxiter=maxiter,
full_output=full_output,
disp=disp, callback=callback,
skip_hessian=True, **kwargs)
# Just return the fitted parameters if requested
if return_params:
result = self.transform_params(mlefit.params)
# Otherwise construct the results class if desired
else:
result = self.smooth(mlefit.params, transformed=False,
cov_type=cov_type, cov_kwds=cov_kwds)
result.mlefit = mlefit
result.mle_retvals = mlefit.mle_retvals
result.mle_settings = mlefit.mle_settings
return result
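    # A hedged end-to-end sketch (not part of this module): `MarkovSwitching`
    # itself is abstract, so estimation is normally driven through a concrete
    # subclass such as `MarkovRegression`; the data below are placeholders.
    #
    #   import numpy as np
    #   from statsmodels.tsa.regime_switching.markov_regression import (
    #       MarkovRegression)
    #   endog = np.random.randn(200)
    #   res = MarkovRegression(endog, k_regimes=2).fit()
    #   res.smoothed_marginal_probabilities   # Pr[S_t = s | Y_T] per regime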
def _fit_em(self, start_params=None, transformed=True, cov_type='none',
cov_kwds=None, maxiter=50, tolerance=1e-6, full_output=True,
return_params=False, **kwargs):
"""
Fits the model using the Expectation-Maximization (EM) algorithm
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by `start_params`.
transformed : boolean, optional
Whether or not `start_params` is already transformed. Default is
True.
cov_type : str, optional
The type of covariance matrix estimator to use. Can be one of
'approx', 'opg', 'robust', or 'none'. Default is 'none'.
cov_kwds : dict or None, optional
Keywords for alternative covariance estimators
maxiter : int, optional
The maximum number of iterations to perform.
tolerance : float, optional
The iteration stops when the difference between subsequent
loglikelihood values is less than this tolerance.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. This includes all intermediate values for
parameters and loglikelihood values
return_params : boolean, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
**kwargs
Additional keyword arguments to pass to the optimizer.
Notes
-----
This is a private method for finding good starting parameters for MLE
by scoring. It has not been tested for a thoroughly correct EM
implementation in all cases. It does not support TVTP transition
probabilities.
Returns
-------
MarkovSwitchingResults
"""
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
if not transformed:
start_params = self.transform_params(start_params)
# Perform expectation-maximization
llf = []
params = [start_params]
i = 0
delta = 0
while i < maxiter and (i < 2 or (delta > tolerance)):
out = self._em_iteration(params[-1])
llf.append(out[0].llf)
params.append(out[1])
if i > 0:
delta = 2 * (llf[-1] - llf[-2]) / np.abs((llf[-1] + llf[-2]))
i += 1
# Just return the fitted parameters if requested
if return_params:
result = params[-1]
# Otherwise construct the results class if desired
else:
result = self.filter(params[-1], transformed=True,
cov_type=cov_type, cov_kwds=cov_kwds)
# Save the output
if full_output:
em_retvals = Bunch(**{'params': np.array(params),
'llf': np.array(llf),
'iter': i})
em_settings = Bunch(**{'tolerance': tolerance,
'maxiter': maxiter})
else:
em_retvals = None
em_settings = None
result.mle_retvals = em_retvals
result.mle_settings = em_settings
return result
def _em_iteration(self, params0):
"""
EM iteration
Notes
-----
The EM iteration in this base class only performs the EM step for
non-TVTP transition probabilities.
"""
params1 = np.zeros(params0.shape,
dtype=np.promote_types(np.float64, params0.dtype))
# Smooth at the given parameters
result = self.smooth(params0, transformed=True, return_raw=True)
# The EM with TVTP is not yet supported, just return the previous
# iteration parameters
if self.tvtp:
params1[self.parameters['regime_transition']] = (
params0[self.parameters['regime_transition']])
else:
regime_transition = self._em_regime_transition(result)
for i in range(self.k_regimes):
params1[self.parameters[i, 'regime_transition']] = (
regime_transition[i])
return result, params1
def _em_regime_transition(self, result):
"""
EM step for regime transition probabilities
"""
        # Marginalize the smoothed joint probabilities to just S_t, S_{t-1} | T
tmp = result.smoothed_joint_probabilities
for i in range(tmp.ndim - 3):
tmp = np.sum(tmp, -2)
smoothed_joint_probabilities = tmp
# Transition parameters (recall we're not yet supporting TVTP here)
k_transition = len(self.parameters[0, 'regime_transition'])
regime_transition = np.zeros((self.k_regimes, k_transition))
        for i in range(self.k_regimes): # S_{t-1}
for j in range(self.k_regimes - 1): # S_t
regime_transition[i, j] = (
np.sum(smoothed_joint_probabilities[j, i]) /
np.sum(result.smoothed_marginal_probabilities[i]))
# It may be the case that due to rounding error this estimates
# transition probabilities that sum to greater than one. If so,
# re-scale the probabilities and warn the user that something
# is not quite right
delta = np.sum(regime_transition[i]) - 1
if delta > 0:
warnings.warn('Invalid regime transition probabilities'
' estimated in EM iteration; probabilities have'
' been re-scaled to continue estimation.',
EstimationWarning)
regime_transition[i] /= 1 + delta + 1e-6
return regime_transition
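    # Restating the M-step used above: each estimate is the ratio of summed
    # smoothed joint probabilities to summed smoothed marginal probabilities,
    #
    #   p_hat[i -> j] = sum_t Pr[S_t = j, S_{t-1} = i | Y_T]
    #                   / sum_t Pr[S_t = i | Y_T]
    #
    # with the re-scaling guard applied when rounding pushes the estimated
    # probabilities out of a regime above one.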
def _start_params_search(self, reps, start_params=None, transformed=True,
em_iter=5, scale=1.):
"""
Search for starting parameters as random permutations of a vector
Parameters
----------
reps : int
Number of random permutations to try.
start_params : array, optional
Starting parameter vector. If not given, class-level start
parameters are used.
transformed : boolean, optional
If `start_params` was provided, whether or not those parameters
are already transformed. Default is True.
em_iter : int, optional
Number of EM iterations to apply to each random permutation.
scale : array or float, optional
Scale of variates for random start parameter search. Can be given
as an array of length equal to the number of parameters or as a
single scalar.
Notes
-----
This is a private method for finding good starting parameters for MLE
by scoring, where the defaults have been set heuristically.
"""
if start_params is None:
start_params = self.start_params
transformed = True
else:
start_params = np.array(start_params, ndmin=1)
# Random search is over untransformed space
if transformed:
start_params = self.untransform_params(start_params)
# Construct the standard deviations
scale = np.array(scale, ndmin=1)
if scale.size == 1:
scale = np.ones(self.k_params) * scale
if not scale.size == self.k_params:
raise ValueError('Scale of variates for random start'
' parameter search must be given for each'
' parameter or as a single scalar.')
# Construct the random variates
variates = np.zeros((reps, self.k_params))
for i in range(self.k_params):
variates[:, i] = scale[i] * np.random.uniform(-0.5, 0.5, size=reps)
llf = self.loglike(start_params, transformed=False)
params = start_params
for i in range(reps):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
proposed_params = self._fit_em(
start_params + variates[i], transformed=False,
maxiter=em_iter, return_params=True)
proposed_llf = self.loglike(proposed_params)
if proposed_llf > llf:
llf = proposed_llf
params = self.untransform_params(proposed_params)
                except Exception:
pass
# Return transformed parameters
return self.transform_params(params)
@property
def start_params(self):
"""
(array) Starting parameters for maximum likelihood estimation.
"""
params = np.zeros(self.k_params, dtype=np.float64)
# Transition probabilities
if self.tvtp:
params[self.parameters['regime_transition']] = 0.
else:
params[self.parameters['regime_transition']] = 1. / self.k_regimes
return params
@property
def param_names(self):
"""
(list of str) List of human readable parameter names (for parameters
actually included in the model).
"""
param_names = np.zeros(self.k_params, dtype=object)
# Transition probabilities
if self.tvtp:
# TODO add support for exog_tvtp_names
param_names[self.parameters['regime_transition']] = [
'p[%d->%d].tvtp%d' % (j, i, k)
for i in range(self.k_regimes-1)
for k in range(self.k_tvtp)
for j in range(self.k_regimes)
]
else:
param_names[self.parameters['regime_transition']] = [
'p[%d->%d]' % (j, i)
for i in range(self.k_regimes-1)
for j in range(self.k_regimes)]
return param_names.tolist()
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
            evaluation.
Notes
-----
In the base class, this only transforms the transition-probability-
related parameters.
"""
constrained = np.array(unconstrained, copy=True)
constrained = constrained.astype(
np.promote_types(np.float64, constrained.dtype))
# Nothing to do for transition probabilities if TVTP
if self.tvtp:
constrained[self.parameters['regime_transition']] = (
unconstrained[self.parameters['regime_transition']])
# Otherwise do logistic transformation
else:
# Transition probabilities
for i in range(self.k_regimes):
tmp1 = unconstrained[self.parameters[i, 'regime_transition']]
tmp2 = np.r_[0, tmp1]
constrained[self.parameters[i, 'regime_transition']] = np.exp(
tmp1 - logsumexp(tmp2))
# Do not do anything for the rest of the parameters
return constrained
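    # Added note (not from the original source): for k_regimes = 2 the transform
    # above reduces to a plain logistic function.  With a single unconstrained
    # value u for row i, tmp2 = [0, u] and
    #     constrained = exp(u - logsumexp([0, u])) = exp(u) / (1 + exp(u)),
    # so u = 0 maps to 0.5 and u = 2 maps to roughly 0.881; the remaining
    # probability in the row (here 1 - 0.881) is implied by the constraint that
    # each row of the transition matrix sums to one.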
def _untransform_logistic(self, unconstrained, constrained):
"""
Function to allow using a numerical root-finder to reverse the
logistic transform.
"""
resid = np.zeros(unconstrained.shape, dtype=unconstrained.dtype)
exp = np.exp(unconstrained)
sum_exp = np.sum(exp)
for i in range(len(unconstrained)):
resid[i] = (unconstrained[i] -
np.log(1 + sum_exp - exp[i]) +
np.log(1 / constrained[i] - 1))
return resid
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
            Array of constrained parameters used in likelihood evaluation, to be
transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
Notes
-----
In the base class, this only untransforms the transition-probability-
related parameters.
"""
unconstrained = np.array(constrained, copy=True)
unconstrained = unconstrained.astype(
np.promote_types(np.float64, unconstrained.dtype))
# Nothing to do for transition probabilities if TVTP
if self.tvtp:
unconstrained[self.parameters['regime_transition']] = (
constrained[self.parameters['regime_transition']])
# Otherwise reverse logistic transformation
else:
for i in range(self.k_regimes):
s = self.parameters[i, 'regime_transition']
if self.k_regimes == 2:
unconstrained[s] = -np.log(1. / constrained[s] - 1)
else:
from scipy.optimize import root
out = root(self._untransform_logistic,
np.zeros(unconstrained[s].shape,
unconstrained.dtype),
args=(constrained[s],))
if not out['success']:
raise ValueError('Could not untransform parameters.')
unconstrained[s] = out['x']
# Do not do anything for the rest of the parameters
return unconstrained
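    # Added note (not from the original source): for k_regimes = 2 the inverse
    # mapping above is just the logit, u = -log(1 / p - 1), so p = 0.881 maps
    # back to roughly u = 2.  For three or more regimes there is no closed form,
    # which is why `_untransform_logistic` is inverted numerically with
    # scipy.optimize.root.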
class HamiltonFilterResults(object):
"""
Results from applying the Hamilton filter to a state space model.
Parameters
----------
model : Representation
A Statespace representation
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_regimes : int
The number of unobserved regimes.
regime_transition : array
The regime transition matrix.
initialization : str
Initialization method for regime probabilities.
initial_probabilities : array
Initial regime probabilities
conditional_likelihoods : array
The likelihood values at each time period, conditional on regime.
predicted_joint_probabilities : array
Predicted joint probabilities at each time period.
filtered_marginal_probabilities : array
Filtered marginal probabilities at each time period.
filtered_joint_probabilities : array
Filtered joint probabilities at each time period.
joint_likelihoods : array
The likelihood values at each time period.
llf_obs : array
The loglikelihood values at each time period.
"""
def __init__(self, model, result):
self.model = model
self.nobs = model.nobs
self.order = model.order
self.k_regimes = model.k_regimes
attributes = ['regime_transition', 'initial_probabilities',
'conditional_likelihoods',
'predicted_joint_probabilities',
'filtered_marginal_probabilities',
'filtered_joint_probabilities',
'joint_likelihoods']
for name in attributes:
setattr(self, name, getattr(result, name))
self.initialization = model._initialization
self.llf_obs = np.log(self.joint_likelihoods)
self.llf = np.sum(self.llf_obs)
# Subset transition if necessary (e.g. for Markov autoregression)
if self.regime_transition.shape[-1] > 1 and self.order > 0:
self.regime_transition = self.regime_transition[..., self.order:]
# Cache for predicted marginal probabilities
self._predicted_marginal_probabilities = None
@property
def predicted_marginal_probabilities(self):
if self._predicted_marginal_probabilities is None:
self._predicted_marginal_probabilities = (
self.predicted_joint_probabilities)
for i in range(self._predicted_marginal_probabilities.ndim - 2):
self._predicted_marginal_probabilities = np.sum(
self._predicted_marginal_probabilities, axis=-2)
return self._predicted_marginal_probabilities
@property
def expected_durations(self):
"""
(array) Expected duration of a regime, possibly time-varying.
"""
return 1. / (1 - np.diagonal(self.regime_transition).squeeze())
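    # Added note (not from the original source): with a constant transition
    # matrix the time spent in regime i is geometric with continuation
    # probability p_ii, so the expected duration is 1 / (1 - p_ii); for example
    # p_ii = 0.9 gives an expected duration of 10 periods and p_ii = 0.95 gives 20.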
class KimSmootherResults(HamiltonFilterResults):
"""
Results from applying the Kim smoother to a Markov switching model.
Parameters
----------
model : MarkovSwitchingModel
The model object.
result : dict
        A dictionary containing two keys: 'smoothed_joint_probabilities' and
'smoothed_marginal_probabilities'.
Attributes
----------
nobs : int
Number of observations.
k_endog : int
The dimension of the observation series.
k_states : int
The dimension of the unobserved state process.
"""
def __init__(self, model, result):
super(KimSmootherResults, self).__init__(model, result)
attributes = ['smoothed_joint_probabilities',
'smoothed_marginal_probabilities']
for name in attributes:
setattr(self, name, getattr(result, name))
class MarkovSwitchingResults(tsbase.TimeSeriesModelResults):
r"""
Class to hold results from fitting a Markov switching model
Parameters
----------
model : MarkovSwitching instance
The fitted model instance
params : array
Fitted parameters
filter_results : HamiltonFilterResults or KimSmootherResults instance
The underlying filter and, optionally, smoother output
cov_type : string
The type of covariance matrix estimator to use. Can be one of 'approx',
'opg', 'robust', or 'none'.
Attributes
----------
model : Model instance
A reference to the model that was fit.
filter_results : HamiltonFilterResults or KimSmootherResults instance
The underlying filter and, optionally, smoother output
nobs : float
The number of observations used to fit the model.
params : array
The parameters of the model.
scale : float
This is currently set to 1.0 and not used by the model or its results.
"""
use_t = False
def __init__(self, model, params, results, cov_type='opg', cov_kwds=None,
**kwargs):
self.data = model.data
tsbase.TimeSeriesModelResults.__init__(self, model, params,
normalized_cov_params=None,
scale=1.)
# Save the filter / smoother output
self.filter_results = results
if isinstance(results, KimSmootherResults):
self.smoother_results = results
else:
self.smoother_results = None
# Dimensions
self.nobs = model.nobs
self.order = model.order
self.k_regimes = model.k_regimes
# Setup covariance matrix notes dictionary
if not hasattr(self, 'cov_kwds'):
self.cov_kwds = {}
self.cov_type = cov_type
# Setup the cache
self._cache = resettable_cache()
# Handle covariance matrix calculation
if cov_kwds is None:
cov_kwds = {}
self._cov_approx_complex_step = (
cov_kwds.pop('approx_complex_step', True))
self._cov_approx_centered = cov_kwds.pop('approx_centered', False)
try:
self._rank = None
self._get_robustcov_results(cov_type=cov_type, use_self=True,
**cov_kwds)
except np.linalg.LinAlgError:
self._rank = 0
k_params = len(self.params)
self.cov_params_default = np.zeros((k_params, k_params)) * np.nan
self.cov_kwds['cov_type'] = (
                'Covariance matrix could not be calculated: singular'
' information matrix.')
# Copy over arrays
attributes = ['regime_transition', 'initial_probabilities',
'conditional_likelihoods',
'predicted_marginal_probabilities',
'predicted_joint_probabilities',
'filtered_marginal_probabilities',
'filtered_joint_probabilities',
'joint_likelihoods', 'expected_durations']
for name in attributes:
setattr(self, name, getattr(self.filter_results, name))
attributes = ['smoothed_joint_probabilities',
'smoothed_marginal_probabilities']
for name in attributes:
if self.smoother_results is not None:
setattr(self, name, getattr(self.smoother_results, name))
else:
setattr(self, name, None)
# Reshape some arrays to long-format
self.predicted_marginal_probabilities = (
self.predicted_marginal_probabilities.T)
self.filtered_marginal_probabilities = (
self.filtered_marginal_probabilities.T)
if self.smoother_results is not None:
self.smoothed_marginal_probabilities = (
self.smoothed_marginal_probabilities.T)
# Make into Pandas arrays if using Pandas data
if isinstance(self.data, PandasData):
index = self.data.row_labels
if self.expected_durations.ndim > 1:
self.expected_durations = pd.DataFrame(
self.expected_durations, index=index)
self.predicted_marginal_probabilities = pd.DataFrame(
self.predicted_marginal_probabilities, index=index)
self.filtered_marginal_probabilities = pd.DataFrame(
self.filtered_marginal_probabilities, index=index)
if self.smoother_results is not None:
self.smoothed_marginal_probabilities = pd.DataFrame(
self.smoothed_marginal_probabilities, index=index)
def _get_robustcov_results(self, cov_type='opg', **kwargs):
use_self = kwargs.pop('use_self', False)
if use_self:
res = self
else:
raise NotImplementedError
res = self.__class__(
self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
# Set the new covariance type
res.cov_type = cov_type
res.cov_kwds = {}
# Calculate the new covariance matrix
k_params = len(self.params)
if k_params == 0:
res.cov_params_default = np.zeros((0, 0))
res._rank = 0
res.cov_kwds['description'] = 'No parameters estimated.'
elif cov_type == 'none':
res.cov_params_default = np.zeros((k_params, k_params)) * np.nan
res._rank = np.nan
res.cov_kwds['description'] = 'Covariance matrix not calculated.'
elif self.cov_type == 'approx':
res.cov_params_default = res.cov_params_approx
res.cov_kwds['description'] = (
'Covariance matrix calculated using numerical'
' differentiation.')
elif self.cov_type == 'opg':
res.cov_params_default = res.cov_params_opg
res.cov_kwds['description'] = (
'Covariance matrix calculated using the outer product of'
' gradients.'
)
elif self.cov_type == 'robust':
res.cov_params_default = res.cov_params_robust
res.cov_kwds['description'] = (
'Quasi-maximum likelihood covariance matrix used for'
' robustness to some misspecifications; calculated using'
' numerical differentiation.')
else:
raise NotImplementedError('Invalid covariance matrix type.')
return res
@cache_readonly
def aic(self):
"""
(float) Akaike Information Criterion
"""
# return -2*self.llf + 2*self.params.shape[0]
return aic(self.llf, self.nobs, self.params.shape[0])
@cache_readonly
def bic(self):
"""
(float) Bayes Information Criterion
"""
# return -2*self.llf + self.params.shape[0]*np.log(self.nobs)
return bic(self.llf, self.nobs, self.params.shape[0])
@cache_readonly
def cov_params_approx(self):
"""
(array) The variance / covariance matrix. Computed using the numerical
Hessian approximated by complex step or finite differences methods.
"""
evaluated_hessian = self.model.hessian(self.params, transformed=True)
neg_cov, singular_values = pinv_extended(evaluated_hessian)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return -neg_cov
@cache_readonly
def cov_params_opg(self):
"""
(array) The variance / covariance matrix. Computed using the outer
product of gradients method.
"""
score_obs = self.model.score_obs(self.params, transformed=True).T
cov_params, singular_values = pinv_extended(
np.inner(score_obs, score_obs))
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params
@cache_readonly
def cov_params_robust(self):
"""
(array) The QMLE variance / covariance matrix. Computed using the
numerical Hessian as the evaluated hessian.
"""
cov_opg = self.cov_params_opg
evaluated_hessian = self.model.hessian(self.params, transformed=True)
cov_params, singular_values = pinv_extended(
np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian)
)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params
@cache_readonly
def fittedvalues(self):
"""
(array) The predicted values of the model. An (nobs x k_endog) array.
"""
return self.model.predict(self.params)
@cache_readonly
def hqic(self):
"""
(float) Hannan-Quinn Information Criterion
"""
# return -2*self.llf + 2*np.log(np.log(self.nobs))*self.params.shape[0]
return hqic(self.llf, self.nobs, self.params.shape[0])
@cache_readonly
def llf_obs(self):
"""
        (array) The value of the log-likelihood function evaluated at `params` for each observation.
"""
return self.model.loglikeobs(self.params)
@cache_readonly
def llf(self):
"""
(float) The value of the log-likelihood function evaluated at `params`.
"""
return self.model.loglike(self.params)
@cache_readonly
def resid(self):
"""
(array) The model residuals. An (nobs x k_endog) array.
"""
return self.model.endog - self.fittedvalues
def predict(self, start=None, end=None, probabilities=None,
conditional=False):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
probabilities : string or array_like, optional
Specifies the weighting probabilities used in constructing the
prediction as a weighted average. If a string, can be 'predicted',
'filtered', or 'smoothed'. Otherwise can be an array of
probabilities to use. Default is smoothed.
conditional: boolean or int, optional
Whether or not to return predictions conditional on current or
past regimes. If False, returns a single vector of weighted
predictions. If True or 1, returns predictions conditional on the
current regime. For larger integers, returns predictions
conditional on the current regime and some number of past regimes.
Returns
-------
predict : array
            Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict x k_endog) array.
"""
return self.model.predict(self.params, start=start, end=end,
probabilities=probabilities,
conditional=conditional)
def forecast(self, steps=1, **kwargs):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, str, or datetime, optional
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency, steps
            must be an integer. Default is 1.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts. A (steps x k_endog) array.
"""
raise NotImplementedError
def summary(self, alpha=.05, start=None, title=None, model_name=None,
display_params=True):
"""
Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
title : str, optional
The title of the summary table.
model_name : string
The name of the model used. Default is to use model class name.
display_params : boolean, optional
Whether or not to display tables of estimated parameters. Default
is True. Usually only used internally.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
# Model specification results
model = self.model
if title is None:
title = 'Markov Switching Model Results'
if start is None:
start = 0
if self.data.dates is not None:
dates = self.data.dates
d = dates[start]
sample = ['%02d-%02d-%02d' % (d.month, d.day, d.year)]
d = dates[-1]
sample += ['- ' + '%02d-%02d-%02d' % (d.month, d.day, d.year)]
else:
sample = [str(start), ' - ' + str(self.model.nobs)]
# Standardize the model name as a list of str
if model_name is None:
model_name = model.__class__.__name__
# Create the tables
if not isinstance(model_name, list):
model_name = [model_name]
top_left = [('Dep. Variable:', None)]
top_left.append(('Model:', [model_name[0]]))
for i in range(1, len(model_name)):
top_left.append(('', ['+ ' + model_name[i]]))
top_left += [
('Date:', None),
('Time:', None),
('Sample:', [sample[0]]),
('', [sample[1]])
]
top_right = [
('No. Observations:', [self.model.nobs]),
('Log Likelihood', ["%#5.3f" % self.llf]),
('AIC', ["%#5.3f" % self.aic]),
('BIC', ["%#5.3f" % self.bic]),
('HQIC', ["%#5.3f" % self.hqic])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
summary = Summary()
summary.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
# Make parameters tables for each regime
from statsmodels.iolib.summary import summary_params
import re
def make_table(self, mask, title, strip_end=True):
res = (self, self.params[mask], self.bse[mask],
self.tvalues[mask], self.pvalues[mask],
self.conf_int(alpha)[mask])
param_names = [
                re.sub(r'\[\d+\]$', '', name) for name in
np.array(self.data.param_names)[mask].tolist()
]
return summary_params(res, yname=None, xname=param_names,
alpha=alpha, use_t=False, title=title)
params = model.parameters
regime_masks = [[] for i in range(model.k_regimes)]
other_masks = {}
for key, switching in params.switching.items():
k_params = len(switching)
if key == 'regime_transition':
continue
other_masks[key] = []
for i in range(k_params):
if switching[i]:
for j in range(self.k_regimes):
regime_masks[j].append(params[j, key][i])
else:
other_masks[key].append(params[0, key][i])
for i in range(self.k_regimes):
mask = regime_masks[i]
if len(mask) > 0:
table = make_table(self, mask, 'Regime %d parameters' % i)
summary.tables.append(table)
mask = []
for key, _mask in other_masks.items():
mask.extend(_mask)
if len(mask) > 0:
table = make_table(self, mask, 'Non-switching parameters')
summary.tables.append(table)
# Transition parameters
mask = params['regime_transition']
table = make_table(self, mask, 'Regime transition parameters')
summary.tables.append(table)
# Add warnings/notes, added to text format only
etext = []
if hasattr(self, 'cov_type') and 'description' in self.cov_kwds:
etext.append(self.cov_kwds['description'])
if self._rank < len(self.params):
etext.append("Covariance matrix is singular or near-singular,"
" with condition number %6.3g. Standard errors may be"
" unstable." % np.linalg.cond(self.cov_params()))
if etext:
etext = ["[{0}] {1}".format(i + 1, text)
for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
summary.add_extra_txt(etext)
return summary
class MarkovSwitchingResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'cov_params_approx': 'cov',
'cov_params_default': 'cov',
'cov_params_opg': 'cov',
'cov_params_robust': 'cov',
}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {
'forecast': 'dates',
}
_wrap_methods = wrap.union_dicts(
tsbase.TimeSeriesResultsWrapper._wrap_methods, _methods)
wrap.populate_wrapper(MarkovSwitchingResultsWrapper, MarkovSwitchingResults)
| 38.530131 | 79 | 0.607725 |
7cd77f5505bcc4d14b703fb11fb347a93ad28fc4 | 1,544 | py | Python
| ejabberd/tests/namecoding.py | ismailqau/xid | e084f04b23c17ad0afc073f1d5ed72d236109832 | ["MIT"] | 9 | 2019-02-14T15:21:55.000Z | 2021-11-09T17:06:58.000Z
| ejabberd/tests/namecoding.py | ismailqau/xid | e084f04b23c17ad0afc073f1d5ed72d236109832 | ["MIT"] | 5 | 2019-02-27T16:10:37.000Z | 2020-04-19T10:07:21.000Z
| ejabberd/tests/namecoding.py | ismailqau/xid | e084f04b23c17ad0afc073f1d5ed72d236109832 | ["MIT"] | 5 | 2020-05-01T13:47:02.000Z | 2022-02-17T18:08:45.000Z |
#!/usr/bin/env python3
# coding=utf-8
# Copyright (C) 2019-2020 The Xaya developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from xidauth import EjabberdXidAuth
import logging
import sys
import unittest
class NameCodingTest (unittest.TestCase):
def setUp (self):
self.auth = EjabberdXidAuth ({}, "http://localhost",
logging.StreamHandler (sys.stderr))
def testSimpleNames (self):
simpleNames = ["domob", "0", "foo42bar", "xxx"]
for n in simpleNames:
self.assertEqual (self.auth.decodeXmppName (n), n)
def testEncodedNames (self):
names = {
"x-": "",
"x-782d666f6f": "x-foo",
"x-c3a4c3b6c3bc": u"äöü",
"x-466f6f20426172": "Foo Bar",
}
for enc, nm in names.items ():
self.assertEqual (self.auth.decodeXmppName (enc), nm)
def testInvalidNames (self):
invalid = [
# Empty string is invalid (should be hex-encoded).
"",
# Invalid characters for simple names.
"domob foobar", "Abc", "abc.def", "no-dash", "dom\nob", u"äöü",
# Invalid hex characters (including upper case for otherwise valid name).
"x-x", "x-2D", "x-\nabc",
# Hex-encoded name that is actually simple.
"x-616263",
# Hex-encoded name with an odd number of characters.
"x-a",
]
for n in invalid:
self.assertEqual (self.auth.decodeXmppName (n), None)
if __name__ == "__main__":
unittest.main ()
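# Added illustration (not part of the original test file): a minimal sketch of
# the decoding rules that the tests above imply.  The real implementation in
# xidauth's EjabberdXidAuth.decodeXmppName may differ; this helper only exists
# to make the expected behaviour explicit.
def decodeXmppNameSketch (name):
  """Return the decoded name, or None if `name` is not a valid encoding."""
  import re
  if re.match (r"^[a-z0-9]+$", name):
    # "Simple" names (lower-case alphanumerics) pass through unchanged.
    return name
  if name.startswith ("x-"):
    hexPart = name[2:]
    if len (hexPart) % 2 != 0 or re.search (r"[^0-9a-f]", hexPart):
      return None
    try:
      decoded = bytes.fromhex (hexPart).decode ("utf-8")
    except ValueError:
      return None
    if decoded != "" and re.match (r"^[a-z0-9]+$", decoded):
      # Hex-encoding a name that is already simple is not canonical.
      return None
    return decoded
  return None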
| 26.169492 | 79 | 0.630181 |
7e55416b86182567f1c0e9a6a9eb0d4bc9ce326b | 2,182 | py | Python
| zolware_data/models/datasource.py | zolware/zolware_data | 5d3e5d654e1282c04225e07ec7897a1d54fe7bb5 | ["MIT"] | null | null | null
| zolware_data/models/datasource.py | zolware/zolware_data | 5d3e5d654e1282c04225e07ec7897a1d54fe7bb5 | ["MIT"] | null | null | null
| zolware_data/models/datasource.py | zolware/zolware_data | 5d3e5d654e1282c04225e07ec7897a1d54fe7bb5 | ["MIT"] | null | null | null |
import requests
import json
from bson.objectid import ObjectId
from zolware_data import signal_manager
from zolware_data.models import signal
from zolware_data import config
class Datasource:
def __init__(self, user, datasource=None):
self.signals = []
self.user = user
if datasource is not None:
self.id = datasource["_id"]
self.name = datasource["name"]
self.description = datasource["description"]
self.dt = datasource["dt"]
self.file_line_cursor = datasource["file_line_cursor"]
self.file_data_col_names = datasource["file_data_col_names"]
self.file_uri = datasource["file_uri"]
self.data_source = datasource["data_source"]
self.status = datasource["status"]
def fetch(self, user, datasource_id):
headers = self.__construct_headers__()
url = config.api_endpoint + '/datasources/' + datasource_id
data = {}
res = requests.get(url, data=data, headers=headers)
if res.ok:
self.datasource = res.json()['datasource']
else:
self.datasource = None
def get_signals(self):
signal_array = []
for sig in self.datasource["signals"]:
signalobject = Datasource.get_signal(sig)
signal_array.append(signalobject)
return signal_array
@staticmethod
def get_signal(signal_id):
signal_id = ObjectId(signal_id)
signalobject = signal.Signal(signal_id)
return signalobject
def populate_signals(self):
headers = self.__construct_headers__()
url = config.api_endpoint + '/datasources/' + self.id + '/signals'
data = {}
res = requests.get(url, data=data, headers=headers)
if res.ok:
signals = res.json()['signals']
for sig in signals:
self.signals.append(signal.Signal(sig))
else:
print(res.status_code)
return []
def __construct_headers__(self):
return {
"content-type": "application/json",
"Authorization": "Bearer " + self.user.token
}
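# Added usage sketch (not part of the original module).  The `user` object and
# the datasource id below are hypothetical; `user` is assumed to expose a
# `token` attribute and `config.api_endpoint` to point at the Zolware REST API:
#
#     user = ...                                  # hypothetical authenticated user
#     ds = Datasource(user)
#     ds.fetch(user, "some_datasource_id")        # loads the raw datasource record
#     ds.populate_signals()                       # fills ds.signals with Signal objects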
| 32.567164 | 74 | 0.609533 |
8b7101aefe16d5a9593e362e0821775afeb917bf | 4,855 | py | Python
| code/apps/Managed Software Center/Managed Software Center/MSCBadgedTemplateImage.py | dannooooo/munki | 3c7cc64a0659e2d5def8dd455a89dd3edf67cfd9 | ["Apache-2.0"] | 1 | 2018-07-25T21:29:43.000Z | 2018-07-25T21:29:43.000Z
| code/apps/Managed Software Center/Managed Software Center/MSCBadgedTemplateImage.py | bruienne/munki | 55936d96ed2f45ede1469873836d61596486020a | ["Apache-2.0"] | null | null | null
| code/apps/Managed Software Center/Managed Software Center/MSCBadgedTemplateImage.py | bruienne/munki | 55936d96ed2f45ede1469873836d61596486020a | ["Apache-2.0"] | null | null | null |
# encoding: utf-8
#
# MSCBadgedTemplateImage.py
# Managed Software Center
#
# Copyright 2014-2016 Greg Neagle.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Foundation import *
from AppKit import *
class MSCBadgedTemplateImage(NSImage):
'''Subclass to handle our updates template image with a badge showing the count
of available updates'''
@classmethod
def imageNamed_withCount_(self, name, count):
'''Returns a template image with a count badge composited in the upper-right
corner of the image'''
# some magic values
NSBoldFontMask = 2
badgeFontSize = 11
badgeFontFamilyName = u'Helvetica'
rrRadius = 7.0
if count == 0:
# no badge if there are no updates
return super(MSCBadgedTemplateImage, self).imageNamed_(name)
# build badge string and get its size
badgeString = NSString.stringWithString_(unicode(count))
badgeFont = NSFontManager.sharedFontManager().fontWithFamily_traits_weight_size_(
badgeFontFamilyName, NSBoldFontMask, 0, badgeFontSize)
stringAttributes = { NSFontAttributeName: badgeFont }
textSize = badgeString.sizeWithAttributes_(stringAttributes)
# use textSize as the basis for the badge outline rect
badgeOutlineHeight = textSize.height
badgeOutlineWidth = textSize.width + rrRadius
if textSize.height > badgeOutlineWidth:
badgeOutlineWidth = badgeOutlineHeight
# get our base image
baseImage = super(MSCBadgedTemplateImage, self).imageNamed_(name).copy()
# size our composite image large enough to include the badge
compositeImageSize = NSMakeSize(baseImage.size().width + badgeOutlineHeight,
baseImage.size().height + badgeOutlineHeight)
# layout the rect for the text
badgeStringRect = NSMakeRect(compositeImageSize.width - textSize.width,
compositeImageSize.height - textSize.height,
textSize.width, textSize.height)
# layout the rect for the badge outline
badgeOutlineRect = NSMakeRect(compositeImageSize.width - badgeOutlineWidth,
compositeImageSize.height - badgeOutlineHeight,
badgeOutlineWidth, badgeOutlineHeight)
# shift the rects around to look better. These are magic numbers.
badgeStringRect = NSOffsetRect(badgeStringRect, -4.75, -2)
badgeOutlineRect = NSOffsetRect(badgeOutlineRect, -1, -5)
# our erase rect needs to be a little bigger than the badge itself
badgeEraseRect = NSInsetRect(badgeOutlineRect, -1.5, -1.5)
# build paths for the badge outline and the badge erase mask
badgeOutline = NSBezierPath.bezierPathWithRoundedRect_xRadius_yRadius_(
badgeOutlineRect, rrRadius, rrRadius)
badgeEraseMask = NSBezierPath.bezierPathWithRoundedRect_xRadius_yRadius_(
badgeEraseRect, rrRadius, rrRadius)
# start drawing our composite image
compositeImage = NSImage.alloc().initWithSize_(compositeImageSize)
compositeImage.lockFocus()
# draw base image
baseImageOrigin = NSMakePoint(badgeOutlineHeight/2, badgeOutlineHeight/2)
baseImage.drawAtPoint_fromRect_operation_fraction_(
baseImageOrigin, NSZeroRect, NSCompositeCopy, 1.0)
# erase the part that the badge will be drawn over
NSGraphicsContext.saveGraphicsState()
NSGraphicsContext.currentContext().setCompositingOperation_(NSCompositeCopy)
NSColor.blackColor().colorWithAlphaComponent_(0.0).setFill()
badgeEraseMask.fill()
NSGraphicsContext.restoreGraphicsState()
# draw badge outline
badgeOutline.stroke()
# draw count string
badgeString.drawWithRect_options_attributes_(badgeStringRect, 0, stringAttributes)
# all done drawing!
compositeImage.unlockFocus()
compositeImage.setTemplate_(True)
return compositeImage
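# Added usage note (not from the original source); the image name below is
# hypothetical:
#
#     image = MSCBadgedTemplateImage.imageNamed_withCount_(u'updatesTemplate', 3)
#
# With count == 0 the plain template image is returned unchanged; otherwise the
# count is drawn inside a rounded-rectangle badge composited into the
# upper-right corner of the (slightly enlarged) template image.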
| 44.541284 | 90 | 0.657673 |
6d3c24f99d3cca6ed0c708c1fd6d820f29a031cf | 2,171 | py | Python
| architecture/face/dcgan.py | kad99kev/FGTD-Streamlit | 0dc8d2894eadf2260d5e5dcf10ead12ff62f6cd8 | ["MIT"] | null | null | null
| architecture/face/dcgan.py | kad99kev/FGTD-Streamlit | 0dc8d2894eadf2260d5e5dcf10ead12ff62f6cd8 | ["MIT"] | null | null | null
| architecture/face/dcgan.py | kad99kev/FGTD-Streamlit | 0dc8d2894eadf2260d5e5dcf10ead12ff62f6cd8 | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
class DCGAN(nn.Module):
def __init__(
self,
noise_size=100,
feature_size=128,
num_channels=3,
embedding_size=768,
reduced_dim_size=256,
):
super(DCGAN, self).__init__()
self.reduced_dim_size = reduced_dim_size
self.projection = nn.Sequential(
nn.Linear(in_features=embedding_size, out_features=reduced_dim_size),
nn.BatchNorm1d(num_features=reduced_dim_size),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
self.layer = nn.Sequential(
nn.ConvTranspose2d(
noise_size + reduced_dim_size, feature_size * 8, 4, 1, 0, bias=False
),
nn.BatchNorm2d(feature_size * 8),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
            # state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(feature_size * 8, feature_size * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(feature_size * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(feature_size * 4, feature_size * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(feature_size * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(feature_size * 2, feature_size, 4, 2, 1, bias=False),
nn.BatchNorm2d(feature_size),
nn.ReLU(True),
            # state size. (ngf) x 32 x 32
nn.ConvTranspose2d(feature_size, feature_size, 4, 2, 1, bias=False),
nn.BatchNorm2d(feature_size),
nn.ReLU(True),
# state size. (ngf) x 64 x 64
nn.ConvTranspose2d(feature_size, num_channels, 4, 2, 1, bias=False),
nn.Tanh(),
)
self.optimizer = torch.optim.Adam(
self.parameters(), lr=0.0002, betas=(0.5, 0.5)
)
def forward(self, noise, text_embeddings):
encoded_text = self.projection(text_embeddings)
concat_input = torch.cat([noise, encoded_text], dim=1).unsqueeze(2).unsqueeze(2)
output = self.layer(concat_input)
return output
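# Added illustration (not part of the original module): a minimal shape check
# assuming the default constructor arguments above.  The generator maps a
# 100-dim noise vector plus a 768-dim sentence embedding to a 3 x 128 x 128
# image (spatial size 1 -> 4 -> 8 -> 16 -> 32 -> 64 -> 128 through the
# transposed convolutions).
if __name__ == "__main__":
    generator = DCGAN()
    noise = torch.randn(8, 100)           # hypothetical batch of 8 noise vectors
    embeddings = torch.randn(8, 768)      # hypothetical sentence embeddings
    images = generator(noise, embeddings)
    print(images.shape)                   # expected: torch.Size([8, 3, 128, 128])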
| 37.431034 | 88 | 0.578075 |
956052869a0dfcbc18fed41bc11fed1b53dae2a6 | 4,645 | py | Python
| tests/github/python_native_tf.py | brandon-edwards/openfl | 9e0521252253eab09571dc2be40f46bfeaf9746a | ["Apache-2.0"] | 297 | 2021-01-13T08:49:35.000Z | 2022-03-31T15:06:43.000Z
| tests/github/python_native_tf.py | brandon-edwards/openfl | 9e0521252253eab09571dc2be40f46bfeaf9746a | ["Apache-2.0"] | 265 | 2021-02-02T09:57:33.000Z | 2022-03-30T22:51:55.000Z
| tests/github/python_native_tf.py | brandon-edwards/openfl | 9e0521252253eab09571dc2be40f46bfeaf9746a | ["Apache-2.0"] | 81 | 2021-01-18T07:52:36.000Z | 2022-03-26T18:55:54.000Z |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Python native tests."""
import numpy as np
import openfl.native as fx
def one_hot(labels, classes):
"""
One Hot encode a vector.
Args:
labels (list): List of labels to onehot encode
classes (int): Total number of categorical classes
Returns:
np.array: Matrix of one-hot encoded labels
"""
return np.eye(classes)[labels]
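# Added example (not part of the original file):
#     one_hot([0, 2], 3) -> [[1., 0., 0.],
#                            [0., 0., 1.]]
# i.e. row t is the indicator vector of label t, which is exactly what
# np.eye(classes)[labels] produces.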
def build_model(input_shape,
num_classes,
conv_kernel_size=(4, 4),
conv_strides=(2, 2),
conv1_channels_out=16,
conv2_channels_out=32,
final_dense_inputsize=100,
**kwargs):
"""
Define the model architecture.
Args:
input_shape (numpy.ndarray): The shape of the data
num_classes (int): The number of classes of the dataset
Returns:
tensorflow.python.keras.engine.sequential.Sequential: The model defined in Keras
"""
import tensorflow as tf # NOQA
import tensorflow.keras as ke # NOQA
from tensorflow.keras import Sequential # NOQA
from tensorflow.keras.layers import Conv2D, Flatten, Dense # NOQA
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
config.intra_op_parallelism_threads = 112
config.inter_op_parallelism_threads = 1
sess = tf.compat.v1.Session(config=config)
model = Sequential()
model.add(Conv2D(conv1_channels_out,
kernel_size=conv_kernel_size,
strides=conv_strides,
activation='relu',
input_shape=input_shape))
model.add(Conv2D(conv2_channels_out,
kernel_size=conv_kernel_size,
strides=conv_strides,
activation='relu'))
model.add(Flatten())
model.add(Dense(final_dense_inputsize, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=ke.losses.categorical_crossentropy,
optimizer=ke.optimizers.Adam(),
metrics=['accuracy'])
# initialize the optimizer variables
opt_vars = model.optimizer.variables()
for v in opt_vars:
v.initializer.run(session=sess)
return model
if __name__ == '__main__':
fx.init('keras_cnn_mnist')
from openfl.federated import FederatedDataSet
from openfl.federated import FederatedModel
from tensorflow.python.keras.utils.data_utils import get_file
origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'
path = get_file('mnist.npz',
origin=origin_folder + 'mnist.npz',
file_hash='731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')
with np.load(path) as f:
# get all of mnist
X_train = f['x_train']
y_train = f['y_train']
X_valid = f['x_test']
y_valid = f['y_test']
img_rows, img_cols = 28, 28
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_valid = X_valid.reshape(X_valid.shape[0], img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_valid = X_valid.astype('float32')
X_train /= 255
X_valid /= 255
classes = 10
y_train = one_hot(y_train, classes)
y_valid = one_hot(y_valid, classes)
feature_shape = X_train.shape[1]
fl_data = FederatedDataSet(X_train, y_train, X_valid, y_valid,
batch_size=32, num_classes=classes)
fl_model = FederatedModel(build_model=build_model, data_loader=fl_data)
collaborator_models = fl_model.setup(num_collaborators=2)
collaborators = {'one': collaborator_models[0], 'two': collaborator_models[1]}
print(f'Original training data size: {len(X_train)}')
print(f'Original validation data size: {len(X_valid)}\n')
# Collaborator one's data
print(f'Collaborator one\'s training data size: '
f'{len(collaborator_models[0].data_loader.X_train)}')
print(f'Collaborator one\'s validation data size: '
f'{len(collaborator_models[0].data_loader.X_valid)}\n')
# Collaborator two's data
print(f'Collaborator two\'s training data size: '
f'{len(collaborator_models[1].data_loader.X_train)}')
print(f'Collaborator two\'s validation data size: '
f'{len(collaborator_models[1].data_loader.X_valid)}\n')
print(fx.get_plan())
final_fl_model = fx.run_experiment(collaborators, {'aggregator.settings.rounds_to_train': 5})
final_fl_model.save_native('final_pytorch_model.h5')
| 32.943262 | 97 | 0.654252 |
46741dc98d35375c8a1d01fc15147ebfd6d18d27 | 3,948 | py | Python
| high_performance_pyspark/bad_pyspark.py | fouradam/high-performance-spark-examples | 877234d402b7a78c312c481985f57fb285a5ac8b | ["Apache-2.0"] | 504 | 2015-10-06T16:42:42.000Z | 2022-02-01T02:56:52.000Z
| high_performance_pyspark/bad_pyspark.py | pengshuangbao/high-performance-spark-examples | aa835d8cad02befe440a24f802eac5469bb40bbb | ["Apache-2.0"] | 53 | 2015-12-21T21:58:16.000Z | 2021-04-02T14:05:37.000Z
| high_performance_pyspark/bad_pyspark.py | pengshuangbao/high-performance-spark-examples | aa835d8cad02befe440a24f802eac5469bb40bbb | ["Apache-2.0"] | 232 | 2015-10-06T04:23:13.000Z | 2022-03-25T16:48:41.000Z |
# This script triggers a number of different PySpark errors
from pyspark import *
from pyspark.sql.session import SparkSession
global sc
def nonExistentInput(sc):
"""
Attempt to load non existent input
>>> nonExistentInput(sc)
Traceback (most recent call last):
...
Py4JJavaError:...
"""
# tag::nonExistent[]
failedRdd = sc.textFile("file:///doesnotexist")
failedRdd.count()
# end::nonExistent[]
def throwOuter(sc):
"""
    Trigger a divide-by-zero error in the outer (second) transformation
>>> throwOuter(sc)
Traceback (most recent call last):
...
Py4JJavaError:...
"""
# tag::throwOuter[]
data = sc.parallelize(range(10))
transform1 = data.map(lambda x: x + 1)
transform2 = transform1.map(lambda x: x / 0)
transform2.count()
# end::throwOuter[]
def throwInner(sc):
"""
    Trigger a divide-by-zero error in the inner (first) transformation
>>> throwInner(sc)
Traceback (most recent call last):
...
Py4JJavaError:...
"""
# tag::throwInner[]
data = sc.parallelize(range(10))
transform1 = data.map(lambda x: x / 0)
transform2 = transform1.map(lambda x: x + 1)
transform2.count()
# end::throwInner[]
# tag::rewrite[]
def add1(x):
"""
Add 1
>>> add1(2)
3
"""
return x + 1
def divZero(x):
"""
Divide by zero (cause an error)
>>> divZero(2)
Traceback (most recent call last):
...
ZeroDivisionError: integer division or modulo by zero
"""
return x / 0
def throwOuter2(sc):
"""
    Trigger a divide-by-zero error in the outer (second) transformation, using named functions
>>> throwOuter2(sc)
Traceback (most recent call last):
...
Py4JJavaError:...
"""
data = sc.parallelize(range(10))
transform1 = data.map(add1)
transform2 = transform1.map(divZero)
transform2.count()
def throwInner2(sc):
"""
    Trigger a divide-by-zero error in the inner (first) transformation, using named functions
>>> throwInner2(sc)
Traceback (most recent call last):
...
Py4JJavaError:...
"""
data = sc.parallelize(range(10))
transform1 = data.map(divZero)
transform2 = transform1.map(add1)
transform2.count()
# end::rewrite[]
def throwInner3(sc):
"""
    Catch divide-by-zero errors inside the transformation and count rejected records with an accumulator
>>> throwInner3(sc)
Reject 10
"""
data = sc.parallelize(range(10))
rejectedCount = sc.accumulator(0)
def loggedDivZero(x):
import logging
try:
return [x / 0]
except Exception as e:
rejectedCount.add(1)
logging.warning("Error found " + repr(e))
return []
transform1 = data.flatMap(loggedDivZero)
transform2 = transform1.map(add1)
transform2.count()
print("Reject " + str(rejectedCount.value))
def runOutOfMemory(sc):
"""
Run out of memory on the workers.
In standalone modes results in a memory error, but in YARN may trigger YARN container
overhead errors.
>>> runOutOfMemory(sc)
Traceback (most recent call last):
...
Py4JJavaError:...
"""
# tag::worker_oom[]
data = sc.parallelize(range(10))
def generate_too_much(itr):
return range(10000000000000)
itr = data.flatMap(generate_too_much)
itr.count()
# end::worker_oom[]
def _setupTest():
globs = globals()
spark = SparkSession.builder \
.master("local[4]") \
.getOrCreate()
sc = spark._sc
globs['sc'] = sc
return globs
def _test():
"""
Run the tests.
    Note this will print a lot of error messages to stderr since we don't capture the JVM subprocess
stdout/stderr for doctests.
"""
import doctest
    globs = _setupTest()
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
import sys
if __name__ == "__main__":
_test()
# Hack to support running in nose
elif sys.stdout != sys.__stdout__:
_setupTest()
| 23.927273 | 100 | 0.604357 |
0bf3ba75b24671016890cb5f061ad313534cc798 | 26,392 | py | Python
| lib/translations.py | alindt/wttr.in | f8e7aef267c720dec0303c8e31091815f8e51a40 | ["Apache-2.0"] | null | null | null
| lib/translations.py | alindt/wttr.in | f8e7aef267c720dec0303c8e31091815f8e51a40 | ["Apache-2.0"] | null | null | null
| lib/translations.py | alindt/wttr.in | f8e7aef267c720dec0303c8e31091815f8e51a40 | ["Apache-2.0"] | null | null | null |
# vim: fileencoding=utf-8
"""
Translation of almost everything.
"""
FULL_TRANSLATION = [
"af", "da", "de", "fr", "fa", "id", "it", "nb", "nl", "pl", "ru",
]
PARTIAL_TRANSLATION = [
"az", "be", "bg", "bs", "ca", "cy", "cs",
"el", "eo", "es", "et", "fi",
"hi", "hr", "hu", "hy", "is",
"ja", "jv", "ka", "kk", "ko", "ky", "lt",
"lv", "mk", "ml", "nl", "nn", "pt",
"ro", "sk", "sl", "sr", "sr-lat",
"sv", "sw", "th", "tr", "te", "uk", "uz", "vi",
"zh", "zu",
"he",
]
PROXY_LANGS = [
'az', 'be', 'bs', 'ca', 'cy', 'eo', 'fa',
'he', 'hr', 'hy', 'id', 'is', 'it', 'ja',
'kk', 'lv', 'mk', 'nb', 'nn', 'sl', 'uz'
]
SUPPORTED_LANGS = FULL_TRANSLATION + PARTIAL_TRANSLATION
MESSAGE = {
'NOT_FOUND_MESSAGE': {
'en': u"""
We were unable to find your location
so we have brought you to Oymyakon,
one of the coldest permanently inhabited locales on the planet.
""",
'af': u"""
Ons kon nie u ligging opspoor nie
gevolglik het ons vir u na Oymyakon geneem,
een van die koudste permanent bewoonde plekke op aarde.
""",
'be': u"""
Ваша месцазнаходжанне вызначыць не атрымалася,
таму мы пакажам вам надвор'е ў Аймяконе,
самым халодным населеным пункце на планеце.
Будзем спадзявацца, што ў вас сёння надвор'е лепей!
""",
'bs': u"""
Nismo mogli pronaći vašu lokaciju,
tako da smo te doveli do Oymyakon,
jedan od najhladnijih stalno naseljena mjesta na planeti.
Nadamo se da ćete imati bolje vreme!
""",
'ca': u"""
Hem estat incapaços de trobar la seva ubicació,
és per aquest motiu que l'hem portat fins Oymyakon,
un dels llocs més freds inhabitats de manera permanent al planeta.
""",
'cs': u"""
Nepodařilo se nám najít vaši polohu,
takže jsme vás přivedl do Ojmjakonu.
Je to jedno z nejchladnějších trvale obydlených míst na planetě.
Doufáme, že budete mít lepší počasí!
""",
'cy': u"""
Ni darganfyddwyd eich lleoliad,
felly rydym wedi dod â chi i Oymyakon,
un o'r llefydd oeraf ar y blaned ble mae pobl yn dal i fyw!
""",
'de': u"""
Wir konnten Ihren Standort nicht finden,
also haben wir Sie nach Oimjakon gebracht,
einer der kältesten dauerhaft bewohnten Orte auf dem Planeten.
Wir hoffen, dass Sie besseres Wetter haben!
""",
'el': u"""
Δεν μπορέσαμε να βρούμε την τοποθεσία σου,
για αυτό διαλέξαμε το Οϊμιάκον για εσένα,
μία από τις πιο κρύες μόνιμα κατοικημένες περιοχές στον πλανήτη.
Ελπίζουμε να έχεις καλύτερο καιρό!
""",
'es': u"""
No hemos logrado encontrar tu ubicación,
asi que hemos decidido enseñarte el tiempo en Oymyakon,
uno de los sitios más fríos y permanentemente deshabitados del planeta.
""",
'fa': u"""
ما نتونستیم مکان شما رو پیدا کنیم. به همین خاطر شما رو به om بردیم
، یکی از سردترین مکان های روی زمین که اصلا قابل سکونت نیست!
""",
'fi': u"""
Emme löytänyt sijaintiasi, joten toimme sinut Oimjakoniin,
yhteen maailman kylmimmistä pysyvästi asutetuista paikoista.
Toivottavasti sinulla on parempi sää!
""",
'fr': u"""
Nous n'avons pas pu déterminer votre position,
Nous vous avons donc amenés à Oïmiakon,
l'un des endroits les plus froids habités en permanence sur la planète.
Nous espérons qu'il fait meilleur chez vous !
""",
'hy': u"""
Ձեր գտնվելու վայրը չհաջողվեց որոշել,
այդ պատճառով մենք ձեզ կցուցադրենք եղանակը Օյմյակոնում.
երկրագնդի ամենասառը բնակավայրում։
Հույս ունենք որ ձեր եղանակը այսօր ավելի լավն է։
""",
'id': u"""
Kami tidak dapat menemukan lokasi anda,
jadi kami membawa anda ke Oymyakon,
salah satu tempat terdingin yang selalu dihuni di planet ini!
""",
'is': u"""
Við finnum ekki staðsetninguna þína og vísum þér þar með á Ojmjakon,
ein af köldustu byggðum jarðar.
Vonandi er betra veður hjá þér.
""",
'it': u"""
Non siamo riusciti a trovare la sua posizione
quindi la abbiamo portato a Oymyakon,
uno dei luoghi abitualmente abitati più freddi del pianeta.
Ci auguriamo che le condizioni dove lei si trova siano migliori!
""",
'ja': u"""
指定された場所が見つかりませんでした。
代わりにオイミャコンの天気予報を表示しています。
オイミャコンは地球上で最も寒い居住地の一つです。
""",
'ko': u"""
지정된 장소를 찾을 수 없습니다,
대신 오이먀콘의 일기 예보를 표시합니다,
오이먀콘은 지구상에서 가장 추운 곳에 위치한 마을입니다!
""",
'lv': u"""
Mēs nevarējām atrast jūsu atrašanās vietu tādēļ nogādājām jūs Oimjakonā,
vienā no aukstākajām apdzīvotajām vietām uz planētas.
""",
'mk': u"""
Неможевме да ја пронајдеме вашата локација,
затоа ве однесовме во Ојмајкон,
еден од најладните трајно населени места на планетата.
""",
'nb': u"""
Vi kunne ikke finne din lokasjon,
så her får du Ojmjakon, et av de kaldeste bebodde stedene på planeten.
Vi håper været er bedre hos deg!
""",
'nl': u"""
Wij konden uw locatie niet vaststellen
dus hebben we u naar Ojmjakon gebracht,
één van de koudste permanent bewoonde gebieden op deze planeet.
""",
'pt': u"""
Não conseguimos encontrar a sua localização,
então decidimos te mostrar o tempo em Oymyakon,
um dos lugares mais frios e permanentemente desabitados do planeta.
""",
'pl': u"""
Nie udało nam się znaleźć podanej przez Ciebie lokalizacji,
więc zabraliśmy Cię do Ojmiakonu,
jednego z najzimniejszych, stale zamieszkanych miejsc na Ziemi.
Mamy nadzieję, że u Ciebie jest cieplej!
""",
'ro': u"""
Nu v-am putut identifica locația, prin urmare va aratam vremea din Oimiakon,
una dintre cele mai reci localități permanent locuite de pe planetă.
Sperăm că aveți vreme mai bună!
""",
'ru': u"""
Ваше местоположение определить не удалось,
поэтому мы покажем вам погоду в Оймяконе,
самом холодном населённом пункте на планете.
Будем надеяться, что у вас сегодня погода лучше!
""",
'sk': u"""
Nepodarilo sa nám nájsť vašu polohu,
takže sme vás priviedli do Ojmiakonu.
Je to jedno z najchladnejších trvale obývaných miest na planéte.
Dúfame, že budete mať lepšie počasie!
""",
'sr': u"""
Нисмо успели да пронађемо Вашу локацију,
па смо Вас довели у Ојмјакон,
једно од најхладнијих стално насељених места на планети.
Надамо се да је време код Вас боље него што је то случај овде!
""",
'sv': u"""
Vi lyckades inte hitta er plats så vi har istället tagit er till Ojmjakon,
en av planetens kallaste platser med permanent bosättning.
Vi hoppas att vädret är bättre hos dig!
""",
'tr': u"""
Aradığınız konum bulunamadı. O yüzden sizi dünyadaki en soğuk sürekli
yerleşim yerlerinden biri olan Oymyakon'e getirdik.
Umarız sizin olduğunuz yerde havalar daha iyidir!
""",
'te': u"""
మేము మీ స్థానాన్ని కనుగొనలేకపోయాము
కనుక మనం "ఓమాయకాన్కు" తీసుకొని వచ్చాము,
భూమిపై అత్యల్ప శాశ్వతంగా నివసించే స్థానిక ప్రదేశాలలో ఒకటి.
""",
'uk': u"""
Ваше місце розташування визначити не вдалося,
тому ми покажемо вам погоду в Оймяконе,
найхолоднішому населеному пункті на планеті.
Будемо сподіватися, що у вас сьогодні погода краще!
""",
'uz': u"""
Sizning joylashuvingizni aniqlay olmadik,
shuning uchun sizga sayyoramizning eng sovuq aholi punkti - Oymyakondagi ob-havo haqida ma'lumot beramiz.
Umid qilamizki, sizda bugungi ob-havo bundan yaxshiroq!
""",
'da': u"""
Vi kunne desværre ikke finde din lokation
så vi har bragt dig til Oymyakon,
En af koldeste og helt ubolige lokationer på planeten.
""",
},
'UNKNOWN_LOCATION': {
'en': u'Unknown location',
'af': u'Onbekende ligging',
'be': u'Невядомае месцазнаходжанне',
'bs': u'Nepoznatoja lokacija',
'ca': u'Localització desconeguda',
'cs': u'Neznámá poloha',
'cy': u'Lleoliad anhysbys',
'de': u'Unbekannter Ort',
'da': u'Ukendt lokation',
'el': u'Άνγωστη τοποθεσία',
'es': u'Ubicación desconocida',
'fa': u'مکان نامعلوم',
'fi': u'Tuntematon sijainti',
'fr': u'Emplacement inconnu',
'hy': u'Անհայտ գտնվելու վայր',
'id': u'Lokasi tidak diketahui',
'is': u'Óþekkt staðsetning',
'it': u'Località sconosciuta',
'ja': u'未知の場所です',
'ko': u'알 수 없는 장소',
'kk': u'',
'lv': u'Nezināma atrašanās vieta',
'mk': u'Непозната локација',
'nb': u'Ukjent sted',
'nl': u'Onbekende locatie',
'pl': u'Nieznana lokalizacja',
'pt': u'Localização desconhecida',
'ro': u'Locaţie necunoscută',
'ru': u'Неизвестное местоположение',
'sk': u'Neznáma poloha',
'sl': u'Neznano lokacijo',
'sr': u'Непозната локација',
'sv': u'Okänd plats',
'te': u'తెలియని ప్రదేశం',
'tr': u'Bilinmeyen konum',
'ua': u'Невідоме місце',
'uz': u'Аникланмаган худуд',
},
'LOCATION': {
'en': u'Location',
'af': u'Ligging',
'be': u'Месцазнаходжанне',
'bs': u'Lokacija',
'ca': u'Localització',
'cs': u'Poloha',
'cy': u'Lleoliad',
'de': u'Ort',
'da': u'Lokation',
'el': u'Τοποθεσία',
'es': u'Ubicación',
'fa': u'مکان',
'fi': u'Tuntematon sijainti',
'fr': u'Emplacement',
'hy': u'Դիրք',
'id': u'Lokasi',
'is': u'Staðsetning',
'it': u'Località',
'ja': u'位置情報',
'ko': u'위치',
'kk': u'',
'lv': u'Atrašanās vieta',
'mk': u'Локација',
'nb': u'Sted',
'nl': u'Locatie',
'pl': u'Lokalizacja',
'pt': u'Localização',
'ro': u'Locaţie',
'ru': u'Местоположение',
'sk': u'Poloha',
'sl': u'Lokacijo',
'sr': u'Локација',
'sv': u'Plats',
'te': u'స్థానము',
'tr': u'Konum',
'ua': u'Місце',
},
'CAPACITY_LIMIT_REACHED': {
'en': u"""
Sorry, we are running out of queries to the weather service at the moment.
Here is the weather report for the default city (just to show you, how it looks like).
We will get new queries as soon as possible.
You can follow https://twitter.com/igor_chubin for the updates.
======================================================================================
""",
'af': u"""
Verskoning, ons oorskry tans die vermoë om navrae aan die weerdiens te rig.
Hier is die weerberig van 'n voorbeeld ligging (bloot om aan u te wys hoe dit lyk).
Ons sal weereens nuwe navrae kan hanteer so gou as moontlik.
U kan vir https://twitter.com/igor_chubin volg vir opdaterings.
======================================================================================
""",
'be': u"""
Прабачце, мы выйшлі за ліміты колькасці запытаў да службы надвор'я ў дадзены момант.
Вось прагноз надвор'я для горада па змаўчанні (толькі, каб паказаць вам, як гэта выглядае).
Мы вернемся як мага хутчэй.
Вы можаце сачыць на https://twitter.com/igor_chubin за абнаўленнямі.
======================================================================================
""",
'bs': u"""
Žao mi je, mi ponestaje upita i vremenska prognoza u ovom trenutku.
Ovdje je izvještaj o vremenu za default grada (samo da vam pokažem kako to izgleda).
Mi ćemo dobiti nove upite u najkraćem mogućem roku.
Možete pratiti https://twitter.com/igor_chubin za ažuriranja.
======================================================================================
""",
'ca': u"""
Disculpi'ns, ens hem quedat sense consultes al servei meteorològic momentàniament.
Aquí li oferim l'informe del temps a la ciutat per defecte (només per mostrar, quin aspecte té).
Obtindrem noves consultes tan aviat com ens sigui possible.
Pot seguir https://twitter.com/igor_chubin per noves actualitzacions.
======================================================================================
""",
'de': u"""
Entschuldigung, wir können momentan den Wetterdienst nicht erreichen.
Dafür zeigen wir Ihnen das Wetter an einem Beispielort, damit Sie sehen wie die Seite das Wetter anzeigt.
Wir werden versuchen das Problem so schnell wie möglich zu beheben.
Folgen Sie https://twitter.com/igor_chubin für Updates.
======================================================================================
""",
'cy': u"""
Rydym yn brin o ymholiadau i'r gwasanaeth tywydd ar hyn o bryd.
Felly dyma'r adroddiad tywydd ar gyfer y ddinas ragosod (er mwyn arddangos sut mae'n edrych).
Byddwn gyda ymholiadau newydd yn fuan.
Gellir dilyn https://twitter.com/igor_chubin i gael newyddion pellach.
======================================================================================
""",
'es': u"""
Lo siento, hemos alcanzado el límite de peticiones al servicio de previsión del tiempo en este momento.
A continuación, la previsión del tiempo para una ciudad estándar (solo para que puedas ver que aspecto tiene el informe).
Muy pronto volveremos a tener acceso a las peticiones.
Puedes seguir https://twitter.com/igor_chubin para estar al tanto de la situación.
======================================================================================
""",
'fa': u"""
متأسفانه در حال حاضر ظرفیت ما برای درخواست به سرویس هواشناسی به اتمام رسیده.
اینجا می تونید گزارش هواشناسی برای شهر پیش فرض رو ببینید (فقط برای اینه که بهتون نشون بدیم چه شکلی هست)
ما تلاش میکنیم در اسرع وقت ظرفیت جدید به دست بیاریم.
برای دنبال کردن اخبار جدید میتونید https://twitter.com/igor_chubin رو فالو کنید.
======================================================================================
""",
'fr': u"""
Désolé, nous avons épuisé les requêtes vers le service météo.
Voici un bulletin météo de l'emplacement par défaut (pour vous donner un aperçu).
Nous serons très bientôt en mesure de faire de nouvelles requêtes.
Vous pouvez suivre https://twitter.com/igor_chubin pour rester informé.
======================================================================================
""",
'hy': u"""
Կներեք, այս պահին մենք գերազանցել ենք եղանակային տեսության կայանին հարցումների քանակը.
Կարող եք տեսնել տիպային եղանակը զեկուցում հիմնական քաղաքի համար (Ուղղակի որպես նմուշ):
Մենք մշտապես աշխատում ենք հարցումների քանակը բարելավելու ուղղությամբ:
Կարող եք հետևել մեզ https://twitter.com/igor_chubin թարմացումների համար.
======================================================================================
""",
'id': u"""
Maaf, kami kehabian permintaan ke layanan cuaca saat ini.
Ini adalah laporan cuaca dari kota standar (hanya untuk menunjukkan kepada anda bagaimana tampilannya).
Kami akan mencoba permintaan baru lagi sesegera mungkin.
Anda dapat mengikuti https://twitter.com/igor_chubin untuk informasi terbaru.
======================================================================================
""",
'it': u"""
Scusate, attualmente stiamo esaurendo le risorse a disposizione del servizio meteo.
Qui trovate il bollettino del tempo per la città di default (solo per mostrarvi come si presenta).
Potremo elaborare nuove richieste appena possibile.
Potete seguire https://twitter.com/igor_chubin per gli aggiornamenti.
======================================================================================
""",
'ko': u"""
죄송합니다. 현재 날씨 정보를 가져오는 쿼리 요청이 한도에 도달했습니다.
대신 기본으로 설정된 도시에 대한 일기 예보를 보여드리겠습니다. (이는 단지 어떻게 보이는지 알려주기 위함입니다).
쿼리 요청이 가능한 한 빨리 이루어질 수 있도록 하겠습니다.
업데이트 소식을 원하신다면 https://twitter.com/igor_chubin 을 팔로우 해주세요.
======================================================================================
""",
'lv': u"""
Atvainojiet, uz doto brīdi mēs esam mazliet noslogoti.
Šeit ir laika ziņas noklusējuma pilsētai (lai parādītu jums, kā izskatās izveidotais ziņojums).
Mēs atsāksim darbu cik ātri vien varēsim.
Jūs varat sekot https://twitter.com/igor_chubin lai redzētu visus jaunumus.
======================================================================================
""",
'mk': u"""
Извинете, ни снемуваат барања за до сервисот кој ни нуди временска прогноза во моментот.
Еве една временска прогноза за град (за да видите како изгледа).
Ќе добиеме нови барања најбрзо што можеме.
Следете го https://twitter.com/igor_chubin за известувања
======================================================================================
""",
'nb': u"""
Beklager, vi kan ikke nå værtjenesten for øyeblikket.
Her er værmeldingen for standardbyen så du får se hvordan tjenesten ser ut.
Vi vil forsøke å fikse problemet så snart som mulig.
Du kan følge https://twitter.com/igor_chubin for oppdateringer.
======================================================================================
""",
'nl': u"""
Excuse, wij kunnen u op dit moment dit weerbericht niet laten zien.
Hier is het weerbericht voor de standaard stad(zodat u weet hoe het er uitziet)
Wij lossen dit probleem zo snel mogelijk op.
voor updates kunt u ons op https://twitter.com/igor_chubin volgen.
======================================================================================
""",
'pl': u"""
Bardzo nam przykro, ale chwilowo wykorzystaliśmy limit zapytań do serwisu pogodowego.
To, co widzisz jest przykładowym raportem pogodowym dla domyślnego miasta.
Postaramy się przywrócić funkcjonalność tak szybko, jak to tylko możliwe.
Możesz śledzić https://twitter.com/igor_chubin na Twitterze, aby być na bieżąco.
======================================================================================
""",
'pt': u"""
Desculpe-nos, estamos atingindo o limite de consultas ao serviço de previsão do tempo neste momento.
Veja a seguir a previsão do tempo para uma cidade padrão (apenas para você ver que aspecto o relatório tem).
Em breve voltaremos a ter acesso às consultas.
Você pode seguir https://twitter.com/igor_chubin para acompanhar a situação.
======================================================================================
""",
'te': u"""
క్షమించండి, ప్రస్తుతానికి మేము వాతావరణ సేవకు ప్రశ్నలను గడుపుతున్నాం.
ఇక్కడ డిఫాల్ట్ నగరం కోసం వాతావరణ నివేదిక (కేవలం మీకు చూపించడానికి, ఇది ఎలా కనిపిస్తుంది).
సాధ్యమైనంత త్వరలో కొత్త ప్రశ్నలను పొందుతారు.
నవీకరణల కోసం https://twitter.com/igor_chubin ను మీరు అనుసరించవచ్చు.
======================================================================================
""",
'tr': u"""
Üzgünüz, an itibariyle hava durumu servisine yapabileceğimiz sorgu limitine ulaştık.
Varsayılan şehir için hava durumu bilgisini görüyorsunuz (neye benzediğini gösterebilmek için).
Mümkün olan en kısa sürede servise yeniden sorgu yapmaya başlayacağız.
Gelişmeler için https://twitter.com/igor_chubin adresini takip edebilirsiniz.
======================================================================================
""",
'da': u"""
Beklager, men vi er ved at løbe tør for forespørgsler til vejr-servicen lige nu.
Her er vejr rapporten for standard byen (bare så du ved hvordan det kan se ud).
Vi får nye forespørsler hurtigst muligt.
Du kan følge https://twitter.com/igor_chubin for at få opdateringer.
======================================================================================
""",
},
# Historical messages:
# 'Check new Feature: \033[92mwttr.in/Moon\033[0m or \033[92mwttr.in/Moon@2016-Mar-23\033[0m to see the phase of the Moon'
# 'New feature: \033[92mwttr.in/Rome?lang=it\033[0m or \033[92mcurl -H "Accept-Language: it" wttr.in/Rome\033[0m for the localized version. Your lang instead of "it"'
'NEW_FEATURE': {
'en': u'New feature: multilingual location names \033[92mwttr.in/станция+Восток\033[0m (in UTF-8) and location search \033[92mwttr.in/~Kilimanjaro\033[0m (just add ~ before)',
'af': u'Nuwe eienskap: veeltalige name vir liggings \033[92mwttr.in/станция+Восток\033[0m (in UTF-8) en ligging soek \033[92mwttr.in/~Kilimanjaro\033[0m (plaas net ~ vooraan)',
'be': u'Новыя магчымасці: назвы месц на любой мове \033[92mwttr.in/станция+Восток\033[0m (в UTF-8) i пошук месц \033[92mwttr.in/~Kilimanjaro\033[0m (трэба дадаць ~ ў пачатак)',
'bs': u'XXXXXXXXXXXXXXXXXXXX: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX\033[92mwttr.in/станция+Восток\033[0m (XX UTF-8) XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
'ca': u'Noves funcionalitats: noms d\'ubicació multilingües \033[92mwttr.in/станция+Восток\033[0m (en UTF-8) i la ubicació de recerca \033[92mwttr.in/~Kilimanjaro\033[0m (només cal afegir ~ abans)',
'es': u'Nuevas funcionalidades: los nombres de las ubicaciones en vários idiomas \033[92mwttr.in/станция+Восток\033[0m (em UTF-8) y la búsqueda por ubicaciones \033[92mwttr.in/~Kilimanjaro\033[0m (tan solo inserte ~ en frente)',
'fa': u'قابلیت جدید: پشتیبانی از نام چند زبانه مکانها \033[92mwttr.in/станция+Восток\033[0m (در فرمت UTF-8) و جسجتوی مکان ها \033[92mwttr.in/~Kilimanjaro\033[0m (فقط قبل از اون ~ اضافه کنید)',
'fr': u'Nouvelles fonctionnalités: noms d\'emplacements multilingues \033[92mwttr.in/станция+Восток\033[0m (en UTF-8) et recherche d\'emplacement \033[92mwttr.in/~Kilimanjaro\033[0m (ajouter ~ devant)',
'mk': u'Нова функција: повеќе јазично локациски имиња \033[92mwttr.in/станция+Восток\033[0m (во UTF-8) и локациско пребарување \033[92mwttr.in/~Kilimanjaro\033[0m (just add ~ before)',
'nb': u'Ny funksjon: flerspråklige stedsnavn \033[92mwttr.in/станция+Восток\033[0m (i UTF-8) og lokasjonssøk \033[92mwttr.in/~Kilimanjaro\033[0m (bare legg til ~ foran)',
'nl': u'Nieuwe functie: tweetalige locatie namen \033[92mwttr.in/станция+Восток\033[0m (in UTF-8) en locatie zoeken \033[92mwttr.in/~Kilimanjaro\033[0m (zet er gewoon een ~ voor)',
'cy': u'Nodwedd newydd: enwau lleoliadau amlieithog \033[92mwttr.in/станция+Восток\033[0m (yn UTF-8) a chwilio am leoliad \033[92mwttr.in/~Kilimanjaro\033[0m (ychwanegwch ~ yn gyntaf)',
'de': u'Neue Funktion: mehrsprachige Ortsnamen \033[92mwttr.in/станция+Восток\033[0m (in UTF-8) und Ortssuche \033[92mwttr.in/~Kilimanjaro\033[0m (fügen Sie ein ~ vor dem Ort ein)',
'hy': u'Փորձարկեք: տեղամասերի անունները կամայական լեզվով \033[92mwttr.in/Դիլիջան\033[0m (в UTF-8) և տեղանքի որոնում \033[92mwttr.in/~Kilimanjaro\033[0m (հարկավոր է ~ ավելացնել դիմացից)',
'id': u'Fitur baru: nama lokasi dalam multibahasa \033[92mwttr.in/станция+Восток\033[0m (in UTF-8) dan pencarian lokasi \033[92mwttr.in/~Kilimanjaro\033[0m (hanya tambah tanda ~ sebelumnya)',
'it': u'Nuove funzionalità: nomi delle località multilingue \033[92mwttr.in/станция+Восток\033[0m (in UTF-8) e ricerca della località \033[92mwttr.in/~Kilimanjaro\033[0m (basta premettere ~)',
'ko': u'새로운 기능: 다국어로 대응된 위치 \033[92mwttr.in/서울\033[0m (UTF-8에서) 장소 검색 \033[92mwttr.in/~Kilimanjaro\033[0m (앞에 ~를 붙이세요)',
'kk': u'',
'lv': u'Jaunums: Daudzvalodu atrašanās vietu nosaukumi \033[92mwttr.in/станция+Восток\033[0m (in UTF-8) un dabas objektu meklēšana \033[92mwttr.in/~Kilimanjaro\033[0m (tikai priekšā pievieno ~)',
'pl': u'Nowa funkcjonalność: wielojęzyczne nazwy lokalizacji \033[92mwttr.in/станция+Восток\033[0m (w UTF-8) i szukanie lokalizacji \033[92mwttr.in/~Kilimanjaro\033[0m (poprzedź zapytanie ~ - znakiem tyldy)',
'pt': u'Nova funcionalidade: nomes de localidades em várias línguas \033[92mwttr.in/станция+Восток\033[0m (em UTF-8) e procura por localidades \033[92mwttr.in/~Kilimanjaro\033[0m (é só colocar ~ antes)',
'ru': u'Попробуйте: названия мест на любом языке \033[92mwttr.in/станция+Восток\033[0m (в UTF-8) и поиск мест \033[92mwttr.in/~Kilimanjaro\033[0m (нужно добавить ~ спереди)',
'tr': u'Yeni özellik: çok dilli konum isimleri \033[92mwttr.in/станция+Восток\033[0m (UTF-8 ile) ve konum arama \033[92mwttr.in/~Kilimanjaro\033[0m (sadece önüne ~ ekleyin)',
'te': u'క్రొత్త లక్షణం: బహుభాషా స్థాన పేర్లు \033[92mwttr.in/станция+Восток\033[0m (UTF-8 లో) మరియు స్థానం శోధన \033[92mwttr.in/~Kilimanjaro\033[0m (కేవలం ~ ముందుకి జోడించండి)',
'da': u'Ny funktion: flersprogede lokationsnavne \033[92mwttr.in/станция+Восток\033[0m (som UTF-8) og lokations søgning \033[92mwttr.in/~Kilimanjaro\033[0m (bare tilføj ~ inden)',
},
'FOLLOW_ME': {
'en': u'Follow \033[46m\033[30m@igor_chubin\033[0m for wttr.in updates',
'af': u'Volg \033[46m\033[30m@igor_chubin\033[0m vir wttr.in opdaterings',
'be': u'Сачыце за \033[46m\033[30m@igor_chubin\033[0m за навінамі wttr.in',
'bs': u'XXXXXX \033[46m\033[30m@igor_chubin\033[0m XXXXXXXXXXXXXXXXXXX',
'ca': u'Seguiu \033[46m\033[30m@igor_chubin\033[0m per actualitzacions de wttr.in',
'es': u'Seguir \033[46m\033[30m@igor_chubin\033[0m para recibir las novedades de wttr.in',
'cy': u'Dilyner \033[46m\033[30m@igor_Chubin\033[0m am diweddariadau wttr.in',
'fa': u'برای دنبال کردن خبرهای wttr.in شناسه \033[46m\033[30m@igor_chubin\033[0m رو فالو کنید.',
'fr': u'Suivez \033[46m\033[30m@igor_Chubin\033[0m pour rester informé sur wttr.in',
'de': u'Folgen Sie \033[46m\033[30mhttps://twitter.com/igor_chubin\033[0m für wttr.in Updates',
'hy': u'Նոր ֆիչռների համար հետևեք՝ \033[46m\033[30m@igor_chubin\033[0m',
'id': u'Ikuti \033[46m\033[30m@igor_chubin\033[0m untuk informasi wttr.in terbaru',
'it': u'Seguite \033[46m\033[30m@igor_chubin\033[0m per aggiornamenti a wttr.in',
'ko': u'wttr.in의 업데이트 소식을 원하신다면 \033[46m\033[30m@igor_chubin\033[0m 을 팔로우 해주세요',
'kk': u'',
'lv': u'Seko \033[46m\033[30m@igor_chubin\033[0m , lai uzzinātu wttr.in jaunumus',
'mk': u'Следете \033[46m\033[30m@igor_chubin\033[0m за wttr.in новости',
'nb': u'Følg \033[46m\033[30m@igor_chubin\033[0m for wttr.in oppdateringer',
'nl': u'Volg \033[46m\033[30m@igor_chubin\033[0m voor wttr.in updates',
'pl': u'Śledź \033[46m\033[30m@igor_chubin\033[0m aby być na bieżąco z nowościami dotyczącymi wttr.in',
'pt': u'Seguir \033[46m\033[30m@igor_chubin\033[0m para as novidades de wttr.in',
'ru': u'Все новые фичи публикуются здесь: \033[46m\033[30m@igor_chubin\033[0m',
'te': u'అనుసరించండి \033[46m\033[30m@igor_chubin\033[0m wttr.in నవీకరణలను కోసం',
'tr': u'wttr.in ile ilgili gelişmeler için \033[46m\033[30m@igor_chubin\033[0m adresini takip edin',
'da': u'Følg \033[46m\033[30m@igor_chubin\033[0m for at få wttr.in opdateringer',
},
}
def get_message(message_name, lang):
if message_name not in MESSAGE:
return ''
message_dict = MESSAGE[message_name]
return message_dict.get(lang, message_dict.get('en', ''))
| 49.515947
| 236
| 0.634624
|
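The get_message helper in the file above returns the string for the requested language, falling back first to English and then to an empty string. A minimal, self-contained sketch of that two-level fallback (the DEMO_MESSAGES table and its strings are invented for illustration, not the wttr.in dictionary itself):

# Illustrative sketch of the dict fallback pattern used by get_message.
DEMO_MESSAGES = {
    'CAPACITY_LIMIT_REACHED': {
        'en': 'Sorry, we are running out of queries right now.',
        'de': 'Entschuldigung, die Abfragen sind im Moment aufgebraucht.',
    },
}

def lookup_message(message_name, lang):
    if message_name not in DEMO_MESSAGES:
        return ''
    message_dict = DEMO_MESSAGES[message_name]
    # Requested language first, then English, then an empty string.
    return message_dict.get(lang, message_dict.get('en', ''))

assert lookup_message('CAPACITY_LIMIT_REACHED', 'de').startswith('Entschuldigung')
assert lookup_message('CAPACITY_LIMIT_REACHED', 'fr').startswith('Sorry')  # falls back to 'en'
assert lookup_message('UNKNOWN', 'en') == ''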
432ba9f657cddc6e961d3399b5cf1c28eb5ad8e4
| 4,176
|
py
|
Python
|
render/BitcoinTradingGraph.py
|
skywalker0803r/Bitcoin-Trader-RL
|
e6b72d58dd773d9a775a580d6a35051ef8da513e
|
[
"MIT"
] | null | null | null |
render/BitcoinTradingGraph.py
|
skywalker0803r/Bitcoin-Trader-RL
|
e6b72d58dd773d9a775a580d6a35051ef8da513e
|
[
"MIT"
] | null | null | null |
render/BitcoinTradingGraph.py
|
skywalker0803r/Bitcoin-Trader-RL
|
e6b72d58dd773d9a775a580d6a35051ef8da513e
|
[
"MIT"
] | 2
|
2021-08-22T13:01:06.000Z
|
2021-10-14T02:47:52.000Z
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import style
from datetime import datetime
from mpl_finance import candlestick_ochl as candlestick
style.use('ggplot')
class BitcoinTradingGraph:
def __init__(self, df, title=None):
self.df = df
self.net_worths = np.zeros(len(df))
fig = plt.figure()
fig.suptitle(title)
self.net_worth_ax = plt.subplot2grid((6, 1), (0, 0), rowspan=2, colspan=1)
self.price_ax = plt.subplot2grid((6, 1), (2, 0), rowspan=8, colspan=1, sharex=self.net_worth_ax)
plt.subplots_adjust(left=0.11, bottom=0.24,right=0.90, top=0.90, wspace=0.2, hspace=0)
plt.show(block=False)
def _render_net_worth(self, current_step, net_worth, step_range, dates):
self.net_worth_ax.clear()
self.net_worth_ax.plot_date(dates, self.net_worths[step_range], '-', label='Net Worth')
self.net_worth_ax.legend()
legend = self.net_worth_ax.legend(loc=2, ncol=2, prop={'size': 8})
legend.get_frame().set_alpha(0.4)
last_date = self.df['Timestamp'].values[current_step]
last_net_worth = self.net_worths[current_step]
self.net_worth_ax.annotate('{0:.2f}'.format(net_worth), (last_date, last_net_worth),
xytext=(last_date, last_net_worth),
bbox=dict(boxstyle='round',fc='w', ec='k', lw=1),
color="black",
fontsize="small")
self.net_worth_ax.set_ylim(
min(self.net_worths[np.nonzero(self.net_worths)]) / 1.25, max(self.net_worths) * 1.25)
def _render_price(self, current_step, net_worth, step_range, dates):
self.price_ax.clear()
candlesticks = zip(dates,
self.df['Open'].values[step_range], self.df['Close'].values[step_range],
self.df['High'].values[step_range], self.df['Low'].values[step_range])
candlestick(self.price_ax, candlesticks, width=20)
last_date = self.df['Timestamp'].values[current_step]
last_close = self.df['Close'].values[current_step]
last_high = self.df['High'].values[current_step]
def _render_trades(self, current_step, trades, step_range):
for trade in trades:
if trade['step'] in step_range:
date = self.df['Timestamp'].values[trade['step']]
close = self.df['Close'].values[trade['step']]
high = self.df['High'].values[trade['step']]
low = self.df['Low'].values[trade['step']]
if trade['type'] == 'buy':
high_low = low
color = 'g'
else:
high_low = high
color = 'r'
total = '{0:.2f}'.format(trade['total'])
self.price_ax.annotate('$' + str(total), (date, close),
xytext=(date, high_low),
bbox=dict(boxstyle='round',fc='w', ec='k', lw=1, alpha=0.4),
color=color,
alpha=0.4,
fontsize="small")
def render(self, current_step, net_worth, trades, window_size=40):
self.net_worths[current_step] = net_worth
window_start = max(current_step - window_size, 0)
step_range = range(window_start, current_step + 1)
dates = self.df['Timestamp'].values[step_range]
self._render_net_worth(current_step, net_worth, step_range, dates)
self._render_price(current_step, net_worth, step_range, dates)
self._render_trades(current_step, trades, step_range)
date_labels = np.array([datetime.utcfromtimestamp(x).strftime(
'%Y-%m-%d %H:%M') for x in self.df['Timestamp'].values[step_range]])
self.price_ax.set_xticklabels(
date_labels, rotation=45, horizontalalignment='right')
plt.setp(self.net_worth_ax.get_xticklabels(), visible=False)
plt.pause(0.2)
def close(self):
plt.close()
| 49.714286
| 104
| 0.577586
|
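BitcoinTradingGraph.render() above only plots the last window_size steps. A self-contained sketch of that sliding-window index computation (function name and example values are illustrative, not part of the original class):

# Sliding-window range computation as used by render().
def visible_range(current_step, window_size=40):
    window_start = max(current_step - window_size, 0)
    return range(window_start, current_step + 1)

assert list(visible_range(3, window_size=40)) == [0, 1, 2, 3]   # clamped at step 0
assert len(visible_range(100, window_size=40)) == 41            # window plus the current step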
dec05f70c75d896ee821a1eab4bb137c60d59d60
| 6,545
|
py
|
Python
|
selfdrive/car/hyundai/carcontroller.py
|
advpilot/advpilot
|
0d8940cd678c34c243a8590afb998c49d88599d0
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/carcontroller.py
|
advpilot/advpilot
|
0d8940cd678c34c243a8590afb998c49d88599d0
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/carcontroller.py
|
advpilot/advpilot
|
0d8940cd678c34c243a8590afb998c49d88599d0
|
[
"MIT"
] | null | null | null |
from cereal import car
from common.realtime import DT_CTRL
from common.numpy_fast import clip, interp
from common.conversions import Conversions as CV
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.hyundai.hyundaican import create_lkas11, create_clu11, create_lfahda_mfc, create_acc_commands, create_acc_opt, create_frt_radar_opt
from selfdrive.car.hyundai.values import Buttons, CarControllerParams, CAR
from opendbc.can.packer import CANPacker
VisualAlert = car.CarControl.HUDControl.VisualAlert
LongCtrlState = car.CarControl.Actuators.LongControlState
STEER_FAULT_MAX_ANGLE = 85 # EPS max is 90
STEER_FAULT_MAX_FRAMES = 90 # EPS counter is 95
def process_hud_alert(enabled, fingerprint, hud_control):
sys_warning = (hud_control.visualAlert in (VisualAlert.steerRequired, VisualAlert.ldw))
# initialize to no line visible
sys_state = 1
if hud_control.leftLaneVisible and hud_control.rightLaneVisible or sys_warning: # HUD alert only display when LKAS status is active
sys_state = 3 if enabled or sys_warning else 4
elif hud_control.leftLaneVisible:
sys_state = 5
elif hud_control.rightLaneVisible:
sys_state = 6
# initialize to no warnings
left_lane_warning = 0
right_lane_warning = 0
if hud_control.leftLaneDepart:
left_lane_warning = 1 if fingerprint in (CAR.GENESIS_G90, CAR.GENESIS_G80) else 2
if hud_control.rightLaneDepart:
right_lane_warning = 1 if fingerprint in (CAR.GENESIS_G90, CAR.GENESIS_G80) else 2
return sys_warning, sys_state, left_lane_warning, right_lane_warning
class CarController:
def __init__(self, dbc_name, CP, VM):
self.CP = CP
self.params = CarControllerParams(CP)
self.packer = CANPacker(dbc_name)
self.frame = 0
self.angle_limit_counter = 0
self.cut_steer_frames = 0
self.cut_steer = False
self.apply_steer_last = 0
self.car_fingerprint = CP.carFingerprint
self.steer_rate_limited = False
self.last_resume_frame = 0
self.accel = 0
def update(self, CC, CS):
actuators = CC.actuators
hud_control = CC.hudControl
pcm_cancel_cmd = CC.cruiseControl.cancel
# Steering Torque
new_steer = int(round(actuators.steer * self.params.STEER_MAX))
apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, self.params)
self.steer_rate_limited = new_steer != apply_steer
if not CC.latActive:
apply_steer = 0
self.apply_steer_last = apply_steer
sys_warning, sys_state, left_lane_warning, right_lane_warning = process_hud_alert(CC.enabled, self.car_fingerprint,
hud_control)
can_sends = []
# tester present - w/ no response (keeps radar disabled)
if self.CP.openpilotLongitudinalControl:
if self.frame % 100 == 0:
can_sends.append([0x7D0, 0, b"\x02\x3E\x80\x00\x00\x00\x00\x00", 0])
if CC.latActive and abs(CS.out.steeringAngleDeg) > STEER_FAULT_MAX_ANGLE:
self.angle_limit_counter += 1
else:
self.angle_limit_counter = 0
# stop requesting torque to avoid 90 degree fault and hold torque with induced temporary fault
# two cycles avoids race conditions every few minutes
if self.angle_limit_counter > STEER_FAULT_MAX_FRAMES:
self.cut_steer = True
elif self.cut_steer_frames > 1:
self.cut_steer_frames = 0
self.cut_steer = False
cut_steer_temp = False
if self.cut_steer:
cut_steer_temp = True
self.angle_limit_counter = 0
self.cut_steer_frames += 1
can_sends.append(create_lkas11(self.packer, self.frame, self.car_fingerprint, apply_steer, CC.latActive,
cut_steer_temp, CS.lkas11, sys_warning, sys_state, CC.enabled,
hud_control.leftLaneVisible, hud_control.rightLaneVisible,
left_lane_warning, right_lane_warning))
if not self.CP.openpilotLongitudinalControl:
if pcm_cancel_cmd:
can_sends.append(create_clu11(self.packer, self.frame, CS.clu11, Buttons.CANCEL))
elif CS.out.cruiseState.standstill:
# send resume at a max freq of 10Hz
if (self.frame - self.last_resume_frame) * DT_CTRL > 0.1:
# send 25 messages at a time to increases the likelihood of resume being accepted
can_sends.extend([create_clu11(self.packer, self.frame, CS.clu11, Buttons.RES_ACCEL)] * 25)
self.last_resume_frame = self.frame
if self.frame % 2 == 0 and self.CP.openpilotLongitudinalControl:
accel = actuators.accel
jerk = 0
if CC.longActive:
jerk = clip(2.0 * (accel - CS.out.aEgo), -12.7, 12.7)
if accel < 0:
accel = interp(accel - CS.out.aEgo, [-1.0, -0.5], [2 * accel, accel])
accel = clip(accel, CarControllerParams.ACCEL_MIN, CarControllerParams.ACCEL_MAX)
lead_visible = False
stopping = actuators.longControlState == LongCtrlState.stopping
set_speed_in_units = hud_control.setSpeed * (CV.MS_TO_MPH if CS.clu11["CF_Clu_SPEED_UNIT"] == 1 else CV.MS_TO_KPH)
can_sends.extend(create_acc_commands(self.packer, CC.enabled, accel, jerk, int(self.frame / 2), lead_visible,
set_speed_in_units, stopping, CS.out.gasPressed))
self.accel = accel
# 20 Hz LFA MFA message
if self.frame % 5 == 0 and self.car_fingerprint in (CAR.SONATA, CAR.PALISADE, CAR.IONIQ, CAR.KIA_NIRO_EV, CAR.KIA_NIRO_HEV_2021,
CAR.IONIQ_EV_2020, CAR.IONIQ_PHEV, CAR.KIA_CEED, CAR.KIA_SELTOS, CAR.KONA_EV,
CAR.ELANTRA_2021, CAR.ELANTRA_HEV_2021, CAR.SONATA_HYBRID, CAR.KONA_HEV, CAR.SANTA_FE_2022,
CAR.KIA_K5_2021, CAR.IONIQ_HEV_2022, CAR.SANTA_FE_HEV_2022, CAR.GENESIS_G70_2020, CAR.SANTA_FE_PHEV_2022):
can_sends.append(create_lfahda_mfc(self.packer, CC.enabled))
# 5 Hz ACC options
if self.frame % 20 == 0 and self.CP.openpilotLongitudinalControl:
can_sends.extend(create_acc_opt(self.packer))
# 2 Hz front radar options
if self.frame % 50 == 0 and self.CP.openpilotLongitudinalControl:
can_sends.append(create_frt_radar_opt(self.packer))
new_actuators = actuators.copy()
new_actuators.steer = apply_steer / self.params.STEER_MAX
new_actuators.accel = self.accel
self.frame += 1
return new_actuators, can_sends
| 42.225806
| 162
| 0.696409
|
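The carcontroller above counts consecutive frames near STEER_FAULT_MAX_ANGLE and temporarily cuts steer torque to avoid an EPS fault. A simplified, self-contained sketch of that counter/cut state machine (the class is illustrative only, not the openpilot implementation):

# Sketch of the angle-limit fault-avoidance logic from CarController.update().
STEER_FAULT_MAX_ANGLE = 85
STEER_FAULT_MAX_FRAMES = 90

class SteerCutLogic:
    def __init__(self):
        self.angle_limit_counter = 0
        self.cut_steer_frames = 0
        self.cut_steer = False

    def step(self, lat_active, steering_angle_deg):
        # Count consecutive frames spent above the EPS angle limit.
        if lat_active and abs(steering_angle_deg) > STEER_FAULT_MAX_ANGLE:
            self.angle_limit_counter += 1
        else:
            self.angle_limit_counter = 0
        # Cut torque briefly once the limit has been exceeded for too long.
        if self.angle_limit_counter > STEER_FAULT_MAX_FRAMES:
            self.cut_steer = True
        elif self.cut_steer_frames > 1:
            self.cut_steer_frames = 0
            self.cut_steer = False
        cut_now = False
        if self.cut_steer:
            cut_now = True
            self.angle_limit_counter = 0
            self.cut_steer_frames += 1
        return cut_now

logic = SteerCutLogic()
for _ in range(STEER_FAULT_MAX_FRAMES + 1):
    cut = logic.step(True, 86.0)
assert cut  # torque request is cut after the limit has been held long enough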
b6157fe1318c14278f48513a6f3ca2726bddb06b
| 2,294
|
py
|
Python
|
python/ray/data/_internal/delegating_block_builder.py
|
LaudateCorpus1/ray
|
20cf2edfef7103c269358a49a48c2159315ee132
|
[
"Apache-2.0"
] | null | null | null |
python/ray/data/_internal/delegating_block_builder.py
|
LaudateCorpus1/ray
|
20cf2edfef7103c269358a49a48c2159315ee132
|
[
"Apache-2.0"
] | 41
|
2021-09-21T01:13:48.000Z
|
2022-03-19T07:12:22.000Z
|
python/ray/data/_internal/delegating_block_builder.py
|
LaudateCorpus1/ray
|
20cf2edfef7103c269358a49a48c2159315ee132
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any
from ray.data.block import Block, T, BlockAccessor
from ray.data._internal.block_builder import BlockBuilder
from ray.data._internal.simple_block import SimpleBlockBuilder
from ray.data._internal.arrow_block import ArrowRow, ArrowBlockBuilder
from ray.data._internal.pandas_block import PandasRow, PandasBlockBuilder
class DelegatingBlockBuilder(BlockBuilder[T]):
def __init__(self):
self._builder = None
self._empty_block = None
def add(self, item: Any) -> None:
if self._builder is None:
# TODO (kfstorm): Maybe we can use Pandas block format for dict.
if isinstance(item, dict) or isinstance(item, ArrowRow):
import pyarrow
try:
check = ArrowBlockBuilder()
check.add(item)
check.build()
self._builder = ArrowBlockBuilder()
except (TypeError, pyarrow.lib.ArrowInvalid):
self._builder = SimpleBlockBuilder()
elif isinstance(item, PandasRow):
self._builder = PandasBlockBuilder()
else:
self._builder = SimpleBlockBuilder()
self._builder.add(item)
def add_block(self, block: Block) -> None:
accessor = BlockAccessor.for_block(block)
if accessor.num_rows() == 0:
# Don't infer types of empty lists. Store the block and use it if no
# other data is added. https://github.com/ray-project/ray/issues/20290
self._empty_block = block
return
if self._builder is None:
self._builder = accessor.builder()
self._builder.add_block(block)
def build(self) -> Block:
if self._builder is None:
if self._empty_block is not None:
self._builder = BlockAccessor.for_block(self._empty_block).builder()
else:
self._builder = ArrowBlockBuilder()
return self._builder.build()
def num_rows(self) -> int:
return self._builder.num_rows() if self._builder is not None else 0
def get_estimated_memory_usage(self) -> int:
if self._builder is None:
return 0
return self._builder.get_estimated_memory_usage()
| 37.606557
| 84
| 0.622929
|
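DelegatingBlockBuilder above chooses a concrete builder lazily from the first item it receives and forwards all later work to it. A small self-contained sketch of the same lazy-delegation pattern with plain containers (the builder classes here are hypothetical, not Ray's):

# Minimal illustration of lazy builder delegation.
class ListBuilder:
    def __init__(self):
        self._items = []
    def add(self, item):
        self._items.append(item)
    def build(self):
        return self._items

class DictBuilder(ListBuilder):
    def build(self):
        return {i: item for i, item in enumerate(self._items)}

class DelegatingBuilder:
    def __init__(self):
        self._builder = None
    def add(self, item):
        if self._builder is None:
            # Pick the concrete builder from the first item's type.
            self._builder = DictBuilder() if isinstance(item, dict) else ListBuilder()
        self._builder.add(item)
    def build(self):
        return (self._builder or ListBuilder()).build()

b = DelegatingBuilder()
b.add(1)
b.add(2)
assert b.build() == [1, 2]
b2 = DelegatingBuilder()
b2.add({'a': 1})
assert b2.build() == {0: {'a': 1}}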
1be450d881fa28c0a6d072ff762f6992170ad80a
| 166
|
py
|
Python
|
play-travis/test.py
|
otus-devops-2019-05/kalinkina_infra
|
cf1e3fa1b90a3ba1243172e51af25f852566f452
|
[
"MIT"
] | null | null | null |
play-travis/test.py
|
otus-devops-2019-05/kalinkina_infra
|
cf1e3fa1b90a3ba1243172e51af25f852566f452
|
[
"MIT"
] | 2
|
2019-08-08T11:41:41.000Z
|
2019-08-26T10:43:35.000Z
|
play-travis/test.py
|
otus-devops-2019-05/kalinkina_infra
|
cf1e3fa1b90a3ba1243172e51af25f852566f452
|
[
"MIT"
] | null | null | null |
import unittest
class NumbersTest(unittest.TestCase):
def test_equal(self):
self.assertEqual(1 + 0, 1)
if __name__ == '__main__':
unittest.main()
| 15.090909
| 37
| 0.668675
|
3ced4170782a334ab502aff4feae84c9276a6445
| 19,985
|
py
|
Python
|
tests/unit_tests/gui/mainWmixin/test_tabEnviron.py
|
mworion/MountWizzard4
|
4e06b29ec2ef70be40e114b911b7bdf2f858a4b1
|
[
"Apache-2.0"
] | 16
|
2020-01-11T22:32:26.000Z
|
2022-03-31T15:18:14.000Z
|
tests/unit_tests/gui/mainWmixin/test_tabEnviron.py
|
mworion/MountWizzard4
|
4e06b29ec2ef70be40e114b911b7bdf2f858a4b1
|
[
"Apache-2.0"
] | 196
|
2020-01-16T13:56:01.000Z
|
2022-03-29T02:06:51.000Z
|
tests/unit_tests/gui/mainWmixin/test_tabEnviron.py
|
mworion/MountWizzard4
|
4e06b29ec2ef70be40e114b911b7bdf2f858a4b1
|
[
"Apache-2.0"
] | 6
|
2019-12-01T19:39:33.000Z
|
2021-05-27T13:14:20.000Z
|
############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
# Licence APL2.0
#
###########################################################
# standard libraries
import pytest
from unittest import mock
from pathlib import Path
# external packages
from PyQt5.QtGui import QImage
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QObject
from PyQt5.QtCore import QThreadPool
from PyQt5.QtCore import pyqtSignal
from mountcontrol.qtmount import Mount
import requests
from skyfield.api import wgs84
from skyfield.api import Loader
import numpy as np
# local import
from gui.mainWmixin.tabEnviron import Environ
from gui.widgets.main_ui import Ui_MainWindow
from gui.utilities.toolsQtWidget import MWidget
from logic.environment.sensorWeather import SensorWeather
from logic.environment.onlineWeather import OnlineWeather
from logic.environment.weatherUPB import WeatherUPB
from logic.environment.skymeter import Skymeter
from base.loggerMW import setupLogging
setupLogging()
@pytest.fixture(autouse=True, scope='module')
def module(qapp):
yield
@pytest.fixture(autouse=True, scope='function')
def function(module):
class Test1(QObject):
mount = Mount(host='localhost', MAC='00:00:00:00:00:00', verbose=False,
pathToData='tests/workDir/data')
update10s = pyqtSignal()
threadPool = QThreadPool()
class Test(QObject):
config = {'mainW': {}}
threadPool = QThreadPool()
update1s = pyqtSignal()
update30m = pyqtSignal()
message = pyqtSignal(str, int)
mount = Mount(host='localhost', MAC='00:00:00:00:00:00', verbose=False,
pathToData='tests/workDir/data')
mount.obsSite.location = wgs84.latlon(latitude_degrees=20,
longitude_degrees=10,
elevation_m=500)
loader = Loader('tests/testData', verbose=False)
planets = loader('de421_23.bsp')
sensorWeather = SensorWeather(app=Test1())
onlineWeather = OnlineWeather(app=Test1())
powerWeather = WeatherUPB(app=Test1())
skymeter = Skymeter(app=Test1())
class Mixin(MWidget, Environ):
def __init__(self):
super().__init__()
self.app = Test()
self.deviceStat = {}
self.threadPool = self.app.threadPool
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
Environ.__init__(self)
window = Mixin()
yield window
def test_initConfig_1(function):
suc = function.initConfig()
assert suc
def test_storeConfig_1(function):
suc = function.storeConfig()
assert suc
def test_updateRefractionUpdateType_1(function):
class Test:
weatherStatus = 3
function.refractionSource = 'onlineWeather'
suc = function.updateRefractionUpdateType(setting=Test())
assert not suc
def test_updateRefractionUpdateType_2(function):
class Test:
weatherStatus = 3
function.refractionSource = 'directWeather'
suc = function.updateRefractionUpdateType(setting=Test())
assert not suc
def test_updateRefractionUpdateType_3(function):
class Test:
weatherStatus = 0
function.refractionSource = 'directWeather'
function.ui.checkRefracNone.setChecked(False)
suc = function.updateRefractionUpdateType(setting=Test())
assert suc
assert function.ui.checkRefracNone.isChecked()
def test_updateRefractionUpdateType_4(function):
class Test:
weatherStatus = 1
function.refractionSource = 'directWeather'
function.ui.checkRefracNoTrack.setChecked(False)
suc = function.updateRefractionUpdateType(setting=Test())
assert suc
def test_updateRefractionUpdateType_5(function):
class Test:
weatherStatus = 2
function.refractionSource = 'directWeather'
function.ui.checkRefracCont.setChecked(False)
suc = function.updateRefractionUpdateType(setting=Test())
assert suc
def test_setRefractionUpdateType_1(function):
function.refractionSource = 'onlineWeather'
suc = function.setRefractionUpdateType()
assert not suc
def test_setRefractionUpdateType_2(function):
function.refractionSource = 'directWeather'
function.ui.checkRefracNone.setChecked(True)
suc = function.setRefractionUpdateType()
assert not suc
def test_setRefractionUpdateType_3(function):
function.refractionSource = 'directWeather'
function.ui.checkRefracNoTrack.setChecked(True)
suc = function.setRefractionUpdateType()
assert not suc
def test_setRefractionUpdateType_4(function):
function.refractionSource = 'directWeather'
function.ui.checkRefracCont.setChecked(True)
suc = function.setRefractionUpdateType()
assert not suc
def test_setRefractionSourceGui_1(function):
suc = function.setRefractionSourceGui()
assert suc
def test_setRefractionSourceGui_2(function):
function.refractionSource = 'onlineWeather'
suc = function.setRefractionSourceGui()
assert suc
def test_selectRefractionSource_1(function):
def Sender():
return function.ui.powerPort1
function.sender = Sender
suc = function.selectRefractionSource()
assert suc
def test_selectRefractionSource_2(function):
def Sender():
return function.ui.onlineWeatherGroup
function.refractionSource = 'onlineWeather'
function.sender = Sender
suc = function.selectRefractionSource()
assert suc
def test_selectRefractionSource_3(function):
def Sender():
return function.ui.onlineWeatherGroup
function.refractionSource = 'onlineWeather'
function.ui.onlineWeatherGroup.setChecked(True)
function.sender = Sender
suc = function.selectRefractionSource()
assert suc
def test_updateFilterRefractionParameters_1(function):
function.refractionSource = 'onlineWeather'
function.app.onlineWeather.data = {}
suc = function.updateFilterRefractionParameters()
assert not suc
def test_updateFilterRefractionParameters_2(function):
function.refractionSource = 'weather'
function.app.onlineWeather.data = {'temperature': 10,
'pressure': 1000}
suc = function.updateFilterRefractionParameters()
assert not suc
def test_updateFilterRefractionParameters_3(function):
function.refractionSource = 'onlineWeather'
function.app.onlineWeather.data = {'temperature': 10,
'pressure': 1000}
suc = function.updateFilterRefractionParameters()
assert suc
def test_updateFilterRefractionParameters_4(function):
function.refractionSource = 'sensorWeather'
suc = function.updateFilterRefractionParameters()
assert not suc
def test_updateFilterRefractionParameters_5(function):
function.refractionSource = 'sensorWeather'
function.app.sensorWeather.data = {'WEATHER_PARAMETERS.WEATHER_TEMPERATURE': 10,
'WEATHER_PARAMETERS.WEATHER_PRESSURE': 1000}
suc = function.updateFilterRefractionParameters()
assert suc
def test_updateFilterRefractionParameters_6(function):
function.refractionSource = 'sensorWeather'
function.filteredTemperature = None
function.app.sensorWeather.data = {'WEATHER_PARAMETERS.WEATHER_TEMPERATURE': 10,
'WEATHER_PARAMETERS.WEATHER_PRESSURE': 1000}
suc = function.updateFilterRefractionParameters()
assert suc
def test_updateFilterRefractionParameters_7(function):
function.refractionSource = 'sensorWeather'
function.filteredPressure = None
function.app.sensorWeather.data = {'WEATHER_PARAMETERS.WEATHER_TEMPERATURE': 10,
'WEATHER_PARAMETERS.WEATHER_PRESSURE': 1000}
suc = function.updateFilterRefractionParameters()
assert suc
def test_updateFilterRefractionParameters_8(function):
function.refractionSource = 'sensorWeather'
function.filteredTemperature = np.full(100, 10)
function.app.sensorWeather.data = {'WEATHER_PARAMETERS.WEATHER_TEMPERATURE': 10,
'WEATHER_PARAMETERS.WEATHER_PRESSURE': 1000}
suc = function.updateFilterRefractionParameters()
assert suc
def test_updateFilterRefractionParameters_9(function):
function.refractionSource = 'sensorWeather'
function.filteredPressure = np.full(100, 1000)
function.app.sensorWeather.data = {'WEATHER_PARAMETERS.WEATHER_TEMPERATURE': 10,
'WEATHER_PARAMETERS.WEATHER_PRESSURE': 1000}
suc = function.updateFilterRefractionParameters()
assert suc
def test_movingAverageRefractionParameters_1(function):
v1, v2 = function.movingAverageRefractionParameters()
assert v1 is None
assert v2 is None
def test_movingAverageRefractionParameters_2(function):
function.filteredTemperature = np.full(100, 10)
function.filteredPressure = np.full(100, 1000)
v1, v2 = function.movingAverageRefractionParameters()
assert v1 == 10.0
assert v2 == 1000.0
def test_updateRefractionParameters_1(function, qtbot):
function.refractionSource = 'directWeather'
suc = function.updateRefractionParameters()
assert not suc
def test_updateRefractionParameters_2(function, qtbot):
function.refractionSource = 'onlineWeather'
function.deviceStat['mount'] = False
suc = function.updateRefractionParameters()
assert not suc
def test_updateRefractionParameters_3(function, qtbot):
function.refractionSource = 'onlineWeather'
function.deviceStat['mount'] = True
with mock.patch.object(function,
'movingAverageRefractionParameters',
return_value=(None, None)):
suc = function.updateRefractionParameters()
assert not suc
def test_updateRefractionParameters_4(function, qtbot):
def Sender():
return function.ui.isOnline
function.sender = Sender
function.refractionSource = 'onlineWeather'
function.deviceStat['mount'] = True
function.ui.checkRefracNone.setChecked(True)
with mock.patch.object(function,
'movingAverageRefractionParameters',
return_value=(10, 10)):
suc = function.updateRefractionParameters()
assert not suc
def test_updateRefractionParameters_5(function, qtbot):
def Sender():
return function.ui.isOnline
function.sender = Sender
function.refractionSource = 'onlineWeather'
function.deviceStat['mount'] = True
function.ui.checkRefracNone.setChecked(False)
function.ui.checkRefracNoTrack.setChecked(True)
function.app.mount.obsSite.status = '0'
with mock.patch.object(function,
'movingAverageRefractionParameters',
return_value=(10, 10)):
suc = function.updateRefractionParameters()
assert not suc
def test_updateRefractionParameters_6(function, qtbot):
def Sender():
return function.ui.setRefractionManual
function.sender = Sender
function.refractionSource = 'onlineWeather'
function.deviceStat['mount'] = True
with mock.patch.object(function,
'movingAverageRefractionParameters',
return_value=(10, 10)):
with mock.patch.object(function.app.mount.setting,
'setRefractionParam',
return_value=False):
suc = function.updateRefractionParameters()
assert not suc
def test_updateRefractionParameters_7(function, qtbot):
def Sender():
return function.ui.setRefractionManual
function.sender = Sender
function.refractionSource = 'onlineWeather'
function.deviceStat['mount'] = True
with mock.patch.object(function,
'movingAverageRefractionParameters',
return_value=(10, 10)):
with mock.patch.object(function.app.mount.setting,
'setRefractionParam',
return_value=True):
suc = function.updateRefractionParameters()
assert suc
def test_clearEnvironGui_1(function):
function.clearSensorWeatherGui('test')
assert function.ui.sensorWeatherTemp.text() == '-'
assert function.ui.sensorWeatherPress.text() == '-'
assert function.ui.sensorWeatherDewPoint.text() == '-'
assert function.ui.sensorWeatherHumidity.text() == '-'
def test_updateEnvironGui_1(function):
function.app.sensorWeather.name = 'test'
function.app.sensorWeather.data['WEATHER_PARAMETERS.WEATHER_TEMPERATURE'] = 10.5
function.updateSensorWeatherGui()
assert function.ui.sensorWeatherTemp.text() == '10.5'
def test_updateEnvironGui_2(function):
function.app.sensorWeather.name = 'test'
function.app.sensorWeather.data['WEATHER_PARAMETERS.WEATHER_PRESSURE'] = 10.5
function.updateSensorWeatherGui()
assert function.ui.sensorWeatherPress.text() == '10.5'
def test_updateEnvironGui_3(function):
function.app.sensorWeather.name = 'test'
function.app.sensorWeather.data['WEATHER_PARAMETERS.WEATHER_DEWPOINT'] = 10.5
function.updateSensorWeatherGui()
assert function.ui.sensorWeatherDewPoint.text() == '10.5'
def test_updateEnvironGui_4(function):
function.app.sensorWeather.name = 'test'
function.app.sensorWeather.data['WEATHER_PARAMETERS.WEATHER_HUMIDITY'] = 10
function.updateSensorWeatherGui()
assert function.ui.sensorWeatherHumidity.text() == ' 10'
def test_clearSkymeterGui_1(function):
function.clearSkymeterGui()
assert function.ui.skymeterSQR.text() == '-'
assert function.ui.skymeterTemp.text() == '-'
def test_updateSkymeterGui_1(function):
function.app.skymeter.name = 'test'
function.app.skymeter.data['SKY_QUALITY.SKY_BRIGHTNESS'] = 10.5
function.updateSkymeterGui()
assert function.ui.skymeterSQR.text() == '10.50'
def test_updateSkymeterGui_2(function):
function.app.skymeter.name = 'test'
function.app.skymeter.data['SKY_QUALITY.SKY_TEMPERATURE'] = 10.5
function.updateSkymeterGui()
assert function.ui.skymeterTemp.text() == '10.5'
def test_clearPowerWeatherGui_1(function):
function.clearPowerWeatherGui()
assert function.ui.powerHumidity.text() == '-'
assert function.ui.powerTemp.text() == '-'
assert function.ui.powerDewPoint.text() == '-'
def test_updatePowerWeatherGui_1(function):
function.app.powerWeather.name = 'test'
function.app.powerWeather.data['WEATHER_PARAMETERS.WEATHER_TEMPERATURE'] = 10.5
function.updatePowerWeatherGui()
assert function.ui.powerTemp.text() == '10.5'
def test_updatePowerWeatherGui_2(function):
function.app.powerWeather.name = 'test'
function.app.powerWeather.data['WEATHER_PARAMETERS.WEATHER_HUMIDITY'] = 10
function.updatePowerWeatherGui()
assert function.ui.powerHumidity.text() == ' 10'
def test_updatePowerWeatherGui_3(function):
function.app.powerWeather.name = 'test'
function.app.powerWeather.data['WEATHER_PARAMETERS.WEATHER_DEWPOINT'] = 10.5
function.updatePowerWeatherGui()
assert function.ui.powerDewPoint.text() == '10.5'
def test_getWebDataWorker_1(function):
suc = function.getWebDataWorker()
assert not suc
def test_getWebDataWorker_2(function):
suc = function.getWebDataWorker(url='http://test')
assert not suc
def test_getWebDataWorker_3(function):
class Test:
status_code = 300
with mock.patch.object(requests,
'get',
return_value=Test()):
suc = function.getWebDataWorker(url='http://test')
assert not suc
def test_getWebDataWorker_4(function):
class Test:
status_code = 200
with mock.patch.object(requests,
'get',
return_value=Test()):
suc = function.getWebDataWorker(url='http://test')
assert suc
def test_getWebDataWorker_5(function):
class Test:
status_code = 200
with mock.patch.object(requests,
'get',
return_value=Test(),
side_effect=Exception):
suc = function.getWebDataWorker(url='http://test')
assert not suc
def test_processClearOutsideImage_1(function):
image = QImage('tests/testData/forecast.png')
suc = function.processClearOutsideImage(image=image)
assert suc
def test_updateClearOutsideImage_1(function):
suc = function.updateClearOutsideImage()
assert not suc
def test_updateClearOutsideImage_2(function):
class Test:
content = 'test'
suc = function.updateClearOutsideImage(Test())
assert not suc
def test_updateClearOutsideImage_3(function):
image = QImage('tests/testData/forecast.png')
pixmapBase = QPixmap().fromImage(image)
with open(Path('tests/testData/forecast.png'), 'rb') as image:
f = image.read()
b = bytes(f)
class Test:
content = b
with mock.patch.object(function,
'processClearOutsideImage',
return_value=pixmapBase):
suc = function.updateClearOutsideImage(Test())
assert suc
def test_updateClearOutsideImage_4(function):
class Test:
pass
suc = function.updateClearOutsideImage(Test())
assert not suc
def test_updateClearOutside_1(function):
function.ui.isOnline.setChecked(False)
suc = function.updateClearOutside()
assert not suc
def test_updateClearOutside_2(function):
function.ui.isOnline.setChecked(True)
suc = function.updateClearOutside()
assert suc
def test_clearOnlineWeatherGui_1(function):
function.clearOnlineWeatherGui()
assert function.ui.onlineWeatherTemp.text() == '-'
assert function.ui.onlineWeatherPress.text() == '-'
assert function.ui.onlineWeatherHumidity.text() == '-'
assert function.ui.onlineWeatherCloudCover.text() == '-'
assert function.ui.onlineWeatherWindSpeed.text() == '-'
assert function.ui.onlineWeatherWindDir.text() == '-'
assert function.ui.onlineWeatherRainVol.text() == '-'
def test_updateOnlineWeatherGui_1(function):
suc = function.updateOnlineWeatherGui()
assert not suc
def test_updateOnlineWeatherGui_2(function):
suc = function.updateOnlineWeatherGui(data={'temperature': 10,
'pressure': 1000,
'humidity': 50,
'dewPoint': 10,
'cloudCover': 50,
'windSpeed': 10,
'windDir': 120,
'rain': 5})
assert suc
def test_clearDirectWeatherGui_1(function):
suc = function.clearDirectWeatherGui()
assert suc
def test_updateDirectWeatherGui_1(function):
function.deviceStat['directWeather'] = False
suc = function.updateDirectWeatherGui()
assert not suc
def test_updateDirectWeatherGui_2(function):
function.deviceStat['directWeather'] = True
suc = function.updateDirectWeatherGui()
assert not suc
def test_updateDirectWeatherGui_3(function):
class Test:
weatherTemperature = 3
weatherPressure = 1000
weatherHumidity = 50
weatherDewPoint = 10
function.deviceStat['directWeather'] = True
suc = function.updateDirectWeatherGui(setting=Test())
assert suc
| 31.621835
| 84
| 0.677808
|
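The tests above lean heavily on mock.patch.object as a context manager to force return values on collaborators so that the code under test takes a known branch. A self-contained sketch of that pattern (the Weather and Controller classes are invented for the example):

# Illustrative use of mock.patch.object on an instance attribute.
from unittest import mock

class Weather:
    def moving_average(self):
        raise RuntimeError("would normally read real sensor data")

class Controller:
    def __init__(self, weather):
        self.weather = weather
    def update(self):
        value = self.weather.moving_average()
        return value is not None

ctrl = Controller(Weather())
with mock.patch.object(ctrl.weather, 'moving_average', return_value=10):
    assert ctrl.update() is True
with mock.patch.object(ctrl.weather, 'moving_average', return_value=None):
    assert ctrl.update() is False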
7511679e73371585ea0c3b5905da6003649ede50
| 20,982
|
py
|
Python
|
Products/CMFCore/exportimport/tests/test_actions.py
|
fulv/Products.CMFCore
|
1d6ce101b10aaefba8aa917b6aa404e6c49e254d
|
[
"ZPL-2.1"
] | null | null | null |
Products/CMFCore/exportimport/tests/test_actions.py
|
fulv/Products.CMFCore
|
1d6ce101b10aaefba8aa917b6aa404e6c49e254d
|
[
"ZPL-2.1"
] | null | null | null |
Products/CMFCore/exportimport/tests/test_actions.py
|
fulv/Products.CMFCore
|
1d6ce101b10aaefba8aa917b6aa404e6c49e254d
|
[
"ZPL-2.1"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Actions tool node adapter unit tests.
"""
import unittest
from Acquisition import Implicit
from Acquisition import aq_parent
from OFS.OrderedFolder import OrderedFolder
from Products.GenericSetup.testing import BodyAdapterTestCase
from Products.GenericSetup.testing import NodeAdapterTestCase
from Products.GenericSetup.tests.common import BaseRegistryTests
from Products.GenericSetup.tests.common import DummyExportContext
from Products.GenericSetup.tests.common import DummyImportContext
from zope.component import getSiteManager
from zope.interface import implementer
from ...ActionProviderBase import ActionProviderBase
from ...interfaces import IActionProvider
from ...interfaces import IActionsTool
from ...interfaces import IMembershipTool
from ...testing import ExportImportZCMLLayer
from ...tests.base.dummy import DummySite
_ACTION_XML = b"""\
<object name="foo_action" meta_type="CMF Action">
<property name="title">Foo</property>
<property name="description"></property>
<property name="url_expr">string:${object_url}/foo</property>
<property name="link_target"></property>
<property name="icon_expr"></property>
<property name="available_expr">python:1</property>
<property name="permissions"/>
<property name="visible">True</property>
</object>
"""
_ACTIONCATEGORY_XML = b"""\
<object name="foo_category" meta_type="CMF Action Category">
<property name="title"></property>
<object name="foo_action" meta_type="CMF Action">
<property name="title"></property>
<property name="description"></property>
<property name="url_expr"></property>
<property name="link_target"></property>
<property name="icon_expr"></property>
<property name="available_expr"></property>
<property name="permissions"/>
<property name="visible">True</property>
</object>
</object>
"""
_ACTIONSTOOL_BODY = b"""\
<?xml version="1.0" encoding="utf-8"?>
<object name="portal_actions" meta_type="CMF Actions Tool"
xmlns:i18n="http://xml.zope.org/namespaces/i18n">
<action-provider name="portal_actions"/>
<object name="foo_category" meta_type="CMF Action Category">
<property name="title"></property>
<object name="foo_action" meta_type="CMF Action" i18n:domain="foo_domain">
<property name="title" i18n:translate=""></property>
<property name="description" i18n:translate=""></property>
<property name="url_expr"></property>
<property name="link_target"></property>
<property name="icon_expr"></property>
<property name="available_expr"></property>
<property name="permissions"/>
<property name="visible">True</property>
</object>
</object>
</object>
"""
_EMPTY_EXPORT = b"""\
<?xml version="1.0"?>
<object name="portal_actions" meta_type="CMF Actions Tool"
xmlns:i18n="http://xml.zope.org/namespaces/i18n">
<action-provider name="portal_actions"/>
</object>
"""
_OLD_EXPORT = b"""\
<?xml version="1.0"?>
<object name="portal_actions" meta_type="CMF Actions Tool"
xmlns:i18n="http://xml.zope.org/namespaces/i18n">
<action-provider name="portal_actions">
<action action_id="baz"
title="Baz"
url_expr="string:${object_url}/baz"
condition_expr="python:1"
category="dummy"
visible="True"/>
</action-provider>
<action-provider name="portal_foo">
<action action_id="foo"
title="Foo"
url_expr="string:${object_url}/foo"
condition_expr="python:1"
category="dummy"
visible="True"/>
</action-provider>
<action-provider name="portal_bar">
<action action_id="bar"
title="Bar"
url_expr="string:${object_url}/bar"
condition_expr="python:0"
category="dummy"
visible="False">
<permission>Manage portal</permission>
</action>
</action-provider>
</object>
"""
_NORMAL_EXPORT = """\
<?xml version="1.0" encoding="utf-8"?>
<object name="portal_actions" meta_type="CMF Actions Tool"
xmlns:i18n="http://xml.zope.org/namespaces/i18n">
<action-provider name="portal_actions">
<action action_id="baz"
title="Baz"
url_expr="string:${object_url}/baz"
condition_expr="python:1"
category="dummy"
visible="True"/>
</action-provider>
<action-provider name="portal_foo"/>
<action-provider name="portal_bar"/>
</object>
"""
_NEWSYTLE_EXPORT = b"""\
<?xml version="1.0"?>
<object name="portal_actions" meta_type="CMF Actions Tool"
xmlns:i18n="http://xml.zope.org/namespaces/i18n">
<action-provider name="portal_actions"/>
<action-provider name="portal_foo"/>
<action-provider name="portal_bar"/>
<object name="dummy" meta_type="CMF Action Category">
<property name="title"></property>
<object name="baz" meta_type="CMF Action">
<property name="title">Baz</property>
<property name="description"></property>
<property name="url_expr">string:${object_url}/baz</property>
<property name="link_target"></property>
<property name="icon_expr"></property>
<property name="available_expr">python:1</property>
<property name="permissions"></property>
<property name="visible">True</property>
</object>
</object>
</object>
"""
_I18N_IMPORT = """\
<?xml version="1.0"?>
<object name="portal_actions" meta_type="CMF Actions Tool"
xmlns:i18n="http://xml.zope.org/namespaces/i18n">
<action-provider name="portal_actions"/>
<object name="dummy" meta_type="CMF Action Category">
<property name="title"></property>
<object name="foo" meta_type="CMF Action" i18n:domain="foo_domain">
<property name="title" i18n:translate="">Foo</property>
<property name="description" i18n:translate=""></property>
<property name="url_expr">string:${object_url}/foo</property>
<property name="link_target"></property>
<property name="icon_expr"></property>
<property name="available_expr">python:1</property>
<property name="permissions"></property>
<property name="visible">True</property>
</object>
</object>
</object>
"""
_INSERT_IMPORT = """\
<?xml version="1.0"?>
<object name="portal_actions">
<object name="dummy">
<object name="spam" meta_type="CMF Action" insert-before="*">
<property name="title">Spam</property>
<property name="description"></property>
<property name="url_expr">string:${object_url}/spam</property>
<property name="icon_expr">string:spam_icon.png</property>
<property name="available_expr"></property>
<property name="permissions">
<element value="View" /></property>
<property name="visible">True</property>
</object>
<object name="baz" insert-after="*">
<property name="icon_expr">string:baz_icon.png</property>
</object>
</object>
</object>
"""
_REMOVE_IMPORT = """\
<?xml version="1.0"?>
<object name="portal_actions">
<action-provider name="portal_actions" remove=""/>
<action-provider name="not_existing" remove=""/>
<action-provider name="portal_bar" remove=""/>
</object>
"""
@implementer(IActionProvider)
class DummyTool(OrderedFolder, ActionProviderBase):
pass
class DummyUser(Implicit):
def getId(self):
return 'dummy'
class DummyMembershipTool(DummyTool):
def isAnonymousUser(self):
return False
def getAuthenticatedMember(self):
return DummyUser().__of__(aq_parent(self))
@implementer(IActionsTool)
class DummyActionsTool(DummyTool):
id = 'portal_actions'
meta_type = 'CMF Actions Tool'
def __init__(self):
self._providers = []
def addActionProvider(self, provider_name):
self._providers.append(provider_name)
def listActionProviders(self):
return self._providers[:]
def deleteActionProvider(self, provider_name):
self._providers = [x for x in self._providers if x != provider_name]
class ActionNodeAdapterTests(NodeAdapterTestCase, unittest.TestCase):
layer = ExportImportZCMLLayer
def _getTargetClass(self):
from ..actions import ActionNodeAdapter
return ActionNodeAdapter
def _populate(self, obj):
obj._setPropValue('title', 'Foo')
obj._setPropValue('url_expr', 'string:${object_url}/foo')
obj._setPropValue('available_expr', 'python:1')
def _verifyImport(self, obj):
self.assertEqual(type(obj.title), str)
self.assertEqual(obj.title, 'Foo')
self.assertEqual(type(obj.description), str)
self.assertEqual(obj.description, '')
self.assertEqual(type(obj.url_expr), str)
self.assertEqual(obj.url_expr, 'string:${object_url}/foo')
self.assertEqual(type(obj.icon_expr), str)
self.assertEqual(obj.icon_expr, '')
self.assertEqual(type(obj.available_expr), str)
self.assertEqual(obj.available_expr, 'python:1')
self.assertEqual(type(obj.permissions), tuple)
self.assertEqual(obj.permissions, ())
self.assertEqual(type(obj.visible), bool)
self.assertEqual(obj.visible, True)
def setUp(self):
from ...ActionInformation import Action
self._obj = Action('foo_action')
self._XML = _ACTION_XML
class ActionCategoryNodeAdapterTests(NodeAdapterTestCase, unittest.TestCase):
layer = ExportImportZCMLLayer
def _getTargetClass(self):
from ..actions import ActionCategoryNodeAdapter
return ActionCategoryNodeAdapter
def _populate(self, obj):
from ...ActionInformation import Action
obj._setObject('foo_action', Action('foo_action'))
def _verifyImport(self, obj):
self.assertEqual(type(obj.title), str)
self.assertEqual(obj.title, '')
def setUp(self):
from ...ActionInformation import ActionCategory
self._obj = ActionCategory('foo_category')
self._XML = _ACTIONCATEGORY_XML
class ActionsToolXMLAdapterTests(BodyAdapterTestCase, unittest.TestCase):
layer = ExportImportZCMLLayer
def _getTargetClass(self):
from ..actions import ActionsToolXMLAdapter
return ActionsToolXMLAdapter
def _populate(self, obj):
from ...ActionInformation import Action
from ...ActionInformation import ActionCategory
obj._setObject('foo_category', ActionCategory('foo_category'))
obj.action_providers = ('portal_actions',)
obj.foo_category._setObject('foo_action', Action('foo_action'))
obj.foo_category.foo_action.i18n_domain = 'foo_domain'
def _verifyImport(self, obj):
self.assertEqual(type(obj.action_providers), tuple)
self.assertEqual(obj.action_providers, ('portal_actions',))
self.assertEqual(type(obj.action_providers[0]), str)
self.assertEqual(obj.action_providers[0], 'portal_actions')
def setUp(self):
from ...ActionsTool import ActionsTool
self._obj = ActionsTool('portal_actions')
self._BODY = _ACTIONSTOOL_BODY
getSiteManager().registerUtility(self._obj, IActionsTool)
class _ActionSetup(BaseRegistryTests):
def _initSite(self, foo=2, bar=2):
site = DummySite('site')
sm = getSiteManager()
sm.registerUtility(DummyMembershipTool(), IMembershipTool)
atool = DummyActionsTool()
atool.addActionProvider('portal_actions')
sm.registerUtility(atool, IActionsTool)
if foo > 0:
site.portal_foo = DummyTool()
if foo > 1:
site.portal_foo.addAction(id='foo',
name='Foo',
action='foo',
condition='python:1',
permission=(),
category='dummy',
visible=1)
atool.addActionProvider('portal_foo')
if bar > 0:
site.portal_bar = DummyTool()
if bar > 1:
site.portal_bar.addAction(id='bar',
name='Bar',
action='bar',
condition='python:0',
permission=('Manage portal',),
category='dummy',
visible=0)
atool.addActionProvider('portal_bar')
return site, atool
class exportActionProvidersTests(_ActionSetup):
layer = ExportImportZCMLLayer
def test_unchanged(self):
from ..actions import exportActionProviders
site, _atool = self._initSite(0, 0)
context = DummyExportContext(site)
exportActionProviders(context)
self.assertEqual(len(context._wrote), 1)
filename, text, content_type = context._wrote[0]
self.assertEqual(filename, 'actions.xml')
self._compareDOM(text.decode('utf8'), _EMPTY_EXPORT)
self.assertEqual(content_type, 'text/xml')
def test_normal(self):
from ..actions import exportActionProviders
site, atool = self._initSite()
# Set up an old action for added difficulty
atool.addAction(id='baz',
name='Baz',
action='baz',
condition='python:1',
permission=(),
category='dummy',
visible=1)
context = DummyExportContext(site)
exportActionProviders(context)
self.assertEqual(len(context._wrote), 1)
filename, text, content_type = context._wrote[0]
self.assertEqual(filename, 'actions.xml')
self._compareDOM(text.decode('utf-8'), _NORMAL_EXPORT)
self.assertEqual(content_type, 'text/xml')
class importActionProvidersTests(_ActionSetup):
layer = ExportImportZCMLLayer
def test_empty_default_purge(self):
from ..actions import importActionProviders
site, atool = self._initSite(2, 0)
self.assertEqual(len(atool.listActionProviders()), 2)
self.assertTrue('portal_foo' in atool.listActionProviders())
self.assertTrue('portal_actions' in atool.listActionProviders())
context = DummyImportContext(site)
context._files['actions.xml'] = _EMPTY_EXPORT
importActionProviders(context)
self.assertEqual(len(atool.listActionProviders()), 1)
self.assertFalse('portal_foo' in atool.listActionProviders())
self.assertTrue('portal_actions' in atool.listActionProviders())
self.assertEqual(len(atool.objectIds()), 0)
def test_empty_explicit_purge(self):
from ..actions import importActionProviders
site, atool = self._initSite(2, 0)
self.assertEqual(len(atool.listActionProviders()), 2)
self.assertTrue('portal_foo' in atool.listActionProviders())
self.assertTrue('portal_actions' in atool.listActionProviders())
context = DummyImportContext(site, True)
context._files['actions.xml'] = _EMPTY_EXPORT
importActionProviders(context)
self.assertEqual(len(atool.listActionProviders()), 1)
self.assertFalse('portal_foo' in atool.listActionProviders())
self.assertTrue('portal_actions' in atool.listActionProviders())
self.assertEqual(len(atool.objectIds()), 0)
def test_empty_skip_purge(self):
from ..actions import importActionProviders
site, atool = self._initSite(2, 0)
self.assertEqual(len(atool.listActionProviders()), 2)
self.assertTrue('portal_foo' in atool.listActionProviders())
self.assertTrue('portal_actions' in atool.listActionProviders())
context = DummyImportContext(site, False)
context._files['actions.xml'] = _EMPTY_EXPORT
importActionProviders(context)
self.assertEqual(len(atool.listActionProviders()), 2)
self.assertTrue('portal_foo' in atool.listActionProviders())
self.assertTrue('portal_actions' in atool.listActionProviders())
def test_normal(self):
from ..actions import exportActionProviders
from ..actions import importActionProviders
site, atool = self._initSite(1, 1)
foo = site.portal_foo
bar = site.portal_bar
self.assertEqual(len(atool.listActionProviders()), 1)
self.assertFalse('portal_foo' in atool.listActionProviders())
self.assertFalse(foo.listActions())
self.assertFalse('portal_bar' in atool.listActionProviders())
self.assertFalse(bar.listActions())
self.assertTrue('portal_actions' in atool.listActionProviders())
context = DummyImportContext(site)
context._files['actions.xml'] = _OLD_EXPORT
importActionProviders(context)
self.assertEqual(len(atool.listActionProviders()), 3)
self.assertTrue('portal_bar' in atool.listActionProviders())
self.assertTrue('portal_foo' in atool.listActionProviders())
self.assertTrue('portal_actions' in atool.listActionProviders())
self.assertEqual(len(atool.objectIds()), 1)
self.assertTrue('dummy' in atool.objectIds())
# Only one action appears. The importer only deals with actions
# defined by the actions tool. Other tools are responsible for
# exporting/importing actions themselves.
self.assertEqual(len(atool.dummy.objectIds()), 1)
self.assertTrue('baz' in atool.dummy.objectIds())
self.assertFalse(foo.listActions())
self.assertFalse(bar.listActions())
# complete the roundtrip
context = DummyExportContext(site)
exportActionProviders(context)
self.assertEqual(len(context._wrote), 1)
filename, text, content_type = context._wrote[0]
self.assertEqual(filename, 'actions.xml')
self._compareDOM(text.decode('utf8'), _NEWSYTLE_EXPORT)
self.assertEqual(content_type, 'text/xml')
def test_i18n(self):
from ..actions import exportActionProviders
from ..actions import importActionProviders
site, atool = self._initSite(0, 0)
context = DummyImportContext(site)
context._files['actions.xml'] = _I18N_IMPORT
importActionProviders(context)
self.assertEqual(len(atool.listActionProviders()), 1)
self.assertEqual(atool.objectIds(), ['dummy'])
self.assertEqual(atool.dummy.objectIds(), ['foo'])
self.assertEqual(atool.dummy.foo.i18n_domain, 'foo_domain')
# complete the roundtrip
context = DummyExportContext(site)
exportActionProviders(context)
self.assertEqual(len(context._wrote), 1)
filename, text, content_type = context._wrote[0]
self.assertEqual(filename, 'actions.xml')
self._compareDOM(text.decode('utf8'), _I18N_IMPORT)
self.assertEqual(content_type, 'text/xml')
def test_insert_skip_purge(self):
from ..actions import importActionProviders
site, atool = self._initSite(0, 0)
context = DummyImportContext(site)
context._files['actions.xml'] = _NEWSYTLE_EXPORT
importActionProviders(context)
self.assertEqual(len(atool.listActionProviders()), 3)
self.assertEqual(atool.objectIds(), ['dummy'])
self.assertEqual(atool.dummy.objectIds(), ['baz'])
self.assertEqual(atool.dummy.baz.icon_expr, '')
context = DummyImportContext(site, False)
context._files['actions.xml'] = _INSERT_IMPORT
importActionProviders(context)
self.assertEqual(len(atool.listActionProviders()), 3)
self.assertEqual(atool.objectIds(), ['dummy'])
self.assertEqual(atool.dummy.objectIds(), ['spam', 'baz'])
self.assertEqual(atool.dummy.baz.icon_expr, 'string:baz_icon.png')
def test_remove_skip_purge(self):
from ..actions import importActionProviders
site, atool = self._initSite(2, 2)
self.assertEqual(atool.listActionProviders(),
['portal_actions', 'portal_foo', 'portal_bar'])
context = DummyImportContext(site, False)
context._files['actions.xml'] = _REMOVE_IMPORT
importActionProviders(context)
self.assertEqual(atool.listActionProviders(), ['portal_foo'])
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(ActionNodeAdapterTests),
unittest.makeSuite(ActionCategoryNodeAdapterTests),
unittest.makeSuite(ActionsToolXMLAdapterTests),
unittest.makeSuite(exportActionProvidersTests),
unittest.makeSuite(importActionProvidersTests),
))
| 34.738411
| 78
| 0.665761
|
a2ed779ee3b43671fe31f413817a02f08590fe08
| 8,641
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/layout/mapbox/layer/symbol/_textfont.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/layout/mapbox/layer/symbol/_textfont.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/layout/mapbox/layer/symbol/_textfont.py
|
labaran1/plotly.py
|
7ec751e8fed4a570c11ea4bea2231806389d62eb
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Textfont(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.mapbox.layer.symbol"
_path_str = "layout.mapbox.layer.symbol.textfont"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Textfont object
Sets the icon text font (color=mapbox.layer.paint.text-color,
size=mapbox.layer.layout.text-size). Has an effect only when
`type` is set to "symbol".
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.mapbox.
layer.symbol.Textfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.mapbox.layer.symbol.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.mapbox.layer.symbol.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
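# Hedged usage sketch (not part of the generated class, and kept as comments to
# avoid importing plotly.graph_objects from inside this generated module): how
# a Textfont might be attached to a symbol layer. The family/size/color values
# and the layer text are placeholders.
#
# import plotly.graph_objects as go
#
# textfont = go.layout.mapbox.layer.symbol.Textfont(
#     family="Open Sans, Arial", size=12, color="#333333")
# layer = go.layout.mapbox.Layer(
#     type="symbol", symbol=dict(text="station", textfont=textfont))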
| 37.569565
| 84
| 0.569841
|
fddb4ad33a5f5489b14fb299a7c665f266e660db
| 212
|
py
|
Python
|
compiler.py
|
Askar-LLC/hippopserver
|
5be08ed26f091ff80f46a097c300ad8ba6f19721
|
[
"Apache-2.0"
] | 1
|
2019-09-02T23:10:36.000Z
|
2019-09-02T23:10:36.000Z
|
compiler.py
|
Askar-LLC/hippopserver
|
5be08ed26f091ff80f46a097c300ad8ba6f19721
|
[
"Apache-2.0"
] | null | null | null |
compiler.py
|
Askar-LLC/hippopserver
|
5be08ed26f091ff80f46a097c300ad8ba6f19721
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
# 'debug' is a third-party debugger pulled in from GitHub; guard the import so
# the script still runs when that package is not installed.
try:
    import debug
except ImportError:
    debug = None
def stateRun():
    x = 1
    if x:
        return x
def runCode():
    # print() returns None, so binding its result to a name served no purpose;
    # call it for its side effect only.
    print()
def runStation():
    runCode()
runStation()
| 9.636364
| 32
| 0.599057
|
67fac0137ddadd93b778cbc88082e6ba92e871d2
| 8,313
|
py
|
Python
|
attic/encrypt_files_in_dir_to_s3.py
|
BD2KGenomics/PrecisionImmunology
|
06310682c50dcf8917b912c8e551299ff7ee41ce
|
[
"Apache-2.0"
] | 28
|
2016-04-16T21:11:46.000Z
|
2022-02-07T18:01:56.000Z
|
attic/encrypt_files_in_dir_to_s3.py
|
BD2KGenomics/PrecisionImmunology
|
06310682c50dcf8917b912c8e551299ff7ee41ce
|
[
"Apache-2.0"
] | 207
|
2016-03-30T20:47:34.000Z
|
2021-10-15T20:58:10.000Z
|
attic/encrypt_files_in_dir_to_s3.py
|
BD2KGenomics/PrecisionImmunology
|
06310682c50dcf8917b912c8e551299ff7ee41ce
|
[
"Apache-2.0"
] | 9
|
2016-04-12T23:33:58.000Z
|
2020-08-17T19:27:49.000Z
|
#!/usr/bin/env python2.7
# Copyright (C) 2016 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Author : Arjun Arkal Rao
Affiliation : UCSC BME, UCSC Genomics Institute
File : encrypt_files_in_dir_to_s3.py
SOURCE: https://github.com/jvivian/one_off_scripts/blob/master/
encrypt_files_in_dir_to_s3.py
ORIGINAL AUTHOR: John Vivian
Move files in a directory, or entire directory structures to S3 with (or without) encryption.
"""
from __future__ import print_function
import argparse
import base64
import hashlib
import os
import subprocess
import sys
import re
from boto.s3.connection import S3Connection
class InputParameterError(Exception):
"""
This Error Class will be raised in the case of a bad parameter provided.
"""
__module__ = Exception.__module__
def generate_unique_key(master_key, url):
"""
This module will take a master key and a url, and then make a new key specific to the url, based
off the master.
:param str master_key: Path to the master key used for encryption.
:param str url: Full URL to the potential file location in S3.
:returns new_key: The new key that is obtained by hashing the master key:url combination.
"""
with open(master_key, 'r') as keyfile:
master_key = keyfile.read()
assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. Key: %s' % master_key + \
', Length: %s' % len(master_key)
new_key = hashlib.sha256(master_key + url).digest()
assert len(new_key) == 32, 'New key is invalid and is not ' + \
'32 characters: {}'.format(new_key)
return new_key
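# Hedged sketch of how the derived key is consumed further down: the master-key
# path and object URL below are hypothetical and only show the shape of the
# call plus the base64 encoding that s3am expects. Kept as comments so the
# module's import-time behaviour is unchanged.
#
# per_file_key = generate_unique_key('/keys/master.key',
#                                    'https://s3.amazonaws.com/my-bucket/sample.bam')
# encoded_key = base64.b64encode(per_file_key)  # passed via --sse-key-base64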
class BucketInfo(object):
"""
This class contains all the functions relevant to this script for working with a given S3
bucket.
"""
def __init__(self, bucket_name):
self.bucket_name = bucket_name
# Set up the https url base. A typical s3 endpoint url base would look like
# https://s3-ENDPOINT.amazonaws.com/
# However the https endpoints for us-east-1 are slightly different and use
# https://s3.amazonaws.com/
# REF: http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
endpoint = self._get_bucket_endpoint()
endpoint = '-' + endpoint if endpoint else ''
self._https_url_base = 'https://s3' + endpoint + '.amazonaws.com/' + self.bucket_name
# Set up the s3 url base
self._s3_url_base = 'S3://' + self.bucket_name
def _get_bucket_endpoint(self):
"""
Queries S3 to identify the region hosting the provided bucket.
"""
conn = S3Connection()
bucket = conn.lookup(self.bucket_name)
if not bucket:
# TODO: Make the bucket here?
raise InputParameterError('The provided bucket %s doesn\'t exist' % self.bucket_name)
endpoint = str(bucket.get_location())
return endpoint
def object_https_url(self, key):
"""
Returns the full https url for key given this bucket.
:param key: the remote filename
:return: Full https url to the file in S3
"""
return os.path.join(self._https_url_base, key)
def object_s3_url(self, key):
"""
Returns the full s3 url for key given this bucket.
:param key: the remote filename
:return: Full https url to the file in S3
"""
return os.path.join(self._s3_url_base, key)
def write_to_s3(datum, master_key, bucket_name, remote_dir=''):
"""
This module will take in some datum (a file, or a folder) and write it to
S3. It requires a master key to encrypt the datum with, and a bucket to
drop the results into. If remote dir is set, the datum is dropped into the
provided directory.
:param str datum: File or folder that needs to be transferred to S3
:param str master_key: Path to the master key used for encryption.
:param str bucket_name: AWS bucket to store the remote data
:param str remote_dir: An optional parameter describing a remote pseudo directory in the bucket
where the data will be stored.
"""
# Instantiate the bucket info class to set up the https and s3 url bases.
bucket_info = BucketInfo(bucket_name)
# Retain the base dir separately from the file name / folder structure of DATUM. This way it
# can be easily joined into an AWS filename
folder_base_dir = os.path.split(datum)[0]
# Ensure files are either "regular files" or folders
if os.path.isfile(datum):
files = [os.path.basename(datum)]
elif os.path.isdir(datum):
files = ['/'.join([re.sub(folder_base_dir, '', folder), filename]).lstrip('/')
for folder, _, files in os.walk(datum) for filename in files]
else:
raise RuntimeError(datum + 'was neither regular file nor folder.')
# Write each file to S3
for file_path in files:
# key, here, refers to the key or token used to access the file once it's in S3.
# THIS IS NOT RELATED TO THE ENCRYPTION KEY.
key = os.path.join(remote_dir, file_path)
# base command call
command = ['s3am', 'upload']
if master_key:
new_key = generate_unique_key(master_key, bucket_info.object_https_url(key))
# Add base64 encoded key
command.extend(['--sse-key-base64', base64.b64encode(new_key)])
# Add source path info to the call
command.extend(['file://' + os.path.join(folder_base_dir, file_path)])
# Add destination to the call
command.append(bucket_info.object_s3_url(key))
subprocess.call(command)
return None
def main():
"""
This is the main module for the script. The script will accept a file, or a directory, and then
encrypt it with a provided key before pushing it to S3 into a specified bucket.
"""
parser = argparse.ArgumentParser(description=main.__doc__, add_help=True)
parser.add_argument('-M', '--master_key', dest='master_key', help='Path to the master key ' +
'used for the encryption. Data is transferred without encryption if this' +
'is not provided.', type=str, required=False, default=None)
parser.add_argument('-B', '--bucket', dest='bucket', help='S3 bucket.', type=str, required=True)
parser.add_argument('-R', '--remote_dir', dest='remote_dir', help='Pseudo directory within ' +
'the bucket to store the file(s). NOTE: Folder structure below ' +
'REMOTE_DIR will be retained.', type=str, required=False, default='')
parser.add_argument('data', help='File(s) or folder(s) to transfer to S3.', type=str, nargs='+')
params = parser.parse_args()
# Input handling
if params.master_key and not os.path.exists(params.master_key):
raise InputParameterError('The master key was not found at ' +
params.master_key)
# If the user doesn't have ~/.boto , it doesn't even make sense to go ahead
if not os.path.exists(os.path.expanduser('~/.boto')):
raise RuntimeError('~/.boto not found')
# Ensure that the remote directory doesn't start with a /
if params.remote_dir.startswith('/'):
raise InputParameterError('The remote dir cannot start with a \'/\'')
# Process each of the input arguments.
for datum in params.data:
datum = os.path.abspath(datum)
if not os.path.exists(datum):
print('ERROR: %s could not be found.' % datum, file=sys.stderr)
continue
write_to_s3(datum, params.master_key, params.bucket, params.remote_dir)
return None
if __name__ == '__main__':
main()
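# Example invocation (hedged; the key path, bucket and data paths are
# hypothetical):
#   python encrypt_files_in_dir_to_s3.py -M ~/keys/master.key -B my-bucket \
#       -R runs/2016_04 /data/sample_dir /data/reads.bam
# Omitting -M transfers the files without client-side encryption.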
| 42.630769
| 100
| 0.659569
|
caae29c7907bf9f98169c242c63d8112447bcc27
| 2,860
|
py
|
Python
|
keras/layers/merging/multiply.py
|
itsraina/keras
|
5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35
|
[
"Apache-2.0"
] | null | null | null |
keras/layers/merging/multiply.py
|
itsraina/keras
|
5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35
|
[
"Apache-2.0"
] | null | null | null |
keras/layers/merging/multiply.py
|
itsraina/keras
|
5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer that multiplies (element-wise) several inputs."""
from keras.layers.merging.base_merge import _Merge
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Multiply")
class Multiply(_Merge):
"""Layer that multiplies (element-wise) a list of inputs.
It takes as input a list of tensors, all of the same shape, and returns
a single tensor (also of the same shape).
>>> tf.keras.layers.Multiply()([np.arange(5).reshape(5, 1),
... np.arange(5, 10).reshape(5, 1)])
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[ 0],
[ 6],
[14],
[24],
[36]])>
>>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> multiplied = tf.keras.layers.Multiply()([x1, x2])
>>> multiplied.shape
TensorShape([5, 8])
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = output * inputs[i]
return output
@keras_export("keras.layers.multiply")
def multiply(inputs, **kwargs):
"""Functional interface to the `Multiply` layer.
Example:
>>> x1 = np.arange(3.0)
>>> x2 = np.arange(3.0)
>>> tf.keras.layers.multiply([x1, x2])
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([0., 1., 4.], ...)>
Usage in a functional model:
>>> input1 = tf.keras.layers.Input(shape=(16,))
>>> x1 = tf.keras.layers.Dense(
... 8, activation='relu')(input1) #shape=(None, 8)
>>> input2 = tf.keras.layers.Input(shape=(32,))
>>> x2 = tf.keras.layers.Dense(
... 8, activation='relu')(input2) #shape=(None, 8)
>>> out = tf.keras.layers.multiply([x1,x2]) #shape=(None, 8)
>>> out = tf.keras.layers.Dense(4)(out)
>>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)
Args:
inputs: A list of input tensors.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the element-wise product of the inputs.
"""
return Multiply(**kwargs)(inputs)
| 33.647059
| 80
| 0.613287
|
2feb0316a00b27eb2af11be358cf57539ce25e72
| 6,456
|
py
|
Python
|
app.py
|
abhishekkagautam/AI-model-traing-platform
|
385a1b52c558e2bfc0856077ef41ddfd600e3cf7
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
abhishekkagautam/AI-model-traing-platform
|
385a1b52c558e2bfc0856077ef41ddfd600e3cf7
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
abhishekkagautam/AI-model-traing-platform
|
385a1b52c558e2bfc0856077ef41ddfd600e3cf7
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, render_template, request, session, redirect, url_for, jsonify, flash
import sklearn
# A bare `import sklearn` does not load the submodules that are referenced as
# sklearn.tree, sklearn.neighbors and sklearn.linear_model further down, so
# import them explicitly here.
from sklearn import tree, neighbors, linear_model  # noqa: F401
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import confusion_matrix
import pickle
from flask_session import Session
from flask import send_file
#app = Flask(__name__)
app = Flask(__name__)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
""" @app.route("/")
def hello_world():
return render_template("index.html")
"""
@app.route("/")
def index():
# check if the users exist or not
if not session.get("name"):
# if not there in the session then redirect to the login page
return redirect("/login")
return render_template('index.html')
@app.route("/login", methods=["POST", "GET"])
def login():
# if form is submited
if request.method == "POST":
# record the user name
session["name"] = request.form.get("name")
# redirect to the main page
return redirect("/")
return render_template("login.html")
@app.route("/logout")
def logout():
session["name"] = None
return redirect("/")
@app.route('/data_model', methods = ['POST'])
def data_model():
if request.method == 'POST':
problemTypeDic = {"1":"Classification","2":"Regression"}
modelNameDic = {"1":"Linear Regression",
"2":"Logistic Regression",
"3":"Random Forest",
"4":"Decision Tree",
"5":"kNN",
"6":"SVM"}
#title = request.form["title"]
#firstName = request.form['nameFirst']
#lastName = request.form['nameLast']
email = request.form['email']
contact = request.form['number']
modelName = request.form['modelName']
problemType = request.form['problemType']
dataSet = request.files['dataSet']
ytrainName = request.form["ytrain"]
problem = problemTypeDic[problemType]
model = modelNameDic[modelName]
#userData = [title,firstName,lastName,email,contact,problem,model]
dataframe = pd.read_csv(dataSet)
X = dataframe.loc[:, dataframe.columns != ytrainName] # Features
y = dataframe[ytrainName]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
userId = email[:email.index("@")]
# print(userId)
if problem == "Classification":
session["modelName"] = userId+"_"+"finalized_model.sav"
if model == "Linear Regression":
values,modeld = linearRegression(X_train, X_test, y_train, y_test)
pickle.dump(modeld, open(session["modelName"], 'wb'))
pass
elif model =="Logistic Regression":
values,modeld = logisticRegression(X_train, X_test, y_train, y_test)
pickle.dump(modeld, open(session["modelName"], 'wb'))
pass
elif model == "Random Forest":
values,modeld = randomForestClassifier(X_train, X_test, y_train, y_test)
pickle.dump(modeld, open(session["modelName"], 'wb'))
pass
elif model == "Decision Tree":
values,modeld = decisionTreeClassifier(X_train, X_test, y_train, y_test)
pickle.dump(modeld, open(session["modelName"], 'wb'))
pass
elif model == "kNN":
values,modeld = kNN(X_train, X_test, y_train, y_test)
pickle.dump(modeld, open(session["modelName"], 'wb'))
pass
elif model =="SVM":
values,modeld = sVM(X_train, X_test, y_train, y_test)
pickle.dump(modeld, open(session["modelName"], 'wb'))
pass
# fileSender(path)
session["path"] = str(session["name"]+'finalized_model.sav')
return render_template('output.html',values=values)
if problem == "Regression":
if model == "Linear Regression":
pass
pass
@app.route("/return-file")
def fileSender():
q= session["modelName"]
return send_file(q,
as_attachment=True)
def decisionTreeClassifier(X_train, X_test, y_train, y_test):
model = sklearn.tree.DecisionTreeClassifier()
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
acc=metrics.accuracy_score(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
values = {"cm":cm,"acc":acc}
return(values,model)
def randomForestClassifier(X_train, X_test, y_train, y_test):
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
acc=metrics.accuracy_score(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
values = {"cm":cm,"acc":acc}
return(values,model)
def linearRegression(X_train, X_test, y_train, y_test):
model = sklearn.linear_model.LinearRegression()
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
acc=metrics.accuracy_score(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
values = {"cm":cm,"acc":acc}
return(values,model)
def logisticRegression(X_train, X_test, y_train, y_test):
from sklearn import linear_model
model = linear_model.LogisticRegression()
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
acc=metrics.accuracy_score(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
values = {"cm":cm,"acc":acc}
return(values,model)
def sVM(X_train, X_test, y_train, y_test):
from sklearn import svm
model = svm.SVC()
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
acc=metrics.accuracy_score(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
values = {"cm":cm,"acc":acc}
return(values,model)
def kNN(X_train, X_test, y_train, y_test):
model = sklearn.neighbors.KNeighborsClassifier()
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
acc=metrics.accuracy_score(y_test, y_pred)
cm = confusion_matrix(y_test, y_pred)
values = {"cm":cm,"acc":acc}
return(values,model)
if __name__ == '__main__':
app.run(debug=True)
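# Hedged client sketch (a separate script, not executed by this app): posting a
# CSV to /data_model with the form fields the route reads. The URL, file name
# and field values are placeholders.
#
# import requests
# with open('train.csv', 'rb') as fh:
#     resp = requests.post(
#         'http://localhost:5000/data_model',
#         data={'email': 'user@example.com', 'number': '5551234',
#               'modelName': '3',      # Random Forest
#               'problemType': '1',    # Classification
#               'ytrain': 'label'},
#         files={'dataSet': fh})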
| 34.897297
| 96
| 0.620818
|
1469ae10a1eb10f50cf6b614d125dc6b6038c700
| 1,183
|
py
|
Python
|
src/python/__init__.py
|
Dnargne/blackhole
|
17ffa8c910039878d5352fbd71936c37c7576822
|
[
"MIT"
] | null | null | null |
src/python/__init__.py
|
Dnargne/blackhole
|
17ffa8c910039878d5352fbd71936c37c7576822
|
[
"MIT"
] | null | null | null |
src/python/__init__.py
|
Dnargne/blackhole
|
17ffa8c910039878d5352fbd71936c37c7576822
|
[
"MIT"
] | null | null | null |
# -*- coding:iso-8859-1 -*-
# MIT License
#
# Copyright (c) 2017 Dnargne
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = u'Tegona SA'
| 45.5
| 81
| 0.752325
|
037d3dbc95ead288eafa27da0f6fec1fb4e3e82f
| 1,892
|
py
|
Python
|
sololink/flightcode/dflog/loadLog.py
|
meee1/OpenSolo
|
6f299639adbad1e8d573c8ae1135832711b600e4
|
[
"Apache-2.0"
] | 68
|
2019-09-23T03:27:05.000Z
|
2022-03-12T03:00:41.000Z
|
sololink/flightcode/dflog/loadLog.py
|
meee1/OpenSolo
|
6f299639adbad1e8d573c8ae1135832711b600e4
|
[
"Apache-2.0"
] | 22
|
2019-10-26T20:15:56.000Z
|
2022-02-12T05:41:56.000Z
|
sololink/flightcode/dflog/loadLog.py
|
meee1/OpenSolo
|
6f299639adbad1e8d573c8ae1135832711b600e4
|
[
"Apache-2.0"
] | 33
|
2019-09-29T19:52:19.000Z
|
2022-03-12T03:00:43.000Z
|
#!/usr/bin/env python
import subprocess
import sys
import os
import time
from pymavlink import mavutil
import glob
import ConfigParser
import shutil
from datetime import datetime
import argparse
SELECT_GPIO = "21"
ENABLE_GPIO = "19"
#GPIO direction set
def setGPIODir(gpio, direction):
dir_fd = open("/sys/class/gpio/gpio"+str(gpio)+"/direction", "w")
dir_fd.write(direction)
dir_fd.close()
#Open the GPIO
def openGPIO(gpio):
#Check and see if the GPIO is already exported
if not os.path.isdir("/sys/class/gpio/gpio"+str(gpio)):
#otherwise export it
exp_fd = open("/sys/class/gpio/export", "w")
exp_fd.write(gpio)
exp_fd.close()
setGPIODir(gpio, "out");
def closeGPIO(gpio):
unexp_fd = open("/sys/class/gpio/unexport", "w")
unexp_fd.write(gpio)
unexp_fd.close()
def setGPIO(gpio, value):
val_fd = open("/sys/class/gpio/gpio"+str(gpio)+"/value", "w")
val_fd.write(value)
val_fd.close()
def openSetClose(gpio, value):
openGPIO(gpio)
setGPIO(gpio, value)
closeGPIO(gpio)
#Set the GPIO low
def disconnectAndExit():
openSetClose(SELECT_GPIO, "0")
openSetClose(ENABLE_GPIO, "1")
sys.exit()
parser = argparse.ArgumentParser()
parser.add_argument("lognum", help="Log number to download, or 'latest'")
args = parser.parse_args()
#Log downloading process
print "Pixhawk log loader"
#Set the USB select GPIOs
openSetClose(SELECT_GPIO, "1")
openSetClose(ENABLE_GPIO, "0")
time.sleep(1)
print "Checking for pixhawk on USB"
usb_devs = glob.glob('/dev/serial/by-id/usb-3D*')
if not usb_devs:
print "No pixhawk found on USB. Exiting."
disconnectAndExit()
print "Pixhawk found on USB, requesting log."
pixhawk_usb = usb_devs[-1]
m = mavutil.mavlink_connection(pixhawk_usb)
#Call the log downloader app
ret = subprocess.call(["dflog", str(args.lognum)])
disconnectAndExit()
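# Example invocation (hedged): the script takes one positional argument, either
# a log number or the word 'latest', e.g.
#   python loadLog.py 12
#   python loadLog.py latest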
| 22.795181
| 73
| 0.701374
|
7ef3d9b68f82e21ed6a6cbdaaa3e57f54a14a9f6
| 1,863
|
py
|
Python
|
active_directory_ldap/komand_active_directory_ldap/connection/schema.py
|
emartin-merrill-r7/insightconnect-plugins
|
a589745dbcc9f01d3e601431e77ab7221a84c117
|
[
"MIT"
] | 6
|
2020-11-10T03:07:00.000Z
|
2022-02-24T18:07:57.000Z
|
active_directory_ldap/komand_active_directory_ldap/connection/schema.py
|
OSSSP/insightconnect-plugins
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
[
"MIT"
] | 17
|
2020-01-21T16:02:04.000Z
|
2022-01-12T15:11:26.000Z
|
active_directory_ldap/komand_active_directory_ldap/connection/schema.py
|
OSSSP/insightconnect-plugins
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
[
"MIT"
] | 2
|
2020-12-26T11:33:23.000Z
|
2021-09-30T22:22:43.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
HOST = "host"
PORT = "port"
USE_SSL = "use_ssl"
USERNAME_PASSWORD = "username_password"
class ConnectionSchema(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"host": {
"type": "string",
"title": "Host",
"description": "Server Host, e.g. ldap://192.5.5.5. Must use either ldap:// or ldaps:// for SSL prefix",
"order": 1
},
"port": {
"type": "integer",
"title": "Port",
"description": "Port, e.g. 389",
"default": 389,
"order": 2
},
"use_ssl": {
"type": "boolean",
"title": "Use SSL",
"description": "Use SSL?",
"order": 3
},
"username_password": {
"$ref": "#/definitions/credential_username_password",
"title": "Username and Password",
"description": "Username and password",
"order": 4
}
},
"required": [
"host",
"port",
"use_ssl",
"username_password"
],
"definitions": {
"credential_username_password": {
"id": "credential_username_password",
"type": "object",
"title": "Credential: Username and Password",
"description": "A username and password combination",
"properties": {
"password": {
"type": "string",
"title": "Password",
"displayType": "password",
"description": "The password",
"format": "password"
},
"username": {
"type": "string",
"title": "Username",
"description": "The username to log in with"
}
},
"required": [
"username",
"password"
]
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
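# Hedged example of a connection payload that satisfies the schema above; the
# host, port and credential values are placeholders taken from the field
# descriptions, not a real environment.
EXAMPLE_CONNECTION = {
    "host": "ldap://192.5.5.5",
    "port": 389,
    "use_ssl": False,
    "username_password": {"username": "administrator", "password": "secret"},
}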
| 22.719512
| 110
| 0.520129
|
62d9edc006b52744bc7cc5ab5b83cc28c9f8dd3a
| 2,213
|
py
|
Python
|
images/backer/src/app.py
|
elston/flaskit
|
849e1fcfa8904771e2ebcb55877bb41440359cd5
|
[
"MIT"
] | 2
|
2018-08-08T22:01:33.000Z
|
2018-08-08T23:14:43.000Z
|
images/backer/src/app.py
|
elston/flaskit
|
849e1fcfa8904771e2ebcb55877bb41440359cd5
|
[
"MIT"
] | null | null | null |
images/backer/src/app.py
|
elston/flaskit
|
849e1fcfa8904771e2ebcb55877bb41440359cd5
|
[
"MIT"
] | null | null | null |
import traceback
from flask import (
Flask,
render_template)
from flask_uploads import (
configure_uploads,
patch_request_class)
from extensions import (
config,
bcrypt,
csrf_protect,
db,
login_manager,
migrate,
webpack
)
def register_extensions(app):
# ..config
config.init_app(app)
# ..bcrypt
bcrypt.init_app(app)
# ..db
db.init_app(app)
# ..csrf
csrf_protect.init_app(app)
# ...login_manager
login_manager.init_app(app)
login_manager.login_view = 'accounts.login'
login_manager.login_message = None
@login_manager.user_loader
def load_user(user_id):
from accounts.models import User
return User.get_by_id(user_id)
# ..migrate
migrate.init_app(app, db)
# ..webpack
if not app.config['MIGRATION_MODE']:
webpack.init_app(app)
# ...
return None
# ...blueprints
from lending import views as lending_views
from accounts import views as accounts_views
# ...
def register_blueprints(app):
# ..
app.register_blueprint(lending_views.blueprint)
app.register_blueprint(accounts_views.blueprint)
# ..
return None
def register_errorhandlers(app):
# ..
def render_error(error):
error_code = getattr(error, 'code', 500)
return render_template('{0}.html'.format(error_code)), error_code
# ..
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
# ..
return None
# ...models
from accounts import models as accounts_models
# ...
def register_shellcontext(app):
# ...
def shell_context():
return {
'db': db,
'User': accounts_models.User,
}
# ...
app.shell_context_processor(shell_context)
# ..commands
import commands
# ..
def register_commands(app):
# ..
app.cli.add_command(commands.createadmin)
def create_app():
# ..
app = Flask(__name__.split('.')[0])
# ...
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_shellcontext(app)
register_commands(app)
# ..
return app
app = create_app()
# print(app.url_map)
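# Hedged usage note: create_app() is already called at import time above, so the
# development server can be started with the standard Flask CLI, e.g.
#   FLASK_APP=app flask run
# (or `flask --app app run` on Flask versions that accept --app).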
| 18.139344
| 73
| 0.639404
|