text stringlengths 8 6.05M |
|---|
import pandas as pd
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import MinMaxScaler
from sklearn import metrics
from sklearn import linear_model
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
import scipy.stats as stats
import random
from pathlib import Path
import sys
import matplotlib as mpl
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import log_loss
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, roc_auc_score
from sklearn.metrics import log_loss
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import hinge_loss
from sklearn.ensemble import RandomForestClassifier as rfc
from sklearn.inspection import permutation_importance
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
#Question 1
# Load the homework dataset from the current working directory.
file = Path.cwd().joinpath('HW2_data.csv')
Diab = pd.read_csv(file)
def nan2value_random(df):
    """Drop heavily-incomplete rows, then fill remaining NaNs randomly.

    Rows with fewer than 15 non-NaN values are removed (i.e. patients
    missing 4 or more entries).  Each remaining NaN is replaced by a value
    drawn uniformly at random from the non-NaN values of its own column.
    Since few patients are affected, any bias introduced is small.

    Args:
        df (pd.DataFrame): raw data, possibly containing NaNs.

    Returns:
        pd.DataFrame: copy of the surviving rows with every NaN filled.
    """
    df = df.dropna(axis=0, thresh=15)
    df_nan = df.copy()
    for col in df_nan:
        pool = df_nan[col].dropna()
        draws = np.random.choice(pool, size=len(df_nan[col]))
        # Align the random draws with the surviving (possibly gapped) row
        # index.  The original code wrapped `draws` in pd.Series() with a
        # fresh 0..n-1 index, so fillna silently skipped any row whose
        # label fell outside that range, leaving NaNs behind.
        df_nan[col] = df_nan[col].fillna(pd.Series(draws, index=df_nan.index))
    return df_nan
clean_Diab = nan2value_random(Diab)
#Question 2
# Feature matrix: all questionnaire columns; label: Diagnosis.
X = clean_Diab[['Age','Gender','Increased Urination','Increased Thirst','Sudden Weight Loss','Weakness','Increased Hunger','Genital Thrush','Visual Blurring','Itching','Irritability','Delayed Healing','Partial Paresis','Muscle Stiffness','Hair Loss','Obesity','Family History']]
y= clean_Diab[['Diagnosis']]
# Stratified 80/20 split keeps the positive/negative ratio equal in both sets.
X_train, x_test, Y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 10, stratify=y)
#Question 3
# 3a
# Binarize the train/test frames: "positive" category labels become 1,
# "negative" ones become 0 (the numeric Age column is left untouched).
positive_labels = ['Yes', 'Female', 'Positive']
negative_labels = ['No', 'Male', 'Negative']
X_train_binary = X_train.replace(positive_labels, value=1).replace(negative_labels, value=0)
x_test_binary = x_test.replace(positive_labels, value=1).replace(negative_labels, value=0)
# Build a per-feature summary: % positive in train, % positive in test,
# and the absolute difference between the two.
list_train = [None] * 16
list_test = [None] * 16
delta = [None] * 16
features_dictionary = {'Positive Feature': ['%Train', '%Test', '%Delta']}
for i in range(16):
    # Column i+1 skips Age, covering the 16 binary symptom features.
    list_train[i] = X_train_binary.iloc[:, i + 1].sum() * (100 / len(X_train_binary))
    list_test[i] = x_test_binary.iloc[:, i + 1].sum() * (100 / len(x_test_binary))
    delta[i] = abs(list_train[i] - list_test[i])
    features_dictionary[clean_Diab.columns[i + 1]] = [list_train[i], list_test[i], delta[i]]
df_features_dictionary = pd.DataFrame.from_dict(features_dictionary).T
print(df_features_dictionary)
#3b
#create a dictionary that shows the relationship between feature and label.
#keys: 1.positive feature 2.negative feature
#values for each key: 1. number of positive diagnosis 2.number of negative diagnosis
#We create a bar plot after defining a dictionary for a feature
# label_feature_relationship={}
# label_feature_relationship['Female'] = {}
# label_feature_relationship['Female']['Positive'] = len(clean_Diab[(clean_Diab.Gender.str.contains('Female')) & (clean_Diab.Diagnosis.str.contains('Positive'))])
# label_feature_relationship['Female']['Negative'] = len(clean_Diab[(clean_Diab.Gender.str.contains('Female')) & (clean_Diab.Diagnosis.str.contains('Negative'))])
#
# label_feature_relationship['Male'] = {}
# label_feature_relationship['Male']['Positive'] = len(clean_Diab[(clean_Diab.Gender.str.contains('Male')) & (clean_Diab.Diagnosis.str.contains('Positive'))])
# label_feature_relationship['Male']['Negative'] = len(clean_Diab[(clean_Diab.Gender.str.contains('Male')) & (clean_Diab.Diagnosis.str.contains('Negative'))])
#
# df = pd.DataFrame.from_dict(label_feature_relationship)
# df = df.T
# df.plot.bar(rot=0, title='Gender')
# plt.ylabel('Counts')
# plt.show()
#
# for i in range(2,16):
# title = clean_Diab.columns[i]
# label_feature_relationship = {}
# label_feature_relationship["Has %s" %title] = {}
# label_feature_relationship["Has %s" %title]['Positive'] = len(clean_Diab[(clean_Diab[title].str.contains('Yes')) & clean_Diab.Diagnosis.str.contains('Positive')])
# label_feature_relationship["Has %s" %title]['Negative'] = len(clean_Diab[(clean_Diab[title].str.contains('Yes')) & clean_Diab.Diagnosis.str.contains('Negative')])
# label_feature_relationship["No %s" % title] = {}
# label_feature_relationship["No %s" % title]['Positive'] = len(clean_Diab[(clean_Diab[title].str.contains('No')) & clean_Diab.Diagnosis.str.contains('Positive')])
# label_feature_relationship["No %s" % title]['Negative'] = len(clean_Diab[(clean_Diab[title].str.contains('No')) & clean_Diab.Diagnosis.str.contains('Negative')])
# df = pd.DataFrame.from_dict(label_feature_relationship)
# df = df.T
# df.plot.bar(rot=0, title=title)
# plt.ylabel('Counts')
# # plt.show()
#
# label_feature_relationship = {}
# label_feature_relationship["Has Family History"] = {}
# label_feature_relationship["Has Family History"]['Positive'] = len(clean_Diab.loc[clean_Diab['Family History']==1 & clean_Diab.Diagnosis.str.contains('Positive')])
# label_feature_relationship["Has Family History"]['Negative'] = len(clean_Diab.loc[clean_Diab['Family History']==1 & clean_Diab.Diagnosis.str.contains('Negative')])
# label_feature_relationship["No Family History"] = {}
# label_feature_relationship["No Family History"]['Positive'] = len(clean_Diab.loc[clean_Diab['Family History']==0 & clean_Diab.Diagnosis.str.contains('Positive')])
# label_feature_relationship["No Family History"]['Negative'] = len(clean_Diab.loc[clean_Diab['Family History']==0 & clean_Diab.Diagnosis.str.contains('Negative')])
# df = pd.DataFrame.from_dict(label_feature_relationship)
# df = df.T
# df.plot.bar(rot=0, title='Family History')
# plt.ylabel('Counts')
# plt.show()
#
# #3c
# #1. Is there an age from which the chance of getting sick increases significantly?- A histogram is drawn showing a connection between age and a positive diagnosis:
# ax = clean_Diab.hist(column='Age', bins= 100, figsize=(12,8), color='#86bf91', zorder=2, rwidth=0.9)
# ax = ax[0]
# for x in ax:
# x.set_ylabel("Count", labelpad=20, weight='bold', size=12)
# plt.show()
#
# #2. Compare age histogram for positive diagnosis and age histogram for negative diagnosis:
# positive_age_series = clean_Diab[clean_Diab.Diagnosis.str.contains('Positive')]['Age']
# negative_age_series = clean_Diab[clean_Diab.Diagnosis.str.contains('Negative')]['Age']
# plt.hist(positive_age_series, bins=100, label='Positive Diagnosis')
# plt.hist(negative_age_series, bins=100, label='Negative Diagnosis')
# plt.xlabel('Age')
# plt.ylabel('Count')
# plt.legend(loc='upper right')
# plt.show()
#
# #3. Pie chart of positive/negative diagnosis
# positive_count = 0
# negative_count = 0
# for idx, value in enumerate(clean_Diab['Diagnosis']):
# if value =='Positive':
# positive_count +=1
# else:
# negative_count +=1
# labels = ('Positive', 'Negative')
# sizes = [positive_count, negative_count]
# colors = ['lightcoral', 'yellowgreen']
# figureObject, axesObject = plt.subplots()
# plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', shadow=True, startangle=140)
# plt.axis('equal')
# plt.title("Positive and negative diagnosis percentages")
# plt.show()
#Question 4
#Encoding our data as one hot vectors
#not relevant and wrong!!
# X_binary = X.replace(['Yes','Female','Positive'],value = 1)
# X_binary = X_binary.replace(['No','Male','Negative'],value = 0)
# y_binary = y.replace(['Positive'],value = 1)
# X_binary = X_binary.drop('Age', axis=1)
# y_binary = y_binary.replace(['Negative'],value = 0)
#
# encoder = OneHotEncoder(sparse=False,handle_unknown='ignore')
# x_onehotvector = encoder.fit_transform(X_binary)
# LE = LabelEncoder()
# y_onehotvector = y_binary
# y_onehotvector = LE.fit_transform(np.ravel(y_onehotvector))
#relevant code- Sapir
#Encoding our data as one hot vectors
# X_binary = X.replace(['Yes','Female','Positive'],value = 1)
# X_binary = X_binary.replace(['No','Male','Negative'],value = 0)
# # X_binary = X_binary.drop('Age', axis=1)
# X_binary['Age'] = (X_binary['Age']-X_binary['Age'].mean())/X_binary['Age'].std()
# y_binary = y.replace(['Positive'],value = 1)
# y_binary = y_binary.replace(['Negative'],value = 0)
#
# x_onehotvector = X_binary
# y_onehotvector = y_binary
#Moran's suggestion
# Encode every answer as a 0/1 indicator: a cell becomes 1 when it holds
# any "positive" category label ('Yes', 'Female', or 'Positive'), else 0.
# Fix: the comparisons must be parenthesized before combining (`&` binds
# tighter than `==`, so the old expression evaluated `'Yes' & X` and raised
# a TypeError), and the combination must be OR — a single cell can only
# match one of the labels, so AND-ing them always yields 0.
X_binary = 1 * ((X == 'Yes') | (X == 'Female') | (X == 'Positive'))
# #Question 5
# X_train, x_test, Y_train, y_test = train_test_split(x_onehotvector, y_onehotvector, test_size = 0.20, random_state = 0, stratify = y_onehotvector)
# # K cross fold+ SVM ( linear for 'svm_kernel':['linear'], non linear for 'svm_kernel':['rbf'])
# # Linear SVM model
# n_splits = 5
# skf = StratifiedKFold(n_splits=n_splits, random_state=10, shuffle=True)
# svc = SVC(probability=True)
# C = np.array([0.001, 0.01, 1, 10, 100, 1000])
# pipe = Pipeline(steps=[('svm', svc)])
# svm_lin = GridSearchCV(estimator=pipe,
# param_grid={'svm__kernel':['linear'], 'svm__C':C}, scoring=['roc_auc'], cv=skf, refit='roc_auc', verbose=3, return_train_score=True)
# svm_lin.fit(X_train, Y_train.ravel())
# best_svm_lin = svm_lin.best_estimator_
#
#
# calc_TN = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 0]
# calc_FP = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 1]
# calc_FN = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 0]
# calc_TP = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 1]
#
# y_pred_test_lin = best_svm_lin.predict(x_test)
# y_pred_proba_test_lin = best_svm_lin.predict_proba(x_test)
# lin_loss = hinge_loss(y_test, y_pred_proba_test_lin[:,1])
# LSVM_score = roc_auc_score(y_test, y_pred_proba_test_lin[:,1])
#
# TN = calc_TN(y_test, y_pred_test_lin)
# FP = calc_FP(y_test, y_pred_test_lin)
# FN = calc_FN(y_test, y_pred_test_lin)
# TP = calc_TP(y_test, y_pred_test_lin)
# Se = TP/(TP+FN)
# Sp = TN/(TN+FP)
# PPV = TP/(TP+FP)
# NPV = TN/(TN+FN)
# Acc = (TP+TN)/(TP+TN+FP+FN)
# F1 = (2*Se*PPV)/(Se+PPV)
#
# #Non- linear SVM (RBF Kernel):
# pipe_rbf = Pipeline(steps=[('svm', svc)])
# svm_rbf = GridSearchCV(estimator=pipe,
# param_grid={'svm__kernel':['rbf'], 'svm__C':C}, scoring=['roc_auc'], cv=skf, refit='roc_auc', verbose=3, return_train_score=True)
#
# svm_rbf.fit(X_train, Y_train.values.ravel())
# best_svm_rbf = svm_rbf.best_estimator_
#
# y_pred_test_rbf = best_svm_rbf.predict(x_test)
# y_pred_proba_test_rbf = best_svm_rbf.predict_proba(x_test)
# rbf_loss = hinge_loss(y_test, y_pred_proba_test_rbf[:,1])
# rbf_SVM_score = roc_auc_score(y_test, y_pred_proba_test_rbf[:,1])
#
# TN_nl = calc_TN(y_test, y_pred_test_rbf)
# FP_nl = calc_FP(y_test, y_pred_test_rbf)
# FN_nl = calc_FN(y_test, y_pred_test_rbf)
# TP_nl = calc_TP(y_test, y_pred_test_rbf)
# Se_nl = TP/(TP+FN)
# Sp_nl = TN/(TN+FP)
# PPV_nl = TP/(TP+FP)
# NPV_nl = TN/(TN+FN)
# Acc_nl = (TP+TN)/(TP+TN+FP+FN)
# F1_nl = (2*Se*PPV)/(Se+PPV)
#
# print("svm with linear kernel:")
# print(f'Sensitivity is {Se:.2f}')
# print(f'Specificity is {Sp:.2f}')
# print(f'PPV is {PPV:.2f}')
# print(f'NPV is {NPV:.2f}')
# print(f'Accuracy is {Acc:.2f}')
# print(f'F1 is {F1:.2f}')
# print(f'The Linear Loss is {lin_loss:.2f}')
# print(f'AUC is {LSVM_score:.2f}')
# print("\n svm with rbf kernel:")
# print(f' Sensitivity is {Se_nl:.2f}')
# print(f' Specificity is {Sp_nl:.2f}')
# print(f' PPV is {PPV_nl:.2f}')
# print(f' NPV is {NPV_nl:.2f}')
# print(f' Accuracy is {Acc_nl:.2f}')
# print(f' F1 is {F1_nl:.2f}')
# print(f' Loss is {rbf_loss:.2f}')
# print(f' AUC is {rbf_SVM_score:.2f}')
#
# #Question 6- Feature Selection
# #Random Forest Network
# X_train, x_test, Y_train, y_test = train_test_split(x_onehotvector, y_onehotvector, test_size = 0.20, random_state = 0, stratify = y_onehotvector)
# clf = rfc(n_estimators=10)
# # scaler = StandardScaler()
# # X_train_scale = scaler.fit_transform(X_train)
# clf.fit(X_train, Y_train.values.ravel())
# w_ = clf.feature_importances_
# # w_positive = w_[::2]
#
# features=['Age','Gender','Increased Urination', 'Increased Thirst','Sudden Weight Loss','Weakness','Increased Hunger','Genital Thrush','Visual Blurring','Itching','Irritability','Delayed Healing','Partial Paresis','Muscle Stiffness','Hair Loss','Obesity','Family History']
# # features=['Male', 'Female', 'No Increased Urination', 'Increased Urination', 'No Increased Thirst', 'Increased Thirst', 'No Sudden Weight Loss', 'Sudden Weight Loss', 'No Weakness', 'Weakness', 'No Increased Hunger', 'Increased Hunger', 'No Genital Thrush', 'Genital Thrush', 'No Visual Blurring', 'Visual Blurring', 'No Itching', 'Itching', 'No Irritability', 'Irritability', 'No Delayed Healing', 'Delayed Healing', 'No Partial Paresis', 'Partial Paresis', 'No Muscle Stiffness', 'Muscle Stiffness', 'No Hair Loss', 'Hair Loss', 'No Obesity', 'Obesity', 'No Family History', 'Family History']
# # features=['Male', 'Female', 'No Increased Urination', 'Increased Urination', 'No Increased Thirst', 'Increased Thirst', 'No Sudden Weight Loss', 'Sudden Weight Loss', 'No Weakness', 'Weakness', 'No Increased Hunger', 'Increased Hunger', 'No Genital Thrush', 'Genital Thrush', 'No Visual Blurring', 'Visual Blurring', 'No Itching', 'Itching', 'No Irritability', 'Irritability', 'No Delayed Healing', 'Delayed Healing', 'No Partial Paresis', 'Partial Paresis', 'No Muscle Stiffness', 'Muscle Stiffness', 'No Hair Loss', 'Hair Loss', 'No Obesity', 'Obesity', 'No Family History', 'Family History']
# x = np.arange(len(features))
# x=np.ndarray.tolist(x)
# w_ = np.ndarray.tolist(w_)
# plt.bar(x, w_,0.5, color='c')
# plt.xticks(x,features,rotation=90);
# plt.ylabel("weights", fontsize=12)
# plt.title("Feature Weights- RFC")
# plt.show()
#
# # #Question 7
# #7a- PCA
# # scaler = StandardScaler()
# # X_train_scale = scaler.fit_transform(X_train)
# # x_test = scaler.transform(x_test)
# # x_test_binary = scaler.transform(x_test)
# n_components = 2
# pca = PCA(n_components = n_components, whiten= True)
# X_train_pca = pca.fit_transform(X_train)
# x_test_pca = pca.transform(x_test)
#
# def plt_2d_pca(X_pca,y):
# fig = plt.figure(figsize=(8, 8))
# ax = fig.add_subplot(111, aspect='equal')
# ax.scatter(X_pca[y==0, 0], X_pca[y==0, 1], color='b')
# ax.scatter(X_pca[y==1, 0], X_pca[y==1, 1], color='r')
# ax.legend(('Negative','Positive'))
# ax.plot([0], [0], "ko")
# ax.arrow(0, 0, 0, 1, head_width=0.05, length_includes_head=True, head_length=0.1, fc='k', ec='k')
# ax.arrow(0, 0, 1, 0, head_width=0.05, length_includes_head=True, head_length=0.1, fc='k', ec='k')
# ax.set_xlabel('$U_1$')
# ax.set_ylabel('$U_2$')
# ax.set_title('2D PCA')
#
# plt_2d_pca(x_test_pca,y_test)
# plt.show()
# #7c
# # Linear SVM model
# n_splits = 5
# skf = StratifiedKFold(n_splits=n_splits, random_state=10, shuffle=True)
# svc = SVC(probability=True)
# C = np.array([0.001, 0.01, 1, 10, 100, 1000])
# pipe = Pipeline(steps=[('scale', StandardScaler()), ('svm', svc)])
# svm_lin = GridSearchCV(estimator=pipe,
# param_grid={'svm__kernel':['linear'], 'svm__C':C}, scoring=['roc_auc'], cv=skf, refit='roc_auc', verbose=3, return_train_score=True)
# svm_lin.fit(X_train_pca, Y_train)
# best_svm_lin = svm_lin.best_estimator_
#
# calc_TN = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 0]
# calc_FP = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 1]
# calc_FN = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 0]
# calc_TP = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 1]
#
# y_pred_test_lin = best_svm_lin.predict(x_test_pca)
# y_pred_proba_test_lin = best_svm_lin.predict_proba(x_test_pca)
# lin_loss = hinge_loss(y_test.ravel(), y_pred_proba_test_lin[:,1])
# LSVM_score = roc_auc_score(y_test.ravel(), y_pred_proba_test_lin[:,1])
#
# TN = calc_TN(y_test, y_pred_test_lin)
# FP = calc_FP(y_test, y_pred_test_lin)
# FN = calc_FN(y_test, y_pred_test_lin)
# TP = calc_TP(y_test, y_pred_test_lin)
# Se = TP/(TP+FN)
# Sp = TN/(TN+FP)
# PPV = TP/(TP+FP)
# NPV = TN/(TN+FN)
# Acc = (TP+TN)/(TP+TN+FP+FN)
# F1 = (2*Se*PPV)/(Se+PPV)
#
# #Non- linear SVM (RBF Kernel):
# pipe_rbf = Pipeline(steps=[('scale', StandardScaler()), ('svm', svc)])
# svm_rbf = GridSearchCV(estimator=pipe,
# param_grid={'svm__kernel':['rbf'], 'svm__C':C}, scoring=['roc_auc'], cv=skf, refit='roc_auc', verbose=3, return_train_score=True)
# svm_rbf.fit(X_train_pca, Y_train)
# best_svm_rbf = svm_rbf.best_estimator_
#
# y_pred_test_rbf = best_svm_rbf.predict(x_test_pca)
# y_pred_proba_test_rbf = best_svm_rbf.predict_proba(x_test_pca)
# rbf_loss = hinge_loss(y_test, y_pred_proba_test_rbf[:,1])
# rbf_SVM_score = roc_auc_score(y_test, y_pred_proba_test_rbf[:,1])
#
# TN_nl = calc_TN(y_test, y_pred_test_rbf)
# FP_nl = calc_FP(y_test, y_pred_test_rbf)
# FN_nl = calc_FN(y_test, y_pred_test_rbf)
# TP_nl = calc_TP(y_test, y_pred_test_rbf)
# Se_nl = TP/(TP+FN)
# Sp_nl = TN/(TN+FP)
# PPV_nl = TP/(TP+FP)
# NPV_nl = TN/(TN+FN)
# Acc_nl = (TP+TN)/(TP+TN+FP+FN)
# F1_nl = (2*Se*PPV)/(Se+PPV)
#
# print("svm with linear kernel:")
# print(f'Sensitivity is {Se:.2f}')
# print(f'Specificity is {Sp:.2f}')
# print(f'PPV is {PPV:.2f}')
# print(f'NPV is {NPV:.2f}')
# print(f'Accuracy is {Acc:.2f}')
# print(f'F1 is {F1:.2f}')
# print(f'The Linear Loss is {lin_loss:.2f}')
# print(f'AUC is {LSVM_score:.2f}')
# print("\n svm with rbf kernel:")
# print(f' Sensitivity is {Se_nl:.2f}')
# print(f' Specificity is {Sp_nl:.2f}')
# print(f' PPV is {PPV_nl:.2f}')
# print(f' NPV is {NPV_nl:.2f}')
# print(f' Accuracy is {Acc_nl:.2f}')
# print(f' F1 is {F1_nl:.2f}')
# print(f' Loss is {rbf_loss:.2f}')
# print(f' AUC is {rbf_SVM_score:.2f}')
#
# #7d:Train the same models on the best two features from section 6: Increased Urination, Increased Thirst
# x_OHV_2feat = x_onehotvector[:,[3,5]]
# X_train, x_test, Y_train, y_test = train_test_split(x_OHV_2feat, y_onehotvector, test_size = 0.20, random_state = 0, stratify = y_onehotvector)
# # K cross fold+ SVM ( linear for 'svm_kernel':['linear'], non linear for 'svm_kernel':['rbf'])
# # Linear SVM model
# n_splits = 5
# skf = StratifiedKFold(n_splits=n_splits, random_state=10, shuffle=True)
# svc = SVC(probability=True)
# C = np.array([0.001, 0.01, 1, 10, 100, 1000])
# pipe = Pipeline(steps=[('scale', StandardScaler()), ('svm', svc)])
# svm_lin = GridSearchCV(estimator=pipe,
# param_grid={'svm__kernel':['linear'], 'svm__C':C}, scoring=['roc_auc'], cv=skf, refit='roc_auc', verbose=3, return_train_score=True)
# svm_lin.fit(X_train, Y_train)
# best_svm_lin = svm_lin.best_estimator_
#
# calc_TN = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 0]
# calc_FP = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 1]
# calc_FN = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 0]
# calc_TP = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 1]
#
# y_pred_test_lin = best_svm_lin.predict(x_test)
# y_pred_proba_test_lin = best_svm_lin.predict_proba(x_test)
# lin_loss = hinge_loss(y_test.ravel(), y_pred_proba_test_lin[:,1])
# LSVM_score = roc_auc_score(y_test.ravel(), y_pred_proba_test_lin[:,1])
#
# TN = calc_TN(y_test, y_pred_test_lin)
# FP = calc_FP(y_test, y_pred_test_lin)
# FN = calc_FN(y_test, y_pred_test_lin)
# TP = calc_TP(y_test, y_pred_test_lin)
# Se = TP/(TP+FN)
# Sp = TN/(TN+FP)
# PPV = TP/(TP+FP)
# NPV = TN/(TN+FN)
# Acc = (TP+TN)/(TP+TN+FP+FN)
# F1 = (2*Se*PPV)/(Se+PPV)
#
# #Non- linear SVM (RBF Kernel):
# pipe_rbf = Pipeline(steps=[('scale', StandardScaler()), ('svm', svc)])
# svm_rbf = GridSearchCV(estimator=pipe,
# param_grid={'svm__kernel':['rbf'], 'svm__C':C}, scoring=['roc_auc'], cv=skf, refit='roc_auc', verbose=3, return_train_score=True)
#
# svm_rbf.fit(X_train, Y_train)
# best_svm_rbf = svm_rbf.best_estimator_
#
# y_pred_test_rbf = best_svm_rbf.predict(x_test)
# y_pred_proba_test_rbf = best_svm_rbf.predict_proba(x_test)
# rbf_loss = hinge_loss(y_test, y_pred_proba_test_rbf[:,1])
# rbf_SVM_score = roc_auc_score(y_test, y_pred_proba_test_rbf[:,1])
#
# TN_nl = calc_TN(y_test, y_pred_test_rbf)
# FP_nl = calc_FP(y_test, y_pred_test_rbf)
# FN_nl = calc_FN(y_test, y_pred_test_rbf)
# TP_nl = calc_TP(y_test, y_pred_test_rbf)
# Se_nl = TP/(TP+FN)
# Sp_nl = TN/(TN+FP)
# PPV_nl = TP/(TP+FP)
# NPV_nl = TN/(TN+FN)
# Acc_nl = (TP+TN)/(TP+TN+FP+FN)
# F1_nl = (2*Se*PPV)/(Se+PPV)
#
# print("svm with linear kernel:")
# print(f'Sensitivity is {Se:.2f}')
# print(f'Specificity is {Sp:.2f}')
# print(f'PPV is {PPV:.2f}')
# print(f'NPV is {NPV:.2f}')
# print(f'Accuracy is {Acc:.2f}')
# print(f'F1 is {F1:.2f}')
# print(f'The Linear Loss is {lin_loss:.2f}')
# print(f'AUC is {LSVM_score:.2f}')
# print("\n svm with rbf kernel:")
# print(f' Sensitivity is {Se_nl:.2f}')
# print(f' Specificity is {Sp_nl:.2f}')
# print(f' PPV is {PPV_nl:.2f}')
# print(f' NPV is {NPV_nl:.2f}')
# print(f' Accuracy is {Acc_nl:.2f}')
# print(f' F1 is {F1_nl:.2f}')
# print(f' Loss is {rbf_loss:.2f}')
# print(f' AUC is {rbf_SVM_score:.2f}')
|
from TikTokAPI import TikTokAPI
import json
import datetime
api = TikTokAPI()
def get_all_video_stats_by_username(username):
    """Return id, stats and creation time for every video of *username*."""
    user = api.getUserByName(username)
    video_count = user['userInfo']['stats']['videoCount']
    items = api.getVideosByUserName(username, count=video_count)['items']
    return [
        {'id': item['id'], 'stats': item['stats'], 'createTime': item['createTime']}
        for item in items
    ]
def _video_stats_since_days_ago(username, days):
    """Videos posted within the last *days* days (shared cutoff helper).

    The 7- and 30-day wrappers previously duplicated the same
    timestamp arithmetic; it now lives in one place.
    """
    cutoff = (datetime.datetime.today() - datetime.timedelta(days=days)).timestamp()
    return get_video_stats_by_username_date(username, cutoff)

def video_stats_7_days(username):
    """Videos posted within the last week."""
    return _video_stats_since_days_ago(username, 7)

def video_stats_30_days(username):
    """Videos posted within the last 30 days."""
    return _video_stats_since_days_ago(username, 30)

def video_stats_all_time(username):
    """Every video ever posted by *username*."""
    return get_all_video_stats_by_username(username)
def get_video_stats_by_username_date(username, date):
    """Return the user's videos created strictly after *date* (unix timestamp)."""
    all_stats = get_all_video_stats_by_username(username)
    return [stat for stat in all_stats if stat['createTime'] > date]
# Example: count videos this account posted in the last month.
print(len(video_stats_30_days('resolutionmovement')))
|
def getPic():
    """ prompts a user to pick a file to be converted to a jython picture """
    # JES built-ins: pickAFile() opens a file chooser, makePicture() loads it.
    return makePicture(pickAFile())
def amazify():
    """Apply the full effect chain (contrast, border, droste) to a user-chosen
    picture and save the result to disk."""
    pic = getPic()
    increaseContrast(pic)
    addBorder(pic)
    droste(pic)
    # NOTE(review): hard-coded absolute output path — only works on this machine.
    writePictureTo(pic, 'C:\\Users\\J.McGhee\\Documents\\Jake\\CST205\\midterm\\amazify.jpg')
def droste(pic):
    """Paste progressively smaller copies of `pic` into itself (Droste effect).

    Mutates `pic` in place and repaints it when done.
    """
    copy = shrink(pic)
    xOffset = 0
    yOffset = 0
    levels = 2  # number of nested copies to paste
    while levels > 0:
        print(xOffset)
        print(yOffset)
        # Copy every pixel of the shrunken image into the lower-right
        # region of the original, shifted by the accumulated offsets.
        for x in range(0, getWidth(copy)-1):
            for y in range(0, getHeight(copy)-1):
                color = getColor(getPixel(copy, x, y))
                # Anchor the paste ~10% in from the bottom-right corner.
                targetX = getWidth(pic)-int(getWidth(copy)*1.1)-xOffset
                targetY = getHeight(pic)-int(getHeight(copy)*1.1)-yOffset
                p = getPixel(pic, targetX+x, targetY+y)
                setColor(p, color)
        levels -= 1
        # Shift the next (smaller) copy further in, then shrink again.
        xOffset += int(getWidth(copy)*1.1)-getWidth(copy)
        yOffset += int(getHeight(copy)*1.1)-getHeight(copy)
        copy = shrink(copy)
    repaint(pic)
def shrink(pic):
    """Return a new picture one third the size, sampling every 3rd pixel.

    NOTE(review): relies on Jython/Python-2 integer division for `width/3`
    and `x/3`; under Python 3 these become floats and the pixel calls would
    fail — use `//` if this is ever ported.
    """
    width = getWidth(pic)
    height = getHeight(pic)
    canvas = makeEmptyPicture(width/3, height/3)
    for x in range(0, width-2, 3):
        for y in range(0, height-2, 3):
            color = getColor(getPixel(pic, x, y))
            setColor(getPixel(canvas, x/3, y/3), color)
    return canvas
def addBorder(pic):
    """Paint a 10-pixel black frame around the edges of `pic` and return it."""
    margin = 10
    # top border
    for x in range(0, getWidth(pic)):
        for y in range(0, margin):
            setColor(getPixel(pic, x, y), black)
    # bottom border
    for x in range(0, getWidth(pic)):
        for y in range(getHeight(pic)-margin, getHeight(pic)):
            setColor(getPixel(pic, x, y), black)
    # left border
    for x in range(0, margin):
        for y in range(0, getHeight(pic)):
            setColor(getPixel(pic, x, y), black)
    # right border
    for x in range(getWidth(pic)-margin, getWidth(pic)):
        for y in range(0, getHeight(pic)):
            setColor(getPixel(pic, x, y), black)
    return pic
def increaseContrast(pic):
    """Push each pixel toward its nearer extreme: dark pixels get darker,
    light pixels get lighter. Mutates `pic` in place and repaints it."""
    pixels = getPixels(pic)
    for p in pixels:
        color = getColor(p)
        # Compare color-space distance to pure black vs pure white.
        if distance(color, black) <= distance(color, white):
            color = makeDarker(color)
        else:
            color = makeLighter(color)
        setColor(p, color)
    repaint(pic)
"""
LeetCode - Easy
"""
"""
Given an array of integers A sorted in non-decreasing order, return an array of the squares of each number, also in sorted non-decreasing order.
Example 1:
Input: [-4,-1,0,3,10]
Output: [0,1,9,16,100]
Example 2:
Input: [-7,-3,2,3,11]
Output: [4,9,9,49,121]
Note:
1 <= A.length <= 10000
-10000 <= A[i] <= 10000
A is sorted in non-decreasing order.
"""
class Solution:
    def sortedSquares(self, A):
        """Return the squares of the sorted array A, in non-decreasing order.

        Two pointers walk inward from both ends; the larger absolute value
        is squared and written into the output from the back. O(n) time,
        O(n) extra space.
        """
        lo, hi = 0, len(A) - 1
        result = [0] * len(A)
        for slot in range(len(A) - 1, -1, -1):
            if abs(A[lo]) >= abs(A[hi]):
                result[slot] = A[lo] * A[lo]
                lo += 1
            else:
                result[slot] = A[hi] * A[hi]
                hi -= 1
        return result
if __name__ == '__main__':
    # Run both sample cases. The original rebound A immediately, so the
    # first sample was dead code and never executed.
    for A in ([-4, -1, 0, 3, 10], [-1]):
        print(Solution().sortedSquares(A))
|
from django.conf.urls import url
from . import views
# URL routes for the orders app (mounted under /orders/ by the project urls).
urlpatterns = [
    #/orders/places/
    url(r'^places/$',views.PlaceOrderAPIView.as_view(),name='placeorder'),
    # /orders/ — order collection endpoint.
    url(r'^$',views.OrderAPIView.as_view(),name='order'),
]
from bitutils import test_bit as tb, set_bit, reset_bit
def next_(x):
    """ Find the smallest number larger than `x` and has the same number of 1 bits as `x`

    Idea: For a positive number `x`, we always can find the suffix `s` of the
    style "0b011...100...0", where the number of "0" in the tail >= 0, and the
    number of "1"s > 0. For the next number, we only need to modify the suffix
    (we can continuously add 1 to `x` and before changes are made to the
    non-suffix part, we can always find a number satisfying the requirement).
    First, we need to make the suffix larger, the only way to do this while
    keeping the same number of 1 bits is setting the highest bit of the suffix
    to 1, i.e., the suffix will like "0b1....". Finally, we want the suffix be
    the smallest one, so the suffix should like "0b100...0111...1".
    """
    assert x > 0
    # Index of the lowest set bit (start of the "1...1" run of the suffix).
    first_1_bit = 0
    while not tb(x, first_1_bit):
        first_1_bit += 1
    # Index of the first clear bit above that run of ones.
    afterward_first_0_bit = first_1_bit + 1
    while tb(x, afterward_first_0_bit):
        afterward_first_0_bit += 1
    # How many ones the suffix run contains.
    the_1_bit_count = afterward_first_0_bit - first_1_bit
    # Change the suffix: promote the first 0 above the run to 1 ...
    x = set_bit(x, afterward_first_0_bit)
    # ... then clear every bit below it ...
    clear_mark = ~((0b1 << afterward_first_0_bit) - 1)
    # Remember to reduce 1 from `the_1_bit_count` (because the highest bit of
    # the suffix is set to 1)
    setting_mark = (0b1 << (the_1_bit_count-1)) - 1
    x &= clear_mark
    # ... and pack the remaining ones at the very bottom (smallest value).
    x |= setting_mark
    return x
def prev(x):
    """ Find the largest number smaller than ... (see ``next()``) """
    assert x > 0
    # The algorithm is similar to ``next()``. The "suffix" should be "0b100..011..1"
    # Index of the lowest clear bit (all bits below it are ones).
    first_0_bit = 0
    while tb(x, first_0_bit):
        first_0_bit += 1
    # Index of the first set bit above that run of zeros.
    afterward_first_1_bit = first_0_bit + 1
    while not tb(x, afterward_first_1_bit):
        afterward_first_1_bit += 1
    # Ones to place: the trailing ones plus the one bit being moved down.
    the_1_bit_count = first_0_bit + 1
    # Width of the zero gap between the tail of ones and the moved bit.
    the_0_bit_count = afterward_first_1_bit - first_0_bit
    # Change the suffix: demote the lowest set bit above the gap ...
    x = reset_bit(x, afterward_first_1_bit)
    # Clear the suffix except the highest bit
    x = x >> afterward_first_1_bit << afterward_first_1_bit
    # Set the 1 bits as high as possible within the suffix (largest value).
    x |= ((0x1 << the_1_bit_count) - 1) << (the_0_bit_count - 1)
    return x
def test_1():
    """Each sample pair checks that ``prev`` inverts ``next_``."""
    pairs = (
        (0b110011, 0b110101),
        (0b11001, 0b11010),
        (0b1101100, 0b1110001),
        (0b110100, 0b111000),
    )
    for smaller, larger in pairs:
        assert next_(smaller) == larger
        assert prev(larger) == smaller
|
#!/usr/bin/env python
from flask import Blueprint, request, current_app
from .util import log
# Blueprint collecting the API routes; registered by the app factory.
api_bp = Blueprint("api", __name__)
@api_bp.after_request
def log_response(response):
    """Log any requests/responses with an error code"""
    # In debug mode, trace every request/response pair.
    if current_app.debug:  # pragma: no cover, debugging only
        log.debug('%7s: %s - %i', request.method, request.url,
                  response.status_code)
    # On client/server errors, dump both payloads to aid postmortems.
    if response.status_code >= 400:
        log.debug('Response data: \n%s', response.data)
        log.debug('Request data: \n%s', request.data)
    # after_request handlers must return the (possibly modified) response.
    return response
# Import the resources to add the routes to the blueprint before the app is
# initialized
from . import ( # NOQA
s3url,
email,
)
|
# Generated by Django 3.0.1 on 2020-11-28 13:18
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated placeholder migration: no dependencies, no operations."""

    dependencies = [
    ]

    operations = [
    ]
|
from ulugugu import BBox
from ulugugu.drawings import Drawing
class Empty(Drawing):
    """A drawing with zero extent that renders nothing."""

    def __init__(self):
        super().__init__(BBox((0, 0, 0, 0)))

    def draw(self, ctx):
        # Nothing to render.
        pass
class Rectangle(Drawing):
    """An axis-aligned rectangle of a given size and RGB color.

    `fill` names the context method used to paint the path — 'fill'
    (default) for a solid rectangle, presumably 'stroke' for an outline.
    """

    def __init__(self, size, color, fill='fill'):
        super().__init__(BBox((0, 0, size[0], size[1])))
        self.size = size    # (width, height)
        self.color = color  # (r, g, b)
        self.fill = fill    # context method name used in draw()

    def draw(self, ctx):
        ctx.set_source_rgb(*self.color)
        ctx.rectangle(0, 0, *self.size)
        # Dispatch to ctx.fill() / ctx.stroke() by name.
        getattr(ctx, self.fill)()
class Text(Drawing):
    """A single line of black text drawn at font size 15.

    The bounding box is an estimate: ~8px of width per character and a
    15px ascent (hence the -15 top edge).
    """

    def __init__(self, text):
        # `text` must be assigned before super().__init__ because the
        # bounding box below is derived from its length.
        self.text = text
        super().__init__(BBox((0, -15, len(self.text)*8, 0)))

    def draw(self, ctx):
        ctx.set_source_rgb(0,0,0)
        ctx.set_font_size(15)
        ctx.show_text(self.text)
        # Discard any path state left behind by show_text so later
        # drawings start clean.
        ctx.new_path()
|
import greenlet
def eat(name):
    """Greenlet body: prints in two phases, yielding to g2 in between."""
    print("%s eat 1" % name)
    # Step 2: hand control to g2 (play), passing "egon" as its argument.
    g2.switch("egon")
    print("%s eat 2" % name)
    # Step 4: hand control back to g2.
    g2.switch()
def play(name):
    """Greenlet body: prints in two phases, yielding back to g1 in between."""
    print("%s play 1" % name)
    # Step 3: switch back into g1 (eat), resuming right after its first switch.
    g1.switch()
    print("%s play 2" % name)
g1 = greenlet.greenlet(eat)
g2 = greenlet.greenlet(play)
# Step 1: start g1; "egon" becomes eat()'s `name` argument.
g1.switch("egon")
|
# 1.Given two integer numbers return their product.
# If the product is greater than 1000, then return their sum.
def summation(a, b):
    """Return a*b, or a+b when the product exceeds 1000.

    The previous version contradicted the exercise above: it computed and
    compared the *sum* and returned a misspelled string in one branch.
    """
    product = a * b
    if product > 1000:
        return a + b
    return product

print(summation(100, 1000))
print(summation(100, 100))
|
# Kangaroo race: decide whether two kangaroos with fixed jump lengths,
# starting at different positions, ever land on the same spot at the
# same jump count.  Fixes from the original: input() results converted
# to int (they are strings), Python-3 print syntax, and `respuesta` was
# only assigned in one branch (NameError in the other).
posicion1 = int(input("Posición inicial canguro 1:"))
longitud1 = int(input("Longitud de salto canguro 1:"))
posicion2 = int(input("Posición inicial canguro 2:"))
longitud2 = int(input("Longitud de salto canguro 2:"))
if (posicion1 > posicion2 and longitud1 > longitud2) or (posicion1 < posicion2 and longitud1 < longitud2):
    # The kangaroo already ahead also jumps farther: the gap only grows.
    respuesta = "no"
elif longitud1 == longitud2:
    # Equal jumps: they meet only if they already start together.
    respuesta = "si" if posicion1 == posicion2 else "no"
else:
    # They meet iff the starting gap is a whole multiple of the
    # per-jump closing speed.
    respuesta = "si" if (posicion2 - posicion1) % (longitud1 - longitud2) == 0 else "no"
print("Alcanza?", respuesta)
|
"""
CEASIOMpy: Conceptual Aircraft Design Software
Developed for CFS ENGINEERING, 1015 Lausanne, Switzerland
The script will analyse the fuselage geometry from a CPACS file for an
unconventional aircraft.
Python version: >=3.6
| Author : Stefano Piccini
| Date of creation: 2018-09-27
| Last modification: 2020-01-21 (AJ)
"""
#==============================================================================
# IMPORTS
#==============================================================================
import numpy as np
import math
import ceasiompy.utils.cpacsfunctions as cpsf
from ceasiompy.utils.ceasiomlogger import get_logger
log = get_logger(__file__.split('.')[0])
#==============================================================================
# CLASSES
#==============================================================================
"""All classes are defined inside the classes folder and into the
InputClasses/Uconventional folder"""
#==============================================================================
# FUNCTIONS
#==============================================================================
def check_segment_connection(fus_nb, fuse_seg_nb, fuse_sec_nb, tigl):
    """ The function checks for each segment the start and end section index
        and reorders them into a consecutive chain.

    Args:
        fus_nb (int): Number of fuselages.
        fuse_seg_nb (int): Number of segments.
        fuse_sec_nb (int): Number of sections.
        tigl (handel): Tigl handle.

    Returns:
        sec_nb (int): Number of sections for each fuselage.
        start_index (int): Start section index for each fuselage.
        seg_sec_reordered (float-array): Reordered segments with
                                         respective start and end section
                                         for each fuselage.
        sec_index (float_array): List of section index reordered.
    """
    log.info('---------------------------------------------------------------')
    log.info('---------- Checking fuselage segments connection --------------')
    log.info('---------------------------------------------------------------')

    # Initialising arrays
    nbmax = np.amax(fuse_seg_nb)
    # seg_sec[j-1, i-1] = (start section, end section, segment index)
    # for segment j of fuselage i.
    seg_sec = np.zeros((nbmax, fus_nb, 3))
    seg_sec_reordered = np.zeros(np.shape(seg_sec))
    sec_index = np.zeros((nbmax, fus_nb))
    start_index = []
    sec_nb = []

    # First for each segment the start and end section are found, then
    # they are reordered considering that the end section of a segment
    # is the start section of the next one.
    # The first section is the one that has the lowest x position.
    # The code works if a section is defined and not used in the segment
    # definition and if the segments are not defined
    # with a consequential order.
    # WARNING The code does not work if a segment is defined
    # and then not used.
    # The aircraft should be designed along the x axis
    # and on the x-y plane
    for i in range(1, fus_nb + 1):
        fuse_sec_index = []
        # Record each segment's start/end section as reported by TIGL.
        for j in range(1, fuse_seg_nb[i-1] + 1):
            (seg_sec[j-1, i-1, 0], e) \
                = tigl.fuselageGetStartSectionAndElementIndex(i, j)
            (seg_sec[j-1, i-1, 1], e) \
                = tigl.fuselageGetEndSectionAndElementIndex(i, j)
            seg_sec[j-1, i-1, 2] = j
        # Find the start segment: the one whose end point has the lowest x.
        (slpx, slpy, slpz) = tigl.fuselageGetPoint(i, 1, 0.0, 0.0)
        seg_sec_reordered[0, i-1, :] = seg_sec[0, i-1, :]
        start_index.append(1)
        for j in range(2, fuse_seg_nb[i-1] + 1):
            (x, y, z) = tigl.fuselageGetPoint(i, j, 1.0, 0.0)
            if x < slpx:
                (slpx, slpy, slpz) = (x, y, z)
                # NOTE(review): append() may add several entries for the same
                # fuselage while x keeps decreasing — presumably only the last
                # one is meaningful; confirm against callers.
                start_index.append(j)
                seg_sec_reordered[0, i-1, :] = seg_sec[j-1, i-1, :]
        # Chain the remaining segments: each segment's start section must
        # equal the previous segment's end section.
        for j in range(2, fuse_seg_nb[i-1] + 1):
            end_sec = seg_sec_reordered[j-2, i-1, 1]
            start_next = np.where(seg_sec[:, i-1, 0] == end_sec)
            seg_sec_reordered[j-1, i-1, :] = seg_sec[start_next[0], i-1, :]
        # Collect the ordered, de-duplicated section indices of this fuselage.
        fuse_sec_index.append(seg_sec[0, i-1, 0])
        for j in range(2, fuse_seg_nb[i-1] + 1):
            if (seg_sec_reordered[j-1, i-1, 0] in fuse_sec_index) == False:
                fuse_sec_index.append(seg_sec_reordered[j-1, i-1, 0])
        if (seg_sec_reordered[-1, i-1, 1] in fuse_sec_index) == False:
            fuse_sec_index.append(seg_sec_reordered[-1, i-1, 1])
        nb = np.shape(fuse_sec_index)
        if nb[0] > nbmax:
            # More sections than segments: grow the output array in place.
            nbmax = nb[0]
            sec_index.resize(nbmax, fus_nb)
        sec_index[0:nb[0], i-1] = fuse_sec_index[0:nb[0]]
        sec_nb.append(nb[0])

    return(sec_nb, start_index, seg_sec_reordered, sec_index)
def rel_dist(i, sec_nb, seg_nb, tigl, seg_sec, start_index):
    """ Function to evaluate the relative distance of each section
        used from the start section.

    Args:
        i (int): Index of the current fuselage.
        sec_nb (int): Number of sections of the current fuselage.
        seg_nb (int): Number of segments of the current fuselage.
        tigl (handel): Tigl handle.
        seg_sec (float-array): Reordered segments with respective
                               start and end section for this fuselage.
        start_index (int): Start section index of the current fuselage.

    Returns:
        rel_sec_dis[:,0] (float-array): Relative distance of each section from
            the start section of the current fuselage [m].
        rel_sec_dis[:,1] (float-array): Segment index relative to the section
            of rel_sec_dis[:,0].
    """
    log.info('-----------------------------------------------------------')
    log.info('---------- Evaluating absolute section distance -----------')
    log.info('-----------------------------------------------------------')

    # Column 0: distance from the start section; column 1: segment index.
    # Row 0 (the start section itself) stays at distance 0 / index 0.
    # Improvement: removed the unused local `rel_sec_dist_index` array.
    # NOTE(review): writing rows 1..seg_nb assumes sec_nb == seg_nb + 1 —
    # confirm against check_segment_connection's output.
    rel_sec_dis = np.zeros((sec_nb, 2))
    rel_sec_dis[0, 0] = 0.0
    rel_sec_dis[0, 1] = 0

    # Distance is the difference between the x position of the start section
    # and the x position of each segment's end section (eta = 1.0).
    (slpx, slpy, slpz) = tigl.fuselageGetPoint(i, start_index, 0.0, 0.0)
    for j in range(1, seg_nb + 1):
        k = int(seg_sec[j-1, 2])
        (slpx2, slpy2, slpz2) = tigl.fuselageGetPoint(i, k, 1.0, 0.0)
        rel_sec_dis[j, 0] = abs(slpx2 - slpx)
        rel_sec_dis[j, 1] = k

    return(rel_sec_dis[:, 0], rel_sec_dis[:, 1])
def fuse_geom_eval(fus_nb, h_min, fuse_thick, F_FUEL, afg, cpacs_in):
    """ Main function to evaluate the fuselage geometry

    Args:
        fus_nb (int): Number of fuselages.
        h_min (float): Minimum height for the fuselage [m].
        fuse_thick (float): Thickness of the fuselage [mm].
        F_FUEL (float-array): Percentage of the total volume of the fuel tank
                              fuselage, used for fuel storage (set False if
                              fuselage is meant for payload/passengers).
        afg (class): AircraftGeometry class look at aircraft_geometry_class.py
                     in the classes folder for explanation.
        cpacs_in (str): Path to the CPACS file

    Returns:
        afg (class): Updated aircraft_geometry class
    """
    log.info('-----------------------------------------------------------')
    log.info('---------- Analysing fuselage geometry --------------------')
    log.info('-----------------------------------------------------------')

    # Opening tixi and tigl
    tixi = cpsf.open_tixi(cpacs_in)
    tigl = cpsf.open_tigl(tixi)

    # INITIALIZATION 1 ---------------------------------------------------------
    afg.fus_nb = fus_nb

    # Counting sections and segments--------------------------------------------
    for i in range(1, afg.fus_nb + 1):
        afg.fuse_sec_nb.append(tigl.fuselageGetSectionCount(i))
        afg.fuse_seg_nb.append(tigl.fuselageGetSegmentCount(i))
        afg.fuse_vol.append(tigl.fuselageGetVolume(i))
        afg.fuse_surface.append(tigl.fuselageGetSurfaceArea(i))

    # Checking segment and section connection and reordering them
    (afg.fuse_sec_nb, start_index, seg_sec, fuse_sec_index) \
        = check_segment_connection(afg.fus_nb, afg.fuse_seg_nb,
                                   afg.fuse_sec_nb, tigl)
    afg.f_seg_sec = seg_sec

    # INITIALIZATION 2 ---------------------------------------------------------
    max_sec_nb = np.amax(afg.fuse_sec_nb)
    max_seg_nb = np.amax(afg.fuse_seg_nb)
    afg.fuse_sec_per = np.zeros((max_sec_nb, afg.fus_nb))
    afg.fuse_sec_width = np.zeros((max_sec_nb, afg.fus_nb))
    afg.fuse_sec_height = np.zeros((max_sec_nb, afg.fus_nb))
    afg.fuse_sec_rel_dist = np.zeros((max_sec_nb, afg.fus_nb))
    afg.fuse_seg_index = np.zeros((max_sec_nb, afg.fus_nb))
    afg.fuse_seg_length = np.zeros((max_seg_nb, afg.fus_nb))
    afg.fuse_center_section_point = np.zeros((max_sec_nb, afg.fus_nb, 3))
    afg.fuse_center_seg_point = np.zeros((max_sec_nb, afg.fus_nb, 3))
    afg.fuse_seg_vol = np.zeros((max_seg_nb, afg.fus_nb))
    # Left/right boundary points of each section, reused for the cabin area.
    x1 = np.zeros((max_sec_nb, afg.fus_nb))
    y1 = np.zeros((max_sec_nb, afg.fus_nb))
    z1 = np.zeros((max_sec_nb, afg.fus_nb))
    x2 = np.zeros((max_sec_nb, afg.fus_nb))
    y2 = np.zeros((max_sec_nb, afg.fus_nb))
    z2 = np.zeros((max_sec_nb, afg.fus_nb))

    # FUSELAGE ANALYSIS --------------------------------------------------------
    # Aircraft total length ----------------------------------------------------
    afg.tot_length = tigl.configurationGetLength()

    # Evaluating fuselage: sections perimeter, segments volume and length ---
    for i in range(1, afg.fus_nb + 1):
        (afg.fuse_sec_rel_dist[:, i-1], afg.fuse_seg_index[:, i-1]) \
            = rel_dist(i, afg.fuse_sec_nb[i-1], afg.fuse_seg_nb[i-1],
                       tigl, seg_sec[:, i-1, :], start_index[i-1])
        afg.fuse_length.append(round(afg.fuse_sec_rel_dist[-1, i-1], 3))
        for j in range(1, afg.fuse_seg_nb[i-1] + 1):
            # NOTE(review): this block mixes row index j (section side) with
            # j-1 (segment side) — presumably row 0 is filled by the
            # section-0 code after this loop; confirm.
            k = int(afg.fuse_seg_index[j][i-1])
            afg.fuse_sec_per[j][i-1] \
                = tigl.fuselageGetCircumference(i, k, 1.0)
            # Section centre estimated from two opposite boundary points
            # (zeta = 0.0 and zeta = 0.5).
            (fpx, fpy, fpz) = tigl.fuselageGetPoint(i, k, 1.0, 0.0)
            (fpx2, fpy2, fpz2) = tigl.fuselageGetPoint(i, k, 1.0, 0.5)
            afg.fuse_seg_vol[j-1][i-1] = tigl.fuselageGetSegmentVolume(i, k)
            afg.fuse_center_section_point[j][i-1][0] = (fpx+fpx2) / 2
            afg.fuse_center_section_point[j][i-1][1] = (fpy+fpy2) / 2
            afg.fuse_center_section_point[j][i-1][2] = (fpz+fpz2) / 2
            # Scan the section boundary to find the half-widths on both
            # sides of the centre (points at roughly the centre's z).
            hw1 = 0.0
            hw2 = 0.0
            for zeta in np.arange(0.0, 1.0, 0.001):
                (fpx, fpy, fpz) = tigl.fuselageGetPoint(i, k, 1.0, zeta)
                if abs(fpz - afg.fuse_center_section_point[j][i-1][2]) < 0.01:
                    if (fpy > afg.fuse_center_section_point[j][i-1][1] and hw1 == 0.0):
                        hw1 = abs(fpy - afg.fuse_center_section_point[j][i-1][1])
                        x1[j, i-1] = fpx
                        y1[j, i-1] = fpy
                        z1[j, i-1] = fpz
                    elif (fpy < afg.fuse_center_section_point[j][i-1][1]
                          and hw2 == 0.0):
                        hw2 = abs(fpy - afg.fuse_center_section_point[j][i-1][1])
                        x2[j, i-1] = fpx
                        y2[j, i-1] = fpy
                        z2[j, i-1] = fpz
                        break
            afg.fuse_sec_width[j][i-1] = hw1 + hw2
            # Same scan for the half-heights (points at roughly the centre's y).
            hh1 = 0.0
            hh2 = 0.0
            for zeta in np.arange(0.0, 1.0, 0.001):
                (fpx, fpy, fpz) = tigl.fuselageGetPoint(i, k, 1.0, zeta)
                if abs(fpy - afg.fuse_center_section_point[j][i-1][1]) < 0.01:
                    if (fpz > afg.fuse_center_section_point[j][i-1][2]
                            and hh1 == 0.0):
                        hh1 = abs(fpz - afg.fuse_center_section_point[j][i-1][2])
                    elif (fpz < afg.fuse_center_section_point[j][i-1][2]
                          and hh2 == 0.0):
                        hh2 = abs(fpz - afg.fuse_center_section_point[j][i-1][2])
                        break
            afg.fuse_sec_height[j][i-1] = hh1 + hh2
            # NOTE(review): fuselage index is hard-coded to 1 here instead of
            # i — looks wrong for multi-fuselage aircraft; confirm.
            (fslpx, fslpy, fslpz) = tigl.fuselageGetPoint(1, k, 0.0, 0.0)
            (fslpx2, fslpy2, fslpz2) = tigl.fuselageGetPoint(1, k, 1.0, 0.0)
            afg.fuse_seg_length[j-1][i-1] = abs(fslpx2 - fslpx)
        # Section 0 (the start section, eta = 0.0 of the first segment):
        # same perimeter/centre/width/height evaluation as in the loop above.
        k = int(afg.fuse_seg_index[1][i-1])
        afg.fuse_sec_per[0][i-1] \
            = tigl.fuselageGetCircumference(i, k, 0.0)
        (fpx, fpy, fpz) = tigl.fuselageGetPoint(i, k, 0.0, 0.0)
        (fpx2, fpy2, fpz2) = tigl.fuselageGetPoint(i, k, 0.0, 0.5)
        afg.fuse_center_section_point[0][i-1][0] = (fpx+fpx2) / 2
        afg.fuse_center_section_point[0][i-1][1] = (fpy+fpy2) / 2
        afg.fuse_center_section_point[0][i-1][2] = (fpz+fpz2) / 2
        hw1 = 0
        hw2 = 0
        for zeta in np.arange(0.0, 1.0, 0.001):
            (fpx, fpy, fpz) = tigl.fuselageGetPoint(i, k, 0.0, zeta)
            if abs(fpz - afg.fuse_center_section_point[0][i-1][2]) < 0.01:
                if (fpy > afg.fuse_center_section_point[0][i-1][1] and hw1 == 0):
                    hw1 = abs(fpy - afg.fuse_center_section_point[0][i-1][1])
                    x1[0, i-1] = fpx
                    y1[0, i-1] = fpy
                    z1[0, i-1] = fpz
                elif (fpy < afg.fuse_center_section_point[0][i-1][1] and hw2 == 0):
                    hw2 = abs(fpy - afg.fuse_center_section_point[0][i-1][1])
                    x2[0, i-1] = fpx
                    y2[0, i-1] = fpy
                    z2[0, i-1] = fpz
                    break
        afg.fuse_sec_width[0][i-1] = hw1 + hw2
        hh1 = 0.0
        hh2 = 0.0
        for zeta in np.arange(0.0, 1.0, 0.001):
            # NOTE(review): eta is 1.0 here while the rest of the section-0
            # code samples at eta 0.0 — confirm intended.
            (fpx, fpy, fpz) = tigl.fuselageGetPoint(i, k, 1.0, zeta)
            if abs(fpy - afg.fuse_center_section_point[0][i-1][1]) < 0.01:
                if (fpz > afg.fuse_center_section_point[0][i-1][2]
                        and hh1 == 0.0):
                    hh1 = abs(fpz - afg.fuse_center_section_point[0][i-1][2])
                elif (fpz < afg.fuse_center_section_point[0][i-1][2]
                      and hh2 == 0.0):
                    hh2 = abs(fpz - afg.fuse_center_section_point[0][i-1][2])
                    break
        afg.fuse_sec_height[0][i-1] = hh1 + hh2
        afg.fuse_mean_width.append(round(np.mean(afg.fuse_sec_width[:, i-1]), 3))

    # Evaluating the point at the center of each segment.
    # NOTE(review): `afg.fuse_nb` — elsewhere this attribute is `afg.fus_nb`;
    # looks like a typo (possible AttributeError or stale attribute); confirm.
    for i in range(int(afg.fuse_nb)):
        for j in range(1, afg.fuse_seg_nb[i-1] + 1):
            afg.fuse_center_seg_point[j-1][i-1][0] \
                = (afg.fuse_center_section_point[j-1][i-1][0]
                   + afg.fuse_center_section_point[j][i-1][0])/2
            afg.fuse_center_seg_point[j-1][i-1][1] \
                = (afg.fuse_center_section_point[j-1][i-1][1]
                   + afg.fuse_center_section_point[j][i-1][1])/2
            afg.fuse_center_seg_point[j-1][i-1][2] \
                = (afg.fuse_center_section_point[j-1][i-1][2]
                   + afg.fuse_center_section_point[j][i-1][2])/2

    # Evaluating cabin length and volume, nose length and tail_length ----------
    log.info('-----------------------------------------------------------')
    log.info('----------- Analysing cabin dimensions --------------------')
    log.info('-----------------------------------------------------------')
    # Per-fuselage width-correction factor; relaxed below until a cabin of
    # acceptable length is found.
    corr = (1.3) + np.zeros((afg.fus_nb))
    c = False
    afg.cabin_nb = np.zeros((afg.fus_nb))
    afg.cabin_area = np.zeros((afg.fus_nb))
    afg.fuse_cabin_length = np.zeros((afg.fus_nb))
    afg.cabin_seg = np.zeros((max_seg_nb, afg.fus_nb))
    afg.cabin_length = np.zeros((afg.fus_nb))
    afg.fuse_cabin_vol = np.zeros((afg.fus_nb))
    afg.fuse_nose_length = np.zeros((afg.fus_nb))
    afg.fuse_tail_length = np.zeros((afg.fus_nb))
    afg.fuse_fuel_vol = np.zeros((afg.fus_nb))
    for i in range(1, afg.fus_nb + 1):
        ex = False
        cabin_seg = np.zeros((max_seg_nb, 1))
        cabin_nb = 0
        cabin_length = 0
        cabin_volume = 0
        nose_length = 0
        tail_length = 0
        if not F_FUEL[i-1]:
            # First pass: mark segments whose end section reaches the maximum
            # width and satisfies the minimum height.
            for j in range(1, afg.fuse_seg_nb[i-1] + 1):
                if (round(afg.fuse_sec_width[j][i-1], 3)
                        == round(np.amax(afg.fuse_sec_width[:, i-1]), 3) and
                        (h_min <= afg.fuse_sec_height[j, i-1])):
                    cabin_length += afg.fuse_seg_length[j-1, i-1]
                    cabin_volume += afg.fuse_seg_vol[j-1, i-1]
                    cabin_seg[j-1] = 1
                    c = True
                elif not c:
                    nose_length += afg.fuse_seg_length[j-1, i-1]
            if cabin_length >= 0.65 * afg.fuse_length[i-1]:
                # If the aircraft is designed with 1 or more sections with
                # maximum width and the sum of their lengths is greater than 65%
                # of the total length, the cabin will be considered only in those
                # sections
                tail_length = afg.fuse_length[i-1] - cabin_length - nose_length
                cabin_nb = 1
                ex = True
            # Otherwise retry with a progressively relaxed width threshold
            # (corr * mean width) until the cabin covers at least 20% of
            # the fuselage length.
            while ex is False:
                c = False
                cabin_seg[:] = 0
                nose_length = 0
                tail_length = 0
                cabin_length = 0
                cabin_volume = 0
                for j in range(1, afg.fuse_seg_nb[i-1] + 1):
                    if (afg.fuse_sec_width[j][i-1] >= (corr[i-1]
                            * afg.fuse_mean_width[i-1]) and
                            (h_min <= afg.fuse_sec_height[j, i-1])):
                        cabin_length += afg.fuse_seg_length[j-1, i-1]
                        cabin_volume += afg.fuse_seg_vol[j-1, i-1]
                        cabin_seg[j-1] = 1
                        c += 1
                    elif c > 1:
                        tail_length += afg.fuse_seg_length[j-1, i-1]
                    else:
                        nose_length += afg.fuse_seg_length[j-1, i-1]
                if corr[i-1] > 0.0 and cabin_length < (0.20 * afg.fuse_length[i-1]):
                    corr[i-1] -= 0.05
                else:
                    ex = True
            afg.fuse_nose_length[i-1] = round(nose_length, 3)
            afg.fuse_fuel_vol[i-1] = 0
            afg.fuse_tail_length[i-1] = round(tail_length, 3)
            afg.fuse_cabin_length[i-1] = round(cabin_length, 3)
            afg.fuse_cabin_vol[i-1] = round(cabin_volume, 3)
            afg.cabin_nb[i-1] = cabin_nb
            afg.cabin_seg[:, i-1] = cabin_seg[:, 0]
            afg.fuse_cabin_length[i-1] = round(cabin_length, 3)
            # Floor area of the cabin: shoelace formula over the boundary
            # points of each consecutive pair of cabin sections.
            cabin_area = 0
            for j in range(0, afg.fuse_seg_nb[i-1]):
                if afg.cabin_seg[j, i-1] == 1:
                    (x11, y11, z11) = (x1[j, i-1], y1[j, i-1], z1[j, i-1])
                    (x12, y12, z12) = (x1[j+1, i-1], y1[j+1, i-1], z1[j+1, i-1])
                    (x21, y21, z21) = (x2[j, i-1], y2[j, i-1], z2[j, i-1])
                    (x22, y22, z22) = (x2[j+1, i-1], y2[j+1, i-1], z2[j+1, i-1])
                    cabin_area += (0.5
                                   * abs(x11*y12 + x12*y22 + x22*y21 + x21*y11
                                         - (y11*x12 + y12*x22 + y22*x21 + y21*x11)))
                elif (cabin_area > 0 and afg.cabin_seg[j, i-1] == 0):
                    break
            # Subtract the wall thickness along both cabin sides.
            thick_area = afg.fuse_cabin_length[i-1] * (fuse_thick*2.0)
            afg.cabin_area[i-1] = round((cabin_area-thick_area), 3)
        else:
            # Fuel-tank fuselage: no cabin at all.
            # NOTE(review): fuse_fuel_vol was initialised to zeros, so `*=`
            # always yields 0 — presumably this was meant to scale
            # afg.fuse_vol[i-1]; confirm.
            afg.fuse_fuel_vol[i-1] *= F_FUEL[i-1]/100.0
            afg.fuse_nose_length[i-1] = 0
            afg.fuse_tail_length[i-1] = 0
            afg.fuse_cabin_length[i-1] = 0
            afg.fuse_cabin_vol[i-1] = 0
            afg.cabin_area[i-1] = 0

    cpsf.close_tixi(tixi, cpacs_in)

    # log info display ----------------------------------------------------------
    log.info('-----------------------------------------------------------')
    log.info('---------- Fuselage Geometry Evaluations ------------------')
    log.info('---------- USEFUL INFO ----------------------------------\n'
             + 'If fuselage number is greater than 1 the\n'
             + 'informations of each obj are listed in an\n '
             + 'array ordered progressively')
    log.info('-----------------------------------------------------------')
    log.info('---------- Fuselage Results -------------------------------')
    log.info('Number of fuselage [-]: ' + str(afg.fus_nb))
    log.info('Number of fuselage sections [-]: ' + str(afg.fuse_sec_nb))
    log.info('Number of fuselage segments [-]: ' + str(afg.fuse_seg_nb))
    # NOTE(review): this logs the loop-local `cabin_seg` of the LAST fuselage,
    # not afg.cabin_seg — confirm intended.
    log.info('Cabin segments array [-]:\n' + str(cabin_seg))
    log.info('Fuse Length [m]:\n' + str(afg.fuse_length))
    log.info('Fuse nose Length [m]:\n' + str(afg.fuse_nose_length))
    log.info('Fuse cabin Length [m]:\n' + str(afg.fuse_cabin_length))
    log.info('Fuse tail Length [m]:\n' + str(afg.fuse_tail_length))
    log.info('Aircraft Length [m]: ' + str(afg.tot_length))
    log.info('Perimeter of each section of each fuselage [m]: \n'
             + str(afg.fuse_sec_per))
    log.info('Relative distance of each section of each fuselage [m]: \n'
             + str(afg.fuse_sec_rel_dist))
    log.info('Length of each segment of each fuselage [m]: \n'
             + str(afg.fuse_seg_length))
    log.info('Mean fuselage width [m]: ' + str(afg.fuse_mean_width))
    log.info('Width of each section of each fuselage [m]: \n'
             + str(afg.fuse_sec_width))
    log.info('Cabin area [m^2]:\n' + str(afg.cabin_area))
    log.info('Fuselage wetted surface [m^2]:\n' + str(afg.fuse_surface))
    log.info('Volume of all the segmetns of each fuselage [m^3]: \n'
             + str(afg.fuse_seg_vol))
    log.info('Volume of each cabin [m^3]:\n' + str(afg.fuse_cabin_vol))
    log.info('Volume of each fuselage [m^3]:\n' + str(afg.fuse_vol))
    log.info('Volume of fuel in each fuselage [m^3]:\n'
             + str(afg.fuse_fuel_vol))
    log.info('-----------------------------------------------------------')

    return(afg)
#==============================================================================
# MAIN
#==============================================================================
if __name__ == '__main__':
    # This module only provides analysis functions; warn if run directly.
    log.warning('#########################################################')
    log.warning('# ERROR NOT A STANDALONE PROGRAM, RUN balanceuncmain.py #')
    log.warning('#########################################################')
|
#linux_handler
from Xlib.display import Display
from Xlib import X
from Xlib.ext import record
from Xlib.protocol import rq
import time
from linux_map import keysym_map
# Global handle to the X display; populated by start() before events arrive.
disp = None
class KeyListener(object):
    def __init__(self):
        """Minimal hotkey dispatcher.

        Register a callback with addKeyListener("combination", fn), where
        the combination is a '+'-separated list of key names, e.g.::

            keylistener = KeyListener()
            keylistener.addKeyListener("L_CTRL+L_SHIFT+y", fn)
            keylistener.addKeyListener("a", fn)

        Then feed the object key events via press()/release().  Whenever
        the set of currently-held keys exactly matches a registered
        combination, its callback is invoked.  Key names come from the
        keysym map in linux_map.
        """
        # Keys currently held down.
        self.pressed = set()
        # Maps a sorted tuple of key names -> callback.
        self.listeners = {}

    def press(self, character):
        """Record a key press and fire the matching listener, if any.

        Must be paired with release() calls, otherwise keys accumulate
        and no combination will match again.
        """
        self.pressed.add(character)
        combo = tuple(sorted(self.pressed))
        handler = self.listeners.get(combo)
        if handler:
            handler()

    def release(self, character):
        """Record a key release; keys that were never pressed are ignored."""
        if character in self.pressed:
            self.pressed.remove(character)

    def addKeyListener(self, hotkeys, callable):
        """Register `callable` for the '+'-separated combination `hotkeys`."""
        combo = tuple(sorted(hotkeys.split("+")))
        self.listeners[combo] = callable
# Module-level listener instance used by the X event handler below.
keylistener = KeyListener()
def keysym_to_character(sym):
    """Translate an X keysym to its character name via keysym_map,
    falling back to the raw keysym when no mapping exists."""
    if sym not in keysym_map:
        return sym
    return keysym_map[sym]
def handler(reply):
    """ This function is called when a xlib event is fired """
    data = reply.data
    # A single reply may pack several events; consume them all.
    while len(data):
        event, data = rq.EventField(None).parse_binary_value(data, disp.display, None, None)
        keycode = event.detail  # NOTE(review): unused; kept for debugging?
        # Translate the keycode to a keysym using the first (unshifted) group.
        keysym = disp.keycode_to_keysym(event.detail, 0)
        # Only forward keys that have an entry in the keysym map.
        if keysym in keysym_map:
            character = keysym_to_character(keysym)
            #print(character)
            if event.type == X.KeyPress:
                keylistener.press(character)
            elif event.type == X.KeyRelease:
                keylistener.release(character)
def start():
    """Connect to the X display and pump key events into `keylistener`.

    NOTE(review): record_enable_context blocks while recording is active,
    so the statements after it only run once recording stops — confirm
    the trailing wait loop is ever reached as intended.
    """
    global disp
    # get current display
    disp = Display()
    root = disp.screen().root

    # Monitor keypress and button press
    ctx = disp.record_create_context(
        0,
        [record.AllClients],
        [{
            'core_requests': (0, 0),
            'core_replies': (0, 0),
            'ext_requests': (0, 0, 0, 0),
            'ext_replies': (0, 0, 0, 0),
            'delivered_events': (0, 0),
            'device_events': (X.KeyReleaseMask, X.ButtonReleaseMask),
            'errors': (0, 0),
            'client_started': False,
            'client_died': False,
        }])
    # Blocks here; `handler` is invoked for every recorded event.
    disp.record_enable_context(ctx, handler)
    disp.record_free_context(ctx)

    while True:
        time.sleep(.1)
        # Infinite wait, doesn't do anything as no events are grabbed
        event = root.display.next_event()
|
import time
from urllib.parse import urlparse
# Base URL of the site under test.
homeurl = "https://www.sunlifeglobalinvestments.com/"
class Advisor_tools_and_calculator():
    """Page object for the 'Advisor tools and calculators' page."""

    def __init__(self, driver):
        # Selenium WebDriver instance driving the browser.
        self.driver = driver

    # XPath locators for the elements this page object interacts with.
    locators = {
        "header_text": "//div[@class='title-bar']//h1[contains(text(),'Advisor tools and calculators')]",
        "active_breadcrumb": "//ol[@class='breadcrumb']//li[@class='active']//span[contains(text(),'Advisor tools and calculators')]",
        "launch_illustration_tool": "//a[contains(text(),'Launch Illustration tool')]",
        "launch_granite_tool": "//a[contains(text(),'Launch Granite tool')]",
        "launch_series_t_calculator": "//a[contains(text(),'Launch Series T calculator')]"
    }

    def is_valid(self, url):
        """
        Checks whether `url` is a valid URL (has both a scheme and a host).
        """
        parsed = urlparse(url)
        return bool(parsed.netloc) and bool(parsed.scheme)

    def verify_title(self):
        """Assert the page reports a non-empty title."""
        title = self.driver.title
        print(title)
        assert title

    def verify_header(self):
        """Assert the page header is present and non-empty."""
        header = self.driver.find_element_by_xpath(self.locators["header_text"]).text
        assert header

    def verify_active_breadcrumb(self):
        """Assert the active breadcrumb entry is present and non-empty."""
        breadcrumb = self.driver.find_element_by_xpath(self.locators["active_breadcrumb"]).text
        assert breadcrumb

    def _verify_tool_opens(self, locator_key, expected_title):
        """Click the link at `locator_key`, follow a new window if one
        opened, and assert `expected_title` occurs (case-insensitively)
        in the resulting page title.

        Consolidates the click/sleep/switch/assert sequence that was
        duplicated across the three verify_*_opens methods.
        """
        self.driver.find_element_by_xpath(self.locators[locator_key]).click()
        time.sleep(1)  # give the tool time to open in a new window/tab
        if len(self.driver.window_handles) > 1:
            self.driver.switch_to.window(self.driver.window_handles[1])
        print(self.driver.title)
        assert expected_title.casefold() in (self.driver.title).casefold()

    def verify_illustration_tool_opens(self):
        """Assert the Illustration tool link opens the expected page."""
        self._verify_tool_opens("launch_illustration_tool", "Sun Life Global Investments")

    def verify_granite_tool_opens(self):
        """Assert the Granite tool link opens the expected page."""
        self._verify_tool_opens("launch_granite_tool", "Sun Life Granite Managed Solutions Proposal Tool")

    def verify_series_t_cal_tool_opens(self):
        """Assert the Series T calculator link opens the expected page."""
        self._verify_tool_opens("launch_series_t_calculator", "Sun Life Series T Calculator")
|
from django.shortcuts import render,HttpResponse,redirect
from django.contrib.auth.models import User
from .models import ContactUs
from django.contrib.auth import authenticate,login,logout
from django.contrib import messages
import re
from django.db import IntegrityError
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
# Create your views here.
def Index(request):
    """Render the login landing page."""
    template = 'account/login.html'
    return render(request, template)
def Log_out(request):
    """End the current session and send the user back to the login page."""
    logout(request)
    return redirect('indexlogin')
def Login_Handle(request):
    """Authenticate a POSTed username/password and log the user in.

    On success redirects to the home page; on failure (or for any
    non-POST request) redirects back to the login page with a message.
    """
    if request.method == 'POST':
        user = request.POST['username']
        password = request.POST['password']
        u = authenticate(username=user, password=password)
        if u is not None:
            login(request, u)
            # Typo fix in the user-facing message ("Sucess" -> "Success").
            messages.add_message(request, messages.INFO, 'Login Success Welcome :' + str(user).title())
            return redirect('HomePage')
        # Message fix: "Login cridential's is wrong" -> corrected wording.
        messages.add_message(request, messages.ERROR, 'Login credentials are wrong')
    # Bug fix: non-POST requests previously fell through and returned None,
    # which makes Django raise; send them to the login page instead.
    return redirect('indexlogin')
def SiginUp_Handle(request):
    """Validate a POSTed registration form and create the user.

    Collects all validation errors as messages; on any error redirects
    back to the sign-up page, otherwise creates the account (reporting a
    duplicate username) and renders the registration template.
    """
    if request.method == 'POST':
        user = request.POST['username']
        email = request.POST['email']
        password = request.POST['password']
        conform_pass = request.POST['conformpassword']
        error = False
        if not user.isalnum():
            messages.add_message(request, messages.INFO, 'Username must contain one alphabet and one numaric value')
            error = True
        # Bug fix: the pattern previously contained literal quote characters
        # ("'^...$'") so it could never match, and the condition was not
        # negated — invalid emails were never rejected.  Same pattern as
        # Contact_Us, as a raw string.
        email_f = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
        if not re.search(email_f, email):
            messages.add_message(request, messages.ERROR, 'Email is not in a correct format')
            error = True
        if password.isalnum():
            messages.add_message(request, messages.INFO, 'Password must contain one alphabet and one numaric value')
            error = True
        if password != conform_pass:
            messages.add_message(request, messages.ERROR, 'Conform passsword not match')
            error = True
        if error:
            return redirect('SiginUp')
        try:
            User.objects.create_user(user, email, password)
        except IntegrityError:
            messages.add_message(request, messages.ERROR, 'User Already Exists')
    return render(request, 'account/register.html')
def Contact_Us(request):
    """Validate and store a contact-us submission, then e-mail the sender.

    On validation errors the contact page is re-rendered with messages;
    non-POST requests simply get the contact page.
    """
    if request.method == 'POST':
        name = request.POST['name']
        email = request.POST['email']
        comment = request.POST['comment']
        # Improvement: removed the redundant function-local `import re`
        # (the module already imports re at the top).
        error = False
        email_f = r'^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
        if not re.search(email_f, email):
            messages.add_message(request, messages.ERROR, 'Email is not valid')
            error = True
        if len(comment) < 10:
            messages.add_message(request, messages.ERROR, 'Message is to short')
            error = True
        if len(name) < 3:
            messages.add_message(request, messages.ERROR, 'Name is not correct')
            error = True
        if error:
            return render(request, 'account/contact.html')
        contact = ContactUs(name=name, email=email, comment=comment)
        contact.save()
        # NOTE(review): both the plaintext and the HTML alternative render the
        # same HTML template — confirm a text template was intended.
        plaintext = get_template('email/send.html')
        htmly = get_template('email/send.html')
        d = {'sender_name': name}
        subject, from_email, to = 'Registration', 'waqasdevolper@gmail.com', email
        text_content = plaintext.render(d)
        html_content = htmly.render(d)
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
        msg.attach_alternative(html_content, "text/html")
        msg.send()
        messages.add_message(request, messages.INFO, 'We will conatct u soon')
    # Bug fix: ensure every request path (including GET) returns a response.
    return render(request, 'account/contact.html')
"""
Author: Sidhin S Thomas (sidhin@trymake.com)
Copyright (c) 2017 Sibibia Technologies Pvt Ltd
All Rights Reserved
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
"""
from django.conf.urls import url, include
from trymake.website.core import views
# NAMESPACE "core:account"
# URL patterns for the logged-in "my account" area; included below under
# the "account" namespace.
my_account_urls = [
    # Template
    url(r'^$', views.my_account, name="myaccount"),

    # AJAX views
    url(r'^ajax/orders/get$', views.get_order_list, name="get_order_list"),

    # TODO Cancel Order
    # TODO return order
    # TODO delivery details

    # Feedback form
    url(r'^form/feedback/get$', views.get_feedback_form, name="get_feedback_form"),
    url(r'^form/feedback/submit$', views.process_feedback, name="submit_feedback_form"),

    # Address Form
    url(r'^form/address/get$', views.get_address_form, name="get_address_form"),
    url(r'^form/address/submit$', views.process_address_add, name="submit_address"),
    url(r'^form/address/edit$', views.edit_address, name="edit_address"),

    # Product Feedback Form
    url(r'^form/product_feedback/get$', views.get_product_feedback_form, name="get_product_feedback_form"),
    # NOTE(review): pattern has no trailing $ so it also matches longer
    # paths — confirm intended.
    url(r'^form/product/submit/(?P<product_id>[0-9]+)', views.process_product_feedback, name="submit_product_feedback"),

    # Order Feedback Form
    url(r'^form/order_feedback/get', views.get_order_feedback_form, name="get_order_feedback"),
    url(r'^form/order_feedback/submit/(?P<order_id>[0-9]+)', views.process_order_feedback, name="process_order_feedback"),
]
# NAMESPACE "core"
urlpatterns = [
    # Template
    url(r'^$', views.index, name="index"),

    # LOGIN views - Core url namespace
    # AJAX views
    url(r'^authenticated$', views.is_Logged_in, name="authenticated"),
    url(r'^check_account$', views.check_account_exists, name="check_account"),
    # NOTE(review): two routes map to views.process_registration — confirm
    # the duplicate is intentional.
    url(r'^process_registration', views.process_registration , name="process_registration"),
    url(r'^register$', views.process_registration, name="reg"),

    # Redirect view
    url(r'logout$', views.logout_view, name="logout"),
    url(r'^login$', views.process_login, name="login"),

    # ACCOUNT views - core:account url namespace
    url(r'^account/', include(my_account_urls, namespace="account"))
]
|
#!/bin/python3
import sys

# Read one line and strip surrounding whitespace.
S = input().strip()

# Attempt to print the given input as an integer.  If the input is not a
# valid integer, handle the exception by printing "Bad String".
try:
    value = int(S)
    print(value)
except ValueError:
    # int() raises ValueError for non-numeric input.
    print("Bad String")
|
# https://old.reddit.com/r/dailyprogrammer/comments/8jcffg/20180514_challenge_361_easy_tally_program/
def tally(scoreTrack):
    """Tally player scores from a letter sequence.

    A lowercase letter adds one point to that player; an uppercase letter
    subtracts one point from the same player (keyed by its lowercase form).
    Other characters are ignored.

    Improvements over the original: the lowercase branch no longer does a
    separate membership-test/initialise step (dict.get covers it), and the
    final scoreboard is returned so callers can use it (backward-compatible
    — the function previously returned None, which callers ignore).
    """
    scoreTrack = list(scoreTrack)
    print(scoreTrack)
    scoreBoard = dict()
    for char in scoreTrack:
        if char.islower():
            # Lowercase letter: the player gains a point.
            scoreBoard[char] = scoreBoard.get(char, 0) + 1
        elif char.isupper():
            # Uppercase letter: the same player loses a point.
            player = char.lower()
            scoreBoard[player] = scoreBoard.get(player, 0) - 1
    print(scoreBoard)
    return scoreBoard

tally("EbAAdbBEaBaaBBdAccbeebaec")
|
import numpy as np
# Source sequence 1..10; sliding windows of length `size` are cut from it below.
a = np.array(range(1,11))
size = 5
# Build the model
def split_x(seq, size):
    """Return all consecutive windows of length `size` taken from `seq`,
    stacked as a 2-D numpy array of shape (len(seq) - size + 1, size)."""
    windows = [seq[start:start + size] for start in range(len(seq) - size + 1)]
    return np.array(windows)
# Cut the sequence into sliding windows, then split into features/target:
# first four columns are the inputs, the fifth is the label.
dataset = split_x(a,size)
x = dataset[:,:4] # (6, 4)
y = dataset[:,4] # (6,)
print(dataset)
print(x)
print(y)
print(x.shape)
print(y.shape)
# LSTM input must be 3-D: (samples, timesteps, features).
x = x.reshape(6,4,1)
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, LSTM, Input

# Functional-API model: LSTM encoder followed by a small dense head that
# regresses the next value of the sequence.
input1 = Input(shape=(4,1))
dense = LSTM(64, activation='relu')(input1)
dense = Dense(32)(dense)
dense = Dense(16)(dense)
output1 = Dense(1)(dense)
model = Model(inputs=input1, outputs=output1)

model.compile(loss='mse', optimizer='adam')
model.fit(x, y, epochs=200, batch_size=4, verbose=2)

loss = model.evaluate(x,y)
print('loss :', loss)

# Predict the value following 2,4,6,8 (out-of-distribution stride).
x_pred = np.array([2,4,6,8]).reshape(1,4,1)
y_pred = model.predict(x_pred)
print('y_pred :', y_pred)
# result
# loss : 0.007185818161815405
# y_pred : [[7.9436164]]

# Predict the value following the last window of the training sequence.
x_pred = dataset[-1,1:].reshape(1,4,1)
print(x_pred)
y_pred = model.predict(x_pred)
print('y_pred :', y_pred)
# y_pred : [[10.746268]]
import pytest
import pdb
from pytest_bdd import scenarios, given, when, then, parsers
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Base URL of the application under test (local dev server).
localhost = 'http://127.0.0.1:8000/'

# Scenarios
# Bind every scenario in the feature file to the step functions below.
scenarios('../features/login.feature')
#Fixtures
@pytest.mark.usefixtures('chromeBrowser')
# Given Step
@given('that I am on the login page')
def visit_login(chromeBrowser):
    """Open the login page in the browser fixture."""
    chromeBrowser.get(localhost)
# When step
@when(parsers.parse('I enter my username {username} and password {password}'))
def input_login(chromeBrowser, username, password):
    """Fill the login form and submit it by sending RETURN."""
    chromeBrowser.find_element_by_id('id_username').send_keys(username)
    chromeBrowser.find_element_by_id('id_password').send_keys(password, Keys.RETURN)
@when(parsers.parse('I click on {link}'))
def click_on(chromeBrowser, link):
    """Maximise the window, then follow the link with the given text."""
    chromeBrowser.maximize_window()
    chromeBrowser.find_element_by_link_text(link).click()
# Then step
@then(parsers.parse('I should see the page with title {title}'))
def see_dashboard(chromeBrowser, title):
    """Assert that the expected title text occurs in the page source."""
    page = chromeBrowser.page_source
    assert title in page
# Then step
@then(parsers.parse('I should see a message saying \'{message}\''))
def check_message(chromeBrowser, message):
    """Assert the flash message is shown, then close the browser.

    NOTE(review): quitting the browser inside a Then step prevents any
    later step from using the fixture — confirm this is intended.
    """
    assert (message in chromeBrowser.page_source)
    chromeBrowser.quit()
|
import tensorflow as tf
import numpy as np
def norm_boxes(boxes, shape):
    '''
    Converts boxes from pixel coordinates to normalized coordinates.
    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.
    :param boxes: [N, (y1, x1, y2, x2)] in pixel coordinates
    :param shape: (height, width) in pixels
    :return: [N, (y1, x1, y2, x2)] in normalized coordinates
    '''
    height, width = shape
    # Shift the bottom-right corner inward by one pixel, then divide by the
    # last valid pixel index on each axis.
    denom = np.array([height - 1, width - 1, height - 1, width - 1])
    offset = np.array([0, 0, 1, 1])
    return ((boxes - offset) / denom).astype(np.float32)
def norm_boxes_graph(boxes, shape):
    '''
    Converts boxes from pixel coordinates to normalized coordinates.
    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.
    :param boxes: [B, None, (y1, x1, y2, x2)] in pixel coordinates
    :param shape: (height, width, ) in pixels
    :return: [B, None, (y1, x1, y2, x2)] in normalized coordinates
    '''
    height, width = tf.split(tf.cast(shape, tf.float32), 2)
    # Divisor is the last valid pixel index per axis; the bottom-right
    # corner is first shifted inward by one pixel.
    denom = tf.concat([height, width, height, width], axis=-1) - tf.constant(1.0)
    offset = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - offset, denom)
def denorm_boxes(boxes, shape):
    '''
    Converts boxes from normalized coordinates to pixel coordinates.
    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.
    :param boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
    :param shape: (height, width) in pixels
    :return: [..., (y1, x1, y2, x2)] in pixel coordinates
    '''
    height, width = shape
    # Scale back to pixel indices, then push the bottom-right corner one
    # pixel outward and round to the nearest integer.
    factor = np.array([height - 1, width - 1, height - 1, width - 1])
    offset = np.array([0, 0, 1, 1])
    return np.around(boxes * factor + offset).astype(np.int32)
def denorm_boxes_graph(boxes, shape):
    '''
    Converts boxes from normalized coordinates to pixel coordinates.
    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.
    :param boxes: [..., (y1, x1, y2, x2)] in normalized coordinates
    :param shape: [..., (height, width)] in pixels
    :return: [..., (y1, x1, y2, x2)] in pixel coordinates
    '''
    # Graph-mode twin of denorm_boxes: same arithmetic using TF ops.
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    # Valid pixel index range along each axis is [0, dim - 1].
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    # Re-apply the one-pixel shift that makes (y2, x2) exclusive.
    shift = tf.constant([0., 0., 1., 1.])
    return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)
def extract_bboxes(mask):
    """Compute tight bounding boxes from instance masks.

    :param mask: [height, width, num_instances]; pixels are 1 or 0.
    :return: bbox array [num_instances, (y1, x1, y2, x2)], int32, with the
             (y2, x2) corner exclusive.
    """
    num_instances = mask.shape[-1]
    boxes = np.zeros([num_instances, 4], dtype=np.int32)
    for idx in range(num_instances):
        instance = mask[:, :, idx]
        # Columns/rows that contain at least one mask pixel.
        cols = np.where(instance.any(axis=0))[0]
        rows = np.where(instance.any(axis=1))[0]
        if cols.size:
            x1, x2 = cols[0], cols[-1] + 1  # +1: exclusive right edge
            y1, y2 = rows[0], rows[-1] + 1  # +1: exclusive bottom edge
        else:
            # Empty mask (can happen after resize/crop): zero box.
            y1, x1, y2, x2 = 0, 0, 0, 0
        boxes[idx] = [y1, x1, y2, x2]
    return boxes.astype(np.int32)
def compute_iou(box, boxes, box_area, boxes_area):
    """Calculate IoU of `box` against every box in `boxes`.

    Note: the areas are passed in rather than computed here so the caller
    can compute them once and avoid duplicate work.

    :param box: 1D vector [y1, x1, y2, x2]
    :param boxes: [boxes_count, (y1, x1, y2, x2)]
    :param box_area: float, area of `box`
    :param boxes_area: array of length boxes_count
    :return: 1D array of IoU values, one per row of `boxes`
    """
    # Intersection rectangle: overlap of the coordinate ranges per axis.
    inter_y1 = np.maximum(box[0], boxes[:, 0])
    inter_y2 = np.minimum(box[2], boxes[:, 2])
    inter_x1 = np.maximum(box[1], boxes[:, 1])
    inter_x2 = np.minimum(box[3], boxes[:, 3])
    # Clamp negative extents to zero so disjoint boxes contribute nothing.
    intersection = np.maximum(inter_x2 - inter_x1, 0) * np.maximum(inter_y2 - inter_y1, 0)
    union = box_area + boxes_area - intersection
    return intersection / union
def compute_overlaps(boxes1, boxes2):
    """Compute the IoU overlap matrix between two sets of boxes.

    For better performance, pass the largest set first and the smaller second.

    :param boxes1: [N1, (y1, x1, y2, x2)]
    :param boxes2: [N2, (y1, x1, y2, x2)]
    :return: overlap matrix of shape [N1, N2]; cell [i, j] is IoU of
             boxes1[i] with boxes2[j]
    """
    # Precompute areas once; compute_iou takes them as arguments.
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
    # One column per box in boxes2, each filled by one vectorized IoU pass.
    for j, box2 in enumerate(boxes2):
        overlaps[:, j] = compute_iou(box2, boxes1, area2[j], area1)
    return overlaps
def box_refinement(box, gt_box):
    """Compute the refinement (delta) that transforms `box` into `gt_box`.

    Both inputs are [N, (y1, x1, y2, x2)] with (y2, x2) assumed to lie
    outside the box. Returns [N, (dy, dx, dh, dw)] where dy/dx are center
    shifts in units of box height/width and dh/dw are log scale ratios.
    """
    box = box.astype(np.float32)
    gt_box = gt_box.astype(np.float32)

    def _center_size(b):
        # Decompose boxes into (center_y, center_x, height, width).
        h = b[:, 2] - b[:, 0]
        w = b[:, 3] - b[:, 1]
        return b[:, 0] + 0.5 * h, b[:, 1] + 0.5 * w, h, w

    cy, cx, h, w = _center_size(box)
    gt_cy, gt_cx, gt_h, gt_w = _center_size(gt_box)

    dy = (gt_cy - cy) / h
    dx = (gt_cx - cx) / w
    dh = np.log(gt_h / h)
    dw = np.log(gt_w / w)
    return np.stack([dy, dx, dh, dw], axis=1)
|
import os
import shutil
from unittest import TestCase
from tempfile import TemporaryDirectory
import pandas as pd
__all__ = [
"test_base",
"test_empiric",
"test_enrich",
"test_enrich2",
"test_fasta",
"test_utilities",
"test_filters",
"test_validators",
"ProgramTestCase",
]
# TODO: think up a better name for this class
# TODO: remove the old self.bin stuff
class ProgramTestCase(TestCase):
    """Base test case providing a throwaway copy of the package's data dir.

    ``setUp`` copies the ``data`` directory that sits next to this file into
    a temporary directory; ``tearDown`` removes it along with any extra
    paths registered in ``self.bin``.
    """

    def setUp(self):
        # Keep the TemporaryDirectory object itself so it is not garbage
        # collected (and cleaned up) before tearDown runs.
        self._data_dir = TemporaryDirectory()
        self.data_dir = os.path.join(
            self._data_dir.name, "data"
        )  # store the directory path
        shutil.copytree(
            src=os.path.join(os.path.dirname(os.path.abspath(__file__)), "data"),
            dst=self.data_dir,
        )
        # Paths appended here are deleted in tearDown.
        self.bin = []

    def mock_multi_sheet_excel_file(self, path, data):
        """Write each mapping in `data` to its own sheet of an xlsx file.

        The created file path is registered in ``self.bin`` for cleanup.
        """
        # Fix: ExcelWriter.save() was deprecated and removed in pandas 2.0;
        # the context manager closes (and saves) the workbook instead.
        with pd.ExcelWriter(path, engine="xlsxwriter") as writer:
            for i, di in enumerate(data):
                df = pd.DataFrame(di)
                df.to_excel(writer, sheet_name="Sheet{}".format(i), index=False)
        self.bin.append(path)

    def tearDown(self):
        self._data_dir.cleanup()
        for path in self.bin:
            if os.path.exists(path) and os.path.isfile(path):
                os.remove(path)
            elif os.path.exists(path) and os.path.isdir(path):
                os.removedirs(path)
|
import autograd.numpy as np
from CelestePy.util.data import mags2nanomaggies, df_from_fits
from CelestePy.util.dists.mog import MixtureOfGaussians
from CelestePy.util.dists.flux_prior import FluxColorMoG, GalShapeMoG, GalRadiusMoG, GalAbMoG
import cPickle as pickle
import pandas as pd
import pyprind
import fitsio
# cross validate mog function
def fit_mog(data, max_comps = 20, mog_class = MixtureOfGaussians):
    # Fit a mixture of Gaussians to `data`, choosing the number of components
    # by a 75/25 train/validation split, then refitting the winning size on
    # the full data set and wrapping it in `mog_class`.
    # NOTE(review): Python 2 source (print statements); sklearn's mixture.GMM
    # was removed long ago -- modern sklearn uses GaussianMixture. Confirm the
    # pinned sklearn version before touching this.
    from sklearn import mixture
    N = data.shape[0]
    # 1-D data is split without a trailing column slice.
    if len(data.shape) == 1:
        train = data[:int(.75 * N)]
        test = data[int(.75 * N):]
    else:
        train = data[:int(.75*N), :]
        test = data[int(.75*N):, :]
    # do train/val GMM fit: score each candidate component count on the
    # held-out 25% and keep the mean log-probability.
    num_comps = np.arange(1, max_comps+1)
    scores = np.zeros(len(num_comps))
    for i, num_comp in enumerate(num_comps):
        g = mixture.GMM(n_components=num_comp, covariance_type='full')
        g.fit(train)
        logprobs, res = g.score_samples(test)
        scores[i] = np.mean(logprobs)
        print "num_comp = %d (of %d) score = %2.4f"%(num_comp, max_comps, scores[i])
    print "best validation, num_comps = %d"%num_comps[scores.argmax()]
    # fit final model to all data
    g = mixture.GMM(n_components = num_comps[scores.argmax()], covariance_type='full')
    g.fit(data)
    # create my own GMM object - it's better!
    return mog_class(g.means_, g.covars_, g.weights_)
if __name__=="__main__":
    # Offline prior-fitting script: loads star/galaxy catalogs, fits
    # mixture-of-Gaussians priors and star<->galaxy proposal models, and
    # pickles each fitted model for later use.
    # NOTE(review): Python 2 source (print statements, cPickle, xrange).
    # read galaxy and star from FITS
    print "reading in galaxy and star fluxes"
    gals_df = df_from_fits('gals.fits')
    stars_df = df_from_fits('stars.fits')
    print "reading in co-added galaxies"
    test_coadd_fn = "../../data/stripe_82_dataset/square_106_4.fit"
    coadd_df = df_from_fits(test_coadd_fn)
    #############################
    # Fit Fluxes and save prior #
    #############################
    # get flux mags and nanomaggies for stars/gals
    bands = ['u', 'g', 'r', 'i', 'z']
    mags_gals = gals_df[['cmodelmag_%s'%b for b in bands]].values
    mags_stars = stars_df[['psfmag_%s'%b for b in bands]].values
    # create flux dataset (take log - make sure to remove any infinite values)
    def to_log_nanomaggies(mags):
        # Log nanomaggie fluxes; rows that map to +/-inf (e.g. sentinel
        # magnitudes) are dropped.
        fluxes = np.log(mags2nanomaggies(mags))
        bad_idx = np.any(np.isinf(fluxes), axis=1)
        return fluxes[~bad_idx,:]
    fluxes_gals = pd.DataFrame(to_log_nanomaggies(mags_gals), columns = bands)
    fluxes_stars = pd.DataFrame(to_log_nanomaggies(mags_stars), columns = bands)
    # create colors dataset
    colors_gals = pd.DataFrame(FluxColorMoG.to_colors(fluxes_gals.values),
                               columns = ['cu', 'cg', 'ci', 'cz', 'r'])
    colors_stars = pd.DataFrame(FluxColorMoG.to_colors(fluxes_stars.values),
                                columns = ['cu', 'cg', 'ci', 'cz', 'r'])
    # fit model to colors and to r band flux (reference band)
    # NOTE(review): data are subsampled (every 1000th row) to keep the
    # cross-validated GMM fits tractable.
    star_flux_mog = fit_mog(colors_stars.values[::1000,:], max_comps = 40, mog_class=FluxColorMoG)
    gal_flux_mog = fit_mog(colors_gals.values[::1000,:], max_comps = 50, mog_class=FluxColorMoG)
    # save pickle files
    with open('gal_fluxes_mog.pkl', 'wb') as f:
        pickle.dump(gal_flux_mog, f)
    with open('star_fluxes_mog.pkl', 'wb') as f:
        pickle.dump(star_flux_mog, f)
    #####################################################################
    # fit model to galaxy shape parameters
    #
    # re - [0, infty], transformation log
    # ab - [0, 1], transformation log (ab / (1 - ab))
    # phi - [0, 180], transformation log (phi / (180 - phi))
    #
    ######################################################################
    print "fitting galaxy shape"
    # Stack exponential and de Vaucouleurs shape fits, subsampled 1-in-3.
    shape_df = np.row_stack([ coadd_df[['expRad_r', 'expAB_r', 'expPhi_r']].values,
                              coadd_df[['deVRad_r', 'deVAB_r', 'deVPhi_r']].values ])[::3,:]
    # Map each bounded shape parameter onto an unconstrained scale (see the
    # transformations listed in the banner above).
    shape_df[:,0] = np.log(shape_df[:,0])
    shape_df[:,1] = np.log(shape_df[:,1]) - np.log(1.-shape_df[:,1])
    shape_df[:,2] = shape_df[:,2] * (np.pi / 180.)
    bad_idx = np.any(np.isinf(shape_df), axis=1)
    shape_df = shape_df[~bad_idx,:]
    gal_re_mog = fit_mog(shape_df[:,0], mog_class = GalRadiusMoG, max_comps=50)
    gal_ab_mog = fit_mog(shape_df[:,1], mog_class = GalAbMoG, max_comps=50)
    with open('gal_re_mog.pkl', 'wb') as f:
        pickle.dump(gal_re_mog, f)
    with open('gal_ab_mog.pkl', 'wb') as f:
        pickle.dump(gal_ab_mog, f)
    #####################################################################
    # fit star => galaxy proposal distributions
    #
    # re - [0, infty], transformation log
    # ab - [0, 1], transformation log (ab / (1 - ab))
    # phi - [0, 180], transformation log (phi / (180 - phi))
    #
    ######################################################################
    import CelestePy.util.data as du
    from sklearn.linear_model import LinearRegression
    coadd_df = du.load_celeste_dataframe("../../data/stripe_82_dataset/coadd_catalog_from_casjobs.fit")
    # make star => radial extent proposal
    star_res = coadd_df.gal_arcsec_scale[ coadd_df.is_star ].values
    # Clip away zeros so the log below stays finite.
    star_res = np.clip(star_res, 1e-8, np.inf)
    star_res_proposal = fit_mog(np.log(star_res).reshape((-1,1)), max_comps = 20, mog_class = MixtureOfGaussians)
    with open('star_res_proposal.pkl', 'wb') as f:
        pickle.dump(star_res_proposal, f)
    # Dead diagnostic plotting code, kept for manual debugging.
    if False:
        xgrid = np.linspace(np.min(np.log(star_res)), np.max(np.log(star_res)), 100)
        lpdf = star_res_proposal.logpdf(xgrid.reshape((-1,1)))
        plt.plot(xgrid, np.exp(lpdf))
        plt.hist(np.log(star_res), 25, normed=True)
        plt.hist(np.log(star_res), 25, normed=True, alpha=.24)
        plt.hist(star_res_proposal.rvs(684).flatten(), 25, normed=True, alpha=.24)
    # make star fluxes => gal fluxes proposal (for stars)
    colors = ['ug', 'gr', 'ri', 'iz']
    star_mags = np.array([du.colors_to_mags(r, c)
                          for r, c in zip(coadd_df.star_mag_r.values,
                                          coadd_df[['star_color_%s'%c for c in colors]].values)])
    gal_mags = np.array([du.colors_to_mags(r, c)
                         for r, c in zip(coadd_df.gal_mag_r.values,
                                         coadd_df[['gal_color_%s'%c for c in colors]].values)])
    # look at galaxy fluxes regressed on stars
    x = star_mags[coadd_df.is_star.values]
    y = gal_mags[coadd_df.is_star.values]
    star_mag_model = LinearRegression()
    star_mag_model.fit(x, y)
    star_residuals = star_mag_model.predict(x) - y
    # Attach residual covariance so the proposal can model its own error.
    star_mag_model.res_covariance = np.cov(star_residuals.T)
    star_resids = np.std(star_mag_model.predict(x) - y, axis=0)
    with open('star_mag_proposal.pkl', 'wb') as f:
        pickle.dump(star_mag_model, f)
    for i in xrange(5):
        plt.scatter(star_mag_model.predict(x)[:,i], y[:,i], label=i, c=sns.color_palette()[i])
    plt.legend()
    plt.show()
    # look at star fluxes regressed on galaxy fluxes
    x = gal_mags[~coadd_df.is_star.values]
    y = star_mags[~coadd_df.is_star.values]
    gal_mag_model = LinearRegression()
    gal_mag_model.fit(x, y)
    gal_residuals = gal_mag_model.predict(x) - y
    gal_mag_model.res_covariance = np.cov(gal_residuals.T)
    with open('gal_mag_proposal.pkl', 'wb') as f:
        pickle.dump(gal_mag_model, f)
    for i in xrange(5):
        plt.scatter(gal_mag_model.predict(x)[:,i], y[:,i], label=i, c=sns.color_palette()[i])
    plt.legend()
    plt.show()
    ######################################
    # load in models and test them       #
    ######################################
    # Round-trip check: reload a pickled prior and evaluate/differentiate it.
    gal_flux_mog = pickle.load(open('gal_fluxes_mog.pkl', 'rb'))
    gal_flux_mog.logpdf(fluxes_gals.values[:100,:])
    from autograd import grad
    grad(lambda th: np.sum(gal_flux_mog.logpdf(th)))(fluxes_gals.values[:10,:])
    ###############################################################
    # Visualize distribution of fluxes, colors, shapes, etc       #
    ###############################################################
    # visualize pairwise distributions
    import matplotlib.pyplot as plt
    plt.ion()
    import seaborn as sns
    shape_df = pd.DataFrame(shape_df, columns=['sigma', 'ab', 'phi'])
    sns.pairplot(shape_df)
    plt.show()
    colors_stars['r'] = fluxes_stars['r']
    sns.pairplot(colors_stars)
    plt.suptitle("Stars")
    fig = plt.figure()
    sns.pairplot(fluxes_gals.iloc[::1000,:])
    plt.suptitle("Gals")
    plt.show()
    sns.jointplot(colors_stars['cg'] - colors_stars['cu'], colors_stars['ci'] - colors_stars['cz'])
    plt.show()
|
import sys
sys.stdin=open("input.txt", "r")
'''
# 사용해야 하는 자료 구조 = stack
: String을 그대로 사용하거나 Array를 사용하면 수정/삭제가 잦아서 시간복잡도 문제가 생긴다.
: 커서를 기준으로 좌우로 나누어서 stack 2개에 저장하면 될 것 같다.
: dequeue가 아니라 stack을 사용하는 이유는 문자열을 컨트롤할 때는
: 한번에 커서 전후의 data 하나씩만 조작가능하기 때문에 data가 들어오고 나가는 출입구는 하나면 된다.
# 문제 풀이 아이디어
: 커서 좌우를 담당하는 stack을 만들어서
: 입력에 맞게 문자열을 조작하고
: 마지막에 합쳐서 출력한다.
# 의사코드
1. stack 2개를 선언한다.
2. 입력을 받는다.
3. 입력된 문자열을 반복문을 돌면서
3-1. 문자가 입력되면 왼쪽 stack에 넣는다.
3-2. 커서가 왼쪽으로 이동하면 왼쪽 stack에서 빼서 오른쪽에 넣는다.
3-3. 커서가 오른쪽으로 이동하면 오른쪽에서 왼쪽으로 넣는다.
3-4. 백스페이스가 들어오면 왼쪽 스택에서 pop한다.
4. stack에 남아있는 결과물을 출력한다.
4-1. 왼쪽 스택은 pop된 순서의 역순으로
4-2. 오른쪽 스택은 pop된 순서대로 출력한다.
'''
n = int(input())  # number of test cases
for _ in range(n):
    commands = input()
    # Two stacks model the text around an editor cursor: `left` holds the
    # characters before it, `right` holds the characters after it (with the
    # nearest character on top).
    left = []
    right = []
    for command in commands:
        if command == "-":
            # Backspace: drop the character just before the cursor.
            if left:
                left.pop()
        elif command == "<":
            # Move cursor left: top of `left` crosses over to `right`.
            if left:
                right.append(left.pop())
        elif command == ">":
            # Move cursor right: top of `right` crosses back to `left`.
            if right:
                left.append(right.pop())
        else:
            # Ordinary character: insert at the cursor position.
            left.append(command)
    # `right` is stored nearest-first, so reverse it before concatenating.
    right.reverse()
    result = left + right
    print(''.join(result))
'''
# 사용해야 하는 자료 구조 = heap
: 가장 작은 카드묶음부터 비교해야 최소한으로 비교할 수 있다.
: 최소 힙을 사용하면 된다.
# 문제 풀이 아이디어
: 총 n - 1 비교를 하는데
: 가장 작은 두 개의 카드뭉치는 n - 1번 비교하고
: 세 번째로 작은 카드뭉치부터는 n - cnt - 1번 비교한다.
# 의사코드
1. 입력을 받는대로 최소 heap에 넣는다.
2. cnt = 1로 설정한다.
3. pop 2번한 것을 더하고 n - cnt을 곱한다.
4. while heap: 반복문을 돌면서
4-1. cnt += 1하고
4-2. pop한 것 곱하기 n - cnt해서 이전 합에 더한다.
5. 결과를 출력한다.
'''
'''
# 사용해야 하는 자료구조 = heap
: 매번 비교할 때마다 카드 뭉치 중에서 가장 작은 것 2개를 뽑아야 한다.
: 최소 힙 사용
# 문제 풀이 아이디어
: 총 n - 1번 비교를 하는데 그 때마다 가장 작은 뭉치 2개를 비교
: 비교해서 만들어진 카드 뭉치를 다시 힙에 넣어야 한다.
# 의사코드
1. 입력을 받아서 최소 heap에 넣는다.
2. result = 0으로 선언한다.
3. 최소 heap에서 2개를 뽑아서 더한 값을
3-1. result에 더하고
3-2. heap에 넣는다.
4. heap의 길이가 1이면 result를 출력한다.
# 시간복잡도
: heap에 넣고 빼는 것이 O(logn)
: n - 1 번 반복된다.
: 최종적으로 O(nlogn)
'''
# import sys
# import heapq
# hq = []
# n = int(input())
# for _ in range(n):
# heapq.heappush(hq, int(sys.stdin.readline()))
# result = 0
# while len(hq) > 1:
# current = heapq.heappop(hq) + heapq.heappop(hq)
# result += current
# heapq.heappush(hq, current)
# print(result)
'''
# 사용해야 하는 자료구조 = heap, deque
: 어떤 수열이 들어올지 모르니까 중앙값을 구하는 규칙성을 찾을 수는 없다.
: 따라서 중앙값을 구하려면 수열이 들어올 때마다 정렬을 해야하는데
: 그런 상황에서 가장 시간복잡도가 낮은 자료구조가 힙이다.
: 수열을 저장하고 하나하나 힙에 넣기 위해서 deque를 사용한다.
: Array를 사용하면 pop(0)가 시간복잡도가 O(n)이라 오래 걸린다.
# 문제 풀이 아이디어
: heap을 선언해서 수열을 순서대로 넣고
: 홀수인 n번째가 들어갈 때 마다 n // 2 + 1까지 빼서 출력한다.
# 의사코드
1. 입력을 받고 heap을 선언한다.
2. m // 10 + 1만큼 반복문을 돌면서 수열을 전부 deque에 저장한다.
3. 첫줄에는 m // 2 + 1을 출력한다.
4. deque에서 heap을 넣는데 넣을 때 마다 cnt를 센다.
4-1. cnt가 홀수일 때 cnt // 2 만큼빼고 하나 더 뺀 것을 출력한다.
4-2. 그리고 다시 넣는다.
5. cnt % 20 == 19일 때마다 줄바꿈을 한다.
# 시간복잡도
: heap은 삽입/삭제가 O(logn)
: 중간값을 구하는 과정에서 삽입/삭제가 많이 일어나는데 O(n) ~ O(n**2)가 아닐까 싶다.
# 블로그 참고
: 최대 heap하나와 최소 heap하나를 두고 두 heap의 길이를 동일하게 유지하면서
: 중앙값을 찾는 방법이 더 시간복잡도가 낮아 보인다 (삽입/삭제를 덜 실시함.)
'''
# import heapq
# from collections import deque
# t = int(input())
# for _ in range(t):
# m = int(input())
# print(m // 2 + 1)
# dq = []
# for _ in range(m // 10 + 1):
# dq += list(map(int, input().split()))
# dq = deque(dq)
# cnt = 0
# hq = []
# while cnt < m:
# heapq.heappush(hq, dq.popleft())
# cnt += 1
# if cnt % 2 == 1:
# interim = []
# for _ in range(cnt // 2):
# interim.append(heapq.heappop(hq))
# median = heapq.heappop(hq)
# print(median, end=" ")
# interim.append(median)
# for num in interim:
# heapq.heappush(hq, num)
# if cnt % 20 == 19:
# print()
# print()
|
from Heap import MaxHeap
# Binary max heap percolate down
def max_heap_percolate_down(node_index, heap_list, list_size):
    """Sift heap_list[node_index] down until the max-heap property holds.

    Only the first `list_size` entries of `heap_list` are treated as part
    of the heap; the list is modified in place.
    """
    value = heap_list[node_index]
    child_index = 2 * node_index + 1
    while child_index < list_size:
        # Find the larger of the (up to two) children, if it beats `value`.
        max_value, max_index = value, -1
        for offset in range(2):
            idx = child_index + offset
            if idx < list_size and heap_list[idx] > max_value:
                max_value = heap_list[idx]
                max_index = idx
        if max_value == value:
            # Neither child is larger: heap property restored.
            return
        # Swap the node with its larger child and continue from there.
        heap_list[node_index], heap_list[max_index] = (
            heap_list[max_index],
            heap_list[node_index],
        )
        node_index = max_index
        child_index = 2 * node_index + 1
# Sorts the list of numbers using the heap sort algorithm
def heap_sort(numbers):
    """Heap-sort `numbers` in place, printing the heap at each stage."""
    print("Initial Array")
    print(MaxHeap(numbers))

    # Heapify: sift down every internal node, last parent first.
    for i in range(len(numbers) // 2 - 1, -1, -1):
        max_heap_percolate_down(i, numbers, len(numbers))
    print("After Heapify")
    print(MaxHeap(numbers))

    # Repeatedly move the current maximum to the end of the list, then
    # restore the heap property on the shrunken prefix.
    for end in range(len(numbers) - 1, 0, -1):
        numbers[0], numbers[end] = numbers[end], numbers[0]
        print("After Swap")
        print(MaxHeap(numbers))
        max_heap_percolate_down(0, numbers, end)
        print("After Filter")
        print(MaxHeap(numbers))
|
from __future__ import division
try:
from collections.abc import Iterable
except:
from collections import Iterable
import warnings
from itertools import product
import numpy as np
# TODO: Incorporate @pablodecm's cover API.
__all__ = ["Cover", "CubicalCover"]
class Cover:
    """Helper class that defines the default covering scheme

    It calculates the cover based on the following formula for overlap. (https://arxiv.org/pdf/1706.00204.pdf)

    ::

                   |cube[i] intersection cube[i+1]|
        overlap = --------------------------------------
                              |cube[i]|

    Parameters
    ============

    n_cubes: int
        Number of hypercubes along each dimension. Sometimes referred to as resolution.

    perc_overlap: float
        Amount of overlap between adjacent cubes calculated only along 1 dimension.

    limits: Numpy Array (n_dim,2)
        (lower bound, upper bound) for every dimension
        If a value is set to `np.inf`, the bound will be assumed to be the min/max value of the dimension
        Also, if `limits == None`, the limits are defined by the maximum and minimum value of the lens for all dimensions.
        i.e. `[[min_1, max_1], [min_2, max_2], [min_3, max_3]]`

    Example
    ---------

    ::

        >>> import numpy as np
        >>> from kmapper.cover import Cover
        >>> data = np.random.random((100,2))
        >>> cov = Cover(n_cubes=15, perc_overlap=0.75)
        >>> cube_centers = cov.fit(data)
        >>> cov.transform_single(data, cube_centers[0])
        array([[0.3594448 , 0.07428465],
               ...
               [0.29855631, 0.01209373]])
        >>> hyper_cubes = cov.transform(data, cube_centers)

    """

    def __init__(self, n_cubes=10, perc_overlap=0.5, limits=None, verbose=0):
        # Attributes ending in "_" are populated by fit().
        self.centers_ = None
        self.radius_ = None
        self.inset_ = None
        self.inner_range_ = None
        self.bounds_ = None
        self.di_ = None

        self.n_cubes = n_cubes
        self.perc_overlap = perc_overlap
        self.limits = limits
        self.verbose = verbose

        # Check limits can actually be handled and are set appropriately
        assert isinstance(
            self.limits, (list, np.ndarray, type(None))
        ), "limits should either be an array or None"
        if isinstance(self.limits, (list, np.ndarray)):
            self.limits = np.array(self.limits)
            assert self.limits.shape[1] == 2, "limits should be (n_dim,2) in shape"

    def __repr__(self):
        return "Cover(n_cubes=%s, perc_overlap=%s, limits=%s, verbose=%s)" % (
            self.n_cubes,
            self.perc_overlap,
            self.limits,
            self.verbose,
        )

    def _compute_bounds(self, data):
        """Return (lower, upper) bound arrays for every dimension of `data`.

        Finite entries of `self.limits` win; `inf` entries (and a `limits`
        of None) fall back to the per-dimension min/max of the data.
        """
        # If self.limits is array-like
        if isinstance(self.limits, np.ndarray):
            # limits_array is used so we can change the values of self.limits from None to the min/max
            limits_array = np.zeros(self.limits.shape)
            limits_array[:, 0] = np.min(data, axis=0)
            limits_array[:, 1] = np.max(data, axis=0)
            # Fix: `np.float("inf")` relied on the deprecated np.float alias,
            # which was removed in NumPy 1.24; use np.inf directly.
            limits_array[self.limits != np.inf] = 0
            self.limits[self.limits == np.inf] = 0
            bounds_arr = self.limits + limits_array
            """ bounds_arr[i,j] = self.limits[i,j] if self.limits[i,j] == inf
                bounds_arr[i,j] = max/min(data[i]) if self.limits == inf """
            bounds = (bounds_arr[:, 0], bounds_arr[:, 1])

            # Warn when the given limits fail to cover the data range.
            # Fix: the original used `or`, which only warned when BOTH the
            # minima and the maxima were out of range; covering requires
            # both conditions to hold, so the correct combinator is `and`.
            if not (
                (np.min(data, axis=0) >= bounds_arr[:, 0]).all()
                and (np.max(data, axis=0) <= bounds_arr[:, 1]).all()
            ):
                warnings.warn(
                    "The limits given do not cover the entire range of the lens functions\n"
                    + "Actual Minima: %s\tInput Minima: %s\n"
                    % (np.min(data, axis=0), bounds_arr[:, 0])
                    + "Actual Maxima: %s\tInput Maxima: %s\n"
                    % (np.max(data, axis=0), bounds_arr[:, 1])
                )
        else:  # It must be None, as we checked to see if it is array-like or None in __init__
            bounds = (np.min(data, axis=0), np.max(data, axis=0))
        return bounds

    def fit(self, data):
        """Fit a cover on the data. This method constructs centers and radii in each dimension given the `perc_overlap` and `n_cube`.

        Parameters
        ============

        data: array-like
            Data to apply the cover to. Warning: First column must be an index column.

        Returns
        ========

        centers: list of arrays
            A list of centers for each cube

        """
        # TODO: support indexing into any columns
        di = np.array(range(1, data.shape[1]))
        indexless_data = data[:, di]
        n_dims = indexless_data.shape[1]

        # support different values along each dimension
        ## -- is a list, needs to be array
        ## -- is a singleton, needs repeating
        if isinstance(self.n_cubes, Iterable):
            n_cubes = np.array(self.n_cubes)
            assert (
                len(n_cubes) == n_dims
            ), "Custom cubes in each dimension must match number of dimensions"
        else:
            n_cubes = np.repeat(self.n_cubes, n_dims)

        if isinstance(self.perc_overlap, Iterable):
            perc_overlap = np.array(self.perc_overlap)
            assert (
                len(perc_overlap) == n_dims
            ), "Custom cubes in each dimension must match number of dimensions"
        else:
            perc_overlap = np.repeat(self.perc_overlap, n_dims)

        assert all(0.0 <= p <= 1.0 for p in perc_overlap), (
            "Each overlap percentage must be between 0.0 and 1.0., not %s"
            % perc_overlap
        )

        bounds = self._compute_bounds(indexless_data)
        ranges = bounds[1] - bounds[0]

        # (n-1)/n |range|
        inner_range = ((n_cubes - 1) / n_cubes) * ranges
        inset = (ranges - inner_range) / 2

        # |range| / (2n ( 1 - p))
        radius = ranges / (2 * (n_cubes) * (1 - perc_overlap))

        # centers are fixed w.r.t perc_overlap
        zip_items = list(bounds)  # work around 2.7,3.4 weird behavior
        zip_items.extend([n_cubes, inset])
        centers_per_dimension = [
            np.linspace(b + r, c - r, num=n) for b, c, n, r in zip(*zip_items)
        ]
        # Cartesian product of the per-dimension centers gives one center
        # per hypercube.
        centers = [np.array(c) for c in product(*centers_per_dimension)]

        self.centers_ = centers
        self.radius_ = radius
        self.inset_ = inset
        self.inner_range_ = inner_range
        self.bounds_ = bounds
        self.di_ = di

        if self.verbose > 0:
            print(
                " - Cover - centers: %s\ninner_range: %s\nradius: %s"
                % (self.centers_, self.inner_range_, self.radius_)
            )

        return centers

    def transform_single(self, data, center, i=0):
        """Compute entries of `data` in hypercube centered at `center`

        Parameters
        ===========

        data: array-like
            Data to find in entries in cube. Warning: first column must be index column.
        center: array-like
            Center points for the cube. Cube is found as all data in `[center-self.radius_, center+self.radius_]`
        i: int, default 0
            Optional counter to aid in verbose debugging.
        """
        lowerbounds, upperbounds = center - self.radius_, center + self.radius_

        # Slice the hypercube: a row belongs iff it is inside the bounds
        # along every (non-index) dimension.
        entries = (data[:, self.di_] >= lowerbounds) & (
            data[:, self.di_] <= upperbounds
        )
        hypercube = data[np.invert(np.any(entries == False, axis=1))]

        if self.verbose > 1:
            print(
                "There are %s points in cube %s/%s"
                % (hypercube.shape[0], i + 1, len(self.centers_))
            )

        return hypercube

    def transform(self, data, centers=None):
        """Find entries of all hypercubes. If `centers=None`, then use `self.centers_` as computed in `self.fit`.

        Empty hypercubes are removed from the result

        Parameters
        ===========

        data: array-like
            Data to find in entries in cube. Warning: first column must be index column.
        centers: list of array-like
            Center points for all cubes as returned by `self.fit`. Default is to use `self.centers_`.

        Returns
        =========

        hypercubes: list of array-like
            list of entries in each hypercube in `data`.

        """
        centers = centers or self.centers_
        hypercubes = [
            self.transform_single(data, cube, i) for i, cube in enumerate(centers)
        ]

        # Clean out any empty cubes (common in high dimensions)
        hypercubes = [cube for cube in hypercubes if len(cube)]
        return hypercubes

    def fit_transform(self, data):
        self.fit(data)
        return self.transform(data)
class CubicalCover(Cover):
    """
    Explicit definition of a cubical cover as the default behavior of the cover class. This is currently identical to the default cover class.
    """

    # Intentionally empty: kept as a named alias of Cover so callers can be
    # explicit about which covering scheme they use.
    pass
|
from selenium import webdriver
from selenium.webdriver.common.by import By
import time

# Demo script: opens Baidu, spawns a registration window, types into it,
# switches back, and runs a search before closing both windows.
driver = webdriver.Chrome()
driver.implicitly_wait(10)
driver.get("https://www.baidu.com")

# Handle of the original (search) window.
search_window = driver.current_window_handle

driver.find_element(By.LINK_TEXT, "登录").click()
driver.find_element(By.LINK_TEXT, "立即注册").click()

# Handles of every window now open; index 0 is the original window.
all_handles = driver.window_handles

# Fix: switch_to_window() and find_element_by_*() were deprecated and
# removed in Selenium 4; use switch_to.window() / find_element(By, ...).
driver.switch_to.window(all_handles[1])  # switch to the registration window
driver.find_element(By.NAME, "userName").send_keys("username")
driver.find_element(By.NAME, "phone").send_keys("15708420051")
driver.find_element(By.ID, "TANGRAM__PSP_3__password").send_keys("password")
time.sleep(2)

driver.switch_to.window(all_handles[0])  # back to the first window
driver.find_element(By.ID, "TANGRAM__PSP_4__closeBtn").click()  # close the login popup
driver.find_element(By.ID, "kw").send_keys("selenium")
driver.find_element(By.ID, "su").click()
time.sleep(2)

# Loop-based alternative: locate the target window by comparing handles.
# for handle in all_handles:
#     if handle != search_window:
#         driver.switch_to.window(handle)
#         ... fill the registration form ...
# for handle in all_handles:
#     if handle == search_window:
#         driver.switch_to.window(handle)
#         ... close the popup and search ...

driver.close()
time.sleep(2)
driver.quit()
#Johnathan Hinebrook 9-24-14 -- 10-14-14
#mainprog.py
#This program calls all created mdels and plays the in order
################################################
##Imports
################################################
import time #for timeing
import stry #holds story and most text
import mov2 #holds map and movement
import inv #hold players invatory
import form #holds formatting options
import RanBat2 #hold Random battle genorator
import namelog #Makes a name file for mods to read
import restart #save mode feature
################################################
##Globals
################################################
playerName = "???"
################################################
##Functions
################################################
def askname():
    """Prompt for the player's name, persist it to name.txt, and greet them.

    Sets the module-level `playerName` global used by the rest of the game.
    """
    global playerName
    ins = input()
    if ins == "":
        # Default name when the player just presses enter.
        ins = "Hazel"
        print('I couldnt be bothered to put a name so call me Hazel')
    # Fix: the original left both file handles open; `with` guarantees the
    # file is closed even if a later statement raises.
    with open("name.txt", "w+") as text_file:
        text_file.write(ins)
    with open("name.txt", "r") as text_file:
        playerName = text_file.read()
    print('')
    print('The girls closes her eyes like she is trying to keep it in memory')
    print('')
    time.sleep(2)
    print('Girl: Nice to meet you', playerName)
    print('Girl: My name is Vofasakai but every one calls me Vofa')
    print(playerName, ': Nice to meet you Vofa')
    print(playerName, ': Lets get going before the astroid storm')
def renren():
    # Resume-or-restart dispatcher: reads a save flag from renren.txt
    # ("1" = resume a saved game, "0" = start from the beginning) and runs
    # the main movement/battle loop until the win condition is reached.
    # NOTE(review): the local `renren` shadows this function's name, and the
    # file handle below is reused without closing the last one -- confirm
    # before refactoring.
    text_file = open("renren.txt","r+")
    renren = text_file.read()
    text_file.close()
    print(renren)
    if renren == "1":
        ##load play name from last save
        text_file = open("curmov.txt","r")
        curmov = int(text_file.read())
        # NOTE(review): curmov is read once and never updated inside this
        # loop; progression presumably happens inside mov2.main() (e.g. by
        # exiting/restarting) -- confirm against mov2.
        while curmov <= 9999:
            if curmov <=33:
                mov2.main()
                time.sleep(2)
                RanBat2.main()
            if curmov >= 34:
                form.clear()
                print('You have Won!!!~')
                break
    if renren == "0":
        ##adds playername and starts from begining
        stry.intro()
        stry.gnam()
        askname()
        stry.pt1()
        RanBat2.main()
        stry.pt2()
        time.sleep(7)
        print('Vofa :Okay lets go.')
        text_file = open("curmov.txt","r")
        curmov = int(text_file.read())
        # Same movement/battle loop as the resume branch above.
        while curmov <= 9999:
            if curmov <=33:
                mov2.main()
                time.sleep(2)
                RanBat2.main()
            if curmov >= 34:
                form.clear()
                print("Congrats you escaped")
                print('You have Won!!!~')
                break
def layout():
    """Basic layout of game"""
    # Runs one full session: save-slot handling (restart), the main
    # resume-or-restart loop (renren), then the closing story section.
    print('Rember to break after your selection')
    restart.main()
    renren()
    stry.pt3()
##
##################################################
##Main Function
##################################################
def main():
    """Entry point: run the game."""
    layout()
##################################################
##start
##################################################
main()
##################################################
|
import numpy as np
import random as random
from math import *
from point import Point
class EESTOPlanner():
def __init__(self):
pass
def get_path(self, costMap, startPoint, endPoint):
num_paths = 20
max_num_its = 100
decay_factor = .99
cur_decay = decay_factor
N = 30 |
{
"uidPageCalendar": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidCalendarPanel",
"uidCalendarAddPanel"
]
},
# Calendar
"uidCalendarPanel": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidCalendar",
"uidCalendarAddEventButton"
]
},
"uidCalendarAddEventButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidAdd",
W3Const.w3PropCSS: {
"float": "right",
"margin-top": "10px"
},
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
"W3HideUI('uidCalendarPanel')",
"W3DisplayUI('uidCalendarAddPanel')"
]
}
},
"uidCalendar": {
W3Const.w3PropType: W3Const.w3TypeCalendar
},
# Add
"uidCalendarAddPanel": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidCalendarAddTable",
"uidCalendarAddOperationPanel"
],
W3Const.w3PropCSS: {
"display": "none"
}
},
"uidCalendarAddOperationPanel": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidCalendarAddOperationTable"
]
},
"uidCalendarAddOperationTable": {
W3Const.w3PropType: W3Const.w3TypeTable,
W3Const.w3PropSubUI: [
[], # No header
["uidCalendarAddSubmitButton", "uidCalendarAddCancelButton"]
],
W3Const.w3PropCSS: {
"float": "right"
}
},
"uidCalendarAddCancelButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidCancel",
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
"W3HideUI('uidCalendarAddPanel')",
"W3DisplayUI('uidCalendarPanel')"
]
}
},
"uidCalendarAddSubmitButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidSubmit",
W3Const.w3PropTriggerApi: [
{
W3Const.w3ApiID: "aidAddCalendarEvent",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidCalendarEventName"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidCalendarEventDate"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidCalendarEventRepeat"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidCalendarEventNote"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeVar,
W3Const.w3ApiDataValue: W3Const.w3Session
}]
}],
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
W3Const.w3PlaceHolder_1
]
}
},
"uidCalendarAddTable": {
W3Const.w3PropType: W3Const.w3TypeTable,
W3Const.w3PropSubUI: [
[], # No header
["uidNameLabel", "uidCalendarEventName"],
["uidDatetimeLabel", "uidCalendarEventDate"],
["uidRepeatLabel", "uidCalendarEventRepeat"],
["uidNoteLabel", "uidCalendarEventNote"]
]
},
"uidCalendarEventDate": {
W3Const.w3PropType: W3Const.w3TypeDatePicker
},
"uidCalendarEventName": {
W3Const.w3PropType: W3Const.w3TypeText
},
"uidCalendarEventRepeat": {
W3Const.w3PropType: W3Const.w3TypeText
},
"uidCalendarEventNote": {
W3Const.w3PropType: W3Const.w3TypeText
}
}
|
from flask import Flask, request, redirect, render_template, flash, session
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['DEBUG'] = True
# NOTE(review): database credentials are hard-coded in the connection URI;
# move them to environment variables/config before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://blogz:locker@localhost:8889/blogz'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# NOTE(review): weak, hard-coded session secret; generate a strong random
# key and keep it out of source control.
app.secret_key = "123"
class Blog(db.Model):  # Blog class: one post, linked to its owning User
    id = db.Column(db.Integer, primary_key=True)
    blogpost = db.Column(db.String(5000))   # post body text
    blogtitle = db.Column(db.String(500))   # post title
    owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    def __init__(self, blogpost, blogtitle, owner):
        # 'owner' is a User instance; SQLAlchemy fills owner_id through
        # the 'owner' backref declared on User.blog.
        self.blogpost = blogpost
        self.blogtitle = blogtitle
        self.owner = owner
class User(db.Model):  # User class: an account that owns Blog posts
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True)  # login identifier
    # NOTE(review): password is stored and compared in plain text
    # elsewhere in this file -- hash it before production use.
    password = db.Column(db.String(120))
    blog = db.relationship('Blog', backref='owner')  # user's posts; provides Blog.owner
    def __init__(self, email, password):
        self.email = email
        self.password = password
@app.before_request
def require_login():
    """Redirect anonymous visitors to /login for all non-public routes."""
    # Routes reachable without a session; 'static' keeps CSS/JS loading.
    allowed_routes = ['login', 'signup', "static", 'allposts', 'usersblogs','root']
    if request.endpoint not in allowed_routes and 'email' not in session:
        return redirect('/login')
@app.route("/")
def root():
#somevariable = request.args.get("id")
return render_template("index.html", users=User.query.all())
@app.route("/login", methods = ['GET', 'POST'])
def login():
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
user = User.query.filter_by(email=email).first()
if user and password == user.password:
session['email'] = email
flash("Logged in")
return redirect("/")
else:
flash('User password incorrect, or user does not exist', 'error')
return render_template("login.html")
else:
return render_template ('login.html')
@app.route("/signup", methods = ['GET', 'POST'])
def signup():
if request.method == 'POST':
password = request.form['password']
verify = request.form ['verify']
email = request.form ['email']
existing_user = User.query.filter_by(email=email).first()
has_error = False
if not is_email(email):
flash ("Please enter a valid email")
has_error = True
if password != verify:
flash ('Please enter matching passwords')
has_error = True
if " " in password:
flash ('Please exclude spaces from password')
has_error = True
if existing_user:
flash('This email is already in use')
has_error=True
if has_error:
return redirect ("/signup")
else:
new_user = User(email, password)
db.session.add(new_user)
db.session.commit()
session['email'] = email
return render_template('/Blog-it.html')
else:
return render_template("signup.html")
def is_email(string):
    """Return True when *string* looks like an email address.

    Rules (kept from the original): total length 3-20, contains '@',
    and contains a '.' somewhere after the '@'.
    """
    # BUG FIX: the original overwrote the parameter with
    # request.form["email"] (ignoring the caller's argument) and carried
    # an unreachable duplicate length check after the return statements.
    atsign_index = string.find('@')
    if atsign_index < 0:
        return False
    if len(string) < 3 or len(string) > 20:
        return False
    # Require a dot in the domain part (after the '@').
    return string.find('.', atsign_index) >= 0
def is_email(string):
    """Return True when *string* looks like an email address.

    (This duplicate definition shadows the earlier one; kept for
    compatibility but fixed the same way.) Rules: total length 3-20,
    contains '@', and a '.' somewhere after the '@'.
    """
    # BUG FIX: the original overwrote the parameter with
    # request.form["email"], ignoring the caller's argument.
    atsign_index = string.find('@')
    if atsign_index < 0:
        return False
    if len(string) < 3 or len(string) > 20:
        return False
    return string.find('.', atsign_index) >= 0
@app.route("/Blog-it", methods = ['GET', 'POST'])
def blog():
if request.method == "POST":
owner = User.query.filter_by(email = session ['email']).first()
blog_title = request.form["blogtitle"]
blogpost = request.form["textarea"]
blogsubmit = Blog(blogpost, blog_title, owner )
error_present = False
if not blog_title:
flash("You left the Blog title blank! Give your entry a title :)")
error_present = True
if not blogpost:
flash("You left Blog Content blank! Give us your thoughts :)")
error_present = True
if error_present:
return redirect ("/Blog-it")
else:
db.session.add(blogsubmit)
db.session.commit()
blogid = blogsubmit.id
return redirect("/allposts?id="+str(blogid))
else:
return render_template("Blog-it.html")
@app.route("/showblogs")
def usersblogs():
user_id = request.args.get("user_id")
blogs = Blog.query.filter_by(owner_id=int(user_id))
return render_template("showblogs.html", blogs=blogs)
@app.route("/allposts")
def allposts():
somevariable = request.args.get("id")
theblog = Blog.query.filter_by(id=somevariable).first()
return render_template("allposts.html", allblogs = Blog.query.all(),
theblog=theblog, somevariable=somevariable)
@app.route("/logout")
def logout():
session.clear()
return render_template ("logout.html")
if __name__=="__main__":
app.run()
|
from django.urls import path
from . import views
# Route table for this app: single endpoint for adding a table.
urlpatterns = [
    path('ajouter_table', views.ajouter_table, name = 'ajouter_table'),
]
"""
CSC131 - Computational Thinking
Missouri State University, Spring 2018
This module contains solutions to the problems found at the end of Chapter 5.
File: projects.py
"""
from functools import reduce
def is_sorted(my_list: list) -> bool:
    """
    Project 6.5 - Defining a predicate.
    :param my_list: the sequence to inspect
    :return: Returns True if my_list is sorted in ascending order or False otherwise.
    """
    # Every adjacent pair must be non-decreasing. Lists of length 0 or 1
    # are trivially sorted (zip yields no pairs, so all() is True).
    return all(left <= right for left, right in zip(my_list, my_list[1:]))
def project6_5() -> None:
    """Demonstrate is_sorted() on empty, singleton, sorted, and
    almost-sorted lists (prints True, True, True, False)."""
    my_list = []
    print(is_sorted(my_list))
    my_list = [1]
    print(is_sorted(my_list))
    my_list = list(range(10))
    print(is_sorted(my_list))
    my_list[9] = 3  # break the ascending order
    print(is_sorted(my_list))
def project6_9(file_name: str = "numbers.dat") -> None:
    """
    Project 6.9 - Print the average of the numbers in a file.

    :param file_name: File of whitespace-separated numbers; when None is
        passed explicitly, the user is prompted for a name.
    :return: None
    """
    if file_name is None:
        file_name = input("Enter the input file name: ")
    # Read the numbers as strings into a list.
    # BUG FIX: 'with' guarantees the file is closed (the original leaked
    # the handle).
    a_list = []
    with open(file_name, 'r') as input_file:
        for line in input_file:
            a_list.extend(line.split())
    # Convert all the strings in the list to numbers.
    a_list = list(map(float, a_list))
    # BUG FIX: guard the empty-file case *before* summing -- the
    # original called reduce() first, which raises TypeError on an empty
    # sequence, making the later empty check unreachable.
    if len(a_list) == 0:
        average = 0
    else:
        average = sum(a_list) / len(a_list)
    print("The average is", average)
def main():
    """Run the chapter's project solutions in order."""
    project6_5()
    project6_9()
if __name__ == "__main__":
    main()
import pandas as pd

# Subject scores for two students; student2 has no English score, so
# element-wise arithmetic (which aligns on the index) yields NaN there.
student1 = pd.Series({'국어': 100, "영어": 80, '수학': 90})
student2 = pd.Series({'수학': 80, '국어': 90})
print(student1, student2, sep='\n')
print()
print("# 두 학생의 과목별 점수로 사칙연산 수행 (시리즈 vs. 시리즈)")
addition = student1 + student2
subtraction = student1 - student2
multiplication = student1 * student2
division = student1 / student2
print(type(division))
print("# 사칙연산 결과를 데이터 프레임으로 합치기 (시리즈 -> 데이터프레임)")
# Stack the four result Series as rows of one DataFrame.
result = pd.DataFrame([addition, subtraction, multiplication, division],
                      index=['덧셈', '뺄셈', '곱셈', '나눗셈'])
print(result)
# class sports:
# game = "cricket"
# def __init__(self, name, game, value):
# self.name=name
# self.game=game
# self.value=value
# def details(self):
# print(f"name of the player is {self.name}")
# print(f"game of the player is {self.game}")
# print(f"value of the player is {self.value}")
# rohit =sports("sunil chetri","football",150)
# rohit1 =sports("virat kohli","cricket",140)
# rohit.details()
# rohit1.details()
class employee:
    # Class-level default read via self.company in getSalary.
    company = "adobe"
    def __init__(self, name, prod, sal):
        """Store the employee's name, product, and salary."""
        self.name = name
        self.product=prod
        self.salary = sal
    def getinfo(self):
        """Print a one-line summary of the employee.

        NOTE(review): 'sand salary' in the output string looks like a
        typo for 'and salary' -- left untouched (runtime string).
        """
        print(f"name of the employee {self.name} and product is {self.product} sand salary {self.salary}")
    def getSalary(self, signature):
        """Print the salary line followed by the caller-supplied signature."""
        print(f"Salary for this employee working in {self.company} is {self.salary}\n{signature}")
cali=employee("robin","antivirus", 10000)
cali.getinfo()
cali.getSalary("sign")
|
import pygame, sys
from player import Player
from bullet import Bullet
class Game:
    """Main game object: owns the window, clock, player, bullets, and loop."""
    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode((1500, 800))
        pygame.display.set_caption('Runner!')
        self.clock = pygame.time.Clock()
        self.fps = 60  # frame-rate cap used by clock.tick()
        self.player = Player(self)
        self.bg = pygame.image.load('images/bg.png')
        self.bg = pygame.transform.scale(self.bg, (1500, 800))
        self.bullets = pygame.sprite.Group()
    def shoot(self):
        """Fire a bullet in the player's current movement direction.

        (The original Russian docstring said "shoot upward", but the code
        spawns bullets at the player's left/right edge -- confirm intent.)
        """
        if self.player.moving_right:
            bullet = Bullet(self, self.player.rect.midright, self.player.rect.midright)
            self.bullets.add(bullet)
        elif self.player.moving_left:
            bullet = Bullet(self, self.player.rect.midleft, self.player.rect.midleft)
            self.bullets.add(bullet)
    def update_screen(self):
        """Advance the player and bullets one frame, then redraw everything."""
        self.player.jump()
        self.player.update()
        self.bullets.update()
        self.screen.blit(self.bg, (0, 0))
        self.player.blitme()
        self.bullets.draw(self.screen)
        pygame.display.flip()
    def run_game(self):
        """Event loop: Q quits, A/D move, keypad-Enter shoots, Space jumps."""
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        sys.exit()
                    if event.key == pygame.K_d:
                        self.player.moving_right = True
                    if event.key == pygame.K_a:
                        self.player.moving_left = True
                    if event.key == pygame.K_KP_ENTER:
                        self.shoot()
                    # Only start a jump when not already airborne.
                    if not self.player.is_jump:
                        if event.key == pygame.K_SPACE:
                            self.player.is_jump = True
                elif event.type == pygame.KEYUP:
                    if event.key == pygame.K_d:
                        self.player.moving_right = False
                    if event.key == pygame.K_a:
                        self.player.moving_left = False
            self.update_screen()
            self.clock.tick(self.fps)
if __name__ == '__main__':
    # Create the game instance and start the loop.
    game = Game()
    game.run_game()
|
'''
Complete the function that accepts a string parameter, and reverses each word in the string. All spaces in the string should be retained.
Examples:
"This is an example!" ==> "sihT si na !elpmaxe"
"double spaces" ==> "elbuod secaps"
'''
def reverse_words(text):
    """Reverse each space-delimited word of *text*, keeping all spaces.

    Splitting on a single space turns runs of spaces into empty "words",
    so multiple consecutive spaces survive the round trip.

    >>> reverse_words("double  spaces")
    'elbuod  secaps'
    """
    # BUG FIX: removed the stray debug print of the split list; the
    # loop/append was also collapsed into a generator fed to join.
    return " ".join(word[::-1] for word in text.split(" "))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
buzpin = 23
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(buzpin,GPIO.OUT)
def beep(cnt):
    """Pulse the buzzer on 'buzpin' cnt times (0.2 s on / 0.2 s off)."""
    for _ in range(cnt):
        GPIO.output(buzpin, True)
        time.sleep(0.2)
        GPIO.output(buzpin, False)
        time.sleep(0.2)
# Two short beeps to signal start-up.
beep(2)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Move a NAO robot's head: stiffen the whole body, then drive HeadYaw to
# 1.2 rad over 1.0 s (blocking call -- the final True waits for completion,
# per the ALMotion API; confirm against the naoqi docs).
from naoqi import ALProxy
from naoconfig import *
proxyMo = ALProxy('ALMotion',robot_IP,robot_port)
proxyMo.stiffnessInterpolation('Body', 1.0, 1.0)
proxyMo.angleInterpolation('HeadYaw', 1.2, 1.0, True)
|
import random
from numpy import array
from scipy.cluster.vq import kmeans
import numpy as np
#author name as key , and index as value
def read_authors_dict():
    """Read data/authors.txt ('index|name' lines, header skipped) and
    return a dict mapping author name -> index (first occurrence wins).
    """
    authors_dict = {}
    # BUG FIX: 'with' guarantees the handle is closed even when parsing
    # raises (the original left the file open on error).
    with open("data/authors.txt", "r") as infile:
        infile.readline()  # skip the header line
        for line in infile:
            fields = line.strip().split('|')
            if fields[1] not in authors_dict:
                authors_dict[fields[1]] = fields[0]
    return authors_dict
#interest name as key, and index as value
def read_interests_dict():
    """Read data/subDomain.txt ('index|name' lines, no header) and return
    a dict mapping interest name -> index (first occurrence wins).
    """
    interests_dict = {}
    # BUG FIX: 'with' guarantees the handle is closed even when parsing
    # raises (the original left the file open on error).
    with open("data/subDomain.txt", "r") as infile:
        for line in infile:
            fields = line.strip().split('|')
            if fields[1] not in interests_dict:
                #interests_dict[fields[1]] = str(int(fields[0])-1)
                interests_dict[fields[1]] = fields[0]
    return interests_dict
#different from build_author_interest_dict in combine_feature.py
def build_author_interest_dict(interests_dict, authors_dict):
infile = open("data/labeled_authors_info.txt","r")
author_interest_dict = {}
for line in infile:
interests_list = [] #for each author create a interests_list
fields = line.strip().split("|")
author = fields[0]
for field in fields[1:-1]:
if field in interests_dict:
index = int(interests_dict[field])
interests_list.append(index)
else:
print field
raise SyntaxError
if fields[0] not in author_interest_dict:
index = authors_dict[fields[0]]
author_interest_dict[index] = interests_list
print len(author_interest_dict)
return author_interest_dict
"""
def author_interest_output_dict():
infile = open("data/2014/mf/recovered_matrix4.txt","r")
author_interest_output_dict = {}
for line in infile:
interest_index = 1
fields = line.strip().split(' ')
if fields[0] not in author_interest_output_dict:
author_interest_output_dict[fields[0]] = []
else:
print 'error'
for f in fields[65:]:
if float(f) > 0:
author_interest_output_dict[fields[0]].append(interest_index)
interest_index += 1
infile.close()
return author_interest_output_dict
"""
def author_interest_output_dict():
    # Map author id -> predicted interest indices (1-based).
    # For each matrix row, 1-D k-means (k=3) clusters the 24 interest
    # scores found in columns 65+; interests landing in the cluster with
    # the highest centroid are taken as predictions.
    infile = open("data/2014/mf/new_recovered_matrix16.txt","r")
    author_interest_output_dict = {}
    for line in infile:
        interest_index = 1  # NOTE(review): leftover from the commented-out variant above; unused here
        fields = line.strip().split(' ')
        author = fields[0]
        if author not in author_interest_output_dict:
            author_interest_output_dict[author] = []
        else:
            print 'error'
        interests = [float(i) for i in fields[65:]]
        r1, r2 = kmeans(array(interests),3)  # r1 holds the 3 centroids
        # Index of the largest centroid == the "high interest" cluster.
        index = np.where(r1 == r1.max())[0][0]
        for i in range(24):
            # Assign score i to its nearest centroid; keep the interest
            # when that centroid is the high cluster.
            distances = abs(r1-interests[i])
            index2 = np.where(distances == distances.min())[0][0]
            if index2 == index:
                author_interest_output_dict[author].append(i+1)
    infile.close()
    return author_interest_output_dict
def eval(author_interest_dict,author_interest_output_dict):
    # Print average accuracy/precision/recall/F-score of predicted
    # interests against the labeled ones.
    # NOTE(review): shadows the built-in eval(); the averages divide by
    # the size of the *labeled* dict while iterating the *predicted*
    # dict -- confirm both cover the same authors.
    size = len(author_interest_dict)
    net_precision = 0
    net_accuracy = 0
    net_recall = 0
    net_F_score = 0
    count = 0  # NOTE(review): unused leftover
    for author in author_interest_output_dict:
        print 'author:',author
        real = author_interest_dict[author]
        print 'real',real
        length_real = len(real)
        predict = author_interest_output_dict[author]
        print 'predict',predict
        length_intersection = len(set(real).intersection(predict))
        length_union = len(set(real).union(predict))  # NOTE(review): computed but unused
        precision = length_intersection / float(len(predict))
        recall = length_intersection / float(len(real))
        if (precision + recall) != 0:
            F_score = 2*precision*recall / (precision+recall)
        else:
            F_score = 0
        # 'accuracy' here equals recall (intersection / #labeled).
        accuracy = length_intersection / float(length_real)
        net_accuracy += accuracy
        net_precision += precision
        net_recall += recall
        net_F_score += F_score
    print 'accuracy', net_accuracy / float(size)
    print 'precision',net_precision / float(size)
    print 'recall',net_recall / float(size)
    print 'F_score',net_F_score / float(size)
if __name__ == '__main__':
    # Build lookup tables, load labels and predictions, then score them.
    authors_dict = read_authors_dict()
    interests_dict = read_interests_dict()
    author_interest_dict = build_author_interest_dict(interests_dict,authors_dict)
    author_interest_output_dict = author_interest_output_dict()  # rebinds (shadows) the function name
    eval(author_interest_dict,author_interest_output_dict)
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score

# Simple linear regression on a single feature (column 2) of the
# scikit-learn diabetes dataset: first fit on the full data, then with a
# train/validation split holding out the last 60 samples.
diabetes = datasets.load_diabetes()
x = diabetes.data[:, np.newaxis, 2]  # keep 2-D shape (n_samples, 1) as sklearn expects
y = diabetes.target

# ---- Fit on the full dataset ----
modelo = linear_model.LinearRegression()
modelo.fit(x, y)
plt.scatter(x, y, color = "black")
plt.plot(x, modelo.predict(x), color = "cyan", linewidth = 3)
mean_squared_error(y, modelo.predict(x))
r2_score(y, modelo.predict(x))

# Entrenamiento y validacion
x_train = x[:-60]
x_test = x[-60:]
y_train = y[:-60]
y_test = y[-60:]

## Entrenamiento
# BUG FIX: the original assigned the LinearRegression *class* (missing
# parentheses), so the following .fit() call would fail on the unbound
# method; instantiate the model.
modelo = linear_model.LinearRegression()
modelo.fit(x_train, y_train)
plt.scatter(x_train, y_train, color = "black")
plt.plot(x_train, modelo.predict(x_train), color = "cyan", linewidth = 3)
mean_squared_error(y_train, modelo.predict(x_train))
r2_score(y_train, modelo.predict(x_train))

## Validacion
y_pred = modelo.predict(x_test)
mean_squared_error(y_test, y_pred)
r2_score(y_test, y_pred)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
import json
import os
import getpass
from bs4 import BeautifulSoup
import sys
import getopt
import subprocess
api_key_path = os.getcwd() + "/api.txt"
# 1。获得app路径
# 2。生成ipa包
# 3。登录蒲公英得到 key (保存key到文件)
# 4。上传ipa到蒲公英 (添加进度条)
# 5。通知测试用户
def get_app_path():
    """Return the .app path passed with -p, exiting on bad options.

    NOTE(review): when -p is given but the path does not exist, the
    raised Exception is NOT caught by the getopt.GetoptError handler and
    propagates to the caller; an empty option list makes opts[0] raise
    IndexError, also uncaught.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "p:",)
        _opts = opts[0]  # first (option, value) tuple
        if '-p' in _opts:
            path = _opts[1]
            if os.path.exists(path):
                return path
            else:
                print(path)
                raise Exception("\n***************xxx.app文件不存在\n")
    except getopt.GetoptError as e:
        print('error is %s' % str(e))
        sys.exit(1)
#编译打包 得到ipa
def bulidIPA(app_path):
    """Package the .app at *app_path* into an ipa.

    Copies the .app under <parent>/packBagPath, zips it, and renames the
    archive to Payload.ipa. Returns the directory containing the ipa.
    (Function name typo kept: callers use bulidIPA.)
    """
    dirs = app_path.split("/")
    dirs.pop()  # drop the .app component -> parent directory
    app_dir = "/".join(dirs)
    pack_bag_path = app_dir + "/packBagPath"
    pay_load_path = app_dir + "/PayLoadPath"
    subprocess.call(["rm", "-rf", pack_bag_path])
    subprocess.call(["mkdir", "-p", pay_load_path])
    subprocess.call(["cp", "-r", app_path, pay_load_path])
    subprocess.call(["mkdir", "-p", pack_bag_path])
    subprocess.call(["cp", "-r", pay_load_path, pack_bag_path])
    subprocess.call(["rm", "-rf", pay_load_path])
    os.chdir(pack_bag_path)
    subprocess.call(["zip", "-r", "./Payload.zip", "."])
    print("\n***************打包成功\n")
    # BUG FIX: the archive was created as 'Payload.zip' but renamed from
    # 'payload.zip', which only worked on case-insensitive filesystems
    # (macOS default); use the real name.
    subprocess.call(["mv", "Payload.zip", "Payload.ipa"])
    # NOTE(review): the folder copied into packBagPath is 'PayLoadPath',
    # so this removes nothing and the ipa's inner folder is not the
    # conventional 'Payload' -- verify the ipa installs correctly.
    subprocess.call(["rm", "-rf", "./Payload"])
    return pack_bag_path
# 获取apikey
def get_api_key():
    """Return the cached 32-character PGYER api_key, re-logging in when
    the cache file is missing or holds an invalid value."""
    if not os.path.exists(api_key_path):
        return pgy_api(login())[1]
    with open(api_key_path, "r+") as f:
        api_key = f.read()
    if len(api_key) == 32:
        return api_key
    print("\n***************api_key错误,重新登录\n")
    return pgy_api(login())[1]
# 登录蒲公英
def login():
    """Interactively log in to PGYER and return the session cookies.

    Prompts for credentials and retries (recursively) until the server
    reports success (code == 0).
    """
    url = 'https://www.pgyer.com/user/login'
    email = input("请输入蒲公英账号 Enter结束:")
    password = getpass.getpass('请输入蒲公英密码 Enter结束:')
    data = {
        'email': email,
        'password': password
    }
    req = requests.post(url, data=data)
    req.encoding = 'UTF-8'
    rp = json.loads(req.text)
    status_code = rp['code']
    if status_code == 0:
        return req.cookies
    else:
        print(rp['message'])
        # BUG FIX: the original called login() without returning its
        # result, so a successful retry still returned None to the caller.
        return login()
# 获取蒲公英api_key user_k
def pgy_api(cookies):
    """Scrape the PGYER account page for keys using logged-in *cookies*.

    Caches the api_key to api_key_path and returns (user_key, api_key).
    """
    user_url = 'https://www.pgyer.com/account/api'
    req_user = requests.get(url=user_url, cookies=cookies)
    req_user.encoding = 'UTF_8'
    b = BeautifulSoup(req_user.text, 'html.parser')
    code_tag = b.find_all("code")
    api_key = ''
    user_key = ''
    # The first <code> element holds the api_key and a later one the
    # user_key -- presumably; confirm against the page layout.
    for i in range(len(code_tag)):
        n = code_tag[i]
        if i == 0:
            api_key = n.contents[0]
        else:
            user_key = n.contents[0]
    f = open(api_key_path,"w")
    f.write(api_key)
    f.close()
    return (user_key, api_key)
# 上传ipa到蒲公英
def my_callback(monitor):
    """Print upload progress; *monitor* supplies .bytes_read and .len."""
    progress = (monitor.bytes_read / monitor.len) * 100
    print('文件上传进度:%d%%(%d/%s)' % (progress,monitor.bytes_read,monitor.len),end = "")
    # print("\r 文件上传进度:%d%%(%d/%d)" %(progress, monitor.bytes_read, monitor.len), end = " ")
def uploadIPA(ipa_path,updata_des):
    """Upload the ipa at *ipa_path* to PGYER with update notes *updata_des*.

    Raises Exception when the ipa is missing or the API reports an error.
    """
    api_key = get_api_key()
    if not os.path.exists(ipa_path):
        raise Exception("\n***************没有找到iap包\n")
    print("\n***************开始上传到蒲公英\n")
    url = 'https://www.pgyer.com/apiv2/app/upload'
    data = {
        '_api_key': api_key,
        'buildInstallType': '3',
        'buildPassword': '',
        'buildUpdateDescription': updata_des
    }
    # BUG FIX: the file handle passed to requests was never closed --
    # 'with' releases it once the POST finishes. The dead 'loading'/'st'
    # progress-spinner leftovers were removed.
    with open(ipa_path, 'rb') as ipa_file:
        r = requests.post(url, data=data, files={'file': ipa_file})
    r.encoding = 'UTF-8'
    r = json.loads(r.text)
    status_code = r['code']
    if status_code == 0:
        print("\n***************上传成功\n")
    else:
        raise Exception("\n***************%s - %s\n" % (r['message'], r['code']))
if __name__ == '__main__':
    # Locate the .app, ask for release notes, build the ipa, upload it.
    app_path = get_app_path()
    updata_des = input("请输入更新的日志描述:")
    ipa_path = bulidIPA(app_path) + "/Payload.ipa"
    uploadIPA(ipa_path,updata_des)
|
# Copyright 2016 Husky Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import time
from pyhusky.common.binstream import BinStream
from pyhusky.common.operation import Operation
from pyhusky.frontend import communication
from pyhusky.frontend import config
from pyhusky.frontend.datareceiver import data_receiver
id_counter = 0
def visit_deps(op, bs):
    """Serialize *op* and its dependency subtree into *bs* (children first).

    structure of binstream
    [node id][op][dep_id1, dep_id2, ...][node id][op][dep_id1, dep_id2, ...]

    Returns the integer id assigned to *op*.
    """
    global id_counter
    id_counter += 1
    bid = id_counter  # this node's id, fixed before recursing
    self_dep_list = []
    for dep in op.op_deps:
        if dep.is_materialized: # create a virtual node for cache
            dep = Operation("Functional#load_cache_py", dep.op_param, [])
        dep_id = visit_deps(dep, bs)
        self_dep_list.append(dep_id)
    bs << bid
    bs << op
    bs << self_dep_list
    config.log_msg(op)
    return bid
def serialize_dag(pending_op):
    """Serialize the whole operation DAG rooted at *pending_op* into a
    BinStream, terminated by a -1 marker."""
    global id_counter
    id_counter = -1  # visit_deps pre-increments, so the first node gets id 0
    bs = BinStream()
    visit_deps(pending_op, bs)
    bs << -1 # mark of end
    return bs
def submit_task(bin_dag, op):
    """Send the serialized DAG to the backend and poll until completion.

    Polls task status every 1 ms: 'progress' replies log the percentage
    (done at 100), 'data' replies are decoded by data_receiver and
    accumulated with += into the returned result (None when no data).
    """
    # NOTE(review): the task id is a random int in [0, 1000] -- collisions
    # between concurrent tasks are possible; confirm the backend tolerates this.
    task_id = random.randint(0, 1000)
    bin_dag << task_id
    communication.send(communication.NEW_TASK, bin_dag.data_buf)
    result = None
    cur_prgs = -1  # last progress value printed, to avoid duplicate logs
    while True:
        time.sleep(0.001)
        question = BinStream()
        question << task_id
        reply = BinStream()
        reply.data_buf = communication.ask(communication.QUERY_TASK, question.data_buf)
        status = reply.load_str()
        if status == "progress":
            prgs = reply.load_int32()
            if cur_prgs != prgs:
                cur_prgs = prgs
                config.log_msg("Executing... "+str(prgs)+" %")
            if prgs == 100:
                break
        elif status == "data":
            ret = data_receiver(reply, op)
            if ret is not None:
                if result is None:
                    result = ret
                else:
                    result += ret
    return result
def compute(op):
    """Execute the DAG rooted at *op*, discarding any returned data."""
    bin_dag = serialize_dag(op)
    submit_task(bin_dag, op)
def compute_collect(op):
    """Execute the DAG rooted at *op* and return the collected result."""
    bin_dag = serialize_dag(op)
    result = submit_task(bin_dag, op)
    return result
|
# Count how many DISTINCT vowels occur in the user's input.
# NOTE(review): this counts vowel *types* present (max 5), not total
# vowel occurrences -- confirm that is the intended behaviour.
vowels='aeiou'
string=input('enter the string')
count=0
for i in vowels:
    if i in string:
        count+=1
print(count)
|
import os
from pyspark.sql import SparkSession
import pyspark.sql.functions as pssf
import pyspark.sql.types as psst
import pytest
import sparklib
@pytest.fixture(scope='session')
def spark():
    """Session-scoped local SparkSession, stopped when the test session ends."""
    os.environ['SPARK_LOCAL_IP'] = "127.0.0.1"  # bind Spark to loopback only
    spark = SparkSession\
        .builder\
        .getOrCreate()
    yield spark
    spark.stop()
def test_sum(spark: SparkSession):
    """sparklib.sum over 'id' of range(101) must be 0+1+...+100 = 5050."""
    df = spark.range(101)
    actual = sparklib.sum(df, 'id')
    expected = 5050
    assert actual == expected
def test_to_dict_should_sort(spark):
    """to_dict must order rows by the given sort columns ('id' here)."""
    # NOTE(review): 'input' shadows the builtin; harmless in a test body.
    input = spark.createDataFrame(
        [
            psst.Row(id=2, name='sam'),
            psst.Row(id=1, name='ivan')
        ]
    )
    expect = [
        { 'id': 1, 'name': 'ivan' },
        { 'id': 2, 'name': 'sam' },
    ]
    actual = sparklib.to_dict(input, ['id'])
    assert actual == expect
def test_find_duplicates(spark: SparkSession):
    """tr_find_duplicates must flag every duplicate of a key after the first."""
    input = spark.createDataFrame(
        [(1, 'a'),
         (2, 'b'),
         (1, 'c'),],
        ['id', 'value'])
    expect = spark.createDataFrame(
        [(1, 'a', None),
         (1, 'c', 'Duplicate key.'),
         (2, 'b', None),],
        ['id', 'value', 'error'])
    # Compare as sorted dicts since DataFrame row order is unspecified.
    sort_by = ['id', 'value', 'error']
    actual = sparklib.tr_find_duplicates(input, "id")
    assert sparklib.to_dict(actual, sort_by) == sparklib.to_dict(expect, sort_by)
def test_as_list_deep(spark):
    """to_list with deep=True must convert nested Rows to plain dicts."""
    input = [ psst.Row(id=1, a=psst.Row(b=101)) ]
    input_df = spark.createDataFrame(input)
    actual = sparklib.to_list(input_df, True)
    expect = [{'id':1, 'a':{'b':101}}]
    assert actual == expect
def test_as_list_shallow(spark):
    """to_list with deep=False must leave nested Rows unconverted."""
    input = [ psst.Row(id=1, a=psst.Row(b=101)) ]
    input_df = spark.createDataFrame(input)
    actual = sparklib.to_list(input_df, False)
    expect = [{'id':1, 'a':psst.Row(b=101)}]
    assert actual == expect
|
#! /usr/bin/env python
import sys
import ddlib # Load the ddlib Python library for NLP functions
# NOTE(review): this is an extractor *template*, not runnable code -- the
# literal '...' in the unpacking line is a placeholder for the real
# column list, and the prints use Python 2 syntax.
# For each input row
for row in sys.stdin:
    # Parse tab-separated values
    column1, column2, column3, ... = row.strip().split('\t')
    # Output rows
    print '\t'.join(map(str, [
        column1,
        column2,
        column3
    ]))
|
"""
the config info. in pre process.
"""
import os
# project path
PROJECT_PATH = os.path.abspath(os.path.dirname(os.getcwd()))
DATA_PATH = os.path.join(PROJECT_PATH, 'text_detector/data/train_data/')
IMG_PATH = os.path.join(DATA_PATH, 'sorted_image_9000/')
LABEL_PATH = os.path.join(DATA_PATH, 'sorted_txt_9000/')
# raw data path
RAW_DATA_PATH = '/media/super/Dev Data/Data Set & Weight/ICPR_text_train/train_9000/'
RAW_IMG_PATH = os.path.join(RAW_DATA_PATH, 'image_9000/')
RAW_LABEL_PATH = os.path.join(RAW_DATA_PATH, 'txt_9000/')
""" use to generate test data.
# project path
PROJECT_PATH = os.path.abspath(os.path.dirname(os.getcwd()))
DATA_PATH = os.path.join(PROJECT_PATH, 'text_detector/data/test_data/')
IMG_PATH = os.path.join(DATA_PATH, 'sorted_image_1000/')
LABEL_PATH = os.path.join(DATA_PATH, 'sorted_txt_1000/')
# raw data path
RAW_DATA_PATH = '/media/super/Dev Data/Data Set & Weight/ICPR_text_train/train_1000/'
RAW_IMG_PATH = os.path.join(RAW_DATA_PATH, 'image_1000/')
RAW_LABEL_PATH = os.path.join(RAW_DATA_PATH, 'txt_1000/')
"""
# save path
SAVE_PATH = os.path.join(PROJECT_PATH, 'pre_process/markdown_resource/')
# index path
INDEX_PATH = os.path.join(DATA_PATH + 'index/trainval.txt')
BROKEN_INDEX_PATH = os.path.join(DATA_PATH + 'index/broken_index.txt')
# report
FROM_EMAIL = '502612842@qq.com'
FROM_EMAIL_TOKEN = 'bvbekgpwcrslbgha'
TO_EMAIL = 'wangcser@qq.com'
SUBJECT = 'Text Detection Task Running Report'
CONTENT = '''
下方为项目目前运行情况:
''' |
from django.conf.urls import url
from eduprocess.views import GroupListView
urlpatterns = [
url(r'^list/$', GroupListView.as_view(), name='group_list'),
# url(r'^detail/$', GroupDetailView.as_view(), name='group_add'),
# url(r'^detail/(?P<pk>\d+)/$', GroupDetailView.as_view(), name='group_detail'),
] |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# author: hao 2019/7/17-21:44
from django.urls import path
from apps.news import views
app_name = 'news'
urlpatterns = [
path('', views.index, name='index'),
path('search/', views.search, name='search'),
]
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 31 16:32:44 2016
@author: nmvenuti
Modeling grid search
"""
#Import packages
import pandas as pd
import numpy as np
import glob
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.ensemble import RandomForestRegressor
import time
#from sknn import mlp
startTime=time.time()
################################
#####Import and clean data######
################################
#Define data filepath
dataPath='./github/nmvenuti/DSI_Religion/variableAnalysis/Third Run-Cuts/'
#Get data frame for each cut (cocowindow_2 .. cocowindow_6 -- presumably
#co-occurrence window sizes; confirm against the pipeline that wrote them)
signalDF2=pd.read_csv(dataPath+'cocowindow_2/run0/masterOutput.csv')
signalDF3=pd.read_csv(dataPath+'cocowindow_3/run0/masterOutput.csv')
signalDF4=pd.read_csv(dataPath+'cocowindow_4/run0/masterOutput.csv')
signalDF5=pd.read_csv(dataPath+'cocowindow_5/run0/masterOutput.csv')
signalDF6=pd.read_csv(dataPath+'cocowindow_6/run0/masterOutput.csv')
def addRank(signalDF):
    """Attach each group's hand-assigned rank to the signal frame.

    Derives 'groupName' from the 'groupId' prefix (text before the first
    underscore) and merges in a fixed name->rank lookup table.
    """
    rank_lookup = pd.DataFrame(
        [['WBC', 1], ['PastorAnderson', 2], ['NaumanKhan', 3],
         ['DorothyDay', 4], ['JohnPiper', 4], ['Shepherd', 4],
         ['Rabbinic', 6], ['Unitarian', 7], ['MehrBaba', 8]],
        columns=['groupName', 'rank'])
    signalDF['groupName'] = signalDF['groupId'].map(lambda gid: gid.split('_')[0])
    return signalDF.merge(rank_lookup, on='groupName')
# Attach group ranks to every window-size frame.
signalDF2=addRank(signalDF2)
signalDF3=addRank(signalDF3)
signalDF4=addRank(signalDF4)
signalDF5=addRank(signalDF5)
signalDF6=addRank(signalDF6)
##################################################################
#####Hyperparameter optimization for RF and SVM Regression########
##################################################################
def modelAnalysis(signalDF,cutoff):
    """Grid-search SVR and RandomForestRegressor hyperparameters.

    Trains on rows whose groupId contains 'train' and scores rows
    containing 'test'; "accuracy" is the fraction of test predictions
    within *cutoff* of the true rank. Both model families are searched
    twice: on raw features and on StandardScaler-standardized features.

    Returns svmParam + rfParam + svmStdParam + rfStdParam, each a
    best-parameter list ending with its accuracy (or [0] when nothing
    beat accuracy 0).
    """
    xList=['perPos','perNeg','judgementFrac','avgSD', 'avgEVC']
    yList=['rank']
    # Keep only groups with more than 5 files and complete features.
    signalDF=signalDF[signalDF['files']>5]
    signalDF=signalDF.dropna()
    #Set up test train splits
    trainIndex=[x for x in signalDF['groupId'] if 'train' in x]
    testIndex=[x for x in signalDF['groupId'] if 'test' in x]
    signalTrainDF=signalDF[signalDF['groupId'].isin(trainIndex)]
    signalTestDF=signalDF[signalDF['groupId'].isin(testIndex)]
    yActual=signalTestDF['rank'].tolist()
    #SVM
    svmAccuracy=0
    svmParam=[0]
    cList=[0.01,0.05,0.1,0.5,1.0,1.5,2.0,2.5,3.0,3.5]
    epsilonList=[0.01,0.05,0.1,0.5,1.0]
    kernelList=['linear','poly','rbf','sigmoid']
    degreeList=[0,1,2,3,4]
    coefList=[0,1,2,3]
    # Exhaustive 5-deep grid over SVR hyperparameters.
    for C in cList:
        for epsilon in epsilonList:
            for kernel in kernelList:
                for degree in degreeList:
                    for coef in coefList:
                        signalSVR=svm.SVR(C=C,epsilon=epsilon,kernel=kernel,degree=degree,coef0=coef,max_iter=100000)
                        signalSVR.fit(signalTrainDF[xList],signalTrainDF[yList])
                        #Predict New Data
                        yPred=signalSVR.predict(signalTestDF[xList])
                        #Get accuracy: share of predictions within 'cutoff' of actual
                        x=float(len([i for i in range(len(yPred)) if abs(yActual[i]-yPred[i])<cutoff])/float(len(yPred)))
                        #Add to parameter list
                        if x>svmAccuracy:
                            svmAccuracy=x
                            svmParam=[C,epsilon,kernel,degree,coef,svmAccuracy]
    #Random Forest Regressor
    rfAccuracy=0
    estimatorList=[10,25,50,100,150,200]
    depthList=[5,10,15,20,25,30]
    featureList=['auto','sqrt','log2']
    # NOTE(review): min_samples_split=1 (from splitList) is rejected by
    # modern scikit-learn (must be >= 2) -- this ran on an older version.
    splitList=[1,2,3]
    rfParam=[0]
    for estimator in estimatorList:
        for depth in depthList:
            for feature in featureList:
                for split in splitList:
                    rfModel=RandomForestRegressor(n_estimators=estimator,max_depth=depth,
                                                  min_samples_split=split, max_features=feature,
                                                  random_state=0,n_jobs=-1)
                    rfModel.fit(signalTrainDF[xList],signalTrainDF[yList])
                    #Predict New Data
                    yPred=rfModel.predict(signalTestDF[xList])
                    #Get accuracy
                    x=float(len([i for i in range(len(yPred)) if abs(yActual[i]-yPred[i])<cutoff])/float(len(yPred)))
                    #Add to parameter list
                    if x>rfAccuracy:
                        rfAccuracy=x
                        rfParam=[estimator,depth,split,feature,rfAccuracy]
    #Perform same analysis with scaled data
    #Scale the data (scaler is fit on the training rows only)
    sc = StandardScaler()
    sc=sc.fit(signalTrainDF[xList])
    signalStdTrainDF= pd.DataFrame(sc.transform(signalTrainDF[xList]),columns=xList)
    signalStdTestDF = pd.DataFrame(sc.transform(signalTestDF[xList]),columns=xList)
    svmAccuracy=0
    svmStdParam=[0]
    for C in cList:
        for epsilon in epsilonList:
            for kernel in kernelList:
                for degree in degreeList:
                    for coef in coefList:
                        signalSVR=svm.SVR(C=C,epsilon=epsilon,kernel=kernel,degree=degree,coef0=coef,max_iter=100000)
                        signalSVR.fit(signalStdTrainDF[xList],signalTrainDF[yList])
                        #Predict New Data
                        yPred=signalSVR.predict(signalStdTestDF[xList])
                        #Get accuracy
                        x=float(len([i for i in range(len(yPred)) if abs(yActual[i]-yPred[i])<cutoff])/float(len(yPred)))
                        #Add to parameter list
                        if x>svmAccuracy:
                            svmAccuracy=x
                            svmStdParam=[C,epsilon,kernel,degree,coef,svmAccuracy]
    #STDRandom Forest Regressor
    rfAccuracy=0
    rfStdParam=[0]
    for estimator in estimatorList:
        for depth in depthList:
            for feature in featureList:
                for split in splitList:
                    rfModel=RandomForestRegressor(n_estimators=estimator,max_depth=depth,
                                                  min_samples_split=split, max_features=feature,
                                                  random_state=0,n_jobs=-1)
                    rfModel.fit(signalStdTrainDF[xList],signalTrainDF[yList])
                    #Predict New Data
                    yPred=rfModel.predict(signalStdTestDF[xList])
                    #Get accuracy
                    x=float(len([i for i in range(len(yPred)) if abs(yActual[i]-yPred[i])<cutoff])/float(len(yPred)))
                    #Add to parameter list
                    if x>rfAccuracy:
                        rfAccuracy=x
                        rfStdParam=[estimator,depth,split,feature,rfAccuracy]
    return(svmParam+rfParam+svmStdParam+rfStdParam)
# Run the grid search for every window size (accuracy cutoff = 1 rank)
# and dump one best-parameters row per window size to CSV.
accuracyList=[]
accuracyList.append([2]+modelAnalysis(signalDF2,1))
accuracyList.append([3]+modelAnalysis(signalDF3,1))
accuracyList.append([4]+modelAnalysis(signalDF4,1))
accuracyList.append([5]+modelAnalysis(signalDF5,1))
accuracyList.append([6]+modelAnalysis(signalDF6,1))
pd.DataFrame(accuracyList).to_csv('./github/nmvenuti/DSI_Religion/variableAnalysis/outputs/testOutput1.csv')
|
# Deployment settings for the door service.
PORT = 80               # HTTP listen port
HTTPS_PORT = 443        # HTTPS listen port
SERIAL = "/dev/ttyAMA0" # serial device for the door hardware
DOMAIN = "door.flipdot.space"  # public hostname of this service
EMAIL = "my@invalid.email"     # contact address -- placeholder, presumably for certificate registration; replace before deploy
DEBUG=False
STAGING=True            # presumably selects a staging environment (e.g. ACME staging) -- confirm
fake_door = True        # presumably simulates the door instead of driving hardware -- confirm
# Generated by Django 2.2 on 2019-04-15 04:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: create the Lesson model with a
    (tutor, lesson_date, lesson_time) uniqueness constraint."""
    dependencies = [
        ('onlclass', '0008_auto_20190415_1319'),
    ]
    operations = [
        migrations.CreateModel(
            name='Lesson',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lesson_date', models.DateField(verbose_name='開催日')),
                # PROTECT: deleting a referenced Timeunit/Subject/User
                # with lessons attached is blocked.
                ('lesson_time', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='onlclass.Timeunit', verbose_name='開始時刻')),
                ('subject', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='onlclass.Subject', verbose_name='教科')),
                ('tutor', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='onlclass.User', verbose_name='先生')),
            ],
            options={
                'unique_together': {('tutor', 'lesson_date', 'lesson_time')},
            },
        ),
    ]
|
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
from profiles.views import home
from django.contrib.auth import views as auth_views
from django.views.generic.base import TemplateView
from profiles.views import home
# Project URL routing.
# FIX: the 'reviews/' route was registered twice; Django matches top-down, so
# the duplicate entry was unreachable dead code and has been removed.
urlpatterns = [
    path('', home, name='home'),
    path('profiles/', include('profiles.urls')),
    path('reviews/', include('reviews.urls')),
    path('admin/', admin.site.urls),
    path('accounts/', include('django.contrib.auth.urls')),
    path('country/', include('country.urls')),
    path('trip/', include('trips.urls')),
    path('message/', include('message.urls')),
]

# Serve static and media files from Django itself -- development only.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import numpy as np
import soundfile as sf
import pyworld as pw
# WORLD-vocoder voice conversion: analyse a wav, scale F0 and optionally shift
# the spectral envelope, then resynthesize.
WAV_FILE = ("./data/vaiueo2d")
data, fs = sf.read(WAV_FILE+".wav")
# Estimate the F0 contour (Harvest); the 71.0/800.0/1.0 arguments are presumably
# the F0 search floor/ceiling in Hz and the frame period in ms -- confirm.
f0, t = pw.harvest(data, fs, 71.0, 800.0, 1.0)
option_cheaptrick_fft_size = 8192
# Spectral envelope estimation (CheapTrick) with the chosen FFT size.
sp = pw.cheaptrick(data, f0, t, fs, -0.15, 71.0, option_cheaptrick_fft_size)
option_d4c_fftsize = option_cheaptrick_fft_size
# Aperiodicity estimation (D4C) with the same FFT size.
ap = pw.d4c(data, f0, t, fs, -0.15, option_d4c_fftsize)
Magnification_f0 = 2.0  # changing this value scales F0 (pitch shift)
ChangeValue_sp = 0  # changing this value shifts the spectral envelope (in FFT bins)
print( "F0 magnification = " + str(Magnification_f0) )  # show the F0 scaling factor
print( "Spectram shift = " + str(ChangeValue_sp*(fs/2)/option_cheaptrick_fft_size) + " Hz" )  # show the envelope shift in Hz
conversion_f0 = f0*Magnification_f0
conversion_sp = np.zeros_like(sp)
# Shift the spectral envelope by ChangeValue_sp bins, clamping at both edges.
for f in range(conversion_sp.shape[1]):
    if f - ChangeValue_sp < 0:
        # NOTE(review): clamps to column 1 rather than column 0 -- looks like an
        # off-by-one; confirm whether sp[:, 0] was intended.
        conversion_sp[:,f] = sp[:,1]
    elif f - ChangeValue_sp > conversion_sp.shape[1] -1:
        conversion_sp[:,f] = sp[:, conversion_sp.shape[1] -1]
    else:
        conversion_sp[:,f] = sp[:, f - ChangeValue_sp ]
# Resynthesize audio from the modified F0/envelope and write it next to the input.
synthesized = pw.synthesize(conversion_f0, conversion_sp, ap, fs, 1.0)
sf.write(WAV_FILE+"_conversion.wav", synthesized, fs)
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from rocketmq.client import Producer, PushConsumer
@pytest.fixture(scope='session')
def producer():
    """Session-scoped RocketMQ producer wired to a local name server."""
    prod = Producer('producer_group')
    prod.set_name_server_address('127.0.0.1:9876')
    prod.start()
    yield prod
    # Teardown after the whole test session.
    prod.shutdown()
@pytest.fixture(scope='session')
def orderly_producer():
    """Session-scoped producer; the True flag presumably enables orderly
    (in-order) sending -- confirm against the rocketmq client docs."""
    prod = Producer('orderly_producer_group', True)
    prod.set_name_server_address('127.0.0.1:9876')
    prod.start()
    yield prod
    prod.shutdown()
@pytest.fixture(scope='function')
def push_consumer():
    """Fresh PushConsumer per test; it is configured here but not started."""
    consumer = PushConsumer('push_consumer_group')
    consumer.set_name_server_address('127.0.0.1:9876')
    yield consumer
    consumer.shutdown()
|
from onegov.core.utils import module_path
from onegov.org.theme import OrgTheme
# Default sans-serif font stack for this theme.
NEWSGOT = '"NewsGot", Verdana, Arial, sans-serif;'

# options editable by the user
user_options = {
    'primary-color': '#e33521',
    'font-family-sans-serif': NEWSGOT
}
class WinterthurTheme(OrgTheme):
    """Winterthur-specific styling layered on top of the base org theme."""

    # Unique theme identifier.
    name = 'onegov.winterthur.foundation'

    @property
    def post_imports(self):
        # Sass imports compiled after the base theme's imports.
        return super().post_imports + [
            'winterthur'
        ]

    @property
    def extra_search_paths(self):
        # Prepend this package's styles directory so local Sass files win lookups.
        base_paths = super().extra_search_paths
        return [module_path('onegov.winterthur.theme', 'styles')] + base_paths

    @property
    def pre_imports(self):
        # Sass imports compiled before the base theme's imports.
        return super().pre_imports + [
            'winterthur-foundation-mods'
        ]
|
import logging
from datetime import datetime
from typing import List, Optional
from dbcat.catalog import Catalog
from dbcat.catalog.models import JobExecution, JobExecutionStatus
from pglast.parser import ParseError
from data_lineage.parser.dml_visitor import (
CopyFromVisitor,
DmlVisitor,
SelectIntoVisitor,
SelectSourceVisitor,
)
from data_lineage.parser.node import Parsed, parse
def parse_queries(queries: List[str]) -> List[Parsed]:
    """Parse each SQL string, dropping statements with syntax errors.

    Args:
        queries: Raw SQL statements.

    Returns:
        A Parsed object for every query that parsed successfully; queries
        raising ParseError are logged at WARNING level and skipped.
    """
    parsed: List[Parsed] = []
    for query in queries:
        try:
            parsed.append(parse(query))
        except ParseError as e:
            # Lazy %-style arguments: the message is only built if the
            # record is actually emitted (stdlib logging best practice).
            logging.warning("Syntax error while parsing %s.\n%s", query, e)
    return parsed
def visit_dml_query(catalog: Catalog, parsed: Parsed) -> Optional[DmlVisitor]:
    """Run each DML visitor over *parsed* and return the first that matches.

    A visitor matches when it found at least one source table and a target
    table; the matching visitor is bound to *catalog* before being returned.
    Returns None when no visitor recognises the statement.
    """
    select_source_visitor: DmlVisitor = SelectSourceVisitor(parsed.name)
    select_into_visitor: DmlVisitor = SelectIntoVisitor(parsed.name)
    copy_from_visitor: DmlVisitor = CopyFromVisitor(parsed.name)

    # Order matters: the first visitor that matches wins.
    for visitor in [select_source_visitor, select_into_visitor, copy_from_visitor]:
        parsed.node.accept(visitor)
        if len(visitor.select_tables) > 0 and visitor.target_table is not None:
            visitor.bind(catalog)
            return visitor
    return None
def extract_lineage(
    catalog: Catalog, visited_query: DmlVisitor, parsed: Parsed
) -> JobExecution:
    """Record column-level lineage for a visited DML query.

    Creates a job keyed by the query name (storing the raw SQL), registers a
    successful execution for it, and adds one column-lineage edge per
    positionally paired (source, target) column from the visitor.

    Returns:
        The created JobExecution.
    """
    job = catalog.add_job(parsed.name, {"query": parsed.query})
    # Start and end timestamps are both "now": the run being recorded is
    # considered already complete.
    job_execution = catalog.add_job_execution(
        job, datetime.now(), datetime.now(), JobExecutionStatus.SUCCESS
    )
    # Source and target columns are paired positionally by the visitor.
    for source, target in zip(
        visited_query.source_columns, visited_query.target_columns
    ):
        edge = catalog.add_column_lineage(source, target, job_execution.id, {})
        logging.debug("Added {}".format(edge))
    return job_execution
|
# Small lists used to demonstrate list concatenation with `+`.
a = [1, 2, 3]
b = [4, 5, 6]
c = [1]
d = [8, 8, 10]

# `+` concatenates lists, preserving order and duplicates.
list1 = c + b + a + d
list2 = a + b
list3 = c + d
list4 = a + a

for combined in (list1, list2, list3, list4):
    print(combined)
from manimlib.imports import *
def align_with(mob1, mob2):
    '''
    Horizontally align mob1 with mob2: shift mob1 along the x-axis so that its
    center has the same x coordinate as mob2's center.  Only mob1 is moved.
    (The previous docstring incorrectly said the y component was aligned.)
    '''
    mob1_center = mob1.get_center()
    mob2_center = mob2.get_center()
    mob1.shift((mob2_center[0] - mob1_center[0]) * RIGHT)
class IntroScene(Scene):
    """Opening scene: title, the formula A = X D X^{-1} with labelled factors,
    then the right-hand side morphs into the concrete 2x2 matrix A."""
    def construct(self):
        ## Intro Text
        diagonalization_text = TextMobject("Diagonalization")
        diagonalization_text.set_color(YELLOW)
        formula = TexMobject(
            "\\Large \\text{A} = ",
            "\\text{X }",
            "\\text{D } ",
            "\\text{X}^{-1}")
        # A in red, both X factors in blue, D in green.
        formula[0].set_color(RED)
        formula[1].set_color(BLUE)
        formula[3].set_color(BLUE)
        formula[2].set_color(GREEN)
        ## Define RHS as a V group for future ease
        rhs = VGroup(*[formula[1:]])
        ## Defining explanation text boxes with arrows pointing to respective elements
        x_explanation = TextMobject("X is the matrix of eigenvectors", color = BLUE).shift(2*DOWN)
        align_with(x_explanation, formula[2])
        d_explanation = TextMobject("D is the diagonal matrix of eigenvalues", color = GREEN).shift(2*UP)
        align_with(d_explanation, formula[2])
        # Two arrows from the X caption (to X and X^-1), one from the D caption.
        arrows = VGroup(
            *[Arrow(x_explanation.get_top(), formula[i].get_bottom(), color = BLUE) for i in (1, 3)],
            Arrow(d_explanation.get_center(), formula[2].get_top(), color = GREEN))
        ## Now defining the 2x2 matrix which we will diagonalize
        A_matrix = Matrix(np.array([[2, 0], [-2, 1]])).move_to(rhs.get_center() + RIGHT / 2)
        A_matrix.set_color_by_gradient(BLUE, GREEN)
        # Animation sequence: title, formula, explanations, then collapse RHS to A.
        self.play(Write(diagonalization_text), run_time = 1)
        self.play(diagonalization_text.to_edge, UP)
        self.play(Write(formula))
        self.play(Write(x_explanation), Write(d_explanation))
        self.play(*[FadeIn(arrow) for arrow in arrows])
        self.wait(3)
        self.play(
            *[FadeOut(arrow) for arrow in arrows],
            FadeOut(x_explanation), FadeOut(d_explanation))
        self.wait()
        self.play(ReplacementTransform(rhs, A_matrix))
class ShowNakedTransformation(LinearTransformationScene):
    """Animate the configured 2x2 linear map acting on the plane, with the
    standard basis vectors visible."""
    CONFIG = {
        "show_basis_vectors": True,
        # Matrix in the transposed form expected by apply_transposed_matrix.
        "transposed_matrix": [[2, 0], [-1, 1]]
    }
    def construct(self):
        self.setup()
        self.wait()
        self.apply_transposed_matrix(self.transposed_matrix)
        self.wait()
class ShowDiagonalizedBasisVectors(LinearTransformationScene):
    """Carry two chosen vectors through X^{-1}, then D, then X -- the
    diagonalized factorization applied one factor at a time."""
    CONFIG = {
        "show_basis_vectors": False,
        "x_transpose": [[1, 1], [1, 0]],  # X factor (transposed form)
        "d_transpose": [[1, 0], [0, 2]]   # diagonal D factor
    }
    def construct(self):
        self.setup()
        self.wait()
        evec_1 = Vector([1, 1], color = X_COLOR)
        evec_2 = Vector([1, 0], color = Y_COLOR)
        self.add_vector(evec_1)
        self.add_vector(evec_2)
        # Apply X^{-1}, then D, then X in sequence.
        self.apply_inverse_transpose(self.x_transpose)
        self.apply_transposed_matrix(self.d_transpose)
        self.apply_transposed_matrix(self.x_transpose)
class ShowDecomposedTransformation(LinearTransformationScene):
    """Same three-step factor animation as ShowDiagonalizedBasisVectors, but
    applied to the standard basis vectors i-hat and j-hat."""
    CONFIG = {
        "show_basis_vectors": False,
        "x_transpose": [[1, 1], [1, 0]],  # X factor (transposed form)
        "d_transpose": [[1, 0], [0, 2]]   # diagonal D factor
    }
    def construct(self):
        self.setup()
        self.wait()
        i_hat = Vector([1, 0], color = X_COLOR)
        j_hat = Vector([0, 1], color = Y_COLOR)
        self.add_vector(i_hat)
        self.add_vector(j_hat)
        # Apply X^{-1}, then D, then X in sequence.
        self.apply_inverse_transpose(self.x_transpose)
        self.apply_transposed_matrix(self.d_transpose)
        self.apply_transposed_matrix(self.x_transpose)
#import sys
#input = sys.stdin.readline
from copy import deepcopy
def solve(N,Q):
    """Sort permutation *Q* of 1..N using adjacent swaps, each swap position
    usable at most once.

    Args:
        N: size of the permutation.
        Q: a permutation of 1..N (left unmodified; a copy is worked on).

    Returns:
        The resulting list (sorted order of values achieved by the swaps) on
        success, or False when a required swap position was already consumed.
    """
    # Z[v] = current index of value v in the working permutation P.
    Z = [0]*(N+1)
    P = deepcopy(Q)
    for i, p in enumerate(P):
        Z[p] = i
    # permuted[i] is True once the swap between positions i and i+1 was used.
    permuted = [False]*N
    for n in range(1,N+1):
        if Z[n] == n-1:
            # Value n already sits at its target position.
            continue
        K = Z[n]
        # Bubble value n leftwards from index K to its slot, one swap at a time.
        for i in range(K-1,n-2,-1):
            if permuted[i]:
                # This adjacent swap was already consumed: impossible.
                # FIX: an unreachable bare `return` that immediately followed
                # this statement (dead code) has been removed.
                return False
            Z[P[i+1]] = i
            Z[P[i]] = i+1
            P[i], P[i+1] = P[i+1], P[i]
            permuted[i] = True
    return P
from itertools import permutations
def main():
    """Brute-force driver: print solve()'s result for every permutation of 1..N,
    where N is read from stdin."""
    N = int( input())
    for p in permutations(range(1,N+1)):
        Q = list(p)
        print(Q, end=" ")
        print(solve(N,Q))
# print(solve(4,[1,4,2,3]))
if __name__ == '__main__':
    main()
|
# Board dimensions: a 3x3 tic-tac-toe grid.
r,c=3,3
# 0 marks an empty cell; otherwise the cell holds a player's sign ('X'/'O').
mainArray = [[0 for col in range(r)] for row in range(c)]

def printGrid():
    """Print the board; empty cells show their row/column digits as a hint."""
    for row in range(len(mainArray)):
        for col in range(len(mainArray[row])):
            cell = mainArray[row][col]
            if cell == 0:
                print(f"|{row}{col}|", end="")
            else:
                print(f"|{cell} |", end="")
        print("", end="\n")

printGrid()
def getUserInput():
    """Prompt for a move and return it as typed (expected form: two digits "rc")."""
    return input("Enter position: ")
def checkWinCondition(val,userSign):
    """Place *userSign* at the board cell encoded in *val* ("rc" digit pair),
    reprint the grid, and print a winner message if that sign completes a row,
    column or diagonal.

    NOTE(review): there is no input validation or occupied-cell check, and a
    win only prints -- the calling loop keeps running.
    """
    value=str(val)
    x=int(value[0])  # row index from the first digit
    y=int(value[1])  # column index from the second digit
    mainArray[x][y]=userSign
    printGrid()
    # Row x complete?  (break aborts the scan; reaching i == r-1 means a win)
    for i in range(r):
        if(mainArray[x][i]!=userSign):
            break
        if(i==r-1):
            print("Winner"+ userSign)
    # Column y complete?
    for i in range(r):
        if(mainArray[i][y]!=userSign):
            break
        if(i==r-1):
            print("Winner"+ userSign)
    # Main diagonal complete? (checked regardless of where the move landed)
    for i in range(r):
        if(mainArray[i][i]!=userSign):
            break
        if(i==r-1):
            print("Winner"+ userSign)
    # Anti-diagonal complete -- only checked when the move lies on it.
    if(x+y==(r-1)):
        for i in range(r):
            if(mainArray[i][(r-1)-i] != userSign):
                break;
            if(i == r-1):
                print("Winner"+ userSign)
# Alternate X and O moves; 9 moves would fill the 3x3 board.
# NOTE(review): turnCount is never incremented, so this loop never terminates
# on its own -- confirm whether an increment per move was intended.
turnCount = 0;
while turnCount < 9:
    firstUserInput = getUserInput()
    checkWinCondition(firstUserInput,'X')
    secondUserInput = getUserInput()
    checkWinCondition(secondUserInput,'O')
|
import gi, os
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk
import threading
class Ventana(Gtk.Window):
    """GTK window that reads a numeric id from stdin on a worker thread and
    displays it as an uppercase hexadecimal UID."""
    def __init__(self):
        # Create the window.
        Gtk.Window.__init__(self, title="rfid_gtk.py")
        self.connect("destroy", Gtk.main_quit)
        self.set_border_width(10)
        # Vertical box: label on top, button below.
        self.box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
        # Add the box to the window.
        self.add(self.box)
        # Box background colour: RGBA(red, green, blue, alpha), channels 0..1,
        # alpha being opacity.  A CSS provider would be more robust but is
        # more involved.
        self.evbox = Gtk.EventBox()
        self.evbox.override_background_color(Gtk.StateType.NORMAL, Gdk.RGBA(0,0,1,1))
        # Create the prompt label and put it inside the event box.
        self.label = Gtk.Label('<span foreground="white" size="x-large">Porfavor, identifiquese con su id universitario</span>')
        self.label.set_use_markup(True)
        self.label.set_size_request(600,150)
        self.evbox.add(self.label)
        # Create the Clear button.
        self.button = Gtk.Button(label="Clear")
        self.button.connect("clicked", self.clicked)
        # Pack both widgets with expansion enabled.
        self.box.pack_start(self.evbox, True, True, 0)
        self.box.pack_start(self.button, True, True, 0)
        # Create and start the stdin-reader thread.
        thread = threading.Thread(target=self.scan_uid)
        # Daemon thread: it dies together with the window.
        thread.setDaemon(True)
        thread.start()
        self.show_all()
        Gtk.Window.set_focus(self)
        Gtk.main()

    # Callback invoked when the Clear button is pressed.
    def clicked(self, widget):
        # Ask for an id again and restore the blue background.
        self.label.set_label('<span foreground="white" size="x-large">Porfavor, identifiquese con su id universitario</span>')
        self.evbox.override_background_color(Gtk.StateType.NORMAL, Gdk.RGBA(0,0,1,1))
        thread = threading.Thread(target=self.scan_uid)
        thread.start()

    # Runs on a worker thread: blocks on stdin until an id is typed/scanned.
    def scan_uid(self):
        # Disable terminal echo while the id is entered.
        os.system("stty -echo")
        # A Gtk entry used as a simple holder for the typed value.
        entry = Gtk.Entry()
        # Store the value read from stdin.
        entry.set_text(input())
        # Convert it to an integer.
        numB = int(entry.get_text())
        # Translate the int to uppercase hexadecimal.
        uid = hex(numB).upper()[2:]  # [2:] skips the leading '0X' prefix
        # Update the label text and turn the box red.
        self.label.set_label('<span foreground="white" size="x-large">UID: '+uid+'</span>')
        self.evbox.override_background_color(Gtk.StateType.NORMAL, Gdk.RGBA(1,0,0,1))
        # Finally focus the window so it does not stay in the background.
        self.present()
# Build and run the window (Gtk.main inside __init__ blocks until it closes).
main = Ventana()
os.system("stty echo")  # restore echo if the window closed before a uid was entered
quit()
|
import numpy as np
import os.path
import matplotlib.pyplot as plt
def writemr(filename, path2mrfn='/kroot/rel/ao/qfix/data/ControlParms/Recon/'):
    """Read a binary float32 parameter file, plot it, and dump it as text.

    Args:
        filename: Name of the file inside *path2mrfn*.
        path2mrfn: Directory holding the binary input.  Kept as a default so
            existing one-argument callers are unchanged (previously this path
            was hard-coded).

    Side effects:
        Plots the data on the current matplotlib axes and writes
        'new_<filename>' (text format) into the working directory.
    """
    # offset=1 skips a single leading byte before the float32 payload.
    # FIX: renamed the local from `file`, which shadowed the builtin.
    data = np.fromfile(path2mrfn+filename, dtype='f', offset=1)
    plt.plot(data)
    np.savetxt('new_'+filename, data)
    print('File '+ 'new_'+filename +' written')
|
import unittest
from pymongo import ReadPreference
from mongoengine.python_support import IS_PYMONGO_3
# pymongo >= 3 folded replica-set support into MongoClient; older drivers used
# a dedicated ReplicaSetConnection class with different read-preference
# constants.  Pick the matching pair for the installed driver version.
if IS_PYMONGO_3:
    from pymongo import MongoClient
    CONN_CLASS = MongoClient
    READ_PREF = ReadPreference.SECONDARY
else:
    from pymongo import ReplicaSetConnection
    CONN_CLASS = ReplicaSetConnection
    READ_PREF = ReadPreference.SECONDARY_ONLY
import mongoengine
from mongoengine import *
from mongoengine.connection import MongoEngineConnectionError
class ConnectionTest(unittest.TestCase):
    """Connection behaviour that requires a live replica set named "rs"."""

    def setUp(self):
        # Reset mongoengine's module-level connection registries so each test
        # starts from a clean slate.
        mongoengine.connection._connection_settings = {}
        mongoengine.connection._connections = {}
        mongoengine.connection._dbs = {}

    def tearDown(self):
        # Same cleanup after each test, in case it registered connections.
        mongoengine.connection._connection_settings = {}
        mongoengine.connection._connections = {}
        mongoengine.connection._dbs = {}

    def test_replicaset_uri_passes_read_preference(self):
        """Requires a replica set called "rs" on port 27017
        """
        try:
            conn = connect(db='mongoenginetest',
                           host="mongodb://localhost/mongoenginetest?replicaSet=rs",
                           read_preference=READ_PREF)
        except MongoEngineConnectionError as e:
            # No replica set reachable locally: silently skip the check.
            return
        if not isinstance(conn, CONN_CLASS):
            # really???
            return
        self.assertEqual(conn.read_preference, READ_PREF)

if __name__ == '__main__':
    unittest.main()
|
import socket
HEADER_SIZE = 10  # fixed-width length prefix (digits) on each message

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((socket.gethostname(), 8086))

# Receive one length-prefixed text message in 16-byte chunks.
full_msg = ""
new_msg = True
while True:
    msg = s.recv(16)
    if new_msg:
        # The first HEADER_SIZE bytes carry the payload length.
        print(f"new message length: {msg[:HEADER_SIZE]}")
        msglen = int(msg[:HEADER_SIZE])
        new_msg = False
    full_msg += msg.decode("utf-8")
    if len(full_msg)-HEADER_SIZE == msglen:
        # Full payload received: print it without the header and stop.
        print(full_msg[HEADER_SIZE:])
        new_msg = True
        break

# Upload a local file to the server.
# NOTE(review): only a single read of 1024 bytes is sent, so larger files are
# silently truncated -- confirm whether a send loop was intended.
filename = input(str("enter file to be transfer: "))
file = open(filename, 'rb')
file_data = file.read(1024)
s.send(file_data)
print("Data file transfered successfully!")
file.close()
s.close()
|
# Variables and Names (exercise script)
# define cars and assign 100
cars = 100
# define space_in_a_car (seats per car) and assign 4.0
space_in_a_car = 4.0
# define drivers and assign 30
drivers = 30
# define passangers (sic -- passengers) and assign 90
passangers = 90
# cars left parked = total cars minus available drivers
cars_not_driven = cars - drivers
# each driver takes exactly one car
cars_driven = drivers
# total seats available across the cars actually driven
carpool_capacity = cars_driven * space_in_a_car
# average passengers per driven car
average_passengers_per_car = passangers / cars_driven
# print out available cars
print("There are", cars, "cars available.")
# print out the number of drivers
print("There are only", drivers, "drivers available.")
# print out empty cars
print("There will be", cars_not_driven, "empty cars today.")
# print out carpool capacity
print("We can transport", carpool_capacity, "people today.")
# print out how many passengers we have today
print("We have", passangers, "to carpool today.")
# print out how many passengers go in each car
print("We need to put about", average_passengers_per_car, "in each car.")
|
from random import randint
"""
Rock Paper Scissors Game Implementation
Rock smashes scissors
Scissors cuts paper
Paper covers rock
"""
"""
improvement suggestions
get rid of the numbers, just use the strings directly and keep score of wins
"""
def name_to_number(name):
    """
    Map a choice name to its number: rock -> 0, paper -> 1, scissors -> 2.
    Any unrecognised name maps to 3 (invalid).
    """
    mapping = {'rock': 0, 'paper': 1, 'scissors': 2}
    return mapping.get(name, 3)
def number_to_name(number):
    """
    Map 0/1/2 back to 'rock'/'paper'/'scissors'.
    Any other number maps to 'invalid choice'.
    """
    names = {0: 'rock', 1: 'paper', 2: 'scissors'}
    return names.get(number, 'invalid choice')
def player_choice():
    """
    Prompt the human until they type rock/paper/scissors and return the
    corresponding number (0 rock, 1 paper, 2 scissors).
    """
    # NOTE(review): `done` never changes; the loop only exits via return.
    done = True
    while done:
        choice = input("Please choose rock, paper or scissors: ")
        if choice !='rock' and choice != 'paper' and choice !='scissors':
            print("Invalid choice, please choose either rock, paper or scissors")
        else:
            return name_to_number(choice)
def computer_choice():
    """Pick the computer's move uniformly at random: 0 (rock), 1 (paper) or 2 (scissors)."""
    pick = randint(0, 2)
    return pick
def play_game(player1 , player2):
    """Play one round: *player1* is the prompted human, *player2* the computer.

    Returns 0 when player1 wins, 1 when player2 wins, 2 for a tie.
    """
    player1choice = player_choice()
    player2choice = computer_choice()
    print(player1 + ' choses ' + number_to_name(player1choice) )
    print(player2 + ' choses ' + number_to_name(player2choice) )
    # 0 = rock, 1 = paper, 2 = scissors: enumerate the winning combinations.
    if player1choice == 0 and player2choice == 2:
        print("Rock smashes scissors, " + player1+ ' ' + 'wins')
        return 0
    elif player1choice == 2 and player2choice == 1:
        print("Scissors cuts paper, " + player1 + ' ' +'wins')
        return 0
    elif player1choice == 1 and player2choice == 0:
        print("Paper covers rock, "+ player1+ ' ' + 'wins')
        return 0
    elif player2choice == 0 and player1choice == 2:
        # NOTE(review): `+ ''` joins the name and 'wins' without a space here,
        # unlike every other branch -- probably a typo in the message.
        print("Rock smashes scissors, " + player2+ '' + 'wins')
        return 1
    elif player2choice == 2 and player1choice == 1:
        print("Scissors cuts paper, "+ player2+ ' ' +'wins')
        return 1
    elif player2choice == 1 and player1choice == 0:
        print("Paper covers rock, " + player2 + ' ' + 'wins')
        return 1
    else:
        print("There is a tie, try again")
        return 2
# Interactive match loop: play rounds and tally wins until the user enters
# anything other than 'Y'.
player_total = 0
computer_total = 0
continue_game ='Y'
while continue_game == 'Y':
    game_result = play_game(player1= 'Player', player2 = 'Computer')
    if game_result == 0:
        player_total = player_total + 1
    elif game_result == 1:
        computer_total = computer_total + 1
    print("Scores")
    print("-----------")
    print("Human Score: {} | Computer Score: {} ".format(player_total, computer_total))
    continue_game = input('Enter Y to continue or any key to quit: ')
print("Scores are: Human: {} | Computer: {}". format(player_total, computer_total))
print("Thanks for playing!" )
|
from spider_lib import log_print
import threading
import spider_lib
class DownloadThread(threading.Thread):
    '''Worker thread that downloads the image for a single task.

    Attributes:
        thread_name: thread name, used to tell threads apart in log output
        download_manager: the manager that owns this thread; notified on completion
        task: the download task (provides .name and .url)
    '''
    def __init__(self, thread_name, download_manager, task):
        threading.Thread.__init__(self)
        self.thread_name = thread_name
        self.download_manager = download_manager
        self.task = task
        log_print('%s 初始化完成' % self.thread_name)

    def start_download(self):
        '''Download the task's image, then notify the download manager.

        :return: None
        '''
        log_print('%s 开始线程 URL: %s' % (self.thread_name, self.task.url))
        spider_lib.download_img(self.task.name, self.task.url)
        log_print('%s 线程结束 %s' % (self.thread_name, self.task.name))
        log_print('<<<<')
        # Tell the manager this task is done so it can schedule the next one.
        self.download_manager.task_complete()

    def run(self):
        '''Thread entry point.

        :return: None
        '''
        self.start_download()
|
# Exercise 03
def parEimpar(array):
    """Split *array* into (evens, odds), preserving order, returned as a tuple."""
    par = [n for n in array if n % 2 == 0]
    impar = [n for n in array if n % 2 != 0]
    return (par, impar)

array = [1, 2, 3, 4, 5, 6, 7]
print(parEimpar(array))
|
# chat/views.py
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.utils.safestring import mark_safe
import json
from django.shortcuts import get_object_or_404
from django.contrib.auth import get_user_model
User = get_user_model()
@login_required
def room(request):
    """Render the chat room page; an optional ?to=<username> query parameter
    targets a specific peer user (404 when that user does not exist)."""
    to_username = request.GET.get('to')
    if to_username is None:
        context = {
            'username': mark_safe(json.dumps(request.user.username)),
        }
    else:
        # Lookup exists only to 404 on unknown users; the instance is unused.
        user = get_object_or_404(User, username=to_username)
        context = {
            'to_username': mark_safe(json.dumps(to_username)),
            'username': mark_safe(json.dumps(request.user.username)),
        }
    return render(request, 'chat/room.html', context=context)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 4 22:02:20 2017
@author: cvpr
Sort and save weighted patches
"""
import cv2
import numpy as np
import os
prewitt_img_path = '../data/Imageset/prewitt_images/' #path to gradient image
saliency_img_path = '../data/Imageset/saliency_images/' #path to saliency image
file_path = '../patches_weight.txt'

# Process the images in a stable (sorted) order.
img_name = os.listdir(prewitt_img_path)
img_name.sort()

f_ptr = open(file_path, 'wt')
for i in img_name:
    patch_gradient = []
    patch_saliency = []
    weight = []
    # Load the gradient image and its matching saliency map as grayscale.
    prewitt_img = cv2.imread(prewitt_img_path + i, 0)
    saliency_img = cv2.imread(saliency_img_path + i[0:6] + '_HC.bmp', 0)
    height = prewitt_img.shape[0]
    width = prewitt_img.shape[1]
    # calculate weight per non-overlapping 32x32 patch
    # FIX: floor division (//) so range() gets an int on Python 3 -- the old
    # `height/32` yields a float there and raises TypeError.  `//` matches the
    # Python 2 behaviour of `/` for these non-negative ints, so nothing
    # changes under Python 2.
    for m in range(height // 32):
        for n in range(width // 32):
            prewitt_img_crop = prewitt_img[m*32:(m+1)*32, n*32:(n+1)*32]
            saliency_img_crop = saliency_img[m*32:(m+1)*32, n*32:(n+1)*32]
            prewitt_sum = np.sum(prewitt_img_crop)/255
            patch_gradient.append(prewitt_sum)
            saliency_sum = np.sum(saliency_img_crop)/255
            patch_saliency.append(saliency_sum)
            # [weight, patch index]: gradient weighted 0.6, saliency 0.4.
            patch_weight = [prewitt_sum*0.6+saliency_sum*0.4, m*(width//32)+n] #[weight, number of patches]
            weight.append(patch_weight)
    # Highest-weight patches first.
    weight.sort(reverse=True)
    label = [weight[index][1] for index in range(len(weight))] #get patches label
    # write the ranked patch labels; strip '[' and ']' and drop the commas
    f_ptr.write('{:s} {:s}\n'.format(i, str(label).strip('[').strip(']').replace(',', '')))
f_ptr.close()
|
class Solution():
    def k_largest_v1(self, numbers, k):
        """
        Return the k largest elements by sorting descending -- O(n log n).
        NOTE: sorts *numbers* in place as a side effect (kept from the
        original implementation).
        """
        numbers.sort(reverse=True)
        return numbers[:k]

    def k_largest_v2(self, numbers, k):
        """
        Return the k largest elements (descending) using a heap -- O(n log k),
        and without mutating *numbers*.
        FIX: this method was a broken/empty stub; implemented here.
        """
        import heapq  # local import: this module has no import section
        return heapq.nlargest(k, numbers)

numbers = [7, 9, 10, 2, 35, 90, 23, 0, 3, 5, 20]
print(Solution().k_largest_v1(numbers, 3))
from django.contrib import admin
from .models import *
class Messageadmin(admin.ModelAdmin):
    """Admin configuration for chat messages."""
    list_display = ('author','timestamp')
    list_filter = ('author', 'timestamp')
    list_display_links = ('author', 'timestamp')
admin.site.register(Message,Messageadmin)

class ChatGroupAdmin(admin.ModelAdmin):
    """Admin configuration for chat groups."""
    list_display = ('id', 'name', 'description', 'mute_notifications', 'date_created', 'date_modified')
    list_filter = ('id', 'name', 'description', 'mute_notifications', 'date_created', 'date_modified')
    list_display_links = ('name',)
admin.site.register(ChatGroup, ChatGroupAdmin)

class ProfileAdmin(admin.ModelAdmin):
    """Admin configuration for user profiles."""
    list_display = ('id', 'user','status')
    list_filter = ('id', 'user','status')
admin.site.register(UserProfile,ProfileAdmin)
|
import random as rn

# One slot per multiple of 7 in [100, 500); each slot receives an independent
# random integer drawn uniformly from [100, 500].
sevens = [i for i in range(100, 500) if i % 7 == 0]
nums = [rn.randint(100, 500) for _ in sevens]
print(nums)
from collections import OrderedDict
from itertools import islice
from uuid import uuid4
from django.utils.functional import cached_property
from six import iteritems
from .exceptions import ValidationError
__all__ = [
'cached_property',
'cached_property_ignore_set',
'class_property',
'short_guid',
'unspecified',
'uuid4',
'validate_type',
'AttributeDict',
'OrderedIndexedTransformDict',
]
JSON_LEN = len('.json')
_SENTINEL = object()
class cached_property_ignore_set(cached_property):
    """cached_property variant that silently ignores assignment, so the cached
    value cannot be replaced via `obj.attr = value`."""
    def __set__(self, instance, value):
        pass
class class_property(object):
    """A read-only descriptor whose getter also works when accessed on the
    class itself (not just on instances)."""

    def __init__(self, fget=None):
        # Wrap plain functions in classmethod so __get__ binds to the class.
        if fget is not None and not isinstance(fget, classmethod):
            fget = classmethod(fget)
        self.fget = fget

    def __get__(self, instance, instance_type=None):
        bound = self.fget.__get__(instance, instance_type)
        return bound()
def short_guid():
    """Return the middle three dash-separated groups of a fresh UUID4
    (a 14-character 'xxxx-xxxx-xxxx' string)."""
    groups = str(uuid4()).split('-')
    return '-'.join(groups[1:4])
unspecified = object()
def validate_type(value, model):
    """Raise ValidationError unless *value* is an instance of *model*."""
    if isinstance(value, model):
        return
    raise ValidationError(
        "'{!r}' is not an instance of type '{}'".format(
            value, model.__name__))
def unwrap_envelopes(data, many, pk_field, remove_key):
    """Inverse of wrap_envelopes: turn a {pk: obj} mapping back into objects
    carrying their pk on *pk_field*.

    Raises ValidationError when (remove_key is False and) an object already
    holds a pk_field value that disagrees with its envelope key.  Returns a
    single object when many is False and exactly one object was unwrapped;
    otherwise returns the list.
    """
    unwrapped = []
    for pk, obj in iteritems(data):
        if not remove_key:
            try:
                if obj[pk_field] != pk:
                    raise ValidationError(
                        u"Envelope id does not match value of primary key "
                        u"field")
            except KeyError:
                # pk_field absent on the object: nothing to cross-check.
                pass
        obj[pk_field] = pk
        unwrapped.append(obj)
    if not many and len(unwrapped) == 1:
        return unwrapped[0]
    return unwrapped
def wrap_envelopes(data, many, pk_field, remove_key):
    """Key each object by its primary key, optionally stripping the pk field.

    Args:
        data: One object (many=False) or a list of objects.
        many: Whether *data* is a list.
        pk_field: Name of the primary-key field on each object.
        remove_key: When True, delete pk_field from each wrapped object.

    Returns:
        OrderedDict mapping pk -> object, in input order.
    """
    objects = data if many else [data]
    wrapped = OrderedDict()
    for obj in objects:
        pk = obj[pk_field]
        if remove_key:
            del obj[pk_field]
        wrapped[pk] = obj
    return wrapped
class AttributeDict(dict):
    """dict whose keys can also be read as attributes (d.x == d['x'])."""

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, so real attributes
        # and dict methods keep their usual behaviour.
        if name in self:
            return self[name]
        raise AttributeError(
            u"'{}' object has no attribute '{}'".format(
                self.__class__.__name__, name))
def strip_json(fname):
    """Drop a trailing '.json' extension from *fname*, if present."""
    suffix = '.json'
    if fname.endswith(suffix):
        return fname[:-len(suffix)]
    return fname
class OrderedIndexedTransformDict(object):
    """Ordered mapping from transform(key) to a dense positional index.

    Keys are canonicalised through *transform* before storage.  The stored
    value for each key is its position (0..len-1), and the class keeps those
    indices dense across deletions, inserts and replaces.
    NOTE(review): __setitem__ accepts a *value* argument but ignores it; the
    index is always stored instead -- confirm this asymmetry is intended.
    """
    # No per-instance __dict__: only the transform and the backing OrderedDict.
    __slots__ = ('_transform', '_data')

    def __init__(self, transform, init_dict=None, **kwargs):
        if not callable(transform):
            raise TypeError('expected callable, got %r' % transform.__class__)
        self._transform = transform
        self._data = OrderedDict()
        if init_dict:
            self.update(init_dict)
        if kwargs:
            self.update(kwargs)

    def getitem(self, key):
        # Return the (transformed_key, stored_index) pair for *key*.
        transformed = self._transform(key)
        value = self._data[transformed]
        return transformed, value

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        # Iterates the transformed keys in insertion order.
        return iter(self._data.keys())

    def __getitem__(self, key):
        # Accept either an already-transformed or an untransformed key.
        try:
            return self._data[key]
        except KeyError:
            return self._data[self._transform(key)]

    def __setitem__(self, key, value=None):
        # New keys are appended with index len(self._data); *value* is unused.
        transformed = self._transform(key)
        self._data[transformed] = len(self._data)

    def __delitem__(self, key):
        if hasattr(key, 'indices'):
            # *key* is a slice: bulk-delete that range of positions.
            data_len = len(self._data)
            indices = key.indices(len(self))
            if data_len <= len(range(*indices)):
                # The slice covers everything: just reset.
                self._data = OrderedDict()
                return
            keys_to_remove = list(islice(self._data.keys(), *indices))
            for key in keys_to_remove:
                del self._data[key]
            # Re-number the surviving keys densely.
            self._data = OrderedDict((k, i) for i, k in enumerate(self._data))
        else:
            transformed = self._transform(key)
            # Shift later indices down by one, then drop the entry itself.
            self._remove(transformed)
            del self._data[transformed]

    def clear(self):
        self._data.clear()

    def __contains__(self, key):
        return self._transform(key) in self._data

    def get(self, key, default=None):
        return self._data.get(self._transform(key), default)

    def pop(self, key, default=_SENTINEL):
        transformed = self._transform(key)
        # _remove is a no-op for absent keys, so indices stay intact when
        # pop() falls through to the default (or raises KeyError).
        self._remove(transformed)
        if default is _SENTINEL:
            return self._data.pop(transformed)
        else:
            return self._data.pop(transformed, default)

    def items(self):
        # (transformed_key, index) pairs in insertion order.
        return self._data.items()

    def update(self, value, **kws):
        # NOTE(review): delegates straight to OrderedDict.update -- the
        # mapping's values are stored as-is (no index assignment) and keys are
        # NOT passed through self._transform; confirm intended.
        self._data.update(value, **kws)

    def insert(self, index, value):
        # Insert *value* at position *index*, shifting later indices up by one.
        data = self._data
        for k, v in data.items():
            if v >= index:
                data[k] += 1
        data[value] = index

    def replace(self, key, value):
        # Rename *key* to *value* (both transformed), keeping its index.
        key = self._transform(key)
        value = self._transform(value)
        if key == value:
            return
        self._data = OrderedDict((value if k == key else k, i)
                                 for k, i in self._data.items())

    def _remove(self, key):
        # Decrement the index of every entry positioned after *key*.
        data = self._data
        if key not in data:
            return
        index = data[key]
        for key, value in data.items():
            if value > index:
                data[key] = value - 1

    def popitem(self):
        # Pops the most recently inserted (transformed_key, index) pair.
        # NOTE(review): indices of remaining entries are NOT re-numbered here.
        transformed, value = self._data.popitem()
        return transformed, value

    def copy(self):
        other = self.__class__(self._transform)
        other._data = self._data.copy()
        return other

    __copy__ = copy

    def __getstate__(self):
        return self._transform, self._data

    def __setstate__(self, state):
        self._transform, self._data = state

    def __repr__(self):
        try:
            equiv = dict(self)
        except TypeError:
            # Some keys are unhashable, fall back on .items()
            equiv = list(self.items())
        return '{}({!r}, {})'.format(self.__class__.__name__,
                                     self._transform, equiv)
|
import os, sys
from pkgutil import iter_modules, importlib
from inspect import getmembers
# sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import sys
sys.path.append(".")
from CoreLib.AbsHandler import AbsHandler
from importlib.util import spec_from_file_location, module_from_spec
def load_handlers(path_to_folder, abs_class = AbsHandler):
    """Scan the immediate sub-folders of *path_to_folder* for modules that
    define subclasses of *abs_class* and return instances of them.

    Args:
        path_to_folder: folder whose sub-directories contain handler modules.
        abs_class: base class a member must subclass to count as a handler.

    Returns:
        Dict mapping handler class name -> a fresh instance of that class.
    """
    items_in = os.listdir(path_to_folder)
    hanlders_folders = [ ]
    for f in items_in:
        f_path = os.path.join(path_to_folder, f)
        # Keep real sub-directories only, skipping dunder folders like __pycache__.
        if os.path.isdir(f_path) and "__" not in f: hanlders_folders.append(f_path)
    installed_handlers={}
    for (importer, modname, ispkg) in iter_modules(hanlders_folders):
        # NOTE(review): find_module()/load_module() are deprecated in modern
        # Python in favour of find_spec()/exec_module -- consider migrating.
        mod = importer.find_module(modname).load_module(modname)
        for member_name, member in getmembers(mod):
            if member_name.startswith('__'): continue
            # Instantiate every concrete subclass of abs_class (not the base itself).
            if isinstance(member, type) and issubclass(member, abs_class) and not member==AbsHandler:
                installed_handlers[member_name] = member()
    return installed_handlers
def load_module(module_name, module_abs_path, abs_class = AbsHandler):
    """Load one module from an absolute file path and return an instance of
    the first *abs_class* subclass found inside it.

    Returns None (implicitly) when the module defines no matching class.
    """
    import importlib.util
    spec = importlib.util.spec_from_file_location(module_name, module_abs_path)
    imp_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(imp_module)
    from inspect import getmembers, isclass
    for member_name, member in getmembers(imp_module):
        if isinstance(member, type) and issubclass(member, abs_class) and not member==AbsHandler:
            return member()
if __name__ == "__main__":
    # Simple REPL: type a folder to load handlers from; 'q' or 'quit' exits.
    while True:
        try:
            handling_params_string = input("Enter the folder: ")
            if handling_params_string=="quit" or handling_params_string=="q": break
            print(handling_params_string)
            hs = load_handlers(handling_params_string)
            print(hs)
        except Exception as ex:
            print("Exception happened: " + ex.__str__())
        except:
            # Catches non-Exception BaseExceptions as well.
            # NOTE(review): this also swallows KeyboardInterrupt, making Ctrl-C
            # unable to quit the loop -- confirm intended.
            print("Fatal exception")
import asyncio
import functools
import re
from .authentication import authenticate, initialize as initialize_authentication
from .configuration import settings
from .logging import getlogger
from .queuemanager import getqueue, dispatcher, AlreadySubscribedError, NotSubscribedError, \
unsubscribe_all, subscribe, unsubscribe
logger = getlogger('PROTO')
class ServerProtocol(asyncio.Protocol):
    """Semicolon-delimited message-queue wire protocol.

    Clients must first authenticate with `LOGIN <credentials>;`; afterwards
    they may PUSH messages into queues, PULL (subscribe) and IGNORE
    (unsubscribe).  Incomplete command bytes are buffered in `chunk` between
    data_received calls.
    """

    identity = None   # authenticated identity; None until login succeeds
    transport = None  # asyncio transport for this connection
    chunk = None      # buffered, not-yet-terminated command bytes
    peername = None   # remote address, for logging

    class Patterns:
        # Pre-compiled command patterns (case-insensitive, DOTALL).
        regex = functools.partial(re.compile, flags=re.DOTALL + re.IGNORECASE)
        login = regex(b'^LOGIN (?P<credentials>.+)$')
        push = regex(b'^PUSH (?P<message>.+)(?:\s|\n)INTO (?P<queue>[0-9a-zA-Z\._:-]+)$')
        pull = regex(b'^PULL FROM (?P<queue>[0-9a-zA-Z\._:-]+)$')
        ignore = regex(b'^IGNORE (?P<queue>[0-9a-zA-Z\._:-]+)$')

    def connection_made(self, transport):
        self.peername = transport.get_extra_info('peername')
        logger.info(f'Connection from {self.peername}')
        self.transport = transport

    def connection_lost(self, exc):
        logger.info(f'Connection lost: {self.peername}')

    def eof_received(self):
        # Client went away: drop all of its subscriptions and close the socket.
        logger.debug(f'EOF Received: {self.peername}')
        unsubscribe_all(self)
        self.transport.close()

    def data_received(self, data):
        logger.debug(f'Data received: {data.strip()}')
        if self.chunk:
            data = self.chunk + data
        if self.identity is None:
            # Not authenticated yet: the first `;` terminates the credentials.
            if b';' not in data:
                data = data.rstrip(b'\x00')
                self.chunk = data
                return
            credentials, self.chunk = data.split(b';', 1)
            # Suspending all other commands before authentication
            self.transport.pause_reading()
            # Scheduling a login task, if everything went ok, then the
            # resume_reading will be called in the future.
            asyncio.ensure_future(self.login(credentials.strip()))
            return
        # Splitting the received data on `;` and adding buffered chunk if available
        lines = data.split(b';')
        # split() strips the separators, so the final element is always the
        # unterminated remainder (possibly b''); buffer it for the next call.
        if not lines[-1].endswith(b';'):
            self.chunk = lines.pop()
        # Exiting if there is no command to process
        if not lines:
            return
        for command in lines:
            command = command.strip()
            if command:
                asyncio.ensure_future(self.process_command(command))

    async def login(self, credentials):
        """Authenticate the peer; greet and resume reading on success."""
        logger.info(f'Authenticating: {self.peername}')
        m = self.Patterns.login.match(credentials)
        if m is None:
            await self.login_failed(credentials)
            return
        credentials = m.groupdict()['credentials']
        self.identity = await authenticate(credentials)
        if self.identity is None:
            await self.login_failed(credentials)
            return
        logger.info(f'Login success: {self.identity} from {self.peername}')
        self.transport.write(b'HI %s;\n' % self.identity.encode())
        self.transport.resume_reading()
        # Re-run command processing for anything buffered while paused.
        self.data_received(b';')

    async def login_failed(self, credentials):
        """Report a failed login and drop the connection."""
        logger.info(
            f'Login failed for {self.peername} with credentials: {credentials}, Closing socket.'
        )
        self.transport.write(b'LOGIN FAILED\n')
        self.transport.close()

    async def push(self, message, queue):
        """Append *message* to *queue*, reporting overflow to the client."""
        try:
            getqueue(queue).push(message)
        except asyncio.QueueFull:
            # FIX: was `self.logger` and `self.name`, neither of which exists
            # on this class (AttributeError); use the module logger and the
            # actual queue name instead.
            logger.warning(f'Queue is full: {queue}')
            # FIX: bytes %-formatting requires bytes operands; the previous
            # `queue.decode()` (a str) raised TypeError at runtime.
            self.transport.write(b'ERROR: QUEUE %s IS FULL;\n' % queue)

    async def pull(self, queue):
        """Subscribe this connection to *queue*."""
        try:
            subscribe(queue, self)
        except AlreadySubscribedError:
            self.transport.write(b'ERROR: QUEUE %s IS ALREADY SUBSCRIBED;\n' % queue)

    async def ignore(self, queue):
        """Unsubscribe this connection from *queue*."""
        try:
            unsubscribe(queue, self)
        except NotSubscribedError:
            self.transport.write(b'ERROR: QUEUE %s IS NOT SUBSCRIBED;\n' % queue)

    async def process_command(self, command):
        """Dispatch one complete command to push/pull/ignore."""
        m = self.Patterns.push.match(command)
        if m is not None:
            return await self.push(**m.groupdict())
        m = self.Patterns.pull.match(command)
        if m is not None:
            return await self.pull(**m.groupdict())
        m = self.Patterns.ignore.match(command)
        if m is not None:
            return await self.ignore(**m.groupdict())
        logger.debug(f'Invalid command: {command}')
        self.transport.write(b'ERROR: Invalid command: %s;\n' % command)

    async def dispatch(self, queue, message):
        """Deliver a queued message to this subscriber."""
        self.transport.write(b'MESSAGE %s FROM %s;\n' % (message, queue))
def create_dispatchers(workers=1, **kwargs):
    """Create *workers* dispatcher tasks and return a future gathering them.

    *kwargs* are forwarded to each ``dispatcher`` coroutine.

    BUG FIX: the original gathered an empty tuple (``asyncio.gather(*())``),
    so despite the log line no dispatcher was ever started. This mirrors the
    pattern used by ``Server.start``.
    """
    logger.info(f'Creating {workers} dispatchers')
    return asyncio.gather(
        *(dispatcher('WORKER %d' % i, **kwargs) for i in range(workers))
    )
class Server:
    """Owns the listening asyncio server and the queue-dispatcher tasks."""

    _server = None
    _dispatchers_task = None

    def __init__(self, bind=None, loop=None):
        self.loop = loop or asyncio.get_event_loop()
        self.logger = getlogger('SERVER')
        # Host and Port to listen; a bare port means "all interfaces".
        bind = bind or settings.bind
        self.host, self.port = bind.split(':') if ':' in bind else ('', bind)
        # Configuring the authenticator
        initialize_authentication()

    async def start(self):
        """Start listening and launch the configured dispatcher workers."""
        self._server = await self.loop.create_server(ServerProtocol, self.host, self.port)
        self._dispatchers_task = self.loop.create_task(asyncio.gather(
            *(dispatcher('WORKER %d' % i, **settings.dispatcher)
              for i in range(settings.dispatchers))
        ))

    async def close(self):
        """Stop listening and wait for the dispatchers to finish cancelling."""
        self.logger.info('Shutting down...')
        self._dispatchers_task.cancel()
        self._server.close()
        await self._server.wait_closed()
        # BUG FIX: the original `while self._dispatchers_task.cancelled():
        # await asyncio.sleep(.2)` either exited immediately (task not yet
        # done) or spun forever once cancellation completed, because
        # Task.cancelled() stays True after the task finishes. Awaiting the
        # task is the correct way to wait for cancellation.
        try:
            await self._dispatchers_task
        except asyncio.CancelledError:
            pass

    @property
    def address(self):
        # (host, port) tuple the underlying socket is actually bound to.
        return self._server.sockets[0].getsockname()
|
'''
Setup for 2l/3l/4l ttV selections.
Can use different lepton IDs, cuts etc for different channels.
Still missing:
- most of the 2l channel cuts/selections
- additional cuts for 4l channel (dl mass etc)
'''
#Standard import
import copy
# RootTools
from RootTools.core.standard import *
# Logging
import logging
logger = logging.getLogger(__name__)
#user specific
from TopEFT.Tools.user import analysis_results
from TopEFT.Tools.helpers import getObjFromFile
##define samples
# 2016
from TopEFT.samples.cmgTuples_Data25ns_80X_07Aug17_postProcessed import *
from TopEFT.samples.cmgTuples_Summer16_mAODv2_postProcessed import *
# 2017
from TopEFT.samples.cmgTuples_Data25ns_94X_Run2017_postProcessed import *
from TopEFT.samples.cmgTuples_Fall17_94X_mAODv2_postProcessed import *
from TopEFT.Analysis.SystematicEstimator import jmeVariations, metVariations
from TopEFT.Analysis.SetupHelpers import getZCut, channel, trilepChannels, quadlepChannels
from TopEFT.Tools.objectSelection import getFilterCut
from TopEFT.Tools.triggerSelector import triggerSelector
from TopEFT.Analysis.regions import *
from TopEFT.Tools.cutInterpreter import *
#to run on data
# Integrated luminosities per era (units presumably 1/pb -- TODO confirm).
dataLumi2016 = 35922
dataLumi2017 = 41530
#dataLumi2016 = 35900
#dataLumi2017 = 41900
dataLumi201617 = dataLumi2016 + dataLumi2017
dataLumi20161718 = 150000
dataHighLumi = 3e6

#10/fb to run on MC
#lumi = {c:10000 for c in channels}
#lumi = dataLumi201678
lumi = dataLumi2016

#Define defaults here
zMassRange = 10          # Z-mass window size handed to getZCut (GeV; presumably half-width -- confirm)
default_mllMin = 12      # minimum dilepton mass (GeV)
default_zWindow1 = "onZ"
default_zWindow2 = "offZ"
default_nJets = (3, -1)  # written as (min, max); max < 0 means open-ended
default_nBTags = (1, -1)
default_metMin = 0

#default_sys = {'weight':'weight', 'reweight':['reweightPU36fb', 'reweightBTagDeepCSV_SF', 'reweightTrigger', 'reweightLeptonTrackingSF'], 'selectionModifier':None}

# Default cut parameters consumed by Setup.selection().
default_parameters = {
    'mllMin': default_mllMin,
    'metMin': default_metMin,
    'zWindow1': default_zWindow1,
    'zWindow2': default_zWindow2,
    'zMassRange': zMassRange,
    'nJets': default_nJets,
    'nBTags': default_nBTags,
}
class Setup:
    """Event-selection setup for the 2l/3l/4l ttV analyses.

    Bundles MC/data samples, luminosity, lepton IDs, reweighting defaults
    and cut parameters for a given data-taking year. ``selection()`` turns
    the parameters into a TTree cut string; ``systematicClone()`` derives
    variated copies for systematic uncertainties.
    """

    def __init__(self, year=2017, nLeptons=3, nonprompt=False):
        self.name = "defaultSetup"
        self.channels = [channel(-1,-1)]
        self.resultsFile= 'calculatedLimits_%s.db'%self.name
        self.year = year
        self.nLeptons = nLeptons
        self.short = False
        # Tight and fakeable-object (FO) lepton ID names per multiplicity.
        if nLeptons == 1:
            self.tight_ID = "tight_3l" # for parton shower study
            self.FO_ID = "FO_3l"
        elif nLeptons == 2:
            self.tight_ID = "tight_SS"
            self.FO_ID = "FO_SS"
        elif nLeptons == 3:
            self.tight_ID = "tight_3l"
            self.FO_ID = "FO_3l"
        elif nLeptons == 4:
            self.tight_ID = "tight_4l"
            self.FO_ID = "FO_4l"
        else:
            raise NotImplementedError("Can't handle 0,1,5,.. lepton cases")
        self.nonprompt = nonprompt
        # The nonprompt estimation runs on the looser FO ID instead of tight.
        self.leptonId = self.FO_ID if self.nonprompt else self.tight_ID
        self.default_sys = {'weight':'weight', 'reweight':['reweightPU36fb', 'reweightBTagDeepCSV_SF'], 'selectionModifier':None} # 'reweightTrigger_%s'%self.leptonId, 'reweightLeptonTrackingSF_%s'%self.leptonId
        if nLeptons == 1:
            # no trigger/lepton reweighting
            pass
        #elif nLeptons == 3:
        #    self.default_sys['reweight'] += ['reweightTrigger_tight_3l', 'reweightLeptonSF_tight_3l']
        #    if self.year == 2017: #in 2016 already included in leptonSF
        #        self.default_sys['reweight'] += ['reweightLeptonTrackingSF_tight_3l']
        #elif nLeptons == 4:
        #    self.default_sys['reweight'] += ['reweightTrigger_tight_4l', 'reweightLeptonSF_tight_4l']
        #    if self.year == 2017: #in 2016 already included in leptonSF
        #        self.default_sys['reweight'] += ['reweightLeptonTrackingSF_tight_4l']
        elif nLeptons == 3:
            self.default_sys['reweight'] += ['reweightTrigger_tight_3l', 'reweightLeptonSFSyst_tight_3l', 'reweightEleSFStat_tight_3l', 'reweightMuSFStat_tight_3l', 'reweightLeptonTrackingSF_tight_3l']
        elif nLeptons == 4:
            self.default_sys['reweight'] += ['reweightTrigger_tight_4l', 'reweightLeptonSFSyst_tight_4l', 'reweightEleSFStat_tight_4l', 'reweightMuSFStat_tight_4l', 'reweightLeptonTrackingSF_tight_4l']
        self.resultsColumns = ['signal', 'exp', 'obs', 'exp1up', 'exp1down', 'exp2up', 'exp2down', 'NLL_prefit', 'dNLL_postfit_r1', 'dNLL_bestfit']
        self.uncertaintyColumns = ["region", "channel", "PDFset"]
        self.analysis_results = analysis_results
        self.prefixes = []
        self.externalCuts = []
        #Default cuts and requirements. Those three things below are used to determine the key in the cache!
        self.parameters = copy.deepcopy(default_parameters)
        self.sys = self.default_sys
        # NOTE(review): an unrecognized year falls through without setting
        # self.lumi/self.dataLumi -- confirm callers only pass listed years.
        if year == 2017:
            self.lumi = dataLumi2017
            self.dataLumi = dataLumi2017
        elif year == 2016:
            self.lumi = dataLumi2016
            self.dataLumi = dataLumi2016
        elif year == 20167:
            self.lumi = dataLumi201617
            self.dataLumi = dataLumi201617
        elif year == "run2":
            self.lumi = dataLumi20161718
            self.dataLumi = dataLumi20161718
        elif year == "HLLHC":
            self.lumi = dataHighLumi
            self.dataLumi = dataHighLumi
        self.genSelection = "Sum$(GenJet_pt>30)>=3&& abs(Z_mass-91.2)<10&&(abs(Z_daughterPdg)==11 || abs(Z_daughterPdg)==13 || abs(Z_daughterPdg)==15 )"
        self.WZselection = cutInterpreter.cutString('trilep-Zcand-onZ-lepSelTTZ-njet1p')
        # Data
        if year == 2017:
            data = Run2017
        else:
            data = Run2016
        # MC
        if year == 2017:
            TTZSample = TTZtoLLNuNu_17
            WZSample = WZ_amcatnlo_17
            TTXSample = TTX_17
            TTWSample = TTW_17
            TZQSample = TZQ_17
            ZGSample = ZGTo2LG
            XGSample = Xgamma
            ZZSample = ZZ_17
            rareSample = rare_17
            nonpromptSample = nonpromptMC_17
            pseudoDataSample = pseudoData_17
            ttbarSample = TTLep_pow_17
            WWZSample = WWZ_17
            WZZSample = WZZ_17
            ZZZSample = ZZZ_17
        else:
            ## use 2016 samples as default (we do combine on card file level)
            TTZSample = TTZtoLLNuNu
            WZSample = WZ_amcatnlo
            TTXSample = TTX
            TTWSample = TTW
            TZQSample = TZQ
            ZGSample = ZGTo2LG
            XGSample = Xgamma
            ZZSample = ZZ
            rareSample = rare
            nonpromptSample = nonpromptMC
            pseudoDataSample = pseudoData
            ttbarSample = TTLep_pow
            WWZSample = WWZ
            WZZSample = WZZ
            ZZZSample = ZZZ
        # removed the channel dependence.
        self.samples = {
            'TTZ': TTZSample,
            'WZ' : WZSample,
            'TTX' : TTXSample,
            'TTW' : TTWSample,
            'TZQ' : TZQSample,
            'ZG' : ZGSample,
            'XG' : XGSample,
            'rare': rareSample,
            'WWZ': WWZSample,
            'WZZ': WZZSample,
            'ZZZ': ZZZSample,
            'ZZ': ZZSample,
            'nonprompt': nonpromptSample,
            'ttbar': ttbarSample,
            'pseudoData': pseudoDataSample,
            'Data' : data,
        }

    def prefix(self):
        # Cache-key prefix: external modifiers plus the MC preselection prefix.
        return '_'.join(self.prefixes+[self.preselection('MC')['prefix']])

    def defaultCacheDir(self):
        # NOTE(review): `os` does not appear in this module's visible import
        # block -- confirm `import os` exists, else this raises NameError.
        return os.path.join(self.analysis_results, self.prefix(), 'cacheFiles')

    #Clone the setup and optinally modify the systematic variation
    def defaultClone(self):
        '''Clone setup and change systematics to default'''
        res = copy.copy(self)
        res.sys = copy.deepcopy(self.default_sys)
        res.parameters = copy.deepcopy(default_parameters)
        return res

    #Clone the setup and optinally modify the systematic variation
    def systematicClone(self, sys=None, parameters=None):
        '''Clone setup and change systematic if provided'''
        res = copy.copy(self)
        res.sys = copy.deepcopy(self.sys)
        res.parameters = copy.deepcopy(self.parameters)
        if sys:
            for k in sys.keys():
                if k=='remove':
                    for i in sys[k]:
                        res.sys['reweight'].remove(i)
                elif k=='reweight':
                    res.sys[k] = list(set(res.sys[k]+sys[k])) #Add with unique elements
                    # A varied weight replaces its nominal counterpart.
                    for upOrDown in ['Up','Down']:
                        if 'reweightPU36fb'+upOrDown in res.sys[k]: res.sys[k].remove('reweightPU36fb')
                        if 'reweightTrigger%s_%s'%(upOrDown, self.leptonId) in res.sys[k]: res.sys[k].remove('reweightTrigger_%s'%self.leptonId)
                        if 'reweightBTagDeepCSV_SF_b_'+upOrDown in res.sys[k]: res.sys[k].remove('reweightBTagDeepCSV_SF')
                        if 'reweightBTagDeepCSV_SF_l_'+upOrDown in res.sys[k]: res.sys[k].remove('reweightBTagDeepCSV_SF')
                        if 'reweightLeptonTrackingSF%s_%s'%(upOrDown, self.leptonId) in res.sys[k]: res.sys[k].remove('reweightLeptonTrackingSF_%s'%self.leptonId)
                        if 'reweightLeptonSF%s_%s'%(upOrDown, self.leptonId) in res.sys[k]: res.sys[k].remove('reweightLeptonSF_%s'%self.leptonId)
                else:
                    res.sys[k] = sys[k] # if sys[k] else res.sys[k]
        if parameters:
            for k in parameters.keys():
                res.parameters[k] = parameters[k]
        return res

    def defaultParameters(self, update={}):
        # The mutable default is never mutated here; it is only read.
        assert type(update)==type({}), "Update arguments with key arg dictionary. Got this: %r"%update
        res = copy.deepcopy(self.parameters)
        res.update(update)
        return res

    def weightString(self):
        # Multiplicative event weight: base weight times all reweight factors.
        return "*".join([self.sys['weight']] + (self.sys['reweight'] if self.sys['reweight'] else []))

    def preselection(self, dataMC , nElectrons=-1, nMuons=-1, isFastSim = False):
        '''Get preselection cutstring.'''
        return self.selection(dataMC, nElectrons=nElectrons, nMuons=nMuons, isFastSim = isFastSim, hadronicSelection = False, **self.parameters)

    def selection(self, dataMC,
                  mllMin, metMin, zWindow1, zWindow2, zMassRange,
                  nJets, nBTags,
                  nElectrons=-1, nMuons=-1,
                  hadronicSelection = False, isFastSim = False):
        '''Define full selection
        dataMC: 'Data' or 'MC'
        nElectrons, nMuons: Number of E and M. -1: all
        zWindow: offZ, onZ, or allZ
        hadronicSelection: whether to return only the hadronic selection
        isFastSim: adjust filter cut etc. for fastsim

        Returns a dict with 'cut' (the &&-joined cut string), 'prefix'
        (cache-key fragment) and 'weightStr'.
        '''
        nLeptons = self.nLeptons
        # Get the right channel and do sanity checks
        if nElectrons < 0 and nMuons < 0:
            # this should be the new "all" case. Just
            pass
        else:
            if nElectrons + nMuons != nLeptons: raise NotImplementedError("Electrons and Muons don't add up!")
        #Consistency checks
        if self.sys['selectionModifier']:
            assert self.sys['selectionModifier'] in jmeVariations+metVariations+['genMet'], "Don't know about systematic variation %r, take one of %s"%(self.sys['selectionModifier'], ",".join(jmeVariations + ['genMet']))
        assert dataMC in ['Data','MC'], "dataMC = Data or MC, got %r."%dataMC
        #Postfix for variables (only for MC and if we have a jme variation)
        sysStr = ""
        metStr = ""
        if dataMC == "MC" and self.sys['selectionModifier'] in jmeVariations: sysStr = "_" + self.sys['selectionModifier']
        if dataMC == "MC" and self.sys['selectionModifier'] in metVariations: metStr = "_" + self.sys['selectionModifier']
        res={'cuts':[], 'prefixes':[]}
        # Jet multiplicity window (min, max); max < 0 means open-ended.
        if nJets and not (nJets[0]==0 and nJets[1]<0):
            assert nJets[0]>=0 and (nJets[1]>=nJets[0] or nJets[1]<0), "Not a good nJets selection: %r"%nJets
            njetsstr = "nJetSelected"+sysStr+">="+str(nJets[0])
            prefix = "nJets"+str(nJets[0])
            if nJets[1]>=0:
                njetsstr+= "&&"+"nJetSelected"+sysStr+"<="+str(nJets[1])
                if nJets[1]!=nJets[0]: prefix+=str(nJets[1])
            else:
                prefix+='p'
            res['cuts'].append(njetsstr)
            res['prefixes'].append(prefix)
        # b-tag multiplicity window, same convention as nJets.
        if nBTags and not (nBTags[0]==0 and nBTags[1]<0):
            assert nBTags[0]>=0 and (nBTags[1]>=nBTags[0] or nBTags[1]<0), "Not a good nBTags selection: %r"% nBTags
            nbtstr = "nBTag"+sysStr+">="+str(nBTags[0])
            prefix = "nbtag"+str(nBTags[0])
            if nBTags[1]>=0:
                nbtstr+= "&&nBTag"+sysStr+"<="+str(nBTags[1])
                if nBTags[1]!=nBTags[0]: prefix+=str(nBTags[1])
            else:
                prefix+='p'
            res['cuts'].append(nbtstr)
            res['prefixes'].append(prefix)
        if metMin and metMin>0:
            res['cuts'].append('met_pt'+sysStr+metStr+'>='+str(metMin))
            res['prefixes'].append('met'+str(metMin))
        if not hadronicSelection:
            if mllMin and mllMin>0:
                res['cuts'].append('min_dl_mass>='+str(mllMin))
                res['prefixes'].append('mll'+str(mllMin))
            # Channel string: explicit (nE, nMu) split, or the 'all' channel.
            if nMuons >= 0 and nElectrons >= 0:
                chStr = "(nMuons_%s==%i&&nElectrons_%s==%i)"%(self.leptonId, nMuons, self.leptonId, nElectrons)
            else:
                # this is for the 'all' channel
                if not self.nonprompt:
                    chStr = "nLeptons_%s==%i"%(self.leptonId, nLeptons)
                else:
                    chStr = "nLeptons_%s>=%i"%(self.leptonId, nLeptons)
            #Z window
            # two different cases: Z_mass for 3l, Z1_mass_4l and Z2_mass_4l for 4l
            if nLeptons == 3:
                res['cuts'].append(getZCut(zWindow1, "Z_mass", zMassRange))
                if not self.nonprompt: res['cuts'].append("Z_fromTight>0")
            elif nLeptons == 4:
                res['cuts'].append(getZCut(zWindow1, "Z1_mass_4l", zMassRange))
                if zWindow2 == 'offZ':
                    # if number of muons is even, go off-Z
                    if nMuons%2 == 0:
                        logger.info("Z window 2 off Z, nMuons %s", nMuons)
                        res['cuts'].append(getZCut("offZ", "Z2_mass_4l", zMassRange))
                    else:
                        logger.info("Z window 2 all Z, nMuons %s", nMuons)
                        res['cuts'].append(getZCut("allZ", "Z2_mass_4l", zMassRange))
                else:
                    logger.info("Z window 2 %s, nMuons %s", zWindow2, nMuons)
                    res['cuts'].append(getZCut(zWindow2, "Z2_mass_4l", zMassRange))
            # no Z-mass cut for 2l case
            res['cuts'].append(chStr)
            if not self.nonprompt:
                res['cuts'].append('nLeptons_%s==%i'%(self.leptonId, nLeptons))
            else:
                res['cuts'].append('nLeptons_%s>=%i'%(self.leptonId, nLeptons))
            # Per-multiplicity lepton pT thresholds; the pT expression mixes
            # lep_pt and lep_ptCorr depending on the tight-ID flag.
            if nLeptons==1:
                lep_pt = "(lep_%s*(lep_pt - lep_ptCorr) + lep_ptCorr)"%self.tight_ID
                res['cuts'].append("Sum$(%s>40&&lep_%s>0)>0"%(lep_pt, self.leptonId))
                res['cuts'].append("nlep==1") # loose lepton veto
            elif nLeptons==2:
                raise NotImplementedError("Not yet thought about SS selection")
            elif nLeptons==3:
                lep_pt = "(lep_%s*(lep_pt - lep_ptCorr) + lep_ptCorr)"%self.tight_ID
                leptonSelection = [\
                    "Sum$(%s>40&&lep_%s>0)>0"%(lep_pt, self.leptonId),\
                    "Sum$(%s>20&&lep_%s>0)>1"%(lep_pt, self.leptonId),\
                    "Sum$(%s>10&&lep_%s>0)>2"%(lep_pt, self.leptonId),\
                ]
                res['cuts'].append("&&".join(leptonSelection))
                res['cuts'].append("!(nLeptons_tight_4l>=4)") # make sure to remove full overlap with 4l. This is enought, what is below shouldn't be necessary.
                if self.nonprompt: res['cuts'].append("nLeptons_tight_3l<3")
                #res['cuts'].append("min_dl_mass_FO_3l>12") #######################UPDATE
                ## need to veto 4l events to remove overlap
                #baseline4l = Setup(self.year, nLeptons=4)
                #baseline4l.parameters.update({'nJets':(2,-1), 'nBTags':(0,-1), 'zMassRange':20})
                #for c in quadlepChannels:
                #    res['cuts'].append("!(%s)"%baseline4l.preselection(dataMC, nElectrons=c.nE, nMuons=c.nM, short=True)['cut'])
            elif nLeptons==4:
                res['cuts'].append("Sum$(lep_pt>40&&lep_%s>0)>0 && Sum$(lep_pt>10&&lep_%s>0)>3"%(self.leptonId, self.leptonId)) #check if this is good enough
                res['cuts'].append("min_dl_mass>12&&totalLeptonCharge==0")
            else:
                raise NotImplementedError("nLeptons has to be 1 or 2 or 3 or 4. That's already more than enough to think about.")
        # Need a better solution for the Setups for different eras
        if self.year == 20167: self.year = 2016 #FIXME since we use 2016 MC for now
        if not self.short: res['cuts'].append(getFilterCut(isData=(dataMC=='Data'), isFastSim=isFastSim, year = self.year))
        # apply triggers in MC
        if not dataMC == 'Data':
            tr = triggerSelector(self.year)
            if not self.short: res['cuts'].append(tr.getSelection("MC"))
        res['cuts'].extend(self.externalCuts)
        return {'cut':"&&".join(res['cuts']), 'prefix':'-'.join(res['prefixes']), 'weightStr': ( self.weightString() if dataMC == 'MC' else 'weight')}
|
# coding: utf-8
def integer_partition(n):
    """Return the number of integer partitions of n (order of parts ignored)."""
    return _integer_partition(n, n)
def _integer_partition(n, m):
"""n的分割中,最大值为m的分割总数"""
if n < 0:
return 0
if n == 0 or m == 1:
return 1
return _integer_partition(n - m, m) + _integer_partition(n, m - 1)
if __name__ == "__main__":
    # Smoke test: print the partition counts p(1) .. p(9).
    for value in range(1, 10):
        print(value, integer_partition(value))
|
import os
from setuptools import setup, find_packages
from packageinfo import VERSION, NAME
# Long description for setup() comes straight from the README.
with open('README.rst', 'r') as readme:
    README_TEXT = readme.read()
def write_version_py(filename=None):
    """Generate a version.py module embedding the package VERSION.

    Defaults to ``<this dir>/simlammps/version.py`` when *filename* is None.
    """
    if filename is None:
        filename = os.path.join(
            os.path.dirname(__file__), 'simlammps', 'version.py')
    ver = """\
version = '%s'
"""
    # BUG FIX: the file was opened in binary mode ('wb') while a str was
    # written, which raises TypeError on Python 3. Use text mode, and a
    # context manager instead of try/finally to guarantee the close.
    with open(filename, 'w') as fh:
        fh.write(ver % VERSION)
# Generate the version module before packaging so it ships with the build.
write_version_py()

setup(
    name=NAME,
    version=VERSION,
    author='SimPhoNy, EU FP7 Project (Nr. 604005) www.simphony-project.eu',
    description='The LAMMPS engine-wrapper for the SimPhoNy framework',
    long_description=README_TEXT,
    # Register the wrapper under the simphony.engine plugin entry point.
    entry_points={
        'simphony.engine': ['lammps = simlammps']},
    packages=find_packages(),
    install_requires=["simphony>=0.5"]
)
|
# -*- coding: utf-8 -*-
import pytest
@pytest.mark.asyncio
async def test_whois_fail(irc3_bot_factory):
    """A 401 'No such nick' reply resolves the whois future with success=False."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.whois(nick='gawel')
    # Issuing the command registers its temporary reply handlers.
    assert len(bot.registry.events_re['in']) > 2
    bot.dispatch(':localhost 401 me gawel :No such nick')
    result = await task
    assert result['success'] is False
    # Handlers are unregistered once the command completes.
    assert len(bot.registry.events_re['in']) == 0
@pytest.mark.asyncio
async def test_whois_success(irc3_bot_factory):
    """A full 311/319/312/671/330/318 reply sequence yields a parsed whois result."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    # Mixed-case nick: replies arrive lower-cased and must still match.
    task = bot.async_cmds.whois(nick='Ga[W]el', timeout=0.1)
    assert len(bot.registry.events_re['in']) > 2
    bot.dispatch(':localhost 311 me ga[w]el username localhost * :realname')
    bot.dispatch(':localhost 319 me ga[w]el :@#irc3')
    bot.dispatch(':localhost 312 me ga[w]el localhost :Paris, FR')
    bot.dispatch(':localhost 671 me ga[w]el :is using a secure connection')
    bot.dispatch(':localhost 330 me ga[w]el gawel :is logged in as')
    bot.dispatch(':localhost 318 me ga[w]el :End')
    result = await task
    assert len(bot.registry.events_re['in']) == 0
    assert result['success']
    assert result['timeout'] is False
    assert result['username'] == 'username'
    assert result['realname'] == 'realname'
@pytest.mark.asyncio
async def test_whois_timeout(irc3_bot_factory):
    """With no server reply, the whois future resolves with timeout=True."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.whois(nick='GaWel', timeout=.1)
    assert len(bot.registry.events_re['in']) > 2
    result = await task
    assert result['timeout'] is True
@pytest.mark.asyncio
async def test_who_channel(irc3_bot_factory):
    """WHO on a channel collects one user entry per 352 reply until 315."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.who('#irc3')
    assert len(bot.registry.events_re['in']) == 2
    bot.dispatch(
        ':card.freenode.net 352 nick #irc3 ~irc3 host1 srv1 irc3 H :0 bot')
    bot.dispatch(
        ':card.freenode.net 352 nick #irc3 ~gael host2 srv2 gawel H@ :1 g')
    bot.dispatch(':card.freenode.net 315 nick #irc3 :End of /WHO list.')
    result = await task
    assert result['timeout'] is False
    assert len(result['users']) == 2
@pytest.mark.asyncio
async def test_who_channel_flags(irc3_bot_factory):
    """WHOX ('ahinrsu' flags) replies (354) are parsed into per-user dicts."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.who('#irc3', 'ahinrsu')
    assert len(bot.registry.events_re['in']) == 2
    bot.dispatch(
        ':card.freenode.net 354 nick ~irc3 1.1.1.1 host1 srv1 irc3 0 :0 bot')
    bot.dispatch(
        ':card.freenode.net 354 nick ~gael 2.2.2.2 host2 srv2 gawel g :1 g')
    bot.dispatch(':card.freenode.net 315 nick #irc3 :End of /WHO list.')
    result = await task
    assert result['timeout'] is False
    assert len(result['users']) == 2
    # First user: the '0' account field maps to None (not logged in).
    assert result['users'][0]['account'] is None
    assert result['users'][0]['host'] == 'host1'
    assert result['users'][0]['ip'] == '1.1.1.1'
    assert result['users'][0]['nick'] == 'irc3'
    assert result['users'][0]['realname'] == '0 bot'
    assert result['users'][0]['server'] == 'srv1'
    assert result['users'][0]['user'] == '~irc3'
    # Second user: logged in as account 'g'.
    assert result['users'][1]['account'] == 'g'
    assert result['users'][1]['host'] == 'host2'
    assert result['users'][1]['ip'] == '2.2.2.2'
    assert result['users'][1]['nick'] == 'gawel'
    assert result['users'][1]['realname'] == '1 g'
    assert result['users'][1]['server'] == 'srv2'
    assert result['users'][1]['user'] == '~gael'
@pytest.mark.asyncio
async def test_who_nick(irc3_bot_factory):
    """WHO on a single nick returns a flat result (here: the hopcount field)."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.who('irc3')
    assert len(bot.registry.events_re['in']) == 2
    bot.dispatch(
        ':card.freenode.net 352 nick * ~irc3 host1 serv1 irc3 H :0 bot')
    bot.dispatch(':card.freenode.net 315 nick irc3 :End of /WHO list.')
    result = await task
    assert result['timeout'] is False
    assert result['hopcount'] == '0'
@pytest.mark.asyncio
async def test_topic(irc3_bot_factory):
    """Setting a topic resolves once the server echoes the TOPIC message."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.topic('#chan', topic='test', timeout=.1)
    assert len(bot.registry.events_re['in']) > 0
    bot.dispatch(':localhost TOPIC #chan :test')
    result = await task
    assert result['timeout'] is False
    assert result['topic'] == 'test'
@pytest.mark.asyncio
async def test_no_topic(irc3_bot_factory):
    """Querying a channel without a topic (331 reply) yields topic=None."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.topic('#chan', timeout=.1)
    assert len(bot.registry.events_re['in']) > 0
    bot.dispatch(':localhost 331 me #chan :Not topic')
    result = await task
    assert result['timeout'] is False
    assert result['topic'] is None
@pytest.mark.asyncio
async def test_ison(irc3_bot_factory):
    """ISON returns the online nicks from the 303 reply (lower-cased by server)."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.ison('GaWel', timeout=.1)
    assert len(bot.registry.events_re['in']) > 0
    bot.dispatch(':localhost 303 me :gawel')
    result = await task
    assert result['timeout'] is False
    assert result['names'] == ['gawel']
@pytest.mark.asyncio
async def test_names(irc3_bot_factory):
    """NAMES accumulates nicks across multiple 353 replies until 366."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.names('#irc3')
    assert len(bot.registry.events_re['in']) == 2
    bot.dispatch(
        ':card.freenode.net 353 nick @ #irc3 :irc3 @gawel')
    bot.dispatch(
        ':card.freenode.net 353 nick @ #irc3 :+panoramisk')
    bot.dispatch(
        ':card.freenode.net 366 nick #irc3 :End of /NAMES list.')
    result = await task
    assert result['timeout'] is False
    # Two names from the first reply plus one from the second.
    assert len(result['names']) == 3
@pytest.mark.asyncio
async def test_channel_bans(irc3_bot_factory):
    """MODE +b listing (367/368) is parsed into mask/user/timestamp dicts."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.channel_bans('#irc3')
    assert len(bot.registry.events_re['in']) == 2
    bot.dispatch(':card.freenode.net 367 nick #irc3 *!*@host irc3 1494621383')
    bot.dispatch(':card.freenode.net 368 nick #irc3 :End of Channel Ban List')
    result = await task
    assert result['timeout'] is False
    assert len(result['bans']) == 1
    assert result['bans'][0]['mask'] == '*!*@host'
    assert result['bans'][0]['user'] == 'irc3'
    # The timestamp string is converted to an int.
    assert result['bans'][0]['timestamp'] == 1494621383
@pytest.mark.asyncio
async def test_ctcp(irc3_bot_factory):
    """A CTCP VERSION query parses the \\x01-delimited NOTICE reply."""
    bot = irc3_bot_factory(includes=['irc3.plugins.asynchronious'])
    assert len(bot.registry.events_re['in']) == 0
    task = bot.async_cmds.ctcp_async('irc3', 'VERSION')
    assert len(bot.registry.events_re['in']) == 2
    bot.dispatch(':irc3!irc3@host1 NOTICE nick :\x01VERSION IRC3 Library\x01')
    result = await task
    assert result['timeout'] is False
    assert result['mask'] == 'irc3!irc3@host1'
    assert result['ctcp'] == 'VERSION'
    assert result['reply'] == 'IRC3 Library'
|
import os
import sys

# Path hacks to make the code available for testing: expose both the repo
# root and src/ so `src.*` imports resolve without installing the package.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../src')))

# Import the required classes and functions (re-exported for the tests).
from src.land_cover_classification import LandCoverClassification #pylint: disable=unused-import,wrong-import-position
from src.helpers import LOG_FORMAT, get_logger, set_capability, get_capability, load_params, ensure_data_directories_exist #pylint: disable=unused-import,wrong-import-position,line-too-long
|
import json
build = None      # command for running a user's program
email = None      # optional notification address from the config file
error = None      # human-readable description of the most recent failure
test_dict = {}    # (key, val) where key is the name of the test case and val is its entire json object
output_list = []  # output value for every test case in json file
assert_list = []  # assert value for every test case in json file
exit_list = []    # exit value for every test case in json file


def parse_config(path):
    """Load the JSON config at *path* into the module globals.

    Populates `build`, `email` and `test_dict`. Returns 0 on success;
    on failure sets `error` and returns -1.
    """
    global build, email, test_dict, error
    # Narrowed from a bare except: only a missing/unreadable file means
    # "config file not found"; other errors should surface.
    try:
        file = open(path)
    except OSError:
        error = 'config file not found'
        return -1
    # BUG FIX: the original never closed the handle; `with` guarantees it.
    with file:
        json_file = json.load(file)
    # Check that mandatory build property is present and filled out
    try:
        build = json_file['build']
    except KeyError:
        error = 'build property is required'
        return -1
    if len(build) <= 0:
        error = 'build property value cannot be empty'
        return -1
    # Optional email property; None when absent (replaces try/except).
    email = json_file.get('email')
    for (key, val) in json_file.items():
        if key not in ['build', 'email']:  # save all other properties as test case objects
            test_dict[key] = val
    return 0
def isolate_result_checks(test_cases):
    """Validate and record each test case's expected-result properties.

    For every case, the `output`, `assert` and `exit` values (or None when
    absent) are appended to the module-level lists. A valid case must carry
    either an expected exit code, or both an output and a comparison
    assertion. Returns 0 on success; on the first invalid case sets `error`
    and returns -1.

    NOTE(review): assumes each case value is a dict; the original bare
    excepts would also have tolerated non-dict values -- confirm the caller
    only passes dicts (parse_config stores raw JSON objects).
    """
    global output_list, assert_list, exit_list, error
    comparison_ops = ["==", "!=", "<", "<=", ">", ">="]
    for case in test_cases:
        # Replaced bare try/except around dict access with .get()/`in`.
        output = test_cases[case].get('output')
        output_list.append(output)
        if 'assert' in test_cases[case]:
            assertion = test_cases[case]['assert']
            if assertion not in comparison_ops:
                error = "assertion in " + case + " must be of one of the forms: [==,!=,<,<=,>,>=]"
                return -1
            if output is None:
                error = 'error in test case ' + case + '. assertion provided without an output property'
                return -1
        else:
            assertion = None
            if output is not None:
                error = 'error in test case ' + case + '. output provided without an assertion property'
                return -1
        assert_list.append(assertion)
        # Renamed from `exit` so the builtin is not shadowed.
        exit_code = test_cases[case].get('exit')
        exit_list.append(exit_code)
        if assertion is None or output is None:
            if exit_code is None:
                error = 'error in test case ' + case + '. No expected exit code or output provided'
                return -1
    return 0
def get_build(test_case):
    """Return the build command with every non-reserved test-case value appended."""
    global build
    extra_args = [str(test_case[key]) for key in test_case
                  if key not in ('exit', 'output', 'assert')]
    return ' '.join([build] + extra_args)
|
#VBO/IBO/DSC Model Writing from Blender
#Custom Properties:
#scene.hzg_file_name - name of zip file to write
#object.hzg_type - type of object to be populated by the reading program
# - Defaults to "ENTITY"
# - GEO_MIPMAP - Used for terrain geometry
#object.hzg_export_mode - how to export the data for this object
# - V - Write tightly packed vertex position data, no normals or texture coordinates (VVV)
# - VC - Write Vertex position and Texture Coordinates in VVVCC format.
# - VNC - Write packed Vertex/Normal/TexCoord data in VVVNNNCC format
#object.hzg_round - integer, how many places to round decimals
import bpy
import bmesh
import zipfile
import struct
import sys
import os
# Prefer compressed zip entries when zlib is available; fall back to stored.
try:
    import zlib
    compression = zipfile.ZIP_DEFLATED
except:
    compression = zipfile.ZIP_STORED

# Bytes-per-element sizes used for buffer math: float, int, short.
bpf = 4
bpi = 4
bps = 2

clear_work_directory = True

# Project/work/output paths; BLENDER_EXPORT_WORK overrides the default root.
proj_path = os.environ.get("BLENDER_EXPORT_WORK", "C:\\Users\\nicholas.waun\\git\\openglsandbox\\ModelConverter")
#out_path = proj_path+"\\res\\out\\"
out_path = "C:\\Users\\nicholas.waun\\git\\openglsandbox\\SimpleRender2_5\\res\\raw\\"
work_path = proj_path+"\\res\\work\\"

# Output file extensions: descriptor, vertex buffer, index buffer.
dsc_ext = ".dsc"
vbo_ext = ".v"
ibo_ext = ".i"
def write_mesh_files(obj, scene):
    """Write one mesh object's .v/.i/.dsc files into work_path.

    The vertex buffer layout depends on hzg_export_mode: V (positions only),
    VC (positions + UVs) or VNC (positions + normals + UVs). Appends the
    produced file names to the shared 'index' file and returns the base name.
    """
    scene.objects.active = obj
    file_name = obj.name
    export_mode = obj["hzg_export_mode"] or "VNC"
    print("HZG_EXPORT_MODE is",export_mode)
    bpy.ops.object.modifier_apply(modifier='Subsurf')
    round_verts = obj.get("hzg_round",0)
    # Work on a triangulated bmesh copy so every face has exactly 3 loops.
    bm = bmesh.new()
    bm.from_mesh(obj.data)
    bmesh.ops.triangulate(bm, faces=bm.faces)
    print("*** NUM FACES:",len(bm.faces))
    print("*** ROUND VERTEXES:",round_verts)
    #print("MESH VOLUME:",bm.calc_volume(False))
    uv_lay = bm.loops.layers.uv.active #This will be None if object is not unwrapped
    print("Active layer:",uv_lay)
    # Per-mode byte offsets within one vertex record; -1 marks "absent".
    if(uv_lay is None or export_mode == "V"):
        print("UV coords will not be exported for",obj.name)
        nrm_offset = 3 * bpf
        txc_offset = -1
        stride = 6 * bpf
    elif(export_mode == "VC"):
        print("Normals will not be exported for",obj.name)
        nrm_offset = -1
        txc_offset = 3 * bpf
        stride = 5 * bpf
    else:
        print("Exporting data in VNC format for",obj.name)
        nrm_offset = 3 * bpf
        txc_offset = 6 * bpf
        stride = 8 * bpf
    # Pre-sized vertex buffer (3 vertices per triangulated face).
    vboBytes = bytearray(len(bm.faces)*3*stride)
    iboBytes = bytearray()
    # Key=value descriptor consumed by the reading program.
    dscString = "OBJECT_NAME=%s" % obj.name
    dscString += "\nOBJECT_TYPE=%s" % (obj["hzg_type"] or "ENTITY")
    dscString += "\nNUM_ELEMENTS=%i" % (len(bm.faces)*3)
    dscString += "\nPOS_OFFSET=0"
    dscString += "\nNRM_OFFSET=%i" % nrm_offset
    dscString += "\nTXC_OFFSET=%i" % txc_offset
    dscString += "\nELEMENT_STRIDE=%i" % stride
    dscString += "\nNRM_SIZE=3"
    dscString += "\nPOS_SIZE=3"
    dscString += "\nTXC_SIZE=2"
    print("**** STRIDE IS:",stride)
    ctr = 0
    # Big-endian packing throughout (">fff"/">ff"/">h").
    for face in bm.faces:
        # print("**** Face #",face.index)
        for loop in face.loops:
            # print("**** Loop vert #",loop.vert.index)
            pos = bm.verts[loop.vert.index].co
            # print("**** Vert: %.5f,%.5f,%.5f" % (pos.x,pos.y,pos.z))
            if(round_verts):
                struct.pack_into(">fff", vboBytes, ctr * stride, round(pos.x,5), round(pos.y,5), round(pos.z,5))
            else:
                struct.pack_into(">fff", vboBytes, ctr * stride, pos.x,pos.y,pos.z)
            if(export_mode == "VNC"):
                nrm = bm.verts[loop.vert.index].normal
                # print("**** Normal: %.4f,%.4f,%.4f" % (nrm.x, nrm.y, nrm.z))
                struct.pack_into(">fff", vboBytes, (ctr * stride) + nrm_offset, nrm.x,nrm.y,nrm.z)
            if(uv_lay is not None and export_mode != "V"):
                txc = loop[uv_lay].uv
                # print("**** Texture Coords:", txc.x, ",", txc.y)
                # V axis is flipped (1-txc.y) for the target renderer.
                struct.pack_into(">ff", vboBytes, (ctr * stride) + txc_offset, txc.x, 1-txc.y)
            # Trivial index buffer: one sequential short per emitted vertex.
            iboBytes += struct.pack(">h",ctr)
            ctr += 1
    bm.free()
    fileVbo = open(work_path + file_name + vbo_ext, 'wb')
    fileVbo.write(vboBytes)
    fileVbo.close()
    fileIbo = open(work_path + file_name + ibo_ext, 'wb')
    fileIbo.write(iboBytes)
    fileIbo.close()
    fileDsc = open(work_path + file_name + dsc_ext, 'w')
    fileDsc.write(dscString)
    fileDsc.close()
    # Append this object's entries to the shared zip index.
    idxString = "+" + file_name + "\n"
    idxString += "-" + file_name + vbo_ext + "\n"
    idxString += "-" + file_name + ibo_ext + "\n"
    idxString += "-" + file_name + dsc_ext + "\n"
    fileIdx = open(work_path + "index", 'a')
    fileIdx.write(idxString)
    fileIdx.close()
    # NOTE(review): bm.free() was already called above; this second call
    # looks redundant -- confirm whether it can be removed safely.
    bm.free()
    return file_name
#**********************************************************
# Script entry point: export every visible mesh of every scene into a zip.
print("\n*** Welcome to Blender Python Zip Exporter. ***\n");
print("Running in",sys.argv[0])
print(sys.version_info);
print("")

# Truncate the shared index file before the per-object appends.
fileIdx = open(work_path + "index", 'w')
fileIdx.write("")
fileIdx.close()

for scene in bpy.data.scenes:
    print("* Operating on Scene:",scene.name)
    print("* Using zip file",scene["hzg_file_name"],"\n")
    zf = zipfile.ZipFile(out_path + scene["hzg_file_name"], mode='w')
    for obj in scene.objects:
        if(obj.type == "MESH" and obj.is_visible(scene)):
            print("** Writing",obj.name)
            file_name = write_mesh_files(obj, scene)
            # Bundle the three generated files for this object.
            zf.write(work_path + file_name + dsc_ext, file_name + dsc_ext, compress_type=compression)
            zf.write(work_path + file_name + vbo_ext, file_name + vbo_ext, compress_type=compression)
            zf.write(work_path + file_name + ibo_ext, file_name + ibo_ext, compress_type=compression)
            print("")
        else:
            print("Not including",obj.name,"type is",obj.type,"visibility is",obj.is_visible(scene))
    zf.write(work_path + "index", "index", compress_type=compression)
    zf.close()

if(clear_work_directory):
    print("not implemented")
|
'''
Created on 14/04/2013
@author: carlos
'''
import unittest
from models import Content, Piece
class TestContent(unittest.TestCase):
    """Exercises Content construction and its id-based equality."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testCreate(self):
        # Pieces may wrap plain text or a whole Content instance.
        first = Content(1, 'Title1')
        first.append_piece(Piece('piece text'))
        second = Content(2, 'Title2')
        second.append_piece(Piece('piece text'))
        second.append_piece(Piece(first))

    def testEq(self):
        # Same id compares equal, regardless of title.
        base = Content(1, 'asdf')
        self.assertEqual(base, Content(1, 'asdf'))
        self.assertEqual(base, Content(1, '1234'))
        # A different id breaks equality even with an identical title.
        self.assertNotEqual(base, Content(2, 'asdf'))

if __name__ == "__main__":
    unittest.main()
# -*- coding: utf-8 -*-
#Copyright (C) 2011 Seán Hayes
import researchhub.settings as dj_settings
from fabric.api import local, run, sudo, env, prompt, settings, cd
from fabric.contrib.files import exists
from fabric.decorators import roles, runs_once
from fabric.tasks import execute
import json
import logging
import os
import string
# Keep the root logger at INFO so Fabric output stays visible.
logging.getLogger('').setLevel(logging.INFO)
logger = logging.getLogger(__name__)
# Deployment target: a single box plays every role except celery.
env.user = 'sean'
#env.hosts = ['sean@50.56.208.20',]
env.hosts = ['108.171.187.214',]
env.roledefs['web'] = ['108.171.187.214',]
env.roledefs['cache'] = ['108.171.187.214',]
env.roledefs['db'] = ['108.171.187.214',]
env.roledefs['celery'] = []
# Remote filesystem layout, derived from the Django settings module.
env.code_dir = '/srv/'
env.project_name = dj_settings.PROJECT_MODULE
env.project_parent_dir = dj_settings.PROJECT_PARENT_DIR
env.project_dir = '%s%s/' % (env.code_dir, dj_settings.PROJECT_MODULE,)
env.package_dir = '%s%s/' % (env.project_dir, dj_settings.PROJECT_MODULE,)
env.project_git_uri = 'ssh://sean@seanhayes.name/development/researchhub/'
env.config_dir = '%sconfig/generated/' % env.project_dir
env.pip_dir = '%spip/' % env.code_dir
env.celery_script_dir = '%scelery/init.d/' % env.config_dir
# Directories created with deploy-friendly permissions by mkdirs().
main_dirs = [
#	env.code_dir,
    env.pip_dir,
]
project_dirs = [
    '/%s/logs/' % env.project_dir,
]
# System packages installed by install_apt().
apt_packages = [
    'debconf-utils',
    'git',
    'mercurial',
    'gdebi-core',
    'graphviz',
    'graphviz-dev',
    'libmemcached-tools',
    'memcached',
    'nginx',
    'pkg-config',
    'postfix',
    'python-pip',
    'python-virtualenv',
    'python-dev',
    'postgresql',
    #'rabbitmq-server',
    #TODO: try to get as many of these as possible in requirements.txt
    #'python-django-doc',
    #some require 1/4 GB of dependencies to build the PIP version, which is unacceptable for this kind of application
    'python-imaging',
    'python-psycopg2',
    #'python-exactimage',
    #'python-crypto',
]
#Run the following to make binary eggs when setuptools isn't used
#python -c "import setuptools; execfile('setup.py')" bdist_egg
# tasks
def create_user():
    """Create an admin user on a fresh cloud instance (prompts for name and password)."""
    acct = prompt('Enter username to create: ', default=env.user)
    with settings(user='root'):
        # Member of sudo and www-data so the account can both deploy and serve files.
        run('useradd --groups sudo,www-data -d /home/%s -m %s' % (acct, acct))
        run('passwd %s' % acct)
def switch_to_bash():
    """Repoint /bin/sh from dash (the Ubuntu default) to bash."""
    with cd('/bin'):
        # Must be a single sudo() call: each sudo() is a separate session,
        # and logins fail while sh is unlinked.
        sudo('rm sh; ln -s bash sh')
def set_django_colors():
    """Export DJANGO_COLORS locally using the value from the project settings."""
    local('export DJANGO_COLORS="%s"' % dj_settings.DJANGO_COLORS)
@roles('db')
def setup_pgsql():
    """Create a PostgreSQL superuser role and a database owned by it."""
    name = prompt('Enter PostgreSQL role/db to create: ', default=env.project_name)
    # Run as the postgres system user; -P prompts for the new role's password.
    sudo('createuser -s -P %s' % name, user='postgres')
    sudo('createdb -O %s %s' % (name, name), user='postgres')
def mkdirs(dirs):
    """Create each missing directory with deploy-friendly ownership and mode."""
    for path in dirs:
        if exists(path):
            continue
        sudo('mkdir %s' % path)
        # Owned by the deploy user, group-writable by www-data.
        sudo('chown %s:www-data %s' % (env.user, path))
        sudo('chmod 775 %s' % path)
def upgrade_ubuntu():
    """Release-upgrade helper; probably better run outside Fabric."""
    sudo('apt-get install update-manager-core')
    # First edit /etc/update-manager/release-upgrades and set Prompt=normal.
    sudo('do-release-upgrade')
def install_apt():
    """Update the package list, upgrade everything, then install the project's Apt deps."""
    sudo('apt-get update')
    sudo('apt-get upgrade')
    # str.join replaces the deprecated, Python-2-only string.join() helper.
    sudo('apt-get install -f %s' % ' '.join(apt_packages))
def install_pip():
    """Install the project's PIP requirements system-wide."""
    with cd(env.pip_dir):
        sudo('pip install -r %srequirements.txt' % env.project_dir)
def install_project():
    """Clone the project repo on first deploy, or pull the latest master after."""
    if not exists(env.project_dir):
        with cd(env.code_dir):
            run('git clone %s' % env.project_git_uri)
    else:
        with cd(env.project_dir):
            run('git pull origin master')
    # Make the checkout group-writable by the web server.
    sudo('chown -R %s:www-data %s' % (env.user, env.project_dir))
    sudo('chmod 775 %s' % env.project_dir)
    mkdirs(project_dirs)
def install():
    """Full install: directories, Apt packages, project checkout, then PIP deps."""
    mkdirs(main_dirs)
    install_apt()
    # www-data needs group write on the code root before the checkout lands.
    sudo('chown :www-data %s' % (env.code_dir,))
    sudo('chmod 775 %s' % env.code_dir)
    install_project()
    install_pip()
def refresh_config_files():
    """Regenerate the dynamic config files via django-config-gen."""
    with cd(env.project_dir):
        run('./manage.py config_gen')
def link_config_file(source, destination):
    """Force-symlink *source* at *destination*, replacing any previous link."""
    with settings(warn_only=True):
        # warn_only: the rm fails harmlessly when no old link exists.
        sudo('rm %s' % destination)
        sudo('ln -s %s %s' % (source, destination))
@roles('web')
def config_nginx():
    """Point Nginx's default site at the generated config file."""
    with settings(warn_only=True):
        # Clearing old sites may fail on a clean box; that's fine.
        sudo('rm /etc/nginx/sites-available/*')
        link_config_file(os.path.join(env.config_dir, 'nginx'), '/etc/nginx/sites-available/default')
@roles('db')
def config_postgresql():
    """Symlink the generated pg_hba.conf into the PostgreSQL 9.1 config dir."""
    link_config_file(os.path.join(env.config_dir, 'pg_hba.conf'), '/etc/postgresql/9.1/main/pg_hba.conf')
@roles('cache')
def config_memcached():
    """Symlink the generated memcached.conf into /etc."""
    link_config_file(os.path.join(env.config_dir, 'memcached.conf'), '/etc/memcached.conf')
@roles('celery')
def config_celery():
    """Links Celery's Debian init scripts to /etc/init.d/."""
    # run(...).split() replaces the deprecated, Python-2-only string.split() helper.
    for script in run('ls %s' % env.celery_script_dir).split():
        link_config_file(os.path.join(env.celery_script_dir, script), '/etc/init.d/%s' % script)
    link_config_file(os.path.join(env.config_dir, 'celery/celeryd_default'), '/etc/default/celeryd')
def config_tzdata():
    """Set the server time zone to America/New_York without prompting."""
    # Write /etc/timezone via tee, then have dpkg apply it non-interactively.
    run('echo \'America/New_York\'| sudo tee /etc/timezone')
    sudo('dpkg-reconfigure -f noninteractive tzdata')
@runs_once
def config():
    """Regenerate config files and symlink them into the system locations
    for Nginx, Memcached, and tzdata."""
    for task in (refresh_config_files, config_nginx, config_memcached, config_tzdata):
        execute(task)
def start_servers():
    """Starts Nginx, uWSGI, and Memcached."""
    sudo('/etc/init.d/nginx start')
    # uWSGI has no init script here; launch it from its generated ini as www-data.
    sudo('uwsgi --ini /srv/researchhub/config/generated/uwsgi.ini', user='www-data')
    sudo('/etc/init.d/memcached start')
def restart_servers():
    """Restarts Nginx, uWSGI, and Memcached."""
    sudo('/etc/init.d/nginx restart')
    # TERM signals the uWSGI master identified by its pid file to restart.
    sudo('kill -TERM `cat /srv/researchhub/uwsgi.pid`', user='www-data')
    sudo('/etc/init.d/memcached restart')
def reload_servers():
    """Reload configs where supported (faster than a restart); restart the rest."""
    sudo('/etc/init.d/nginx reload')
    # HUP asks the uWSGI master for a graceful reload.
    sudo('kill -HUP `cat /srv/researchhub/uwsgi.pid`', user='www-data')
    sudo('/etc/init.d/memcached restart')  # memcached has no reload
def stop_servers():
    """Stops Nginx, uWSGI, and Memcached."""
    sudo('/etc/init.d/nginx stop')
    # INT shuts the uWSGI master down.
    sudo('kill -INT `cat /srv/researchhub/uwsgi.pid`', user='www-data')
    sudo('/etc/init.d/memcached stop')
#local development scripts
def venv():
run('virtualenv --no-site-packages virtualenv')
run('pip install -I -E virtualenv -r requirements.txt')
# Fixture groups dumped by backup_fixtures() and loaded by reload_db(),
# keyed by output file name -> dumpdata app/model labels.
fixtures = {
    'user': ['auth.User'],
    'sites': ['sites'],
    'flatpages': ['flatpages'],
    #'djcelery': ['djcelery'],
    'researchhub_app': ['researchhub_app'],
}
def reload_db():
    """
    Drop and recreate the DB, run syncdb/migrations, and load all fixtures.
    If PG whines about encoding differences, see: https://wiki.archlinux.org/index.php/PostgreSQL#Change_Default_Encoding_of_New_Databases_To_UTF-8_.28Optional.29
    """
    manage = '%s/manage.py' % dj_settings.PROJECT_ROOT
    local('%s reset_db --noinput --router=default' % manage)
    local('%s syncdb --migrate --noinput' % manage)
    # Named fixture groups first, then everything else.
    for group in fixtures:
        local('%s loaddata %s' % (manage, group))
    local('%s loaddata the_rest' % manage)
def backup_fixtures():
    """Dump each fixture group to its own JSON file, then dump everything else
    (minus the already-dumped apps, contenttypes, and admin) to the_rest.json."""
    exclude = []
    for name, labels in fixtures.items():
        local('%s/manage.py dumpdata --format=json --indent=4 --natural %s > %s/%s.json' % (dj_settings.PROJECT_ROOT, ' '.join(labels), dj_settings.MAIN_FIXTURE_DIR, name))
        exclude.extend(labels)
    # Bug fix: the old loop rebound its loop variable ('e = "--exclude=%s" % e')
    # without updating the list, so bare app labels were passed positionally to
    # dumpdata (dumping them again) instead of as --exclude flags.
    exclude_flags = ['--exclude=%s' % label for label in exclude]
    local('%s/manage.py dumpdata --format=json --indent=4 --natural --exclude=contenttypes --exclude=admin %s > %s/the_rest.json' % (dj_settings.PROJECT_ROOT, ' '.join(exclude_flags), dj_settings.MAIN_FIXTURE_DIR))
def check_for_pdb():
    """Grep the tree for stray pdb.set_trace() calls before committing."""
    #TODO: handle exit code
    local('find . -name \'*.py\'|xargs grep \'pdb.set_trace\'')
def start_local_env():
    """Start every local development service."""
    for service in ('nginx', 'postgresql', 'celeryd', 'celerybeat', 'celeryevcam', 'memcached'):
        local('sudo /etc/init.d/%s start' % service)
def restart_local_env():
    """Restart every local development service."""
    for service in ('nginx', 'postgresql', 'celeryd', 'celerybeat', 'celeryevcam', 'memcached'):
        local('sudo /etc/init.d/%s restart' % service)
def stop_local_env():
    """Stop every local development service."""
    for service in ('nginx', 'postgresql', 'celeryd', 'celerybeat', 'celeryevcam', 'memcached'):
        local('sudo /etc/init.d/%s stop' % service)
|
# Demonstrates that assignment copies the *reference*, not the list:
# both names point at the same object, so mutating one shows in the other.
list1 = list(range(1, 16))
list2 = list1          # alias, not a copy
# Add 16 to the second list:
list2.append(16)       # visible through both names
print (list1)
print (list2)
'''QLabel控件
setAlignment():设置文本的对齐方式
setIndent():设置文本缩进
text():获取文本内容
setBuddy():设置伙伴关系
setText():设置文本内容
selectedText():返回所选择的字符
setWordWrap():设置是否允许换行
QLabel常用的信号(事件)
1. 当鼠标滑过QLabel控件时触发:linkHovered
2. 当鼠标单击QLabel控件时触发:linkActivated
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt
class QLabelDemo(QWidget):
    """Demo window showing QLabel styling, alignment, tooltips and link signals."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        # Four labels, each demonstrating a different QLabel feature.
        label1 = QLabel(self)
        label2 = QLabel(self)
        label3 = QLabel(self)
        label4 = QLabel(self)
        # label1: rich-text content centered over a solid palette background.
        label1.setText("<font color=yellow>这是一个文本标签。</font>")
        label1.setAutoFillBackground(True)
        palette=QPalette()
        palette.setColor(QPalette.Window,Qt.blue)  # set the background color
        label1.setPalette(palette)
        label1.setAlignment(Qt.AlignCenter)
        # label2: an inert link used only to demonstrate the hover signal.
        label2.setText("<a href='#'>欢迎使用GUI程序。</a>")
        label3.setAlignment(Qt.AlignCenter)
        label3.setToolTip('这是一个图片')
        # If True, clicking the link opens it in the system browser.
        label4.setOpenExternalLinks(True)
        label4.setText("<a href='https://t.bilibili.com/'>bilibili</a>")
        label4.setAlignment(Qt.AlignRight)
        label4.setToolTip('这是一个超链接')
        vbox=QVBoxLayout()  # vertical layout
        vbox.addWidget(label1)
        vbox.addWidget(label2)
        vbox.addWidget(label3)
        vbox.addWidget(label4)
        # Signal hookups: hover on label2, activation (click) on label4.
        label2.linkHovered.connect(self.linkHovered)
        label4.linkActivated.connect(self.linkClicked)
        self.setLayout(vbox)
        self.setWindowTitle('QLabel控件学习')
    def linkHovered(self):
        # Fired while the mouse hovers over label2's link.
        print('当鼠标滑过label2标签时,触发事件')
    def linkClicked(self):
        # Fired when label4's link is clicked.
        print('当鼠标单击label4标签时,触发事件')
if __name__=='__main__':
    app=QApplication(sys.argv)
    main=QLabelDemo()
    main.show()
    # Enter Qt's event loop; propagate its exit code to the shell.
    sys.exit(app.exec_())
|
# 530. Minimum Absolute Difference in BST
#
# refer to 783. Minimum Distance Between BST Nodes
# Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.
#
# Example:
#
# Input:
#
# 1
# \
# 3
# /
# 2
#
# Output:
# 1
#
# Explanation:
# The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).
# Definition for a binary tree node.
class TreeNode(object):
    """Plain binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    def getMinimumDifference(self, root):
        """
        Return the minimum absolute difference between the values of any two
        nodes in a binary search tree.

        :type root: TreeNode
        :rtype: int (float('inf') if the tree has fewer than two nodes)

        An inorder walk of a BST visits values in sorted order, so the
        minimum gap is always between two consecutively visited nodes.
        Fix: state lives in closed-over cells instead of mutable class
        attributes (Solution.mindiff / Solution.prev), which were shared
        across all instances and unsafe under concurrent calls.
        """
        best = [float('inf')]   # smallest gap seen so far
        prev = [None]           # previously visited node in inorder order
        def inorder(node):
            if not node:
                return
            inorder(node.left)
            if prev[0] is not None:
                best[0] = min(best[0], abs(node.val - prev[0].val))
            prev[0] = node
            inorder(node.right)
        inorder(root)
        return best[0]
if __name__ == '__main__':
    # Ad-hoc smoke tests for getMinimumDifference.
    sol = Solution()
    """
    # 1
    #  \
    #   3
    #  /
    # 2
    """
    root = TreeNode(1)
    node1 = TreeNode(3)
    node2 = TreeNode(2)
    root.right = node1
    node1.left = node2
    # Gap between 1 and 2 (and 2 and 3) is 1.
    assert (sol.getMinimumDifference(root) == 1)
    """
    #   5
    #  / \
    # 4   7
    """
    root = TreeNode(5)
    node1 = TreeNode(4)
    node2 = TreeNode(7)
    root.left = node1
    root.right = node2
    # Gap between 4 and 5 is 1.
    assert (sol.getMinimumDifference(root) == 1)
|
#!/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import copy
import configparser
import pandas as pd
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Create report.
class CreateReport:
    """Writes scan results to a per-FQDN CSV report; paths come from config.ini."""

    def __init__(self, utility):
        # utility provides print_message/write_log/forward_lookup (project helper).
        self.utility = utility
        self.file_name = os.path.basename(__file__)
        self.full_path = os.path.dirname(os.path.abspath(__file__))
        self.root_path = os.path.join(self.full_path, '../')
        config = configparser.ConfigParser()
        config.read(os.path.join(self.root_path, 'config.ini'))
        try:
            self.report_dir = os.path.join(self.root_path, config['Report']['report_path'])
            self.report_path = os.path.join(self.report_dir, config['Report']['report_name'])
            # Header cells are stored '@'-separated in the ini file.
            self.header = str(config['Report']['header']).split('@')
        except Exception as e:
            self.utility.print_message(FAIL, 'Reading config.ini is failure : {}'.format(e))
            self.utility.write_log(40, 'Reading config.ini is failure : {}'.format(e))
            sys.exit(1)

    def create_report_header(self, fqdn):
        """Start a fresh CSV for *fqdn* containing only the header row."""
        self.utility.print_message(NOTE, 'Create report header : {}'.format(self.report_path))
        self.utility.write_log(20, '[In] Create report header [{}].'.format(self.file_name))
        target = self.report_path.replace('*', fqdn)
        pd.DataFrame([], columns=self.header).to_csv(target, mode='w', index=False)
        self.utility.write_log(20, '[Out] Create report header [{}].'.format(self.file_name))

    def create_report_body(self, url, fqdn, port, cloud, method, products, type, comments, errors, srv_header, log_file, date):
        """Append one base row plus per-product/comment/error rows to the CSV."""
        self.utility.print_message(NOTE, 'Create {}:{} report\'s body.'.format(fqdn, port))
        self.utility.write_log(20, '[In] Create report body [{}].'.format(self.file_name))
        # Login-page estimation is optional; '*' marks "not evaluated".
        if len(type) != 0:
            login_prob = 'Log : ' + type['ml']['prob'] + ' %\n' + 'Url : ' + type['url']['prob'] + ' %'
            login_reason = 'Log : ' + type['ml']['reason'] + '\n' + 'Url : ' + type['url']['reason']
        else:
            login_prob = '*'
            login_reason = '*'
        # Column layout matches the header written by create_report_header().
        record = [
            fqdn,                               # FQDN.
            self.utility.forward_lookup(fqdn),  # IP address.
            str(port),                          # Port number.
            cloud,                              # Cloud service type.
            method,                             # Using method.
            url,                                # Target URL.
            '-',                                # Vendor name.
            '-',                                # Product name.
            '-',                                # Product version.
            '-',                                # Trigger of identified product.
            '-',                                # Product category.
            '-',                                # CVE number of product.
            login_prob,                         # Login probability.
            login_reason,                       # Trigger of login page.
            '-',                                # Unnecessary comments.
            '-',                                # Unnecessary error messages.
            srv_header,                         # Server header.
            log_file,                           # Path of log file.
            date,                               # Creating date.
        ]
        report = [record]
        # One extra row per identified product.
        for product in products:
            row = copy.deepcopy(record)
            row[6], row[7], row[8] = product[1], product[2], product[3]
            row[9], row[10], row[11] = product[4], product[0], product[5]
            report.append(row)
        # One extra row per suspicious comment.
        for comment in comments:
            row = copy.deepcopy(record)
            row[14] = comment
            report.append(row)
        # One extra row per leaked error message.
        for error in errors:
            row = copy.deepcopy(record)
            row[15] = error
            report.append(row)
        msg = 'Create report : {}'.format(self.report_path)
        self.utility.print_message(OK, msg)
        self.utility.write_log(20, msg)
        pd.DataFrame(report).to_csv(self.report_path.replace('*', fqdn), mode='a', header=False, index=False)
        self.utility.write_log(20, '[Out] Create report body [{}].'.format(self.file_name))
|
import datetime
import os
import random
import string
from typing import List, Dict
import json
from api.models import Citizen
from school import settings
def generate_correct_birth_date():
    """Return a random birth date formatted 'dd.mm.yyyy' (years 1990-2010)."""
    # Draw in the original order (day, month, year); day caps at 28 so every
    # month is valid. The generator is consumed lazily, preserving RNG order.
    d, m, y = (random.randint(lo, hi) for lo, hi in ((1, 28), (1, 12), (1990, 2010)))
    return datetime.date(day=d, month=m, year=y).strftime("%d.%m.%Y")
def generate_correct_special_string():
    """Random string: one leading alphanumeric char plus 0-50 ASCII letters."""
    head = random.choice(string.digits + string.ascii_letters)
    tail = [random.choice(string.ascii_letters) for _ in range(random.randint(0, 50))]
    return head + "".join(tail)
def generate_correct_town():
    """Random town name (any valid 'special string')."""
    return generate_correct_special_string()
def generate_correct_street():
    """Random street name (any valid 'special string')."""
    return generate_correct_special_string()
def generate_correct_building():
    """Random building identifier (any valid 'special string')."""
    return generate_correct_special_string()
def generate_correct_apartment():
    """Random apartment number in [0, 5000]."""
    # randrange(0, 5001) is exactly what randint(0, 5000) delegates to.
    return random.randrange(0, 5001)
def generate_correct_name():
    """Random name: 1-50 ASCII letters."""
    length = random.randint(1, 50)
    return "".join(random.choice(string.ascii_letters) for _ in range(length))
def generate_correct_gender():
    """Pick one of the Citizen model's two gender constants."""
    return random.choice([Citizen.MALE, Citizen.FEMALE])
def generate_correct_relatives_table(number_of_citizens):
    """Build a random symmetric citizen_id -> [relative ids] mapping.

    Every edge is mirrored in both directions, so the result satisfies
    is_valid_citizens_relative_table(). Self-relations may occur.
    """
    table = {cid: [] for cid in range(number_of_citizens)}
    for cid in range(number_of_citizens):
        # Up to 10 randomly chosen relatives per citizen.
        for other in random.sample(range(number_of_citizens), random.randint(0, 10)):
            if cid not in table[other]:
                table[other].append(cid)
            if other not in table[cid]:
                table[cid].append(other)
    return table
def print_relatives_table(relatives_table: Dict):
    """Dump the relatives mapping, one 'id -> [ids]' line per citizen."""
    for cid in relatives_table:
        print(cid, "->", relatives_table[cid])
def is_valid_citizens_relative_table(table):
    """True iff every relatives list exists, points only at known ids,
    and every relation is symmetric."""
    for cid, relatives in table.items():
        if relatives is None:
            return False
        for rid in relatives:
            # The relative must exist and must list cid back.
            if rid not in table or cid not in table[rid]:
                return False
    return True
def generate_correct_citizens(number_of_citizens: int) -> List[Dict]:
    """Generate *number_of_citizens* citizen dicts sharing a consistent relatives graph."""
    relatives_table = generate_correct_relatives_table(number_of_citizens)
    # Defensive: the generator is symmetric by construction.
    if not is_valid_citizens_relative_table(relatives_table):
        raise Exception
    return [
        {
            "citizen_id": i,
            "town": generate_correct_town(),
            "street": generate_correct_street(),
            "building": generate_correct_building(),
            "apartment": generate_correct_apartment(),
            "name": generate_correct_name(),
            "gender": generate_correct_gender(),
            "birth_date": generate_correct_birth_date(),
            "relatives": relatives_table[i],
        }
        for i in range(number_of_citizens)
    ]
def export_test_post(number):
    """Write a {'citizens': [...]} JSON payload for *number* citizens to out.json."""
    payload = json.dumps({"citizens": generate_correct_citizens(number)})
    target = os.path.join(settings.BASE_DIR, "out.json")
    with open(target, "w") as out:
        print(payload, file=out)
def compute_age(start_date: datetime.date, end_date=None):
    """Return the age in whole years at *end_date* (defaults to today, UTC).

    Raises ValueError when *start_date* lies after *end_date*.
    """
    end_date = end_date or datetime.datetime.utcnow().date()
    if end_date < start_date:
        raise ValueError(f"birth_date: {start_date} in the future. Can't find age")
    age = end_date.year - start_date.year
    # Bug fix: a bare year difference overstates the age for dates before
    # the birthday in end_date's year.
    if (end_date.month, end_date.day) < (start_date.month, start_date.day):
        age -= 1
    return age
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 17:48:38 2019
@author: 2020shatgiskessell
"""
import cv2
import numpy as np
import timeit
from matplotlib import pyplot as plt
# Template (what to find) and search image; paths are machine-specific.
template = cv2.imread("/Users/2020shatgiskessell/Desktop/Wheres_Waldo/template.png")
img = cv2.imread("/Users/2020shatgiskessell/Desktop/Wheres_Waldo/Test_Images/wheres-waldo.jpg")
def template_matching(roi, template):
    """Locate *template* inside *roi*, draw the best-match rectangle on it,
    save a 'matched_<method>10.png' snapshot and plot the result."""
    h, w, _ = template.shape
    #methods = ['cv2.TM_SQDIFF', 'cv2.TM_CCORR','cv2.TM_CCORR_NORMED']
    methods = ['cv2.TM_CCORR_NORMED']
    for method in methods:
        method = eval(method)  # eval of a fixed local list only; never user input
        # Bug fix: match against the roi argument instead of the module-level
        # img -- the function previously ignored its first parameter.
        res = cv2.matchTemplate(roi, template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        # SQDIFF methods measure distance, so the best match is the minimum.
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        cv2.rectangle(roi, top_left, bottom_right, 255, 2)
        cv2.imwrite("matched_" + str(method) + "10.png", roi)
        plt.subplot(121),plt.imshow(res,cmap = 'gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(122),plt.imshow(roi,cmap = 'gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.suptitle(str(method))
        plt.show()
# Time a single full matching pass over the search image.
start = timeit.default_timer()
template_matching(img, template)
stop = timeit.default_timer()
run_time = stop - start
print('Time: ', str(run_time))
# Test Case 1:
# The following variables contain values as described below:
# balance - the outstanding balance on the credit card
# annualInterestRate - annual interest rate as a decimal+
import math
# Bisection search for the lowest fixed monthly payment that clears the
# balance within 12 months under monthly compounding interest.
balance = 999999
annualInterestRate = 0.18
# aprxmonthlypay = int(round(totalpay/12, 0)) #increased divider from 12->14 so we start with a lesser aprxmonthlypay value and adjust from there
totaliter = 0
monthlyint = annualInterestRate/12
totalint = pow(1+monthlyint,12)
# Lower bound: balance split over 12 months with no interest at all.
LB = round(balance / 12 , 2)
# Upper bound: fully compounded year-end balance split over 12 months.
UB = round(balance * (pow(1+monthlyint,12)/12),2)
aprxmonthlypay = round((LB + UB) / 2 , 2)
print('totalint: ' +str(totalint))
print('STARTING WITH aprxmonthlypay: ' +str(aprxmonthlypay))
print('LB: ' +str(LB))
print('UB: ' +str(UB))
#x = input("are you ready?")
wait = ''
wait = input('PRESS ENTER TO CONTINUE.')
while aprxmonthlypay > 0:
    # Simulate one year of payments at the current guess.
    iterbalance = balance
    for i in range(1,13):
        totaliter += 1
        iterbalance -= aprxmonthlypay
        # Interest accrues monthly on the post-payment balance.
        iterbalance = iterbalance + (annualInterestRate / 12) * iterbalance
        print("Month " + str(i) + " remaining balance is = " + "%.2f" % iterbalance)
    # NOTE(review): indentation was lost in this copy; the checks below are
    # reconstructed as running AFTER the 12-month simulation, which matches
    # the in-line comments and makes the bisection converge -- verify.
    if iterbalance < -0.01: #the 12th month payment will result in a negative balance (ie. the balance is OVERPAID) #removed: iterbalance + aprxmonthlypay < 0 or
        UB = aprxmonthlypay
        aprxmonthlypay = (LB + UB) / 2
        print("balance is OVERPAID. New monthly pay is: " + str(aprxmonthlypay) + " within range: " + str(LB) + " and " + str(UB))
        #wait = input('PRESS ENTER TO CONTINUE.')
    elif iterbalance > 0.01: #UNDERPAID #removed: it1erbalance - aprxmonthlypay > 0 or
        LB = aprxmonthlypay
        aprxmonthlypay = (LB + UB) / 2
        print("balance is UNDERPAID. New monthly pay is: " + str(aprxmonthlypay) + " within range: " + str(LB) + " and " + str(UB))
        #wait = input('PRESS ENTER TO CONTINUE.')
    else:
        #wait = input('breaking')
        break
# (Earlier "final adjustment" experiments, kept commented out.)
#    if iterbalance > aprxmonthlypay: #FINAL ADJUSTMENT: IF PAYMENT dropped PAST paying BALANCE in full AND NOW IT IS UNDERPAID, EXECUTES ONLY ONCE
#        print("Final adjustment needed, increasing monthpay from: " + str(aprxmonthlypay) + " to " + str(aprxmonthlypay + 10))
#        aprxmonthlypay += .10
#        print("Lowest Payment: " + str(round(aprxmonthlypay,2)))
#    elif aprxmonthlypay == 0: #cannot make zero payments...
#        aprxmonthlypay += .10
#        print("Lowest Payment: " + str(round(aprxmonthlypay,2)))
#    elif iterbalance > 0: #cannot leave positive balance
#        aprxmonthlypay += .10
#        print("Lowest Payment: " + str(round(aprxmonthlypay,2)))
#    else:
print("Lowest Payment: " + str(round(aprxmonthlypay,2)))
print(str(totaliter/12) + " # of iterations")
#totalint += (annualInterestRate/12)*balance
#print("Remaining balance: " + "%.2f" % iterbalance)
# Report whether the input line consists entirely of alphabetic characters.
s = input()
if s.isalpha():  # no need to compare a boolean against True
    print("Alphabet")  # bug fix: message previously read "Al[phabet"
else:
    print("No")
|
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
import os
os.environ['KERAS_BACKEND']='theano' # Why theano why not
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Embedding
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding, Dropout, LSTM, GRU, Bidirectional
from keras.models import Model
from keras.callbacks import ModelCheckpoint
import csv
# %matplotlib inline
# Tokenizer / model hyper-parameters.
MAX_SEQUENCE_LENGTH = 100  # tokens kept per document (pad/truncate target)
MAX_NB_WORDS = 20000       # vocabulary cap for the tokenizer
EMBEDDING_DIM = 100        # GloVe vector size; must match glove.6B.100d.txt
VALIDATION_SPLIT = 0.2     # fraction of the data held out for validation
def clean_str(string):
    """Strip backslashes and quote characters, then trim and lowercase."""
    for pattern in (r"\\", r"\'", r"\""):
        string = re.sub(pattern, "", string)
    return string.strip().lower()
# Load the training TSV and drop rows with missing fields.
df = pd.read_csv('train.tsv', sep='\t', header=0)
df = df.dropna()
df = df.reset_index(drop=True)
print('Shape of dataset ',df.shape)
print(df.columns)
print('No. of unique classes',len(set(df['label'])))
# Map each distinct label to a dense integer id (sorted for determinism).
macronum=sorted(set(df['label']))
macro_to_id = dict((note, number) for number, note in enumerate(macronum))
def fun(i):
    # Translate a raw label value into its dense integer id (see macro_to_id).
    return macro_to_id[i]
# Replace raw labels with their integer ids.
df['label']=df['label'].apply(fun)
texts = []
labels = []
# Strip HTML from each comment and normalize the text.
for idx in range(df.comment.shape[0]):
    text = BeautifulSoup(df.comment[idx])
    texts.append(clean_str(str(text.get_text().encode())))
for idx in df['label']:
    labels.append(idx)
#print(labels[:10])
#print(texts[:10])
# Fit the vocabulary and convert documents to padded integer sequences.
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Number of Unique Tokens',len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of Data Tensor:', data.shape)
print('Shape of Label Tensor:', labels.shape)
# Shuffle, then split the validation set off the tail.
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
# Load pre-trained GloVe vectors: word -> 100-d float32 array.
embeddings_index = {}
f = open('glove.6B.100d.txt',encoding='utf8')
for line in f:
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
#print('Total %s word vectors in Glove 6B 100d.' % len(embeddings_index))
# Embedding matrix: GloVe vector where known, random init otherwise.
embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
# trainable=True lets the GloVe weights be fine-tuned during training.
embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,trainable=True)
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
# (Earlier CNN experiment kept commented out for reference.)
#Build a model
#l_cov1= Conv1D(200, 5, activation='relu')(embedded_sequences)
#l_pool1 = MaxPooling1D(2)(l_cov1)
#l_cov2 = Conv1D(200, 9, activation='relu')(l_pool1)
#l_pool2 = MaxPooling1D(2)(l_cov2)
#l_lstm = LSTM(300, return_sequences=True, activation="relu")(l_pool2)
#l_cov3 = Conv1D(300, 5, activation='relu')(l_lstm)
#l_pool3 = MaxPooling1D(16)(l_cov3) # global max pooling
#l_flat = Flatten()(l_pool3)
#l_dense = Dense(400, activation='relu')(l_flat)
#preds = Dense(len(macronum), activation='softmax')(l_dense)
#
#model = Model(sequence_input, preds)
#model.compile(loss='categorical_crossentropy',
#              optimizer='rmsprop',
#metrics=['acc'])
#print("Simplified convolutional neural network")
#model.summary()
#RNN
# Bidirectional LSTM classifier over the embedded sequences.
l_lstm = Bidirectional(LSTM(100))(embedded_sequences)
preds = Dense(len(macronum), activation='softmax')(l_lstm)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
print("Bidirectional LSTM")
model.summary()
# Checkpoint the best model by validation accuracy.
cp=ModelCheckpoint('model_cnn.hdf5',monitor='val_acc',verbose=1,save_best_only=True)
history=model.fit(x_train, y_train, validation_data=(x_val, y_val),epochs=6, batch_size=200,callbacks=[cp])
y_predict = model.predict(x_val)
# Processing test data to make prediction
df_test = pd.read_csv('test.tsv', sep='\t', header=0)
df_test = df_test.dropna()
df_test = df_test.reset_index(drop = True)
print('Shape of dataset ',df_test.shape)
print(df_test.columns)
print('No. of unique classes',len(set(df_test['id'])))
texts_test = []
testlist = []
for idx in range(df_test.comment.shape[0]):
    text_test = BeautifulSoup(df_test.comment[idx])
    texts_test.append(str(text_test.get_text().encode()))
for idx in df_test['id']:
    testlist.append(idx)
print(df_test.comment.shape[0])
print(len(texts_test))
# Reuse the training tokenizer so test sequences share the same vocabulary.
#tokenizer_test = Tokenizer(num_words=MAX_NB_WORDS)
#tokenizer_test.fit_on_texts(texts_test)
sequences_test = tokenizer.texts_to_sequences(texts_test)
#word_index_test = tokenizer.word_index
data_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
#indices_test = np.arange(data_test.shape[0])
#np.random.shuffle(indices_test)
#data_test = data_test[indices_test]
#prediction
final_pre = model.predict(data_test)
finalpre_list = []
# NOTE(review): unpacking two scores per row assumes exactly two classes
# (len(macronum) == 2) -- confirm against the training labels.
for x,y in final_pre:
    if x > y:
        finalpre_list.append(0);
    else:
        finalpre_list.append(1);
#Write into csv
# Bug fix: the old loop iterated set(df_test['id']), whose order is
# arbitrary and unrelated to the row order used to build data_test /
# finalpre_list, so ids could be paired with the wrong predictions.
# testlist preserves the original row order.
with open('fileName.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(["id", "label"])
    for item, label in zip(testlist, finalpre_list):
        writer.writerow([str(item), str(label)])
|
# coding=utf-8
import urllib
import urllib2
import cookielib
import sys
from StringIO import StringIO
import login
#print resp.read()
def zhuangtai(sss):
    # Post the status text *sss* to 3g.renren.com after logging in.
    # NOTE(review): login()'s return value is unused; the session presumably
    # lives in a global cookie jar installed by the login module -- verify.
    html=login.login()
    xn2={}
    xn2['status']=sss
    xn2['update']="发布"  # submit-button form value ("publish"); must stay as-is
    # NOTE(review): this local deliberately left shadowing the function name
    # (doc-only pass).
    zhuangtai=urllib.urlencode(xn2)
    req2=urllib2.Request('http://3g.renren.com/status/wUpdateStatus.do',zhuangtai)
    resp2=urllib2.urlopen(req2)
    print sss
|
import sys
from config import load_config
from peewee import MySQLDatabase
from models import Order
# Connect to MySQL using the credentials from the project config.
config = load_config()
db = MySQLDatabase(**config["DB_CONFIG"])
try:
    db.create_tables([Order])
except Exception as e:
    # Best-effort: table creation errors (e.g. table already exists)
    # are deliberately ignored.
    pass
|
'''
Script used to experiment with random vs. stratified sampling schemes
to build random forest model for age mapping.
'''
import sys, os
import numpy as np
def main(args):
    # CLI: args[1] = stratified-sample CSV, args[2] = random-sample CSV.
    stratPath = args[1] #csv path
    randPath = args[2] #csv path
    # pctRand_str = args[3] #0-100
    # Sweep the random-sample share from 5% to 75% in 5% steps.
    pcts = np.arange(5,80,5)
    for p in pcts:
        print "STARTING ", p
        pctRand = float(p)/100. #decimal
        #extract csv data
        print "\nExtracting Input CSV data..."
        stratFile = open(stratPath, 'rb')
        stratData = np.genfromtxt(stratFile, delimiter=',', names=True, case_sensitive=False, dtype=None)
        stratFile.close()
        randFile = open(randPath, 'rb')
        randData = np.genfromtxt(randFile, delimiter=',', names=True, case_sensitive=False, dtype=None)
        randFile.close()
        print "\nSplitting Sets..."
        # First half of each file is treated as the training pool.
        numTraining_strat = stratData.size/2 #original samples
        numTraining_rand = randData.size/2 #original samples
        print numTraining_strat, numTraining_rand
        # Mix (1-pctRand) stratified samples with pctRand random samples.
        # NOTE(review): np.floor/np.ceil yield floats as sample counts
        # (deprecated in newer NumPy) and the two counts are both derived
        # from numTraining_strat -- confirm intended.
        ind_strat = np.random.choice(range(numTraining_strat), np.floor(numTraining_strat*(1.-pctRand)), replace=False)
        ind_rand = np.random.choice(range(numTraining_rand), np.ceil(numTraining_strat*pctRand), replace=False)
        print ind_strat.size, ind_rand.size
        # Assemble: mixed training block first, random-sample test block last.
        newData = np.zeros(stratData.size, dtype=[(l,'f8') for l in stratData.dtype.names]) #structured array
        newData[numTraining_strat+1:] = randData[-(numTraining_strat):]
        newData[:ind_strat.size] = stratData[ind_strat]
        newData[ind_strat.size:numTraining_strat] = randData[ind_rand]
        print "\nSaving new CSV..."
        outDir = os.path.dirname(stratPath)
        outputPath = os.path.join(outDir, 'rfsamples_trainstrat{0}pctrand_testrand.csv'.format(str(p)))
        np.savetxt(outputPath, newData, delimiter=",", comments="", header=",".join(i for i in newData.dtype.names), fmt='%s')
        print "  Done! New CSV here: ", outputPath
# Script entry point: forward the raw argv list (program name included,
# since main() indexes from args[1]).
if __name__ == '__main__':
    args = sys.argv
    main(args)
# Result payload expected for a query that matches nothing.
EMPTY_RESULTS = {'count': 0, 'items': []}
# Expected payload for "all incomplete todos": 7 items across three note
# files. line_number values are strings; start/end dates are ISO-format
# strings or None; tags is a (possibly empty) list of strings.
ALL_INCOMPLETE_TODOS = {
    'count': 7,
    'items': [
        {
            'end_date': None,
            'file_path': '/bugs.note',
            'line_number': '7',
            'start_date': None,
            'status': 'incomplete',
            'todo_text': 'Read through https://url-that-looks-like-a-tag.com/foo:bar:baz',
            'tags': []
        }, {
            'end_date': None,
            'file_path': '/section/mixed.note',
            'line_number': '23',
            'start_date': None,
            'status': 'incomplete',
            'tags': ['food', 'baking'],
            'todo_text': 'Test different cooking times for this recipe',
        }, {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '6',
            'start_date': None,
            'status': 'incomplete',
            'todo_text': 'Something to do',
            'tags': []
        }, {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '7',
            'start_date': None,
            'status': 'incomplete',
            'todo_text': 'With a space in the brackets already',
            'tags': []
        }, {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '8',
            # Far-future start date: exercises "not started yet" handling.
            'start_date': '2120-06-01',
            'status': 'incomplete',
            'todo_text': 'Indented todo which won\'t start for a **long** time',
            'tags': ['future']
        }, {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '12',
            'start_date': '2019-05-30',
            'status': 'incomplete',
            'todo_text': 'A follow up item which is still open',
            'tags': []
        }, {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '15',
            'start_date': None,
            'status': 'incomplete',
            'todo_text': 'A more specific todo',
            'tags': ['nested']
        }
    ]
}
# Expected payload for "all skipped todos": 4 items, all in /todos.note,
# same field conventions as ALL_INCOMPLETE_TODOS above.
ALL_SKIPPED_TODOS = {
    'count': 4,
    'items': [
        {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '20',
            'start_date': None,
            'status': 'skipped',
            'tags': [],
            'todo_text': "Something that I could have done but didn't"
        }, {
            # Only skipped item with both a start and an end date stamped.
            'end_date': '2019-06-03',
            'file_path': '/todos.note',
            'line_number': '21',
            'start_date': '2019-05-30',
            'status': 'skipped',
            'tags': ['pointless'],
            'todo_text': 'Indented'
        }, {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '25',
            'start_date': '2019-05-30',
            'status': 'skipped',
            'tags': [],
            'todo_text': 'A follow up item which seemed like a good idea but wasn\'t'
        }, {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '28',
            'start_date': None,
            'status': 'skipped',
            'tags': ['nested'],
            'todo_text': 'A more specific todo'
        }
    ]
}
# Expected payload for "all completed todos": 4 items, all in /todos.note,
# same field conventions as the incomplete/skipped fixtures above.
ALL_COMPLETE_TODOS = {
    'count': 4,
    'items': [
        {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '33',
            'start_date': None,
            'status': 'complete',
            'tags': [],
            'todo_text': 'Something that got completed'
        }, {
            # Only completed item with both start and end dates stamped.
            'end_date': '2019-06-04',
            'file_path': '/todos.note',
            'line_number': '34',
            'start_date': '2019-05-27',
            'status': 'complete',
            'tags': ['topic'],
            'todo_text': 'Indented and with both start and end stamped'
        }, {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '38',
            'start_date': '2019-05-30',
            'status': 'complete',
            'tags': [],
            'todo_text': 'A follow up item which seemed like a good idea and is now finished'
        }, {
            'end_date': None,
            'file_path': '/todos.note',
            'line_number': '41',
            'start_date': None,
            'status': 'complete',
            'tags': ['nested'],
            'todo_text': 'A more specific todo'
        }
    ]
}
# Expected payload for "all questions": 7 items; answer is None for questions
# that are still open. question/answer dates are unset throughout this
# fixture set.
ALL_QUESTIONS = {
    'count': 7,
    'items': [
        {
            'answer': 'Granny Smith',
            'file_path': '/section/mixed.note',
            'line_number': '20',
            'question': 'Which kind of apples make the best apple pie',
            'tags': ['baking'],
            'question_date': None,
            'answer_date': None
        }, {
            'answer': '42',
            'file_path': '/questions.note',
            'line_number': '6',
            'question': 'What is the meaning of life',
            'tags': ['philosophy'],
            'question_date': None,
            'answer_date': None
        }, {
            'answer': 'blue',
            'file_path': '/questions.note',
            'line_number': '8',
            'question': 'What color is the sky',
            'tags': [],
            'question_date': None,
            'answer_date': None
        }, {
            'answer': 'To get to the other side',
            'file_path': '/questions.note',
            'line_number': '12',
            'question': 'Why did the chicken cross the road',
            'tags': [],
            'question_date': None,
            'answer_date': None
        }, {
            'answer': None,
            'file_path': '/questions.note',
            'line_number': '16',
            'question': 'Is the world eternal',
            'tags': [],
            'question_date': None,
            'answer_date': None
        }, {
            'answer': None,
            'file_path': '/questions.note',
            'line_number': '17',
            'question': 'What is the best kind of cereal',
            'tags': ['food'],
            'question_date': None,
            'answer_date': None
        }, {
            'answer': None,
            'file_path': '/questions.note',
            'line_number': '21',
            'question': 'How do you write software with no bugs',
            'tags': ['software'],
            'question_date': None,
            'answer_date': None
        }
    ]
}
# Expected matches for a (case-insensitive) search for "food": four hits
# across three note files, each reported with its file, line and matched text.
SEARCH_RESULTS_FOOD = [
    {
        'file_path': '/definitions.note',
        'line_number': '7',
        'match_content': r'- {food} Something that you eat when you are hungry',
    },
    {
        'file_path': '/questions.note',
        'line_number': '17',
        'match_content': '* ? What is the best kind of cereal :food:',
    },
    {
        'file_path': '/questions.note',
        'line_number': '19',
        'match_content': 'Food is an essential part of a balanced diet',
    },
    {
        'file_path': '/section/mixed.note',
        'line_number': '23',
        'match_content': '- [] Test different cooking times for this recipe :food: :baking:',
    },
]
# Single hit expected when the "food" search is made case-sensitive
# (only the capital-F occurrence matches).
SEARCH_RESULTS_FOOD_SENSITIVE = [
    {
        'file_path': '/questions.note',
        'line_number': '19',
        'match_content': 'Food is an essential part of a balanced diet',
    },
]
# Single hit expected for a multi-word search ("balanced diet").
SEARCH_RESULTS_BALANCED_DIET = [
    {
        'file_path': '/questions.note',
        'line_number': '19',
        'match_content': 'Food is an essential part of a balanced diet',
    },
]
# Every term definition in the fixture notes. display_path renders nested
# directories with an arrow separator (see the 'crust' entry).
ALL_DEFINITIONS = [
    {
        'definition': 'A formal meaning attached to a specific term',
        'display_path': 'definitions.note',
        'file_path': '/definitions.note',
        'line_number': '5',
        'term': 'definition',
    },
    {
        'definition': 'A tool you can use to make your life easier and better',
        'display_path': 'definitions.note',
        'file_path': '/definitions.note',
        'line_number': '6',
        'term': 'software',
    },
    {
        'definition': 'Something that you eat when you are hungry',
        'display_path': 'definitions.note',
        'file_path': '/definitions.note',
        'line_number': '7',
        'term': 'food',
    },
    {
        'definition': 'The flaky part at the bottom of the pie :baking:',
        'display_path': 'section → mixed.note',
        'file_path': '/section/mixed.note',
        'line_number': '9',
        'term': 'crust',
    },
]
# Every location marker in the fixture notes; latitude/longitude are kept as
# strings. The last entry deliberately has an empty name.
ALL_LOCATIONS = [
    {
        'latitude': '40.757898',
        'longitude': '-73.985502',
        'name': 'Times Square',
        'file_path': '/locations.note',
        'display_path': 'locations.note',
        'line_number': '4',
    },
    {
        'latitude': '29.978938',
        'longitude': '31.134116',
        'name': 'The Great Pyramid',
        'file_path': '/locations.note',
        'display_path': 'locations.note',
        'line_number': '5',
    },
    {
        'latitude': '36.193521',
        'longitude': '-112.048667',
        'name': 'The Grand Canyon',
        'file_path': '/locations.note',
        'display_path': 'locations.note',
        'line_number': '6',
    },
    {
        'latitude': '48.858212',
        'longitude': '2.294513',
        'name': '',
        'file_path': '/locations.note',
        'display_path': 'locations.note',
        'line_number': '7',
    },
]
# Every note file in the fixture tree (order matters to the consuming tests).
ALL_FILES = [
    '/questions.note', '/rec.note', '/locations.note', '/todos.note',
    '/section/mixed.note', '/definitions.note', '/bugs.note',
]
# Table of contents for the fixture tree: the root (empty path/text) holds
# six note files plus one subdirectory, 'section', containing mixed.note.
TOC = {
    'dirs': [
        {'dirs': [], 'files': ['mixed.note'], 'path': '/section', 'text': 'section'},
    ],
    'files': [
        'bugs.note', 'definitions.note', 'todos.note',
        'locations.note', 'rec.note', 'questions.note',
    ],
    'path': '',
    'text': '',
}
# Calendar view fixture: events keyed year -> zero-padded month ->
# zero-padded day, each day holding a list of event records. The `type`
# field distinguishes incomplete/skipped/completed todos.
CALENDAR = {
    '2019': {
        '05': {
            '30': [
                {
                    'date': '2019-05-30',
                    'element_id': '',
                    'event': 'A follow up item which is still open',
                    'file_path': '/todos.note',
                    'line_number': '12',
                    'type': 'incomplete_todo'
                }
            ]
        },
        '06': {
            '03': [
                {
                    'date': '2019-06-03',
                    'element_id': '',
                    'event': 'Indented',
                    'file_path': '/todos.note',
                    'line_number': '21',
                    'type': 'skipped_todo'
                }
            ],
            '04': [
                {
                    'date': '2019-06-04',
                    'element_id': '',
                    'event': 'Indented and with both start and end stamped',
                    'file_path': '/todos.note',
                    'line_number': '34',
                    'type': 'completed_todo'
                }
            ]
        }
    }
}
ALL_LINKS = [
{
'line_number': '26',
'source': '/section/mixed.note',
'target': '/todos.note',
'text': 'todos',
'internal': True,
'valid': True
}, {
'line_number': '26',
'source': '/section/mixed.note',
'target': '/questions.note',
'text': 'questions',
'internal': True,
'valid': True
}, {
'line_number': '26',
'source': '/section/mixed.note',
'target': '/definitions.note',
'text': 'definitions',
'internal': True,
'valid': True
}, {
'line_number': '30',
'source': '/section/mixed.note',
'target': '/does/not/exist.note',
'text': 'broken',
'internal': True,
'valid': True
}, {
'line_number': '30',
'source': '/section/mixed.note',
'target': '/lokations.note',
'text': 'typos',
'internal': True,
'valid': True
}, {
'line_number': '2',
'source': '/todos.note',
'target': '/section/mixed.note',
'text': 'the mixed example',
'internal': True,
'valid': True
}, {
'line_number': '57',
'source': '/todos.note',
'target': '/bugs.note',
'text': 'bugs document',
'internal': True,
'valid': True
}, {
'line_number': '23',
'source': '/questions.note',
'target': '/section/mixed.note#Recipes',
'text': 'mixed example',
'internal': True,
'valid': True
}, {
'line_number': '28',
'source': '/section/mixed.note',
'target': 'https://nytimes.com',
'text': 'NY Times',
'internal': False,
'valid': True
}, {
'line_number': '32',
'source': '/section/mixed.note',
'target': '/todos.note',
'text': 'relative paths',
'internal': True,
'valid': True
}, {
'line_number': '13',
'source': '/bugs.note',
'target': '/todos.note',
'text': 'todos',
'internal': True,
'valid': True
}, {
'line_number': '13',
'source': '/bugs.note',
'target': '/locations.note',
'text': 'locations',
'internal': True,
'valid': True
}, {
'line_number': '13',
'target': '/README.md',
'text': 'outside',
'source': '/bugs.note',
'internal': True,
'valid': False
}
]
# The subset of links whose targets cannot be resolved: a missing file,
# a misspelled path, and a target outside the notes root.
INVALID_LINKS = [
    {'line_number': '30', 'source': '/section/mixed.note',
     'target': '/does/not/exist.note', 'text': 'broken',
     'internal': True, 'valid': False},
    {'line_number': '30', 'target': '/lokations.note', 'text': 'typos',
     'source': '/section/mixed.note', 'internal': True, 'valid': False},
    {'line_number': '13', 'target': '/README.md', 'text': 'outside',
     'source': '/bugs.note', 'internal': True, 'valid': False},
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.