Dataset schema (column name, dtype, and observed ranges from the viewer):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2–616)
- content_id: string (length 40)
- detected_licenses: list (length 0–69)
- license_type: string (2 classes)
- repo_name: string (length 5–118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4–63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k–686M, nullable)
- star_events_count: int64 (0–209k)
- fork_events_count: int64 (0–110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2–10.3M)
- extension: string (246 classes)
- content: string (length 2–10.3M)
- authors: list (length 1)
- author_id: string (length 0–212)

| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
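Each row below is one file-level record with the fields above. A minimal sketch of iterating such records with the Hugging Face `datasets` library; the dataset path used here is a placeholder assumption, not necessarily the source of this dump:

```python
# Sketch only: stream file-level records matching the schema above.
# "bigcode/the-stack" is an illustrative placeholder dataset path.
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack", split="train", streaming=True)
for row in ds.take(2):  # streaming avoids downloading the full split
    print(row["repo_name"], row["path"], row["length_bytes"], row["src_encoding"])
```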
5593ce842b79c29877bb25941b0b6530c4ce8540
|
66767138fb776ed9d91dc22bc4796f625824275a
|
/rss_fetcher_test.py
|
a22a0c31a1879f8475d85a74b80c0d887c7d727a
|
[] |
no_license
|
jamalzkhan/Twitter-News
|
f4441957a238e3230e49c5df31b6544bcf69f8c0
|
d7c053e12cd74cc459b80af8177f8d8ad1f604a1
|
refs/heads/master
| 2020-04-06T06:39:49.273043
| 2012-01-14T19:40:55
| 2012-01-14T19:40:55
| 3,179,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
import rss_fetcher
import loggers.log as log
import loggers.logger as logger
import unittest
class RssFetcherTest(unittest.TestCase):

    def setUp(self, rss_file="dummy_rss.rss", rss_url=None):
        self.rss_file = rss_file
        self.main_logger = logger.Logger()
        self.r_log = log.Log("RSS Test Fetcher", self.main_logger)
        f = open(self.rss_file, 'r+')
        rss = f.read()
        # print rss
        f.close()
        self.rss = rss
        self.rss_fetcher = rss_fetcher.RssFetcher(rss=self.rss, log=self.r_log)

    def test_rss_is_broken_url(self):
        """Test to see what happens if the RSS Feed that is passed is broken"""
        self.rss_fetcher.rss_link = "http://thisisfake.com"
        self.rss_fetcher.getNews()
        print self.rss_fetcher.news_stories
        self.assertTrue(len(self.rss_fetcher.news_stories) == 0)

    def test_rss_returns_correct_format(self):
        """Test to see that given a dummy feed we get the correct stories"""
        self.rss_fetcher.getNews()
        stories = self.rss_fetcher.news_stories
        # Various checks for the dummy rss feed, whose values are known
        self.assertTrue(len(stories) == 2)

if __name__ == "__main__":
    print "Starting Unit Testing for RSS Fetcher Thread"
    unittest.main()
|
[
"me@jamalkhan.com"
] |
me@jamalkhan.com
|
3802578c4bdb8ff686f1ca7901c16d4bf7ab1688
|
7a784eb73f13c1df4bb60cad26774a76abceb475
|
/day7.py
|
a85150ceb77b3f1fa0a60c98a5f135374d64e4a5
|
[] |
no_license
|
albarralnunez/advent-of-code-16
|
8e98e3d08643a53208e66b770eb7cf877bf283f6
|
776616ec82325126259324c9edfc0d5e0596bcb7
|
refs/heads/master
| 2021-06-08T17:12:21.540471
| 2016-12-19T23:43:37
| 2016-12-19T23:43:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
#!/usr/local/bin/python
import logging
from collections import Counter
from libs import commons
from libs7.ipv7 import IPv7
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def get_ips(input_file):
    return (IPv7(x[:-1]) for x in open(input_file, 'r').xreadlines())

@commons.speed_test
def problem_1(ips):
    return len(filter(lambda x: x.has_tls(), ips))

@commons.speed_test
def problem_2(ips):
    return len(filter(lambda x: x.has_ssl(), ips))

def main():
    ips = get_ips('inputs/day_7.in')
    print 'Problem 1: %s IPs support TLS' % problem_1(ips)
    ips = get_ips('inputs/day_7.in')
    print 'Problem 2: %s IPs support SSL' % problem_2(ips)

if __name__ == "__main__":
    main()
|
[
"danielalbarral@gmail.com"
] |
danielalbarral@gmail.com
|
48075fcc4afeaf9cb88f2c15f4919d29e15f5a03
|
e8a6fcaf493d4a03691e949993d3e26989b4e742
|
/code/lmfit_PseudoVoigt_CP_D_RT.py
|
dfe244a5aece902d850fc87a400a300c70e69ce6
|
[] |
no_license
|
andrewkim47/local_pithy
|
93183235425a2ee8681e960c1f18f1e2c4b1d65a
|
259acd8d2b48ac29d1d9328a1744402c111834d5
|
refs/heads/master
| 2020-03-29T22:39:18.600081
| 2018-10-22T18:48:33
| 2018-10-22T18:48:33
| 150,433,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,936
|
py
|
from Histogram_Band import *
from lmfit.models import PseudoVoigtModel
bins = range(256)
fdir = '/Users/andrewkim/Documents/AA_Discharge/TIFFS/'
x = arange(256)
#Grab Data
key = 'CP_D_RT'
code = datas[key]['code']
folder = fdir + code + '/Histogram/'
endp = datas[key]['endpoint']
ndata,adata,bands = getBandHist(folder)
####################################
############ FIRST PASS , grab peak positions
####################################
# numband = 1
# datas[key]['zband'] = mergeBand(ndata,endp[0],endp[1],numband)
# #Y = Total Histogram of Zinc:
# band = 0
# Y = array(mergeBand(ndata,endp[0],endp[1],1)[band])
# y = Y
# pv1 = PseudoVoigtModel(prefix='pv1_')
# pars = pv1.guess(y, x=x)
# pars['pv1_amplitude'].set(0.33,min=0)
# pars['pv1_sigma'].set(13)
# pars['pv1_center'].set(73.9)
# pars['pv1_fraction'].set(0.10)
# pars['pv1_fwhm'].set(50)
# pars['pv1_height'].set(0.05)
# pv2 = PseudoVoigtModel(prefix='pv2_')
# pars.update(pv2.make_params())
# pars['pv2_amplitude'].set(0.33,min=0)
# pars['pv2_sigma'].set(13)
# pars['pv2_center'].set(88.7)
# pars['pv2_fraction'].set(0.10)
# pars['pv2_fwhm'].set(50)
# pars['pv2_height'].set(0.05)
# pv3 = PseudoVoigtModel(prefix='pv3_')
# pars.update(pv3.make_params())
# pars['pv3_amplitude'].set(0.33,min=0)
# pars['pv3_sigma'].set(13)
# pars['pv3_center'].set(112.)
# pars['pv3_fraction'].set(0.10)
# pars['pv3_fwhm'].set(50)
# pars['pv3_height'].set(0.05)
# # mod = pv1
# mod = pv1 + pv2 + pv3
# init = mod.eval(pars, x=x)
# out = mod.fit(y, pars, x=x)
# comps = out.eval_components(x=x)
# figure(figsize=(6.4,4.8*3))
# subplot(6,1,1)
# plot(x,y,lw=3)
# plot(x,out.best_fit,c='k',lw=3,ls='--')
# for subkey in comps.keys():
# plot(x,comps[subkey],ls='--')
# # title(key+'_B'+str(band))
# title(key+ ' Total Zinc Region')
# grid()
# xlim(50,200)
# # showme()
# # clf()
# results = out.fit_report(min_correl=0.5)
# print results.split('[[')[3]
# numband = 5
# datas[key]['zband'] = mergeBand(ndata,endp[0],endp[1],numband)
# for band in range(5):
# # for band in [0,4]:
# y = array(datas[key]['zband'][band])
# # mod = pv1
# mod = pv1 + pv2 + pv3
# init = mod.eval(pars, x=x)
# out = mod.fit(y, pars, x=x)
# comps = out.eval_components(x=x)
# subplot(6,1,band+2)
# plot(x,y,lw=3)
# plot(x,out.best_fit,c='k',lw=3,ls='--')
# for subkey in comps.keys():
# plot(x,comps[subkey],ls='--')
# # title(key+'_B'+str(band))
# title(key+ ' Zinc SubRegion '+str(band))
# grid()
# xlim(50,200)
# results = out.fit_report(min_correl=0.5)
# print results.split('[[')[3]
# tight_layout()
# showme()
# clf()
# # ####################################
# # ############ 2nd pass
# # ####################################
numband = 1
datas[key]['zband'] = mergeBand(ndata,endp[0],endp[1],numband)
#Y = Total Histogram of Zinc:
band = 0
Y = array(mergeBand(ndata,endp[0],endp[1],1)[band])
y = Y
pv1 = PseudoVoigtModel(prefix='pv1_')
pars = pv1.guess(y, x=x)
pars['pv1_amplitude'].set(0.27,min=0)
pars['pv1_sigma'].set(16)
pars['pv1_center'].set(73.9, vary = False)
pars['pv1_fraction'].set(0.10)
pars['pv1_fwhm'].set(50)
pars['pv1_height'].set(0.03)
pv2 = PseudoVoigtModel(prefix='pv2_')
pars.update(pv2.make_params())
pars['pv2_amplitude'].set(0.39,min=0)
pars['pv2_sigma'].set(13)
pars['pv2_center'].set(88.7, vary = False)
pars['pv2_fraction'].set(0.10)
pars['pv2_fwhm'].set(50)
pars['pv2_height'].set(0.05)
pv3 = PseudoVoigtModel(prefix='pv3_')
pars.update(pv3.make_params())
pars['pv3_amplitude'].set(0.34,min=0)
pars['pv3_sigma'].set(12)
pars['pv3_center'].set(112., vary = False)
pars['pv3_fraction'].set(0.10)
pars['pv3_fwhm'].set(50)
pars['pv3_height'].set(0.05)
# mod = pv1
mod = pv1 + pv2 + pv3
init = mod.eval(pars, x=x)
out = mod.fit(y, pars, x=x)
comps = out.eval_components(x=x)
figure(figsize=(6.4,4.8*3))
subplot(6,1,1)
plot(x,y,lw=3)
plot(x,out.best_fit,c='k',lw=3,ls='--')
for subkey in comps.keys():
    plot(x,comps[subkey],ls='--')
# title(key+'_B'+str(band))
title(key+ ' Total Zinc Region')
grid()
xlim(50,150)
# showme()
# clf()
results = out.fit_report(min_correl=0.5)
print results.split('[[')[3]
numband = 5
datas[key]['zband'] = mergeBand(ndata,endp[0],endp[1],numband)
print array_split(arange(endp[0],endp[1]),numband)
for band in range(5):
# for band in [0,4]:
    y = array(datas[key]['zband'][band])
    # mod = pv1
    mod = pv1 + pv2 + pv3
    init = mod.eval(pars, x=x)
    out = mod.fit(y, pars, x=x)
    comps = out.eval_components(x=x)
    subplot(6,1,band+2)
    plot(x,y,lw=3)
    plot(x,out.best_fit,c='k',lw=3,ls='--')
    for subkey in comps.keys():
        plot(x,comps[subkey],ls='--')
    # title(key+'_B'+str(band))
    title(key+ ' Zinc SubRegion '+str(band))
    grid()
    xlim(50,150)
    results = out.fit_report(min_correl=0.5)
    print results.split('[[')[3]
tight_layout()
showme()
clf()
|
[
"noreply@github.com"
] |
andrewkim47.noreply@github.com
|
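The record above relies on lmfit's composite-model pattern: prefixed `PseudoVoigtModel` instances summed with `+` and fit against shared parameters. A minimal self-contained sketch of that pattern on synthetic data, using only the lmfit calls the sample itself uses:

```python
# Sketch of the composite-fit pattern from the record above (synthetic data).
import numpy as np
from lmfit.models import PseudoVoigtModel

x = np.arange(256)
# Two overlapping synthetic peaks, standing in for the zinc histogram bands.
y = np.exp(-((x - 74) / 10.0) ** 2) + 0.5 * np.exp(-((x - 112) / 8.0) ** 2)

pv1 = PseudoVoigtModel(prefix='pv1_')
pars = pv1.guess(y, x=x)                       # initial guess from the data
pv2 = PseudoVoigtModel(prefix='pv2_')
pars.update(pv2.make_params(center=112.0, sigma=8.0, amplitude=0.5))

mod = pv1 + pv2                                # composite model: sum of peaks
out = mod.fit(y, pars, x=x)
print(out.fit_report(min_correl=0.5))
```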
4f54a215ed076c56c7ce649c75f4e4cb4637d034
|
302ef0325c80692957d389a28e06a683b55858e8
|
/UX/auth/views.py
|
11af3883855be4212aaf98ad1ad5533038ac1afc
|
[] |
no_license
|
souhagaa/markov_link_prediction
|
6cf4534c4bea7d8fa1a815c9fd65d98f6ad3fc5f
|
3f9997ef757d640814b480b7b330bd55b5aa2114
|
refs/heads/master
| 2020-07-02T18:55:01.218815
| 2019-12-03T21:42:17
| 2019-12-03T21:42:17
| 201,630,178
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,569
|
py
|
from flask import request, jsonify, Blueprint, current_app as app
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_required,
jwt_refresh_token_required,
get_jwt_identity,
get_raw_jwt
)
from UX.models import User
from UX.extensions import pwd_context, jwt
from UX.auth.helpers import (
revoke_token,
is_token_revoked,
add_token_to_database
)
blueprint = Blueprint('auth', __name__, url_prefix='/auth')
@blueprint.route('/login', methods=['POST'])
def login():
    """Authenticate user and return token"""
    if not request.is_json:
        return jsonify({"msg": "Missing JSON in request"}), 400

    username = request.json.get('username', None)
    password = request.json.get('password', None)
    if not username or not password:
        return jsonify({"msg": "Missing username or password"}), 400

    user = User.query.filter_by(username=username).first()
    if user is None or not pwd_context.verify(password, user.password):
        return jsonify({"msg": "Bad credentials"}), 400

    access_token = create_access_token(identity=user.id)
    refresh_token = create_refresh_token(identity=user.id)
    add_token_to_database(access_token, app.config['JWT_IDENTITY_CLAIM'])
    add_token_to_database(refresh_token, app.config['JWT_IDENTITY_CLAIM'])

    ret = {
        'access_token': access_token,
        'refresh_token': refresh_token
    }
    return jsonify(ret), 200

@blueprint.route('/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
    current_user = get_jwt_identity()
    access_token = create_access_token(identity=current_user)
    ret = {
        'access_token': access_token
    }
    add_token_to_database(access_token, app.config['JWT_IDENTITY_CLAIM'])
    return jsonify(ret), 200

@blueprint.route('/revoke_access', methods=['DELETE'])
@jwt_required
def revoke_access_token():
    jti = get_raw_jwt()['jti']
    user_identity = get_jwt_identity()
    revoke_token(jti, user_identity)
    return jsonify({"message": "token revoked"}), 200

@blueprint.route('/revoke_refresh', methods=['DELETE'])
@jwt_refresh_token_required
def revoke_refresh_token():
    jti = get_raw_jwt()['jti']
    user_identity = get_jwt_identity()
    revoke_token(jti, user_identity)
    return jsonify({"message": "token revoked"}), 200

@jwt.user_loader_callback_loader
def user_loader_callback(identity):
    return User.query.get(identity)

@jwt.token_in_blacklist_loader
def check_if_token_revoked(decoded_token):
    return is_token_revoked(decoded_token)
|
[
"souha.echelon20@gmail.com"
] |
souha.echelon20@gmail.com
|
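A hypothetical client for the `/auth` blueprint above; the base URL and credentials are placeholder assumptions, and `flask_jwt_extended` expects the refresh token as a Bearer header on `/refresh`:

```python
# Hypothetical usage of the auth endpoints above; URL and credentials are assumptions.
import requests

base = "http://localhost:5000/auth"
tokens = requests.post(base + "/login",
                       json={"username": "alice", "password": "secret"}).json()
# Refresh flow: flask_jwt_extended reads the refresh token from the Authorization header.
new_access = requests.post(base + "/refresh",
                           headers={"Authorization": "Bearer " + tokens["refresh_token"]}).json()
print(new_access["access_token"])
```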
9481547229af17b8ed0134a977c9a946bdf8de00
|
24a15591f9ff280d030cd9f966b3977dc11fd027
|
/ImarketBD/Departments/apps.py
|
cfc5d3de8de21eda26f01447d389e65418751744
|
[] |
no_license
|
Arfin99/Integrated-Market-Platform
|
54ae1d67226b24c0d259af129e96f090ccb598b4
|
8a3340edbe8a9629f2d41f7e063a657e8d710db4
|
refs/heads/master
| 2023-09-03T20:57:48.188204
| 2021-02-10T10:37:04
| 2021-02-10T10:37:04
| 337,688,153
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
from django.apps import AppConfig
class DepartmentsConfig(AppConfig):
    name = 'Departments'
|
[
"jobyerarfin1997@gmail.com"
] |
jobyerarfin1997@gmail.com
|
475e321d312d173b6927057ffe431fb59be085fb
|
d4795c386ba45a884ff125736ca70a0813531f9b
|
/Carrier_Assign1.py
|
225ba602b3a9beea6fec903fdab0cf66a809f95f
|
[] |
no_license
|
AhamadHussainD/TakeHomeAssign_Carrier
|
4399e9d758978f71ec19fbdf14d160e459b11621
|
77b8735131851515b5337609e4736e8ea51d2f8e
|
refs/heads/main
| 2023-03-18T22:38:22.311000
| 2021-03-11T18:16:37
| 2021-03-11T18:16:37
| 343,303,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,572
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 19:05:49 2021
@author: Ahamad Husssain, D
Carrier Inc. Data Science & Innovation Take Home Challenge
THE CHALLENGE: Zeta Disease Predictio
Note: the Code is Confidential, needs author prior approval
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
# Function to Get the current
# working directory
def current_path():
    print("Current working directory before")
    print(os.getcwd())
    print()

current_path()
# Changing the CWD
os.chdir('F:/Analytics_course/Python_practice/TakeHome_Carrier')
# Printing CWD after
current_path()
data=pd.read_csv('2021-01-21_zeta-disease_training-data_dsi-take-home-challenge.csv',index_col=False)
print(len(data))
print(len(data.columns))
data.info()
print(data.describe)
df=data
df.shape
###### Preprocessing / feature selection / data visualization steps ********
dupli_df=df[df.duplicated()]
print("no of duplicate rows:",dupli_df.shape)
dupli_df
df.sum(axis = 0, skipna = True)
df['zeta_disease'].sum(axis = 0, skipna = True)
df.iloc[:,-1:].sum()
df=df.drop_duplicates()
df.info()
plt.hist(df['zeta_disease'])
plt.xlabel('Zeta_Desease')
plt.ylabel('Frequency')
plt.xticks([0,1])
plt.title("Zeta Disease Distribution")
plt.show()
df=df.dropna()
print(len(df))
df.isnull().sum()
#histogram of all variables
df.hist(figsize=(10,10))
plt.show()
df1=df.iloc[:,0:8]
df1.info()
df1.describe()
dfo=[]
dfo=df.iloc[:,8:9]
dfo.info()
dfo.describe()
corrmat=df1.corr()
top_corr_features=corrmat.index
print(corrmat)
plt.figure(figsize=(10,10))
plt.title('Variables Correlation map')
g=sns.heatmap(df1[top_corr_features].corr(),annot=True,cmap="RdYlGn")
#sns.pairplot(df1)
sns.pairplot(df, hue = 'zeta_disease')
corrmat['age'].sort_values(ascending=False)
from scipy import stats
import pylab
df2=df1
summary1=df2.describe()
print(summary1)
for i in range(len(df1.columns)):
    mean1=summary1.iloc[1,i]
    std1=summary1.iloc[2,i]
    df2.iloc[:,i:(i+1)]=(df1.iloc[:,i:(i+1)]-mean1)/std1
print(df2.describe())
plt.figure(figsize=(10,10))
plotray= df2.values
#boxplot(plotray)
#plot.xticks(range(1,9),abalone.columns[1:9])
sns.boxplot(data=plotray)
plt.xlabel('Variables in the order of the data frame')
plt.ylabel('Standard deviations')
plt.title("Standardised Variables Box Plots")
plt.show()
z=np.abs(stats.zscore(plotray))
print(z)
q=np.amax(z)
print(q)
df2['zeta_disease']=dfo
df3 = df2[np.abs(z < 6).all(axis=1)]
df2.info()
df3.describe()
df2.describe()
X=df3.iloc[:,0:8]
Y=df3.iloc[:,8:9]
from sklearn import*
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
clf=ExtraTreesClassifier()
clf.fit(X, Y)
clf.feature_importances_
model = SelectFromModel(clf, prefit=True)
X_new = model.transform(X)
X_new.shape
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X=df3.iloc[:,0:8]
y=df3.iloc[:,8:9]
# Build a forest and compute the impurity-based feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
    print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the impurity-based feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
################Model Buildings*****************
from sklearn.model_selection import *
from sklearn.metrics import *
from sklearn.preprocessing import *
from sklearn.linear_model import *
# Creating an empty Dataframe with column names only
AlSumm = pd.DataFrame(columns=['Model','ModelParameter','TN','FP','FN','TP','Accuracy','F1 Score','Precesion','Recall','FNR'])
#Logistic Regression
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=42)
cv = KFold(n_splits=5, random_state=1, shuffle=True)
# create model
model = LogisticRegression()
# evaluate model
y_train_pred=cross_val_predict(model,X_train,y_train.values.ravel(),cv=cv)
#lrcm=confusion_matrix(y_train,y_train_pred)
scores = cross_val_score(model, X_train,y_train.values.ravel(), scoring='accuracy', cv=cv)
# report performance
print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores)))
y_pred=cross_val_predict(model,X_test,y_test.values.ravel(),cv=cv)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'LogisticRegression','ModelParameter':0,'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
#apply the below code to store all confusion parameters
#print(metrics.classification_report(y_test, y_pred, *))
#LDA
# grid search solver for lda
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# define dataset
model = LinearDiscriminantAnalysis()
# define model evaluation method
# define grid
grid = dict()
grid['solver'] = ['svd', 'lsqr', 'eigen']
# define search
search = GridSearchCV(model, grid, scoring='accuracy', cv=cv)
# perform the search
results = search.fit(X_train, y_train)
y_pred=search.predict(X_test)
lrcm=confusion_matrix(y_test,y_pred)
# summarize #print('Mean Accuracy: %.3f' % results.best_score_)
#print('Config: %s' % results.best_params_)
AlSumm= AlSumm.append({'Model':'LDA','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
#QDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
qda = QuadraticDiscriminantAnalysis()
qda.fit(X_train,y_train.values.ravel())
y_pred=(qda.predict(X_test))
qdacm=confusion_matrix(y_pred,y_test)
# create model
model = QuadraticDiscriminantAnalysis()
# evaluate model
y_train_pred=cross_val_predict(model,X_train,y_train.values.ravel(),cv=cv)
#lrcm=confusion_matrix(y_train,y_train_pred)
scores = cross_val_score(model, X_train,y_train.values.ravel(), scoring='accuracy', cv=cv)
# report performance
print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores)))
y_pred=cross_val_predict(model,X_test,y_test.values.ravel(),cv=cv)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'QDA','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
#Support Vector Classifier
from sklearn.svm import *
clf1=SVC(kernel='linear',coef0=1,C=5)
model = clf1
# evaluate model
y_train_pred=cross_val_predict(model,X_train,y_train.values.ravel(),cv=cv)
#lrcm=confusion_matrix(y_train,y_train_pred)
scores = cross_val_score(model, X_train,y_train.values.ravel(), scoring='accuracy', cv=cv)
# report performance
print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores)))
y_pred=cross_val_predict(model,X_test,y_test.values.ravel(),cv=cv)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'SVC_linear','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
clf1=SVC(kernel='rbf',gamma=0.01)
model = clf1
# evaluate model
y_train_pred=cross_val_predict(model,X_train,y_train.values.ravel(),cv=cv)
#lrcm=confusion_matrix(y_train,y_train_pred)
scores = cross_val_score(model, X_train,y_train.values.ravel(), scoring='accuracy', cv=cv)
# report performance
print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores)))
y_pred=cross_val_predict(model,X_test,y_test.values.ravel(),cv=cv)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'SVC_rbf','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
#Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=15, random_state=0)
model = rf
# evaluate model
y_train_pred=cross_val_predict(model,X_train,y_train.values.ravel(),cv=cv)
#lrcm=confusion_matrix(y_train,y_train_pred)
scores = cross_val_score(model, X_train,y_train.values.ravel(), scoring='accuracy', cv=cv)
# report performance
print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores)))
y_pred=cross_val_predict(model,X_test,y_test.values.ravel(),cv=cv)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'Random Forest Classifier','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
#ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier(n_estimators=500, random_state=0)
# evaluate model
y_train_pred=cross_val_predict(model,X_train,y_train.values.ravel(),cv=cv)
#lrcm=confusion_matrix(y_train,y_train_pred)
scores = cross_val_score(model, X_train,y_train.values.ravel(), scoring='accuracy', cv=cv)
# report performance
print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores)))
y_pred=cross_val_predict(model,X_test,y_test.values.ravel(),cv=cv)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'ExtraTreesClassifier','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
#KNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
for i in range(1,100,5):
    model = KNeighborsClassifier(n_neighbors=i)
    # evaluate model
    y_train_pred=cross_val_predict(model,X_train,y_train.values.ravel(),cv=cv)
    scores = cross_val_score(model, X_train,y_train.values.ravel(), scoring='accuracy', cv=cv)
    y_pred=cross_val_predict(model,X_test,y_test.values.ravel(),cv=cv)
    lrcm=confusion_matrix(y_test,y_pred)
    AlSumm= AlSumm.append({'Model':'KNN','ModelParameter':i, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
                           'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
                           'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
                           'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
#*******SDG Classifier
import sklearn
from sklearn.linear_model import SGDClassifier
model=SGDClassifier(random_state=42)
y_train_pred=cross_val_predict(model,X_train,y_train.values.ravel(),cv=cv)
#lrcm=confusion_matrix(y_train,y_train_pred)
#scores = cross_val_score(model, X_train,y_train.values.ravel(), scoring='accuracy', cv=cv)
# report performance
#print('Accuracy: %.3f (%.3f)' % (np.mean(scores), np.std(scores)))
y_pred=cross_val_predict(model,X_test,y_test.values.ravel(),cv=cv)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'SGD Classifier','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
##################
#DNN
import tensorflow as tf
from keras.models import Sequential
import pandas as pd
from keras.layers import Dense
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
#Swish
model.add(Dense(8, activation='swish', input_shape=(8,)))
model.add(Dense(8, activation='swish'))
model.add(Dense(8, activation='swish'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(X_train, y_train,epochs=5, batch_size=1, verbose=1)
y_pred = model.predict_classes(X_test)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'DNN-Swish','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
mode2 = Sequential()
#Swish
mode2.add(Dense(8, activation='relu', input_shape=(8,)))
mode2.add(Dense(8, activation='relu'))
mode2.add(Dense(8, activation='relu'))
mode2.add(Dense(1, activation='sigmoid'))
mode2.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
mode2.fit(X_train, y_train,epochs=5, batch_size=1, verbose=1)
y_pred = mode2.predict_classes(X_test)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'DNN-ReLU','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
###********* Bagging, Out of Bag, Ada Boosting
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
bag_clf = BaggingClassifier( DecisionTreeClassifier(),max_samples=100, bootstrap=True)
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'Bagging','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
bag_clf = BaggingClassifier(DecisionTreeClassifier(),bootstrap=True, oob_score=True)
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'OOB','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
from sklearn.ensemble import AdaBoostClassifier
ada_clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), n_estimators=500,
algorithm="SAMME.R", learning_rate=0.4 )
ada_clf.fit(X_train, y_train)
y_pred = ada_clf.predict(X_test)
lrcm=confusion_matrix(y_test,y_pred)
AlSumm= AlSumm.append({'Model':'AdaBoost','ModelParameter':0, 'TN':lrcm[0][0],'FP':lrcm[0][1],'FN':lrcm[1][0],'TP':lrcm[1][1],
'Accuracy':accuracy_score(y_test, y_pred),'F1 Score':f1_score(y_test, y_pred),
'Precesion':precision_score(y_test, y_pred),'Recall':recall_score(y_test, y_pred),
'FNR':(1-recall_score(y_test, y_pred))}, ignore_index=True)
#### ****** Selecting Best Model *********
AlSumm['score']=AlSumm['Accuracy']+AlSumm['F1 Score']-AlSumm['FNR']
maxs=max(AlSumm['score'])
for i in range(len(AlSumm)):
    if (AlSumm['score'][i])==maxs:
        poli=i
print(poli)
AlSumm.iloc[poli,:]
######FINAL Testing With Data********
tdf=pd.read_csv('2021-01-21_zeta-disease_prediction-data_dsi-take-home-challenge.csv',index_col=False)
tdf2=tdf.iloc[:,0:8]
tdf2.info()
tdf.head()
for i in range(len(tdf.columns)-1):
    mean1=summary1.iloc[1,i]
    std1=summary1.iloc[2,i]
    tdf2.iloc[:,i:(i+1)]=(tdf.iloc[:,i:(i+1)]-mean1)/std1
tdf2.head()
#Select the Final Model based on confusion matrix and predict using that final Model
tdf.iloc[:,8:9]=(mode2.predict_classes(tdf2))
tdf.head()
tdf.to_csv('zeta-disease_predictions_AhamadHussain.csv')
AlSumm.to_csv('All_Models_Summary_AhamadHussain.csv')
|
[
"noreply@github.com"
] |
AhamadHussainD.noreply@github.com
|
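The record above repeats one evaluation block (cross-validated predictions, confusion matrix, metric dict) for every model. A sketch of that block factored into a single helper; names and the sample's own column spellings (including 'Precesion') are kept for consistency:

```python
# Sketch: the repeated evaluation block from the record above, as one helper.
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import (confusion_matrix, accuracy_score, f1_score,
                             precision_score, recall_score)

def evaluate(name, model, X_test, y_test, cv):
    """Return one summary row for AlSumm, mirroring the sample's columns."""
    y_pred = cross_val_predict(model, X_test, y_test.values.ravel(), cv=cv)
    tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()
    return {'Model': name, 'ModelParameter': 0,
            'TN': tn, 'FP': fp, 'FN': fn, 'TP': tp,
            'Accuracy': accuracy_score(y_test, y_pred),
            'F1 Score': f1_score(y_test, y_pred),
            'Precesion': precision_score(y_test, y_pred),
            'Recall': recall_score(y_test, y_pred),
            'FNR': 1 - recall_score(y_test, y_pred)}

# Usage (assumed context from the record): AlSumm = AlSumm.append(
#     evaluate('QDA', QuadraticDiscriminantAnalysis(), X_test, y_test, cv),
#     ignore_index=True)
```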
63fee9bed9fb25aa6626ce7ec9c02f43b7d2bfbe
|
cb751afc18cde24e3974adbe60f55534a0b6e0cf
|
/gdgsite/settings.py
|
86605eddaec2afeebe0d52e2a06297aa9b5501b6
|
[] |
no_license
|
HargovindArora/website
|
6d7f5c9571615f23c39c1ebde0d866f37de12b21
|
f9024048f37cccdaa3ffa504d05efce92020a464
|
refs/heads/master
| 2021-01-21T02:10:38.613307
| 2017-08-30T16:50:26
| 2017-08-30T16:50:26
| 101,881,662
| 0
| 0
| null | 2017-08-30T12:59:30
| 2017-08-30T12:59:30
| null |
UTF-8
|
Python
| false
| false
| 3,099
|
py
|
"""
Django settings for gdgsite project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v6$8b53a&74w2_eyh&*g7^@dm6v!!=ra$cm)1l*adqxav&bt%0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'gdgsite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'gdgsite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"himanshuagrawal1998@gmail.com"
] |
himanshuagrawal1998@gmail.com
|
60cd8b2aebcdbe232e347a8235a5b108d9419628
|
28729bdabcb1c83429752bc15b14f2ac1950028f
|
/firmware/python_modules/shared/dashboard/terminal/launcher.py
|
4d84887ce0cca0ac96378c72ae7a94d700de5ff0
|
[] |
no_license
|
badgeteam/ESP32-platform-firmware
|
434020769b36df164fd1719b3bcf996851d55294
|
04282f7fe84ddd0f0c3887fa948da68a9ade8126
|
refs/heads/master
| 2023-08-17T07:07:51.048777
| 2023-08-14T20:53:37
| 2023-08-14T20:53:37
| 194,534,857
| 31
| 49
| null | 2023-08-15T21:00:09
| 2019-06-30T15:59:30
|
C
|
UTF-8
|
Python
| false
| false
| 2,103
|
py
|
import term, system, sys, uos as os, ujson
system.serialWarning()
apps = []
def add_app(app, information):
    global apps
    try:
        title = information["name"]
    except:
        title = app
    try:
        category = information["category"]
    except:
        category = ""
    info = {"file": app, "title": title, "category": category}
    apps.append(info)

def populate_apps():
    global apps
    apps = []
    try:
        userApps = os.listdir('apps')
    except OSError:
        userApps = []
    try:
        userApps.extend(os.listdir('lib'))
    except OSError:
        pass
    for app in userApps:
        add_app(app, read_metadata(app))

currentListTitles = []
currentListTargets = []

def populate_category(category="", system=True):
    global apps
    global currentListTitles
    global currentListTargets
    currentListTitles = []
    currentListTargets = []
    for app in apps:
        if (category == "" or category == app["category"] or (system and app["category"] == "system")) and (not app["category"] == "hidden"):
            currentListTitles.append(app["title"])
            currentListTargets.append(app)

def read_metadata(app):
    try:
        install_path = get_install_path()
        info_file = "%s/%s/metadata.json" % (install_path, app)
        print("Reading " + info_file + "...")
        with open(info_file) as f:
            information = f.read()
        return ujson.loads(information)
    except BaseException as e:
        print("[ERROR] Can not read metadata for app " + app)
        sys.print_exception(e)
        information = {"name": app, "title": "---", "category": ""}
        return information

def expandhome(s):
    if "~/" in s:
        h = os.getenv("HOME")
        s = s.replace("~/", h + "/")
    return s

def get_install_path():
    global install_path
    if install_path is None:
        # sys.path[0] is current module's path
        install_path = sys.path[1]
        install_path = expandhome(install_path)
    return install_path

install_path = None

term.empty_lines()
term.header("Loading application list...")
populate_apps()
populate_category()
currentListTitles.append("< Back to the main menu")

selected = term.menu("Application launcher", currentListTitles)
if selected == len(currentListTitles) - 1:
    system.home()
else:
    system.start(currentListTargets[selected]['file'])
|
[
"renze@rnplus.nl"
] |
renze@rnplus.nl
|
2f018fb4a4dcd300694252c10fb0a589577d9c18
|
62b0c608a4f3839f3443903b666b19632c13cac2
|
/ComicEnv/lib/python3.2/sre_constants.py
|
b766b82e7f13ed1382fa318a9df1c120c5d9b238
|
[
"MIT"
] |
permissive
|
Silight/ComicEnv
|
a26e77c390e997de456e0e3ef7ce513dd8a908bd
|
2909f5405923f8767969aae7a6b36eda26571002
|
refs/heads/master
| 2020-04-01T08:19:17.463999
| 2015-04-21T15:05:11
| 2015-04-21T15:05:11
| 33,143,359
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
/usr/lib/python3.2/sre_constants.py
|
[
"silight@silight-Virtual-Machine.(none)"
] |
silight@silight-Virtual-Machine.(none)
|
ab4056f63a320739dcf5b284af25ffb7e8e5cc4b
|
6ee24ed025a3dce06716c60180e8acb7ea382074
|
/Data_Manipulation/data_processing/nodeJson_import_mongo.py
|
4a6a2ad0858287aa35111d7123f0d55bf9e3b4f8
|
[] |
no_license
|
wxy000/MHKG
|
54ebaa6bde115981a734557c0595eb62e053ca23
|
b0f70bc2e185154f651958499a1632ce879075c8
|
refs/heads/master
| 2021-10-28T10:31:49.401982
| 2019-04-23T15:18:06
| 2019-04-23T15:18:06
| 175,953,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
# -*- coding:utf-8 -*-
import os
from pymongo import *
import json
class JsonToMongo(object):
    def __init__(self):
        self.local_url = os.path.abspath(os.path.join(os.getcwd(), "../data"))
        self.host = 'localhost'
        self.port = 27017

    # Read the JSON file
    def __open_file(self):
        self.file = open(os.path.join(self.local_url, 'final_node.json'), 'r')
        # Create a MongoDB client
        self.client = MongoClient(self.host, self.port)
        # Create the database
        self.db = self.client.mhkg
        # Create the collection
        self.collection = self.db.node

    # Close the file
    def __close_file(self):
        self.file.close()

    # Write to the database
    def write_database(self):
        self.__open_file()
        # Convert the JSON to a Python object
        data = json.load(self.file)
        try:
            self.collection.insert_many(data)
            print('Write succeeded')
        except Exception as e:
            print(e)
        finally:
            self.__close_file()

if __name__ == '__main__':
    j2m = JsonToMongo()
    j2m.write_database()
|
[
"396310583@qq.com"
] |
396310583@qq.com
|
226a7bcfbe6f2eb263aab65dbe5beeebf1b5d173
|
237162607427106ae9564670d47427a62356861f
|
/core/migrations/0101_merge_20180116_1453.py
|
06162ec4e37501aaf9fff70de7035df75ad94e97
|
[] |
no_license
|
pitipund/basecore
|
8648c1f4fa37b6e6075fd710ca422fe159ba930e
|
a0c20cec1e17dd0eb6abcaaa7d2623e38b60318b
|
refs/heads/master
| 2020-09-13T20:16:02.622903
| 2019-11-20T09:07:15
| 2019-11-20T09:07:15
| 221,885,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-01-16 14:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):

    dependencies = [
        ('core', '0099_procedure_active'),
        ('core', '0100_auto_20180112_1623'),
    ]

    operations = [
    ]
|
[
"longman_694@hotmail.com"
] |
longman_694@hotmail.com
|
d0843099546bd3768c362e42f6bb8f28d9fe269f
|
68bfa9d7a6b267f52354415fc72fada1ddfdc9ab
|
/src/lamplib/src/genny/cmd_runner.py
|
9839bdab7da4894de4a389ec42df53c64eb8f86f
|
[
"Apache-2.0"
] |
permissive
|
mongodb/genny
|
5aa3c3be01d8bd8e5b7c9a9d019c5b206d7e97fb
|
788eaf26e3b8b08d76c71d54fb0013befee9b032
|
refs/heads/master
| 2023-09-06T09:25:21.933670
| 2023-09-05T15:59:07
| 2023-09-05T15:59:07
| 121,291,048
| 44
| 76
|
Apache-2.0
| 2023-09-14T16:50:21
| 2018-02-12T19:23:44
|
C++
|
UTF-8
|
Python
| false
| false
| 1,971
|
py
|
import subprocess
import os
import shlex
from uuid import uuid4
import structlog
from typing import List, NamedTuple
SLOG = structlog.get_logger(__name__)
class RunCommandOutput(NamedTuple):
    returncode: int
    stdout: List[str]
    stderr: List[str]

def run_command(
    cmd: List[str],
    check: bool,
    cwd: str,
    shell: bool = False,
    env: dict = None,
    capture: bool = True,
) -> RunCommandOutput:
    env = os.environ.copy() if env is None else env
    uuid = str(uuid4())[:8]
    SLOG.debug("Running command", uuid=uuid, cwd=cwd, command=" ".join(shlex.quote(x) for x in cmd))

    genny_repo_root = os.environ.get("GENNY_REPO_ROOT", None)
    assert genny_repo_root, "Code error: env GENNY_REPO_ROOT not set"
    env["LSAN_OPTIONS"] = f"suppressions={genny_repo_root}/lsan.ignorelist"

    success = False
    old_cwd = os.getcwd()
    try:
        if not os.path.exists(cwd):
            raise Exception(f"Cannot chdir to {cwd} from cwd={os.getcwd()}")
        os.chdir(cwd)
        result: subprocess.CompletedProcess = subprocess.run(
            cmd,
            env=env,
            shell=shell,
            check=check,
            text=capture,  # capture implies text. No binary output from genny.
            capture_output=capture,
            bufsize=0,
        )
        success = result.returncode == 0
        return RunCommandOutput(
            returncode=result.returncode,
            stdout=[] if not capture else result.stdout.strip().split("\n"),
            stderr=[] if not capture else result.stderr.strip().split("\n"),
        )
    except subprocess.CalledProcessError as e:
        SLOG.error(
            "Error in command",
            uuid=uuid,
            cmd=cmd,
            env=env,
            cwd=cwd,
            returncode=e.returncode,
            output=e.output,
        )
        raise e
    finally:
        SLOG.debug("Finished command", uuid=uuid, success=success)
        os.chdir(old_cwd)
|
[
"noreply@github.com"
] |
mongodb.noreply@github.com
|
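A hypothetical call to the `run_command` helper above; per its assert, `GENNY_REPO_ROOT` must be set in the environment, and the value used here is a placeholder:

```python
# Sketch: invoking run_command from the record above.
# GENNY_REPO_ROOT is required by the assert; "/tmp/genny" is a placeholder.
import os
os.environ.setdefault("GENNY_REPO_ROOT", "/tmp/genny")

out = run_command(cmd=["echo", "hello"], check=True, cwd=".", capture=True)
print(out.returncode)   # 0
print(out.stdout)       # ["hello"]
```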
0eb00156414dc60670691a31f453d8a8e2fdc0bb
|
4c77c3f68ddd280ad26ed78a9f4927ff9eb5e1f1
|
/src/ledger/lib/python2.7/site-packages/pip/_internal/cmdoptions.py
|
58854e34f9b269a44463a8de48e2a181f83bbc36
|
[
"MIT"
] |
permissive
|
neoscoin/neos-core
|
5f4a4e9fcdf13a21d1dbedfc7c01a8a8ba454a98
|
22cecda54875e3554e7c2a4569551c042fa6c0a2
|
refs/heads/master
| 2020-03-23T18:54:58.602764
| 2019-08-04T16:44:27
| 2019-08-04T16:44:27
| 141,940,658
| 4
| 4
|
MIT
| 2018-07-28T21:39:26
| 2018-07-23T00:05:03
|
C++
|
UTF-8
|
Python
| false
| false
| 16,679
|
py
|
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
import warnings
from functools import partial
from optparse import SUPPRESS_HELP, Option, OptionGroup
from pip._internal.index import (
FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary,
)
from pip._internal.locations import USER_CACHE_DIR, src_prefix
from pip._internal.models import PyPI
from pip._internal.utils.hashes import STRONG_HASHES
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.ui import BAR_TYPES
if MYPY_CHECK_RUNNING:
    from typing import Any
def make_option_group(group, parser):
    """
    Return an OptionGroup object
    group  -- assumed to be dict with 'name' and 'options' keys
    parser -- an optparse Parser
    """
    option_group = OptionGroup(parser, group['name'])
    for option in group['options']:
        option_group.add_option(option())
    return option_group
def check_install_build_global(options, check_options=None):
    """Disable wheels if per-setup.py call options are set.

    :param options: The OptionParser options to update.
    :param check_options: The options to check, if not supplied defaults to
        options.
    """
    if check_options is None:
        check_options = options

    def getname(n):
        return getattr(check_options, n, None)

    names = ["build_options", "global_options", "install_options"]
    if any(map(getname, names)):
        control = options.format_control
        fmt_ctl_no_binary(control)
        warnings.warn(
            'Disabling all use of wheels due to the use of --build-options '
            '/ --global-options / --install-options.', stacklevel=2,
        )
###########
# options #
###########
help_ = partial(
Option,
'-h', '--help',
dest='help',
action='help',
help='Show help.',
) # type: Any
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = partial(
Option,
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP
) # type: Any
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
no_color = partial(
Option,
'--no-color',
dest='no_color',
action='store_true',
default=False,
help="Suppress colored output",
)
version = partial(
Option,
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.',
) # type: Any
quiet = partial(
Option,
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help=(
'Give less output. Option is additive, and can be used up to 3'
' times (corresponding to WARNING, ERROR, and CRITICAL logging'
' levels).'
),
) # type: Any
progress_bar = partial(
Option,
'--progress-bar',
dest='progress_bar',
type='choice',
choices=list(BAR_TYPES.keys()),
default='on',
help=(
'Specify type of progress to be displayed [' +
'|'.join(BAR_TYPES.keys()) + '] (default: %default)'
),
) # type: Any
log = partial(
Option,
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
) # type: Any
no_input = partial(
Option,
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP
) # type: Any
proxy = partial(
Option,
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port."
) # type: Any
retries = partial(
Option,
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).",
) # type: Any
timeout = partial(
Option,
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).',
) # type: Any
skip_requirements_regex = partial(
Option,
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP,
) # type: Any
def exists_action():
    return Option(
        # Option when path already exist
        '--exists-action',
        dest='exists_action',
        type='choice',
        choices=['s', 'i', 'w', 'b', 'a'],
        default=[],
        action='append',
        metavar='action',
        help="Default action when a path already exists: "
             "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort).",
    )
cert = partial(
Option,
'--cert',
dest='cert',
type='str',
metavar='path',
help="Path to alternate CA bundle.",
) # type: Any
client_cert = partial(
Option,
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.",
) # type: Any
index_url = partial(
Option,
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help="Base URL of Python Package Index (default %default). "
"This should point to a repository compliant with PEP 503 "
"(the simple repository API) or a local directory laid out "
"in the same format.",
) # type: Any
def extra_index_url():
    return Option(
        '--extra-index-url',
        dest='extra_index_urls',
        metavar='URL',
        action='append',
        default=[],
        help="Extra URLs of package indexes to use in addition to "
             "--index-url. Should follow the same rules as "
             "--index-url.",
    )
no_index = partial(
Option,
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).',
) # type: Any
def find_links():
    return Option(
        '-f', '--find-links',
        dest='find_links',
        action='append',
        default=[],
        metavar='url',
        help="If a url or path to an html file, then parse for links to "
             "archives. If a local path or file:// url that's a directory, "
             "then look for archives in the directory listing.",
    )
def trusted_host():
    return Option(
        "--trusted-host",
        dest="trusted_hosts",
        action="append",
        metavar="HOSTNAME",
        default=[],
        help="Mark this host as trusted, even though it does not have valid "
             "or any HTTPS.",
    )
# Remove after 1.5
process_dependency_links = partial(
Option,
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
) # type: Any
def constraints():
    return Option(
        '-c', '--constraint',
        dest='constraints',
        action='append',
        default=[],
        metavar='file',
        help='Constrain versions using the given constraints file. '
             'This option can be used multiple times.'
    )
def requirements():
    return Option(
        '-r', '--requirement',
        dest='requirements',
        action='append',
        default=[],
        metavar='file',
        help='Install from the given requirements file. '
             'This option can be used multiple times.'
    )
def editable():
    return Option(
        '-e', '--editable',
        dest='editables',
        action='append',
        default=[],
        metavar='path/url',
        help=('Install a project in editable mode (i.e. setuptools '
              '"develop mode") from a local project path or a VCS url.'),
    )
src = partial(
Option,
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
) # type: Any
def _get_format_control(values, option):
    """Get a format_control object."""
    return getattr(values, option.dest)

def _handle_no_binary(option, opt_str, value, parser):
    existing = getattr(parser.values, option.dest)
    fmt_ctl_handle_mutual_exclude(
        value, existing.no_binary, existing.only_binary,
    )

def _handle_only_binary(option, opt_str, value, parser):
    existing = getattr(parser.values, option.dest)
    fmt_ctl_handle_mutual_exclude(
        value, existing.only_binary, existing.no_binary,
    )
def no_binary():
    return Option(
        "--no-binary", dest="format_control", action="callback",
        callback=_handle_no_binary, type="str",
        default=FormatControl(set(), set()),
        help="Do not use binary packages. Can be supplied multiple times, and "
             "each time adds to the existing value. Accepts either :all: to "
             "disable all binary packages, :none: to empty the set, or one or "
             "more package names with commas between them. Note that some "
             "packages are tricky to compile and may fail to install when "
             "this option is used on them.",
    )

def only_binary():
    return Option(
        "--only-binary", dest="format_control", action="callback",
        callback=_handle_only_binary, type="str",
        default=FormatControl(set(), set()),
        help="Do not use source packages. Can be supplied multiple times, and "
             "each time adds to the existing value. Accepts either :all: to "
             "disable all source packages, :none: to empty the set, or one or "
             "more package names with commas between them. Packages without "
             "binary distributions will fail to install when this option is "
             "used on them.",
    )
cache_dir = partial(
Option,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
no_deps = partial(
Option,
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.",
) # type: Any
build_dir = partial(
Option,
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in. Note that '
'an initial build still takes place in a temporary directory. '
'The location of temporary directories can be controlled by setting '
'the TMPDIR environment variable (TEMP on Windows) appropriately. '
'When passed, build directories are not cleaned in case of failures.'
) # type: Any
ignore_requires_python = partial(
Option,
'--ignore-requires-python',
dest='ignore_requires_python',
action='store_true',
help='Ignore the Requires-Python information.'
) # type: Any
no_build_isolation = partial(
Option,
'--no-build-isolation',
dest='build_isolation',
action='store_false',
default=True,
help='Disable isolation when building a modern source distribution. '
'Build dependencies specified by PEP 518 must be already installed '
'if this option is used.'
) # type: Any
install_options = partial(
Option,
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.",
) # type: Any
global_options = partial(
Option,
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.",
) # type: Any
no_clean = partial(
Option,
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories)."
) # type: Any
pre = partial(
Option,
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.",
) # type: Any
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.",
) # type: Any
# Deprecated, Remove later
always_unzip = partial(
Option,
'-Z', '--always-unzip',
dest='always_unzip',
action='store_true',
help=SUPPRESS_HELP,
) # type: Any
def _merge_hash(option, opt_str, value, parser):
    """Given a value spelled "algo:digest", append the digest to a list
    pointed to in a dict by the algo name."""
    if not parser.values.hashes:
        parser.values.hashes = {}
    try:
        algo, digest = value.split(':', 1)
    except ValueError:
        parser.error('Arguments to %s must be a hash name '
                     'followed by a value, like --hash=sha256:abcde...' %
                     opt_str)
    if algo not in STRONG_HASHES:
        parser.error('Allowed hash algorithms for %s are %s.' %
                     (opt_str, ', '.join(STRONG_HASHES)))
    parser.values.hashes.setdefault(algo, []).append(digest)
hash = partial(
Option,
'--hash',
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest='hashes',
action='callback',
callback=_merge_hash,
type='string',
help="Verify that the package's archive matches this "
'hash before installing. Example: --hash=sha256:abcdef...',
) # type: Any
require_hashes = partial(
Option,
'--require-hashes',
dest='require_hashes',
action='store_true',
default=False,
help='Require a hash to check each requirement against, for '
'repeatable installs. This option is implied when any package in a '
'requirements file has a --hash option.',
) # type: Any
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
no_color,
]
}
index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
process_dependency_links,
]
}
|
[
"kris@blockchaindatasystems.com"
] |
kris@blockchaindatasystems.com
|
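The docstring of the record above explains why options are `partial(Option, ...)` factories rather than module-level `Option` instances: options with `action='append'` carry state between parses, so each parser must get fresh instances. A small sketch of consuming those factories, assuming the module context above; the parser wiring is illustrative:

```python
# Sketch: each factory call (e.g. verbose()) yields a fresh Option instance,
# so repeated parses never share 'append' state. Assumes the module above.
from optparse import OptionParser

parser = OptionParser()
parser.add_option(verbose())  # a new Option per parser
parser.add_option_group(make_option_group(index_group, parser))

opts, args = parser.parse_args(["-vv", "--no-index"])
print(opts.verbose, opts.no_index)  # 2 True
```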
20e145131223914cc0e519e20273c104a72d4688
|
4ccf02e5712d742daf524b70b6ebc77c7f64a502
|
/extras/annotation_pipeline/quality_assurance/check_jigs_up_annotations.py
|
2679fadc2929b330112e0c3bd3ed0e23fe837b4c
|
[
"MIT"
] |
permissive
|
rajammanabrolu/jiminy-cricket
|
e77ebae6e02ad68dfd13c8e564b5f7203c59d564
|
35a0d979d8ddc77371ead991f04230495279fa99
|
refs/heads/main
| 2023-08-21T01:28:16.814615
| 2021-09-23T03:28:33
| 2021-09-23T03:28:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,629
|
py
|
import argparse
import os
import re
import csv
import pprint
pp = pprint.PrettyPrinter()
# This script is for finding annotations of JIGS-UP functions where the line
# noted in the spreadsheet is not the line where the JIGS-UP function starts.
# We print out these instances and the line that they should be corrected to.
def main(args):
# get list of annotations from CSV
annotations = []
with open(args.csv_path) as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
# discard the first row (header info) and rows between files (these typically have the second column empty)
if (i > 0) and (row[1] != '') and (row[6] != 'N/A'):
annotations.append({'filename': row[0], 'line_number': row[1]})
# get list of paths to ZIL files for the indicated game
zil_paths = []
for root, dirs, files in os.walk(args.game_folder_path):
for name in files:
if name.split('.')[-1] == 'zil':
zil_paths.append({'filename': name, 'full_path': os.path.join(root, name)})
# For each ZIL file, find the range of lines spanned by each JIGS-UP call.
# Then, for each annotation, if it is inside the range of a JIGS-UP call,
# make sure it is at the first line. Keep track of violations.
jigs_up_annotations = []
violations = []
for path in zil_paths:
print('Checking {}'.format(path['filename']))
with open(path['full_path'], 'r') as f:
zil_lines = f.readlines()
# get span of each JIGS-UP call
jigs_up_calls = []
start = -1
for i, line in enumerate(zil_lines):
if start != -1:
if '>' in line:
jigs_up_calls.append([start, i])
start = -1
if '<JIGS-UP' in line:
start = i
if len(re.findall('<JIGS-UP .*>', line)) > 0: # starts and ends on same line
jigs_up_calls.append([start, i])
start = -1
# make sure each annotation of a JIGS-UP call is on the first line of the JIGS-UP
for annotation in annotations:
if annotation['filename'] != path['filename']:
continue # annotation is from a different file than the one we're focusing on
        line_number = re.sub(r'\([a-z]\)', '', annotation['line_number'])  # transform "1312(a)", "1312(b)", etc. into "1312"
line_number = int(line_number) - 1 # convert to 0-indexing
for call in jigs_up_calls:
if call[0] <= line_number <= call[1]:
jigs_up_annotations.append([annotation['filename'], annotation['line_number']])
if line_number != call[0]:
violations.append([annotation['filename'], annotation['line_number'], 'should be {}'.format(call[0] + 1)])
print('\n\nFound {} JIGS-UP annotations:\n'.format(len(jigs_up_annotations)))
pp.pprint(jigs_up_annotations)
print('\n\n')
if len(violations) == 0:
print('Found no errors! Good job.')
else:
print('\nFound {} errors:\n'.format(len(violations)))
pp.pprint(violations)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='See comments for description.')
parser.add_argument('--game_folder_path', type=str, default='',
help='path to the game folder, e.g. ./zork1/')
parser.add_argument('--csv_path', type=str, default='',
help='path to the annotation CSV, e.g. ./zork1/zork1_annotations.csv')
args = parser.parse_args()
main(args)
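
# Example invocation (paths are illustrative, echoing the argparse help text):
#   python check_jigs_up_annotations.py \
#       --game_folder_path ./zork1/ \
#       --csv_path ./zork1/zork1_annotations.csv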
|
[
"mrmazeika@gmail.com"
] |
mrmazeika@gmail.com
|
de6682fbf0a6b2307883950d8adbb07e38918215
|
134c79f0d526dc3f48bd352cdff1d4e55a919d5e
|
/TextRendering/src/doc/lowlevel.py
|
0e493533ea451cfc556fa8f20646fb5d40c78978
|
[] |
no_license
|
KrbAlmryde/cs524-Journal
|
ff4a85c0021da49b39fa12376d87856fb2a316fe
|
1db769c81825eaad12dc4c7326daebebebe24371
|
refs/heads/master
| 2021-01-25T05:27:50.549574
| 2015-04-10T16:49:18
| 2015-04-10T16:49:18
| 29,946,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,542
|
py
|
#! /usr/bin/env python
from pyevtk.vtk import VtkFile, VtkRectilinearGrid
import numpy as np
# **************************************************************
# * Example of how to use the low level VtkFile class. *
# **************************************************************
nx, ny, nz = 6, 6, 2
lx, ly, lz = 1.0, 1.0, 1.0
dx, dy, dz = lx/nx, ly/ny, lz/nz
ncells = nx * ny * nz
npoints = (nx + 1) * (ny + 1) * (nz + 1)
x = np.arange(0, lx + 0.1*dx, dx, dtype='float64')
y = np.arange(0, ly + 0.1*dy, dy, dtype='float64')
z = np.arange(0, lz + 0.1*dz, dz, dtype='float64')
start, end = (0, 0, 0), (nx, ny, nz)
w = VtkFile("./evtk_test", VtkRectilinearGrid)
w.openGrid(start=start, end=end)
w.openPiece(start=start, end=end)
# Point data
temp = np.random.rand(npoints)
vx = vy = vz = np.zeros([nx + 1, ny + 1, nz + 1], dtype="float64", order = 'F')
w.openData("Point", scalars = "Temperature", vectors = "Velocity")
w.addData("Temperature", temp)
w.addData("Velocity", (vx,vy,vz))
w.closeData("Point")
# Cell data
pressure = np.zeros([nx, ny, nz], dtype="float64", order='F')
w.openData("Cell", scalars = "Pressure")
w.addData("Pressure", pressure)
w.closeData("Cell")
# Coordinates of cell vertices
w.openElement("Coordinates")
w.addData("x_coordinates", x);
w.addData("y_coordinates", y);
w.addData("z_coordinates", z);
w.closeElement("Coordinates");
w.closePiece()
w.closeGrid()
w.appendData(data = temp)
w.appendData(data = (vx,vy,vz))
w.appendData(data = pressure)
w.appendData(x).appendData(y).appendData(z)
w.save()
|
[
"kyle.almryde@gmail.com"
] |
kyle.almryde@gmail.com
|
7608751f89cc4ada8d4a85a974c37bb44e7dc50b
|
29cabe70d48a3ba9493ad8d4b7d3973b4782c7ab
|
/src/Baseline.py
|
004c6eeeb23fc020bf4d47403389239909c0b958
|
[] |
no_license
|
drawdoowmij/salaryprediction
|
8d1e7321b7d06da3f9edcd3ab3fea0931a3a017c
|
ae6156ecc47322067f33d6503ae3c88e3a9d154c
|
refs/heads/master
| 2020-05-31T17:33:27.390380
| 2019-06-30T22:52:30
| 2019-06-30T22:52:30
| 190,411,791
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
import numpy as np
from sklearn.model_selection import train_test_split, cross_val_score


class color:
    """Minimal stand-in for the project's terminal-formatting helper
    (assumed; the original defines it elsewhere in the repository)."""
    BOLD = '\033[1m'
    END = '\033[0m'


class Baseline:
    """ Runs a baseline linear regression model with limited features
    just to get a baseline measure of the mean squared error.
Future models will be compared to this one for validity.
Parameters
----------
df: feature DataFrame
est: estimator to use for baseline measure
yrsExp: feature to use
mileMetro: feature to use
sal: target
Returns
-------
Nothing
"""
def __init__(self, df, est, yrsExp, milesMetro, sal):
self.df = df
self.sal = sal
self.est = est
self.yrsExp = yrsExp
self.milesMetro = milesMetro
def baseline_model(self):
## baseline features
X = self.df.loc[:, self.yrsExp:self.milesMetro]
## target
y = self.df[self.sal]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
        ## use the estimator supplied by the caller
        lr = self.est
lr.fit(X_train, y_train)
## Predict our model
predict = lr.predict(X_test)
lr_scores = cross_val_score(lr, X, y, scoring='neg_mean_squared_error', cv=5)
print('\n')
print(color.BOLD + 'Baseline Model Information' + color.END)
print('Linear Regression score is {}'.format(lr.score(X_test, y_test)))
print('The mean squared errors are {}'.format(lr_scores))
print('The average mean squared error is {}'.format(-np.mean(lr_scores)))
print('\n')
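
# Minimal usage sketch (column names are hypothetical; the class only needs a
# contiguous feature slice and a target column):
#   from sklearn.linear_model import LinearRegression
#   base = Baseline(df, LinearRegression(), 'yearsExperience',
#                   'milesFromMetropolis', 'salary')
#   base.baseline_model()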
|
[
"noreply@github.com"
] |
drawdoowmij.noreply@github.com
|
1660fc1e774c3881a875f18ebffd5dfe52106f81
|
fe300a07894111acaf1f9eb38d8d2e4d57ce470a
|
/Function/2-1.py
|
10791ea323cf2109991f375a740bcea49b200c3b
|
[] |
no_license
|
lienero/-2019
|
d26c73e506b41deb383ca0a9e5dcfb406e6c52b1
|
a53cc3e4e05224070c24f0afb005415fd55fec4f
|
refs/heads/master
| 2020-05-24T11:05:40.700815
| 2019-09-16T10:23:58
| 2019-09-16T10:23:58
| 187,240,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
def times_tables(num):
n=1
while n <= 9:
print(num, " x ", n, " = ", n*num)
n = n+1
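
# Example (sketch): times_tables(3) prints "3  x  1  =  3" through "3  x  9  =  27".
if __name__ == "__main__":
    times_tables(3)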
|
[
"lienero@naver.com"
] |
lienero@naver.com
|
d7949c88d9a7edbfea31dbd0ba7ab967b033b518
|
8dca64dd11b23a7d59413ac8e28e92a0ab80c49c
|
/804. Unique Morse Code Words/solution.py
|
8dcc8c84f34f57b973a8d917edf02c942dffc095
|
[] |
no_license
|
huangruihaocst/leetcode-python
|
f854498c0a1d257698e10889531c526299d47e39
|
8f88cae7cc982ab8495e185914b1baeceb294060
|
refs/heads/master
| 2020-03-21T20:52:17.668477
| 2018-10-08T20:29:35
| 2018-10-08T20:29:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 701
|
py
|
class Solution:
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
morse = ['.-', '-...', '-.-.', '-..', '.', '..-.', '--.', '....', '..', '.---',
'-.-', '.-..', '--', '-.', '---', '.--.', '--.-', '.-.', '...', '-', '..-', '...-',
'.--', '-..-', '-.--', '--..']
ms = list()
for word in words:
m = str()
for c in word:
m += morse[ord(c) - ord('a')]
ms.append(m)
return len(set(ms))
if __name__ == '__main__':
solution = Solution()
print(solution.uniqueMorseRepresentations(["gin", "zen", "gig", "msg"]))
|
[
"huangruihaocst@126.com"
] |
huangruihaocst@126.com
|
c6bb44a4b00a0210af19a85c3a485a6a6a5a3c72
|
f0e2643cf8a015b581f509a4f87f92136424b0c9
|
/Cryptage_Decryptage_Polybe.py
|
86535ba889fab89bb609e8c31e1c795aad1bcdbb
|
[] |
no_license
|
MrVyM/Cypher
|
0ddbf0b1205e87718f8d9d96c0435c1797c0af93
|
b28be87f17f5a6439a852477bb1f2352d6d71799
|
refs/heads/main
| 2023-04-01T22:56:50.276107
| 2021-04-03T10:20:39
| 2021-04-03T10:20:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,309
|
py
|
import Enlever_Caractere as EC
import Open_Write_Fichier as OpenWrite
carre_polybe_codage=[('', '11'), ('', '21'), ('', '31'), ('', '41'), ('', '51'), ('', '12'), ('', '22'), ('', '32'), ('', '42'), ('', '52'), ('', '13'), ('', '23'), ('', '33'), ('', '43'), ('', '53'), ('', '14'), ('', '24'), ('', '34'), ('', '44'), ('', '54'), ('', '15'), ('', '25'), ('', '35'), ('', '45'), ('', '55')]
carre_polybe_decodage=[('11',''), ('21',''), ('31',''), ('41',''), ('51',''), ('12',''), ('22',''), ('32',''), ('42',''), ('52',''), ('13',''), ('23',''), ('33',''), ('43',''), ('53',''), ('14',''), ('24',''), ('34',''), ('44',''), ('54',''), ('15',''), ('25',''), ('35',''), ('45',''), ('55','')]
def correction_tableau(cle) :
    global carre_polybe_codage  # declare that the module-level list is used
    global carre_polybe_decodage  # declare that the module-level list is used
    # reset the lists
    carre_polybe_codage=[('', '11'), ('', '21'), ('', '31'), ('', '41'), ('', '51'), ('', '12'), ('', '22'), ('', '32'), ('', '42'), ('', '52'), ('', '13'), ('', '23'), ('', '33'), ('', '43'), ('', '53'), ('', '14'), ('', '24'), ('', '34'), ('', '44'), ('', '54'), ('', '15'), ('', '25'), ('', '35'), ('', '45'), ('', '55')]
    carre_polybe_decodage=[('11',''), ('21',''), ('31',''), ('41',''), ('51',''), ('12',''), ('22',''), ('32',''), ('42',''), ('52',''), ('13',''), ('23',''), ('33',''), ('43',''), ('53',''), ('14',''), ('24',''), ('34',''), ('44',''), ('54',''), ('15',''), ('25',''), ('35',''), ('45',''), ('55','')]
    temp=""  # temporary buffer variable
    for carac in cle :  # loop that strips all punctuation and digits from the key
        if carac in ",?;/:§!°]=}[({'-_&0123456789 " :
            pass
        else :  # the character is not in the forbidden string
            temp+=carac  # so append it to temp
    cle=temp  # the key becomes the buffer built in the loop above
    cle=EC.enlever_carac_accent(cle.lower())  # normalize the key further (lowercase, strip accents)
    temp=""  # reset temp
    increment=0  # start the counter at 0
    if cle!="" :  # an empty key means a Polybius square without a key, so skip this block
        for carac in cle :  # loop over the key
            if carac not in temp :  # if the character has not been placed yet
                if str(carac)=="j" :  # "j" is skipped: it shares a cell with "i"
                    pass
                else :  # otherwise keep filling the encoding and decoding lists
                    carre_polybe_codage[increment]=str(carac),carre_polybe_codage[increment][1]  # element 'increment' becomes a (character, number) tuple
                    carre_polybe_decodage[increment]=carre_polybe_decodage[increment][0],str(carac)  # element 'increment' becomes a (number, character) tuple; [0] keeps the cell number
                    temp+=carac  # temp collects the characters already placed in the lists
                    increment+=1  # advance the counter
    for carac in range(0,26) :  # loop over a..z
        if chr((97+carac)) not in temp :  # if the letter has not been placed yet
            if chr((97+carac))=="j" :  # "j" is skipped
                pass
            else :  # otherwise keep filling the encoding and decoding lists
                carre_polybe_codage[increment]=chr(carac+97),carre_polybe_codage[increment][1]  # element 'increment' becomes a (character, number) tuple
                carre_polybe_decodage[increment]=carre_polybe_decodage[increment][0],chr(carac+97)  # element 'increment' becomes a (number, character) tuple
                temp+=chr(carac+97)  # temp collects the characters already placed in the lists
                increment+=1  # advance the counter
    carre_polybe_codage=dict(carre_polybe_codage)  # turn the encoding list into a dictionary
    carre_polybe_decodage=dict(carre_polybe_decodage)  # turn the decoding list into a dictionary
def cryptage(cle,chemin_original,chemin_final) :
    texte=EC.enlever_carac_accent(OpenWrite.ouvrir_fichier(chemin_original))  # open the file; this yields one long string
    texte=texte.lower()  # only lowercase is handled
    if texte!=False :  # if the text is not empty
        correction_tableau(cle)  # rebuild the encoding and decoding dictionaries
        texte_crypter=""  # texte_crypter starts empty
        for carac in texte :  # loop over every character of the text
            if carac in "j" :  # "j" is encoded with the digits of "i"
                texte_crypter+=(carre_polybe_codage["i"]+",")
            elif carac in "abcdefghiklmnopqrstuvwxyz" :  # otherwise look the character up in the dictionary, then append a comma
                texte_crypter+=(carre_polybe_codage[carac]+",")
            else :
                texte_crypter+='"'+carac+'",'  # unknown characters are left unencoded but wrapped in double quotes
        OpenWrite.ecrire_fichier(chemin_final,texte_crypter[:-1])  # write the text to a file without the trailing comma
def decryptage(cle,chemin_original,chemin_final) :
    texte=EC.enlever_carac_accent(OpenWrite.ouvrir_fichier(chemin_original))  # open the file; this yields one long string
    texte=texte.lower()  # only lowercase is handled
    if texte!=False :  # if the text is not empty
        correction_tableau(cle)  # rebuild the encoding and decoding dictionaries
        texte=texte.split(",")  # split the text on the commas
        texte_decrypter=""  # texte_decrypter starts empty
        for carac in texte :  # loop over every token of the text
            if '"' not in carac :  # no quotes: look the pair up in the dictionary
                texte_decrypter+=carre_polybe_decodage[carac]
            else :  # quotes present: the character was not encoded, so take the one between the quotes
                texte_decrypter+=carac[1]
        OpenWrite.ecrire_fichier(chemin_final,texte_decrypter)  # write the text to a file
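
# Usage sketch (file paths are hypothetical):
#   cryptage("macle", "message.txt", "message_crypte.txt")
#   decryptage("macle", "message_crypte.txt", "message_decrypte.txt")
# The same key must be used both ways, since it determines the layout of the
# Polybius square.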
|
[
"noreply@github.com"
] |
MrVyM.noreply@github.com
|
b1e22bf9ed41b1f3607a1bad07394668a0b1f99f
|
2e6c379a22e87ad15f6d9c0356e615f42609e0eb
|
/Codility/4CountingElements/MissingInteger.py
|
46a95f94c673ed01a4975c312cbd7e077517f1c1
|
[] |
no_license
|
opethe1st/CompetitiveProgramming
|
49f24b1b0c6bf737c5698a15edfdf5009a308a52
|
84ab62144f6b389ef74b7e8956b7e02e0f2ab108
|
refs/heads/master
| 2021-01-13T10:35:08.339291
| 2020-09-14T21:23:34
| 2020-09-14T21:23:34
| 69,969,077
| 7
| 2
| null | 2019-02-17T18:36:34
| 2016-10-04T13:46:21
|
Python
|
UTF-8
|
Python
| false
| false
| 206
|
py
|
def solution(A):
arr = [False]*100001
for a in A:
if 0 < a <= 100000:
arr[a] = True
for i in range(1, 100001):
if not arr[i]:
return i
return 100001
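
# Worked example (sketch): for A = [1, 3, 6, 4, 1, 2] the flags for 1, 2, 3, 4
# and 6 are set, so the scan returns 5 -- the smallest missing positive integer.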
|
[
"ogunks@live.com"
] |
ogunks@live.com
|
46d0d21c4da3de754acdc3335de44520de01be80
|
53a21ab982c8bf6695c2c103fcef7e2d9b535269
|
/youtube_dl.py
|
b382272741de9b7771816d56945c3defdfbfccfe
|
[
"MIT"
] |
permissive
|
Naaatan/PyTube
|
f3cf87548d845b86f4be64b0e3b2f845a09719dc
|
87d0f210ccb880b4b4593379441f98da92e5fcf4
|
refs/heads/main
| 2023-05-11T21:46:12.506210
| 2021-06-07T01:48:36
| 2021-06-07T01:48:36
| 374,491,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,843
|
py
|
#!/usr/bin/env python
# coding: utf-8
import glob
import time
import os
import re
import subprocess
from pytube import Playlist, YouTube
from tqdm import tqdm
# consts
MUSIC_DIR = os.path.join(os.getcwd(), "music")
VIDEO_DIR = os.path.join(os.getcwd(), "videos")
pbar = None
def download_video(url):
"""
Download video from YouTube.
Parameters
----------
url : str
YouTube video URL
Returns
----------
    info : tuple
        Tuple of (downloaded file path, video title).
"""
print("Downloading {url}".format(url=url))
yt = YouTube(url)
yt.register_on_progress_callback(show_progress_bar)
yt.register_on_complete_callback(complete_download)
stream = yt.streams.filter(progressive=True, file_extension='mp4').first()
return (stream.download(VIDEO_DIR), stream.title)
def download_playlist(url):
"""
Download playlist from YouTube.
Parameters
----------
url : str
YouTube playlist URL
Returns
----------
    info : dict
        Mapping of downloaded file path to video title.
"""
print("Downloading {url}".format(url=url))
pl = Playlist(url)
video_infos = {}
for video in pl.videos:
try:
video.register_on_progress_callback(show_progress_bar)
video.register_on_complete_callback(complete_download)
stream = video.streams.filter(progressive=True, file_extension='mp4').first()
video_path = stream.download(VIDEO_DIR)
video_infos[video_path] = stream.title
except Exception as e:
print(e)
continue
return video_infos
def is_playlist(video_url) -> bool:
    pattern_playlist = r'^(https|http)://www\.youtube\.com/playlist\?list=.*'
    return re.search(pattern_playlist, video_url) is not None
def convertMP3(video_path, title):
if video_path:
music_path = os.path.join(MUSIC_DIR, "{title}.mp3".format(title=title))
subprocess.call([
'ffmpeg',
'-i',
video_path,
            '-loglevel',  # standard-output setting
            'error',      # errors only
            # '-progress',  # show progress
            # '-',          # write progress to stdout
music_path
])
return music_path
return None
def show_progress_bar(stream, chunk, bytes_remaining):
    global pbar
    if pbar is None:
        print(stream.default_filename)
        pbar = tqdm(total=stream.filesize)
    progress = stream.filesize - bytes_remaining
    pbar.update(progress - pbar.n)  # tqdm expects a delta; `progress` is cumulative
    time.sleep(0.01)
def complete_download(stream, file_path):
global pbar
if pbar is not None:
pbar.close()
pbar = None
def download_with_convert(url):
if is_playlist(url):
video_infos = download_playlist(url)
for video_path, title in video_infos.items():
music_path = convertMP3(video_path, title)
convert_result_print(video_path, music_path)
else:
video_path, title = download_video(url)
music_path = convertMP3(video_path, title)
convert_result_print(video_path, music_path)
def convert_result_print(video_path, music_path):
print()
print("================== Result ==================")
if video_path and music_path:
print("video_path={video}".format(video=video_path))
print("music_path={music}".format(music=music_path))
elif video_path and (music_path is None):
print("video_path={video}".format(video=video_path))
print("music mp3 Convert Failed..")
else:
print("Download Failed")
print("============================================")
if __name__ == "__main__":
print("Please input youtube video URL or playlist URL")
print()
url = input(">> ")
download_with_convert(url)
|
[
"nagura@avancesys.co.jp"
] |
nagura@avancesys.co.jp
|
a5d8335443fdf0a6e06452af44b450abf01e00c8
|
f72c9f33046fa17b19dbb0c5f91b7fee64888f81
|
/blue_custom_branding/helpers/less.py
|
dc89119db20b02a0e848659d13f49efe685fcb79
|
[] |
no_license
|
eisaferreterias/newcode
|
7c6922a21459b0d2ddc7027d75df66c2d9747463
|
bf5df7e50acd116992c1da1498dd27d0f4b553f0
|
refs/heads/master
| 2020-04-14T09:59:46.831636
| 2019-01-02T06:11:25
| 2019-01-02T06:11:25
| 163,774,755
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,763
|
py
|
# -*- coding: utf-8 -*-
import os
import re
import logging
_logger = logging.getLogger(__name__)
def write_less(env):
"""
Write the company theme details as less variables in a database-specific less file.
:raise Exception: if there is an error opening or writing to files
:return: None
"""
dbname = env.cr.dbname
addon_path = env['ir.config_parameter'].get_param(
'blue_custom_branding.addon_path')
fname = "{}/static/src/less/variables_{}.less".format(addon_path, dbname)
companies = env['res.company'].search([])
try:
f = open(fname, "w")
for company in companies:
less_string = """
@brand-primary-{database}-{company_id}: #{primary};
@brand-success-{database}-{company_id}: #{success};
@brand-info-{database}-{company_id}: #{info};
@brand-warning-{database}-{company_id}: #{warning};
@brand-danger-{database}-{company_id}: #{danger};
@navbar-default-bg-{database}-{company_id}: @brand-primary-{database}-{company_id}; // @brand-primary
@navbar-inverse-bg-{database}-{company_id}: @brand-info-{database}-{company_id}; // @brand-info
@label-primary-bg-{database}-{company_id}: @brand-primary-{database}-{company_id}; // @brand-primary
""".format(
primary=company.theme_color_primary,
success=company.theme_color_success,
info=company.theme_color_info,
warning=company.theme_color_warning,
danger=company.theme_color_danger,
database=dbname,
company_id=company.id, )
f.write(less_string)
f.close()
except Exception as e:
_logger.debug('Theme error writing to file : %s' % e)
def write_bootswatch_less(env):
"""
Write the company theme details as bootswatch-compatible less
variables in a database-specific bootswatch less file.
:raise Exception: if there is an error opening or writing to files
:return: None
"""
dbname = env.cr.dbname
addon_path = env['ir.config_parameter'].get_param(
'blue_custom_branding.addon_path')
fname = "{}/static/src/less/bootswatch_{}.less".format(addon_path, dbname)
companies = env['res.company'].search([])
try:
f = open(fname, "w")
for company in companies:
            # { = {  } = }  // They get converted back when the files are merged.
            css_string = """
body.blue_theme__{database}__{company_id} a.oe_menu_toggler:hover,
body.blue_theme__{database}__{company_id} a.oe_menu_toggler:focus {
    background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;
}
/* main navigation bar */
body.blue_theme__{database}__{company_id} a.oe_menu_toggler,
body.blue_theme__{database}__{company_id} #oe_main_menu_navbar,
body.blue_theme__{database}__{company_id} .o_main_navbar {
    background-color: @brand-primary-{database}-{company_id} !important;
    border-color: @brand-primary-{database}-{company_id};
}
body.blue_theme__{database}__{company_id} a.o_menu_toggle:hover,
body.blue_theme__{database}__{company_id} a.o_menu_toggle:focus,
body.blue_theme__{database}__{company_id} button.o_mobile_menu_toggle:hover,
body.blue_theme__{database}__{company_id} button.o_mobile_menu_toggle:focus,
body.blue_theme__{database}__{company_id} .o_main_navbar ul.o_menu_systray li > a:hover,
body.blue_theme__{database}__{company_id} .o_main_navbar ul.o_menu_systray li > a:focus {
    background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;
}
@media (min-width: @grid-float-breakpoint-max) {
    body.blue_theme__{database}__{company_id} body .o_main_navbar > ul > li > a[data-toggle="collapse"]:hover,
    body.blue_theme__{database}__{company_id} body .o_main_navbar > ul > li > a[data-toggle="collapse"]:focus {
        background-color: @brand-info-{database}-{company_id} !important;
    }
}
body.blue_theme__{database}__{company_id} .o_list_view tfoot {
    background-color: @brand-primary-{database}-{company_id};
}
body.blue_theme__{database}__{company_id} .o_searchview .o_searchview_facet .o_searchview_facet_label {
    background-color: @brand-primary-{database}-{company_id};
}
body.blue_theme__{database}__{company_id} .o_form_view.o_form_editable .o_form_field .o_list_view td.o_readonly {
    background-color: transparent;
}
body.blue_theme__{database}__{company_id} .navbar {
    &-default {
        .badge {
            background-color: #fff;
            color: @navbar-default-bg-{database}-{company_id};
        }
    }
    &-inverse {
        .badge {
            background-color: #fff;
            color: @navbar-inverse-bg-{database}-{company_id};
        }
    }
}
body.blue_theme__{database}__{company_id} .o_form_view .o_notebook > ul.nav-tabs > li.active > a,
body.blue_theme__{database}__{company_id} .o_form_view .o_notebook > ul.nav-tabs > li.active > a:hover,
body.blue_theme__{database}__{company_id} .o_form_view .o_notebook > ul.nav-tabs > li.active > a:focus,
body.blue_theme__{database}__{company_id} .o_form_view .o_notebook > ul.nav-tabs > li.active > a:active {
    color: @brand-primary-{database}-{company_id};
}
/* For the community version */
/* This gets the developer mode button. */
body.blue_theme__{database}__{company_id} .label-primary:hover,
body.blue_theme__{database}__{company_id} .label-primary:focus,
body.blue_theme__{database}__{company_id} .label-primary {
    background-color: darken(@brand-primary-{database}-{company_id}, 10%);
}
body.blue_theme__{database}__{company_id} .o_main_navbar {
    background-color: @brand-primary-{database}-{company_id};
    border-color: @brand-primary-{database}-{company_id};
}
body.blue_theme__{database}__{company_id} .o_main_navbar button:hover,
body.blue_theme__{database}__{company_id} .o_main_navbar button:focus {
    background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;
}
/* This picks up the menu items that are open but lost focus. */
body.blue_theme__{database}__{company_id} .o_main_navbar > li.open > a:focus,
body.blue_theme__{database}__{company_id} .o_main_navbar > li.open > a[aria-expanded="true"] {
    background-color: darken(@brand-primary-{database}-{company_id}, 10%);
}
/* This is the "X" button that closes debug mode */
body.blue_theme__{database}__{company_id} a[data-action="leave_debug_mode"]:hover {
    background-color: darken(@brand-primary-{database}-{company_id}, 10%);
}
@media (min-width: @grid-float-breakpoint-max) {
    body.blue_theme__{database}__{company_id} .o_main_navbar > li > a.oe_menu_toggler {
        background-color: @brand-primary-{database}-{company_id} !important;
    }
}
@media (max-width: @grid-float-breakpoint-max) {
    body.blue_theme__{database}__{company_id} .o_main_navbar a:hover,
    body.blue_theme__{database}__{company_id} .o_main_navbar a:focus {
        background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;
    }
}
@media (min-width: @grid-float-breakpoint-max) {
    body.blue_theme__{database}__{company_id} .o_main_navbar > li > a.oe_menu_toggler:focus,
    body.blue_theme__{database}__{company_id} .o_main_navbar > li > a.oe_menu_toggler:active,
    body.blue_theme__{database}__{company_id} .o_main_navbar > li > a.oe_menu_toggler:hover,
    body.blue_theme__{database}__{company_id} .o_main_navbar > li > a[data-toggle="dropdown"]:hover,
    body.blue_theme__{database}__{company_id} .o_main_navbar > li > a[data-toggle="dropdown"]:focus,
    body.blue_theme__{database}__{company_id} .o_main_navbar > li > a[data-toggle="collapse"]:hover,
    body.blue_theme__{database}__{company_id} .o_main_navbar > li > a[data-toggle="collapse"]:focus,
    body.blue_theme__{database}__{company_id} .o_main_navbar > .open > a {
        background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;
    }
}
body.blue_theme__{database}__{company_id} .o_main_navbar {
    border-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;
}
body.blue_theme__{database}__{company_id} .o_main_navbar .o_menu_brand {
    border-bottom: 1px solid darken(@brand-primary-{database}-{company_id}, 10%);
}
body.blue_theme__{database}__{company_id}.o_web_client .navbar .o_menu_toggle:hover {
    background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;
}
body.blue_theme__{database}__{company_id}.o_web_client .o_main_navbar > ul > li > a:hover,
body.blue_theme__{database}__{company_id}.o_web_client .o_main_navbar > ul > li > a:hover,
body.blue_theme__{database}__{company_id}.o_web_client .o_main_navbar .dropdown-toggle:hover,
body.blue_theme__{database}__{company_id}.o_web_client .o_main_navbar .dropdown-toggle:focus {
    background-color: darken(@brand-primary-{database}-{company_id}, 10%) !important;
}
body.blue_theme__{database}__{company_id} .o_list_view tfoot {
    background-color: @brand-primary-{database}-{company_id};
}
body.blue_theme__{database}__{company_id} .o_searchview .o_searchview_facet .o_searchview_facet_label {
    background-color: @brand-primary-{database}-{company_id};
}
body.blue_theme__{database}__{company_id} .o_form_view.o_form_editable .o_form_field .o_list_view td.o_readonly {
    background-color: transparent;
}
body.blue_theme__{database}__{company_id} .navbar {
    &-default {
        .badge {
            background-color: #fff;
            color: @navbar-default-bg-{database}-{company_id};
        }
    }
    &-inverse {
        .badge {
            background-color: #fff;
            color: @navbar-inverse-bg-{database}-{company_id};
        }
    }
}
            """.format(
                database=dbname,
                company_id=company.id)
            if company.override_home:
                css_string += '''
body.blue_theme__{database}__{company_id} .o_application_switcher {
    background: -webkit-gradient(linear, left top, right bottom,
        from(@brand-info-{database}-{company_id}),
        to(darken(@brand-info-{database}-{company_id}, 10%))
    );
}
'''.format(
                    database=dbname,
                    company_id=company.id)
f.write(css_string)
f.close()
except Exception as e:
_logger.debug('Theme error writing to file : %s' % e)
def combine_bootswatch_less(env):
"""
Write the company theme details as bootswatch-compatible less
variables in a bootswatch less file.
:raise Exception: if there is an error opening or writing to files
:return: None
"""
addon_path = env['ir.config_parameter'].get_param(
'blue_custom_branding.addon_path')
if addon_path:
outname = "{}/static/src/less/bootswatch.less".format(addon_path)
filepath = "{}/static/src/less/".format(addon_path)
infiles = [fn for fn in os.listdir(
filepath) if re.match("bootswatch_.*.less", fn)]
try:
f = open(outname, "w")
for file in infiles:
with open(filepath + file, 'r') as datafile:
inless = datafile.read()
                    inless = inless.replace('{', '{')
                    inless = inless.replace('}', '}')
f.write(inless)
datafile.close()
f.close()
except Exception as e:
_logger.debug('Theme error writing to file : %s' % e)
def combine_variables_less(env):
"""
Write the company theme details as less variables in a less file.
:raise Exception: if there is an error opening or writing to files
:return: None
"""
addon_path = env['ir.config_parameter'].get_param(
'blue_custom_branding.addon_path')
if addon_path:
outname = "{}/static/src/less/variables.less".format(addon_path)
filepath = "{}/static/src/less/".format(addon_path)
infiles = [fn for fn in os.listdir(
filepath) if re.match("variables_.*.less", fn)]
try:
f = open(outname, "w")
for file in infiles:
with open(filepath + file, 'r') as datafile:
inless = datafile.read()
f.write(inless)
datafile.close()
f.close()
except Exception as e:
_logger.debug('Theme error writing to file : %s' % e)
|
[
"noreply@github.com"
] |
eisaferreterias.noreply@github.com
|
d5db198ca07bdb534c74600e5996092ed4d583d8
|
8e8a961a7b4e9ee012bd7e8e18de6abce9c189fb
|
/playground.py
|
99a1af554249edb32f33190c86854e6abd0a393f
|
[] |
no_license
|
youkaede77/LeetCode_Python
|
628eb74b835463b86f129eed37cbf4515b5eccc0
|
1e43fdac9d9baec899e10e1983a916a95fa59e84
|
refs/heads/master
| 2022-01-23T04:54:55.414576
| 2019-05-17T13:08:45
| 2019-05-17T13:08:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
class Solution(object):
def getRow(self, rowIndex):
"""
:type rowIndex: int
:rtype: List[int]
"""
ans = [1]+[0]*rowIndex
for i in range(rowIndex):
for j in range(i+1, 0, -1):
ans[j] = ans[j] + ans[j-1]
print(i,j,ans)
return ans
x = Solution()
x.getRow(5)
|
[
"weidafeng.edu@gmail.com"
] |
weidafeng.edu@gmail.com
|
deb02443f9433006c687b7c6430df80bc872c14c
|
1dd2bdcee28f17011401da44882534d0db441031
|
/exceptions.py
|
dfe2fc16cf94d709f395a6d3d58d34241db21d27
|
[] |
no_license
|
DjDeMi/bq78412
|
dc89adc97becd25bac4db3db1d44f036d6f4a87b
|
4c771b12de4812fb2ab6157d8c2a3c5df1cce345
|
refs/heads/master
| 2020-06-08T09:59:52.464844
| 2013-11-24T22:38:18
| 2013-11-24T22:38:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
class CRCError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return "Error " + str(self.value)
class timeoutError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return "Error " + str(self.value)
class sizeError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return "Error " + str(self.value)
class ACKError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return "Error " + str(self.value)
|
[
"djdemi@gmail.com"
] |
djdemi@gmail.com
|
dac582a4280341b6432f8c1dde055370905ef637
|
67fff1dfe06865b78a159efec9c7a3416a1170d3
|
/Snakefile
|
b87ac8edff111e3390cf9c6aadac2ea925ba9baa
|
[] |
no_license
|
marcoralab/SumHer_pipeline
|
32cece59926bd11eec42913b47778155166b18d5
|
4901b0bbacf3ad3d79f4164f57429583456909c7
|
refs/heads/master
| 2021-05-20T08:47:20.290634
| 2020-04-01T19:14:02
| 2020-04-01T19:14:02
| 252,205,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,735
|
'''Snakefile for sex-stratified GWAS
Version 0.2'''
from scripts.parse_config import parser, getcom, getloads, subs
from scripts.parse_config import proc_cohorts, proc_covars
from getpass import getuser
import glob
import os
import socket
isMinerva = "hpc.mssm.edu" in socket.getfqdn()
configfile: "config.yaml"
shell.executable("/bin/bash")
BPLINK = ["bed", "bim", "fam"]
CHROM = parser(config)
def gsc(wildcards, k):
return study_cohorts[wildcards.sample][k]
study_cohorts = {k: proc_cohorts(config['studies'][k], config)
for k in config['studies'].keys()}
study_covars = {k: proc_covars(k, config, study_cohorts)
for k in config['studies'].keys()}
if isMinerva:
anacondapath = sys.exec_prefix + "/bin"
shell.prefix(". ~/.bashrc; PATH={}:$PATH; ".format(anacondapath))
tempdir = "/sc/orga/scratch/{}/temp/".format(getuser())
else:
import tempfile
tempdir = tempfile.gettempdir() + '/'
com = getcom({'plink': 'plink --keep-allele-order', 'plink2': 'plink',
'plink3': 'plink --keep-allele-order',
'bcftools': 'bcftools', 'R': 'Rscript', 'R2': 'R', 'METAL': 'metal'},
{'plink': '--memory 3800', 'plink3': '--memory 15800'})
loads = getloads(config,
{'plink': 'plink', 'bcftools': 'bcftools', 'METAL': 'metal',
'R': ['R', 'pandoc', 'udunits']},
lsfextra={'R': 'RSTUDIO_PANDOC=$(which pandoc)'})
def filldir(string):
if '{basedir}' in string:
return string.format(basedir=config['base_dir'])
return string
infiles = {k: {kk: filldir(vv) for kk, vv in v.items() if isinstance(vv, str)}
for k, v in config['studies'].items()}
maf = config['maf']
COV = list(list(study_covars.values())[0].keys())
MAF = list(maf.keys())
MANEXT = ["manhattan.png", "qq.png"]
GWASMAN = ["empP", "filtered"] if config['perm'] else ["filtered"]
def maybetemp(x):
return x if config['keepgenos'] else temp(x)
OUTFILES = expand(
"GWAS/cov-{cov}.maf-{maf}/{sample}.{allsex}_{Ptype}.assoc.{tt}.{ext}",
cov=COV, maf=MAF, Ptype=GWASMAN, ext=MANEXT, tt=config['test'],
sample='ADGC', allsex=['male', 'female', 'interaction'])
if config['keepgenos']:
OUTFILES += expand(
"filtered/cov-{cov}.maf-{maf}/{sample}.{allsex}.chr{chrom}.{ext}",
cov=COV, maf=MAF, sample='ADGC', chrom=CHROM, ext=BPLINK,
allsex=['male', 'female', 'interaction'])
rule all:
input: OUTFILES
rule make_samplist:
input: lambda wildcards: infiles[wildcards.sample]["pheno"]
output:
pheno = "phenotypes/{sample}.pheno",
ikeep = "{sample}.chosen.ikeep"
params:
default_cht = lambda wildcards: gsc(wildcards, 'DEFAULT_COHORT'),
filts = lambda wildcards: subs(config[wildcards.sample]['filter']),
ec = lambda wildcards: gsc(wildcards, 'EXCLUDECOHORTS')
shell:
"""
{loads[R]}
{com[R]} scripts/make_plink_phenos.R {input} {output.pheno} {output.ikeep} \
{params.filts} {params.default_cht} {params.ec}
"""
rule filter_adgc:
input:
plink = lambda wildcards: expand(infiles[wildcards.sample]["geno"] + ".{ext}", ext=BPLINK),
keep = "{sample}.chosen.ikeep"
output: temp(expand("genotypes/{{sample}}.{{sex}}.maf-{{maf}}.chr{{chrom}}.{ext}", ext=BPLINK))
params:
i = lambda wildcards: infiles[wildcards.sample]["geno"],
o = "genotypes/{sample}.{sex}.maf-{maf}.chr{chrom}",
maf = lambda wildcards: maf[wildcards.maf],
shell:
"""
{loads[plink]}
{com[plink]} --bfile {params.i} --keep {input.keep} --filter-{wildcards.sex}s \
--maf {params.maf} --mac 10 --chr {wildcards.chrom} --geno 0.05 \
--hwe 0.000001 midp --hardy midp gz --make-bed --out {params.o}
"""
#look for significant nonrandom missingness
#make sure case-control is in file
rule test_miss:
input: rules.filter_adgc.params.o + '.bim'
output: "miss/{sample}.{sex}.maf-{maf}.chr{chrom}.missing",
"miss/{sample}.{sex}.maf-{maf}.chr{chrom}.exclude",
"miss/{sample}.{sex}.maf-{maf}.chr{chrom}.include"
params:
i = rules.filter_adgc.params.o,
o = "miss/{sample}.{sex}.maf-{maf}.chr{chrom}",
shell:
"""
{loads[plink]}
{com[plink]} --bfile {params.i} --test-missing midp \
--out {params.o}
sed -r 's/[[:blank:]]+/ /g;s/^\s|\s$//g' {output[0]} | \
awk 'NR > 1 && $5 < 0.000001 {{print $2}}' > {output[1]}
awk 'NR == FNR {{a[$1]; next}} !($2 in a) {{print $2}}' \
<(cat <(echo exclude) {output[1]}) {input} > {output[2]}
"""
rule combine_miss:
input:
expand("miss/{{sample}}.{sex}.maf-{{maf}}.chr{{chrom}}.include",
sex=['male', 'female'])
output:
"miss/allsex/{sample}.maf-{maf}.chr{chrom}.include"
shell:
"""
awk 'NR == FNR {{a[$1]; next}} $1 in a {{print}}' {input} > {output}
"""
rule prep_GWAS:
input:
geno = rules.filter_adgc.output,
keep = "miss/{sample}.{sex}.maf-{maf}.chr{chrom}.exclude",
output: maybetemp(multiext("filtered/cov-{cov}.maf-{maf}/{sample}.{sex,male|female}.chr{chrom}", ".bed", ".bim", ".fam"))
params:
i = rules.filter_adgc.params.o,
o = "filtered/cov-{cov}.maf-{maf}/{sample}.{sex}.chr{chrom}",
shell:
"""
{loads[plink]}
{com[plink]} --bfile {params.i} --exclude {input.keep} --make-bed --out {params.o}
"""
rule prep_GWAS_interact:
input:
geno = lambda wildcards: expand(infiles[wildcards.sample]["geno"] + ".{ext}", ext=BPLINK),
ikeep = "{sample}.chosen.ikeep",
keep = "miss/allsex/{sample}.maf-{maf}.chr{chrom}.include",
output: maybetemp(multiext("filtered/cov-{cov}.maf-{maf}/{sample}.interaction.chr{chrom}", ".bed", ".bim", ".fam"))
params:
i = rules.filter_adgc.params.i,
o = "filtered/cov-{cov}.maf-{maf}/{sample}.interaction.chr{chrom}",
shell:
"""
{loads[plink]}
{com[plink]} --bfile {params.i} --extract {input.keep} --keep {input.ikeep} --make-bed --out {params.o}
"""
rule do_GWAS:
input:
geno = rules.prep_GWAS.output,
phen = "phenotypes/{sample}.pheno"
output:
"GWAS/cov-{{cov}}.maf-{{maf}}/{{sample}}.{{sex,male|female}}.chr{{chrom}}.assoc.{tt}".format(tt=config['test']),
"GWAS/cov-{{cov}}.maf-{{maf}}/{{sample}}.{{sex,male|female}}.chr{{chrom}}.assoc.{tt}.perm".format(tt=config['test']) if config['perm'] else []
params:
i = rules.prep_GWAS.params.o,
o = "GWAS/cov-{cov}.maf-{maf}/{sample}.{sex,male|female}.chr{chrom}",
        cov = lambda wildcards: study_covars[wildcards.sample][wildcards.cov],
perm = 'perm ' if config['perm'] else '',
pname = lambda wildcards: config['studies'][wildcards.sample]['phenoname']
shell:
"""
{loads[plink]}
{com[plink3]} --bfile {params.i} \
--pheno {input.phen} --pheno-name {params.pname} \
--covar {input.phen} --covar-name {params.cov} \
--{config[test]} {params.perm} genotypic beta --ci 0.99 --out {params.o}
"""
def gettests(wildcards):
    # covariate strings are stored per sample in study_covars
    ncov = len(study_covars[wildcards.sample][wildcards.cov].split(', '))
    return '1, 3-{}, {}-{}'.format(
        ncov + 2, 3 * ncov + 3, 3 * ncov + 4)
rule do_GWAS_interact:
input:
geno = rules.prep_GWAS_interact.output,
phen = "phenotypes/{sample}.pheno"
output:
"GWAS/cov-{{cov}}.maf-{{maf}}/{{sample}}.interaction.chr{{chrom}}.assoc.{tt}".format(tt=config['test']),
"GWAS/cov-{{cov}}.maf-{{maf}}/{{sample}}.interaction.chr{{chrom}}.assoc.{tt}.perm".format(tt=config['test']) if config['perm'] else []
params:
i = rules.prep_GWAS_interact.params.o,
o = "GWAS/cov-{cov}.maf-{maf}/{sample}.interaction.chr{chrom}",
        cov = lambda wildcards: study_covars[wildcards.sample][wildcards.cov],
perm = 'perm ' if config['perm'] else '',
pname = lambda wildcards: config['studies'][wildcards.sample]['phenoname'],
tests = gettests
shell:
"""
{loads[plink]}
{com[plink3]} --bfile {params.i} --parameters {params.tests} \
--pheno {input.phen} --pheno-name {params.pname} \
--covar {input.phen} --covar-name {params.cov} \
--{config[test]} {params.perm} genotypic sex interaction beta \
--ci 0.99 --out {params.o}
"""
#--parameters 1, 4, 6-7
rule fix_gwas:
input: expand("GWAS/cov-{{cov}}.maf-{{maf}}/{{sample}}.{{allsex}}.chr{chrom}.assoc.{tt}", chrom=CHROM, tt=config['test'])
output: "GWAS/cov-{{cov}}.maf-{{maf}}/{{sample}}.{{allsex}}_filtered.assoc.{tt}".format(tt=config['test'])
shell:
r"""
sed -r 's/[[:blank:]]+/ /g;s/^\s|\s$//g' {input} | \
awk 'NR == 1 || ($5 == "ADD" && $7 != "NA")' | \
awk 'BEGIN {{FS=" |:"}} NR == 1 {{print $0, "A2"}} NR != 1 {{print $0, $4}}' > \
{output}
"""
rule gwas_manhattan:
input: rules.fix_gwas.output
output: [rules.fix_gwas.output[0] + '.' + x for x in MANEXT]
log: "GWAS/cov-{cov}.maf-{maf}/{sample}.{allsex}_filtered.plots.log"
shell:
"""
{loads[R]}
scripts/manhattan.R {input} &> {log}
"""
rule add_emp:
input:
sstats = rules.fix_gwas.output,
emp = expand("GWAS/cov-{{cov}}.maf-{{maf}}/{{sample}}.{{allsex}}.chr{chrom}.assoc.{tt}.perm", chrom=CHROM, tt=config['test'])
output: "GWAS/cov-{{cov}}.maf-{{maf}}/{{sample}}.{{allsex}}_empP.assoc.{tt}".format(tt=config['test'])
shell:
r"""
awk 'NR == FNR {{emp[$2] = $3}} NR != FNR {{print $0, emp[$2]}}' \
<(cat {input.emp} | sed -r 's/[[:blank:]]+/ /g;s/^\s|\s$//g') \
{input.sstats} > {output}
"""
rule emp_manhattan:
input: rules.add_emp.output
output: [rules.add_emp.output[0] + '.' + x for x in MANEXT]
log: "GWAS/cov-{cov}.maf-{maf}/{sample}.{allsex}_empP.plots.log"
shell:
"""
{loads[R]}
scripts/manhattan.emp.R {input} &> {log}
"""
|
[
"fultonh1@gmail.com"
] |
fultonh1@gmail.com
|
|
01bdaa76d7b26697d8497b9ccf1047896d67e5cb
|
3b9b4049a8e7d38b49e07bb752780b2f1d792851
|
/src/ui/base/ui_base_tests_bundle.gypi
|
2eeabe88a68f15af2e9b303ac363da4d95b3f8e7
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
webosce/chromium53
|
f8e745e91363586aee9620c609aacf15b3261540
|
9171447efcf0bb393d41d1dc877c7c13c46d8e38
|
refs/heads/webosce
| 2020-03-26T23:08:14.416858
| 2018-08-23T08:35:17
| 2018-09-20T14:25:18
| 145,513,343
| 0
| 2
|
Apache-2.0
| 2019-08-21T22:44:55
| 2018-08-21T05:52:31
| null |
UTF-8
|
Python
| false
| false
| 1,793
|
gypi
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file contains resources for the ui_base_unittests test bundle.
# See chrome_dll_bundle.gypi for a description of the techniques here.
{
'product_name': 'ui_unittests Framework',
'variables': {
# There is no executable in the mock framework, and so nothing to strip.
'mac_strip': 0,
},
'mac_bundle': 1,
'xcode_settings': {
'CHROMIUM_BUNDLE_ID': 'com.google.ChromiumUITests',
'DYLIB_COMPATIBILITY_VERSION': '1.0.0',
'DYLIB_CURRENT_VERSION': '1.0.0',
'DYLIB_INSTALL_NAME_BASE': '@executable_path/../Versions/1.0.0.0',
'LD_DYLIB_INSTALL_NAME':
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(WRAPPER_NAME)/$(PRODUCT_NAME)',
'INFOPLIST_FILE': 'test/framework-Info.plist',
},
'mac_bundle_resources': [
'test/framework-Info.plist',
'<(PRODUCT_DIR)/ui_test.pak',
# Just include the English-US locale made by ui_resources.gyp:ui_test_pak.
'<(PRODUCT_DIR)/ui/en.lproj/locale.pak',
],
'mac_bundle_resources!': [
'test/framework-Info.plist',
],
'postbuilds': [
{
'postbuild_name': 'Symlink Resources',
'action': [
'ln',
'-fns',
'Versions/A/Resources',
'${BUILT_PRODUCTS_DIR}/${WRAPPER_NAME}/Resources'
],
},
{
# Resource bundle pak names are hardcoded. This allows ui_test.pak to be
# found while running the ResourceBundle tests.
'postbuild_name': 'Symlink chrome_100_percent for test',
'action': [
'ln',
'-fns',
'ui_test.pak',
'${BUILT_PRODUCTS_DIR}/${WRAPPER_NAME}/Versions/A/Resources/chrome_100_percent.pak'
],
},
],
}
|
[
"changhyeok.bae@lge.com"
] |
changhyeok.bae@lge.com
|
6f1d0db25e6a71e4e3a0e6767b8cec85b3d52da2
|
395f7556c69c15ef97377842a104233624dcc30f
|
/votes/models.py
|
2a87657a1fdfe843d6b9cdb3e4e6b590b71bf860
|
[] |
no_license
|
Morelromain/rocket_vote
|
6441b7be2ed9da7fc17cae0c44f081456c0f396e
|
b575b39344bdf8bf37cb8b5f5c7581b0d9eebe11
|
refs/heads/main
| 2023-06-27T07:42:13.219112
| 2021-07-28T14:10:31
| 2021-07-28T14:10:31
| 385,236,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
class Voter(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
class Vote(models.Model):
title = models.CharField(max_length=50)
result = models.CharField(max_length=20, null=True, blank=True)
nb_choice = models.IntegerField(null=True, blank=True)
nb_all_vote = models.IntegerField(default=0, null=True, blank=True)
"""date_created = models.DateTimeField(auto_now_add=True)"""
"""date_end = models.DateTimeField()"""
"""creator = models.ForeignKey(
to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)"""
"""voter = models.ForeignKey(
to=Voter, on_delete=models.CASCADE)"""
def __str__(self):
return self.title
class Choice(models.Model):
description = models.CharField(max_length=30, null=True, blank=True)
nb_vote = models.IntegerField(default=0)
percent_vote = models.IntegerField(default=0, null=True, blank=True)
num_id_choice = models.IntegerField(null=True, blank=True)
voters_names = models.CharField(default="", max_length=120)
vote = models.ForeignKey(
to=Vote, on_delete=models.CASCADE,
null=True, blank=True
)
def __str__(self):
return self.description
|
[
"morelromain86@gmail.com"
] |
morelromain86@gmail.com
|
97cd3a49ac81326732594597d2ce787fb3fdc961
|
34c5467042b9f1f4dd83be6f56519fb93b82951e
|
/dataset/BSD500.py
|
c7c7387f75e6af83e554e70000a2fb835e35f9e4
|
[
"MIT"
] |
permissive
|
mmmuuuuua/pytorch-HED
|
34c5057c13707a0dc5d38ea49125b42968a41032
|
0a614537dd8aa0d5f7ae5812b235351d89832540
|
refs/heads/master
| 2022-03-03T10:30:20.750807
| 2019-10-20T10:10:21
| 2019-10-20T10:10:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,251
|
py
|
import os
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
from PIL import Image
import numpy as np
import random
import pdb
class BSD500Dataset():
def __init__(self, cfg):
self.cfg = cfg
self.rootdir = cfg.DATA.root
self.train_list = cfg.DATA.train_list
### data
self.all_path_list = []
with open('/'.join([self.rootdir, self.train_list]), 'r') as f:
lines = f.readlines()
for line in lines:
line = line[:-1]
cur_pair = line.split(' ')
self.all_path_list.append( cur_pair )
print('in data_loader: Train data preparation done')
'''
### transformer
mean = [float(item) / 255.0 for item in cfg.DATA.mean]
std = [1,1,1]
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean,std)
])
self.targetTransform = transforms.Compose([
transforms.ToTensor()
])
'''
    def mytransform(self, img, gt):
'''
input: img,gt, PIL image
output: tensor
'''
### ColorJitterUG:
if self.cfg.DATA.AUG.ColorJitter:
color_jitter = transforms.ColorJitter(brightness = self.cfg.DATA.AUG.brightness,
contrast = self.cfg.DATA.AUG.contrast,
saturation = self.cfg.DATA.AUG.saturation,
hue = self.cfg.DATA.AUG.hue )
color_jitter_transform = color_jitter.get_params(color_jitter.brightness, color_jitter.contrast,
color_jitter.saturation, color_jitter.hue)
img = color_jitter_transform(img)
if self.cfg.DATA.AUG.HFlip:
if random.random() > 0.5:
img = F.hflip(img)
gt = F.hflip(gt)
### ToTensor
img = F.to_tensor(img)
gt = F.to_tensor(gt)
### Normalization
mean = [float(item) / 255.0 for item in self.cfg.DATA.mean]
std = [1,1,1]
normalizer = transforms.Normalize(mean=mean, std=std)
img = normalizer(img)
return img, gt
def __getitem__(self, idx):
img_path, gt_path = [ '/'.join([self.rootdir, item]) for item in self.all_path_list[idx] ]
img = Image.open(img_path).convert('RGB')
gt = Image.open(gt_path).convert('L')
        img_t, gt_t = self.mytransform(img, gt)
if self.cfg.DATA.gt_mode=='gt_half':
gt_t[gt_t>=0.5] = 1
gt_t[gt_t<0.5] = 0
return img_t, gt_t
def __len__(self):
return len(self.all_path_list)
####################################################################################################
class BSD500DatasetTest():
def __init__(self, cfg):
self.rootdir = cfg.DATA.root
self.train_list = cfg.DATA.test_list
### data
self.all_path_list = []
with open('/'.join([self.rootdir, self.train_list]), 'r') as f:
lines = f.readlines()
for line in lines:
line = line[:-1]
self.all_path_list.append( line )
print('in data_loader: Test data preparation done')
### transformer
mean = [float(item) / 255.0 for item in cfg.DATA.mean]
std = [1,1,1]
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean,std)
])
def __getitem__(self, idx):
img_path = '/'.join([self.rootdir, self.all_path_list[idx]])
img_filename = img_path.split('/')[-1].split('.')[0]
img = Image.open(img_path).convert('RGB')
img_t = self.transform(img)
return (img_t, img_filename)
def __len__(self):
return len(self.all_path_list)
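
# Hypothetical wiring (cfg is the project's config object carrying the DATA.*
# fields read above; everything outside this file is an assumption):
#   from torch.utils.data import DataLoader
#   train_loader = DataLoader(BSD500Dataset(cfg), batch_size=1, shuffle=True)
#   test_loader = DataLoader(BSD500DatasetTest(cfg), batch_size=1, shuffle=False)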
|
[
"chongruo@gmail.com"
] |
chongruo@gmail.com
|
1f305d103ada09c4e3c1dadf0a5b42070d953e56
|
3ead5aedee381cd264c268220fbb3f7e4545f7c6
|
/keyboard.py
|
82868ea33b6bf18d77d99a2b0e6a2db697ac5b85
|
[
"MIT"
] |
permissive
|
AmineSoukara/OneSecMailBot
|
e48866b0b5e8ece9095da97cde2797c5ba3ba5eb
|
45a046890dac7ca8a10f3c48a4969aedf34cd04b
|
refs/heads/main
| 2023-07-16T15:07:07.872438
| 2021-09-04T16:03:55
| 2021-09-04T16:03:55
| 403,098,692
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from aiogram import types
menu = types.ReplyKeyboardMarkup(resize_keyboard=True)
menu.add(
    types.KeyboardButton('استلام البريد')  # Arabic: "Receive mail"
)
|
[
"noreply@github.com"
] |
AmineSoukara.noreply@github.com
|
9f664c31e97af59857c6cdf03b82180d5f75e483
|
24b3352991931cbc68a9f5f3a84354bf3a561e07
|
/review/restviews.py
|
f7fad7f4972ca6d5a8d930fed79f676059445776
|
[] |
no_license
|
kangsm0903/bongtoo_restapi
|
ac883dea82c77e1038564321b9c9b65272797825
|
ee7aaa9346892bec8a80383dcf8f961c14acb44a
|
refs/heads/master
| 2020-07-02T07:39:08.864468
| 2019-08-09T11:19:43
| 2019-08-09T11:19:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,434
|
py
|
# django
from django.shortcuts import get_object_or_404
# rest framework
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView, RetrieveUpdateDestroyAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated, AllowAny, IsAuthenticatedOrReadOnly
from rest_framework.pagination import PageNumberPagination
from rest_framework import mixins
# my app
# review
from .models import Review, Image, Like, Comment
from .serializers import ImageSerializer, ReviewSerializer, CommentSerializer, ReviewListSerializer
# users
from users.models import User
from users.serializers import UserSerializer
class SearchReviewList(APIView, PageNumberPagination):
page_size = 8
def get(self, request, format=None):
filters = {
'activity__in': request.GET.getlist('activites'),
'subject__in': request.GET.getlist('subjects'),
'region__city': request.GET.get('city'),
'region__town': request.GET.get('town')
}
items = filters.items()
filters = dict(filter(lambda item: item[1], items))
review = Review.objects.filter(
**filters)
result = self.paginate_queryset(review, request, view=self)
serializer = ReviewListSerializer(result, many=True)
return self.get_paginated_response(serializer.data)
class ReviewView(APIView):
def get(self, request, format=None):
filters = {
'activity__in': request.GET.getlist('activites'),
'subject__in': request.GET.getlist('subjects'),
'region__city': request.GET.get('city'),
'region__town': request.GET.get('town')
}
items = filters.items()
filters = dict(filter(lambda item: item[1], items))
review = Review.objects.filter(
**filters)
serializer = ReviewSerializer(review, many=True)
return Response(serializer.data)
def post(self, request):
user = request.user
data = request.data
if 'activity' in data:
activity = data.pop('activity')
else:
activity = []
if 'subject' in data:
subject = data.pop('subject')
else:
subject = []
if 'region' in data:
region = data.pop('region')
else:
region = []
serializer = ReviewSerializer(data=request.data)
# images = ImageSerializer()
try:
if serializer.is_valid():
serializer.save(user=user, activity=activity,
subject=subject, region=region)
return Response(data=serializer.data, status=status.HTTP_200_OK)
except:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ReviewDetailView(APIView):
def find_own_Review(self, review_id, user):
try:
review = Review.objects.get(id=review_id, creator=user)
return review
except Review.DoesNotExist:
return None
def get(self, request, review_id, format=None):
        review = get_object_or_404(Review, id=review_id)
        serializer = ReviewSerializer(review)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, review_id, format=None):
user = request.user
review = self.find_own_Review(review_id, user)
if review is None:
return Response(status=status.HTTP_401_UNAUTHORIZED)
serializer = ReviewSerializer(review, data=request.data, partial=True)
if serializer.is_valid():
serializer.save(user=user)
return Response(data=serializer.data, status=status.HTTP_204_NO_CONTENT)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class MyReviewView(APIView):
def get(self, request, pk, format=None):
reviews = User.objects.get(id=pk).reviews
serializer = ReviewListSerializer(reviews, many=True)
return Response(serializer.data)
# View for liking a review
class LikeReview(APIView):
    # Like a review
def post(self, request, review_id, format=None):
user = request.user
review = get_object_or_404(Review, id=review_id)
try:
Like.objects.get(creator=user, review=review)
return Response(status=status.HTTP_304_NOT_MODIFIED)
except Like.DoesNotExist:
Like.objects.create(creator=user, review=review)
return Response(status=status.HTTP_201_CREATED)
    # Cancel a like
def delete(self, request, review_id, format=None):
user = request.user
review = get_object_or_404(Review, id=review_id)
try:
like = Like.objects.get(creator=user, review=review)
like.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except Like.DoesNotExist:
return Response(status=status.HTTP_304_NOT_MODIFIED)
class ReviewCommentView(APIView):
    def get(self, request, review_id, format=None):
review = get_object_or_404(Review, id=review_id)
comments = review.comments.all()
serializer = CommentSerializer(comments, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
def post(self, request, review_id, format=None):
user = request.user
review = get_object_or_404(Review, id=review_id)
serializer = CommentSerializer(data=request.data)
if serializer.is_valid():
serializer.save(created_by=user, review=review)
return Response(data=serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CommentsView(ListAPIView):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsAuthenticatedOrReadOnly]
class CommentDetailView(RetrieveUpdateDestroyAPIView):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
permission_classes = [IsAuthenticatedOrReadOnly]
|
[
"gtah2yk@gmail.com"
] |
gtah2yk@gmail.com
|
8b5d3ec3cfaa50228532f727bea9f0114cbb6040
|
fd2965d30b61567052738888079f174937d31fd8
|
/tests/test_application.py
|
f1ecab8593d3d11f649d4b410a8451168db8aa60
|
[
"MIT"
] |
permissive
|
AlexMathew/vial
|
16d39220cbacc2a0ecfab0d041e410acff6259ab
|
2d8bb09915a3ccd84549ed68e365ea843bcc1865
|
refs/heads/master
| 2023-01-08T01:50:18.885588
| 2021-03-19T05:55:16
| 2021-03-19T05:55:16
| 161,334,656
| 4
| 3
|
MIT
| 2022-12-23T19:35:11
| 2018-12-11T13:02:39
|
Python
|
UTF-8
|
Python
| false
| false
| 622
|
py
|
from nose import with_setup
from nose.tools import (assert_equal, assert_in, assert_is_none,
assert_is_not_none)
from vial.server.application import Application
app = Application('test_runner')
def setup():
@app.route(methods=['GET'], path='/$')
def route1():
return '1'
@with_setup(setup)
def test_application_routes():
assert_in('get', app._routes)
assert_is_none(app.validate_route('get', '/'))
assert_is_not_none(app.validate_route('post', '/'))
assert_is_not_none(app.validate_route('get', '/1'))
assert_equal('1', app.get_controller('get', '/')[0]())
|
[
"alexmathew003@gmail.com"
] |
alexmathew003@gmail.com
|
d2e62ae7d228a189efbf53aa40a08bced02d5fef
|
cf40d5061b6459f984651f92a5ec1ad442886ec0
|
/node_modules/watchpack-chokidar2/node_modules/fsevents/build/config.gypi
|
7435f3d786baf8fe6bd368baea139d464a8e0776
|
[
"MIT"
] |
permissive
|
MathGL92/react-intro-portals-and-refs
|
2db988ab3050c9ccf498498195a9c03bdc58fb04
|
033271009c52e9fc744505c5eb7a3aadfbf848c3
|
refs/heads/master
| 2023-07-15T06:13:27.920968
| 2021-08-29T04:46:29
| 2021-08-29T04:46:29
| 400,952,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,846
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"icu_ver_major": "68",
"is_debug": 0,
"llvm_version": "12.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "false",
"node_module_version": 88,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/15.10.0_1",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "88.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "12.0",
"nodedir": "/Users/mathieulonge/Library/Caches/node-gyp/15.10.0",
"standalone_static_library": 1,
"metrics_registry": "https://registry.npmjs.org/",
"globalconfig": "/usr/local/etc/npmrc",
"init.module": "/Users/mathieulonge/.npm-init.js",
"init_module": "/Users/mathieulonge/.npm-init.js",
"userconfig": "/Users/mathieulonge/.npmrc",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"cache": "/Users/mathieulonge/.npm",
"user_agent": "npm/7.5.3 node/v15.10.0 darwin x64",
"prefix": "/usr/local"
}
}
|
[
"mathieu.longe@orange.fr"
] |
mathieu.longe@orange.fr
|
2dfddc1e33cbd66f5b0d212c97211597b790b360
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/BillRepayResult.py
|
fb5b8f38c9de1fe638c540d2a34f43874b6375cf
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,810
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BillRepayResult(object):
def __init__(self):
self._error_code = None
self._error_msg = None
self._result = None
@property
def error_code(self):
return self._error_code
@error_code.setter
def error_code(self, value):
self._error_code = value
@property
def error_msg(self):
return self._error_msg
@error_msg.setter
def error_msg(self, value):
self._error_msg = value
@property
def result(self):
return self._result
@result.setter
def result(self, value):
self._result = value
def to_alipay_dict(self):
params = dict()
if self.error_code:
if hasattr(self.error_code, 'to_alipay_dict'):
params['error_code'] = self.error_code.to_alipay_dict()
else:
params['error_code'] = self.error_code
if self.error_msg:
if hasattr(self.error_msg, 'to_alipay_dict'):
params['error_msg'] = self.error_msg.to_alipay_dict()
else:
params['error_msg'] = self.error_msg
if self.result:
if hasattr(self.result, 'to_alipay_dict'):
params['result'] = self.result.to_alipay_dict()
else:
params['result'] = self.result
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BillRepayResult()
if 'error_code' in d:
o.error_code = d['error_code']
if 'error_msg' in d:
o.error_msg = d['error_msg']
if 'result' in d:
o.result = d['result']
return o
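# Illustrative round trip (sketch; the field values here are made up):
#   o = BillRepayResult.from_alipay_dict({'error_code': 'SUCCESS', 'error_msg': 'ok'})
#   o.to_alipay_dict()  # -> {'error_code': 'SUCCESS', 'error_msg': 'ok'}
# Note that to_alipay_dict() only emits keys whose current values are truthy.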
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
f6ed7d2d772b2c228ddc3e8665375c38ea5fbc00
|
64e918d603f3673160019bc1fd339359f774181d
|
/Encapsulation project.py
|
9769b5b803a1500f96b0849213d425a122a19d11
|
[] |
no_license
|
wbennett84/Python-Projects
|
becacf14d2f7041853a68a33979c79aeda3ee30e
|
e99d6bfce8718341138f13a8d7c9ec47a18730d3
|
refs/heads/master
| 2023-06-07T03:41:40.304549
| 2021-07-01T02:06:57
| 2021-07-01T02:06:57
| 374,774,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
# Here we define our class and set up some variables with initial values.
# These are private and protected variables, respectively.
# Next we set up some print and value-setting functions for use
# later on when we call them.
class Private:
def __init__(self):
self.__private = 30
self._protected = 0
def getitprivate(self):
print(self.__private)
def getitprotected(self):
print(self._protected)
def setit(self, priv):
self.__private = priv
def setit2(self, priv):
self._protected = priv
# Below we instantiate an object (instance2, an instance of the Private class)
# and call a couple of the print functions.
# We then change some variable values with the setter functions
# and print the new values after they have been reset.
# This demonstrates a class that makes use of both
# the private and protected access conventions.
instance2 = Private()
instance2.getitprivate()
instance2.getitprotected()
instance2.setit(39)
instance2.getitprivate()
instance2.setit2(399)
instance2.getitprotected()
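# Name-mangling note: the "private" attribute is still reachable from outside
# via its mangled name, while the protected one is accessible directly:
#   print(instance2._Private__private)  # -> 39
#   print(instance2._protected)         # -> 399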
|
[
"81777304+wbennett84@users.noreply.github.com"
] |
81777304+wbennett84@users.noreply.github.com
|
dd2828173e9ba99516e67147bc939f0500f6c8b9
|
9b20161b91400238b0c6e6ee3282a328d42935e2
|
/tensorflow_datasets/core/utils/generic_path.py
|
19967f951b9471036227c762a4c899090a58d51d
|
[
"Apache-2.0"
] |
permissive
|
okyanusoz/datasets
|
61c0ced07c420d7e900080e851890def74a37d94
|
8997c4140cd4fc145f0693787b1da78691930459
|
refs/heads/master
| 2023-05-31T23:19:30.153499
| 2021-05-06T19:56:49
| 2021-05-06T19:58:56
| 365,308,067
| 1
| 1
|
Apache-2.0
| 2021-07-04T11:15:13
| 2021-05-07T17:32:53
| null |
UTF-8
|
Python
| false
| false
| 3,599
|
py
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pathlib-like generic abstraction."""
import os
import typing
from typing import Callable, Dict, Tuple, Type, Union, TypeVar
from tensorflow_datasets.core.utils import gpath
from tensorflow_datasets.core.utils import type_utils
PathLike = type_utils.PathLike
ReadOnlyPath = type_utils.ReadOnlyPath
ReadWritePath = type_utils.ReadWritePath
PathLikeCls = Union[Type[ReadOnlyPath], Type[ReadWritePath]]
T = TypeVar('T')
_PATHLIKE_CLS: Tuple[PathLikeCls, ...] = (
gpath.PosixGPath,
gpath.WindowsGPath,
)
_URI_PREFIXES_TO_CLS: Dict[str, PathLikeCls] = {
# Even on Windows, `gs://`,... are PosixPath
uri_prefix: gpath.PosixGPath for uri_prefix in gpath.URI_PREFIXES
}
# pylint: disable=g-wrong-blank-lines
@typing.overload
def register_pathlike_cls(path_cls_or_uri_prefix: str) -> Callable[[T], T]:
...
@typing.overload
def register_pathlike_cls(path_cls_or_uri_prefix: T) -> T:
...
def register_pathlike_cls(path_cls_or_uri_prefix):
"""Register the class to be forwarded as-is in `as_path`.
```python
@utils.register_pathlike_cls('my_path://')
class MyPath(pathlib.PurePosixPath):
...
my_path = tfds.core.as_path('my_path://some-path')
```
Args:
path_cls_or_uri_prefix: If a uri prefix is given, then passing calling
`tfds.core.as_path('prefix://path')` will call the decorated class.
Returns:
The decorator or decoratorated class
"""
global _PATHLIKE_CLS
if isinstance(path_cls_or_uri_prefix, str):
def register_pathlike_decorator(cls: T) -> T:
_URI_PREFIXES_TO_CLS[path_cls_or_uri_prefix] = cls
return register_pathlike_cls(cls)
return register_pathlike_decorator
else:
_PATHLIKE_CLS = _PATHLIKE_CLS + (path_cls_or_uri_prefix,)
return path_cls_or_uri_prefix
# pylint: enable=g-wrong-blank-lines
def as_path(path: PathLike) -> ReadWritePath:
"""Create a generic `pathlib.Path`-like abstraction.
Depending on the input (e.g. `gs://`, `github://`, `ResourcePath`,...), the
system (Windows, Linux,...), the function will create the right pathlib-like
abstraction.
Args:
path: Pathlike object.
Returns:
path: The `pathlib.Path`-like abstraction.
"""
is_windows = os.name == 'nt'
if isinstance(path, str):
uri_splits = path.split('://', maxsplit=1)
if len(uri_splits) > 1: # str is URI (e.g. `gs://`, `github://`,...)
# On windows, `PosixGPath` is created for `gs://` paths
return _URI_PREFIXES_TO_CLS[uri_splits[0] + '://'](path) # pytype: disable=bad-return-type
elif is_windows:
return gpath.WindowsGPath(path)
else:
return gpath.PosixGPath(path)
elif isinstance(path, _PATHLIKE_CLS):
return path # Forward resource path, gpath,... as-is # pytype: disable=bad-return-type
elif isinstance(path, os.PathLike): # Other `os.fspath` compatible objects
path_cls = gpath.WindowsGPath if is_windows else gpath.PosixGPath
return path_cls(path)
else:
raise TypeError(f'Invalid path type: {path!r}')
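# Illustrative calls (behaviour follows directly from the branches above):
#   as_path('gs://bucket/data')  # -> gpath.PosixGPath, even on Windows
#   as_path('some/dir')          # -> gpath.WindowsGPath on Windows, else gpath.PosixGPath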
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
e213eed102c19e72cfea1fd9024c877c0704034c
|
649fb5b641c54db3f6cd89ee5d288519e5b6a3f9
|
/Tutorial_4_PyTest_TDD/ejercicio2/fibo.py
|
e9e0a069c3132f919f6ad044c4207df00fdcc6b9
|
[] |
no_license
|
Hindegarth/Arqui-Software
|
8b722dec78da4531d985f3a6566ff67282c87a29
|
4250543b4238598aa98a7222339489856307735b
|
refs/heads/main
| 2023-02-14T16:42:20.396550
| 2021-01-11T01:59:59
| 2021-01-11T01:59:59
| 302,714,901
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
def fibonacci(num):
if num < 2:
return num
else:
return fibonacci(num-1) + fibonacci(num-2)
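# First values: fibonacci(0..9) -> 0, 1, 1, 2, 3, 5, 8, 13, 21, 34.
# The naive recursion is exponential in num; memoizing (e.g. with
# functools.lru_cache) would make it linear.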
|
[
"noreply@github.com"
] |
Hindegarth.noreply@github.com
|
e6d3086c573f068856b707acf22ffd8a23dd140a
|
d026fc378de67067eaa06dbf3b401ff49b0c94fe
|
/TestScripts/BeatTracking/TestBpmDetect.py
|
ce17baa7d6889b7c255f0218efbb9f0c65d6ac4e
|
[] |
no_license
|
lenvdv/auto-dj
|
7544ff674b6b0430954dbf5efe4a284cfb30544c
|
a34ac0e0fd0869d2e3f2c020c12e1c4545ba7eba
|
refs/heads/master
| 2021-01-23T10:21:30.263480
| 2017-09-08T09:19:01
| 2017-09-08T09:19:01
| 93,055,455
| 8
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,166
|
py
|
import numpy as np
import sys
# Also see the default python example!
# Load a file using the command line
try:
filename = sys.argv[1]
    if len(sys.argv) > 3:  # both a min and a max BPM must be given
MIN_VALID_BPM = int(sys.argv[2])
MAX_VALID_BPM = int(sys.argv[3])
else:
MIN_VALID_BPM = 100.0
MAX_VALID_BPM = 190.0
except:
print "usage:", sys.argv[0], "<audiofile>"
sys.exit()
# Load the libraries
print 'Loading Essentia...'
from essentia import *
from essentia.standard import *
import matplotlib.pyplot as plt
# Load the audio
print 'Loading audio file "', filename, '" ...'
loader = essentia.standard.MonoLoader(filename = filename)
audio = loader()
# ------------ Calculate the onset detection function
print 'Initialising algorithms...'
FRAME_SIZE = 1024
HOP_SIZE = 512
spec = Spectrum(size = FRAME_SIZE)
w = Windowing(type = 'hann')
# For calculating the phase and magnitude
fft = np.fft.fft  # numpy's FFT, used in place of essentia's FFT()
c2p = CartesianToPolar()
od_csd = OnsetDetection(method = 'melflux')
od_flux = OnsetDetection(method = 'complex')
pool = Pool()
print 'Calculating frame-wise onset detection curve...'
for frame in FrameGenerator(audio, frameSize = FRAME_SIZE, hopSize = HOP_SIZE):
pool.add('windowed_frames', w(frame))
# TODO Test if this is faster?
print 'windowed frames: ', (pool['windowed_frames']).shape
fft_result = fft(pool['windowed_frames']).astype('complex64')
print 'fftresult: ', fft_result.shape
fft_result_mag = np.absolute(fft_result)
fft_result_ang = np.angle(fft_result)
# Process every frame vector in the result
for mag,phase in zip(fft_result_mag, fft_result_ang):
pool.add('onsets.complex', od_csd(mag, phase))
#pool.add('onsets.flux', od_flux(mag, phase))
# Done! now show the result
# ------------ Calculate the tempo function thingy (using method from paper)
# Step 1: normalise the data using an adaptive mean threshold
print 'Normalising result and half-wave rectifying it...'
def adaptive_mean(x, N):
#TODO efficient implementation instead of convolve
return np.convolve(x, [1.0]*int(N), mode='same')/N
novelty_mean = adaptive_mean(pool['onsets.complex'], 16.0)
# Step 2: half-wave rectify the result
novelty_hwr = (pool['onsets.complex'] - novelty_mean).clip(min=0)
# Step 3: then calculate the autocorrelation of this signal
print 'Autocorrelating resulting curve...'
def autocorr(x):
    result = np.correlate(x, x, mode='full')
    return result[result.size//2:]  # keep only the non-negative lags
novelty_autocorr = autocorr(novelty_hwr)
# Step 4: Apply a "shift-invariant comb filterbank"
# own implementation: sum over constant intervals
print 'Iterating over valid BPM values...'
#valid_bpms = np.arange(170.0, 176.0, 0.01)
valid_bpms = np.arange(MIN_VALID_BPM, MAX_VALID_BPM, 0.01)
for bpm in valid_bpms:
num_frames_per_beat = (60.0 * 44100.0)/(512.0 * bpm) # TODO put this in a function
frames = (np.round(np.arange(0,np.size(novelty_autocorr),num_frames_per_beat)).astype('int'))[:-1] # Discard last value to prevent reading beyond array (last value rounded up for example)
pool.add('output.bpm', np.sum(novelty_autocorr[frames])/np.size(frames))
bpm = valid_bpms[np.argmax(pool['output.bpm'])]
print 'Detected BPM: ', bpm
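# Sanity check on the frames-per-beat arithmetic above: at 44100 Hz with a
# hop size of 512 samples, one beat at 172 BPM lasts 60/172 ~= 0.349 s,
# i.e. (60 * 44100) / (512 * 172) ~= 30.0 frames between beats.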
# Step 5: Calculate phase information
# Valid phases in SECONDS
valid_phases = np.arange(0.0, 60.0/bpm, 0.001)
num_frames_per_beat_final = (60.0 * 44100.0)/(512.0 * bpm) #TODO put this in a function
for phase in valid_phases:
# Convert phase from seconds to frames
phase_frames = (phase * 44100.0) / (512.0)
frames = (np.round(np.arange(phase_frames,np.size(novelty_hwr),num_frames_per_beat_final)).astype('int'))[:-1] # Discard last value to prevent reading beyond array (last value rounded up for example)
pool.add('output.phase', np.sum(novelty_hwr[frames])/np.size(frames))
phase = valid_phases[np.argmax(pool['output.phase'])]
print 'Detected phase: ', phase
spb = 60./bpm #seconds per beat
beats = (np.arange(phase, (np.size(audio)/44100) - spb + phase, spb).astype('single'))
plt.subplot(511)
plt.plot(audio[0:int(0.01*len(audio))])
plt.xlim((0,len(audio)*0.01))
plt.title('Audio waveform')
plt.subplot(512)
plt.plot(novelty_hwr[0:int(0.01*len(novelty_hwr))])
plt.title('Half-wave rectified novelty detection curve')
plt.xlim((0,len(novelty_hwr)*0.01))
plt.subplot(513)
plt.plot(novelty_autocorr[0:int(0.01*len(novelty_autocorr))])
plt.xlim((0,0.01*len(novelty_autocorr)))
plt.title('Correlation of half-wave rectified novelty detection curve')
plt.subplot(514)
plt.title('BPM detection curve')
plt.plot(valid_bpms, pool['output.bpm'], linewidth=2.0)
plt.subplot(515)
plt.title('Phase detection curve')
plt.plot(valid_phases, pool['output.phase'], linewidth=2.0)
plt.show()
# Overlay the audio file with onsets
onsetMarker = AudioOnsetsMarker(onsets = beats)
audioMarked = onsetMarker(audio/2.)
# Stretch the result
#from librosa.effects import time_stretch
#audioMarked = time_stretch(audioMarked, 175./172.)
# Output the marked file
writer = MonoWriter(filename = 'test.wav')
beginIndex = int(0.2*np.size(audioMarked))
endIndex = int(0.5*np.size(audioMarked))
writer(audioMarked[beginIndex:endIndex]) #Only write fragment
# Play the result
from subprocess import call
call(["mplayer", 'test.wav'])
|
[
"len.vandeveire@gmail.com"
] |
len.vandeveire@gmail.com
|
f0f429078d396f0952b7c19b9802bc35cb48f9ef
|
0341cf21094e3d5bdf11cbfffe5928e3d43392d2
|
/JudgingSystem/apps.py
|
d6e040837246b05bd06cc812236c690b50473f24
|
[] |
no_license
|
ciuti/Judging-System
|
70890c1882e07f0223b26146096a7a7245b18519
|
c70321d11d4888f489f32d5a80975fd68a292b00
|
refs/heads/master
| 2020-12-28T15:45:27.515587
| 2020-02-05T07:29:08
| 2020-02-05T07:29:08
| 238,392,563
| 0
| 1
| null | 2020-10-01T00:40:55
| 2020-02-05T07:28:38
|
Python
|
UTF-8
|
Python
| false
| false
| 101
|
py
|
from django.apps import AppConfig
class JudgingsystemConfig(AppConfig):
name = 'JudgingSystem'
|
[
"hosecuter@gmail.com"
] |
hosecuter@gmail.com
|
e1647fc3691542a65b2764f02867a1fb4c023cd8
|
6bb1634996f9fa2521cbc6814b6f0976890aee39
|
/M3Sewver/web/validators/scanning.py
|
cfb8e36ee114af0efbda9719c0b37c2abcb23e94
|
[] |
no_license
|
SmallPotY/m3Allocation
|
bca66475df95edf8b875feed5c51a85ca10a7606
|
ba9147d00603bc540e58511095b6355b5a8ca892
|
refs/heads/master
| 2023-02-12T13:17:53.765358
| 2019-09-24T16:05:31
| 2019-09-24T16:05:31
| 210,164,793
| 0
| 0
| null | 2023-02-02T06:39:44
| 2019-09-22T14:55:02
|
Python
|
UTF-8
|
Python
| false
| false
| 663
|
py
|
# -*- coding:utf-8 -*-
from wtforms.validators import DataRequired
from web.validators import BaseForm
from wtforms import StringField, IntegerField, DateField
class InScanning(BaseForm):
    location_id = StringField(validators=[DataRequired(message='Please enter a location ID')])
    order_number = StringField(validators=[DataRequired(message='Please enter an order number')])
class CheckLocationId(BaseForm):
    """Validate the location ID."""
    location_id = StringField(validators=[DataRequired(message='Please enter a location ID')])
class CheckOrderNumber(BaseForm):
    """Validate the order number."""
    order_number = StringField(validators=[DataRequired(message='Please enter an order number')])
|
[
"1041132457@qq.com"
] |
1041132457@qq.com
|
25b8ef20f1d3fd2994351333dea03eefff95513e
|
21a5d36b32ddf277be891fd1f0e93d458c4f0c2f
|
/official/nlp/modeling/models/bert_span_labeler.py
|
2dd9ab13f518373b6bf82800256d75df9d553750
|
[
"Apache-2.0"
] |
permissive
|
pkulzc/models
|
7cf3b718bc4edba53accd14b692712f6c1883578
|
2ec6572e1b79127a7cf905c1e67ec6568e364f10
|
refs/heads/master
| 2021-06-28T08:04:36.609825
| 2020-06-18T17:54:53
| 2020-06-18T22:00:50
| 126,526,822
| 8
| 9
|
Apache-2.0
| 2018-03-23T18:50:30
| 2018-03-23T18:50:29
| null |
UTF-8
|
Python
| false
| false
| 3,874
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trainer network for BERT-style models."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
from official.nlp.modeling import networks
@tf.keras.utils.register_keras_serializable(package='Text')
class BertSpanLabeler(tf.keras.Model):
"""Span labeler model based on a BERT-style transformer-based encoder.
This is an implementation of the network structure surrounding a transformer
encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers
for Language Understanding" (https://arxiv.org/abs/1810.04805).
The BertSpanLabeler allows a user to pass in a transformer stack, and
instantiates a span labeling network based on a single dense layer.
Arguments:
network: A transformer network. This network should output a sequence output
and a classification output. Furthermore, it should expose its embedding
table via a "get_embedding_table" method.
initializer: The initializer (if any) to use in the span labeling network.
Defaults to a Glorot uniform initializer.
output: The output style for this network. Can be either 'logits' or
'predictions'.
"""
def __init__(self,
network,
initializer='glorot_uniform',
output='logits',
**kwargs):
self._self_setattr_tracking = False
self._network = network
self._config = {
'network': network,
'initializer': initializer,
'output': output,
}
# We want to use the inputs of the passed network as the inputs to this
# Model. To do this, we need to keep a handle to the network inputs for use
# when we construct the Model object at the end of init.
inputs = network.inputs
# Because we have a copy of inputs to create this Model object, we can
# invoke the Network object with its own input tensors to start the Model.
sequence_output, _ = network(inputs)
# This is an instance variable for ease of access to the underlying task
# network.
self.span_labeling = networks.SpanLabeling(
input_width=sequence_output.shape[-1],
initializer=initializer,
output=output,
name='span_labeling')
start_logits, end_logits = self.span_labeling(sequence_output)
# Use identity layers wrapped in lambdas to explicitly name the output
# tensors. This allows us to use string-keyed dicts in Keras fit/predict/
# evaluate calls.
start_logits = tf.keras.layers.Lambda(
tf.identity, name='start_positions')(
start_logits)
end_logits = tf.keras.layers.Lambda(
tf.identity, name='end_positions')(
end_logits)
logits = [start_logits, end_logits]
super(BertSpanLabeler, self).__init__(
inputs=inputs, outputs=logits, **kwargs)
@property
def checkpoint_items(self):
return dict(encoder=self._network)
def get_config(self):
return self._config
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
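# Usage sketch (hypothetical encoder; any network satisfying the contract in
# the class docstring works):
#   encoder = networks.TransformerEncoder(...)  # must expose get_embedding_table()
#   model = BertSpanLabeler(encoder)
#   start_logits, end_logits = model(encoder.inputs)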
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
28931249c5359ad6622ac5438cca8906a12cbfd6
|
bd5d1a05789e8c1dd181ca8108a1a2a52e41c3df
|
/assignment-1-liuyingjiacfa-master/assignment-1-liuyingjiacfa-master/assignment_1.py
|
3276258c744f2bc1b9257f3181b5a87df40aa81c
|
[] |
no_license
|
liuyingjiacfa1/homework
|
22566207bc0078ae3b6d2c422bf11bb867e48a3b
|
5a1e48364479a75742515f8dc42d918f624a03af
|
refs/heads/master
| 2020-05-16T02:09:44.373542
| 2019-10-14T02:01:21
| 2019-10-14T02:01:21
| 182,621,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,354
|
py
|
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
from pylab import mpl,plt
plt.style.use('ggplot')
mpl.rcParams['font.family']='serif'
data={
'url1':"https://www.ncdc.noaa.gov/cag/statewide/time-series/",
'url2':"-tavg-1-",
'url3':"-1895-2019.csv?base_prd=true&begbaseyear=1901&endbaseyear=2000",
'path':r'C:\LYJ\Python\homework\assignment-1-liuyingjiacfa-master\weather',
'save_path':r'C:\Users\Yingj\Desktop\Data Homework\homework\assignment-1-liuyingjiacfa-master',
'plot_state':['Illinois','California','New York','Texas']
}
class Homework1():
def __init__(self, data):
self.url1 = data['url1']
self.url2 = data['url2']
self.url3 = data['url3']
self.path = data['path']
self.save_path = data['save_path']
self.plot_state = data['plot_state']
urls=[]
for i in range(1,49):
for j in [1,8]:
url=self.url1+str(i)+self.url2+str(j)+self.url3
urls.append(url)
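        # For example, i=1 (the first state) and j=1 (January) produce:
        # https://www.ncdc.noaa.gov/cag/statewide/time-series/1-tavg-1-1-1895-2019.csv?base_prd=true&begbaseyear=1901&endbaseyear=2000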
for url in urls:
response = requests.get(url)
state, measure, month = response.text.split('\n')[0].split(', ')
with open(os.path.join(self.path, state + '_' + month + '.csv'), 'w') as ofile:
ofile.write(response.text)
weather_data = os.listdir(self.path)
dfs = []
for f in weather_data:
st, month = f.split('_')
df = pd.read_csv(os.path.join(self.path, f), skiprows = 4)
df['State'] = st
df['Date'] = pd.to_datetime(df['Date'], format = '%Y%m')
dfs.append(df)
df = pd.concat(dfs)
df = df.sort_values(['State', 'Date'])
self.df = df
def plot_1(self):
self.df['Year'] = self.df['Date'].map(lambda d: d.year)
self.df['Jan-Aug Delta'] = self.df.groupby(['State', 'Year'])['Value'].diff()
df_delta = self.df.dropna(subset=['Jan-Aug Delta'])[['State', 'Year', 'Jan-Aug Delta']]
State = []
for name, group in df_delta.groupby('State'):
State.append(name)
df_delta2 = pd.DataFrame()
for state in State :
df_delta2['Year']=df_delta['Year'][:125]
df_delta2[state]=df_delta[df_delta['State']==state].iloc[:,2]
df_delta2.index=df_delta2['Year']
title_name = 'Average Jan-Aug Temperature Variation'
df_delta2.loc[:, self.plot_state].plot(subplots = True, figsize = (16,9),title = title_name)
        plt.savefig(os.path.join(self.save_path, 'Jan_Aug_Temp_Delta.png'))
def plot_2(self):
self.df['Month'] = self.df['Date'].map(lambda d: d.month)
df2 = self.df.dropna()
State2 = []
for name, group in df2.groupby('State'):
State2.append(name)
df_average_temp = pd.DataFrame()
for state in State2:
df_average_temp['Year'] = df2['Year'][:125]
df_average_temp[state] = df2[df2['State'] == state].iloc[:,1]
df_average_temp.index = df_average_temp['Year']
title_name = 'Average August Temperature'
df_average_temp.loc[:,self.plot_state].plot(figsize = (16,9), title = title_name)
        plt.savefig(os.path.join(self.save_path, 'Aug_Temp.png'))
Homework = Homework1(data)
Homework.plot_1()
Homework.plot_2()
|
[
"yingjia.liu.eric@gmail.com"
] |
yingjia.liu.eric@gmail.com
|
5e11043ba615aac76aab5db918ba366a7498ee5f
|
367d2571f2ad5a141ca2ec7bb9d1a9999e3c300b
|
/player.py
|
c1e8bdf51d8a1f8981b6e03ce1f4c28661415d8b
|
[] |
no_license
|
advaitparulekar/The-Skateboard-Game
|
0c606d592fd8e383e30da0150cb410d96261d2f9
|
c4ab8d7c97de75b140cceeb90e055b99c43f142e
|
refs/heads/master
| 2020-06-25T06:01:37.231588
| 2019-07-27T23:59:01
| 2019-07-27T23:59:01
| 199,223,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
import arcade
import numpy as np
from brain import *
PLAYER_SCALING = 60/3016
MOVEMENT = 100
INPUTS = 9
OUTPUTS = 3
NEURON_COUNT = 5
class Player(arcade.Sprite):
def __init__(self, neuron_count, num_inputs, file_name):
super().__init__("character.png", PLAYER_SCALING)
weights = np.loadtxt(file_name)
self.brain = Brain(weights, num_inputs, neuron_count)
self.center_x = 64
self.center_y = 250
self.pos = 0
self.score = 0
    def draw(self):
        super().draw()  # calling self.draw() here would recurse forever
def update(self, raw):
player_move = self.brain.get_move(raw)
self.pos += player_move
if self.pos >= 4:
self.pos = 3
elif self.pos < 0:
self.pos = 0
self.center_y = 550-100*self.pos
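        # Lane mapping implied above: pos 0 -> y 550, 1 -> 450, 2 -> 350, 3 -> 250.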
|
[
"adutheparulekar@tamu.edu"
] |
adutheparulekar@tamu.edu
|
d9286fa7b073260972f731dea2fe668184dee7c1
|
85f52de727f72db30a4fc4161fc2414cd72035d8
|
/18day/8-从列表中选择符合条件的元素组成新列表.py
|
9e71f8fbabd6ba6d841b0afff5914b8a76b644ea
|
[] |
no_license
|
ittoyou/-2-
|
ff7ca3bfd9428ac6f3ba5332a4c62825c5d30dcd
|
2c988c2f996221e86e1bbbeb9b3e96da25fe8f92
|
refs/heads/master
| 2020-03-24T16:57:05.220482
| 2018-07-30T08:08:59
| 2018-07-30T08:08:59
| 142,844,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
price = [1100,5270,2965,6400,1996,8874]
sale = [x for x in price if x > 5000]
print('Original list: ', price)
print('Prices above 5000: ', sale)
|
[
"429013601@qq.com"
] |
429013601@qq.com
|
b0d3521a3d9a58cbc400d108fed0a5d0a2a5c801
|
59c9cb7f5aaa19124de3cc66f8bef1553084f043
|
/venv/bin/easy_install
|
a05162334a8283afbafb2ab8a47317616fd9202b
|
[] |
no_license
|
izaldal/Muslimbook
|
92726dd91f74598b5be3c11781e05d3bbba7ff0f
|
54964b4a231b8c31b8fc297dfa5281de02d69b80
|
refs/heads/master
| 2021-03-18T09:51:53.085423
| 2020-03-22T07:34:36
| 2020-03-22T07:34:36
| 247,064,547
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
#!/Users/izadeenalkoran/Desktop/Muslimbook/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
|
[
"58135379+izaldal@users.noreply.github.com"
] |
58135379+izaldal@users.noreply.github.com
|
|
784aaa14232009b5b096f93fbc7d7847d67c9d86
|
ad37c4fac9daf27d24ee518aace00ed234982f67
|
/PIDvsRL/compare.py
|
2f5fef8bc690689bbd8bd98772441e6d5701d8ca
|
[] |
no_license
|
BaiLiping/Coaching
|
81025abf1dadf54069939e51e1ad49d6280efa9a
|
5388cb42a59f834c8258fae8f2d4ae01ee2bfe56
|
refs/heads/master
| 2023-04-10T13:33:18.202175
| 2021-04-18T02:11:34
| 2021-04-18T02:11:34
| 325,438,279
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,811
|
py
|
from tensorforce import Agent, Environment
import matplotlib.pyplot as plt
import numpy as np
import math
import pickle
from tqdm import tqdm
import gym
import statistics
rl=[]
pid=[]
rl_average=[]
pid_average=[]
ip_without=pickle.load(open( "ip_without_record.p", "rb"))
ip_record=pickle.load(open( "ip_record.p", "rb"))
ip_evaluation_record_without=pickle.load(open( "ip_evaluation_without_record.p", "rb"))
ip_evaluation_record=pickle.load(open( "ip_evaluation_record.p", "rb"))
double_without=pickle.load(open( "double_without_record.p", "rb"))
double_record=pickle.load(open( "double_record.p", "rb"))
double_evaluation_record_without=pickle.load(open( "double_evaluation_without_record.p", "rb"))
double_evaluation_record=pickle.load(open( "double_evaluation_record.p", "rb"))
hopper_without=pickle.load(open( "hopper_without_record.p", "rb"))
hopper_record=pickle.load(open( "hopper_record.p", "rb"))
hopper_evaluation_record_without=pickle.load(open( "hopper_evaluation_without_record.p", "rb"))
hopper_evaluation_record=pickle.load(open( "hopper_evaluation_record.p", "rb"))
walker_without=pickle.load(open( "walker_without_record.p", "rb"))
walker_record=pickle.load(open( "walker_record.p", "rb"))[2][0]
walker_evaluation_record_without=pickle.load(open( "walker_evaluation_without_record.p", "rb"))
walker_evaluation_record=pickle.load(open( "walker_evaluation_record.p", "rb"))[2][0]
n_groups = 4
standard=[800,7000,800,800]
without=[ip_without,double_without,hopper_without,walker_without]
coached=[ip_record,double_record,hopper_record,walker_record]
average_over=[20,150,100,100]
evaluation_without=[ip_evaluation_record_without,double_evaluation_record_without,hopper_evaluation_record_without,walker_evaluation_record_without]
evaluation=[ip_evaluation_record,double_evaluation_record,hopper_evaluation_record,walker_evaluation_record]
name=['ip','double','hopper','walker']
#get bounds
without_ave=[]
coached_ave=[]
without_sd=[]
coached_sd=[]
for i in range(len(name)):
actual_without_record=without[i]
actual_record=coached[i]
braket_size=average_over[i]
start_point=0
without_average=[]
coached_average=[]
without_standard_deviation=[]
coached_standard_deviation=[]
for j in range(len(actual_record)-braket_size+1):
braket_without=actual_without_record[start_point:start_point+braket_size]
without_mean=statistics.mean(braket_without)
without_average.append(without_mean)
without_standard_deviation.append(statistics.stdev(braket_without, xbar = without_mean))
braket_coached=actual_record[start_point:start_point+braket_size]
coached_mean=statistics.mean(braket_coached)
coached_average.append(coached_mean)
coached_standard_deviation.append(statistics.stdev(braket_coached, xbar = coached_mean))
start_point+=1
without_sd.append(without_standard_deviation)
coached_sd.append(coached_standard_deviation)
without_ave.append(without_average)
coached_ave.append(coached_average)
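# The loop above is a plain sliding-window mean/stdev. An equivalent sketch,
# assuming pandas were imported, would be (for the coached series):
#   s = pd.Series(actual_record)
#   coached_average = s.rolling(braket_size).mean().dropna().tolist()
#   coached_standard_deviation = s.rolling(braket_size).std().dropna().tolist()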
#plot training results
for i in range(len(name)):
fig=plt.figure(figsize=(13,7))
without_record=np.array(without_ave[i])
coached_record=np.array(coached_ave[i])
without_standard_deviation=np.array(without_sd[i])
coached_standard_deviation=np.array(coached_sd[i])
evalu_without=evaluation_without[i]
evalu=evaluation[i]
evalu_without_ave=int(sum(evalu_without)/len(evalu_without))
evalu_ave=int(sum(evalu)/len(evalu))
env_standard=standard[i]
x=range(len(without_record))
plt.plot(x,without_record,label='Normal Training\nEvaluation %s'%evalu_without_ave,color='black',linestyle='-.')
plt.fill_between(x, without_record - without_standard_deviation, without_record+without_standard_deviation,color='gray',alpha=0.3)
plt.plot(x,coached_record,label='Coached by PID Controller\nEvaluation %s'%evalu_ave,color='royalblue')
plt.fill_between(x, coached_record - coached_standard_deviation, coached_record+coached_standard_deviation,color='royalblue',alpha=0.3)
plt.xlabel('Episode Number', fontsize=25)
plt.xticks(fontsize=18)
plt.ylabel('Episode Reward', fontsize=25)
plt.yticks(fontsize=18)
plt.legend(loc='upper left',ncol=1, borderaxespad=0,prop={'size': 20})
plt.axhline(y=env_standard, color='black', linestyle='dotted')
plt.savefig('%s.png' %name[i])
for k in range(n_groups):
for i in range(len(without_ave[k])):
if without_ave[k][i]>=standard[k]:
rl_average.append(i+average_over[k]-1)
break
for k in range(n_groups):
for i in range(len(coached_ave[k])):
if coached_ave[k][i]>=standard[k]:
pid_average.append(i+average_over[k]-1)
break
for k in range(n_groups):
count=0
first_time=0
index=0
total=5
for i in range(len(without[k])):
if without[k][i]>=standard[k]:
if first_time==0:
count=1
index=i
first_time=1
total-=1
elif i-index==1:
count+=1
index=i
total-=1
if total==0:
rl.append(index)
break
else:
count=1
total=4
index=i
for k in range(n_groups):
count=0
first_time=0
index=0
total=5
for i in range(len(coached[k])):
if coached[k][i]>=standard[k]:
if first_time==0:
count=1
index=i
first_time=1
total-=1
elif i-index==1:
count+=1
index=i
total-=1
if total==0:
pid.append(index)
break
else:
count=1
total=4
index=i
# create plot
print('rl:',rl)
print('rl_average',rl_average)
print('pid:',pid)
print('pid_average',pid_average)
labels = ['Inverted\nPendulum', 'Double\nInverted\nPendulum', 'Hopper','Walker']
x = np.arange(len(labels)) # the label locations
width = 0.35/2 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width*3/2, pid, width, label='With PID Coaching\n 5 Consecutive Wins')
rects3 = ax.bar(x - width/2, pid_average, width, label='With PID Coaching\n Average over 20')
rects2 = ax.bar(x + width/2, rl, width, label='Without Coaching\n 5 Consecutive Wins')
rects4 = ax.bar(x + width*3/2, rl_average, width, label='Without Coaching\n Average over 20')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Episode Number')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
autolabel(rects4)
fig.tight_layout()
plt.savefig('compare.png')
|
[
"blp_engineer@outlook.com"
] |
blp_engineer@outlook.com
|
7b8b23fe9207efbed061a777795142ee72cfdbd0
|
3cf552ace92dbdb911bbf4a9a02f7fc350546d6f
|
/rock_paper_scissors.py
|
7cfde922e7fc393720fc4894d23b950aa3ad2069
|
[] |
no_license
|
oshlern/ObjectOriented
|
99d16593a22682fe14b5c4494072df2eab9d7d9b
|
bb7374628f70cd89739c3b94a8c9992994d48e07
|
refs/heads/master
| 2021-01-21T12:11:06.867484
| 2018-06-07T01:36:57
| 2018-06-07T01:36:57
| 102,046,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,033
|
py
|
import random
CHOICES = ['rock', 'paper', 'scissors']
class Action:
def __init__(self, prompt):
'''Create an action instance, either by randomly choosing the action or prompting the user.'''
# if an action hasn't been named, choose it randomly
if prompt is False:
self.name = random.choice(CHOICES)
else:
while True:
self.name = raw_input('Choose {}\n'.format(', '.join(CHOICES)))
if self.name in CHOICES:
# the user made a valid choice, so we can stop the loop
break
else:
# the user picked something else, so make them choose again
print 'Invalid action {}.'.format(self.name)
# get the position of the choice in the list
self.id = CHOICES.index(self.name)
def compete(self, other_action):
'''Compete against another action. Print out who won.'''
if other_action.id == self.id:
print 'Tie! Both chose {}!'.format(self.name)
# each action is beaten by the action after it in the list
# modulo makes it wrap around to the beginning of the list
elif ((other_action.id + 1) % len(CHOICES)) == self.id:
print '{} beats {}! I win!'.format(self.name.capitalize(), other_action.name)
else:
print '{} beats {}! You win!'.format(other_action.name.capitalize(), self.name)
# this is a standard Python thing: definitions go above, and any code that will actually
# run should go into the __main__ section. This way, if someone imports the file because
# they want to use the functions or classes you've defined, it won't start running your game
# automatically
if __name__ == '__main__':
# Create actions for the two players, computer and user
computer_action = Action(prompt=False)
user_action = Action(prompt=True)
# Have the actions play against one another
computer_action.compete(user_action)
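    # Worked example of the modulo trick in compete(): with rock=0, paper=1,
    # scissors=2, if self is paper (1) and the other action is rock (0),
    # then (0 + 1) % 3 == 1 == self.id, so paper beats rock.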
|
[
"osherler@gmail.com"
] |
osherler@gmail.com
|
de284ecc38f6d4920a0aa4ffd069fbb1cff69f8d
|
b8b3847c527fc1655244056e0869ced82ce6a937
|
/Big Data/Assignment 1/Code/Data_Mining.py
|
b317ac45f976762dc011e1e86f9dc80146cc6bcb
|
[] |
no_license
|
anirudh-11/code
|
b87256b15f154f772b10b278be8a717f93eb519e
|
e0b3acdc14b9b83f61e7450198e289053d60d19d
|
refs/heads/master
| 2022-12-13T21:43:47.364323
| 2020-08-11T14:26:28
| 2020-08-11T14:26:28
| 245,441,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,360
|
py
|
import pandas as pd
import numpy as np
import fim
# from mlxtend.preprocessing import TransactionEncoder
# from mlxtend.frequent_patterns import apriori, association_rules
data = pd.read_csv(r'cleaned_data.csv')
print(data.head())
print(data.shape)
data_list = data.values
print(data.values)
def mfi(data):
print("Using relim for mfi : ")
freq_list = fim.relim(tracts = data, target = 'm', supp = 5)
print("The frequent item list is : ")
print(freq_list)
print("Using ista for mfi : ")
freq_list = fim.ista(tracts = data, target = 'm', mode = 'z', algo = 'p',supp = 5)
print("The frequent item list is : ")
print(freq_list)
def cfi(data):
print("Using relim for cfi : ")
freq_list = fim.relim(tracts = data, target = 'c', supp = 5)
print("The frequent item list is : ")
print(freq_list)
print("Using ista for cfi : ")
freq_list = fim.ista(tracts = data, target = 'c', algo = 'p',supp = 5)
print("The frequent item list is : ")
print(freq_list)
def fi(data):
print("Using apriori for fim : ")
freq_list = fim.apriori(tracts = data, supp = 5)
print("The frequent item list is : ")
print(freq_list)
rules = fim.apriori(tracts = data, target = 'r', eval = 'c', report = 'c')
print("The rules are : ")
print(rules)
rules = fim.apriori(tracts = data, target = 'r', eval = 'l', report = 'l')
print("The rules are (evaluated with lift): ")
print(rules)
print("lfi using apriori : ")
lfi(freq_list)
print("Using fp-growth for fim : ")
freq_list = fim.fpgrowth(tracts = data, supp = 5)
print("The frequent item list is : ")
print(freq_list)
rules = fim.fpgrowth(tracts = data, target = 'r', eval = 'c', report = 'c', conf = 60)
print("The rules are (evaluated with confidence): ")
print(rules)
rules = fim.fpgrowth(tracts = data, target = 'r', eval = 'l', report = 'l', conf = 60)
print("The rules are (evaluated with lift): ")
print(rules)
print("lfi using fpgrowth is : ")
lfi(freq_list)
def lfi(freq_list):
len_of_freq_list = [len(ele) for ele in freq_list]
lfi = freq_list[len_of_freq_list.index(max(len_of_freq_list))]
print("lfi is : ")
print(lfi)
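# Note on the selection above: index(max(...)) returns the position of the
# first element of maximal length, so ties for the longest entry are
# resolved in favour of the earliest one.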
mfi(data_list)
cfi(data_list)
fi(data_list)
|
[
"noreply@github.com"
] |
anirudh-11.noreply@github.com
|
b42a9b1f9a54ad056c572604eccca2d115889414
|
99436974b0750bc0cef6999804160bd4a746f9a2
|
/activity/blindDrive.py
|
dc867e876d62352713c5f79c3c61b36a0d8647f3
|
[] |
no_license
|
disperate/haley
|
3d1bd87c26737e1346ef8d0f50b648ac22f0bdd1
|
513eee702f43742eaac54c2557914c0baacf9be2
|
refs/heads/master
| 2021-03-19T15:23:30.911772
| 2017-07-05T07:26:21
| 2017-07-05T07:26:21
| 85,970,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
import time
from threading import Thread
import config
class blindDriveActivity(Thread):
def __init__(self, fsm, motor):
super().__init__()
self._running = True
self._motorController = motor
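        # Note: the fsm argument is accepted for interface parity but unused here.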
def terminate(self):
self._running = False
def run(self):
while (self._running):
self._motorController.setVelocityLeft(config.blindDriveVelocity)
self._motorController.setVelocityRight(config.blindDriveVelocity)
time.sleep(0.1)
|
[
"julian.bigler@hotmail.com"
] |
julian.bigler@hotmail.com
|
b4145cea71900436372b71f6647a8f2fa6e6c415
|
26ec3f123ba0ff8339ac23f63e22bbca11d59d21
|
/client_1.py
|
2abf970c463edef19380f3151eb4512c122d0c51
|
[] |
no_license
|
wangbiao0327/nihao
|
e1319e9e8bc6c4a102a9e07820fa01faf666bacb
|
b66dcf7d502cdeb6383589ec758ae8256ab6dcd5
|
refs/heads/master
| 2020-03-30T00:51:22.975186
| 2018-09-27T02:34:51
| 2018-09-27T02:34:51
| 150,547,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
from socket import *
import sys,os
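# Wire protocol used with the server (as implemented by the handlers below):
#   'L <name>'         login request; server replies 'OK' or a refusal reason
#   'C <name> <text>'  chat message
#   'Q <name>'         quit notification
#   'EXIT'             sent by the server to terminate the receiving process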
# Send messages
def send_msg(s,name,addr):
    while True:
        text = input('Say: ')
        # Typing "quit" leaves the chat room
        if text.strip() == 'quit':
            msg = 'Q ' + name
            s.sendto(msg.encode(),addr)
            sys.exit('Left the chat room')
        msg = 'C %s %s'%(name,text)
        s.sendto(msg.encode(),addr)
# Receive messages
def recv_msg(s):
    while True:
        data,addr = s.recvfrom(2048)
        if data.decode() == 'EXIT':
            sys.exit(0)
        print(data.decode() + '\nSay: ',end='')
# Create the socket, log in, then fork a child process
def main():
    if len(sys.argv) < 3:
        print('usage: %s <host> <port>' % sys.argv[0])
        return
    HOST = sys.argv[1]
    PORT = int(sys.argv[2])
    ADDR = (HOST,PORT)
    # Create the socket
    s = socket(AF_INET,SOCK_DGRAM)
    while True:
        name = input('Enter your name: ')
        msg = 'L ' + name
        # Send the login request
        s.sendto(msg.encode(),ADDR)
        # Wait for the server's reply
        data,addr = s.recvfrom(1024)
        if data.decode() == 'OK':
            print('You have joined the chat room')
            break
        else:
            # On failure the server replies with the reason login was refused
            print(data.decode())
    # Fork into parent and child processes
    pid = os.fork()
    if pid < 0:
        sys.exit('Failed to create child process')
    elif pid == 0:
        send_msg(s,name,ADDR)
    else:
        recv_msg(s)
|
[
"Wangbiao@qq.com"
] |
Wangbiao@qq.com
|
768867aafb99ad6a6fd6c264bad58cd081cf5097
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/nlp/MT5_ID4146_for_PyTorch/transformers/src/transformers/tokenization_utils.py
|
dcf1b110430d308ef7ad31536fc79240fa171a6d
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 39,892
|
py
|
# coding=utf-8
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tokenization classes for python tokenizers. For fast tokenizers (provided by HuggingFace's tokenizers library) see
tokenization_utils_fast.py
"""
import bisect
import itertools
import re
import unicodedata
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union, overload
from .file_utils import PaddingStrategy, TensorType, add_end_docstrings
from .tokenization_utils_base import (
ENCODE_KWARGS_DOCSTRING,
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
INIT_TOKENIZER_DOCSTRING,
AddedToken,
BatchEncoding,
EncodedInput,
EncodedInputPair,
PreTokenizedInput,
PreTokenizedInputPair,
PreTrainedTokenizerBase,
TextInput,
TextInputPair,
TruncationStrategy,
)
from .utils import logging
logger = logging.get_logger(__name__)
# Slow tokenizers are saved in a vocabulary plus three separated files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
class Trie:
"""
Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass
Loose reference https://en.wikipedia.org/wiki/Trie
"""
def __init__(self):
self.data = {}
def add(self, word: str):
"""
Passes over every char (utf-8 char) on word and recursively adds it to the internal `data` trie representation.
The special key `""` is used to represent termination.
        This function is idempotent; adding the same word twice will leave the trie unchanged
Example:
```python
>>> trie = Trie()
>>> trie.add("Hello 友達")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
>>> trie.add("Hello")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
```
"""
if not word:
# Prevent empty string
return
ref = self.data
for char in word:
ref[char] = char in ref and ref[char] or {}
ref = ref[char]
ref[""] = 1
def split(self, text: str) -> List[str]:
"""
        Will look for the words added to the trie within `text`. Output is the original string split along the
        boundaries of the words found.
        This trie will match the longest possible word first!
Example:
```python
>>> trie = Trie()
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS] This is a extra_id_100"]
>>> trie.add("[CLS]")
>>> trie.add("extra_id_1")
>>> trie.add("extra_id_100")
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS]", " This is a ", "extra_id_100"]
```
"""
# indexes are counted left of the chars index.
# "hello", index 0, is left of h, index 1 is between h and e.
# index 5 is right of the "o".
# States are going to capture every possible start (indexes as above)
# as keys, and have as values, a pointer to the position in the trie
# where we're at. This is a partial match for now.
# This enables to keep track of multiple matches while we're iterating
# the string
# If the trie contains, "blowing", and "lower" and we encounter the
# string "blower", we need to split into ["b", "lower"].
# This is where we need to keep track of multiple possible starts.
states = OrderedDict()
# This will contain every indices where we need
# to cut.
# We force to cut at offset 0 and len(text) (added later)
offsets = [0]
# This is used by the lookahead which needs to skip over
# some text where the full match exceeded the place in the initial
# for loop
skip = 0
# Main loop, Giving this algorithm O(n) complexity
for current, current_char in enumerate(text):
if skip and current < skip:
# Prevents the lookahead for matching twice
# like extra_id_100 and id_100
continue
# This will track every state
# that stop matching, we need to stop tracking them.
# If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
# fail on "b", we need to remove 0 from the valid states.
to_remove = set()
# Whenever we found a match, we need to drop everything
# this is a greedy algorithm, it will match on the first found token
reset = False
# In this case, we already have partial matches (But unfinished)
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
# Lookahead to match longest first
# Important in case of extra_id_1 vs extra_id_100
# Here we are also actively looking for other earlier partial
# matches
# "[CLS]", "L", we need to match CLS even if L is special
for lookstart, looktrie_pointer in states.items():
if lookstart > start:
# This partial match is later, we can stop looking
break
elif lookstart < start:
# This partial match is earlier, the trie pointer
# was already updated, so index is + 1
lookahead_index = current + 1
end = current + 1
else:
# Here lookstart == start and
# looktrie_pointer == trie_pointer
# It wasn't updated yet so indices are current ones
lookahead_index = current
end = current
next_char = text[lookahead_index] if lookahead_index < len(text) else None
if "" in looktrie_pointer:
start = lookstart
end = lookahead_index
skip = lookahead_index
while next_char in looktrie_pointer:
looktrie_pointer = looktrie_pointer[next_char]
lookahead_index += 1
if "" in looktrie_pointer:
start = lookstart
end = lookahead_index
skip = lookahead_index
if lookahead_index == len(text):
# End of string
break
next_char = text[lookahead_index]
# End lookahead
# Storing and resetting
offsets.append(start)
offsets.append(end)
reset = True
break
elif current_char in trie_pointer:
# The current character being looked at has a match within the trie
# update the pointer (it will be stored back into states later).
trie_pointer = trie_pointer[current_char]
# Storing back the new pointer into the states.
# Partial matches got longer by one.
states[start] = trie_pointer
else:
# The new character has not match in the trie, we need
# to stop keeping track of this partial match.
# We can't do it directly within the loop because of how
# python iteration works
to_remove.add(start)
# Either clearing the full start (we found a real match)
# Or clearing only the partial matches that didn't work.
if reset:
states = {}
else:
for start in to_remove:
del states[start]
# If this character is a starting character within the trie
# start keeping track of this partial match.
if current >= skip and current_char in self.data:
states[current] = self.data[current_char]
# We have a cut at the end with states.
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
end = len(text)
offsets.append(start)
offsets.append(end)
# Longest cut is always the one with lower start so the first
# item so we need to break.
break
return self.cut_text(text, offsets)
def cut_text(self, text, offsets):
# We have all the offsets now, we just need to do the actual splitting.
# We need to eventually add the first part of the string and the eventual
# last part.
offsets.append(len(text))
tokens = []
start = 0
for end in offsets:
if start > end:
logger.error(
"There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
)
continue
elif start == end:
# This might happen if there's a match at index 0
# we're also preventing zero-width cuts in case of two
# consecutive matches
continue
tokens.append(text[start:end])
start = end
return tokens
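# Minimal usage sketch of the Trie above (mirrors the examples in the
# docstrings):
#   trie = Trie()
#   trie.add("[CLS]")
#   trie.split("[CLS] hello")  # -> ["[CLS]", " hello"]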
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
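# e.g. ord("^") == 94 and ord("`") == 96 fall in the 91-96 range, and
# ord("$") == 36 falls in the 33-47 range, so all three count as punctuation
# here even though Unicode does not put them in a "P" category.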
def _is_end_of_word(text):
"""Checks whether the last character in text is one of a punctuation, control or whitespace character."""
last_char = text[-1]
return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char))
def _is_start_of_word(text):
"""Checks whether the first character in text is one of a punctuation, control or whitespace character."""
first_char = text[0]
return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char))
def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str):
"""
    Inserts one token into an ordered list if it does not already exist. Note: token_list must be sorted.
"""
insertion_idx = bisect.bisect_left(token_list, new_token)
# Checks if new_token is already in the ordered token_list
if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token:
# new_token is in token_list, don't add
return
else:
token_list.insert(insertion_idx, new_token)
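# For example, with token_list == ["a", "c"]:
#   _insert_one_token_to_ordered_list(token_list, "b")  # -> ["a", "b", "c"]
#   _insert_one_token_to_ordered_list(token_list, "b")  # no-op, already present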
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizer(PreTrainedTokenizerBase):
"""
Base class for all slow tokenizers.
Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].
Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading
pretrained tokenizers as well as adding tokens to the vocabulary.
    This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Added tokens - We store this for both slow and fast tokenizers
# until the serialization of Fast tokenizers is updated
self.added_tokens_encoder: Dict[str, int] = {}
self.added_tokens_decoder: Dict[int, str] = {}
self.unique_no_split_tokens: List[str] = []
self.tokens_trie = Trie()
self._decode_use_source_tokenizer = False
@property
def is_fast(self) -> bool:
return False
@property
def vocab_size(self) -> int:
"""
`int`: Size of the base vocabulary (without the added tokens).
"""
raise NotImplementedError
def get_added_vocab(self) -> Dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index.
Returns:
`Dict[str, int]`: The added tokens.
"""
return self.added_tokens_encoder
def __len__(self):
"""
Size of the full vocabulary with the added tokens.
"""
return self.vocab_size + len(self.added_tokens_encoder)
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
it with indices starting from length of the current vocabulary.
Args:
new_tokens (`List[str]`or `List[tokenizers.AddedToken]`):
Token(s) to add in vocabulary. A token is only added if it's not already in the vocabulary (tested by
checking if the tokenizer assign the index of the `unk_token` to them).
special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the tokens should be added as special tokens.
Returns:
`int`: The number of tokens actually added to the vocabulary.
Examples:
```python
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
print("We have added", num_added_toks, "tokens")
# Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
```"""
new_tokens = [str(tok) for tok in new_tokens]
tokens_to_add = []
for token in new_tokens:
if not isinstance(token, str):
raise TypeError(f"Token {token} is not a string but a {type(token)}.")
if not special_tokens and hasattr(self, "do_lower_case") and self.do_lower_case:
token = token.lower()
if (
token != self.unk_token
and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
and token not in tokens_to_add
):
tokens_to_add.append(token)
if self.verbose:
logger.info(f"Adding {token} to the vocabulary")
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.added_tokens_decoder.update(added_tok_decoder)
        # Make sure we don't split on any special tokens (even if they were already in the vocab before, e.g. for Albert)
if special_tokens:
if len(new_tokens) == 1:
_insert_one_token_to_ordered_list(self.unique_no_split_tokens, new_tokens[0])
else:
self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(new_tokens)))
else:
# Or on the newly added tokens
if len(tokens_to_add) == 1:
_insert_one_token_to_ordered_list(self.unique_no_split_tokens, tokens_to_add[0])
else:
self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(tokens_to_add)))
self._create_trie(self.unique_no_split_tokens)
return len(tokens_to_add)
def _create_trie(self, unique_no_split_tokens):
trie = Trie()
for token in unique_no_split_tokens:
if hasattr(self, "do_lower_case") and self.do_lower_case and token not in self.all_special_tokens:
trie.add(token.lower())
else:
trie.add(token)
self.tokens_trie = trie
def num_special_tokens_to_add(self, pair: bool = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
<Tip>
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put
this inside your training loop.
</Tip>
Args:
pair (`bool`, *optional*, defaults to `False`):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence.
Returns:
`int`: Number of special tokens added to sequences.
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
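    # Illustrative sketch (not part of the original file): for a BERT-style tokenizer this
    # typically reports 2 for a single sequence ([CLS] ... [SEP]) and 3 for a pair
    # ([CLS] ... [SEP] ... [SEP]); the exact counts are model-specific.
    #   >>> tokenizer.num_special_tokens_to_add()           # e.g. 2
    #   >>> tokenizer.num_special_tokens_to_add(pair=True)  # e.g. 3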
def tokenize(self, text: TextInput, **kwargs) -> List[str]:
"""
        Converts a string into a sequence of tokens, using the tokenizer.
        Splits into words for word-based vocabularies or into sub-words for sub-word-based vocabularies
        (BPE/SentencePiece/WordPiece). Takes care of added tokens.
Args:
text (`str`):
The sequence to be encoded.
**kwargs (additional keyword arguments):
Passed along to the model-specific `prepare_for_tokenization` preprocessing method.
Returns:
`List[str]`: The list of tokens.
"""
# Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
all_special_tokens_extended = dict(
(str(t), t) for t in self.all_special_tokens_extended if isinstance(t, AddedToken)
)
text, kwargs = self.prepare_for_tokenization(text, **kwargs)
if kwargs:
logger.warning(f"Keyword arguments {kwargs} not recognized.")
# TODO: should this be in the base class?
if hasattr(self, "do_lower_case") and self.do_lower_case:
# convert non-special tokens to lowercase
escaped_special_toks = [
re.escape(s_tok) for s_tok in (self.unique_no_split_tokens + self.all_special_tokens)
]
pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)
no_split_token = set(self.unique_no_split_tokens)
tokens = self.tokens_trie.split(text)
# ["This is something", "<special_token_1>", " else"]
for i, token in enumerate(tokens):
if token in no_split_token:
tok_extended = all_special_tokens_extended.get(token, None)
left = tokens[i - 1] if i > 0 else None
right = tokens[i + 1] if i < len(tokens) - 1 else None
if isinstance(tok_extended, AddedToken):
if tok_extended.rstrip and right:
# A bit counter-intuitive but we strip the left of the string
# since tok_extended.rstrip means the special token is eating all white spaces on its right
tokens[i + 1] = right.lstrip()
# Strip white spaces on the left
if tok_extended.lstrip and left:
tokens[i - 1] = left.rstrip() # Opposite here
else:
# We strip left and right by default
if right:
tokens[i + 1] = right.lstrip()
if left:
tokens[i - 1] = left.rstrip()
# ["This is something", "<special_token_1>", "else"]
tokenized_text = []
for token in tokens:
            # Need to skip any empty (fully stripped) tokens
if not token:
continue
if token in no_split_token:
tokenized_text.append(token)
else:
tokenized_text.extend(self._tokenize(token))
# ["This", " is", " something", "<special_token_1>", "else"]
return tokenized_text
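    # Illustrative sketch (not part of the original file): tokens registered via
    # `add_tokens` survive `tokenize` unsplit thanks to the trie, while the remaining
    # text goes through the model-specific `_tokenize`. The output shown is an assumption.
    #   >>> tokenizer.add_tokens(["new_tok1"])
    #   >>> tokenizer.tokenize("hello new_tok1 world")
    #   ['hello', 'new_tok1', 'world']  # exact sub-word splits depend on the model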
def _tokenize(self, text, **kwargs):
"""
        Converts a string into a sequence of tokens (strings), using the tokenizer. Splits into words for word-based
        vocabularies or into sub-words for sub-word-based vocabularies (BPE/SentencePiece/WordPiece).
        Does NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
"""
        Converts a token string (or a sequence of tokens) into a single integer id (or a sequence of ids), using the
vocabulary.
Args:
tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s).
Returns:
`int` or `List[int]`: The token id or list of token ids.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
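    # Illustrative sketch (not part of the original file): a typical round trip; the ids
    # shown are placeholder values, real ids depend on the vocabulary.
    #   >>> ids = tokenizer.convert_tokens_to_ids(["hello", "world"])  # e.g. [7592, 2088]
    #   >>> tokenizer.convert_ids_to_tokens(ids)
    #   ['hello', 'world']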
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
if is_split_into_words:
tokens = list(
itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text))
)
return self.convert_tokens_to_ids(tokens)
else:
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
if is_split_into_words:
raise ValueError(
f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_split_into_words=True`."
)
else:
raise ValueError(
f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast. "
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
return self.prepare_for_model(
first_ids,
pair_ids=second_ids,
add_special_tokens=add_special_tokens,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
prepend_batch_axis=True,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
if is_split_into_words:
tokens = list(
itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text))
)
return self.convert_tokens_to_ids(tokens)
else:
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
)
input_ids = []
for ids_or_pair_ids in batch_text_or_text_pairs:
if not isinstance(ids_or_pair_ids, (list, tuple)):
ids, pair_ids = ids_or_pair_ids, None
elif is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)):
ids, pair_ids = ids_or_pair_ids, None
else:
ids, pair_ids = ids_or_pair_ids
first_ids = get_input_ids(ids)
second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
input_ids.append((first_ids, second_ids))
batch_outputs = self._batch_prepare_for_model(
input_ids,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=return_tensors,
verbose=verbose,
)
return BatchEncoding(batch_outputs)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(
self,
batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
"""
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
"""
batch_outputs = {}
for first_ids, second_ids in batch_ids_pairs:
outputs = self.prepare_for_model(
first_ids,
second_ids,
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterward
return_attention_mask=False, # we pad in batch afterward
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
def prepare_for_tokenization(
self, text: str, is_split_into_words: bool = False, **kwargs
) -> Tuple[str, Dict[str, Any]]:
"""
Performs any necessary transformations before tokenization.
This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
`kwargs` at the end of the encoding process to be sure all the arguments have been used.
Args:
text (`str`):
The text to prepare.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
kwargs:
Keyword arguments to use for the tokenization.
Returns:
`Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
"""
return (text, kwargs)
def get_special_tokens_mask(
self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of ids of the first sequence.
token_ids_1 (`List[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
        return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
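    # Illustrative sketch (not part of the original file): without special tokens already
    # present, this base implementation returns one 0 per input id.
    #   >>> tokenizer.get_special_tokens_mask([5, 6], [7])
    #   [0, 0, 0]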
@overload
def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str:
...
@overload
def convert_ids_to_tokens(self, ids: List[int], skip_special_tokens: bool = False) -> List[str]:
...
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
"""
        Converts a single index or a sequence of indices into a token or a sequence of tokens, using the vocabulary and
added tokens.
Args:
ids (`int` or `List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
Returns:
`str` or `List[str]`: The decoded token(s).
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
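    # Illustrative sketch (not part of the original file): with `skip_special_tokens=True`
    # the loop above drops any id listed in `all_special_ids`. The ids are placeholders.
    #   >>> tokenizer.convert_ids_to_tokens([101, 7592, 102], skip_special_tokens=True)
    #   ['hello']  # assuming 101/102 are special-token ids and 7592 maps to 'hello'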
def _convert_id_to_token(self, index: int) -> str:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: List[str]) -> str:
return " ".join(tokens)
def _decode(
self,
token_ids: List[int],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
spaces_between_special_tokens: bool = True,
**kwargs
) -> str:
self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_tokens:  # compare against token strings, not ids: `filtered_tokens` holds strings
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
if spaces_between_special_tokens:
text = " ".join(sub_texts)
else:
text = "".join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
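    # Illustrative sketch (not part of the original file): the public `decode` entry point
    # (defined on the base class) funnels into `_decode` above. The ids are placeholders.
    #   >>> tokenizer.decode([7592, 2088])
    #   'hello world'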
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
38e4a8deed62e3575fce47fec504b754d64bf0a4
|
f659aab67a15b96f383cfcd37255349a416f40b7
|
/TataDjango/wsgi.py
|
95f1e1d28394cc8b27d4110a8614811c7e5cd44f
|
[] |
no_license
|
talitalopes/django-learning
|
10006de27d4e3c804a5975b1451a13f10b723203
|
02e844db21b6d167b7abac8c8b41d69dd2f87ba5
|
refs/heads/master
| 2020-07-11T17:16:02.367795
| 2017-06-17T02:03:08
| 2017-06-17T02:03:11
| 94,273,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
"""
WSGI config for TataDjango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TataDjango.settings")
application = get_wsgi_application()
|
[
"talitalopes@gmail.com"
] |
talitalopes@gmail.com
|
7630eba99b49fec3058efa02c252e3607ba0c0dc
|
e53373d072d15da1316a61043747ba5544da9a09
|
/website/auth/forms.py
|
88c08b3ec44879793977ba9161e1e635a19f54fb
|
[] |
no_license
|
OWF/owf2014
|
3de4b2c825dd74a68c20036d0108eb0af1e3208d
|
3d64302bb43c43fa1bd332490da62739e70aa126
|
refs/heads/master
| 2020-04-05T22:48:34.143204
| 2014-12-05T15:19:59
| 2014-12-05T15:19:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
# coding=utf-8
from flask.ext.wtf import Form, TextField, TextAreaField, required
from flask.ext.babel import lazy_gettext as _l
from wtforms import SelectField
__all__ = ['RegistrationForm', 'BaseRegistrationForm']
org_types = [
u"",
u"Auto-entrepreneur",
u"PME",
u"ETI",
u"Grand Groupe",
u"Investisseur",
u"Académique",
u"Institutionnel",
u"Autre",
]
org_types = [(x, x) for x in org_types]
class RegistrationForm(Form):
first_name = TextField(label=_l("First name"),
validators=[required()])
last_name = TextField(label=_l("Last name"),
validators=[required()])
title = TextField(label=_l("Title"),
validators=[required()])
organization = TextField(label=_l("Organization"),
validators=[required()])
organization_type = SelectField(label=_l("Organization type"),
choices=org_types,
validators=[required()])
url = TextField(label=_l("URL"))
twitter_handle = TextField(label=_l("Twitter handle"))
biography = TextAreaField(label=_l("Biography"))
# github_handle = Column(UnicodeText(200), default="", nullable=False)
# sourceforge_handle = Column(UnicodeText(200), default="", nullable=False)
# linkedin_url = Column(UnicodeText(200), default="", nullable=False)
class UnsecureRegistrationForm(RegistrationForm):
def validate_csrf_token(self, field):
return
|
[
"sf@fermigier.com"
] |
sf@fermigier.com
|
d9668b913095a52b51c777087c7b0cdbcf0f0923
|
a1c08f74e2faf093d395eacf1d8145bbf22b0774
|
/adventofcode/adv_6.py
|
f69977068e1fafd4f45820d50cdf6800a502e9e8
|
[] |
no_license
|
prusinskiPiotr/algorithms
|
fe4e8ec46166370b16a92321f1b1f43df38a4b27
|
7a6c9bf167d162b765b6cce31317e864fba14ccd
|
refs/heads/master
| 2021-06-07T21:31:05.549872
| 2019-12-16T16:05:38
| 2019-12-16T16:05:38
| 137,759,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
from collections import defaultdict
with open('data_6.txt') as file:
data = file.readlines()
a = [line.rstrip().split(')') for line in data]
def DFS(G,v,seen=None,path=None):
if seen is None: seen = []
if path is None: path = [v]
seen.append(v)
paths = []
for t in G[v]:
if t not in seen:
t_path = path + [t]
paths.append(list(t_path))
paths.extend(DFS(G, t, seen[:], t_path))
return paths
G = defaultdict(list)
for (s,t) in a:
G[s].append(t)
G[t].append(s)
longest_depth_paths = DFS(G, 'COM')
orbits_sum = sum(len(i)-1 for i in longest_depth_paths)
# print(orbits_sum)
all_paths = [p for ps in [DFS(G, n) for n in set(G)] for p in ps]
san = [i for i in all_paths if ('YOU' in i) and ('SAN' in i)]
print(len(san[0])-3)
# this code is terribly inefficient and takes forever to execute
# but it gets the result.
|
[
"prusinski.pio@gmail.com"
] |
prusinski.pio@gmail.com
|
c12bcb3649544f920ec15252f9c474f129c7e660
|
0cf29d08fb67b9472867998884737d8d4487ddee
|
/wdqms/context_processors.py
|
ae46e0f1bc3de94a85537eee346199c88e8fb2eb
|
[] |
no_license
|
kurt-hectic/wdqms
|
8971f2b2cecd97680a308f663a6f7bfe01b2587b
|
fbc40c958492bff4c9a489e6aee819993ef57b00
|
refs/heads/master
| 2020-04-08T19:15:12.107424
| 2018-09-14T10:02:36
| 2018-09-14T10:02:36
| 159,646,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
from django.conf import settings
def global_settings(request):
# return any necessary values
return {
'OSCAR_STATION_REPORT': 'https://oscar.wmo.int/surface/index.html#/search/station/stationReportDetails/',
'GEOSERVER_URL' : 'http://128.65.196.37:80/geoserver/wdqms/wms'
}
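# Illustrative note (not part of the original file): for Django to call this context
# processor it must be registered in settings.TEMPLATES, roughly like:
#   TEMPLATES = [{
#       ...,
#       "OPTIONS": {"context_processors": [..., "wdqms.context_processors.global_settings"]},
#   }]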
|
[
"timo@proescholdt.de"
] |
timo@proescholdt.de
|
b16c733060a86bcf7bce7aaf30a362827d156232
|
cf8ff9bb2b09dd9be080954120aa1977ec1945e8
|
/week3/test_fixture1.py
|
1ca08a3c357aab9d2f81176096f1fd4d0394c2c8
|
[] |
no_license
|
SaleevaMariia/stepik---auto-tests-course-python
|
aa34e2eedce8ec178391355c7dd17b52b3a48c4a
|
a751cc88320752d6a0ae426f4c3b9d6871423d35
|
refs/heads/master
| 2022-06-10T10:01:59.487936
| 2020-05-06T17:44:03
| 2020-05-06T17:44:03
| 260,880,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
from selenium import webdriver
link = "http://selenium1py.pythonanywhere.com/"
class TestMainPage1():
@classmethod
def setup_class(self):
print("\nstart browser for test suite1..")
self.browser = webdriver.Chrome()
@classmethod
def teardown_class(self):
print("quit browser for test suite1..")
self.browser.quit()
def test_guest_should_see_login_link(self):
self.browser.get(link)
self.browser.find_element_by_css_selector("#login_link")
def test_guest_should_see_basket_link_on_the_main_page(self):
self.browser.get(link)
self.browser.find_element_by_css_selector(".basket-mini .btn-group > a")
class TestMainPage2():
def setup_method(self):
print("start browser for test2..")
self.browser = webdriver.Chrome()
def teardown_method(self):
print("quit browser for test2..")
self.browser.quit()
def test_guest_should_see_login_link(self):
self.browser.get(link)
self.browser.find_element_by_css_selector("#login_link")
def test_guest_should_see_basket_link_on_the_main_page(self):
self.browser.get(link)
self.browser.find_element_by_css_selector(".basket-mini .btn-group > a")
|
[
"sunlimen13@gmail.com"
] |
sunlimen13@gmail.com
|
732f0768302d0ef7fc24343b319572ce36afe58b
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/digress.py
|
88c39e0e78e7f37ca3dfbb2e1dbdb99c4c79e106
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 624
|
py
|
ii = [('GodwWSL2.py', 3), ('SadlMLP.py', 3), ('ProuWCM.py', 1), ('WilbRLW5.py', 1), ('FitzRNS3.py', 1), ('GrimSLE.py', 1), ('KiddJAE.py', 2), ('AdamHMM.py', 1), ('RoscTTI2.py', 1), ('CoolWHM.py', 1), ('CrokTPS.py', 1), ('ClarGE.py', 1), ('DibdTRL2.py', 1), ('MedwTAI.py', 5), ('WadeJEB.py', 2), ('CoopJBT.py', 3), ('KirbWPW2.py', 3), ('MedwTAI2.py', 1), ('SoutRD.py', 2), ('HogaGMM.py', 1), ('FitzRNS4.py', 1), ('HaliTBC.py', 1), ('AinsWRR2.py', 1), ('MereHHB2.py', 1), ('JacoWHI.py', 2), ('ClarGE3.py', 1), ('DibdTRL.py', 8), ('FitzRNS2.py', 5), ('MartHSI.py', 1), ('SadlMLP2.py', 4), ('LyelCPG3.py', 2), ('ChalTPW.py', 1)]
|
[
"prabhjyotsingh95@gmail.com"
] |
prabhjyotsingh95@gmail.com
|
e1d1982c72f41e76af6ee2e389c30e70c274561b
|
a7dba1fb8adf8ccbb4a58fd15fff55686e22fd17
|
/programLicenceReader.py
|
41e9e84da0c4ae7df0139faadb79d771c0dc0bc6
|
[] |
no_license
|
aniket1418/NumberPlateDetection
|
25e3e8db46f19f6f98f26b3e3e5ea92a82dc0184
|
f8f8c04eb79093d11c2e45acca0c0e6843a326d5
|
refs/heads/master
| 2023-04-10T23:07:55.716495
| 2021-04-23T08:37:14
| 2021-04-23T08:37:14
| 360,817,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,780
|
py
|
import cv2
import pytesseract
# Read the image file
image = cv2.imread('26.jpg')
# your path may be different
pytesseract.pytesseract.tesseract_cmd = 'C:/OCR/Tesseract-OCR/tesseract.exe'
# Convert to Grayscale Image
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Canny Edge Detection
canny_edge = cv2.Canny(gray_image, 170, 200)
# Find contours based on Edges
contours, new = cv2.findContours(
canny_edge.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:30]
# Initialize license Plate contour and x,y coordinates
contour_with_license_plate = None
license_plate = None
x = None
y = None
w = None
h = None
# Find the contour with 4 potential corners and create ROI around it
for contour in contours:
# Find Perimeter of contour and it should be a closed contour
perimeter = cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, 0.01 * perimeter, True)
if len(approx) == 4: # see whether it is a Rect
contour_with_license_plate = approx
x, y, w, h = cv2.boundingRect(contour)
license_plate = gray_image[y:y + h, x:x + w]
break
# Removing Noise from the detected image, before sending to Tesseract
license_plate = cv2.bilateralFilter(license_plate, 11, 17, 17)
(thresh, license_plate) = cv2.threshold(
license_plate, 150, 180, cv2.THRESH_BINARY)
# Text Recognition
text = pytesseract.image_to_string(license_plate)
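# Illustrative tweak (not part of the original file): constraining Tesseract's page
# segmentation often helps on single-line plates; `config` is a real pytesseract
# parameter, the particular psm value is only a suggestion.
# text = pytesseract.image_to_string(license_plate, config='--psm 7')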
# Draw License Plate and write the Text
image = cv2.rectangle(image, (x, y), (x+w, y+h), (34, 148, 3), 3)
image = cv2.putText(image, text, (x-100, y-50),
cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 6, cv2.LINE_AA)
print("License Plate :", text)
cv2.imshow("License Plate Detection", image)
cv2.waitKey(0)
|
[
"17btrcs029@jainuniversity.ac.in"
] |
17btrcs029@jainuniversity.ac.in
|
fb40df8b8bb3763c19d5f1e061fe5d6df51e162a
|
73c5a80b8ef2de609737a57c48b4190196bd4d20
|
/shell/database/LinuxMIPS/chmod.py
|
ba78fc467c6b66508d4b1f7f89c19243244c81dd
|
[
"MIT"
] |
permissive
|
raildex1/shellsploit-framework
|
1674a2a0215ce0c87b0ad7f284e9b7c42834546b
|
a16d22fdffa5d9369cd55c8768d327e5abf8e648
|
refs/heads/master
| 2021-01-20T01:27:25.919068
| 2016-12-08T08:57:51
| 2016-12-08T08:57:51
| 89,279,926
| 2
| 1
| null | 2017-04-24T19:37:04
| 2017-04-24T19:37:04
| null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
from lib.payloads.shellcode import Shellcode
class Payload(Shellcode):
Shellcode.info["author"] = "Sang-Min LEE"
Shellcode.info["name"] = "LinuxMIPS - chmod shellcode"
Shellcode.info["references"] = [
"https://www.exploit-db.com/exploits/36276/"
]
def __init__(self, **kwargs):
Shellcode.info["size"] = 44 + Shellcode().getsize(kwargs["file"])
Shellcode.info["payload"] = [
r"\xff\xff\x06\x28\xff\xff"
r"\xd0\x04\xff\xff\x05\x28"
r"\xb6\x01\x05\x24\x01\x10"
r"\xe4\x27\x1f\xf0\x84\x24"
r"\xaf\x0f\x02\x24\x0c\x01"
r"\x01\x01\xff\xff\x04\x28"
r"\xa1\x0f\x02\x24\x0c\x01"
r"\x01\x01"
+ kwargs["file"]
]
|
[
"b3mb4m@tuta.io"
] |
b3mb4m@tuta.io
|
ec2aabe61cdf4a348deb2d0d6b01450aff7dffa8
|
4fb67bbf06a2ebe1213aea66b4ab97031dbf04b2
|
/PyGames/Sanke.py
|
d8d9c067c397c086981c85de6c8db506161e83fc
|
[] |
no_license
|
ArunCSK/MachineLearningAlgorithms
|
72110dec8c61e358da0c6d51753fb81142673812
|
81278433a51d1f9fe60faf877a00b5aa5a8c6c7d
|
refs/heads/master
| 2021-06-11T17:35:43.098620
| 2021-05-22T09:11:22
| 2021-05-22T09:11:22
| 191,166,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,806
|
py
|
#py -m pip install -U pygame --user
#py -m pygame.examples.aliens
#pip install pygame
import pygame
import time
import random
pygame.init()
dis_width = 600
dis_height = 500
dis = pygame.display.set_mode((dis_width,dis_height))
#play_area = pygame.display.set_mode((dis_width-20,dis_height-50))
pygame.display.set_caption("Snake Game")
blue=(0,0,255)
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
clock = pygame.time.Clock()
snake_speed=15
font_style = pygame.font.SysFont("bahnschrift", 30)
score_font = pygame.font.SysFont("comicsansms", 35)
def Your_score(score):
value = score_font.render("Your Score: " + str(score), True, blue)
dis.blit(value, [20, 0])
def our_snake(snake_block, snake_list):
for x in snake_list:
pygame.draw.rect(dis, black, [x[0], x[1], snake_block, snake_block])
def message(msg,color):
mesg = font_style.render(msg, True, color)
if(msg == "Game Over!!!"):
dis.blit(mesg, [dis_width/3, dis_height/2])
else:
dis.blit(mesg, [dis_width/12, dis_height/3])
def game_loop():
game_over = False
game_close = False
x1 = dis_width / 2
y1 = dis_height / 2
snake_block = 10
x1_change = 0
y1_change = 0
snake_List = []
Length_of_snake = 1
foodx = round(random.randrange(20, 580) / 10.0) * 10.0
foody = round(random.randrange(50, 480) / 10.0) * 10.0
while not game_over:
while game_close == True:
dis.fill(black)
message("You Lost! Press C-Play Again or Q-Quit", red)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
game_over = True
game_close = False
if event.key == pygame.K_c:
game_loop()
for event in pygame.event.get():
#print(event)
if event.type == pygame.QUIT:
game_over = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x1_change = -10
y1_change = 0
elif event.key == pygame.K_RIGHT:
x1_change = 10
y1_change = 0
elif event.key == pygame.K_UP:
x1_change = 0
y1_change = -10
elif event.key == pygame.K_DOWN:
x1_change = 0
y1_change = 10
if x1 >= 580 or x1 < 20 or y1 >= 480 or y1 < 50:
game_over = True
x1 += x1_change
y1 += y1_change
dis.fill(black)
#Define Game Area
pygame.draw.rect(dis, white, [dis_width-580, dis_height-450, dis_width-40, dis_height- 70])
pygame.draw.rect(dis, black, [foodx, foody, snake_block, snake_block])
snake_Head = []
snake_Head.append(x1)
snake_Head.append(y1)
snake_List.append(snake_Head)
if len(snake_List) > Length_of_snake:
del snake_List[0]
for x in snake_List[:-1]:
if x == snake_Head:
game_close = True
#pygame.draw.rect(dis, blue, [x1,y1, snake_block,snake_block])
our_snake(snake_block, snake_List)
Your_score(Length_of_snake - 1)
pygame.display.update()
clock.tick(snake_speed)
if x1 == foodx and y1 == foody:
foodx = round(random.randrange(20, 580) / 10.0) * 10.0
foody = round(random.randrange(50, 470) / 10.0) * 10.0
Length_of_snake += 1
clock.tick(snake_speed)
message("Game Over!!!",red)
pygame.display.update()
time.sleep(1)
pygame.quit()
#quit()
game_loop()
|
[
"arunsubburaj@gmail.com"
] |
arunsubburaj@gmail.com
|
4bfe8c3ab1a24ab6ee9a54399f9e9409deeff0bd
|
75b3b691f9520434212e7caec4e4a0eacfa268e0
|
/stacks.py
|
7c6eed6d0fa0089d0d25a673f6f72a200d622dec
|
[] |
no_license
|
rohunvora/stacks_and_queues
|
64c510f4de13bc7283dff6bc06eeda0a6a441f8e
|
62fcd8e531fff61b7e52f54ec7535e2a24ee188b
|
refs/heads/master
| 2022-12-15T21:33:09.614322
| 2020-09-15T23:42:53
| 2020-09-15T23:42:53
| 295,867,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
# A class to represent a queue
# The queue, front stores the front node
# of LL and rear stores the last node of LL
class Queue:
def __init__(self):
self.front = self.rear = None
    def isEmpty(self):
        return self.front is None
# Method to add an item to the queue
def EnQueue(self, item):
temp = Node(item)
        if self.rear is None:
self.front = self.rear = temp
return
self.rear.next = temp
self.rear = temp
# Method to remove an item from queue
def DeQueue(self):
if self.isEmpty():
return
temp = self.front
self.front = temp.next
        if self.front is None:
self.rear = None
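# Illustrative usage sketch (not part of the original file): checks FIFO order.
if __name__ == '__main__':
    q = Queue()
    for item in (1, 2, 3):
        q.EnQueue(item)
    q.DeQueue()          # removes 1, the oldest item
    print(q.front.data)  # -> 2
    print(q.isEmpty())   # -> False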
|
[
"emailrohun@gmail.com"
] |
emailrohun@gmail.com
|
9c5b71d7d9cb2e80ea971a626edf0ebc3538a0b6
|
63da7169cc5896a13bb0adb9cae8214f429ab377
|
/模块/datetime模块.py
|
ad8675597c6424cdf68507060c84d874436dc906
|
[] |
no_license
|
StarvWd/Python
|
37ec676cfd9ba1338463e91125c62c6640d8e049
|
8c8c82ed956a8b6375a98453d361f9f0dfb32b9d
|
refs/heads/master
| 2020-12-01T16:19:19.614658
| 2020-02-25T04:25:17
| 2020-02-25T04:25:17
| 230,695,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
import datetime
import time
import calendar
a = datetime.datetime.now()
print(a)
# Function that converts a date string to a weekday name
trans_weekday = lambda datastring: calendar.day_name[datetime.datetime.strptime(datastring, '%Y/%m/%d').weekday()]
date_str = '2019/12/22'  # renamed from `str` to avoid shadowing the builtin
b = trans_weekday(date_str)
print(b)
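# Illustrative alternative (not part of the original file): strftime('%A') gives the
# weekday name directly, though its output is locale-dependent while calendar.day_name
# is English by default.
print(datetime.datetime.strptime(date_str, '%Y/%m/%d').strftime('%A'))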
|
[
"l785655536@163.com"
] |
l785655536@163.com
|
b9df64ecf6c998a4751b231993484ce5b837f39d
|
78f3fe4a148c86ce9b80411a3433a49ccfdc02dd
|
/2018/09/gender-turnout-20180921/graphic_config.py
|
cac7c7afe0eff34c3c389d01a7317c13de7e1ad1
|
[] |
no_license
|
nprapps/graphics-archive
|
54cfc4d4d670aca4d71839d70f23a8bf645c692f
|
fe92cd061730496cb95c9df8fa624505c3b291f8
|
refs/heads/master
| 2023-03-04T11:35:36.413216
| 2023-02-26T23:26:48
| 2023-02-26T23:26:48
| 22,472,848
| 16
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
#!/usr/bin/env python
import base_filters
COPY_GOOGLE_DOC_KEY = '1TSfm6BAGeGcN6-clhig9CKt0hW0jVvyxehX9Ard05dg'
USE_ASSETS = False
# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
|
[
"ahurt@npr.org"
] |
ahurt@npr.org
|
fa3332729e6800356f03317bd11286ce0dc48d0d
|
4b69e2310e2147a302d99f8485b01f11702759ad
|
/ScrapingTwitter2.py
|
91a6582682d4b6d8e8109de030393807f2d759de
|
[] |
no_license
|
guilhermealfred/EstudosPython
|
9d3c8697a43f08ae4f9ddc9a89d6a284d6f89aaf
|
55832d0fe157a337aa65661a7822d1a51a5d5223
|
refs/heads/master
| 2020-03-17T15:15:47.308914
| 2018-07-06T21:39:01
| 2018-07-06T21:39:01
| 133,703,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,200
|
py
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup as bs
from tkinter import *
from PIL import Image
import urllib.request
import os
class scraping(object):
def __init__(self):
self.tela = Tk()
self.tela.title('Scraping Twitter')
self.tela.geometry('400x400+400+200')
self.tela.resizable(False,False)
self.tela['bg'] = '#61f2cf'
self.framelink = Frame(self.tela)
self.framelink.pack()
self.alert = Label(self.tela,fg='red',bg='#61f2cf',font=('Ubuntu Condensed',12))
self.alert.pack()
self.lblink = Label(self.framelink,text='Link',font=('Ubuntu Condensed',12),padx = 25)
self.lblink.pack(side=LEFT)
self.frameimg = Frame(self.tela,pady = 10,bg='#61f2cf')
self.frameimg.pack()
self.link = Entry(self.framelink,width=40)
self.link.focus_force()
self.link.insert(END,'https://twitter.com/')
self.link.bind('<Return>',self.handle)
self.link.pack(side=LEFT)
self.tela.mainloop()
@property
def get_tweets(self):
try:
self.tweets = self.soup.find('a',class_="ProfileNav-stat ProfileNav-stat--link u-borderUserColor u-textCenter js-tooltip js-nav")
return self.tweets.get('title')
except:
return 'Não tem nenhum tweet'
@property
def get_following(self):
try:
self.fl = self.soup.find('li',class_='ProfileNav-item ProfileNav-item--following')
self.following = self.fl.find('a',class_="ProfileNav-stat ProfileNav-stat--link u-borderUserColor u-textCenter js-tooltip js-openSignupDialog js-nonNavigable u-textUserColor").get('title').split()
return str(self.following[1].replace('s','S')) + ' ' + str(self.following[0]) + ' pessoas'
except:
return 'Não segue nenhum perfil'
@property
def get_followers(self):
try:
self.flo = self.soup.find('li',class_='ProfileNav-item ProfileNav-item--followers')
self.followers = self.flo.find('a',class_="ProfileNav-stat ProfileNav-stat--link u-borderUserColor u-textCenter js-tooltip js-openSignupDialog js-nonNavigable u-textUserColor").get('title')
return self.followers
except:
return 'Não possui seguidores'
@property
def get_photos(self):
try:
self.ph = self.soup.find('a',class_='PhotoRail-headingWithCount js-nav')
if self.ph.text.strip() == '0 Foto ou vídeo':
return 'Não possui fotos/vídeos'
return ' '* 15 + self.ph.text.lstrip()
except:
pass
def handle(self,event):
try:
self.site = urllib.request.urlopen(self.link.get()).read()
self.soup = bs(self.site,'lxml')
try:
			if self.soup.find('input', value="app/pages/profile/highline_landing") is not None:
self.download()
else:
raise
except:
raise
except Exception as e:
self.alert['text'] = 'Perfil Inválido!.Formato de entrada : https://twitter.com/perfil'
print(e)
def download(self):
try:
img = self.soup.find('img',class_='ProfileAvatar-image ').get('src')
urllib.request.urlretrieve(img,'img.png')
except:
urllib.request.urlretrieve('https://abs.twimg.com/a/1527200258/img/t1/highline/empty_state/owner_empty_avatar.png','img.png')
finally:
self.handle_image()
def handle_image(self):
image = Image.open('img.png')
new_img = image.resize((150,150))
new_img.save('perfil.gif')
self.photo = PhotoImage(file='perfil.gif')
self.save = self.photo
self.organize()
def organize(self):
self.alert.pack_forget()
self.lblink.pack_forget()
self.link.pack_forget()
self.framelink.pack_forget()
self.tela.title(f'Perfil-@{self.soup.find("b",class_="u-linkComplex-target").text}')
self.lb = Label(self.frameimg,image=self.photo,bg='#61f2cf')
self.lb.pack()
os.system('rm img.png')
os.system('rm perfil.gif')
self.frameinfo = Frame(self.tela,bg='#61f2cf')
self.frameinfo.pack()
lista = [self.get_following,self.get_followers,self.get_photos,self.get_tweets]
for i in lista:
self.lbi = Label(self.frameinfo,text=i,font=('Ubuntu Condensed',12),fg='#4c4c4c',bg='#61f2cf')
self.lbi.pack()
self.framebt = Frame(self.tela,pady=30,bg='#61f2cf')
self.framebt.pack(side=BOTTOM)
self.again = Button(self.framebt,text='Scrapar denovo',width=13,bg='#61f2cf',borderwidth=0,command=self.dnv,font=('Ubuntu Condensed',12),fg='black')
self.again.pack()
def dnv(self):
self.tela.destroy()
scraping()
|
[
"noreply@github.com"
] |
guilhermealfred.noreply@github.com
|
e6f786e878bd5df9b166ba7e66963ce750bd25de
|
738fa77629258bb81a1048fd595cc6937e1e1231
|
/screenshot_sampling.py
|
d44202a2627f44f58d852636adf7e37b311a4a3f
|
[
"MIT"
] |
permissive
|
ahmedshingaly/sketch2shape
|
a24ac126202cd8ce9d5527ee3cdc84d1bfaf8bf3
|
128f83d760d215ec7fae35aeb1430512552f2b92
|
refs/heads/master
| 2022-12-17T11:25:30.983537
| 2020-09-18T02:03:18
| 2020-09-18T02:03:18
| 296,487,395
| 0
| 0
|
MIT
| 2020-09-18T01:58:47
| 2020-09-18T01:58:46
| null |
UTF-8
|
Python
| false
| false
| 2,871
|
py
|
from pyDOE import lhs
import torch
import numpy as np
from utils.util_vtk import *
from utils.util import *
from utils.sketchify import sketchify
from time import time
#from sklearn.cluster import DBSCAN
# def filter(voxels, threshold=0.5, distance=10, min_voxels=10):
# filter_size = voxels > threshold
# voxels[~filter_size] = 0
# non_zeros = np.nonzero(voxels)
# non_zeros = np.vstack(non_zeros).T
# # print(non_zeros.shape)
# db = DBSCAN(eps=distance, min_samples=min_voxels)
# db.fit(non_zeros)
# (values, counts) = np.unique(db.labels_, return_counts=True)
# ind = values[np.argmax(counts)]
# print(ind)
# # print(ind)
# # print(np.max(counts))
# not_retained = non_zeros[db.labels_ != ind]
# print(not_retained.shape[0])
# # voxels = np.zeros((64, 64, 64))
# voxels[not_retained] = 0
# return voxels
# Device configuration
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu' # we're dealing with cpu iv3_model
# Load Model
model_basepath = r"models_cpu/"
model_filepath = "chair_G_cpu"
model = torch.load(model_basepath + model_filepath)
# Data Folder
data_folder = r"data/"
sketch_folder = r"screenshotsBT/"
sketch_filename = "screenshot"
img_ext = ".png"
samples_filename = "samples_screenshot_BT.npy"
sketch_fp = data_folder + sketch_folder + sketch_filename
samples_fp = data_folder + samples_filename
# Parameters
nsamples = 10000
ndim = 200
downsampling = 2
image_size = 128
overwrite = True
# Latin Hypercube Sampling
samples = lhs(ndim, samples=nsamples, criterion=None)
np.save(samples_fp, samples)
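# Illustrative note (not part of the original file): lhs(ndim, samples=nsamples) returns
# an (nsamples, ndim) array of values in [0, 1), stratified so every dimension is covered
# evenly; each row is reshaped into one latent vector in the loop below.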
print(10 * '#' + ' Sampling Started ' + 10 * '#')
tic = time()
sample_num = 0
for latent_vec in samples:
z = torch.Tensor(latent_vec)
# z = torch.randn(1, ndim, device=device)*20
z = z.view(1, -1, 1, 1, 1)
fake = model(z)
np_fake = fake.detach().numpy()
voxels = np.reshape(fake.detach().numpy(), (64, 64, 64))
if downsampling > 1:
voxels = downsample(voxels, downsampling, method='mean')
# voxels = filter(voxels)
# print(max_connected(voxels, distance=3).shape)
path = sketch_fp + str(sample_num) + img_ext
visualization(voxels, 0.3, title=None, uniform_size=1, use_colormap=False, angle=0.3, filename=path)
# out_path = path
# if ~overwrite:
# out_path = path.split(".")[0] + "bis." + path.split(".")[1]
# sketchify(path, path, output_dim=(128, 128))
sample_num += 1
toc = time()
print("Sketch " + str(sample_num) + "/" + str(nsamples) + " Sampled | Elapsed Time: " + "{0:.2f}".format(
toc - tic) + "s | Estimated Remaining Time: " + "{0:.2f}".format(
(toc - tic) / sample_num * (nsamples - sample_num)) + "s")
# Write Samples Data
# f = h5py.File(samples_fp, "w")
# f.create_dataset('samples', data=samples)
# f.close()
|
[
"38009983+danhaive@users.noreply.github.com"
] |
38009983+danhaive@users.noreply.github.com
|
05ad2df497ac7fab5459b1b1b3f38d06c6a3c071
|
d488007fd29194535f8f57359c9c398eab99e9c8
|
/src/python/fem/data/plot/__init__.py
|
f5439f2e7090a33926c1e615e19c1be410b636f1
|
[] |
no_license
|
kvendingoldo/diploma
|
0df711455968ce0ffa4ceb948c4442acc5b34f65
|
f98455b53a003cbe878855ed80eff88fb8952c61
|
refs/heads/master
| 2018-09-17T04:26:27.860389
| 2018-06-13T20:53:08
| 2018-06-13T20:53:08
| 109,409,623
| 1
| 0
| null | 2018-02-13T19:10:27
| 2017-11-03T15:07:42
|
Python
|
UTF-8
|
Python
| false
| false
| 109
|
py
|
# -*- coding: utf-8 -*-
# @Author: Alexander Sharov
__all__ = ['poly_contour', 'contour_lines', 'tri_plot']
|
[
"kvendingoldo@yandex.ru"
] |
kvendingoldo@yandex.ru
|
dda517535d004caebe20b1ae918bcb69b4d9bdee
|
b348a7931799a5ad689f5e49c76bd36f7243fddf
|
/mconfig/views.py
|
fa4fa0a4fe9306324388d8e1a6cefd6a5b529013
|
[] |
no_license
|
pavel-lihanov/danfoss-mconfig
|
360968630e569c277d71cc288134ebcbe14ce5ca
|
be4a20ac50f2ad09e065f1afc504af314001925f
|
refs/heads/master
| 2021-01-11T12:36:28.110038
| 2017-08-21T07:58:22
| 2017-08-21T07:58:22
| 79,431,816
| 0
| 0
| null | 2017-02-10T06:29:59
| 2017-01-19T08:22:22
|
Python
|
UTF-8
|
Python
| false
| false
| 25,087
|
py
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseForbidden, HttpResponseRedirect, HttpResponseServerError
from django.template import loader
from django.utils.translation import activate, check_for_language, get_language
from django.utils.translation import ugettext as _
from django.views.static import serve
from django.views import generic
from django.contrib.auth import views as auth_views
import django.contrib.auth
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.mixins import PermissionRequiredMixin, AccessMixin
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
#from django.core.urlresolvers import reverse
from mconfig.models import Order, Profile
from field_views import HTMLChoiceMixin, HTMLEditMixin, HTMLCompoundMixin, HTMLOneOfManyMixin, HTMLSearchChoiceMixin, HTMLStreetAddressMixin, HTMLHeaderMixin
import price
price.price_lists['VEDADrive'] = price.VEDAXLPriceList('prices.xlsm')
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import threading
import datetime
import devices
import questions
import wizard
import time
import traceback
import locale
import os
import os.path
import shutil
import json
#import rpdb2
#rpdb2.start_embedded_debugger('123qwe')
#locale.setlocale(locale.LC_ALL, 'ru')
#wizard instances are stored here
sessions = {}
last_id = 0
lock = threading.Lock()
def send_mail(server, from_, to, subject, html, text):
msg = MIMEMultipart('alternative')
html = html
msg['Subject'] = subject
msg['From'] = from_
msg['To'] = to
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
msg.attach(part1)
msg.attach(part2)
s = smtplib.SMTP(server)
s.sendmail(from_, to, msg.as_string())
s.quit()
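# Illustrative usage sketch (not part of the original file); the host and addresses are
# placeholders:
#   send_mail('localhost', 'noreply@example.org', 'user@example.org',
#             'Subject line', '<html><body>Hi</body></html>', 'Hi')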
class Reaper(threading.Thread):
#deletes inactive sessions after 1 day
def __init__(self):
threading.Thread.__init__(self)
self.daemon = True
def run(self):
try:
while True:
time.sleep(600)
#print('Reaper: checking sessions')
to_reap = []
with lock:
for k,v in sessions.items():
if (datetime.datetime.now() - v.last_activity).days >= 1:
print('Reaper: session {0} will be reaped'.format(k))
to_reap.append(k)
#del sessions[k]
else:
pass
#print('Let it leave, last active at {0}'.format(v.last_activity))
for k in to_reap:
del sessions[k]
except:
traceback.print_exc()
#reaper = Reaper()
#reaper.start()
if check_for_language('ru'):
print('Has Russian')
activate('ru')
else:
print('No Russian, sorry')
#print(get_language())
print(_('Hello'))
#todo: move to separate module
class Settings:
pass
class Decider:
def __init__(self, settings):
self.select_option = self.select_option_first
#self.select_option = self.select_option_most_expensive
def select_option_first(self, device, option):
#select first available option
return device.options[option][0]
def select_option_most_expensive(self, device, option):
#select most expensive option
avail = device.options[option]
ops = option_prices[option]
prices = sorted([(o, p) for o,p in ops.items() if o in avail], key=operator.itemgetter(1), reverse=True)
return prices[0][0]
def select_devices(self, devices):
def get_nom_current(device):
return device.attributes['nom_current']
return [list(sorted(devices, key=get_nom_current))[0]]
def select_choice(self, devices, choice):
return choice.get_valid_choices(devices)
settings = Settings()
decider = Decider(settings)
#test_dev = devices.VEDADrive.from_order_code('VD-P2000U1F531SSX192ACA21B2CXDX21E1S')
#print(test_dev)
class HTMLQuestion:
template = "mconfig/question.html"
def as_json(self, **args):
return json.dumps({ 'name': self.question.header,
'error': self.question.last_error,
'next_enabled': self.question.can_proceed(),
'prev_enabled': self.question.previous is not None,
'fields': [f.view.as_json() for f in self.question.get_fields()]
})
class VEDADriveView:
_template = "VEDADrive.html"
@property
def template(self):
return self._template
def as_json(self):
return {
'name': self.package.name,
'order_code': self.package.order_code(),
'short_descr': self.package.short_descr(),
'options': self.package.display_options(),
'main_cabinet': self.package.main_cabinet.name,
'addons': '+'.join([o.name for o in self.package.addons]),
'width': self.package.width,
'height': self.package.height,
'length': self.package.length,
'weight': self.package.weight,
'therm_loss': self.package.therm_loss
}
class PriceView:
_template = "price.html"
def __init__(self, show_details):
self.show_details = show_details
@property
def template(self):
return self._template
def as_json(self):
if self.show_details:
dv = PriceDetailsView()
dv.price = self.price
return {'total': self.price.total, 'details': dv.as_json()}
else:
return {'total': self.price.total, 'details': None}
class PriceDetailsView:
_template = "price.html"
def template(self):
return self._template
def as_json(self):
return {'supplier_price': self.price.supplier_price,
'delivery_cost': self.price.delivery_cost,
'sale_price': self.price.sale_price}
class HTMLResult:
_template = "mconfig/result.html"
_unpriced_template = "mconfig/result_unpriced.html"
@property
def template(self):
return self._template
#TODO: view should tell if current user has appropriate access level
def as_json(self, show_details):
package = self.question.packages[0]
package.view = VEDADriveView()
package.view.package = package
try:
package.calculate_price()
package.price.view = PriceView(show_details)
package.price.view.price = package.price
return json.dumps({'package': package.view.as_json(),
'price': package.price.view.as_json(),
})
except price.NotInPricelist:
return json.dumps({'package': package.view.as_json(),
'price': None,
})
class HTMLWizard(wizard.Wizard):
def __init__(self, devices, questions):
views = {}
wizard.Wizard.__init__(self, devices)
for question in questions:
#self.append_screen(HTMLQuestion(question), views=views)
self.append_screen(question, views=views)
def add_wizard_instance(request):
session = request.session
global sessions, last_id
#html mixins just provide template names
views = {
wizard.SearchChoiceField: HTMLSearchChoiceMixin,
questions.LoadQuestion.ApplicationField: HTMLSearchChoiceMixin,
questions.LoadQuestion.OverloadField: HTMLEditMixin,
wizard.ChoiceField: HTMLChoiceMixin,
wizard.ValueField: HTMLEditMixin,
questions.MotorCableLenField: HTMLEditMixin,
wizard.CompoundField: HTMLCompoundMixin,
wizard.OneOfManyField: HTMLOneOfManyMixin,
wizard.StreetAddressField:HTMLStreetAddressMixin,
wizard.TextHeader: HTMLHeaderMixin,
}
qs = [
questions.LoadQuestion(devices.devices, views, view=HTMLQuestion()),
questions.PlacementQuestion(devices.devices, views, view=HTMLQuestion()),
#questions.OptionsQuestion(devices.devices, views, view=HTMLQuestion()),
#questions.DeliveryQuestion(devices.devices, views, view=HTMLQuestion(), user_getter = lambda: request.user),
wizard.Result(decider, view=HTMLResult())
]
wiz = HTMLWizard(devices.devices, qs)
wiz.last_activity = datetime.datetime.now()
key = hash(wiz)
session['key'] = key
wiz.key = key
with lock:
cur_id = last_id
sessions[last_id] = (wiz, threading.Lock())
session['wizard'] = last_id
last_id += 1
wiz.start()
return cur_id
def is_superuser(user):
return user.is_superuser
class OrderView(PermissionRequiredMixin, AccessMixin, generic.ListView):
template_name = 'mconfig/order.html'
context_object_name = 'orders'
permission_required = 'mconfig.view_all_orders'
login_url = '/mconfig/login/'
def get_queryset(self):
"""Return the last five published questions."""
return Order.objects.all()
def request_access(request, action):
if request.method == 'GET':
template = loader.get_template('mconfig/request_access.html')
context={}
return HttpResponse(template.render(context, request))
elif request.method == 'POST':
access_level = 2
user = User.objects.filter(username=request.POST['email'])
user = user[0] if user else None
profile = Profile.objects.filter(email=request.POST['email'])
profile = profile[0] if profile else None
if user is None and profile is None:
user = User.objects.create_user(request.POST['email'], request.POST['email'], 'danfoss')
profile = Profile(first_name=request.POST['first_name'], last_name = request.POST['last_name'], organization=request.POST['organization'], email=request.POST['email'], role=access_level, registered=False)
user.is_active = False
user.profile = profile
profile.user = user
profile.save()
user.save()
msg = '''\
<html>
<head></head>
<body>
Hello
{0} {1} from {2} has asked for VEDADrive configurator access.
To grant it please follow the <a href="http://pc0149941:8000/mconfig/create_user?email={3}">link</a> and click "Submit"
</body>
</html>
'''.format(request.POST['first_name'], request.POST['last_name'], request.POST['organization'], request.POST['email'])
text = '{0} {1} from {2} has asked for VEDADrive configurator access.\n To grant it go to http://pc0149941:8000/mconfig/create_user?email={3} and click "Submit"'.format(request.POST['first_name'], request.POST['last_name'], request.POST['organization'], request.POST['email'])
send_mail('localhost', 'pl@mydomain.org', 'manager@myconfirm.org', 'Mconfig registration request', msg, text)
return HttpResponse('Registration request created, await confirmation email')
else:
return HttpResponse('Email already registered')
@user_passes_test(is_superuser, login_url='/mconfig/login/')
def create_user(request, action):
if request.method == 'GET':
template = loader.get_template('mconfig/create_user.html')
if 'email' in request.GET:
profile = Profile.objects.get(email=request.GET['email'])
context = {'profile': profile}
else:
context = {}
return HttpResponse(template.render(context, request))
elif request.method == 'POST':
#print(request.POST['email'], request.POST['password'])
access_level = int(request.POST['role'])
try:
user = User.objects.get(username=request.POST['email'])
except User.DoesNotExist:
user = None
try:
profile = Profile.objects.get(email=request.POST['email'])
except Profile.DoesNotExist:
profile = None
if user is None:
if access_level > 0:
user = User.objects.create_user(request.POST['email'], request.POST['email'], 'danfoss')
else:
user = User.objects.create_superuser(request.POST['email'], request.POST['email'], 'danfoss')
else:
user.first_name = request.POST['first_name']
user.last_name = request.POST['last_name']
user.is_active = True
if profile is None:
profile = Profile(first_name=request.POST['first_name'], last_name = request.POST['last_name'], organization=request.POST['organization'], email=request.POST['email'], role=access_level, registered=True)
else:
profile.first_name = request.POST['first_name']
profile.last_name = request.POST['last_name']
profile.role = access_level
user.profile = profile
profile.user = user
if access_level > 0:
user.user_permissions.clear()
if access_level == 1:
content_type = ContentType.objects.get_for_model(Order)
permission = Permission.objects.get(content_type=content_type, codename='view_price')
user.user_permissions.add(permission)
permission = Permission.objects.get(content_type=content_type, codename='view_delivery')
user.user_permissions.add(permission)
elif access_level == 2:
content_type = ContentType.objects.get_for_model(Order)
permission = Permission.objects.get(content_type=content_type, codename='view_price')
user.user_permissions.add(permission)
permission = Permission.objects.get(content_type=content_type, codename='view_delivery')
user.user_permissions.add(permission)
profile.save()
user.save()
return HttpResponse('User created OK')
msg = '''\
<html>
<head></head>
<body>
Hello
You have been given access to VEDADrive configurator. To access extended functions please follow the <a href="http://pc0149941:8000/mconfig/login">link</a>, use your email as login and "danfoss" as password.
</body>
</html>
'''
text = '''Hello
You have been given access to VEDADrive configurator. To access extended functions please go to http://pc0149941:8000/mconfig/login, use your email as login and "danfoss" as password.'''
send_mail('localhost', 'manager@myconfirm.org', request.POST['email'], 'Mconfig registration confirmation', msg, text)
return HttpResponse('User created OK')
def login(request):
print('login')
    template_response = auth_views.login(request)  # fixed: the auth views module is imported as `auth_views`; bare `views` was undefined
# Do something with `template_response`
return template_response
def logout(request):
django.contrib.auth.logout(request)
return HttpResponseRedirect('/mconfig/login/')
def index(request):
print('mconfig start page')
template = loader.get_template('mconfig/index.html')
context = {}
return HttpResponse(template.render(context, request))
#@login_required(login_url='/mconfig/login/')
def config_start(request):
print('config_start')
id = add_wizard_instance(request)
template = loader.get_template('mconfig/start.html')
return HttpResponseRedirect('/mconfig/start/{0}/questions'.format(id))
@login_required(login_url='/mconfig/login/')
def download(request, session):
wiz, lock = sessions[int(session)]
print (session, wiz)
package = wiz.screens[-1].packages[0]
filepath = 'C:\\Users\\u327397\\Desktop\\Projects\\HV\\configurator\\mysite\\test.docx'
path = os.path.join(os.path.dirname(filepath), '{0}.{1}'.format(session, 'docx'))
package.make_offer_template(path)
'''
date = models.DateField(auto_now_add=True)
price_version = models.CharField(max_length=60)
typecode = models.CharField(max_length=60)
price = models.DecimalField(max_digits=12, decimal_places=3)
user = models.ForeignKey(User)
'''
user = request.user
try:
profile = Profile.objects.get(email=user.email)
order = Order(date = datetime.date.today(), price_version = '0.0', typecode = package.order_code(), price=package.price.sale_price, user=user)
order.save()
except Profile.DoesNotExist:
#should never happen
pass
return serve(request, os.path.basename(path), os.path.dirname(path))
def field(request, session):
try:
wiz, lock = sessions[int(session)]
except KeyError:
return HttpResponseNotFound('Session not found or expired (field): {0}, have sessions {1}'.format(int(session), list(sessions.keys())))
context = {}
wiz.last_activity = datetime.datetime.now()
question = wiz.current_screen
field = request.GET['field']
#print(request.GET)
#print('requested field', field)
try:
f = question.get_field(field)
template = loader.get_template(f.view.template)
context['field'] = f
context['as_xml'] = True
res = HttpResponse(template.render(context, request), content_type="text/xml")
return res
except KeyError:
return HttpResponseNotFound('Field not found')
def question_refresh(request, session, _context=None, error=''):
#TODO: check user permissions - can be Result!!!
try:
wiz, lock = sessions[int(session)]
except KeyError:
return HttpResponse('Session not found or expired (refresh): {0}, have sessions {1}'.format(int(session), list(sessions.keys())))
if not validate_request(request, wiz):
return HttpResponseForbidden()
    context = dict(_context or {})  # copy; avoids sharing a mutable default
wiz.last_activity = datetime.datetime.now()
question = wiz.current_screen
question.last_error = error
data = question.view.as_json(show_details=request.user.is_superuser)
return HttpResponse(data, content_type="application/json")
def show_question(session, request, wiz, context):
question = wiz.current_screen
template = loader.get_template(question.view.template)
context['question'] = wiz.current_screen
#context['devices'] = [decider.select_devices(wiz.apply_filters_nosave(question.next, options=opts))]
#context['options'] = wiz.get_options(question)
res = HttpResponseRedirect(reverse('mconfig:question', args=(session, )))
return res
def next_question(request, session):
try:
wiz, lock = sessions[int(session)]
except KeyError:
return HttpResponse('Session not found or expired (next): {0}, have sessions {1}'.format(int(session), list(sessions.keys())))
if not validate_request(request, wiz):
return HttpResponseForbidden()
context = {}
wiz.last_activity = datetime.datetime.now()
try:
wiz.go_forward()
except wizard.ValidationError as ex:
print('ValidationError', ex.message)
context['error_message'] = ex.message
#context['devices'] = [decider.select_devices(wiz.apply_filters_nosave(question.next))]
#context['options'] = wiz.get_options(question)
return show_question(session, request, wiz, context)
def prev_question(request, session):
user = request.user
try:
wiz, lock = sessions[int(session)]
except KeyError:
return HttpResponse('Session not found or expired (prev): {0}, have sessions {1}'.format(int(session), list(sessions.keys())))
if not validate_request(request, wiz):
return HttpResponseForbidden()
context = {}
wiz.last_activity = datetime.datetime.now()
wiz.go_back()
return show_question(session, request, wiz, context)
def update_question(request, session):
#updates all fields, should be triggered on any field change
user = request.user
try:
wiz, lock = sessions[int(session)]
except KeyError:
return HttpResponse('Session not found or expired(update): {0}, have sessions {1}'.format(int(session), list(sessions.keys())))
if not validate_request(request, wiz):
return HttpResponseForbidden()
context = {}
wiz.last_activity = datetime.datetime.now()
question = wiz.current_screen
try:
field = question.find_field(request.POST['Current_field'])
try:
wiz.update(question, field, request.POST[field.name])
except wizard.NoMatches:
#no devices for this value, this may happen if user sets value in edit that filters out all devices
error = _('No device matches value {0} for field {1}').format(request.POST[field.name], field.name)
return question_refresh(request, session, context, error)
except ValueError:
#invalid value
error = _('Invalid value {0} for field {1}').format(request.POST[field.name], field.name)
return question_refresh(request, session, context, error)
except KeyError:
print('KeyError', request.POST['Current_field'])
#opts = wiz.get_options(question)
#prev_devs = wiz.apply_filters_nosave(question, options=opts)
#for field in question.fields:
# field.update(prev_devs, opts)
#return show_question(session, request, wiz, context)
return question_refresh(request, session, context, '')
def validate_request(request, wiz):
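    # A minimal safeguard (based on the check below): the wizard id in the URL is
    # honored only if the caller's Django session holds the matching key, which
    # add_wizard_instance is assumed to have stored at session creation.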
return 'key' in request.session and request.session['key'] == wiz.key
def question(request, session):
start_time = datetime.datetime.now()
try:
wiz, lock = sessions[int(session)]
except KeyError:
return HttpResponse('Session not found or expired (question): {0}, have sessions {1}'.format(int(session), list(sessions.keys())))
if not validate_request(request, wiz):
return HttpResponseForbidden()
context = {}
wiz.last_activity = datetime.datetime.now()
if request.method == 'GET':
question = wiz.current_screen
question.select()
#print(type(question), question.view.template)
opts = wiz.get_options(question)
all_devs = wiz.apply_filters_nosave(question.next, options=opts)
devs = decider.select_devices(all_devs)
template = loader.get_template(question.view.template)
for field in question.fields:
wiz.refresh_field(question, field)
#prev_devs = wiz.apply_filters_nosave(question, options=opts)
#for field in question.fields:
# field.update(prev_devs, opts)
question.last_error = ''
context['user'] = request.user
context['question'] = question
#context['devices'] = wiz.devs
context['devices'] = devs
context['options'] = opts
context['full'] = True
res = HttpResponse(template.render(context, request))
end_time = datetime.datetime.now()
print('Request took {0}'.format(end_time-start_time))
return res
@user_passes_test(is_superuser, login_url='/mconfig/login/')
def upload_price(request, session):
if request.method == 'GET':
template = loader.get_template('mconfig/upload_price.html')
context = {}
return HttpResponse(template.render(context, request))
else:
f = request.FILES['price_file']
#backup old price
now = datetime.datetime.now()
shutil.copyfile('prices.xlsm', now.isoformat().replace(':','_') + '_prices.xlsm')
#replace price
with open('prices.xlsm', 'wb') as destination:
for chunk in f.chunks():
destination.write(chunk)
#recreate pricelist
price.price_lists['VEDADrive'] = price.VEDAXLPriceList('prices.xlsm')
return HttpResponse('Pricelist uploaded OK')
|
[
"lihanov.pavel@danfoss.com"
] |
lihanov.pavel@danfoss.com
|
816679062ae9e3989915f6117764fc65c743f6d4
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Introduction_to_finite_element_methods_Langtangen/src/Bernstein_vs_Lagrange.py
|
6b47a69e3aaed18a2ae693248c856d34f124e6fd
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,793
|
py
|
import sympy as sym
import mpmath
import numpy
import pylab
import time
def lagrange_series(N):
psi = []
# h = Rational(1, N)
h = 1.0/N
points = [i*h for i in range(N+1)]
for i in range(len(points)):
p = 1
for k in range(len(points)):
if k != i:
p *= (x - points[k])/(points[i] - points[k])
psi.append(p)
    psi = psi[1:-1]  # drop the boundary functions; BCs are imposed elsewhere
return psi
def bernstein_series(N):
# FIXME: check if a normalization constant is common in the definition
# advantage is that the basis is always positive
psi = []
# for k in range(0,N+1):
for k in range(1,N): # bc elsewhere
psi_k = x**k*(1-x)**(N-k)
psi.append(psi_k)
return psi
def sin_series(N):
# FIXME: do not satisfy bc
psi = []
for k in range(1,N):
        psi_k = sym.sin(sym.pi*k*x)
psi.append(psi_k)
return psi
def taylor_series(N):
# FIXME: do not satisfy bc
print("Cannot with current BC implementation")
return
psi = []
for k in range(1,N):
psi_k = x**k
psi.append(psi_k)
return psi
def series(series_type, N):
if series_type=="Taylor" : return taylor_series(N) # cannot do with current implementation of bc
elif series_type=="sin" : return sin_series(N)
elif series_type=="Bernstein" : return bernstein_series(N)
elif series_type=="Lagrange" : return lagrange_series(N)
else: print("series type unknown ") # sys.exit(0)
x = sym.Symbol("x")
integrand_type = "stiffness"
bstime = []
lstime = []
bqtime = []
lqtime = []
Ns = [2, 4, 8, 16, 32]
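# Timing experiment: assemble the (N-1) x (N-1) matrix four ways -- Bernstein vs
# Lagrange basis, numeric quadrature (lambdify + mpmath.quad) vs symbolic
# sym.integrate -- and compare against the N**2 and N**4 reference curves
# plotted at the end.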
for N in Ns:
t0 = time.time()
bpsi = series("Bernstein", N)
A = sym.zeros((N-1), (N-1))
for i in range(0, N-1):
for j in range(0, N-1):
integrand = 0
if integrand_type == "mass": integrand = bpsi[i]*bpsi[j]
if integrand_type == "stiffness": integrand = sym.diff(bpsi[i],x)*sym.diff(bpsi[j],x)
integrand = sym.lambdify([x], integrand)
A[i,j] = mpmath.quad(integrand, [0, 1])
t1 = time.time()
bqtime.append(t1-t0)
for N in Ns:
t0 = time.time()
lpsi = series("Lagrange", N)
A = sym.zeros((N-1), (N-1))
for i in range(0, N-1):
for j in range(0, N-1):
integrand = 0
if integrand_type == "mass": integrand = lpsi[i]*lpsi[j]
if integrand_type == "stiffness" : integrand = sym.diff(lpsi[i],x)*sym.diff(lpsi[j],x)
integrand = sym.lambdify([x], integrand)
A[i,j] = mpmath.quad(integrand, [0,1])
t1 = time.time()
lqtime.append(t1-t0)
for N in Ns:
t0 = time.time()
bpsi = series("Bernstein", N)
A = sym.zeros((N-1), (N-1))
for i in range(0, N-1):
for j in range(0, N-1):
integrand = 0
if integrand_type == "mass": integrand = bpsi[i]*bpsi[j]
if integrand_type == "stiffness": integrand = sym.diff(bpsi[i],x)*sym.diff(bpsi[j],x)
            A[i,j] = sym.integrate(integrand, (x, 0, 1))
t1 = time.time()
bstime.append(t1-t0)
for N in Ns:
t0 = time.time()
lpsi = series("Lagrange", N)
A = sym.zeros((N-1), (N-1))
for i in range(0, N-1):
for j in range(0, N-1):
integrand = 0
if integrand_type == "mass": integrand = lpsi[i]*lpsi[j]
if integrand_type == "stiffness" : integrand = sym.diff(lpsi[i],x)*sym.diff(lpsi[j],x)
            A[i,j] = sym.integrate(integrand, (x, 0, 1))
t1 = time.time()
lstime.append(t1-t0)
print("Berstein quadrature ", bqtime)
print("Lagrange quadrature ", lqtime)
print("Bernstein symbolic ", bstime)
print("Lagrange symbolic ", lstime)
pylab.loglog(Ns, bqtime)
pylab.loglog(Ns, lqtime)
pylab.loglog(Ns, bstime)
pylab.loglog(Ns, lstime)
pylab.loglog(Ns, [4*10**-4*N**2 for N in Ns])
pylab.loglog(Ns, [10**-4*N**4 for N in Ns])
pylab.legend(["Bernstein quad", "Lagrange quad", "Bernstein symb", "Lagrange symb", "N**2", "N**4"], loc="upper left")
pylab.show()
|
[
"me@yomama.com"
] |
me@yomama.com
|
272c6c339b4b5a793f6deff7f5757934c92d3bcf
|
d0758e0ca004226cec8ad8b26c9565c98534a8b8
|
/11-videogames/Julio/6 - Platformer/sprites.py
|
719d783fe930fcd1946f478b7914a188fa19f79e
|
[] |
no_license
|
pythoncanarias/eoi
|
334d64a96afc76ac1fa10282378f291b6d8c94b3
|
349367254f85e3e4273cede067ca950913a1332c
|
refs/heads/master
| 2023-07-06T08:00:11.366345
| 2023-06-30T15:19:33
| 2023-06-30T15:19:33
| 222,742,870
| 26
| 19
| null | 2023-06-25T16:03:46
| 2019-11-19T16:41:25
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,393
|
py
|
import pygame
from settings import *
from pygame import Vector2
import math
class Wall(pygame.sprite.Sprite):
def __init__(self, game, x, y):
self.groups = game.all_sprites, game.walls
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = pygame.Surface((TILESIZE, TILESIZE))
self.image.fill(GREEN)
self.rect = self.image.get_rect()
self.x, self.y = x, y
self.rect.x, self.rect.y = x * TILESIZE, y * TILESIZE
class Player(pygame.sprite.Sprite):
def __init__(self, game, position):
self.groups = game.all_sprites
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = pygame.Surface((TILESIZE, TILESIZE))
self.image.fill(YELLOW)
self.rect = self.image.get_rect()
self.position = position * TILESIZE
self.desired_velocity = Vector2(0, 0)
self.velocity = Vector2(0, 0)
self.grounded = False
self.trigger_jump = False
self.jump_time = 0
def update(self):
self.handle_input()
self.velocity.x -= self.velocity.x * DRAG * self.game.dt
self.velocity += (Vector2(0, GRAVITY) +
self.desired_velocity * PLAYER_ACCELERATION) * self.game.dt
if self.trigger_jump:
self.trigger_jump = False
self.velocity.y = PLAYER_JUMP_SPEED * 0.5
self.jump_time += self.game.dt
if abs(self.velocity.y) > PLAYER_MAX_Y_SPEED:
self.velocity.y = math.copysign(
PLAYER_MAX_Y_SPEED, self.velocity.y)
self.position += self.velocity * self.game.dt
self.rect.x = self.position.x
self.collide_with_walls('x')
self.rect.y = self.position.y
self.collide_with_walls('y')
def handle_input(self):
vx, vy = 0, 0
key = pygame.key.get_pressed()
if key[pygame.K_LEFT] or key[pygame.K_a]:
vx = -1
if key[pygame.K_RIGHT] or key[pygame.K_d]:
vx = 1
if key[pygame.K_UP] or key[pygame.K_w]:
if self.grounded or self.jump_time < PLAYER_JUMP_TIME:
self.trigger_jump = True
else:
self.jump_time = PLAYER_JUMP_TIME
self.desired_velocity = Vector2(vx, vy)
if self.desired_velocity.magnitude() > 0:
self.desired_velocity = self.desired_velocity.normalize()
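    # Collision resolution (below) is done one axis at a time, mirroring update():
    # move in x and resolve x overlaps, then move in y and resolve y overlaps.
    # Handling the axes separately avoids snagging on tile corners.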
def collide_with_walls(self, dir):
hits = pygame.sprite.spritecollide(self, self.game.walls, False)
if len(hits) == 0:
self.grounded = False
return
if dir == 'x':
if self.velocity.x > 0:
self.position.x = hits[0].rect.left - self.rect.width
if self.velocity.x < 0:
self.position.x = hits[0].rect.right
self.velocity.x = 0
self.rect.x = self.position.x
if dir == 'y':
if self.velocity.y > 0:
self.position.y = hits[0].rect.top - self.rect.height
self.grounded = True
self.jump_time = 0
if self.velocity.y < 0:
self.position.y = hits[0].rect.bottom
self.velocity.y = 0
self.rect.y = self.position.y
|
[
"euribates@gmail.com"
] |
euribates@gmail.com
|
da66a758dcca094777bc5f050eb1ed976073b47f
|
8bffdb34468cb49eeaf0445f990cc0b255569c54
|
/eco/reward/migrations/0002_delete_user.py
|
1078ce6d00040f34493b1642709046608e94a315
|
[] |
no_license
|
newcountwhy/LP2T2X
|
e3ee2c997f0accec53aa64b9ad6a22368c2a400d
|
240bdafd3363c3d6e043f31af314c63b1d68dacc
|
refs/heads/main
| 2023-08-21T07:30:52.039978
| 2021-10-17T10:13:36
| 2021-10-17T10:13:36
| 417,551,820
| 2
| 0
| null | 2021-10-16T13:56:44
| 2021-10-15T15:38:32
| null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
# Generated by Django 3.2.5 on 2021-09-29 08:55
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reward', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='User',
),
]
|
[
"43769362+languede@users.noreply.github.com"
] |
43769362+languede@users.noreply.github.com
|
7d63c73ee096cb4e3f0f87738cd039809681dc5c
|
a34ec07c3464369a88e68c9006fa1115f5b61e5f
|
/E_SlidingWindow/DynamicWindow/L0_340_Longest_Substring_With_At_Most_K_Distinct_Characters.py
|
510d6f33695a970c599943078378927e4fd0932a
|
[] |
no_license
|
824zzy/Leetcode
|
9220f2fb13e03d601d2b471b5cfa0c2364dbdf41
|
93b7f4448a366a709214c271a570c3399f5fc4d3
|
refs/heads/master
| 2023-06-27T02:53:51.812177
| 2023-06-16T16:25:39
| 2023-06-16T16:25:39
| 69,733,624
| 14
| 3
| null | 2022-05-25T06:48:38
| 2016-10-01T10:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 625
|
py
|
""" https://leetcode.com/problems/longest-substring-with-at-most-two-distinct-characters/
sliding window with hash table, the same as 159_Longest_Substring_with_At_Most_Two_Distinct_Characters_L0.py
"""
from header import *
class Solution:
def lengthOfLongestSubstringKDistinct(self, A: str, k: int) -> int:
seen = Counter()
i = 0
ans = 0
for j in range(len(A)):
seen[A[j]] += 1
while len(seen)>k:
seen[A[i]] -= 1
if not seen[A[i]]: seen.pop(A[i])
i += 1
ans = max(ans, j-i+1)
return ans
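# Quick sanity check (hypothetical driver, not part of the original file):
#   Solution().lengthOfLongestSubstringKDistinct("eceba", 2) == 3   # "ece"
#   Solution().lengthOfLongestSubstringKDistinct("aa", 1) == 2      # "aa"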
|
[
"zhengyuan.zhu@mavs.uta.edu"
] |
zhengyuan.zhu@mavs.uta.edu
|
f8a4dddf72723381900a15ec875ec3a622a4f17c
|
7b0942229eae38a52d2f5859743b56b99b056e87
|
/robotiq_2f_gripper/robotiq_2f_driver/bin/robotiq_2f_test
|
21bedfbd5245762a7a7c2315ab1ae4b0fc512a08
|
[
"BSD-3-Clause"
] |
permissive
|
LasseNojgaard/rovi2
|
4b4a497ec37c741a235f1b330b410fcf488743aa
|
8e465d332c4226d0d764bab180508f57e26f36ba
|
refs/heads/master
| 2020-05-18T23:36:32.206489
| 2019-05-17T19:55:50
| 2019-05-17T19:55:50
| 184,712,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,642
|
#!/usr/bin/env python
"""--------------------------------------------------------------------
COPYRIGHT 2014 Stanley Innovation Inc.
Software License Agreement:
The software supplied herewith by Stanley Innovation Inc. (the "Company")
for its licensed Segway RMP Robotic Platforms is intended and supplied to you,
the Company's customer, for use solely and exclusively with Stanley Innovation
products. The software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved. Any use in
violation of the foregoing restrictions may subject the user to criminal
sanctions under applicable laws, as well as to civil liability for the
breach of the terms and conditions of this license. The Company may
immediately terminate this Agreement upon your use of the software with
any products that are not Stanley Innovation products.
The software was written using Python programming language. Your use
of the software is therefore subject to the terms and conditions of the
OSI- approved open source license viewable at http://www.python.org/.
You are solely responsible for ensuring your compliance with the Python
open source license.
You shall indemnify, defend and hold the Company harmless from any claims,
demands, liabilities or expenses, including reasonable attorneys fees, incurred
by the Company as a result of any claim or proceeding against the Company
arising out of or based upon:
(i) The combination, operation or use of the software by you with any hardware,
products, programs or data not supplied or approved in writing by the Company,
if such claim or proceeding would have been avoided but for such combination,
operation or use.
(ii) The modification of the software by or on behalf of you
(iii) Your use of the software.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
\file robotiq_2f_driver
\brief Node for Robotiq 2-Finger Gripper communication
\Platform: Linux/ROS Kinetic
--------------------------------------------------------------------"""
from robotiq_2f_driver.robotiq_2f_gripper_test import Robotiq2FGripperTest
import rospy
if __name__ == "__main__":
"""
Initialize the node
"""
rospy.init_node('robotiq_2f_test')
gripper_test = Robotiq2FGripperTest()
|
[
"emsee14@student.sdu.dk"
] |
emsee14@student.sdu.dk
|
|
29a74d3341e4f6786e159bd446f6158eb9c58e09
|
1c91439673c898c2219ee63750ea05ff847faee1
|
/configs/hornet/hornet-base_8xb64_in1k.py
|
969d8b95b6ee1a06ecf257d162050982d4d5d698
|
[
"Apache-2.0"
] |
permissive
|
ChenhongyiYang/GPViT
|
d7ba7f00d5139a989a999664ab0874c5c9d53d4d
|
2b8882b2da41d4e175fe49a33fcefad1423216f4
|
refs/heads/main
| 2023-06-08T00:10:07.319078
| 2023-05-26T15:52:54
| 2023-05-26T15:52:54
| 577,075,781
| 78
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
_base_ = [
'../_base_/models/hornet/hornet-base.py',
'../_base_/datasets/imagenet_bs64_swin_224.py',
'../_base_/schedules/imagenet_bs1024_adamw_swin.py',
'../_base_/default_runtime.py',
]
data = dict(samples_per_gpu=64)
optimizer = dict(lr=4e-3)
optimizer_config = dict(grad_clip=dict(max_norm=5.0), _delete_=True)
custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')]
|
[
"chenhongyiyang@Chenhongyis-MacBook-Pro.local"
] |
chenhongyiyang@Chenhongyis-MacBook-Pro.local
|
fb016885c98e31f743de384920194f7cba194416
|
fe3fd4e56a30c1a3e0a6016d9597fbb1a47a5b37
|
/tests/test_silo.py
|
8bdba94a4c8fe43f60ba4d3b845e4bf04e62a105
|
[
"MIT"
] |
permissive
|
digitalmensch/silo
|
dd9aaa53c2e03780ee258a560e5c0a390ce78947
|
69340d437c4663e09bd670f3d2e68f25d79ee538
|
refs/heads/master
| 2021-05-13T21:11:59.895074
| 2018-01-28T22:05:26
| 2018-01-28T22:05:26
| 116,457,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
import silo
def test_functions_return_none():
    assert silo.emergency('silo test message') is None
    assert silo.alert('silo test message') is None
    assert silo.critical('silo test message') is None
    assert silo.error('silo test message') is None
    assert silo.warning('silo test message') is None
    assert silo.notice('silo test message') is None
    assert silo.info('silo test message') is None
    assert silo.debug('silo test message') is None
|
[
"tag@adnm.ch"
] |
tag@adnm.ch
|
9fe712fa7614c5cc60c9c70ebeab89b54141cd00
|
b81668a2cc43654cf6a3ed952d781310876838f9
|
/venv/Lib/site-packages/spacy/tests/pipeline/test_senter.py
|
197fdca6e33cb73a580e8d4f491a3f5fc4fc1e4f
|
[] |
no_license
|
gowthamr1999/docbot-1
|
6a8b873407f15035fb8b30b69ed66ded343bd1e4
|
3119958d68e95673b4c9187d58d8cad5c18a6b2c
|
refs/heads/master
| 2023-04-07T02:16:55.574750
| 2021-04-16T02:52:38
| 2021-04-16T02:52:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
import pytest
from spacy import util
from spacy.lang.en import English
from spacy.language import Language
from spacy.tests.util import make_tempdir
def test_label_types():
nlp = Language()
nlp.add_pipe(nlp.create_pipe("senter"))
with pytest.raises(NotImplementedError):
nlp.get_pipe("senter").add_label("A")
SENT_STARTS = [0] * 14
SENT_STARTS[0] = 1
SENT_STARTS[5] = 1
SENT_STARTS[9] = 1
TRAIN_DATA = [
("I like green eggs. Eat blue ham. I like purple eggs.", {"sent_starts": SENT_STARTS}),
("She likes purple eggs. They hate ham. You like yellow eggs.", {"sent_starts": SENT_STARTS}),
]
def test_overfitting_IO():
# Simple test to try and quickly overfit the senter - ensuring the ML models work correctly
nlp = English()
senter = nlp.create_pipe("senter")
nlp.add_pipe(senter)
optimizer = nlp.begin_training()
for i in range(200):
losses = {}
nlp.update(TRAIN_DATA, sgd=optimizer, losses=losses)
assert losses["senter"] < 0.001
# test the trained model
test_text = "I like purple eggs. They eat ham. You like yellow eggs."
doc = nlp(test_text)
gold_sent_starts = [0] * 14
gold_sent_starts[0] = 1
gold_sent_starts[5] = 1
gold_sent_starts[9] = 1
assert [int(t.is_sent_start) for t in doc] == gold_sent_starts
# Also test the results are still the same after IO
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
doc2 = nlp2(test_text)
assert [int(t.is_sent_start) for t in doc2] == gold_sent_starts
|
[
"42891786+kiranm211@users.noreply.github.com"
] |
42891786+kiranm211@users.noreply.github.com
|
45cd2e6d141ba535bcd44646200fce9ddbbb7c68
|
c42738bac7b50bd17b6340ac7205cbb1a89a3466
|
/ft/ft/test_command.py
|
a765b480469bafabe11e1e2e36c9d52e150e2744
|
[
"MIT"
] |
permissive
|
CristianCantoro/shell-functools
|
1f3a3ce41d367f3857773c06650510e62ccd5c72
|
e66dfc3c4c6c0fd236645dc79607279cb3a8bdca
|
refs/heads/master
| 2020-04-05T16:06:25.285179
| 2018-11-10T16:03:40
| 2018-11-12T17:42:57
| 156,997,043
| 0
| 0
|
MIT
| 2018-11-10T16:03:51
| 2018-11-10T16:03:51
| null |
UTF-8
|
Python
| false
| false
| 867
|
py
|
from ft.types import T_BOOL, T_INT, T_ARRAY, T_STRING
from ft.internal import add_dynamic_type
def test_add_dynamic_type_bool():
assert add_dynamic_type("True").fttype == T_BOOL
assert add_dynamic_type("False").fttype == T_BOOL
def test_add_dynamic_type_int():
assert add_dynamic_type("0").fttype == T_INT
assert add_dynamic_type("1223").fttype == T_INT
assert add_dynamic_type("-1223").fttype == T_INT
assert add_dynamic_type("+1223").fttype == T_INT
def test_add_dynamic_type_array():
assert add_dynamic_type("foo\tbar").fttype == T_ARRAY
assert add_dynamic_type("foo\tbar\tbaz").fttype == T_ARRAY
def test_add_dynamic_type_string():
assert add_dynamic_type("foo").fttype == T_STRING
assert add_dynamic_type("foo bar").fttype == T_STRING
def test_add_dynamic_type():
assert add_dynamic_type("a ").value == "a "
|
[
"davidpeter@web.de"
] |
davidpeter@web.de
|
27c72d0ddebbeb2d39c09a34cf14adc3636ba2e7
|
286b6dc56323f982092ffafbfac8a32dbbaeb7ef
|
/training_assignments/Day_01/Nitesh_Mahajan/data_types_1.py
|
5b5bf6b902e1d78d86b94edd8e9c1c203423d387
|
[] |
no_license
|
learndevops19/pythonTraining-CalsoftInc
|
ccee0d90aadc00bfdb17f9578620f6bf92f80a4c
|
c5f61516b835339b394876edd1c6f62e7cc6f0c3
|
refs/heads/master
| 2021-02-05T04:27:17.590913
| 2019-11-20T17:27:06
| 2019-11-20T17:27:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
#!usr/bin/env python
def remove_odd_index(in_str):
"""
This function removes odd index values from a given string
Args: in_str
Returns: out_str
"""
out_str = in_str[::2]
return out_str
sample_input = "abcdef"
output = remove_odd_index(sample_input)
print(output)
|
[
"rajpratik71@gmail.com"
] |
rajpratik71@gmail.com
|
9ec3cbeec5cb79876c7d51f3d726443586b0572e
|
ffb76b4ad5e73944a66c92f5409579463d254731
|
/kaplanmeier.py
|
44c268e54e332034caab299bcc6315ae7766f610
|
[] |
no_license
|
RigautAntoine/pysurv
|
7323111fd1c84e0d951437580618969da65c9978
|
ab52372dcd35fc7b4238481d5bca95d4d5c20781
|
refs/heads/master
| 2020-04-05T16:44:27.108392
| 2018-12-01T22:29:06
| 2018-12-01T22:29:06
| 157,026,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,156
|
py
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from .stats import inv_normal_cdf
class KaplanMeier():
def __init__(self, events, durations, alpha=0.95, strata=None):
self.kms = []
if strata is None:
self.kms.append(KaplanMeierFitter(events, durations, label='', alpha=alpha))
else:
stratas = np.unique(strata)
for s in stratas:
m = (strata == s)
self.kms.append(KaplanMeierFitter(events[m], durations[m], label=s, alpha=alpha))
def summary(self):
return [km.summary() for km in self.kms]
def plot(self):
ax = plt.figure().add_subplot(111)
for km in self.kms:
km.plot(ax=ax)
ax.set_ylim(0, 1)
ax.set_xlim(0)
ax.set_xlabel('Timeline')
plt.legend(loc='best')
class KaplanMeierFitter():
"""
Non-parametric estimator of the survival function
for non- or right-censored data.
TO-DO: Strata, confidence interval
"""
def __init__(self, events, durations, label, alpha=0.95):
"""
Params:
events (numpy.array): 0 or 1
durations (numpy.array): time at which event happened or observation was censored
"""
self.label=label
self._fitted = False
self.events = events
self.durations = durations
self.alpha = alpha
self._fit(events, durations)
def _fit(self, events, durations):
unique_event_times = np.unique(durations[events==1])
unique_event_times.sort()
# Number of unique observed failure times
n = len(unique_event_times)
# Risk pool where value i correspond to at-risk objects at unique_event_times[i]
risk = np.zeros((n,))
# Failures at unique_event_times[i]
failures = np.zeros((n,))
for i, t in enumerate(list(unique_event_times)):
risk[i] = np.sum(durations >= t)
failures[i] = np.sum((events == 1) & (durations == t))
lifetable = pd.DataFrame({'at-risk': risk, 'failures':failures}, index=unique_event_times)
lifetable['survival'] = np.cumprod((risk - failures)/risk)
lifetable['cumhaz'] = -np.log(lifetable['survival'])
self._lifetable = lifetable
self._unique_event_times = unique_event_times
self._survival = lifetable['survival'].values
self.fitted = True
def _compute_z_score(self, alpha = None):
if alpha is None:
alpha = self.alpha
return inv_normal_cdf((1. + alpha) / 2.)
def _compute_confidence_bounds(self, alpha = None):
'''
Kalbfleisch and Prentice (1980) method
“exponential” Greenwood formula
https://www.math.wustl.edu/%7Esawyer/handouts/greenwood.pdf
'''
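        # Sketch of the math implemented below (exponential Greenwood):
        #   v(t) = Var[log(-log S(t))] = sum_i d_i / (n_i * (n_i - d_i)) / log(S(t))**2
        #   bounds = exp(-exp( log(-log S(t)) +/- z * sqrt(v(t)) ))
        # Working on the log(-log S) scale keeps the interval inside [0, 1].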
if alpha is not None:
self.alpha = alpha
_EPSILON = 1e-5
# Computation of these should be moved to fitting part. Not gonna change
stable_survival = np.maximum(self._survival, _EPSILON) # Numerical stability with the log
#stable_survival = self._survival
deaths = self._lifetable['failures'].values
ns = self._lifetable['at-risk'].values
var_t = stable_survival**2 * np.cumsum(deaths / (ns * (ns - deaths)))
var_t_p = np.cumsum(deaths / (ns * (ns - deaths))) / np.log(stable_survival)**2
z = self._compute_z_score()
c1 = np.log(-np.log(stable_survival)) + z * np.sqrt(var_t_p)
c2 = np.log(-np.log(stable_survival)) - z * np.sqrt(var_t_p)
confidence = pd.DataFrame()
confidence['time'] = self._unique_event_times
confidence['at-risk'] = ns
confidence['failures'] = deaths
confidence['survival'] = stable_survival
confidence['var'] = var_t_p
confidence['lower'] = np.exp(-np.exp(c1))
confidence['upper'] = np.exp(-np.exp(c2))
#confidence = confidence.fillna(0)
return confidence
def summary(self):
'''
Returns the life table
        Time => Nb at risk => Nb of events => Survival => VarSur => CIs => Hazard Rate => Cumulative Hazard
'''
return self._lifetable
def plot(self, ax):
# Set ax
c = ax._get_lines.get_next_color()
self._lifetable['survival'].plot(drawstyle="steps-post",
c=c,
label='km_estimate_' + str(self.label))
confdf = self._compute_confidence_bounds().set_index('time')[['lower', 'upper']]
ax.fill_between(confdf.index,
y1=confdf['lower'].values,
y2=confdf['upper'].values,
step='post',
alpha=0.3,
color=c)
return ax
|
[
"rigautantoine@hotmail.com"
] |
rigautantoine@hotmail.com
|
d9c50d73fc5faceeff86d5196fd7fd518d312ee2
|
4b744cd2db31bb5f20149fcae11db0cb32e3205e
|
/BaconProd/Ntupler/python/myJetExtras_cff.py
|
8f9ed9cc802faecc05270a32b5d2be1e61480953
|
[] |
no_license
|
violatingcp/Bacon
|
8efec5cda60fa47e224bcbd863ce834db4500d21
|
5438faf241823324c5bbb82fde48476d00b0bf48
|
refs/heads/master
| 2021-01-19T19:39:53.067166
| 2014-04-16T12:39:31
| 2014-04-16T12:39:31
| 17,031,122
| 0
| 1
| null | 2014-03-16T17:50:21
| 2014-02-20T18:39:24
|
C++
|
UTF-8
|
Python
| false
| false
| 5,467
|
py
|
import FWCore.ParameterSet.Config as cms
#from RecoJets.Configuration.GenJetParticles_cff import *
from RecoJets.Configuration.RecoGenJets_cff import ak5GenJets
from RecoJets.JetProducers.ak5PFJetsPruned_cfi import ak5PFJetsPruned
genParticlesForJets = cms.EDProducer("InputGenJetsParticleSelector",
src = cms.InputTag("genParticles"),
ignoreParticleIDs = cms.vuint32(
1000022,
1000012, 1000014, 1000016,
2000012, 2000014, 2000016,
1000039, 5100039,
4000012, 4000014, 4000016,
9900012, 9900014, 9900016,
39),
partonicFinalState = cms.bool(False),
excludeResonances = cms.bool(True),
excludeFromResonancePids = cms.vuint32(12, 13, 14, 16),
tausAsJets = cms.bool(False)
)
genParticlesForJetsNoNu = genParticlesForJets.clone()
genParticlesForJetsNoNu.ignoreParticleIDs += cms.vuint32( 12,14,16)
antiktGenJets = ak5GenJets.clone(
rParam = cms.double(0.5)
)
# Flavour byValue PhysDef
AK5byValPhys = cms.EDProducer("JetFlavourIdentifier",
srcByReference = cms.InputTag("AK5byRef"),
physicsDefinition = cms.bool(True),
leptonInfo = cms.bool(True)
)
# Flavour byReference
partons = cms.EDProducer("PartonSelector",
withLeptons = cms.bool(False),
src = cms.InputTag("genParticles")
)
AK5byRef = cms.EDProducer("JetPartonMatcher",
jets = cms.InputTag("ak5PFJets"),
coneSizeToAssociate = cms.double(0.3),
partons = cms.InputTag("partons")
)
AK5byValAlgo = cms.EDProducer("JetFlavourIdentifier",
srcByReference = cms.InputTag("AK5byRef"),
physicsDefinition = cms.bool(False),
leptonInfo = cms.bool(True))
jetFlavor = cms.Sequence(partons*AK5byRef*AK5byValPhys*AK5byValAlgo)
#for each jet collection run Pruning, subjet b-tagging, quark gluon discrimination,n-subjettiness and subjet quark gluon discrimination
ca5PFJetsPruned = ak5PFJetsPruned.clone(
jetAlgorithm = cms.string("CambridgeAachen"),
rParam = cms.double(0.5),
doAreaFastjet = cms.bool(False),
writeCompound = cms.bool(True),
jetCollInstanceName=cms.string("SubJets"),
jetPtMin = cms.double(20)
)
from RecoJets.JetAssociationProducers.ic5JetTracksAssociatorAtVertex_cfi import ic5JetTracksAssociatorAtVertex
jetTracksAssociatorAtVertex = ic5JetTracksAssociatorAtVertex.clone()
jetTracksAssociatorAtVertex .jets = cms.InputTag('ak5PFJets')
jetTracksAssociatorAtVertex .tracks = "generalTracks"
jetTracksAssociatorAtVertexSJ = ic5JetTracksAssociatorAtVertex.clone()
jetTracksAssociatorAtVertexSJ.jets = cms.InputTag('ca5PFJetsPruned','SubJets')
jetTracksAssociatorAtVertexSJ.tracks = "generalTracks"
from RecoBTag.Configuration.RecoBTag_cff import *
jetImpactParameterTagInfos = impactParameterTagInfos.clone()
jetImpactParameterTagInfos.jetTracks = "jetTracksAssociatorAtVertex"
jetSecondaryVertexTagInfos = secondaryVertexTagInfos.clone()
jetSecondaryVertexTagInfos.trackIPTagInfos = "jetImpactParameterTagInfos"
jetCombinedSecondaryVertexMVABJetTags = combinedSecondaryVertexMVABJetTags.clone()
jetCombinedSecondaryVertexMVABJetTags.tagInfos = cms.VInputTag( cms.InputTag("jetImpactParameterTagInfos"), cms.InputTag("jetSecondaryVertexTagInfos") )
jetImpactParameterTagInfosSJ = impactParameterTagInfos.clone()
jetImpactParameterTagInfosSJ.jetTracks = "jetTracksAssociatorAtVertexSJ"
jetSecondaryVertexTagInfosSJ = secondaryVertexTagInfos.clone()
jetSecondaryVertexTagInfosSJ.trackIPTagInfos = "jetImpactParameterTagInfosSJ"
jetCombinedSecondaryVertexMVABJetTagsSJ = combinedSecondaryVertexMVABJetTags.clone()
jetCombinedSecondaryVertexMVABJetTagsSJ.tagInfos = cms.VInputTag( cms.InputTag("jetImpactParameterTagInfosSJ"), cms.InputTag("jetSecondaryVertexTagInfosSJ") )
from JetTools.AnalyzerToolbox.QGTagger_RecoJets_cff import *
QGTagger.srcJets = cms.InputTag('ak5PFJets')
QGTaggerSubJets = QGTagger.clone()
QGTaggerSubJets.srcJets = cms.InputTag('ca5PFJetsPruned','SubJets')
from JetTools.AnalyzerToolbox.njettinessadder_cfi import *
Njettiness.src = cms.InputTag('ak5PFJets')
genjetsequence = cms.Sequence(
genParticlesForJets *
genParticlesForJetsNoNu *
ak5GenJets *
jetFlavor )
jetsequence = cms.Sequence(
ca5PFJetsPruned *
jetTracksAssociatorAtVertex *
jetImpactParameterTagInfos *
jetSecondaryVertexTagInfos *
jetTracksAssociatorAtVertexSJ *
jetImpactParameterTagInfosSJ *
jetSecondaryVertexTagInfosSJ *
jetCombinedSecondaryVertexMVABJetTags *
jetCombinedSecondaryVertexMVABJetTagsSJ *
goodOfflinePrimaryVerticesQG *
kt6PFJetsQG *
kt6PFJetsIsoQG *
QGTagger *
QGTaggerSubJets *
Njettiness
)
|
[
"violatingcp@gmail.com"
] |
violatingcp@gmail.com
|
5e3e80a52bb9f2c8c05cdc9bbf0b88641f9f12d4
|
5d3fd5c0fa4ef35f008980ffb9741fddf1d5f9e4
|
/eqmgmt/equipment_management/doctype/equipment_type/equipment_type.py
|
b3628d88e47798e77d134ac67d528390d2bcb6b8
|
[
"MIT"
] |
permissive
|
dgsol/eqmgmt
|
f911d027a60f1629d0898519f353a678869e8820
|
20074cfbfe1141a9fd0c016b4dcd60f090118abf
|
refs/heads/master
| 2020-03-17T21:07:02.607741
| 2018-05-18T11:16:35
| 2018-05-18T11:16:35
| 133,943,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, DGSOL InfoTech and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class EquipmentType(Document):
pass
|
[
"solankeejk@gmail.com"
] |
solankeejk@gmail.com
|
37af63203540dfe11d36fe05d74694f05c6505f2
|
4ecf14e0bbe105958d83e5fad014a1cd014e669d
|
/testing/web-platform/tests/webdriver/tests/element_click/stale.py
|
d39e3a3ecb730d77e422d14c0b48fd931b9225cd
|
[
"W3C",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-w3c-03-bsd-license",
"BSD-3-Clause"
] |
permissive
|
fx-dev-playground/gecko
|
2c1a13a51f00645f8bfc35c9ec71a4d40b8c8dc2
|
889acebc871804214ab2579e434d7633e271e5cb
|
refs/heads/central_default
| 2023-01-27T11:13:44.498662
| 2018-01-16T21:43:19
| 2018-01-16T21:43:19
| 64,968,340
| 8
| 13
|
NOASSERTION
| 2023-01-11T07:47:54
| 2016-08-04T21:24:19
| null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
import pytest
import webdriver
from tests.support.asserts import assert_error
from tests.support.inline import inline
def click_element(session, element):
return session.transport.send(
"POST", "/session/{session_id}/element/{element_id}/click".format(**{
"session_id": session.session_id,
"element_id": element.id,
}))
def test_is_stale(session):
session.url = inline("<button>foo</button>")
button = session.find.css("button", all=False)
session.url = inline("<button>bar</button>")
response = click_element(session, button)
assert_error(response, "stale element reference")
|
[
"ato@sny.no"
] |
ato@sny.no
|
f64cdc1ca92ae41695d57226a52b6a8cf4b72928
|
81016a0c04ccf7e0b112736b397d858cf5c3c6a9
|
/CA2/model_training.py
|
61eafd552b982faddb9d19ea281e488eea5287af
|
[] |
no_license
|
nazaninsbr/NLP-UT
|
b22591bccc8f95c6d25e858874f9ea062f4404db
|
f73beb06906e82ce0602a73ed2723e91342041f5
|
refs/heads/master
| 2021-02-16T08:51:33.675516
| 2020-06-26T20:51:28
| 2020-06-26T20:51:28
| 244,982,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,449
|
py
|
from sklearn.model_selection import KFold
import numpy as np
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
def train_the_model(X, y):
X, y = np.array(X), np.array(y)
kf = KFold(n_splits=5, shuffle=True, random_state=10)
f1_values, recall_values, precision_values, accuracy_values = [], [], [], []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
clf = MultinomialNB()
y_pred = clf.fit(X_train, y_train).predict(X_test)
this_acc = accuracy_score(y_test, y_pred)
this_f1 = f1_score(y_test, y_pred, average='weighted')
this_pre = precision_score(y_test, y_pred, average='weighted')
this_recall = recall_score(y_test, y_pred, average='weighted')
f1_values.append(this_f1)
recall_values.append(this_recall)
precision_values.append(this_pre)
accuracy_values.append(this_acc)
avg_acc = np.mean(accuracy_values)
avg_recall = np.mean(recall_values)
avg_pre = np.mean(precision_values)
avg_f1 = np.mean(f1_values)
print('Accuracy = {}, Recall = {}, Precision = {}, F1 = {}'.format(avg_acc, avg_recall, avg_pre, avg_f1))
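# Minimal usage sketch (hypothetical toy data, not from the original project):
#   X = [[2, 0, 1], [0, 3, 0], [1, 1, 0], [0, 0, 4], [3, 1, 0],
#        [0, 2, 1], [1, 0, 2], [2, 2, 0], [0, 1, 3], [1, 3, 0]]  # word counts
#   y = [0, 1, 0, 1, 0, 1, 0, 1, 1, 0]
#   train_the_model(X, y)  # prints the averaged 5-fold metrics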
|
[
"nazanin.sabrii@gmail.com"
] |
nazanin.sabrii@gmail.com
|
0c3cc9b7205ee130759da6020073ddcd46370b81
|
63eda25d7584f788a7ab1baaa37cd44b65cb2898
|
/7g.py
|
dfa6290737fea6125d5b24cc42274cffcf8ef602
|
[] |
no_license
|
fox016/bioinformatics
|
c8e89af8466b4453f0fa8bff1adfd20e8db2dce3
|
2842ad8e59ab7e2d1d0aee96b4e75e113cf32e67
|
refs/heads/master
| 2021-06-01T13:28:19.695752
| 2019-10-28T15:59:15
| 2019-10-28T15:59:15
| 23,671,723
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
#!/usr/bin/python
"""
This is a linear-time implementation developed by Karkkainen, Sanders, and Burkhardt.
See http://www.cs.helsinki.fi/u/tpkarkka/publications/jacm05-revised.pdf
The paper provides C++ code. The following Python source code is entirely my own.
Author - Nathan Fox
"""
text = open("input.txt", "r").readline().strip()
alpha = "$ACGT"
alpha_index_map = {
"$": 0,
"A": 1,
"C": 2,
"G": 3,
"T": 4
}
def get_prime(list_of_tuples, sorted_list):
list_map = {}
duplicates = False
for index in xrange(len(sorted_list)):
element = sorted_list[index]
if element in list_map:
duplicates = True
list_map[element] = index
ranks = []
for element in list_of_tuples:
ranks.append(list_map[element]+1)
return ranks, duplicates
def compare_tuple(t1, t2):
for index in xrange(len(t1)):
if t1[index] < t2[index]:
return -1
if t1[index] > t2[index]:
return 1
return 0
def suffix_array(T):
n = len(T)
B = [range(start, n+1, 3) for start in xrange(3)]
C = B[1] + B[2]
T += [0,0]
R = []
for k in [1,2]:
offset = 0
while k+offset <= B[k][-1]:
R.append(tuple(T[k+offset:k+offset+3]))
offset += 3
R_sorted = sorted(R)
R_prime, is_dup = get_prime(R, R_sorted)
while is_dup:
SA_R = suffix_array(R_prime)
SA_R_sorted = sorted(SA_R)
R_prime, is_dup = get_prime(SA_R_sorted, SA_R)
R_prime = map(lambda x: x-1, R_prime[0:-1])
rank = [None for _ in xrange(len(T)+1)]
SC = [0] * len(C)
for index in xrange(len(C)):
i = C[index]
value = R_prime[index]
rank[i] = value
SC[value-1] = i
rank[n+1] = 0
rank[n+2] = 0
pairs = [(T[i], rank[i+1]) for i in B[0]]
pairs_sorted = sorted(pairs)
SB = map(lambda i: B[0][i-1], get_prime(pairs_sorted, pairs)[0])
solution = [0] * (n+1)
SC_index = 0
SB_index = 0
for solution_index in xrange(len(solution)):
i = SC[SC_index]
j = SB[SB_index]
if (i-1) % 3 == 0:
Si = (T[i], rank[i+1])
Sj = (T[j], rank[j+1])
else:
Si = (T[i], T[i+1], rank[i+2])
Sj = (T[j], T[j+1], rank[j+2])
if compare_tuple(Si, Sj) < 0:
solution[solution_index] = i
SC_index+=1
if SC_index >= len(SC):
return solution[0:solution_index+1] + SB[SB_index:]
else:
solution[solution_index] = j
SB_index+=1
if SB_index >= len(SB):
return solution[0:solution_index+1] + SC[SC_index:]
return solution
print ', '.join(map(str, suffix_array(map(lambda a: alpha_index_map[a], text))))
|
[
"nfox@byu.edu"
] |
nfox@byu.edu
|
3d31399de0ecd29befc8598347e3e856fceae0ec
|
34fafe0ce4a28a0b2ace024858868ed030d5e432
|
/CalibNtuplizer/test/prod/crabConfig_stop2000.py
|
fdd4440ab11bd5d36a6aa01fd136944e72dbd8f3
|
[
"CC-BY-4.0"
] |
permissive
|
CMS-HSCP/SUSYBSMAnalysis-HSCP
|
d40fe64ece600fc0909a2c32eb07b4204784948c
|
809271e1dacc98d34992561b5fcb502c01cc1172
|
refs/heads/master
| 2023-07-25T00:46:38.870643
| 2023-07-11T20:25:12
| 2023-07-11T20:25:12
| 164,119,298
| 3
| 16
| null | 2023-07-11T20:25:14
| 2019-01-04T14:47:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,897
|
py
|
from WMCore.Configuration import Configuration
# More details here: https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial
config = Configuration()
config.section_('General')
config.General.requestName = 'stop2000'
#config.General.workArea = '.'
#config.General.instance = 'private'
config.General.transferOutputs = True
config.General.transferLogs = False
#config.General.serverUrl = 'You need to set the CRAB3 server URL'
config.section_('JobType')
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'run_aod_mc17.py'
config.JobType.allowUndistributedCMSSW = True
config.JobType.inputFiles = ['minbias_template_uncorr_iter1.root','minbias_template_corr_iter1.root']
config.section_('Data')
config.Data.inputDataset = '/HSCPstop_M-2000_TuneCP5_13TeV-pythia8/RunIIFall17DRPremix-PU2017_HSCP1_94X_mc2017_realistic_v11-v2/AODSIM'
config.Data.allowNonValidInputDataset = True # FIXME
#config.Data.inputDBS = 'https://cmsweb.cern.ch/dbs/prod/global/DBSReader/'
config.Data.inputDBS = 'global'
#config.Data.splitting = 'LumiBased'
#config.Data.unitsPerJob = 25
config.Data.splitting = 'Automatic'
#config.Data.splitting = 'FileBased' # special case for memory issues
#config.Data.unitsPerJob = 100
#config.Data.lumiMask = 'Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON_v1.txt'
#config.Data.runRange = '315257,315259,315264,315265,315267,315270,315339,315357,315361,315363,315365,315366,315420,315489,315490'
config.Data.publication = False
config.Data.outputDatasetTag = 'MC_Stop2000'
config.Data.outLFNDirBase = '/store/user/ccollard/HSCP/prodJan2020_CMSSW_10_6_2/'
#config.Data.ignoreLocality = True # to be used only if use the whitelist
config.section_('Site')
config.Site.storageSite = 'T2_FR_IPHC'
#config.Site.storageSite = 'T2_CH_CERN'
#config.Site.blacklist = ['T2_IT_Legnaro']
#config.Site.whitelist = ['T2_FR_IPHC']
config.section_('User')
|
[
"Tamas.Almos.Vami@cern.ch"
] |
Tamas.Almos.Vami@cern.ch
|
d1ed5ee506bd7388368e8adade60db60845253f4
|
ca97bfb2a22c6197cab8f304ef82f6e5a326afdb
|
/dask/bytes/hdfs.py
|
d46a86fe63f2c08e7362289eca23eefac548f5e5
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
josephlin55555/dask
|
0aca36287ed1d190c25b3ee486ded797be266064
|
8c6f4792105d7f31cdf62553828e57df94c102f3
|
refs/heads/master
| 2021-09-06T05:02:12.972728
| 2018-02-01T22:04:41
| 2018-02-01T22:04:41
| 119,901,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 625
|
py
|
from __future__ import print_function, division, absolute_import
from ..base import tokenize
from .core import _filesystems
from hdfs3.core import HDFileSystem
class DaskHDFS3FileSystem(HDFileSystem):
sep = '/'
def mkdirs(self, path):
return super(DaskHDFS3FileSystem, self).makedirs(path)
def ukey(self, path):
return tokenize(path, self.info(path)['last_mod'])
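    # ukey doubles as a cache key: tokenize folds in the file's last-modified
    # time, so the key changes whenever the file changes on HDFS.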
def size(self, path):
return self.info(path)['size']
def _get_pyarrow_filesystem(self):
from ._pyarrow import HDFS3Wrapper
return HDFS3Wrapper(self)
_filesystems['hdfs'] = DaskHDFS3FileSystem
|
[
"noreply@github.com"
] |
josephlin55555.noreply@github.com
|
4b542cd32cc70bf3f5ff5416537b6f3b31024d43
|
b2f8e6b48ed4f6b83a4ac46a2d2697ef1c780ae3
|
/apps/scenicspots/urls.py
|
5c56773f35bdfe35f8154f6f9d97032bada4923c
|
[] |
no_license
|
duanlei123/WorldTravel
|
2355694bd49b0e53d0f521a75de7a9467172343c
|
abf10ee41fb6c116c17fc1d0c8380282676f566e
|
refs/heads/master
| 2020-08-30T21:49:54.033596
| 2019-10-30T10:05:20
| 2019-10-30T10:05:20
| 218,496,168
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from django.urls import path, re_path,include
from .views import *
urlpatterns = [
path('all', ScenicListView.as_view(), name='all'),
    # Scenic spot detail
path('scenic_detail/<int:scenic_id>/', ScenicDetails.as_view(), name='scenic_detail'),
    # Travel order detail
# path('order_detail/<slug:order_num>/', OrderDetailsView.as_view(), name='order_detail'),
    # Activity detail
path('active_detail/<int:active_id>/', ActiveDetails.as_view(), name='active_detail'),
]
|
[
"wb-dl321273@autonavi.com"
] |
wb-dl321273@autonavi.com
|
4e8e226de3b8718e680e59f54b9b3d2413ba4aa9
|
bc1abe026115cf84bdf5bdd5f60bb6cf3fb409ee
|
/BettingSheets/admin.py
|
04552b6e819b039a59ebb8242ce357c1a2044552
|
[] |
no_license
|
um-eece-4081/2019-sports-pool
|
c434c6c0baf972f5d4c94f86caa4dceaf6a98944
|
2a9d2a8ea02066c79e96d086dff2ebf2aa4ff99f
|
refs/heads/master
| 2020-08-03T11:13:09.688434
| 2019-11-24T10:50:07
| 2019-11-24T10:50:07
| 211,731,030
| 1
| 5
| null | 2019-11-09T21:29:29
| 2019-09-29T21:52:56
|
Python
|
UTF-8
|
Python
| false
| false
| 2,558
|
py
|
from django.contrib import admin
from django.http import HttpResponse, HttpResponseRedirect
from .models import MasterBettingSheet, Game
# Register your models here.
admin.site.site_header = 'Manage Betting Sheets'
class GameAdmin(admin.TabularInline):
...
change_form_template = 'admin/game_change_form.html'
model = Game
exclude = ('favorite_score', 'underdog_score')
def get_readonly_fields(self, request, obj=None):
readonly_fields = super(GameAdmin, self).get_readonly_fields(request, obj)
if obj and obj.is_published:
readonly_fields = readonly_fields + ('betting_sheet', 'favorite_team', 'underdog_team', 'betting_line', 'network_name', 'date_time', 'game_of_the_week')
if obj.is_scored:
readonly_fields = readonly_fields + ('favorite_score', 'underdog_score')
return readonly_fields
def has_add_permission(self, request, obj=None):
if obj and obj.is_published:
return False
return True
def has_delete_permission(self, request, obj=None):
if obj and obj.is_published:
return False
return True
def get_exclude(self, request, obj=None):
exclude = super(GameAdmin, self).get_exclude(request, obj)
if obj and obj.is_published:
exclude_list = list(exclude)
exclude_list.remove('favorite_score')
exclude_list.remove('underdog_score')
exclude = tuple(exclude_list)
return exclude
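# Publish/score workflow (summarising the overrides above): publishing locks the
# setup fields via get_readonly_fields and re-exposes the otherwise-excluded
# score fields via get_exclude so results can be entered; scoring then locks
# the score fields as well.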
class MasterBettingSheetAdmin(admin.ModelAdmin):
...
change_form_template = 'admin/mbs_change_form.html'
inlines = [GameAdmin, ]
def get_form(self, request, obj=None, **kwargs):
"""Override the get_form and extend the 'exclude' keyword arg"""
if obj and obj.is_published:
kwargs.update({
'exclude': getattr(kwargs, 'exclude', tuple()) + ('title',),
})
return super(MasterBettingSheetAdmin, self).get_form(request, obj, **kwargs)
def response_change(self, request, mbs):
if '_publish' in request.POST:
MasterBettingSheet.objects.filter(pk=mbs.pk).update(is_published=True)
self.message_user(request, "Betting Sheet Published")
if '_score' in request.POST:
MasterBettingSheet.objects.filter(pk=mbs.pk).update(is_scored=True)
self.message_user(request, "Betting Sheet Scored")
return super().response_change(request, mbs)
admin.site.register(MasterBettingSheet, MasterBettingSheetAdmin)
|
[
"tbwllace@memphis.edu"
] |
tbwllace@memphis.edu
|
3a95eb348b32c464d004d4156754aa7ab5e6a689
|
6aa8de83a51532b978c87e46b800acdab652d633
|
/src/python/review/inspectors/parsers/checkstyle_parser.py
|
9cf75acbd5b74cd2586497a50b18c4d9a9fca531
|
[
"Apache-2.0",
"LGPL-2.0-only",
"LGPL-2.1-only"
] |
permissive
|
nihalshetty-boop/hyperstyle
|
ea2e791ee00ab904a19b9cc6a233f06afaaa9bc0
|
9a6d53cd1ca220d97d296c0087056b5885b26281
|
refs/heads/main
| 2023-07-01T06:00:26.355034
| 2021-07-26T13:02:17
| 2021-07-26T13:02:17
| 391,957,024
| 0
| 0
|
Apache-2.0
| 2021-08-02T13:20:21
| 2021-08-02T13:20:21
| null |
UTF-8
|
Python
| false
| false
| 4,680
|
py
|
import logging
import re
from pathlib import Path
from typing import Callable, Dict, List, Any, Optional
from xml.etree import ElementTree
from src.python.review.common.file_system import get_content_from_file
from src.python.review.inspectors.inspector_type import InspectorType
from src.python.review.inspectors.issue import BaseIssue, BoolExprLenIssue, CodeIssue, CyclomaticComplexityIssue, \
FuncLenIssue, IssueType, LineLenIssue, IssueData
from src.python.review.inspectors.tips import get_bool_expr_len_tip, get_cyclomatic_complexity_tip, get_func_len_tip, \
get_line_len_tip
logger = logging.getLogger(__name__)
# Check if the result of the inspectors is correct: it exists and it is not empty
def __is_result_file_correct(file_path: Path, inspector_type: InspectorType) -> bool:
if not file_path.is_file():
logger.error('%s: error - no output file' % inspector_type.value)
return False
file_content = get_content_from_file(file_path)
if not file_content:
logger.error('%s: error - empty file' % inspector_type.value)
return False
return True
def __parse_error_message(element: ElementTree.Element) -> str:
message = element.attrib['message']
return re.sub(r'\(max allowed is \d+\). ', '', message)
# Measurable means that the issue has integer measure,
# e.g. BoolExprLenIssue, CyclomaticComplexityIssue and so on
def __parse_measurable_issue(issue_data: Dict[str, Any], issue_type: IssueType,
measure_value: int) -> Optional[BaseIssue]:
if issue_type == IssueType.CYCLOMATIC_COMPLEXITY:
issue_data[IssueData.CYCLOMATIC_COMPLEXITY.value] = measure_value
issue_data[IssueData.DESCRIPTION.value] = get_cyclomatic_complexity_tip()
return CyclomaticComplexityIssue(**issue_data)
elif issue_type == IssueType.FUNC_LEN:
issue_data[IssueData.FUNCTION_LEN.value] = measure_value
issue_data[IssueData.DESCRIPTION.value] = get_func_len_tip()
return FuncLenIssue(**issue_data)
elif issue_type == IssueType.BOOL_EXPR_LEN:
issue_data[IssueData.BOOL_EXPR_LEN.value] = measure_value
issue_data[IssueData.DESCRIPTION.value] = get_bool_expr_len_tip()
return BoolExprLenIssue(**issue_data)
elif issue_type == IssueType.LINE_LEN:
issue_data[IssueData.LINE_LEN.value] = measure_value
issue_data[IssueData.DESCRIPTION.value] = get_line_len_tip()
return LineLenIssue(**issue_data)
return None
def __should_handle_element(element: ElementTree.Element) -> bool:
return element.tag == 'file'
def __is_error(element: ElementTree.Element) -> bool:
return element.tag == 'error'
# TODO Needs testing
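# Expected input shape (an illustrative sketch of checkstyle-format XML):
#   <checkstyle>
#     <file name="path/to/File.java">
#       <error line="3" column="5" source="...BooleanExpressionComplexityCheck"
#              message="Boolean expression complexity is 5 (max allowed is 3)."/>
#     </file>
#   </checkstyle>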
def parse_checkstyle_file_result(
file_path: Path,
inspector_type: InspectorType,
issue_type_selector: Callable[[str], IssueType],
origin_class_to_description: Dict[str, str]) -> List[BaseIssue]:
if not __is_result_file_correct(file_path, inspector_type):
return []
# Parse result XML
tree = ElementTree.parse(file_path)
issues: List[BaseIssue] = []
for element in tree.getroot():
if not __should_handle_element(element):
continue
code_file_path = Path(element.attrib['name'])
for inner_element in element:
if not __is_error(inner_element):
continue
message = __parse_error_message(inner_element)
origin_class = inner_element.attrib['source'].split('.')[-1]
issue_data = IssueData.get_base_issue_data_dict(code_file_path, inspector_type,
line_number=int(inner_element.attrib['line']),
column_number=int(
inner_element.attrib.get('column', 1)),
origin_class=origin_class)
issue_type = issue_type_selector(origin_class)
issue_data[IssueData.ISSUE_TYPE.value] = issue_type
if origin_class in origin_class_to_description:
pattern = origin_class_to_description.get(origin_class)
measure_value = int(re.search(pattern, message,
flags=re.IGNORECASE).groups()[0])
issue = __parse_measurable_issue(issue_data, issue_type, measure_value)
else:
issue_data[IssueData.DESCRIPTION.value] = message
issue = CodeIssue(**issue_data)
if issue is not None:
issues.append(issue)
return issues
|
[
"noreply@github.com"
] |
nihalshetty-boop.noreply@github.com
|
b53242e5e42a9427edde8d26c9ad3d4f95f7cafa
|
e550562e543637a774d484d450471a16361cab28
|
/3_scripts/python/p8_zonacensal.py
|
5e94d46e07fbe2d0ef957940fd17efb1c88c5039
|
[] |
no_license
|
jdaniel14/GRPIAA-LOAD-OSM
|
cc1d18345f7745e4312e88de7ba9759803ccfe0f
|
64b01c0224620cbb626b5d9094d73a248234480c
|
refs/heads/master
| 2021-01-10T08:59:49.618147
| 2015-12-22T18:08:42
| 2015-12-22T18:08:42
| 48,446,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
path_file_job1 = "3_scripts/input/i9_zonascensales.csv"
path_file_out = "3_scripts/output/o9_zonascensales.csv"
file_out = open(path_file_out, "wb")
file_out.write("id|dpto|prov|dist|nombre|length|area|geom\n")
with open(path_file_job1, "r") as file1:
    next(file1)  # skip the header row
    for row in file1:
data_insert = str(row).strip().split(";")
cad = data_insert[1] + "|" + data_insert[3] + "|" + data_insert[4] + "|" + data_insert[5] + "|" + data_insert[8] + "|" + data_insert[10] + "|" + data_insert[11] + "|SRID=4326;" + data_insert[0]
file_out.write(cad+'\n')
file_out.close()
|
[
"jkliose14@gmail.com"
] |
jkliose14@gmail.com
|
9a577a33d19a23deb0ff0126e7dcbc7223c2a860
|
a882591ff55815e9e18ea9cdc2de99d3b232e520
|
/html.py
|
12ef46ab94483458a0ef81bfd05f5e4dc665a078
|
[
"MIT"
] |
permissive
|
Real-Currents/d3MapRenderer
|
8cfa35da1be8fc11b9f9f7548399c37dfeb1e747
|
aa3733f2cc0e52ca70b93bc55d5f8f7059b73e31
|
refs/heads/master
| 2020-04-08T17:04:17.983769
| 2017-11-26T20:32:39
| 2017-11-26T20:32:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,102
|
py
|
import codecs
import os
from models import model
from logger import log
class index(object):
"""Class for formatting the index html file for the map"""
def __init__(self):
"""Format the index file with scripts for the viz"""
self.__logger = log(self.__class__.__name__)
self.model = None
self.layersForOutput = []
self.mainLayer = None
def tipInUse(self):
"""Is there a tip or chart in one of the layers"""
tip = False
for o in self.layersForOutput:
if o.hasTip == True or o.hasViz == True:
tip = True
return tip
def vizInUse(self):
"""Is there a chart in one of the layers"""
viz = False
for o in self.layersForOutput:
if o.hasViz == True:
viz = True
return viz
def createHeader(self, title):
"""Creating the optional heading element"""
template = u"<h1>{0}</h1>"
if self.model.showHeader == True:
return template.format(title)
else:
return ""
def createSvgPaths(self, labels, selectedProjection):
"""Create the Svg group and path elements required by the layers"""
paths = []
template = " var vectors{index} = vectors.append(\"g\");\n var vector{index} = void 0;\n"
i = 0
for i, o in enumerate(self.layersForOutput):
path = template.format( index = i )
paths.append(path)
paths.append(o.renderers[0].symbols[0].safeSvgNode(i, selectedProjection.safeCentroid))
if self.model.showLabels == True:
labelTemplate = """ var label{index} = void 0;"""
for l in labels:
if l.hasLabels() == True:
path = labelTemplate.format( index = l.index )
paths.append(path)
return "".join(paths)
def createZoom(self, selectedProjection):
"""Create the JavaScript function to zoom"""
if self.model.panZoom:
return selectedProjection.zoomBehaviourScript()
else:
return ""
def createTipFunction(self):
"""Create the Javascript function for tips"""
template = """ //Define 'div' for tooltips
var tip = d3.tip()
.attr("class", "d3-tip")
.direction("c"){0};
vectors.call(tip);"""
ext = """
.ext("#extTip")"""
if self.tipInUse() == True:
if self.model.isExternalTip() == True:
return template.format(ext)
else:
return template.format("")
else:
return ""
def hideTip(self):
"""Conditionally add the hide tip call to the map container"""
if self.tipInUse() == True:
return """.on("click", hideTip)\n """
else:
return ""
def createChartFunction(self, vizWidth, vizHeight):
"""Create the chart javascript"""
value = ""
if self.vizInUse() == True:
value = self.model.selectedVizChart.getJavaScript(self.model.getMainLayer(), self.model.ranges, self.model.vizLabels, self.model.vizWidth, self.model.vizHeight, self.model.isExternalTip())
return value
def createTipHelpers(self):
"""Create the tip helper functions"""
template = """ // Show a tool tip for the selected element
function showTip(id) {
var obj = _.findWhere(_data, {<%idfield%>: id.toString()});
tip.html(info(obj))
.show();
<%chart%>
d3.event.stopPropagation();
}
// Get the tool tip data from the template
function info(obj) {
var template = document.getElementById("template").innerHTML;
Object.keys(obj).forEach(function(key){
template = template.replace("{" + key + "}", obj[key]);
});
return template;
}
// Hide the tooltip
function hideTip() {
tip.hide();
}"""
val = ""
if self.tipInUse() == True:
val = template.replace("<%idfield%>", self.model.idField)
cobj = ""
if self.vizInUse() == True:
cobj = "chart(obj);"
val = val.replace("<%chart%>", cobj)
return val
def createSymbologyFunctions(self):
"""Create the necessary helper functions for symbology to display correctly"""
scripts = []
for o in self.layersForOutput:
script = o.renderers[0].symbols[0].getAdditionalScripts()
# Ensure items in the list are unique
if script != "" and script not in scripts:
scripts.append(script)
return "".join(scripts)
def createSafeCentroidFunction(self, selectedProjection):
"""Create the JavaScript centroid helper function"""
if self.model.panZoom == True and selectedProjection.safeCentroid == True:
return """ function getSafeCentroid(d) {
var centroid = path.centroid(d);
var clip_test_path = d3.geo.path().projection(projection);
var clipped = typeof(clip_test_path({ type: "MultiPoint", coordinates: [centroid] })) == "undefined";
return clipped ? [0, 0] : centroid;
}
"""
else:
return ""
def createZoomFunction(self, selectedProjection, labels):
"""Create the Javascript zoom helper functions"""
labelSize = """ function labelSize(orig, scale){
var size = orig / (Math.ceil(scale/2));
return (size > 0) ? size : 1;
}\n\n"""
template = """<%labelsize%>
// Zoom/pan
function onZoom() {
<%hidetip%>
<%vectorscaling%>
<%labelscaling%>
}"""
if self.model.panZoom == True:
if self.tipInUse() == True:
template = template.replace("<%hidetip%>", "hideTip();")
else:
template = template.replace("<%hidetip%>", "")
''' Zoom scaling script '''
v = []
''' Projection wide scaling script '''
v.append(selectedProjection.zoomScalingScript())
''' Symbol specific scaling script '''
for i, o in enumerate(self.layersForOutput):
v.append(o.renderers[0].symbols[0].zoomScalingScript(i, selectedProjection.safeCentroid))
template = template.replace("<%vectorscaling%>", "".join(v))
''' Label scaling '''
if self.model.showLabels == True:
template = template.replace("<%labelsize%>", labelSize)
l = []
for label in labels:
if label.hasLabels() == True:
l.append(label.zoomLabelScript(selectedProjection.safeCentroid))
if len(l) > 0:
template = template.replace("<%labelscaling%>", "".join(l))
else:
template = template.replace("<%labelscaling%>", "")
else:
template = template.replace("<%labelsize%>", "")
template = template.replace("<%labelscaling%>", "")
return template
else:
return ""
def createQueueScript(self):
"""Create the javascript queue of json files"""
queue = []
template = " .defer(d3.json, \"json/{name}.json\")\n"
for o in self.layersForOutput:
path = template.format(
name = o.getSafeName()
)
queue.append(path)
if self.tipInUse():
queue.append(" .defer(d3.csv, \"data/info.csv\")")
return "".join(queue)
def createReadyParams(self):
"""Create the JavaScript ready function parameters"""
params = []
template = ", json{index}"
for i, o in enumerate(self.layersForOutput):
param = template.format(
index = i
)
params.append(param)
if self.tipInUse():
params.append(", data")
return "".join(params)
def createMainObject(self):
"""Get the name of the main object"""
output = ""
template = "object{index}"
        for i, o in enumerate(self.layersForOutput):
            if o.main:
                output = template.format(index = i)
                break
return output
def createLabelFeatures(self, selectedProjection, labels):
"""Create the label features"""
scripts = []
if self.model.showLabels == True:
for l in labels:
if l.hasLabels() == True:
scripts.append(l.getLabelObjectScript(selectedProjection.safeCentroid))
return "".join(scripts)
def createDataStore(self):
"""Optionally store a copy of the info.csv in JavaScript"""
if self.tipInUse() == True:
return " _data = data;"
else:
return ""
def createLegend(self):
"""Add a call to the JavaScript function to add a legend"""
if self.model.legend:
template = """ {e}
var legend = d3.legend({s})
.csv("data/legend.csv")
.position({p})
.{f}("{a}");
{s}.call(legend);"""
func = "shape"
arg = "square"
# Find the main layer and check the first symbol to determine the correct JS function call
m = self.model.getMainLayer()
if m.renderers[0].symbols[0].hasImage() == True:
func = "svgImg"
head, tail = os.path.split(m.renderers[0].symbols[0].path)
arg = "img/{0}".format(tail)
else:
arg = m.renderers[0].symbols[0].getShape()
ext = ""
svg = "svg"
pos = self.model.selectedLegendPosition
if self.model.selectedLegendPosition == 4:
# external legend has to have a different hosting svg element
ext = """var extLgnd = d3.select("#extLgnd")
.append("svg");\n"""
svg = "extLgnd"
# format and return
return template.format(
e = ext,
f = func,
a = arg,
s = svg,
p = pos
)
else:
return ""
def createExtLegend(self):
"""Add a placeholder for the external legend"""
if self.model.legend == True and self.model.selectedLegendPosition == 4:
return """ <div id="extLgnd"></div>"""
else:
return ""
def createExtTip(self):
"""Add a placeholder for the external tip"""
if self.tipInUse() == True and self.model.isExternalTip() == True:
return """ <div id="extTip"></div>"""
else:
return ""
def createVectorFeatures(self):
"""Create the polygon vector features"""
scripts = []
template = """ vector{index} = vectors{index}.selectAll("path").data(object{index}.features);
vector{index}.enter()
.append("path")\n"""
main = """ .attr("id", function (d) {{ return d.properties.""" + self.model.idField + """; }})\n"""
static = """ .attr("class", function (d) {{ return d.properties.d3Css; }})"""
tip = """\n .on("click", function (d) {{ return showTip(d.properties.""" + self.model.idField + """); }});\n\n"""
for i, o in enumerate(self.layersForOutput):
layerScript = []
script = template.format(
index = i
)
layerScript.append(script)
if o.main == True:
layerScript.append(main)
layerScript.append("{0}")
layerScript.append(static)
if o.hasTip == True or o.hasViz == True:
layerScript.append(tip)
else:
layerScript.append(";\n\n")
scripts.append(o.renderers[0].symbols[0].toLayerScript( i, "".join(layerScript), self.model.selectedProjection.safeCentroid ) )
return "".join(scripts)
def writeIndexFile(self, path, model, bound, labels):
"""Read and write the index html file"""
self.model = model
self.mainLayer = self.model.getMainLayer()
self.layersForOutput = self.model.getLayersForOutput()
f = codecs.open(path, "r", encoding="utf-8")
# Get the contents of the file
html = f.read()
f.close()
# Can't use string format as it has a fit over css and javascript braces {}
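        # e.g. u"function() { return 0; }".format() would raise a KeyError on the
        # literal braces, so plain str.replace on <%token%> markers is used instead.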
outHtml = u""
outHtml = html.replace("<%title%>", self.model.title)
outHtml = outHtml.replace("<%header%>", self.createHeader(self.model.title))
outHtml = outHtml.replace("<%tooltiptemplate%>", self.model.selectedFormat.getPopupTemplate(self.model.selectedFields, self.vizInUse(), self.model.vizWidth, self.model.vizHeight))
outHtml = outHtml.replace("<%externallegend%>", self.createExtLegend())
outHtml = outHtml.replace("<%externaltip%>", self.createExtTip())
outHtml = outHtml.replace("<%width%>", str(self.model.width))
outHtml = outHtml.replace("<%height%>", str(self.model.height))
outHtml = outHtml.replace("<%projection%>", self.model.selectedProjection.toScript(bound, self.model.width, self.model.height))
outHtml = outHtml.replace("<%vectorpaths%>", self.createSvgPaths(labels, self.model.selectedProjection))
outHtml = outHtml.replace("<%attachzoom%>", self.createZoom(self.model.selectedProjection))
outHtml = outHtml.replace("<%hidetip%>", self.hideTip())
outHtml = outHtml.replace("<%attachtip%>", self.createTipFunction())
outHtml = outHtml.replace("<%queuefiles%>", self.createQueueScript())
outHtml = outHtml.replace("<%readyparams%>", self.createReadyParams())
outHtml = outHtml.replace("<%polygonobjects%>", self.model.selectedFormat.createPolygonObjects(self.layersForOutput))
outHtml = outHtml.replace("<%refineprojection%>", self.model.selectedProjection.refineProjectionScript(self.createMainObject()))
outHtml = outHtml.replace("<%vectorfeatures%>", self.createVectorFeatures())
outHtml = outHtml.replace("<%labelfeatures%>", self.createLabelFeatures(self.model.selectedProjection, labels))
outHtml = outHtml.replace("<%datastore%>", self.createDataStore())
outHtml = outHtml.replace("<%addlegend%>", self.createLegend())
outHtml = outHtml.replace("<%tipfunctions%>", self.createTipHelpers())
outHtml = outHtml.replace("<%symbologyfunctions%>", self.createSymbologyFunctions())
outHtml = outHtml.replace("<%chartfunction%>", self.createChartFunction(self.model.vizWidth, self.model.vizHeight))
outHtml = outHtml.replace("<%safecentroidfunction%>", self.createSafeCentroidFunction(self.model.selectedProjection))
outHtml = outHtml.replace("<%zoomfunction%>", self.createZoomFunction(self.model.selectedProjection, labels))
# overwrite the file with new contents
f = codecs.open(path, "w", encoding="utf-8")
f.write(outHtml)
f.close()
|
[
"swbenten@gmail.com"
] |
swbenten@gmail.com
|
d055365495c81ad395fddc53315ffaa1d0e69a04
|
6e2932f49fd0be248c6c57f17921389d123722d9
|
/image_processing_package/setup.py
|
c05e3b35867fd9e8b9e50c3a24ba81cbb6a1432f
|
[] |
no_license
|
jonlegarda/robotics
|
594b1c992f5aa2bc263b6db38d6a2093b0b2ae3f
|
0784d22cd03ec752d05de0f348eafde4419d3724
|
refs/heads/master
| 2020-03-28T09:14:48.845716
| 2018-12-01T10:37:12
| 2018-12-01T10:37:12
| 148,021,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup(
packages=['image_processing_package'],
package_dir={'': 'src'})
setup(**setup_args)
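# Note: as with any catkin-generated setup.py, this file is meant to be invoked
# by the catkin build system (e.g. catkin_make), not run directly.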
|
[
"jonlegarda002@gmail.com"
] |
jonlegarda002@gmail.com
|
2bf4d548eaef38895b555f1cf41959aece16c16a
|
ed187b7f3773ebdab2d9a1ce2db7691741a179e4
|
/Part 1 - Data Preprocessing/Section 2 -------------------- Part 1 - Data Preprocessing --------------------/data_preprocessing_template.py
|
6926ff6f8ac8aaff8ae878192f2435b78368e558
|
[] |
no_license
|
harveen54/Machine-Learning-A-Z
|
31595e76daf27501d11e615f1c1bad37e84014a2
|
39efe55a643479d23e049d1437e2496721650961
|
refs/heads/master
| 2020-03-07T18:41:08.367590
| 2018-04-09T20:54:42
| 2018-04-09T20:54:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
# Data Preprocessing Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Data.csv')
#import all the features in X matrix
X = dataset.iloc[:, :-1].values
#import the final to be predicted in Y vector
y = dataset.iloc[:, 3].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split  # cross_validation was renamed to model_selection in scikit-learn 0.18+
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
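# Note: the scaler is fit on the training set only and then reused to transform
# the test set, so no test-set statistics leak into the preprocessing step.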
|
[
"harveensinghchadha@infosys.com"
] |
harveensinghchadha@infosys.com
|
9c20a4fbafe875127b240f8b78fc0fd722d7556c
|
c6f93a9f7537cb25485973bb7c1ee0994b27f58a
|
/application.py
|
91f19a6476bc946635d717995d2469fd344b44dc
|
[] |
no_license
|
dataviztools/Tableau-Parameter-Comments
|
8e2324ef9975261b2eeb72f2a205de498f0f34f3
|
90c8a6b21cb435ae804f86a72f16f15d395d9a05
|
refs/heads/master
| 2020-03-09T08:34:13.863813
| 2018-04-09T00:21:06
| 2018-04-09T00:21:06
| 128,692,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
import psycopg2
from flask import Flask, request
application = Flask(__name__)
application.secret_key = 'badpassword123'
class Database(object):
@staticmethod
def update(state, comment):
client = psycopg2.connect(database='databasename',
user='userid',
password='password',
                                  host='database.address.com',
port='5432')
cur = client.cursor()
upsertsql = 'INSERT INTO COMMENTS (state, comment) VALUES (%s, %s) ON CONFLICT (state) DO UPDATE SET comment = %s'
cur.execute(upsertsql, (state, comment, comment))
client.commit()
cur.close()
client.close()
@application.route('/post')
def post_comment():
state = request.args.get('state')
comment = request.args.get('comment')
if state is None:
return '<html><body><h3>State Field Missing!</h3></body></html>'
if comment is None:
return '<html><body><h3>Comment Must Not Be Blank!</h3></body></html>'
Database.update(state=state, comment=comment)
return '<html><body><h6>Comment Posted!<br />State: {}<br />Comment: {}</h6></body></html>'.format(state, comment)
if __name__ == '__main__':
application.run(debug=True, port=80)
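# Example request (hypothetical values):
#   GET /post?state=Texas&comment=Hello
# upserts the comment for that state via the INSERT ... ON CONFLICT statement above.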
|
[
"samuel.n.plant@gmail.com"
] |
samuel.n.plant@gmail.com
|
1570e1e45340851da56b2053bd79fd78927ca257
|
31a766fcae3779b05796534c354286083502f74a
|
/python/onshape_client/models/bt_export_model_body.py
|
c6e2da616f27a337f903764c80b53d790952c065
|
[] |
no_license
|
nychang/onshape-clients
|
5ea21e73a05948f5e232d4851eb8ae8a6b8c75c8
|
9c97baae57f80e3922726443584e4cc50b99623f
|
refs/heads/master
| 2020-05-06T20:35:28.212953
| 2019-04-05T20:38:19
| 2019-04-05T20:38:19
| 180,243,972
| 0
| 0
| null | 2019-04-08T22:43:59
| 2019-04-08T22:43:59
| null |
UTF-8
|
Python
| false
| false
| 9,014
|
py
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
OpenAPI spec version: 1.96
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class BTExportModelBody(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'type': 'str',
'edges': 'list[BTExportModelEdge]',
'faces': 'list[BTExportModelFace]',
'vertices': 'list[BTExportModelVertex]',
'type_id': 'int',
'connection_source': 'BTConnection',
'export_type_name': 'str',
'unknown_class': 'bool'
}
attribute_map = {
'id': 'id',
'type': 'type',
'edges': 'edges',
'faces': 'faces',
'vertices': 'vertices',
'type_id': 'typeId',
'connection_source': 'connectionSource',
'export_type_name': 'exportTypeName',
'unknown_class': 'unknownClass'
}
def __init__(self, id=None, type=None, edges=None, faces=None, vertices=None, type_id=None, connection_source=None, export_type_name=None, unknown_class=None): # noqa: E501
"""BTExportModelBody - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._type = None
self._edges = None
self._faces = None
self._vertices = None
self._type_id = None
self._connection_source = None
self._export_type_name = None
self._unknown_class = None
self.discriminator = None
if id is not None:
self.id = id
if type is not None:
self.type = type
if edges is not None:
self.edges = edges
if faces is not None:
self.faces = faces
if vertices is not None:
self.vertices = vertices
if type_id is not None:
self.type_id = type_id
if connection_source is not None:
self.connection_source = connection_source
if export_type_name is not None:
self.export_type_name = export_type_name
if unknown_class is not None:
self.unknown_class = unknown_class
@property
def id(self):
"""Gets the id of this BTExportModelBody. # noqa: E501
:return: The id of this BTExportModelBody. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this BTExportModelBody.
:param id: The id of this BTExportModelBody. # noqa: E501
:type: str
"""
self._id = id
@property
def type(self):
"""Gets the type of this BTExportModelBody. # noqa: E501
:return: The type of this BTExportModelBody. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this BTExportModelBody.
:param type: The type of this BTExportModelBody. # noqa: E501
:type: str
"""
allowed_values = ["SOLID", "SURFACE", "UNKNOWN"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def edges(self):
"""Gets the edges of this BTExportModelBody. # noqa: E501
:return: The edges of this BTExportModelBody. # noqa: E501
:rtype: list[BTExportModelEdge]
"""
return self._edges
@edges.setter
def edges(self, edges):
"""Sets the edges of this BTExportModelBody.
:param edges: The edges of this BTExportModelBody. # noqa: E501
:type: list[BTExportModelEdge]
"""
self._edges = edges
@property
def faces(self):
"""Gets the faces of this BTExportModelBody. # noqa: E501
:return: The faces of this BTExportModelBody. # noqa: E501
:rtype: list[BTExportModelFace]
"""
return self._faces
@faces.setter
def faces(self, faces):
"""Sets the faces of this BTExportModelBody.
:param faces: The faces of this BTExportModelBody. # noqa: E501
:type: list[BTExportModelFace]
"""
self._faces = faces
@property
def vertices(self):
"""Gets the vertices of this BTExportModelBody. # noqa: E501
:return: The vertices of this BTExportModelBody. # noqa: E501
:rtype: list[BTExportModelVertex]
"""
return self._vertices
@vertices.setter
def vertices(self, vertices):
"""Sets the vertices of this BTExportModelBody.
:param vertices: The vertices of this BTExportModelBody. # noqa: E501
:type: list[BTExportModelVertex]
"""
self._vertices = vertices
@property
def type_id(self):
"""Gets the type_id of this BTExportModelBody. # noqa: E501
:return: The type_id of this BTExportModelBody. # noqa: E501
:rtype: int
"""
return self._type_id
@type_id.setter
def type_id(self, type_id):
"""Sets the type_id of this BTExportModelBody.
:param type_id: The type_id of this BTExportModelBody. # noqa: E501
:type: int
"""
self._type_id = type_id
@property
def connection_source(self):
"""Gets the connection_source of this BTExportModelBody. # noqa: E501
:return: The connection_source of this BTExportModelBody. # noqa: E501
:rtype: BTConnection
"""
return self._connection_source
@connection_source.setter
def connection_source(self, connection_source):
"""Sets the connection_source of this BTExportModelBody.
:param connection_source: The connection_source of this BTExportModelBody. # noqa: E501
:type: BTConnection
"""
self._connection_source = connection_source
@property
def export_type_name(self):
"""Gets the export_type_name of this BTExportModelBody. # noqa: E501
:return: The export_type_name of this BTExportModelBody. # noqa: E501
:rtype: str
"""
return self._export_type_name
@export_type_name.setter
def export_type_name(self, export_type_name):
"""Sets the export_type_name of this BTExportModelBody.
:param export_type_name: The export_type_name of this BTExportModelBody. # noqa: E501
:type: str
"""
self._export_type_name = export_type_name
@property
def unknown_class(self):
"""Gets the unknown_class of this BTExportModelBody. # noqa: E501
:return: The unknown_class of this BTExportModelBody. # noqa: E501
:rtype: bool
"""
return self._unknown_class
@unknown_class.setter
def unknown_class(self, unknown_class):
"""Sets the unknown_class of this BTExportModelBody.
:param unknown_class: The unknown_class of this BTExportModelBody. # noqa: E501
:type: bool
"""
self._unknown_class = unknown_class
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BTExportModelBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
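# Minimal usage sketch (field values are illustrative, not from the API spec):
#   body = BTExportModelBody(id="b1", type="SOLID")
#   print(body.to_dict())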
|
[
"ethan.keller@gmail.com"
] |
ethan.keller@gmail.com
|
7cca72ca665a108a610764e33fd1235916e847bf
|
68e9222d83acbccb9badbf7d962b11630106a835
|
/cfg/synthesize.py
|
5eaf8e66ad7ab85658fda027120310c329852b65
|
[] |
no_license
|
luwi1993/dctts_docker
|
288511800f0e9cfe9e81bb57836e8164d2f38819
|
042f1ca6c17b156f48d43d31796c8a2763b4e177
|
refs/heads/master
| 2023-02-13T05:24:48.818567
| 2021-01-07T07:56:57
| 2021-01-07T07:56:57
| 296,009,615
| 0
| 0
| null | 2020-10-03T10:13:27
| 2020-09-16T11:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,887
|
py
|
# -*- coding: utf-8 -*-
# /usr/bin/python2
'''
By kyubyong park. kbpark.linguist@gmail.com.
https://www.github.com/kyubyong/dc_tts
'''
from __future__ import print_function
import os
from hyperparams import Hyperparams as hp
import numpy as np
import tensorflow as tf
from train import Graph
from utils import *
from data_load import load_data
from scipy.io.wavfile import write
from tqdm import tqdm
import time
def synthesize(domain="outside"):
info = {}
absolute_beginning = time.time()
info["start_time"] = absolute_beginning
# Load data
    L = load_data("synthesize", domain=domain)  # load_data handles both the "outside" and "inside" domains
    # Load graph
    synth_graph = Graph(mode="synthesize")
print("Graph loaded")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Restore parameters
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Text2Mel')
saver1 = tf.train.Saver(var_list=var_list)
saver1.restore(sess, tf.train.latest_checkpoint(hp.logdir + "-1"))
print("Text2Mel Restored!")
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'SSRN') + \
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'gs')
saver2 = tf.train.Saver(var_list=var_list)
saver2.restore(sess, tf.train.latest_checkpoint(hp.logdir + "-2"))
print("SSRN Restored!")
# Feed Forward
## mel
begin_of_frame_synthesis = time.time()
init_time = begin_of_frame_synthesis - absolute_beginning
Y = np.zeros((len(L), hp.max_T, hp.n_mels), np.float32)
prev_max_attentions = np.zeros((len(L),), np.int32)
for j in tqdm(range(hp.max_T)):
_gs, _Y, _max_attentions, _alignments = \
sess.run([synth_graph.global_step, synth_graph.Y, synth_graph.max_attentions, synth_graph.alignments],
{synth_graph.L: L,
synth_graph.mels: Y,
synth_graph.prev_max_attentions: prev_max_attentions})
Y[:, j, :] = _Y[:, j, :]
prev_max_attentions = _max_attentions[:, j]
# from the start time of the synthesis until the first time we reach this point is called the Latency
Latency_beginning = time.time() - absolute_beginning
Latency_synthesis = time.time() - begin_of_frame_synthesis
duration_mels = time.time() - begin_of_frame_synthesis
mels = {}
for i, mel in enumerate(Y):
mels["/{}.wav".format(i + 1)] = mel
# Get magnitude
Z = sess.run(synth_graph.Z, {synth_graph.Y: Y})
duration_mags = time.time() - begin_of_frame_synthesis
# Generate wav files
if not os.path.exists(hp.sampledir): os.makedirs(hp.sampledir)
samples = {}
mags = {}
for i, mag in enumerate(Z):
print("Working on file", i + 1)
mags["/{}.wav".format(i + 1)] = mag
wav = spectrogram2wav(mag)
write(hp.sampledir + "/{}.wav".format(i + 1), hp.sr, wav)
samples["/{}.wav".format(i + 1)] = wav
        duration_total = time.time() - begin_of_frame_synthesis
        time_measurements = {
"init_time":init_time,
"Latency_beginning":Latency_beginning,
"Latency_synthesis":Latency_synthesis,
"duration_mels":duration_mels,
"duration_mags":duration_mags,
"duration_total":duration_total,
}
info["mels"] = mels
info["mags"] = mags
info["samples"] = samples
info["time_measurents"] = time_measurents
return info
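# The info dict returned above bundles the mel spectrograms, magnitudes, raw
# audio samples and the timing measurements collected during synthesis.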
if __name__ == '__main__':
time_measurements = synthesize()
print("Done")
|
[
"luwid102@uni-duesseldorf.de"
] |
luwid102@uni-duesseldorf.de
|
f78b783c30c351d99042c713cc51e3bb302a4d3c
|
6fac29fba9c10f4ac3fad19a95c2bacae72892d6
|
/request/sync_request.py
|
b8c841e3f14c4d403342f6d0094e3ae8ba5b373c
|
[] |
no_license
|
TauWu/common-py
|
2496472e82f3517c1eb96dce56b5a8fff64445fa
|
b595c3abf33585810af76b33ce718cdc636532d6
|
refs/heads/master
| 2020-05-07T08:55:57.236297
| 2019-09-01T09:19:58
| 2019-09-01T09:19:58
| 180,353,371
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
# -*- coding: utf-8 -*-
from requests import get
class SyncRequestBase(object):
def __init__(self, url_list, headers=None):
self._url_dict = {url:None for url in url_list}
self._url_list = url_list
self._headers = headers
@property
def result(self):
        for url in self._url_list:
            self.get_content(url)
return self._url_dict
def get_content(self, url):
data = get(url, headers=self._headers).content
self._url_dict[url] = data
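# Minimal usage sketch (the example.com URLs are placeholders, not from the original code):
if __name__ == '__main__':
    requester = SyncRequestBase(['https://example.com', 'https://example.org'])
    for url, body in requester.result.items():
        print(url, len(body) if body else 0)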
|
[
"tauwoo@seuxw.cc"
] |
tauwoo@seuxw.cc
|
0ad541f519c3c3caec0c532915c566ffb73ac107
|
71ed4eabc319e5fd6102a26c13e6a3970dd32ed0
|
/frontpage/models.py
|
bd3354a2ca5834786708b2e49b082ce75b122058
|
[
"MIT"
] |
permissive
|
SkyVault/WorkingOn
|
ad972371a480f439be3c26f4c8ca858ad638c2f7
|
1ea37bd482dc2ef571bcc67ef563d13799dd8ccb
|
refs/heads/master
| 2020-06-06T15:06:53.271912
| 2019-09-23T19:13:12
| 2019-09-23T19:13:12
| 165,898,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
from django.db import models
from django.utils import timezone
# User authors posts and projects
from django.contrib.auth.models import User
"""
NOTE: to access all projects for a certain user
user = User.objects.get(username='<username>')
user.project_set
NOTE: to get the User model
from django.contrib.auth.models import User
NOTE: to create a project/post fast
user.project_set.create(title='...',...)
"""
# A project is a child of the User
class Project(models.Model):
# NOTE(Dustin): models.CASCADE deletes project if user is deleted
author = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(max_length=256)
description = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
# A post is a child of a project, when the project gets deleted
# all the posts for that project will too
class Post(models.Model):
author = models.ForeignKey(Project, on_delete=models.CASCADE)
title = models.CharField(max_length=256)
url = models.URLField(max_length=256)
description = models.TextField()
published_date = models.DateTimeField(default=timezone.now)
def __str__(self):
return f"{self.title}"
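# e.g. to list all posts for a given project (sketch, mirroring the
# user.project_set pattern noted above): project.post_set.all()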
|
[
"dustinneumann42@gmail.com"
] |
dustinneumann42@gmail.com
|
53358d6f4012535641bd36471e0cd2762cd983e7
|
76dfe9dc71ef2f807ee8b360e73a1455d91346cc
|
/hw2/Code/hw2_ex21.py
|
694a1c4e2922c3ff86b359f2d6e7b27bab7566c2
|
[] |
no_license
|
akswart/phys416code
|
8231b0722eb98c0716aa3af69b7eb513234abed3
|
49383c578c4b18a7e0832e1056be47b1c28ca978
|
refs/heads/master
| 2020-12-22T12:35:07.115403
| 2020-05-06T19:37:43
| 2020-05-06T19:37:43
| 236,782,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,943
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 11:55:57 2020
@author: akswa
"""
# Program to compute the motion of Kapitza's pendulum
# using the Verlet method
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import ellipk
def period_pend(theta0,g_over_L):
# function to return the exact period for a pendulum of length L
# usage: period = exact_period(theta0,g_over_L)
    # where: theta0 = initial angle in degrees
    #        g_over_L = ratio of g to the length of the pendulum
    # note - an earlier version had a bug: it multiplied by sqrt(g/L) instead of dividing (fixed 9/11)
# note the squaring of the argument in the elliptic function
# matlab uses a different normalization than the book
period = 4/np.sqrt(g_over_L)*ellipk((np.sin(theta0*np.pi/180./2.))**2)
return period
def pend(theta0,tau,A0,nstep,NumericalMethod,plotting = False,verbose = False):
# Set initial position and velocity of pendulum
theta = theta0*np.pi/180 # Convert angle to radians
omega = 0 # Set the initial velocity
# Set the physical constants and other variables
g_over_L = 1 # The constant g/L
time = 0 # Initial time
irev = 0 # Used to count number of reversals
g = 9.81
L = 9.81
Td = .2 # Driving period (s)
def accel(time,A0,Td,theta,L):
# The acceleration give by the equation in problem 21
a_d = A0*np.sin(2*np.pi*time/Td)
return -((g+a_d)/L)*np.sin(theta)
# Take one backward step to start Verlet
theta_old = theta - omega*tau + 0.5*tau**2*accel(time,A0,Td,theta,L)
# Loop over desired number of steps with given time step
# and numerical method
# initialize arrays
t_plot=np.array([])
th_plot=np.array([])
period=np.array([])
for istep in range(0,nstep):
# Record angle and time for plotting
t_plot = np.append(t_plot,time)
th_plot = np.append(th_plot,theta*180/np.pi) # Convert angle to degrees
time = time + tau
# Compute new position and velocity using Verlet method
theta_new = 2*theta - theta_old + tau**2*accel(time,A0,Td,theta,L)
theta_old = theta # Verlet method
theta = theta_new
# Test if the pendulum has passed through theta = 0;
# if yes, use time to estimate period
if theta*theta_old < 0: # Test position for sign change
if verbose:
print("Turning point at time t= %f" %time) ;
if irev == 0: # If this is the first change,
time_old = time # just record the time
else:
period = np.append(period,2*(time - time_old))
time_old = time
irev = irev + 1 # Increment the number of reversals
if verbose:
if irev > 1:
# Estimate period of oscillation, including error bar
AvePeriod = np.mean(period)
ErrorBar = np.std(period)/np.sqrt(irev)
print("Average period = %g +/- %g" %(AvePeriod,ErrorBar))
else:
print('Pendulum program could not complete a period, time =%g'%time)
print("Exact period = %g" %period_pend(theta0,g_over_L))
# Graph the oscillations as theta versus time
if plotting:
plt.figure(0)
plt.plot(t_plot,th_plot,'.-')
plt.title(r"Method: %s, $\theta_0$: %s, Driving Amplitude: %sg" % (NumericalMethod,theta0,A0/9.81) )
plt.xlabel('Time')
plt.ylabel(r'$\theta$ (degrees)') # the 'r' means raw strings for latex
plt.grid()
plt.show()
return t_plot,th_plot,period
if __name__ == "__main__":
# Part a
# Figure 2.7
g = 9.81
for i in [40,60,80,100]:
        a,b,c = pend(170,.005,i*g,2000,"verlet",plotting = True, verbose = True)
|
[
"akswart@gmail.com"
] |
akswart@gmail.com
|
3e0b2d2e7e8bc8656ec8dff9559ff4dd1bdb5ebb
|
c08b5edb5075e7840e716b0a09006dae0a4d05ac
|
/.history/Missions_to_Mars/scrape_mars_20200809232616.py
|
948f62b0d1894d482d3efeeb8cf7fb3180ac81b3
|
[] |
no_license
|
OlgaDlzk/web-scraping-challenge-1
|
06f915eb76c55c9bc37889017dd9af81122dc1a5
|
f99c3436dfb0169595c46dae7733d90e21385cc6
|
refs/heads/master
| 2023-03-18T00:58:37.928024
| 2020-09-22T20:32:47
| 2020-09-22T20:32:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,704
|
py
|
from splinter import Browser
from bs4 import BeautifulSoup as bs
import pandas as pd
import time
import re
# This is for debugging
def savetofile(contents):
file = open('_temporary.txt',"w",encoding="utf-8")
file.write(contents)
file.close()
def scrape():
executable_path = {"executable_path": "chromedriver"}
browser = Browser("chrome", **executable_path, headless=False)
# NASA Mars News
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
time.sleep(3)
html = browser.html
soup = bs(html, 'html.parser')
slides = soup.find_all('li', class_='slide')
html = browser.html
soup = bs(html, "html.parser")
content_title = slides[0].find('div', class_='content_title')
news_title = content_title.text.strip()
article_teaser_body = slides[0].find('div', class_='article_teaser_body')
news_p = article_teaser_body.text.strip()
# JPL Mars Space Images
base_url = 'https://www.jpl.nasa.gov'
url = base_url + '/spaceimages/?search=&category=Mars'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = bs(html, 'html.parser')
featured_image_url = base_url + soup.find('a',class_='button fancybox')['data-fancybox-href']
# Mars Weather
mars_weather = []
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
time.sleep(3)
weather_html = browser.html
soup = bs(weather_html, "html.parser")
# print(weathersoup.prettify())
mars_tweets = [soup.find_all('p', class_="TweetTextSize"), soup.find_all(
'span', class_="css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0")]
for tweets in mars_tweets:
mars_tweet = tweets
for tweet in mars_tweet:
if 'InSight' in tweet.text:
mars_weather = tweet.text
if tweet.a in tweet:
mars_weather = mars_weather.strip(tweet.a.text)
break
# Mars facts
url = 'https://space-facts.com/mars/'
browser.visit(url) # not necessary, but added for checking the operation
time.sleep(1)
dfs = pd.read_html(url)
for df in dfs:
try:
df = df.rename(columns={0: "Description", 1: "Value"})
df = df.set_index("Description")
marsfacts_html = df.to_html().replace('\n', '')
# df.to_html('marsfacts.html') # to save to a file to test
break
except:
continue
# Mars Hemispheres
base_url = 'https://astrogeology.usgs.gov'
url = base_url + '/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
time.sleep(1)
html = browser.html
soup = bs(html, 'html.parser')
items = soup.find_all('div', class_='item')
urls = []
titles = []
for item in items:
urls.append(base_url + item.find('a')['href'])
titles.append(item.find('h3').text.strip())
img_urls = []
for oneurl in urls:
browser.visit(oneurl)
time.sleep(1)
html = browser.html
soup = bs(html, 'html.parser')
oneurl = base_url+soup.find('img',class_='wide-image')['src']
img_urls.append(oneurl)
hemisphere_image_urls = []
for i in range(len(titles)):
hemisphere_image_urls.append({'title':titles[i],'img_url':img_urls[i]})
# Assigning scraped data to a page
marspage = {}
marspage["news_title"] = news_title
marspage["news_p"] = news_p
marspage["featured_image_url"] = featured_image_url
marspage["mars_weather"] = mars_weather
marspage["marsfacts_html"] = marsfacts_html
marspage["hemisphere_image_urls"] = hemisphere_image_urls
return marspage
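# Minimal usage sketch (assumes chromedriver is available, as noted above):
if __name__ == "__main__":
    print(scrape())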
|
[
"ermiasgelaye@gmail.com"
] |
ermiasgelaye@gmail.com
|
d819ff360d32372dfa26529212cc11c5b24fa935
|
5ea32a391b79a326ba6f0165f86d8b274856f4df
|
/scripts/TouchinBuild/CommandBuilders/PatchCsprojCommandBuilder.py
|
5c04b90629f392fa302c2c4c964d34ce21e3bee3
|
[] |
no_license
|
TouchInstinct/BuildScript
|
b9a78afdebce45e508204b8ad1c7a57bd356c23d
|
5da1a6efa0f10c242dfee61335cdf38466e24dd4
|
refs/heads/master
| 2021-01-23T11:19:58.591859
| 2014-06-02T16:11:19
| 2014-06-02T16:11:19
| 12,847,134
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
from commands.PatchCsprojCommand import PatchCsprojCommand
from parsers.InsideParser.InsideCsprojSetParser import InsideCsprojSetParser
class PatchCsprojCommandBuilder:
def __init__(self):
pass
def getCommandFor(self, line):
assert line is not None
parser = self.getParser()
result = parser.parseLine(line)
csprojPath = result[0]
key = result[1]
value = result[2]
slnConfig = result[3]
command = PatchCsprojCommand(csprojPath, key, value, slnConfig)
return command
def isPatchCsproj(self, line):
assert line is not None
parser = self.getParser()
isValid = parser.isValidLine(line)
return isValid
def getParser(self):
return InsideCsprojSetParser('csproj')
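# Typical flow (sketch; the exact line syntax is defined by InsideCsprojSetParser):
#   builder = PatchCsprojCommandBuilder()
#   if builder.isPatchCsproj(line):
#       command = builder.getCommandFor(line)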
|
[
"r-zaitov@yandex.ru"
] |
r-zaitov@yandex.ru
|
3b9bdcdb0022cef882418f1278022da511ee8e02
|
c6735c5825991f6421b09e8997dac0397ea581c9
|
/unittests/Reflection/RemoteMirrorInterop/test.py
|
6bda70ddc35bfdffe9fe4a69a1c8ab070a1311a3
|
[
"Apache-2.0",
"Swift-exception"
] |
permissive
|
TAIPANBOX/swift
|
6b5ff6a9e8032bef746579914c4999c8ff31540c
|
48e48f22fe59b0aa7a782b01d8df4e4414e12a52
|
refs/heads/master
| 2020-04-20T22:08:36.980111
| 2019-02-04T18:32:55
| 2019-02-04T18:32:55
| 169,129,762
| 3
| 0
|
Apache-2.0
| 2019-02-04T18:49:44
| 2019-02-04T18:49:43
| null |
UTF-8
|
Python
| false
| false
| 2,283
|
py
|
#!/usr/bin/env python
# Exercise the SwiftRemoteMirrorLegacyInterop API. This works with
# multiple versions of Swift. It builds Swift code using all versions,
# and exercises the Interop API using various combinations of those
# versions' Remote Mirror libraries.
#
# Invoke by passing the various Swift build directories as parameters.
import itertools
import os
import subprocess
import sys
args = sys.argv[1:]
if len(args) == 0:
print >> sys.stderr, "Usage:", sys.argv[0], "swift-build-dirs..."
print >> sys.stderr, ("Note: pass paths to the swift-macosx-x86_64"
" directories.")
sys.exit(1)
absoluteArgs = [os.path.abspath(arg) for arg in args]
swiftcs = [os.path.join(arg, 'bin', 'swiftc') for arg in absoluteArgs]
mirrorlibs = [os.path.join(arg, 'lib', 'swift', 'macosx',
'libswiftRemoteMirror.dylib')
for arg in absoluteArgs]
os.chdir(os.path.dirname(sys.argv[0]))
# Build the remote mirror test harness program.
subprocess.check_call(['clang',
'-framework', 'Foundation',
'-I', '../../../include/swift/SwiftRemoteMirror',
'-I', '../../../include/',
'-o', '/tmp/test',
'-Wall', '-Wextra',
'-g', 'test.m'])
# Build a test library with each Swift compiler passed in.
for i, swiftc in enumerate(swiftcs):
subprocess.check_call(
['xcrun', swiftc, '-emit-library', 'test.swift',
'-o', os.path.join('/tmp', 'libtest' + str(i) + '.dylib')])
# Run the test harness with all combinations of the remote mirror libraries.
for n in range(len(swiftcs) + 1):
    for localMirrorlibs in itertools.combinations(mirrorlibs, n):
for i, arg in enumerate(absoluteArgs):
print 'Testing', arg, 'with mirror libs:'
for l in localMirrorlibs:
print '\t', l
callArgs = ['/tmp/test']
dylibPath = os.path.join('/tmp', 'libtest' + str(i) + '.dylib')
callArgs.append(dylibPath)
callArgs += list(localMirrorlibs)
print ' '.join(callArgs)
subprocess.call(callArgs)
print 'DONE'
print ''
print localMirrorlibs
|
[
"mikeash@apple.com"
] |
mikeash@apple.com
|