Dataset columns (name · dtype · observed range):

blob_id               stringlengths   40–40
directory_id          stringlengths   40–40
path                  stringlengths   3–288
content_id            stringlengths   40–40
detected_licenses     listlengths     0–112
license_type          stringclasses   2 values
repo_name             stringlengths   5–115
snapshot_id           stringlengths   40–40
revision_id           stringlengths   40–40
branch_name           stringclasses   684 values
visit_date            timestamp[us]   2015-08-06 10:31:46 – 2023-09-06 10:44:38
revision_date         timestamp[us]   1970-01-01 02:38:32 – 2037-05-03 13:00:00
committer_date        timestamp[us]   1970-01-01 02:38:32 – 2023-09-06 01:08:06
github_id             int64           4.92k–681M
star_events_count     int64           0–209k
fork_events_count     int64           0–110k
gha_license_id        stringclasses   22 values
gha_event_created_at  timestamp[us]   2012-06-04 01:52:49 – 2023-09-14 21:59:50
gha_created_at        timestamp[us]   2008-05-22 07:58:19 – 2023-08-21 12:35:19
gha_language          stringclasses   147 values
src_encoding          stringclasses   25 values
language              stringclasses   1 value
is_vendor             bool            2 classes
is_generated          bool            2 classes
length_bytes          int64           128–12.7k
extension             stringclasses   142 values
content               stringlengths   128–8.19k
authors               listlengths     1–1
author_id             stringlengths   1–132
37c24b3960134c61b5a8710012b9ad3ebf8a62fe
55c250525bd7198ac905b1f2f86d16a44f73e03a
/Python/Python/Scripts/Auto py to exe/build/lib/auto_py_to_exe/dialogs.py
08ced7a66201b6e9c57607cc3cabb9a7329be462
[]
no_license
NateWeiler/Resources
213d18ba86f7cc9d845741b8571b9e2c2c6be916
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
refs/heads/master
2023-09-03T17:50:31.937137
2023-08-28T23:50:57
2023-08-28T23:50:57
267,368,545
2
1
null
2022-09-08T15:20:18
2020-05-27T16:18:17
null
UTF-8
Python
false
false
129
py
version https://git-lfs.github.com/spec/v1
oid sha256:17632a1084b74f79b082631a021c864a01bee63a94b1fb5768945e30f05a405b
size 2899
[ "nateweiler84@gmail.com" ]
nateweiler84@gmail.com
7461b94a60fcbe15ed116a2853262476e06aaafd
c06d18ac5b87b3b82fc486454c422b119d6c1ee9
/src/demo/_tensorflow/linear/linear.py
70f197e8d2ad5074603c813b803127c0355fe803
[ "MIT" ]
permissive
tangermi/nlp
b3a4c9612e6049463bf12bc9abb7aff06a084ace
aa36b8b20e8c91807be73a252ff7799789514302
refs/heads/master
2022-12-09T12:33:15.009413
2020-04-03T04:03:24
2020-04-03T04:03:24
252,056,010
0
0
null
2022-12-08T07:26:55
2020-04-01T02:55:05
Jupyter Notebook
UTF-8
Python
false
false
1,092
py
# -*- coding: utf-8 -*-
import tensorflow as tf


class Linear(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(
            units=1,
            activation=None,
            kernel_initializer=tf.zeros_initializer(),
            bias_initializer=tf.zeros_initializer()
        )

    def call(self, input):
        output = self.dense(input)
        return output


if __name__ == '__main__':
    X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    y = tf.constant([[10.0], [20.0]])

    model = Linear()
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
    for i in range(100):
        with tf.GradientTape() as tape:
            y_pred = model(X)  # call the model as y_pred = model(X) instead of writing y_pred = a * X + b explicitly
            loss = tf.reduce_mean(tf.square(y_pred - y))
        grads = tape.gradient(loss, model.variables)  # model.variables directly yields all variables in the model
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
    print(model.variables)
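The inline comment contrasts calling the model (y_pred = model(X)) with writing the affine map out by hand. For reference, a minimal sketch of that explicit y_pred = a * X + b form on the same toy data (assuming TensorFlow 2.x; the variables a and b are illustrative, not from the file):

import tensorflow as tf

X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])

# Explicit trainable variables instead of a Dense layer.
a = tf.Variable(tf.zeros((3, 1)))  # kernel
b = tf.Variable(tf.zeros(1))       # bias

optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
for _ in range(100):
    with tf.GradientTape() as tape:
        y_pred = tf.matmul(X, a) + b
        loss = tf.reduce_mean(tf.square(y_pred - y))
    grads = tape.gradient(loss, [a, b])
    optimizer.apply_gradients(zip(grads, [a, b]))
print(a.numpy(), b.numpy())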
[ "n10057862@qut.edu.au" ]
n10057862@qut.edu.au
237b5db6e779a7de6c8b385bcac3bf982604e07e
931aa9c6a44f86e86440c17de62801b26b66fce8
/constance/LV/getLineUnbalanceAndLosses.py
f92c4871027b8e1d87960321b14354a1e8ea4bb7
[]
no_license
Constancellc/epg-psopt
3f1b4a9f9dcaabacf0c7d2a5dbc10947ac0e0510
59bdc7951bbbc850e63e813ee635474012a873a4
refs/heads/master
2021-06-08T11:33:57.467689
2020-04-01T13:19:18
2020-04-01T13:19:18
96,895,185
1
0
null
null
null
null
UTF-8
Python
false
false
7,785
py
import csv
import random
import copy
import numpy as np
import matplotlib.pyplot as plt
from lv_optimization_new import LVTestFeeder
import pickle

#outfile = '../../../Documents/simulation_results/LV/voltages.csv'
stem = '../../../Documents/ccModels/eulv/'
alpha = 0.328684513701

g = open(stem+'lnsYprims.pkl', 'rb')
data = pickle.load(g)
g.close()

# first get phases
lds = np.load('../../../Documents/ccModels/loadBuses/eulvLptloadBusesCc-24.npy')
lds = lds.flatten()[0]
phase = []
for i in range(len(lds)):
    bus = lds['load'+str(i+1)]
    if bus[-1] == '1':
        phase.append('A')
    elif bus[-1] == '2':
        phase.append('B')
    elif bus[-1] == '3':
        phase.append('C')

# data is a dictionary where the key is the line number and it points to
# [bus a, bus b, Yprim]
# so we need to build up a dictionary of the voltages
a = np.load(stem+'eulvLptaCc060.npy')
My = np.load(stem+'eulvLptMyCc060.npy')
v0 = np.load(stem+'eulvLptV0Cc060.npy')
Y = np.load(stem+'eulvLptYbusCc060.npy')
Y = Y.flatten()[0]
Y = Y.conj()
YNodeOrder = np.load(stem+'eulvNmtYNodeOrderCc060.npy')

buses = []
for node in YNodeOrder:
    buses = buses+[node.split('.')[0]]


def get_losses(Vtot):
    losses = {}
    for line in data:
        data0 = data[line]
        bus1 = data0[0]
        bus2 = data0[1]
        Yprim = data0[2]
        idx1 = [i for i, x in enumerate(buses) if x == bus1]
        idx2 = [i for i, x in enumerate(buses) if x == bus2]
        Vidx = Vtot[idx1+idx2]
        Iphs = Yprim.dot(Vidx)
        Sinj = Vidx*(Iphs.conj())
        Sloss = sum(Sinj)
        losses[line] = [bus1, bus2, Sloss.real]
    return losses


def get_unbalance(Vtot):
    unbalance = {}
    a = complex(-0.5, 0.866)
    A = np.array([[complex(1, 0), complex(1, 0), complex(1, 0)],
                  [complex(1, 0), a, a*a],
                  [complex(1, 0), a*a, a]])
    A = A*0.333
    for line in data:
        data0 = data[line]
        bus1 = data0[0]
        bus2 = data0[1]
        Yprim = data0[2]
        idx1 = [i for i, x in enumerate(buses) if x == bus1]
        idx2 = [i for i, x in enumerate(buses) if x == bus2]
        Vidx = Vtot[idx1+idx2]
        Iphs = Yprim.dot(Vidx)
        Is = np.matmul(A, Iphs[:3])
        unbalance[line] = [bus1, bus2, abs(Is[0]), abs(Is[1]), abs(Is[2])]
    return unbalance


fdr = LVTestFeeder('manc_models/1', 1)
fdr.set_households_NR('../../../Documents/netrev/TC2a/03-Dec-2013.csv')
fdr.set_evs_MEA('../../../Documents/My_Electric_Avenue_Technical_Data/'+
                'constance/ST1charges/')

voltages = fdr.get_all_voltages(My, a, alpha, v0)
losses_no_evs = {}
ub_no_evs = {}
print(fdr.predict_losses())
for t in voltages:
    ls = get_losses(voltages[t])
    ub = get_unbalance(voltages[t])
    for l in ls:
        if l not in losses_no_evs:
            losses_no_evs[l] = 0
            ub_no_evs[l] = [0]*3
        losses_no_evs[l] += ls[l][2]
        for i in range(3):
            ub_no_evs[l][i] += ub[l][2+i]

fdr.uncontrolled()
voltages = fdr.get_all_voltages(My, a, alpha, v0)
losses_unc = {}
ub_unc = {}
print(fdr.predict_losses())
for t in voltages:
    # NOTE: here and in the blocks below the original assigns ls from
    # get_unbalance; get_losses(...) looks like the intended call
    # (compare the no-EVs block above).
    ls = get_unbalance(voltages[t])
    ub = get_unbalance(voltages[t])
    for l in ls:
        if l not in losses_unc:
            losses_unc[l] = 0
            ub_unc[l] = [0]*3
        losses_unc[l] += ls[l][2]
        for i in range(3):
            ub_unc[l][i] += ub[l][2+i]

fdr.load_flatten()
voltages = fdr.get_all_voltages(My, a, alpha, v0)
losses_lf = {}
ub_lf = {}
print(fdr.predict_losses())
for t in voltages:
    ls = get_unbalance(voltages[t])
    ub = get_unbalance(voltages[t])
    for l in ls:
        if l not in losses_lf:
            losses_lf[l] = 0
            ub_lf[l] = [0]*3
        losses_lf[l] += ls[l][2]
        for i in range(3):
            ub_lf[l][i] += ub[l][2+i]

fdr.loss_minimise()
voltages = fdr.get_all_voltages(My, a, alpha, v0)
losses_lm = {}
ub_lm = {}
print(fdr.predict_losses())
for t in voltages:
    ls = get_unbalance(voltages[t])
    ub = get_unbalance(voltages[t])
    for l in ls:
        if l not in losses_lm:
            losses_lm[l] = 0
            ub_lm[l] = [0]*3
        losses_lm[l] += ls[l][2]
        for i in range(3):
            ub_lm[l][i] += ub[l][2+i]

fdr.balance_phase2(phase)
voltages = fdr.get_all_voltages(My, a, alpha, v0)
losses_p = {}
ub_p = {}
print(fdr.predict_losses())
for t in voltages:
    ls = get_unbalance(voltages[t])
    ub = get_unbalance(voltages[t])
    for l in ls:
        if l not in losses_p:
            losses_p[l] = 0
            ub_p[l] = [0]*3
        losses_p[l] += ls[l][2]
        for i in range(3):
            ub_p[l][i] += ub[l][2+i]

for i in range(3):
    with open('lv test/branch_'+str(i)+'.csv', 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['line', 'no evs', 'unc', 'lf', 'lm', 'p'])
        for l in losses_unc:
            writer.writerow([l, ub_no_evs[l][i], ub_unc[l][i], ub_lf[l][i],
                             ub_lm[l][i], ub_p[l][i]])

with open('lv test/branch_losses.csv', 'w') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['line', 'no evs', 'unc', 'lf', 'lm', 'p'])
    for l in losses_unc:
        writer.writerow([l, losses_no_evs[l], losses_unc[l], losses_lf[l],
                         losses_lm[l], losses_p[l]])

'''
busV = {}
for i in range(907):
    busV[i+1] = [complex(0,0)]*3
for i in range(3):
    busV[1][i] = v0[i]
for i in range(len(voltages)):
    bn = int(i/3)+2
    pn = i%3
    busV[bn][pn] = voltages[i]

lineI = {}
for l in data:
    b1 = data[l][0]
    b2 = data[l][1]
    Yp = data[l][2]
    v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
    i = np.matmul(Yp,v_)[:3]
    iT = 0
    for ii in range(3):
        iT += abs(i[ii]/1000)
    lineI[l] = iT

with open('lv test/no_evs.csv','w') as csvfile:
    writer = csv.writer(csvfile)
    for l in lineI:
        writer.writerow([l,lineI[l]])

busV = {}
for i in range(907):
    busV[i+1] = [complex(0,0)]*3
for i in range(3):
    busV[1][i] = v0[i]
for i in range(len(voltages)):
    bn = int(i/3)+2
    pn = i%3
    busV[bn][pn] = voltages[i]

lineI = {}
for l in data:
    b1 = data[l][0]
    b2 = data[l][1]
    Yp = data[l][2]
    v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
    i = np.matmul(Yp,v_)[:3]
    iT = 0
    for ii in range(3):
        iT += abs(i[ii]/1000)
    lineI[l] = iT

with open('lv test/uncontrolled.csv','w') as csvfile:
    writer = csv.writer(csvfile)
    for l in lineI:
        writer.writerow([l,lineI[l]])

busV = {}
for i in range(907):
    busV[i+1] = [complex(0,0)]*3
for i in range(3):
    busV[1][i] = v0[i]
for i in range(len(voltages)):
    bn = int(i/3)+2
    pn = i%3
    busV[bn][pn] = voltages[i]

lineI = {}
for l in data:
    b1 = data[l][0]
    b2 = data[l][1]
    Yp = data[l][2]
    v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
    i = np.matmul(Yp,v_)[:3]
    iT = 0
    for ii in range(3):
        iT += abs(i[ii]/1000)
    lineI[l] = iT

with open('lv test/lf.csv','w') as csvfile:
    writer = csv.writer(csvfile)
    for l in lineI:
        writer.writerow([l,lineI[l]])

busV = {}
for i in range(907):
    busV[i+1] = [complex(0,0)]*3
for i in range(3):
    busV[1][i] = v0[i]
for i in range(len(voltages)):
    bn = int(i/3)+2
    pn = i%3
    busV[bn][pn] = voltages[i]

lineI = {}
for l in data:
    b1 = data[l][0]
    b2 = data[l][1]
    Yp = data[l][2]
    v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
    i = np.matmul(Yp,v_)[:3]
    iT = 0
    for ii in range(3):
        iT += abs(i[ii]/1000)
    lineI[l] = iT

with open('lv test/lm.csv','w') as csvfile:
    writer = csv.writer(csvfile)
    for l in lineI:
        writer.writerow([l,lineI[l]])

# now I need to work out the line flows from the current injections
'''
[ "constancellc@gmail.com" ]
constancellc@gmail.com
9c541ff8948b8d049f61e4e3e61cfa30a9bb0056
33170e7fc26b6af2ab61b67aa520c307bbd0e118
/py/trash/947_predict_0228-4.py
09ef21e955ea5f5f8ebc8ba007660cc1fa85d498
[ "MIT" ]
permissive
alaskaw/Microsoft-Malware-Prediction
26e56adb803184328d1a8f5a3423d5edda7fc400
103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80
refs/heads/master
2020-04-28T21:22:06.403542
2019-03-14T04:36:01
2019-03-14T04:36:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,407
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 16:52:33 2019

@author: kazuki.onodera
"""

import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from sklearn.externals import joblib
from sklearn.metrics import roc_auc_score
import utils, utils_cat

utils.start(__file__)
#==============================================================================

SUBMIT_FILE_PATH = '../output/0228-4.csv.gz'

COMMENT = 'nejumi + f009 f014 top50(f019)'

EXE_SUBMIT = True

SEED = np.random.randint(9999)
print('SEED:', SEED)

param = {
    'boosting_type': 'gbdt',
    'class_weight': None,
    'colsample_bytree': 0.71,
    'learning_rate': 0.05,
    'max_depth': -1,
    'min_child_samples': 10,
    'min_child_weight': 5,
    'min_split_gain': 0,
#    'n_estimators': n_estimators,
    'n_jobs': -1,
    'num_leaves': 64,
    'objective': 'binary',
#    'random_state': seed,
    'reg_alpha': 0,
    'reg_lambda': 0,
    'subsample': 0.71,
    'subsample_for_bin': 50000,
    'subsample_freq': 1,
    'max_bin': 255,
    'metric': 'auc',
    'nthread': -1,
    'verbose': -1,
#    'seed': seed,
#    'device': 'gpu',
#    'gpu_use_dp': False
}

NROUND = 19999
NFOLD = 5
VERBOSE_EVAL = 100
ESR = 100

col_drop = [
    'Census_SystemVolumeTotalCapacity',
]

USE_PREF_f019 = ['f019']
feature_f019 = pd.read_csv('LOG/imp_f019.csv').head(50).feature.tolist()

USE_PREF_all = ['f009', 'f014']

RESULT_DICT = {}
RESULT_DICT['file'] = SUBMIT_FILE_PATH

# =============================================================================
# def
# =============================================================================
def get_files(search: str, prefs: list):
    files = sorted(glob(search))
    # USE_PREF
    li = []
    for i in files:
        for j in prefs:
            if j in i:
                li.append(i)
                break
    files = li
    [print(i, f) for i, f in enumerate(files)]
    return files

# =============================================================================
# load
# =============================================================================
files_tr_f019 = get_files('../data/train_f*.f', USE_PREF_f019)
X_train_f019 = pd.concat([
    pd.read_feather(f) for f in tqdm(files_tr_f019, mininterval=30)
], axis=1)[feature_f019]

files_tr_all = get_files('../data/train_f*.f', USE_PREF_all)
X_train_all = pd.concat([
    pd.read_feather(f) for f in tqdm(files_tr_all, mininterval=30)
], axis=1)

X_train = pd.concat([X_train_f019, X_train_all,
                     joblib.load('../external/X_train_nejumi.pkl.gz')], axis=1)
del X_train_f019, X_train_all; gc.collect()

y_train = utils.load_target()['HasDetections']

# drop
if len(col_drop) > 0:
    X_train.drop(col_drop, axis=1, inplace=True)

if X_train.columns.duplicated().sum() > 0:
    raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')
print('no dup :) ')
print(f'X_train.shape {X_train.shape}')

gc.collect()

CAT = list(set(X_train.columns) & set(utils_cat.ALL))
print(f'CAT: {CAT}')

COL = X_train.columns.tolist()

RESULT_DICT['feature size'] = len(COL)
RESULT_DICT['category feature size'] = len(CAT)

# =============================================================================
# all sample
# =============================================================================
dtrain = lgb.Dataset(X_train, y_train.values,
                     categorical_feature=CAT, free_raw_data=False)
gc.collect()

#models = []
#for i in range(LOOP):
#    param['seed'] = np.random.randint(9999)
#    model = lgb.train(params=param, train_set=dtrain,
#                      num_boost_round=NROUND,
#                      )
#    model.save_model(f'../data/lgb{i}.model')
#    models.append(model)

# CV
param['seed'] = np.random.randint(9999)
ret, models = lgb.cv(param, dtrain, NROUND,
                     nfold=NFOLD,
                     stratified=True, shuffle=True,
                     feval=ex.eval_auc,
                     early_stopping_rounds=ESR,
                     verbose_eval=VERBOSE_EVAL,
                     categorical_feature=CAT,
                     seed=SEED)

for i, model in enumerate(models):
    model.save_model(f'../data/lgb{i}.model')

#models = []
#for i in range(LOOP):
#    model = lgb.Booster(model_file=f'../data/lgb{i}.model')
#    models.append(model)

imp = ex.getImp(models)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)

imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
utils.savefig_imp(imp, f'LOG/imp_{__file__}.png', x='total')

RESULT_DICT['nfold'] = NFOLD
RESULT_DICT['seed'] = SEED
RESULT_DICT['eta'] = param['learning_rate']
RESULT_DICT['NROUND'] = NROUND
RESULT_DICT['train AUC'] = ret['auc-mean'][-1]

del dtrain, X_train, y_train; gc.collect()

# =============================================================================
# test
# =============================================================================
files_te = get_files('../data/test_f*.f', USE_PREF_f019 + USE_PREF_all)
X_test = pd.concat([
    pd.read_feather(f) for f in tqdm(files_te, mininterval=30)
] + [joblib.load('../external/X_test_nejumi.pkl.gz')], axis=1)[COL]
gc.collect()

if X_test.columns.duplicated().sum() > 0:
    raise Exception(f'duplicated!: { X_test.columns[X_test.columns.duplicated()] }')
print('no dup :) ')
print(f'X_test.shape {X_test.shape}')

y_pred = pd.Series(0, index=X_test.index)
for model in tqdm(models):
    y_pred += pd.Series(model.predict(X_test)).rank()
y_pred /= y_pred.max()

sub = pd.read_csv('../input/sample_submission.csv.zip')
sub['HasDetections'] = y_pred.values

print('corr with best')
sub_best = pd.read_csv(utils.SUB_BEST)
print('with mybest:', sub['HasDetections'].corr(sub_best['HasDetections'],
                                                method='spearman'))

sub_best['HasDetections'] = np.load(utils.SUB_nejumi)
print('with nejumi:', sub['HasDetections'].corr(sub_best['HasDetections'],
                                                method='spearman'))

print("""
# =============================================================================
# write down these info to benchmark.xlsx
# =============================================================================
""")
[print(f'{k:<25}: {RESULT_DICT[k]}') for k in RESULT_DICT]
print("""
# =============================================================================
""")

# save
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
#utils.to_pkl_gzip(sub[['HasDetections']], SUBMIT_FILE_PATH.replace('.csv.gz', f'_{SEED}.pkl'))

# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
    print('submit')
    utils.submit(SUBMIT_FILE_PATH, COMMENT)

#==============================================================================
utils.end(__file__)
#utils.stop_instance()
[ "luvsic02@gmail.com" ]
luvsic02@gmail.com
ef3126368dbc5fb7408a2d35f7fc575b6e8fb814
5aee5e9274aad752f4fc1940030e9844ef8be17d
/HeavyIonsAnalysis/JetAnalysis/python/jets/akPu7CaloJetSequence_pPb_jec_cff.py
d5e8f0b11759a74be3f22036f437b49b4dd08852
[]
no_license
jiansunpurdue/5316_dmesonreco_hiforest
1fb65af11ea673646efe1b25bd49e88de9bf3b44
a02224ad63160d91aab00ed2f92d60a52f0fd348
refs/heads/master
2021-01-22T02:53:43.471273
2014-04-26T16:10:12
2014-04-26T16:10:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,574
py
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *

akPu7Calomatch = patJetGenJetMatch.clone(
    src = cms.InputTag("akPu7CaloJets"),
    matched = cms.InputTag("ak7HiGenJetsCleaned")
    )

akPu7Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPu7CaloJets"),
                                          matched = cms.InputTag("genParticles")
                                          )

akPu7Calocorr = patJetCorrFactors.clone(
    useNPV = False,
#    primaryVertices = cms.InputTag("hiSelectedVertex"),
    levels = cms.vstring('L2Relative', 'L3Absolute'),
    src = cms.InputTag("akPu7CaloJets"),
    payload = "AKPu7Calo_HI"
    )

akPu7CalopatJets = patJets.clone(jetSource = cms.InputTag("akPu7CaloJets"),
                                 jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu7Calocorr")),
                                 genJetMatch = cms.InputTag("akPu7Calomatch"),
                                 genPartonMatch = cms.InputTag("akPu7Caloparton"),
                                 jetIDMap = cms.InputTag("akPu7CaloJetID"),
                                 addBTagInfo = False,
                                 addTagInfos = False,
                                 addDiscriminators = False,
                                 addAssociatedTracks = False,
                                 addJetCharge = False,
                                 addJetID = False,
                                 getJetMCFlavour = False,
                                 addGenPartonMatch = True,
                                 addGenJetMatch = True,
                                 embedGenJetMatch = True,
                                 embedGenPartonMatch = True,
                                 embedCaloTowers = False,
                                 embedPFCandidates = False
                                 )

akPu7CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu7CalopatJets"),
                                                  genjetTag = 'ak7HiGenJetsCleaned',
                                                  rParam = 0.7,
                                                  matchJets = cms.untracked.bool(False),
                                                  matchTag = 'akPu7PFpatJets',
                                                  pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
                                                  trackTag = cms.InputTag("generalTracks"),
                                                  fillGenJets = True,
                                                  isMC = True,
                                                  genParticles = cms.untracked.InputTag("genParticles"),
                                                  eventInfoTag = cms.InputTag("generator")
                                                  )

akPu7CaloJetSequence_mc = cms.Sequence(akPu7Calomatch
                                       *
                                       akPu7Caloparton
                                       *
                                       akPu7Calocorr
                                       *
                                       akPu7CalopatJets
                                       *
                                       akPu7CaloJetAnalyzer
                                       )

akPu7CaloJetSequence_data = cms.Sequence(akPu7Calocorr
                                         *
                                         akPu7CalopatJets
                                         *
                                         akPu7CaloJetAnalyzer
                                         )

akPu7CaloJetSequence_jec = akPu7CaloJetSequence_mc
akPu7CaloJetSequence_mix = akPu7CaloJetSequence_mc

akPu7CaloJetSequence = cms.Sequence(akPu7CaloJetSequence_jec)
akPu7CaloJetAnalyzer.genPtMin = cms.untracked.double(1)
[ "sun229@purdue.edu" ]
sun229@purdue.edu
bc4dde6205e2dc08c3f1b2c7b8d97523b58c76b8
8b00e2b136636841b38eb182196e56f4721a1e4c
/trio/_core/_exceptions.py
45f21d389ae8d6f15662d6ff796adfea373bad80
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
xyicheng/trio
77c8c1e08e3aa4effe8cf04e879720ccfcdb7d33
fa091e2e91d196c2a57b122589a166949ea03103
refs/heads/master
2021-01-23T00:05:59.618483
2017-03-16T04:25:05
2017-03-16T04:25:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,364
py
import attr

# Re-exported
__all__ = [
    "TrioInternalError", "RunFinishedError", "WouldBlock", "Cancelled",
    "PartialResult",
]


class TrioInternalError(Exception):
    """Raised by :func:`run` if we encounter a bug in trio, or (possibly) a
    misuse of one of the low-level :mod:`trio.hazmat` APIs.

    This should never happen! If you get this error, please file a bug.

    Unfortunately, if you get this error it also means that all bets are off –
    trio doesn't know what is going on and its normal invariants may be void.
    (For example, we might have "lost track" of a task. Or lost track of all
    tasks.) Again, though, this shouldn't happen.
    """
    pass

TrioInternalError.__module__ = "trio"


class RunFinishedError(RuntimeError):
    """Raised by ``run_in_trio_thread`` and similar functions if the
    corresponding call to :func:`trio.run` has already finished.
    """
    pass

RunFinishedError.__module__ = "trio"


class WouldBlock(Exception):
    """Raised by ``X_nowait`` functions if ``X`` would block.
    """
    pass

WouldBlock.__module__ = "trio"


class Cancelled(BaseException):
    """Raised by blocking calls if the surrounding scope has been cancelled.

    You should let this exception propagate, to be caught by the relevant
    cancel scope. To remind you of this, it inherits from :exc:`BaseException`,
    like :exc:`KeyboardInterrupt` and :exc:`SystemExit`.

    .. note::

       In the US it's also common to see this word spelled "canceled", with
       only one "l". This is a
       `recent <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=5&smoothing=3&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__
       and
       `US-specific <https://books.google.com/ngrams/graph?content=canceled%2Ccancelled&year_start=1800&year_end=2000&corpus=18&smoothing=3&share=&direct_url=t1%3B%2Ccanceled%3B%2Cc0%3B.t1%3B%2Ccancelled%3B%2Cc0>`__
       innovation, and even in the US both forms are still commonly used. So
       for consistency with the rest of the world and with "cancellation"
       (which always has two "l"s), trio uses the two "l" spelling everywhere.
    """
    _scope = None

Cancelled.__module__ = "trio"


@attr.s(slots=True, frozen=True)
class PartialResult:
    # XX
    bytes_sent = attr.ib()
[ "njs@pobox.com" ]
njs@pobox.com
f9c568a46854f97c14938d17f5845aa1f9cf72f9
915ea8bcabf4da0833d241050ef226100f7bd233
/SDKs/Python/test/test_contract_item.py
d3f8d89ca8fd4f3b3678876eb22038d67bad2eb9
[ "BSD-2-Clause" ]
permissive
parserrr/API-Examples
03c3855e2aea8588330ba6a42d48a71eb4599616
0af039afc104316f1722ee2ec6d2881abd3fbc07
refs/heads/master
2020-07-10T22:17:24.906233
2019-08-26T03:06:21
2019-08-26T03:06:21
204,382,917
0
0
null
2019-08-26T02:48:16
2019-08-26T02:48:15
null
UTF-8
Python
false
false
922
py
# coding: utf-8

"""
    MINDBODY Public API

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)  # noqa: E501

    OpenAPI spec version: v6
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import unittest

import swagger_client
from swagger_client.models.contract_item import ContractItem  # noqa: E501
from swagger_client.rest import ApiException


class TestContractItem(unittest.TestCase):
    """ContractItem unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testContractItem(self):
        """Test ContractItem"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.contract_item.ContractItem()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
[ "christopher.volpi@mindbodyonline.com" ]
christopher.volpi@mindbodyonline.com
537ecd9ff7dea52514e94a67ec8488f4a88abd28
10f1f4ce92c83d34de1531e8e891f2a074b3fefd
/graph/gcn_utils/feeder.py
9b012bf3355a26228cac9c53bbd94c997bfe56d8
[ "MIT" ]
permissive
sourabhyadav/test_track
d88c4d35753d2b21e3881fc10233bf7bbb1e2cec
d2b4813aaf45dd35db5de3036eda114ef14d5022
refs/heads/master
2021-01-06T12:38:56.883549
2020-02-05T07:08:46
2020-02-05T07:08:46
241,328,706
1
0
MIT
2020-02-18T10:06:14
2020-02-18T10:06:13
null
UTF-8
Python
false
false
2,751
py
'''
    Author: Guanghan Ning
    E-mail: guanghan.ning@jd.com
    October 24th, 2018

    Feeder of Siamese Graph Convolutional Networks for Pose Tracking

    Code partially borrowed from:
    https://github.com/yysijie/st-gcn/blob/master/feeder/feeder.py
'''

# sys
import os
import sys
import numpy as np
import random
import pickle
import json

# torch
import torch
import torch.nn as nn
from torchvision import datasets, transforms

# operation
from . import tools

import random


class Feeder(torch.utils.data.Dataset):
    """ Feeder of PoseTrack Dataset

    Arguments:
        data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M)
        num_person_in: The number of people the feeder can observe in the input sequence
        num_person_out: The number of people the feeder in the output sequence
        debug: If true, only use the first 100 samples
    """

    def __init__(self,
                 data_path,
                 data_neg_path,
                 ignore_empty_sample=True,
                 debug=False):
        self.debug = debug
        self.data_path = data_path
        self.neg_data_path = data_neg_path
        self.ignore_empty_sample = ignore_empty_sample

        self.load_data()

    def load_data(self):
        with open(self.data_path, 'rb') as handle:
            self.graph_pos_pair_list_all = pickle.load(handle)
        with open(self.neg_data_path, 'rb') as handle:
            self.graph_neg_pair_list_all = pickle.load(handle)

        # output data shape (N, C, T, V, M)
        self.N = min(len(self.graph_pos_pair_list_all), len(self.graph_neg_pair_list_all))  # sample
        self.C = 2   # channel
        self.T = 1   # frame
        self.V = 15  # joint
        self.M = 1   # person

    def __len__(self):
        return self.N

    def __iter__(self):
        return self

    def __getitem__(self, index):
        # randomly add negative samples
        random_num = random.uniform(0, 1)
        if random_num > 0.5:
        #if False:
            # output shape (C, T, V, M)
            # get data
            sample_graph_pair = self.graph_pos_pair_list_all[index]
            label = 1  # a pair should match
        else:
            sample_graph_pair = self.graph_neg_pair_list_all[index]
            label = 0  # a pair does not match

        data_numpy_pair = []
        for siamese_id in range(2):
            # fill data_numpy
            data_numpy = np.zeros((self.C, self.T, self.V, 1))
            pose = sample_graph_pair[:][siamese_id]
            data_numpy[0, 0, :, 0] = [x[0] for x in pose]
            data_numpy[1, 0, :, 0] = [x[1] for x in pose]
            data_numpy_pair.append(data_numpy)

        return data_numpy_pair[0], data_numpy_pair[1], label
[ "chenhaomingbob@163.com" ]
chenhaomingbob@163.com
05a2d22595769aabb8ba1288219cbc5896aff69b
837fcd0d7e40de15f52c73054709bd40264273d2
/practices_loop-master/sum_user_quit.py
7d4bd070a2e7a364a41b6719421b8247f5090e2f
[]
no_license
NEHAISRANI/Python_Programs
dee9e05ac174a4fd4dd3ae5e96079e10205e18f9
aa108a56a0b357ca43129e59377ac35609919667
refs/heads/master
2020-11-25T07:20:00.484973
2020-03-08T12:17:39
2020-03-08T12:17:39
228,554,399
0
1
null
2020-10-01T06:41:20
2019-12-17T07:04:31
Python
UTF-8
Python
false
false
333
py
# In this program, if the user inputs 4 then it sums all numbers from the start
# to the end; if the user inputs "quit" then the program exits.
user = raw_input("enter your number")  # Python 2 input (returns a string)
index = 1
var1 = 0
while True:
    # check for "quit" before the numeric comparison; the original compared
    # index <= user while user was still a string, which only "worked" by
    # Python 2's cross-type comparison rules
    if user == "quit":
        break
    user = int(user)
    if index <= user:
        var1 = var1+index
        index = index+1
    else:
        break
if var1 != 0:
    print var1
[ "nehai18@navgurukul.org" ]
nehai18@navgurukul.org
ae4c1c1b0df6cf9a31d0f6d154fe645dd8e7fe8e
fd5c2d6e8a334977cda58d4513eb3385b431a13a
/extract_census_doc.py
a1445f608f735d677f398b8b2b123c44cf91d16e
[ "MIT" ]
permissive
censusreporter/census-api
817c616b06f6b1c70c7b3737f82f45a80544c44d
c8d2c04c7be19cdee1000001772adda541710a80
refs/heads/master
2023-07-28T06:17:26.572796
2023-07-05T20:37:03
2023-07-05T20:37:03
9,879,953
146
52
MIT
2022-07-11T07:16:19
2013-05-06T05:24:57
Python
UTF-8
Python
false
false
7,414
py
#!/bin/python
# NOTE: Python 2 code (print statement, builtin reduce).

import psycopg2
import psycopg2.extras
import json
from collections import OrderedDict

conn = psycopg2.connect(database='postgres')
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)

state = 'IL'
logrecno = '89'  # Evanston city, IL

def sum(data, *columns):  # shadows the builtin sum; adds the named columns while tolerating NULLs
    def reduce_fn(x, y):
        if x and y:
            return x + y
        elif x and not y:
            return x
        elif y and not x:
            return y
        else:
            return None
    return reduce(reduce_fn, map(lambda col: data[col], columns))

def maybe_int(i):
    return int(i) if i else i

doc = dict(population=dict(), geography=dict(), education=dict())

cur.execute("SELECT * FROM acs2010_1yr.geoheader WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['geography'] = dict(name=data['name'], stusab=data['stusab'], sumlevel=data['sumlevel'])

cur.execute("SELECT * FROM acs2010_1yr.B01002 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['population']['median_age'] = dict(total=maybe_int(data['b010020001']),
                                       male=maybe_int(data['b010020002']),
                                       female=maybe_int(data['b010020003']))

cur.execute("SELECT * FROM acs2010_1yr.B01003 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['population']['total'] = maybe_int(data['b010030001'])

cur.execute("SELECT * FROM acs2010_1yr.B01001 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['population']['gender'] = OrderedDict([
    ('0-9', dict(male=maybe_int(sum(data, 'b010010003', 'b010010004')),
                 female=maybe_int(sum(data, 'b010010027', 'b010010028')))),
    ('10-19', dict(male=maybe_int(sum(data, 'b010010005', 'b010010006', 'b010010007')),
                   female=maybe_int(sum(data, 'b010010029', 'b010010030', 'b010010031')))),
    ('20-29', dict(male=maybe_int(sum(data, 'b010010008', 'b010010009', 'b010010010', 'b010010011')),
                   female=maybe_int(sum(data, 'b010010032', 'b010010033', 'b010010034', 'b010010035')))),
    ('30-39', dict(male=maybe_int(sum(data, 'b010010012', 'b010010013')),
                   female=maybe_int(sum(data, 'b010010036', 'b010010037')))),
    ('40-49', dict(male=maybe_int(sum(data, 'b010010014', 'b010010015')),
                   female=maybe_int(sum(data, 'b010010038', 'b010010039')))),
    ('50-59', dict(male=maybe_int(sum(data, 'b010010016', 'b010010017')),
                   female=maybe_int(sum(data, 'b010010040', 'b010010041')))),
    ('60-69', dict(male=maybe_int(sum(data, 'b010010018', 'b010010019', 'b010010020', 'b010010021')),
                   female=maybe_int(sum(data, 'b010010042', 'b010010043', 'b010010044', 'b010010045')))),
    ('70-79', dict(male=maybe_int(sum(data, 'b010010022', 'b010010023')),
                   female=maybe_int(sum(data, 'b010010046', 'b010010047')))),
    ('80+', dict(male=maybe_int(sum(data, 'b010010024', 'b010010025')),
                 female=maybe_int(sum(data, 'b010010048', 'b010010049'))))
])

cur.execute("SELECT * FROM acs2010_1yr.B15001 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['education']['attainment'] = OrderedDict([
    ('<9th Grade', maybe_int(sum(data, 'b150010004', 'b150010012', 'b150010020', 'b150010028', 'b150010036',
                                       'b150010045', 'b150010053', 'b150010061', 'b150010069', 'b150010077'))),
    ('9th-12th Grade (No Diploma)', maybe_int(sum(data, 'b150010005', 'b150010013', 'b150010021', 'b150010029', 'b150010037',
                                                        'b150010046', 'b150010054', 'b150010062', 'b150010070', 'b150010078'))),
    ('High School Grad/GED/Alt', maybe_int(sum(data, 'b150010006', 'b150010014', 'b150010022', 'b150010030', 'b150010038',
                                                     'b150010047', 'b150010055', 'b150010063', 'b150010071', 'b150010079'))),
    ('Some College (No Degree)', maybe_int(sum(data, 'b150010007', 'b150010015', 'b150010023', 'b150010031', 'b150010039',
                                                     'b150010048', 'b150010056', 'b150010064', 'b150010072', 'b150010080'))),
    ('Associate Degree', maybe_int(sum(data, 'b150010008', 'b150010016', 'b150010024', 'b150010032', 'b150010040',
                                             'b150010049', 'b150010057', 'b150010065', 'b150010073', 'b150010081'))),
    ('Bachelor Degree', maybe_int(sum(data, 'b150010009', 'b150010017', 'b150010025', 'b150010033', 'b150010041',
                                            'b150010050', 'b150010058', 'b150010066', 'b150010074', 'b150010082'))),
    ('Graduate or Professional Degree', maybe_int(sum(data, 'b150010010', 'b150010018', 'b150010026', 'b150010034', 'b150010042',
                                                            'b150010051', 'b150010059', 'b150010067', 'b150010075', 'b150010083')))
])

cur.execute("SELECT * FROM acs2010_1yr.C16001 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['language'] = OrderedDict([
    ('English Only', maybe_int(data['c160010002'])),
    ('Spanish', maybe_int(data['c160010003'])),
    ('French', maybe_int(data['c160010004'])),
    ('German', maybe_int(data['c160010005'])),
    ('Slavic', maybe_int(data['c160010006'])),
    ('Other Indo-European', maybe_int(data['c160010007'])),
    ('Korean', maybe_int(data['c160010008'])),
    ('Chinese', maybe_int(data['c160010009'])),
    ('Vietnamese', maybe_int(data['c160010010'])),
    ('Tagalong', maybe_int(data['c160010011'])),  # sic in the original; presumably "Tagalog"
    ('Other Asian', maybe_int(data['c160010012'])),
    ('Other & Unspecified', maybe_int(data['c160010013']))
])

cur.execute("SELECT * FROM acs2010_1yr.B27010 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['insurance'] = OrderedDict([
    ('No Insurance', maybe_int(sum(data, 'b270100017', 'b270100033', 'b270100050', 'b270100053'))),
    ('Employer Only', maybe_int(sum(data, 'b270100004', 'b270100020', 'b270100036', 'b270100054'))),
    ('Direct-Purchase Only', maybe_int(sum(data, 'b270100005', 'b270100021', 'b270100037', 'b270100055'))),
    ('Medicare Only', maybe_int(sum(data, 'b270100006', 'b270100022', 'b270100038'))),
    ('Medicaid/Means-Tested Only', maybe_int(sum(data, 'b270100007', 'b270100023', 'b270100039'))),
    ('Tricare/Military Only', maybe_int(sum(data, 'b270100008', 'b270100024', 'b270100040', 'b270100056'))),
    ('VA Health Care Only', maybe_int(sum(data, 'b270100009', 'b270100025', 'b270100041', 'b270100057'))),
    ('Employer+Direct Purchase', maybe_int(sum(data, 'b270100011', 'b270100027', 'b270100043', 'b270100058'))),
    ('Employer+Medicare', maybe_int(sum(data, 'b270100012', 'b270100028', 'b270100044', 'b270100059'))),
    ('Direct+Medicare', maybe_int(sum(data, 'b270100045', 'b270100060'))),
    ('Medicare+Medicaid', maybe_int(sum(data, 'b270100013', 'b270100029', 'b270100046', 'b270100061'))),
    ('Other Private-Only', maybe_int(sum(data, 'b270100014', 'b270100030', 'b270100047', 'b270100062'))),
    ('Other Public-Only', maybe_int(sum(data, 'b270100015', 'b270100031', 'b270100048', 'b270100064'))),
    ('Other', maybe_int(sum(data, 'b270100016', 'b270100032', 'b270100049', 'b270100065')))
])

print json.dumps(doc, indent=2)
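The script above is Python 2 and shadows the builtin sum with a NULL-tolerant column adder. A minimal Python 3 sketch of those two helpers, assuming the same RealDictCursor-style row dicts (the name sum_columns is illustrative, chosen to avoid shadowing the builtin):

from functools import reduce

def sum_columns(data, *columns):
    """Add the named columns, treating None (SQL NULL) as missing."""
    def add(x, y):
        if x is not None and y is not None:
            return x + y
        return x if x is not None else y
    return reduce(add, (data[col] for col in columns))

def maybe_int(i):
    return int(i) if i else i

# usage: sum_columns({'a': 3, 'b': None, 'c': 4}, 'a', 'b', 'c') -> 7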
[ "ian.dees@gmail.com" ]
ian.dees@gmail.com
5d565e7d89b2cf7e44965b839844bcc6a47e0e56
ecbbc5cf8b49de00dd956386ea7cf31951aecbf8
/src/KalmanFilter.py
d0005ea5d794108215ebbe567191ff497c0fe45c
[]
no_license
connorlee77/ardrone_stateestimation
9e49339c6d916a146a709acc4adf947453c9d626
253722cf1940fd368bc10dcd90be0c0113bb4339
refs/heads/master
2021-01-10T13:13:57.845898
2016-03-18T08:53:18
2016-03-18T08:53:18
53,226,979
0
1
null
null
null
null
UTF-8
Python
false
false
1,290
py
import numpy as np
import matplotlib.pyplot as plt
import rospy


class KalmanFilter:

    def __init__(self, A, P, R, Q, H, B, dimension):
        self.A = A
        self.P = P
        self.x_k = 0
        self.kalmanGain = 0
        self.R = R  # constant
        self.Q = Q  # constant
        self.H = H
        self.B = B
        self.dimensions = dimension

    def predictState(self, u_k):
        #rospy.loginfo("predict_state1")
        #rospy.loginfo(self.x_k)
        self.x_k = np.add(np.dot(self.A, self.x_k), np.dot(self.B, u_k))
        #rospy.loginfo("predict_state2")
        #rospy.loginfo(self.x_k)
        self.P = np.add(np.dot(np.dot(self.A, self.P), np.transpose(self.A)), self.Q)

    def getKalmanGain(self):
        first = np.dot(self.P, np.transpose(self.H))
        second = np.linalg.inv(np.add(np.dot(np.dot(self.H, self.P), np.transpose(self.H)), self.R))
        self.kalmanGain = np.dot(first, second)

    def update(self, z_k):
        residual = np.subtract(z_k, np.dot(self.H, self.x_k))
        #chad = z_k
        #rospy.loginfo("update1")
        #rospy.loginfo(chad)
        self.x_k = np.add(self.x_k, np.dot(self.kalmanGain, residual))
        #rospy.loginfo("update2")
        #rospy.loginfo(self.x_k)
        self.P = np.dot(np.subtract(np.identity(self.dimensions),
                                    np.dot(self.kalmanGain, self.H)), self.P)
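The class ships with no usage example; a minimal sketch of driving it as a 1-D random-walk filter follows. All matrices and noise values here are hypothetical, chosen only to make the predictState → getKalmanGain → update call sequence concrete:

import numpy as np

# 1-D random-walk model: x_k = x_{k-1} + u_k, measured directly.
A = np.eye(1); B = np.eye(1); H = np.eye(1)
P = np.eye(1) * 1.0    # initial state covariance (illustrative)
Q = np.eye(1) * 0.01   # process noise (illustrative)
R = np.eye(1) * 0.1    # measurement noise (illustrative)

kf = KalmanFilter(A, P, R, Q, H, B, dimension=1)
for z in [1.1, 0.9, 1.05]:               # a few fake measurements
    kf.predictState(np.zeros((1, 1)))    # no control input
    kf.getKalmanGain()
    kf.update(np.array([[z]]))
print(kf.x_k)  # filtered state estimate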
[ "connorlee77@gmail.com" ]
connorlee77@gmail.com
aa0a9e73022a1268c8dc56985d5d5848748aa64e
3fe272eea1c91cc5719704265eab49534176ff0d
/scripts/item/consume_2439898.py
fdc636b193089e8c5f0e75eb0dac9c8a17c50c85
[ "MIT" ]
permissive
Bratah123/v203.4
e72be4843828def05592298df44b081515b7ca68
9cd3f31fb2ef251de2c5968c75aeebae9c66d37a
refs/heads/master
2023-02-15T06:15:51.770849
2021-01-06T05:45:59
2021-01-06T05:45:59
316,366,462
1
0
MIT
2020-12-18T17:01:25
2020-11-27T00:50:26
Java
UTF-8
Python
false
false
217
py
# Created by MechAviv
# Valentine Damage Skin | (2439898)
if sm.addDamageSkin(2439898):
    sm.chat("'Valentine Damage Skin' Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem()
[ "pokesmurfuwu@gmail.com" ]
pokesmurfuwu@gmail.com
acc0cbbbbef590f361a5a6744807f18458d0e078
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/130/usersdata/228/34476/submittedfiles/al8.py
99d23561646b83280774cd80f4ab4ad83803ccaf
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
141
py
# -*- coding: utf-8 -*-
n = int(input('digite um valor:'))  # "enter a value"; the closing parenthesis was missing in the original
nfat = 1
for i in range(2, n+1):
    nfat = nfat+i  # note: this accumulates a sum; a factorial would use nfat = nfat*i
print(nfat)
[ "rafael.mota@ufca.edu.br" ]
rafael.mota@ufca.edu.br
8e9f1d89a0a10175a73f79346baaea3a012c4479
3a5ea75a5039207104fd478fb69ac4664c3c3a46
/vega/algorithms/nas/modnas/estim/dist_backend/base.py
1725fd222057fa4b91024747947592087e159828
[ "MIT" ]
permissive
fmsnew/vega
e3df25efa6af46073c441f41da4f2fdc4929fec5
8e0af84a57eca5745fe2db3d13075393838036bb
refs/heads/master
2023-06-10T04:47:11.661814
2021-06-26T07:45:30
2021-06-26T07:45:30
285,174,199
0
0
MIT
2020-08-11T14:19:09
2020-08-05T03:59:49
Python
UTF-8
Python
false
false
1,712
py
# -*- coding:utf-8 -*-

# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.

"""Distributed remote client and server."""
import threading


class RemoteBase():
    """Distributed remote client class."""

    def __init__(self):
        super().__init__()
        self.on_done = None
        self.on_failed = None

    def call(self, func, *args, on_done=None, on_failed=None, **kwargs):
        """Call function on remote client with callbacks."""
        self.on_done = on_done
        self.on_failed = on_failed
        self.th_rpc = threading.Thread(target=self.rpc, args=(func,) + args, kwargs=kwargs)
        self.th_rpc.start()

    def close(self):
        """Close the remote client."""
        raise NotImplementedError

    def rpc(self, func, *args, **kwargs):
        """Call function on remote client."""
        raise NotImplementedError

    def on_rpc_done(self, ret):
        """Invoke callback when remote call finishes."""
        self.ret = ret
        self.on_done(ret)

    def on_rpc_failed(self, ret):
        """Invoke callback when remote call fails."""
        self.on_failed(ret)


class WorkerBase():
    """Distributed remote worker (server) class."""

    def run(self, estim):
        """Run worker."""
        raise NotImplementedError

    def close(self):
        """Close worker."""
        raise NotImplementedError
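RemoteBase.call hands the function to rpc on a background thread and reports through the on_done/on_failed callbacks; rpc itself is abstract. A minimal sketch of a concrete subclass, a hypothetical LocalRemote (not part of the module) that simply executes the call in-process, shows how the callback plumbing fires:

class LocalRemote(RemoteBase):
    """Toy remote that runs the function locally and reports via callbacks."""

    def rpc(self, func, *args, **kwargs):
        # executed on the thread started by call()
        try:
            self.on_rpc_done(func(*args, **kwargs))
        except Exception as e:
            self.on_rpc_failed(e)

    def close(self):
        pass

# usage: prints 42 once the worker thread completes
r = LocalRemote()
r.call(lambda x: x * 2, 21, on_done=print, on_failed=print)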
[ "zhangjiajin@huawei.com" ]
zhangjiajin@huawei.com
682039f30aaa220caa90f937bbaf5bd7075dd986
fad752f7e4ae9c9fae7a472634a712249fb6f83f
/sato/cli.py
9697a09e053b96555f2b63cdabb75bc724fcc61c
[ "Apache-2.0" ]
permissive
VIDA-NYU/sato
895da0de833681335ec5122c4487555d2285f351
8fb51787b36114df13f54c1acd11df12a66ad3e4
refs/heads/master
2021-07-13T16:55:53.621521
2020-11-26T01:01:07
2020-11-26T01:01:07
225,955,500
0
0
Apache-2.0
2019-12-04T20:56:16
2019-12-04T20:56:15
null
UTF-8
Python
false
false
2,252
py
import click
import pandas as pd

from sato.predict import evaluate


@click.command('predict')
@click.option(
    '-n', '--count',
    default=1000,
    help='Sample size'
)
@click.argument(
    'src',
    nargs=-1,
    type=click.Path(file_okay=True, dir_okay=False, exists=True)
)
def run_predict(count, src):
    """Predict column types for CSV file(s)."""
    for filename in src:
        # This is a very basic attempt to determine the file compression and
        # delimiter from the suffix. Currently, the following four options are
        # recognized: '.csv', '.csv.gz', '.tsv', '.tsv.gz'. Files ending with
        # '.gz' are assumed to be compressed by 'gzip'; all other files are
        # considered as uncompressed. The delimiter for '.csv' files is ',' and
        # for '.tsv' files the delimiter is '\t'.
        if filename.endswith('.csv'):
            compression = None
            delimiter = ','
        elif filename.endswith('.csv.gz'):
            compression = 'gzip'
            delimiter = ','
        elif filename.endswith('.tsv'):
            compression = None
            delimiter = '\t'
        elif filename.endswith('.tsv.gz'):
            compression = 'gzip'
            delimiter = '\t'
        else:
            raise ValueError('unrecognized file format')
        try:
            df = pd.read_csv(
                filename,
                delimiter=delimiter,
                compression=compression,
                low_memory=False
            )
            rows = df.shape[0]
            print('\n{}'.format(filename))
            print('{}'.format('-' * len(filename)))
            if rows == 0:
                # Skip empty files.
                continue
            if rows > count:
                # Take sample for large files.
                df = df.sample(n=count, random_state=1)
            # Evaluate data frame to get predicted column labels.
            labels = evaluate(df)
            for i in range(len(df.columns)):
                print('%s: %s' % (df.columns[i], labels[i]))
        except Exception as ex:
            print('error {}'.format(ex))


@click.group()
def cli():  # pragma: no cover
    """Command line interface for SATO."""
    pass


cli.add_command(run_predict)
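To exercise the command wiring without an installed entry point, click's test runner can invoke the group directly. A sketch (the CSV path is hypothetical; click.Path(exists=True) will reject it unless the file is present):

from click.testing import CliRunner
from sato.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ['predict', '-n', '500', 'tables/example.csv'])
print(result.output)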
[ "heiko.muller@gmail.com" ]
heiko.muller@gmail.com
f531d8e47a46f16095ff0a4522cfedaf5eca3518
b8688a6c1824335808182768c3349624722abba6
/uamqp/constants.py
987bcaef27fd21d840f5b9e8ca36ca97fd73228c
[ "MIT", "LicenseRef-scancode-generic-cla" ]
permissive
gdooper/azure-uamqp-python
65d64e19190921c16cc65947ddcb01f686cd4277
8a71c86c7598b439afea28f216a97437b3ebaaed
refs/heads/master
2020-03-30T00:33:55.710726
2018-05-29T16:06:34
2018-05-29T16:06:34
150,530,862
0
0
MIT
2018-09-27T04:57:31
2018-09-27T04:57:31
null
UTF-8
Python
false
false
3,876
py
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------

from enum import Enum

from uamqp import c_uamqp


DEFAULT_AMQPS_PORT = 5671
AUTH_EXPIRATION_SECS = c_uamqp.AUTH_EXPIRATION_SECS
AUTH_REFRESH_SECS = c_uamqp.AUTH_REFRESH_SECS

STRING_FILTER = b"apache.org:selector-filter:string"

OPERATION = b"operation"
READ_OPERATION = b"READ"
MGMT_TARGET = b"$management"

MESSAGE_SEND_RETRIES = 3

BATCH_MESSAGE_FORMAT = c_uamqp.AMQP_BATCH_MESSAGE_FORMAT
MAX_FRAME_SIZE_BYTES = c_uamqp.MAX_FRAME_SIZE_BYTES
MAX_MESSAGE_LENGTH_BYTES = c_uamqp.MAX_MESSAGE_LENGTH_BYTES


class MessageState(Enum):
    WaitingToBeSent = 0
    WaitingForAck = 1
    Complete = 2
    Failed = 3


DONE_STATES = (MessageState.Complete, MessageState.Failed)


class MessageReceiverState(Enum):
    Idle = c_uamqp.MESSAGE_RECEIVER_STATE_IDLE
    Opening = c_uamqp.MESSAGE_RECEIVER_STATE_OPENING
    Open = c_uamqp.MESSAGE_RECEIVER_STATE_OPEN
    Closing = c_uamqp.MESSAGE_RECEIVER_STATE_CLOSING
    Error = c_uamqp.MESSAGE_RECEIVER_STATE_ERROR


class MessageSendResult(Enum):
    Ok = c_uamqp.MESSAGE_SEND_OK
    Error = c_uamqp.MESSAGE_SEND_ERROR
    Timeout = c_uamqp.MESSAGE_SEND_TIMEOUT
    Cancelled = c_uamqp.MESSAGE_SEND_CANCELLED


class MessageSenderState(Enum):
    Idle = c_uamqp.MESSAGE_SENDER_STATE_IDLE
    Opening = c_uamqp.MESSAGE_SENDER_STATE_OPENING
    Open = c_uamqp.MESSAGE_SENDER_STATE_OPEN
    Closing = c_uamqp.MESSAGE_SENDER_STATE_CLOSING
    Error = c_uamqp.MESSAGE_SENDER_STATE_ERROR


class ManagementLinkState(Enum):
    Ok = c_uamqp.AMQP_MANAGEMENT_OPEN_OK
    Error = c_uamqp.AMQP_MANAGEMENT_OPEN_ERROR
    Cancelled = c_uamqp.AMQP_MANAGEMENT_OPEN_CANCELLED


class ManagementOperationResult(Enum):
    Ok = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_OK
    Error = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_ERROR
    BadStatus = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_FAILED_BAD_STATUS
    Closed = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_INSTANCE_CLOSED


class Role(Enum):
    Sender = c_uamqp.ROLE_SENDER
    Receiver = c_uamqp.ROLE_RECEIVER


class SenderSettleMode(Enum):
    Unsettled = c_uamqp.SENDER_SETTLE_MODE_UNSETTLED
    Settled = c_uamqp.SENDER_SETTLE_MODE_SETTLED
    Mixed = c_uamqp.SENDER_SETTLE_MODE_MIXED


class ReceiverSettleMode(Enum):
    PeekLock = c_uamqp.RECEIVER_SETTLE_MODE_PEEKLOCK
    ReceiveAndDelete = c_uamqp.RECEIVER_SETTLE_MODE_RECEIVEANDDELETE


class CBSOperationResult(Enum):
    Ok = c_uamqp.CBS_OPERATION_RESULT_OK
    Error = c_uamqp.CBS_OPERATION_RESULT_CBS_ERROR
    Failed = c_uamqp.CBS_OPERATION_RESULT_OPERATION_FAILED
    Closed = c_uamqp.CBS_OPERATION_RESULT_INSTANCE_CLOSED


class CBSOpenState(Enum):
    Ok = c_uamqp.CBS_OPEN_COMPLETE_OK
    Error = c_uamqp.CBS_OPEN_COMPLETE_ERROR
    Cancelled = c_uamqp.CBS_OPEN_COMPLETE_CANCELLED


class CBSAuthStatus(Enum):
    Ok = c_uamqp.AUTH_STATUS_OK
    Idle = c_uamqp.AUTH_STATUS_IDLE
    InProgress = c_uamqp.AUTH_STATUS_IN_PROGRESS
    Timeout = c_uamqp.AUTH_STATUS_TIMEOUT
    RefreshRequired = c_uamqp.AUTH_STATUS_REFRESH_REQUIRED
    Expired = c_uamqp.AUTH_STATUS_EXPIRED
    Error = c_uamqp.AUTH_STATUS_ERROR
    Failure = c_uamqp.AUTH_STATUS_FAILURE


class MgmtExecuteResult(Enum):
    Ok = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_OK
    Error = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_ERROR
    Failed = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_FAILED_BAD_STATUS
    Closed = c_uamqp.AMQP_MANAGEMENT_EXECUTE_OPERATION_INSTANCE_CLOSED


class MgmtOpenStatus(Enum):
    Ok = c_uamqp.AMQP_MANAGEMENT_OPEN_OK
    Error = c_uamqp.AMQP_MANAGEMENT_OPEN_ERROR
    Cancelled = c_uamqp.AMQP_MANAGEMENT_OPEN_CANCELLED
[ "antisch@microsoft.com" ]
antisch@microsoft.com
b61e50e76ad27bc63647d402ed7b18c3b7bc2aae
9d1701a88644663277342f3a12d9795cd55a259c
/CSC148/07 Sorting/runtime.py
6d1020dee852cd090d7eccdd33874dd33c64eccf
[]
no_license
xxcocoymlxx/Study-Notes
cb05c0e438b0c47b069d6a4c30dd13ab97e4ee6d
c7437d387dc2b9a8039c60d8786373899c2e28bd
refs/heads/master
2023-01-13T06:09:11.005038
2020-05-19T19:37:45
2020-05-19T19:37:45
252,774,764
2
0
null
2022-12-22T15:29:26
2020-04-03T15:44:44
Jupyter Notebook
UTF-8
Python
false
false
3,989
py
VIDEO: https://www.youtube.com/watch?v=6Ol2JbwoJp0

NOTES ON THE PDF:

def max_segment_sum(L):
    '''(list of int) -> int
    Return maximum segment sum of L.
    '''
    max_so_far = 0
    for lower in range(len(L)):
        for upper in range(lower, len(L)):
            sum = 0
            for i in range(lower, upper + 1):
                sum = sum + L[i]
            max_so_far = max(max_so_far, sum)
    return max_so_far

What is the running time of this algorithm?
We want an answer in terms of n, not clock time.

I want you to find the statement that executes most often; count the number of
times that it runs.

Statement that runs most often is one in the inner-most loop:
    sum = sum + L[i]

Now let's upper-bound the number of times that this statement runs.
lower loop runs n times.
upper loop runs at most n times for each iteration of the lower loop.
i loop runs at most n iterations for each iteration of the upper loop.

Now we can upper-bound the total number of times that the inner-most statement
runs. At most n*n*n = n^3.

So we have an n^3 algorithm.

More precise: 2+2n^2+n^3 steps.
Is it worth it? Or should we just stick to n^3?

Prove that 2+2n^2+n^3 is O(n^3).
This means that we have to show 2+2n^2+n^3 is eventually <= kn^3 for some k > 0.

2+2n^2+n^3 <= 2n^3+2n^2+n^3 = 3n^3+2n^2 <= 3n^3+2n^3 = 5n^3

This is our proof that 2+2n^2+n^3 is O(n^3).

----------

We know that the segment-sum code is O(n^3).
Is the code O(n^4) too? Yes
Is it O(n^5)? Yes
Is it O(2^n)? Yes
Is it O(n^2)? No

Big oh is an upper bound. If you make it worse (e.g. n^3 to n^4), it's just a
worse upper bound. Still technically correct though.
But I want the most accurate bound; the lowest upper bound.

----------

I'd like the big oh runtime for the following function.
O(1), O(log n), O(n), O(n log n), O(n^2), O(n^3), ... O(2^n)...
-I want the worst-case upper bound

def bigoh1(n):
    sum = 0
    for i in range(100, n):
        sum = sum+1
    print(sum)

It's O(n). It takes something like n-100 steps, which you can prove is O(n)!

----------

Let's do an ordering of best (fastest) to worst (slowest) algorithm efficiencies:

The best one is O(1), a constant-time algorithm.
No matter how big your input, your runtime does not increase.
Example:
def f(n):
    print('hello world')
-Return the first element of a list.
-Return the maximum of two characters.

Between constant and linear is O(log n).
Example: binary search

Getting worse... O(n), linear algorithms:
-Printing all elements in a list
-Finding the maximum element in a list

A little bit worse is O(n log n).
Examples: quicksort (on average), mergesort

Slower is O(n^2): bubble sort, insertion sort, selection sort
Slower is O(n^3): maximum segment sum code
Slower is O(n^4), O(n^5)...
...
Eventually you get so bad that you can't even use them in practice:
O(2^n). As n increases by 1, you double the amount of time you take.
Even worse... O(n!). Like the permutation approach to finding all anagrams.
O(n^n)

Huge difference between O(n^k) polynomials and O(k^n) exponential functions.
O(n^2) and O(2^n): very different. O(n^2) is computable for reasonable-sized
input; O(2^n) is not.

----------

I'd like the big oh runtime for each of these functions.
e.g. O(1), O(log n), O(n), O(n log n), O(n^2), O(n^3), ... O(2^n)...
-I want the worst-case upper bound

def bigoh1(n):
    sum = 0
    for i in range(100, n):
        sum = sum+1
    print(sum)

O(n)

def bigoh2(n):
    sum = 0
    for i in range(1, n // 2):
        sum = sum + 1
    for j in range(1, n * n):
        sum = sum + 1
    print(sum)

First loop is n steps, second is n^2 steps.
n+n^2 = O(n^2)

def bigoh3(n):
    sum = 0
    if n % 2 == 0:
        for j in range(1, n * n):
            sum = sum + 1
    else:
        for k in range(5, n + 1):
            sum = sum + k
    print(sum)

If n is even, we do n^2 work. If n is odd, we do n work.
Remember that we want the worst-case. O(n^2)

def bigoh4(m, n):
    sum = 0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            sum = sum + 1
    print(sum)

O(n*m)
Not O(n^2). Not O(m^2).
[ "coco.yang@mail.utoronto.ca" ]
coco.yang@mail.utoronto.ca
dd7a3ac6d291dc2db98817190f8813c458576953
66dd570bf5945dcbd183ed3c0cf897c0359cbccd
/python/python语法/pyexercise/Exercise03_09.py
4560a8df9de30b98aa5d9640c98b118b4dc4a3be
[]
no_license
SamJ2018/LeetCode
302cc97626220521c8847d30b99858e63fa509f3
784bd0b1491050bbd80f5a0e2420467b63152d8f
refs/heads/master
2021-06-19T10:30:37.381542
2021-02-06T16:15:01
2021-02-06T16:15:01
178,962,481
0
0
null
null
null
null
UTF-8
Python
false
false
1,206
py
# Obtain input
name = input("Enter employee's name: ")
hours = eval(input("Enter number of hours worked in a week: "))
payRate = eval(input("Enter hourly pay rate: "))
fedTaxWithholdingRate = eval(input("Enter federal tax withholding rate: "))
stateTaxWithholdingRate = eval(input("Enter state tax withholding rate: "))

grossPay = hours * payRate
fedTaxWithholding = grossPay * fedTaxWithholdingRate
stateTaxWithholding = grossPay * stateTaxWithholdingRate
totalDeduction = fedTaxWithholding + stateTaxWithholding
netPay = grossPay - totalDeduction

# Obtain output
out = "Employee Name: " + name + "\n\n"
out += "Hours Worked: " + str(hours) + '\n'
out += "Pay Rate: $" + str(payRate) + '\n'
out += "Gross Pay: $" + str(grossPay) + '\n'
out += "Deductions:\n"
out += "  Federal Withholding (" + str(fedTaxWithholdingRate * 100) + \
       "%): $" + str(int(fedTaxWithholding * 100) / 100.0) + '\n'
out += "  State Withholding (" + str(stateTaxWithholdingRate * 100) + "%):" + \
       " $" + str(int(stateTaxWithholding * 100) / 100.0) + '\n'
out += "  Total Deduction:" + " $" + \
       str(int(totalDeduction * 100) / 100.0) + '\n'
out += "Net Pay:" + " $" + str(int(netPay * 100) / 100.0)

print(out)
[ "juksam@centos7.localdomain" ]
juksam@centos7.localdomain
e3ede7d4acdd774e7b8621e60be2e1b12dc0f0e1
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02845/s251805975.py
a8e1b9dedbc87deeb6d7dd5ca8fac2fa7aa26e80
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
434
py
import sys
readline = sys.stdin.readline

MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)


def main():
    n = int(readline())
    a = list(map(int, readline().split()))

    cnt = [0] * 3
    ans = 1
    for x in a:
        p = cnt.count(x)  # how many of the three counters currently equal x
        if p == 0:
            return print(0)  # no valid assignment; prints 0 and returns None
        ans *= p
        ans %= MOD
        cnt[cnt.index(x)] += 1
    print(ans)


if __name__ == '__main__':
    main()
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
d41c69e29c794cbabb1c2e1f208a21b4bf0f2f48
0e8b6f94467c25dd2440f7e2ea1519244e689620
/MarlinJobs/CalibrationConfigFiles/Stage27Config_5x5_30x30.py
3435a6f9a3f6a73455fa0470d23dcbb790425599
[]
no_license
StevenGreen1/HighEnergyPhotonAnalysis
97a661eaca2efd00472f1969855c724c9d505369
8a82ac57f56aad5bdbe99d4a5afb771592bc1725
refs/heads/master
2021-01-10T14:08:50.550184
2015-10-12T12:43:47
2015-10-12T12:43:47
43,491,318
0
0
null
null
null
null
UTF-8
Python
false
false
1,275
py
# Calibration config file for testing

# Digitisation Constants - ECal
CalibrECal = 42.4326603502

# Digitisation Constants ILDCaloDigi - HCal
CalibrHCalBarrel = 49.057884929
CalibrHCalEndcap = 54.1136311832
CalibrHCalOther = 29.2180288685

# Digitisation Constants NewLDCCaloDigi - HCal
CalibrHCal = -1

# Digitisation Constants - Muon Chamber
CalibrMuon = 56.7

# MIP Peak position in directed corrected SimCaloHit energy distributions
# used for realistic ECal and HCal digitisation options
CalibrECalMIP = -1
CalibrHCalMIP = 0.0004925

# MIP Peak position in directed corrected CaloHit energy distributions
# used for MIP definition in PandoraPFA
ECalToMIPCalibration = 158.73
HCalToMIPCalibration = 40.8163
MuonToMIPCalibration = 10.101

# EM and Had Scale Settings
ECalToEMGeVCalibration = 1.00062269867
HCalToEMGeVCalibration = 1.00062269867
ECalToHadGeVCalibration = 1.08773337955
HCalToHadGeVCalibration = 1.04823493932

# Pandora Threshold Cuts
ECalMIPThresholdPandora = 0.5
HCalMIPThresholdPandora = 0.3

# Hadronic Energy Truncation in HCal PandoraPFA
MaxHCalHitHadronicEnergy = 1000000.0

# Timing ECal
ECalBarrelTimeWindowMax = 1000000.0
ECalEndcapTimeWindowMax = 1000000.0

# Timing HCal
HCalBarrelTimeWindowMax = 1000000.0
HCalEndcapTimeWindowMax = 1000000.0
[ "sg1sg2sg3@hotmail.co.uk" ]
sg1sg2sg3@hotmail.co.uk
8eff0f0a7ccda0cc6e4779d87cd907c9f72549f8
f04fb8bb48e38f14a25f1efec4d30be20d62388c
/哈希表/204. 计数质数.py
2bd3e79467b7525a3d7e1a7e82f4074be703fff9
[]
no_license
SimmonsChen/LeetCode
d8ef5a8e29f770da1e97d295d7123780dd37e914
690b685048c8e89d26047b6bc48b5f9af7d59cbb
refs/heads/master
2023-09-03T01:16:52.828520
2021-11-19T06:37:19
2021-11-19T06:37:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
789
py
""" 统计所有小于非负整数 n 的质数的数量。 示例 1: 输入:n = 10 输出:4 解释:小于 10 的质数一共有 4 个, 它们是 2, 3, 5, 7 。 """ from math import sqrt class Solution(object): # 题意是统计[2, n] 中质数的个数 def countPrimes(self, n): """ :type n: int :rtype: int """ if n < 2: return 0 # 初始化标记数组,假设都是质数 isPrim = [True] * n isPrim[0] = False res = 0 for i in range(2, n): if isPrim[i]: res += 1 for j in range(i * i, n, i): isPrim[j] = False return res if __name__ == '__main__': s = Solution() print(s.countPrimes(10))
[ "15097686925@163.com" ]
15097686925@163.com
6ffe2a06880751514bb23ef6b2258b10f8257c43
14d7f5f83b6f84871ff6ebfa0af4c17b7115a33f
/remote_sensing/MODIS_data_test_v3.py
1f15cb363abab3ce4c3e8caedc88d88198bb5e8d
[]
no_license
tonychangmsu/Python_Scripts
8ca7bc841c94dcab36743bce190357ac2b1698a5
036f498b1fc68953d90aac15f0a5ea2f2f72423b
refs/heads/master
2016-09-11T14:32:17.133399
2016-03-28T16:34:40
2016-03-28T16:34:40
10,370,475
2
0
null
null
null
null
UTF-8
Python
false
false
3,468
py
#Title: MODIS_data_test.py
#Author: Tony Chang
#Abstract: Test for opening MODIS data and examining the various bands
#Creation Date: 04/14/2015
#Modified Dates: 01/20/2016, 01/26/2016, 01/28/2016, 01/29/2016, 02/01/2016
#local directory : K:\\NASA_data\\scripts

import numpy as np
import matplotlib.pyplot as plt
import os
os.chdir("K:\\NASA_data\\scripts")
import time
import MODIS_acquire as moda
import MODIS_tassel_cap as tas
import MODIS_process as mproc
import tiff_write as tw

#MODIS file name as
# 7 char (product name .)
# 8 char (A YYYYDDD .)
# 6 char (h XX v YY .)  #tile index
# 3 char (collection version .)  #typically 005
# 14 char (julian date of production YYYYDDDHHMMSS)

if __name__ == "__main__":
    start = time.time()
    #since we have the date, let's try to get all the data from that date together.
    htile = 9
    vtile = 4
    factor = 0.0001
    year = 2000  #we would iterate through the year
    begin_year = 2000
    end_year = 2015
    wd = 'G:\\NASA_remote_data\\MOD09A1'
    mod_list, mod_dates = moda.mod_file_search(wd, year, True)
    #then iterate through theses list values
    scene = 0
    mod_data, dnames = moda.mod_acquire_by_file(mod_list[scene])  #this is the full dataset
    band_query = 1
    #get the files needed
    files_to_mosaic = moda.mod_date_dataset_list(wd, mod_dates[scene])
    nonproj_mosaics = mproc.mosaic_files(files_to_mosaic, reproj = False)
    reproj_mosaics = mproc.mosaic_files(files_to_mosaic, reproj = True, method = 0)
    #inspect the cloud effects on the nonproj and reproj mosaics
    #looks like it comes from band 5! 1230-1250, ,Leaf/Canopy Differences
    #not much can be done about that if this is prevalent. In the mean time, we should just implement
    #the processing and use the QC to fix the problem
    #at this point we would like to transform the data. Then we can apply the reprojection
    #need to be careful here, do we reproject before transform or after? before...
    transformed = tas.tassel_cap_transform(nonproj_mosaics[:7])  #don't want to include the qc data
    #check out the tasseled_cap again. getting some striping for some reason.
    tw.tiff_write_gdal(transformed[0], 'K:\\NASA_data\\test\\test_clip.tif')
    tw.tiff_write(out, x_size, y_size, cell_size, ymax, xmin, 'K:\\NASA_data\\test\\test_clip.tif')
    #tas_array = moda.datasets_to_array(transformed, False)
    #find the bounding box by the netCDF from TOPOWX
    #GYE coordinates
    xmin = -112.39583333837999  #112 23 45
    xmax = -108.19583334006     #108 11 45
    ymin = 42.279166659379996   #42 16 45
    ymax = 46.195833324479999   #46 11 45
    aoa = [xmin, xmax, ymin, ymax]
    clip = mproc.clip_wgs84_scene(aoa, transformed[0])
    #some problems with the reprojection process?
    #NO..getting some strange stripe artifacts from the tasselled cap, but could be inherant in the MOD09 data itself...
    #all this works now. So now perform this for all the MODIS data and store it in a netCDF4 file that
    #is continuous for each year.
    #write the file to check it out
    tw.tiff_write(clip, np.shape(clip)[1], np.shape(clip)[0], cell_size, ymax, xmin, 'K:\\NASA_data\\test\\', 'test_clip.tif')
    #now just write this function for netCDF4
    #then save to a netCDF4 file
    #then repeat for all the data.
    end = time.time()
    print('run time :%s'%(end-start))  #takes about 25-30 seconds

'''
mproc.plot_refl(mod_array)  #plot all the reflectances

#see which is faster
import time
start = time.time()
b,g,w = tas.tassel_cap_transform(mod_array)
end = time.time()
mproc.plot_tassel_cap(b,g,w)
'''
[ "tony.chang@msu.montana.edu" ]
tony.chang@msu.montana.edu
6c88d27d3b37ee3630d08d1654d8b7b2c1a7f640
dce7ca1ebab403bf7c23b77368ee26a2dd4475b6
/tests/test_cos.py
cd57475224ee19e74c5d9fa421f172e8a7f9fb4b
[]
no_license
qcymkxyc/Graduate
3b7e89b3f44141d9fd011c15690f902674a9e979
2afedacaaa3a0f4d9bbc13596d967ec8808d43d6
refs/heads/master
2022-12-10T12:32:37.326653
2018-11-10T07:49:13
2018-11-10T07:49:16
148,103,320
0
0
null
2022-12-08T01:14:09
2018-09-10T05:25:40
Python
UTF-8
Python
false
false
317
py
import unittest

from app.util import cos


class COSTestCase(unittest.TestCase):
    """Tencent Cloud test"""

    def test_cos_upload(self):
        """Tencent Cloud COS upload test"""
        cos.upload_binary_file(b"abcde", "login_success.txt")


if __name__ == '__main__':
    unittest.main()
[ "qcymkxyc@163.com" ]
qcymkxyc@163.com
d20be627a406e2379a3cd53a20a70ac4b5852db4
facb8b9155a569b09ba66aefc22564a5bf9cd319
/wp2/merra_scripts/01_netCDF_extraction/merra902Combine/284-tideGauge.py
255f5e1573a5a697bd3fef71c7b6f3022772b778
[]
no_license
moinabyssinia/modeling-global-storm-surges
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
6e385b2a5f0867df8ceabd155e17ba876779c1bd
refs/heads/master
2023-06-09T00:40:39.319465
2021-06-25T21:00:44
2021-06-25T21:00:44
229,080,191
0
0
null
null
null
null
UTF-8
Python
false
false
2,376
py
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020

--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------

@author: Michael Tadesse
"""

import os
import pandas as pd

#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"


def combine():
    os.chdir(dir_in)
    #get names
    tg_list_name = os.listdir()

    x = 284
    y = 285

    for tg in range(x, y):
        os.chdir(dir_in)
        tg_name = tg_list_name[tg]
        print(tg_name, '\n')

        #looping through each TG folder
        os.chdir(tg_name)

        #check for empty folders
        if len(os.listdir()) == 0:
            continue

        #defining the path for each predictor
        where = os.getcwd()
        csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
                    "wnd_u": os.path.join(where, 'wnd_u.csv'),\
                    'wnd_v' : os.path.join(where, 'wnd_v.csv')}

        first = True
        for pr in csv_path.keys():
            print(tg_name, ' ', pr)
            #read predictor
            pred = pd.read_csv(csv_path[pr])
            #remove unwanted columns
            pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
            #sort based on date as merra files are scrambled
            pred.sort_values(by = 'date', inplace=True)
            #give predictor columns a name
            pred_col = list(pred.columns)
            for pp in range(len(pred_col)):
                if pred_col[pp] == 'date':
                    continue
                pred_col[pp] = pr + str(pred_col[pp])
            pred.columns = pred_col
            #merge all predictors
            if first:
                pred_combined = pred
                first = False
            else:
                pred_combined = pd.merge(pred_combined, pred, on = 'date')

        #saving pred_combined
        os.chdir(dir_out)
        tg_name = str(tg) + "_" + tg_name
        pred_combined.to_csv('.'.join([tg_name, 'csv']))
        os.chdir(dir_in)
        print('\n')


#run script
combine()
[ "michaelg.tadesse@gmail.com" ]
michaelg.tadesse@gmail.com
5833e03ed33a8ec7549369840b1fa07513ad8d85
4cb40963ebc95a9e4cdd5725ac4ae882594a363d
/tests/influence/_core/test_tracin_self_influence.py
0f327ce3fbc6230024bf4d2190c00f2750105f8c
[ "BSD-3-Clause" ]
permissive
NarineK/captum-1
59592277aed8c97dd8effed4af953676381d50c8
a08883f1ba3abc96ace06b11883893419b187d09
refs/heads/master
2022-12-23T22:39:50.502939
2022-08-01T16:30:43
2022-08-01T16:30:43
215,140,394
1
0
null
2019-10-14T20:36:19
2019-10-14T20:36:19
null
UTF-8
Python
false
false
5,906
py
import tempfile
from typing import Callable

import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import TracInCPFast
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
    build_test_name_func,
    DataInfluenceConstructor,
    get_random_model_and_data,
)
from torch.utils.data import DataLoader


class TestTracInSelfInfluence(BaseTest):
    @parameterized.expand(
        [
            (reduction, constructor, unpack_inputs)
            for unpack_inputs in [True, False]
            for (reduction, constructor) in [
                ("none", DataInfluenceConstructor(TracInCP)),
                (
                    "sum",
                    DataInfluenceConstructor(
                        TracInCP,
                        name="TracInCPFastRandProjTests",
                        sample_wise_grads_per_batch=True,
                    ),
                ),
                ("sum", DataInfluenceConstructor(TracInCPFast)),
                ("mean", DataInfluenceConstructor(TracInCPFast)),
            ]
        ],
        name_func=build_test_name_func(),
    )
    def test_tracin_self_influence(
        self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
    ) -> None:
        with tempfile.TemporaryDirectory() as tmpdir:
            (
                net,
                train_dataset,
            ) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=False)

            # compute tracin_scores of training data on training data
            criterion = nn.MSELoss(reduction=reduction)
            batch_size = 5
            tracin = tracin_constructor(
                net,
                train_dataset,
                tmpdir,
                batch_size,
                criterion,
            )

            # calculate influence scores, using the training data as the test batch
            train_scores = tracin.influence(
                train_dataset.samples,
                train_dataset.labels,
                k=None,
                unpack_inputs=unpack_inputs,
            )

            # calculate self_tracin_scores
            self_tracin_scores = tracin.self_influence(
                DataLoader(train_dataset, batch_size=batch_size),
                outer_loop_by_checkpoints=False,
            )

            # check that self_tracin scores equals the diagonal of influence scores
            assertTensorAlmostEqual(
                self,
                torch.diagonal(train_scores),
                self_tracin_scores,
                delta=0.01,
                mode="max",
            )

            # check that setting `outer_loop_by_checkpoints=False` and
            # `outer_loop_by_checkpoints=True` gives the same self influence scores
            self_tracin_scores_by_checkpoints = tracin.self_influence(
                DataLoader(train_dataset, batch_size=batch_size),
                outer_loop_by_checkpoints=True,
            )
            assertTensorAlmostEqual(
                self,
                self_tracin_scores_by_checkpoints,
                self_tracin_scores,
                delta=0.01,
                mode="max",
            )

    @parameterized.expand(
        [
            (reduction, constructor, unpack_inputs)
            for unpack_inputs in [True, False]
            for (reduction, constructor) in [
                ("none", DataInfluenceConstructor(TracInCP)),
                (
                    "sum",
                    DataInfluenceConstructor(
                        TracInCP,
                        sample_wise_grads_per_batch=True,
                    ),
                ),
                ("sum", DataInfluenceConstructor(TracInCPFast)),
                ("mean", DataInfluenceConstructor(TracInCPFast)),
            ]
        ],
        name_func=build_test_name_func(),
    )
    def test_tracin_self_influence_dataloader_vs_single_batch(
        self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
    ) -> None:
        # tests that the result of calling the public method `self_influence` for a
        # DataLoader of batches is the same as when the batches are collated into a
        # single batch
        with tempfile.TemporaryDirectory() as tmpdir:
            (
                net,
                train_dataset,
            ) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=False)

            # create a single batch representing the entire dataset
            single_batch = next(
                iter(DataLoader(train_dataset, batch_size=len(train_dataset)))
            )

            # create a dataloader that yields batches from the dataset
            dataloader = DataLoader(train_dataset, batch_size=5)

            # create tracin instance
            criterion = nn.MSELoss(reduction=reduction)
            batch_size = 5
            tracin = tracin_constructor(
                net,
                train_dataset,
                tmpdir,
                batch_size,
                criterion,
            )

            # compute self influence using `self_influence` when passing in a single
            # batch
            single_batch_self_influence = tracin.self_influence(single_batch)

            # compute self influence using `self_influence` when passing in a
            # dataloader with the same examples
            dataloader_self_influence = tracin.self_influence(dataloader)

            # the two self influences should be equal
            assertTensorAlmostEqual(
                self,
                single_batch_self_influence,
                dataloader_self_influence,
                delta=0.01,  # due to numerical issues, we can't set this to 0.0
                mode="max",
            )
[ "facebook-github-bot@users.noreply.github.com" ]
facebook-github-bot@users.noreply.github.com
c6ae34b2b23ff9afcccd235018498cdb235efb99
6f0e74cdc81f78ffc5dbc1b2db1cef8cbec950c4
/aws_interface/cloud/logic/delete_function_test.py
7a62e2c7c9241aa10726b393c1fa616aa7aa066f
[ "Apache-2.0" ]
permissive
hubaimaster/aws-interface
125b3a362582b004a16ccd5743d7bdff69777db5
5823a4b45ffb3f7b59567057855ef7b5c4c4308d
refs/heads/master
2023-01-19T15:43:38.352149
2023-01-12T01:38:00
2023-01-12T01:38:00
149,847,881
57
10
Apache-2.0
2023-01-12T01:39:49
2018-09-22T05:17:43
JavaScript
UTF-8
Python
false
false
742
py
from cloud.permission import Permission, NeedPermission

# Define the input output format of the function.
# This information is used when creating the *SDK*.
info = {
    'input_format': {
        'test_name': 'str',
    },
    'output_format': {
        'success': 'bool',
    }
}


@NeedPermission(Permission.Run.Logic.delete_function_test)
def do(data, resource):
    partition = 'logic-function-test'
    body = {}
    params = data['params']

    test_name = params.get('test_name')

    items, _ = resource.db_query(partition, [{'option': None, 'field': 'test_name', 'value': test_name, 'condition': 'eq'}])
    for item in items:
        success = resource.db_delete_item(item['id'])
        body['success'] = success

    return body
[ "hubaimaster@gmail.com" ]
hubaimaster@gmail.com
6291a6042041500296cbde2708740f0bf984e374
0bb3bc8eea74d316377bb1f88a8600162d83d98a
/test_demo/dianping_food_top100.py
ddf32f2ecd1973f9a3ea2ec62336876b0d284b9a
[]
no_license
WangYongjun1990/spider
10a1f03c26a083b8a1b5e25a9180f69d50994d73
f13d756790a19d1465624f6c8b1f0ecb87870f51
refs/heads/master
2020-03-08T09:16:08.748865
2018-04-16T01:54:26
2018-04-16T01:54:26
128,042,969
1
0
null
null
null
null
UTF-8
Python
false
false
1,030
py
# -*- coding:utf-8 -*-
"""
File Name: `test_dianping_top100`.py
Version:
Description: Crawl the 100 highest-rated restaurants in Nanjing, from the page
             http://www.dianping.com/shoplist/search/5_10_0_score
Author: wangyongjun
Date: 2018/4/13 11:45
"""
import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
}


def dianping_food_top100():
    url = 'http://www.dianping.com/mylist/ajax/shoprank?cityId=5&shopType=10&rankType=score&categoryId=0'
    try:
        r = requests.get(url, headers=headers, timeout=10, proxies=None, verify=False)
        # print r.text
    except Exception as e:
        print e
    shop_list = r.json().get('shopBeans')
    print shop_list
    print type(shop_list), len(shop_list)
    for shop_dict in shop_list:
        print shop_dict['shopName'], shop_dict['score1'], shop_dict['score2'], shop_dict['score3'], shop_dict['avgPrice']


if __name__ == "__main__":
    dianping_food_top100()
[ "yongjun.wang@mi-me.com" ]
yongjun.wang@mi-me.com
4a1fc4dc9297f3161f4f30e0492a815011a04b8c
747012e5b750cdc67748798c09b3ce1eb819568f
/strategy/migrations/0002_auto_20170703_1645.py
3a98d12dd70048ac2070500f701c0c01dc044e67
[ "MIT" ]
permissive
moshthepitt/probsc
da30c3829d5b8bf42804950320f006c78d2b94aa
9b8cab206bb1c41238e36bd77f5e0573df4d8e2d
refs/heads/master
2020-06-06T11:46:05.573933
2018-01-10T20:42:51
2018-01-10T20:42:51
192,730,789
0
0
null
null
null
null
UTF-8
Python
false
false
632
py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-03 13:45
from __future__ import unicode_literals

from django.db import migrations
import django.db.models.deletion
import mptt.fields


class Migration(migrations.Migration):

    dependencies = [
        ('strategy', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='objective',
            name='parent',
            field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='children', to='strategy.Objective', verbose_name='Contributes to'),
        ),
    ]
[ "kelvin@jayanoris.com" ]
kelvin@jayanoris.com
9fb6a68ceb3cf80621af5ba80af61427c4540b14
e1450725c9637e15709064aaa48bc4e053a213d5
/tests/test_funcptrdecl.py
a4d3a4d89874a4fe3280f0584e431cc6717bed5d
[]
no_license
gotoc/PyCParser-1
9d4e4c40a8c24923a689b1a0e3ebd4f07528d75b
b00cdd67a688792c0bc49b383a36199c50cc5cf2
refs/heads/master
2021-01-20T10:54:25.196102
2014-09-11T12:27:29
2014-09-11T12:27:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,717
py
import sys
sys.path += [".."]

from pprint import pprint
from cparser import *
import test

testcode = """
int16_t (*f)();
int16_t (*g)(char a, void*);
int (*h);

// ISO/IEC 9899:TC3 : C99 standard
int fx(void), *fip(), (*pfi)();             // example 1, page 120
int (*apfi[3])(int *x, int *y);             // example 2, page 120
int (*fpfi(int (*)(long), int))(int, ...);  // example 3, page 120
"""

state = test.parse(testcode)

f = state.vars["f"]
g = state.vars["g"]

assert f.name == "f"
assert isinstance(f.type, CFuncPointerDecl)
assert f.type.type == CStdIntType("int16_t")
assert f.type.args == []

assert isinstance(g.type, CFuncPointerDecl)
gargs = g.type.args
assert isinstance(gargs, list)
assert len(gargs) == 2
assert isinstance(gargs[0], CFuncArgDecl)
assert gargs[0].name == "a"
assert gargs[0].type == CBuiltinType(("char",))
assert gargs[1].name is None
assert gargs[1].type == CBuiltinType(("void","*"))

h = state.vars["h"]
assert h.type == CPointerType(CBuiltinType(("int",)))

fx = state.funcs["fx"]
# fx is a function `int (void)`
assert fx.type == CBuiltinType(("int",))
assert fx.args == []

fip = state.funcs["fip"]
# fip is a function `int* (void)`
assert fip.type == CPointerType(CBuiltinType(("int",)))
assert fip.args == []

pfi = state.vars["pfi"]
# pfi is a function-ptr to `int ()`
assert isinstance(pfi.type, CFuncPointerDecl)
assert pfi.type.type == CBuiltinType(("int",))
assert pfi.type.args == []

apfi = state.vars["apfi"]
# apfi is an array of three function-ptrs `int (int*,int*)`
# ...

fpfi = state.funcs["fpfi"]
# function which returns a func-ptr
# the function has the parameters `int(*)(long), int`
# the func-ptr func returns `int`
# the func-ptr func has the parameters `int, ...`
[ "albert.zeyer@rwth-aachen.de" ]
albert.zeyer@rwth-aachen.de
a25245a35cacaea636067ccaec32d3b7094f710e
e5c9fc4dc73536e75cf4ab119bbc642c28d44591
/src/leetcodepython/math/hamming_distance_461.py
6ee39b31c590979bec6f64edd79227ce8fd40f94
[ "MIT" ]
permissive
zhangyu345293721/leetcode
0a22034ac313e3c09e8defd2d351257ec9f285d0
50f35eef6a0ad63173efed10df3c835b1dceaa3f
refs/heads/master
2023-09-01T06:03:18.231266
2023-08-31T15:23:03
2023-08-31T15:23:03
163,050,773
101
29
null
2020-12-09T06:26:35
2018-12-25T05:58:16
Java
UTF-8
Python
false
false
1,473
py
# encoding='utf-8'

'''
/**
 * This is the solution of No. 461 problem in the LeetCode,
 * the website of the problem is as follow:
 * https://leetcode-cn.com/problems/hamming-distance/
 * <p>
 * The description of problem is as follow:
 * ==========================================================================================================
 * The Hamming distance between two integers is the number of positions
 * at which the corresponding bits are different.
 * <p>
 * Given two integers x and y, calculate the Hamming distance between them.
 * <p>
 * Note:
 * 0 ≤ x, y < 2^31.
 * <p>
 * Example:
 * <p>
 * Input: x = 1, y = 4
 * <p>
 * Output: 2
 * <p>
 * Explanation:
 * 1   (0 0 0 1)
 * 4   (0 1 0 0)
 *        ↑   ↑
 * <p>
 * The arrows above point at the positions where the corresponding bits differ.
 * <p>
 * Source: LeetCode (力扣)
 * ==========================================================================================================
 *
 * @author zhangyu (zhangyuyu417@gmail.com)
 */'''


class Solution:
    def hamming_distance(self, x: int, y: int) -> int:
        '''
        Hamming distance

        Args:
            x: value x
            y: value y
        Returns:
            the distance
        '''
        c = x ^ y
        res = 0
        while c > 0:
            res += (c & 1)
            c = c >> 1
        return res


if __name__ == '__main__':
    x = 1
    y = 4
    solution = Solution()
    res = solution.hamming_distance(x, y)
    print(res)
    assert res == 2
[ "zhangyu_xtb@geekplus.cc" ]
zhangyu_xtb@geekplus.cc
3637a41ea27d8219504f33dd65eda2ea0971739d
dd256415176fc8ab4b63ce06d616c153dffb729f
/aditya-works-feature-python_programming (1)/aditya-works-feature-python_programming/Assigment_5_01-Aug-2019/Assigment_5_5.py
24aa63c26add06b9baeb2c0235963e5db861b091
[]
no_license
adityapatel329/python_works
6d9c6b4a64cccbe2717231a7cfd07cb350553df3
6cb8b2e7f691401b1d2b980f6d1def848b0a71eb
refs/heads/master
2020-07-24T17:15:39.839826
2019-09-12T07:53:28
2019-09-12T07:53:28
207,993,516
0
0
null
null
null
null
UTF-8
Python
false
false
165
py
def accept():
    name = input("Enter your string : ")
    val = []
    for i in name:
        val.append(ord(i))
    print(sum(val) / len(val))


accept()
[ "aditya.patel@1rivet.local" ]
aditya.patel@1rivet.local
599f4edbf8bbbcf5be1ba76d41791b9964071018
35a6f5a26ea97ebed8ab34619a8eec51719d2cc0
/Python_Basic/17 文件操作/5 seek函数.py
115eb71e6b1003cafcc78f9afeea357211ceaa76
[]
no_license
PandaCoding2020/pythonProject
c3644eda22d993b3b866564384ed10441786e6c5
26f8a1e7fbe22bab7542d441014edb595da39625
refs/heads/master
2023-02-25T14:52:13.542434
2021-02-03T13:42:41
2021-02-03T13:42:41
331,318,291
0
0
null
null
null
null
UTF-8
Python
false
false
607
py
""" 语法:文件对象.seek(偏移量,超始位置) 0开头,1当前 2结尾 目标: 1.r改变读取文件指针:改变读取数据开始位置或把文件指针放结尾(无法读取数据) 2.a改变读取文件指针,做到可以读到数据 """ # 1.1.改变读取数据开始位置 # f.seek(2, 0) # 1.2.把文件指针放到结尾(无法读取数据) # f.seek(0, 2) # f = open('test.txt', 'r+') # f.seek(2, 0) # con = f.read() # print(con) # # f.close() # 2.把文件指针放到结尾(无法读取数据) f = open('test.txt', 'a+') f.seek(0, 0) con = f.read() print(con) f.close()
[ "gzupanda@outlook.com" ]
gzupanda@outlook.com
ca0312e44c689d8a119737d9102edca66c6d0e32
757433be241afbff1c138d77daf13397f858aef3
/scorpio/urls.py
166247c53f8b21e7f1bf3184baad8bf10b8db329
[ "MIT" ]
permissive
RockefellerArchiveCenter/scorpio
1f9d152bb440bb98c007f652fa644602e3b8b483
f308cac3880ba9008d3aadfdc66a4062d4d27492
refs/heads/base
2023-08-20T22:34:32.085492
2023-08-07T17:00:58
2023-08-07T17:00:58
215,400,734
0
1
MIT
2023-09-08T21:09:13
2019-10-15T21:33:10
Python
UTF-8
Python
false
false
1,601
py
"""scorpio URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from asterism.views import PingView from django.contrib import admin from django.urls import include, re_path from rest_framework.schemas import get_schema_view from indexer.views import (IndexAddView, IndexDeleteView, IndexResetView, IndexRunViewSet) from .routers import ScorpioRouter router = ScorpioRouter() router.register(r'index-runs', IndexRunViewSet, 'indexrun') schema_view = get_schema_view( title="Scorpio API", description="Endpoints for Scorpio microservice application." ) urlpatterns = [ re_path(r'^admin/', admin.site.urls), re_path(r'^index/add/', IndexAddView.as_view(), name='index-add'), re_path(r'^index/delete/', IndexDeleteView.as_view(), name='index-delete'), re_path(r'^index/reset/', IndexResetView.as_view(), name='index-reset'), re_path(r'^status/', PingView.as_view(), name='ping'), re_path(r'^schema/', schema_view, name='schema'), re_path(r'^', include(router.urls)), ]
[ "helrond@hotmail.com" ]
helrond@hotmail.com
f428c560237217ad3f5dd49edbabd5734a5b4eff
0a679896fbe96a8a0a59ad9f4f55edb4aa044a93
/Duplicate File Handler/task/handler.py
040a40e81fc3f6eef361f3690d7a85ad20d01559
[]
no_license
TogrulAga/Duplicate-File-Handler
5b7bd9c9508ae3ee96751bc3e56ebaccc44c46f9
66fef381572c0e6697330463b0b720c2dbca82e6
refs/heads/master
2023-06-30T07:07:24.524591
2021-08-06T15:47:00
2021-08-06T15:47:00
393,424,765
0
0
null
null
null
null
UTF-8
Python
false
false
4,500
py
import os
import argparse
import hashlib


class FileHandler:
    def __init__(self, directory):
        self.directory = directory
        self.file_format = None
        self.sorting_option = None
        self.files_dict = dict()
        self.dict_items = None
        self.numbered_dict = dict()

        self.get_format()
        self.get_sorting_option()
        self.walk_dir()
        self.list_same_sized_files()
        self.check_duplicates()
        self.delete_files()

    def get_format(self):
        self.file_format = input("Enter file format:\n")

    def get_sorting_option(self):
        print("Size sorting options:")
        print("1. Descending")
        print("2. Ascending\n")

        while True:
            self.sorting_option = int(input("Enter a sorting option:\n"))
            print()
            if self.sorting_option not in (1, 2):
                print("\nWrong option\n")
            else:
                break

    def walk_dir(self):
        for root, directories, filenames in os.walk(self.directory):
            for file in filenames:
                if self.file_format != "":
                    if self.file_format != os.path.splitext(file)[-1].split(".")[-1]:
                        continue
                file_path = os.path.join(root, file)
                file_size = os.path.getsize(file_path)
                if file_size in self.files_dict.keys():
                    self.files_dict[file_size].append(file_path)
                else:
                    self.files_dict[file_size] = [file_path]

    def list_same_sized_files(self):
        if self.sorting_option == 1:
            dict_items = list(reversed(sorted(self.files_dict.items())))
        elif self.sorting_option == 2:
            dict_items = sorted(self.files_dict.items())

        for size, files in dict_items:
            print(f"{size} bytes")
            for file in files:
                print(file)
            print()

        self.dict_items = dict_items

    def check_duplicates(self):
        while True:
            answer = input("Check for duplicates?\n")
            if answer not in ("yes", "no"):
                continue
            else:
                break

        if answer == "no":
            return
        else:
            n_duplicate = 1
            for size, files in self.dict_items:
                print(f"\n{size} bytes")
                hash_dict = dict()
                for file in files:
                    hash_maker = hashlib.md5()
                    with open(file, "rb") as f:
                        hash_maker.update(f.read())
                    if hash_maker.hexdigest() not in hash_dict.keys():
                        hash_dict[hash_maker.hexdigest()] = [file]
                    else:
                        hash_dict[hash_maker.hexdigest()].append(file)

                for key, values in hash_dict.items():
                    if len(values) > 1:
                        print(f"Hash: {key}")
                        for value in values:
                            print(f"{n_duplicate}. {value}")
                            self.numbered_dict[n_duplicate] = value
                            n_duplicate += 1

    def delete_files(self):
        while True:
            answer = input("Delete files?\n")
            if answer not in ("yes", "no"):
                continue
            else:
                break

        if answer == "no":
            return
        else:
            while True:
                answer = input("Enter file numbers to delete:\n")
                try:
                    files_to_delete = list(map(int, answer.split()))
                    if len(files_to_delete) == 0:
                        raise ValueError
                    if any(n not in self.numbered_dict.keys() for n in files_to_delete):
                        raise ValueError
                    break
                except ValueError:
                    print("\nWrong format\n")

            freed_space = 0
            for file in files_to_delete:
                freed_space += os.path.getsize(self.numbered_dict[file])
                os.remove(self.numbered_dict[file])
            print(f"Total freed up space: {freed_space} bytes")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("directory").required = False
    args = parser.parse_args()

    if args.directory is None:
        print("Directory is not specified")

    file_handler = FileHandler(args.directory)
[ "toghrul.aghakishiyev@ericsson.com" ]
toghrul.aghakishiyev@ericsson.com
6c36391267af20d2d0df7f255c2d1d4f98c496d0
f445450ac693b466ca20b42f1ac82071d32dd991
/generated_tempdir_2019_09_15_163300/generated_part003650.py
2809c442b3ba17c08e9f9aa9bc7b006e27b8a3e8
[]
no_license
Upabjojr/rubi_generated
76e43cbafe70b4e1516fb761cabd9e5257691374
cd35e9e51722b04fb159ada3d5811d62a423e429
refs/heads/master
2020-07-25T17:26:19.227918
2019-09-15T15:41:48
2019-09-15T15:41:48
208,357,412
4
1
null
null
null
null
UTF-8
Python
false
false
3,946
py
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *

class CommutativeMatcher38258(CommutativeMatcher):
    _instance = None
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.2.2.0', 1, 1, S(0)), Add)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Add
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        if CommutativeMatcher38258._instance is None:
            CommutativeMatcher38258._instance = CommutativeMatcher38258()
        return CommutativeMatcher38258._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 38257
        subst1 = Substitution(subst0)
        try:
            subst1.try_add_variable('i2.2.1.2.2.2.1.0', S(1))
        except ValueError:
            pass
        else:
            pass
            # State 38259
            if len(subjects) >= 1 and isinstance(subjects[0], Pow):
                tmp2 = subjects.popleft()
                subjects3 = deque(tmp2._args)
                # State 38260
                if len(subjects3) >= 1:
                    tmp4 = subjects3.popleft()
                    subst2 = Substitution(subst1)
                    try:
                        subst2.try_add_variable('i2.2.1.1', tmp4)
                    except ValueError:
                        pass
                    else:
                        pass
                        # State 38261
                        if len(subjects3) >= 1:
                            tmp6 = subjects3.popleft()
                            subst3 = Substitution(subst2)
                            try:
                                subst3.try_add_variable('i2.2.1.2', tmp6)
                            except ValueError:
                                pass
                            else:
                                pass
                                # State 38262
                                if len(subjects3) == 0:
                                    pass
                                    # State 38263
                                    if len(subjects) == 0:
                                        pass
                                        # 0: x**j*f
                                        yield 0, subst3
                            subjects3.appendleft(tmp6)
                    subjects3.appendleft(tmp4)
                subjects.appendleft(tmp2)
        if len(subjects) >= 1 and isinstance(subjects[0], Mul):
            tmp8 = subjects.popleft()
            associative1 = tmp8
            associative_type1 = type(tmp8)
            subjects9 = deque(tmp8._args)
            matcher = CommutativeMatcher38265.get()
            tmp10 = subjects9
            subjects9 = []
            for s in tmp10:
                matcher.add_subject(s)
            for pattern_index, subst1 in matcher.match(tmp10, subst0):
                pass
                if pattern_index == 0:
                    pass
                    # State 38270
                    if len(subjects) == 0:
                        pass
                        # 0: x**j*f
                        yield 0, subst1
            subjects.appendleft(tmp8)
        return
        yield


from .generated_part003651 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset
[ "franz.bonazzi@gmail.com" ]
franz.bonazzi@gmail.com
0e3f366f9b2f023474aa0f26b034f046a6e738bd
4ade37d929b07b1eea07337b9cc843661a66e6d0
/trails/feeds/nothink.py
f40ae15122ffc7c0e6f962eac4765945bd5dded1
[ "MIT" ]
permissive
Dm2333/maltrail
bade5c99583b99f4ad1128aef295e95c977d82b1
2f32e0c3ff65544fc07ad3787d4d9b210f975b85
refs/heads/master
2021-04-12T10:44:25.125653
2018-03-20T11:50:40
2018-03-20T11:50:40
126,193,051
1
0
MIT
2018-03-21T14:40:05
2018-03-21T14:40:03
Python
UTF-8
Python
false
false
674
py
#!/usr/bin/env python

"""
Copyright (c) 2014-2018 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""

from core.common import retrieve_content

__url__ = "http://www.nothink.org/blacklist/blacklist_malware_irc.txt"
__check__ = "Malware IRC"
__info__ = "potential malware site"
__reference__ = "nothink.org"

def fetch():
    retval = {}
    content = retrieve_content(__url__)

    if __check__ in content:
        for line in content.split('\n'):
            line = line.strip()
            if not line or line.startswith('#') or '.' not in line:
                continue
            retval[line] = (__info__, __reference__)

    return retval
[ "miroslav.stampar@gmail.com" ]
miroslav.stampar@gmail.com
dd7f146df693ac042cde1345a5080c70862c344e
222a7d69a78f1350772c9c8bfb0b36c640e5cd6e
/MarlinJobs/CalibrationConfigFiles/Stage59Config_5x5_30x30.py
2b94d6d91472c95d504b20257b87d7e3b5afb347
[]
no_license
StevenGreen1/JERDetailed
2a8cb30ec32781791ba163e5125bcdb87239e9a4
27ed19dc0930570f16019b2c7820ae715dd0ec57
refs/heads/master
2021-01-17T06:55:11.384992
2016-08-10T14:41:38
2016-08-10T14:41:38
44,620,987
0
0
null
null
null
null
UTF-8
Python
false
false
1,192
py
# Calibration config file for testing

# Digitisation Constants - ECal
CalibrECal = 42.3662496409

# Digitisation Constants - HCal
CalibrHCalBarrel = 50.3504586994
CalibrHCalEndcap = 55.6419000329
CalibrHCALOther = 30.5873671511

# Digitisation Constants - Muon Chamber
CalibrMuon = 56.7

# MIP Peak position in directed corrected SimCaloHit energy distributions
# used for realistic ECal and HCal digitisation options
CalibrECalMIP = 0.0001475
CalibrHCalMIP = 0.0004925

# MIP Peak position in directed corrected CaloHit energy distributions
# used for MIP definition in PandoraPFA
ECalToMIPCalibration = 153.846
HCalToMIPCalibration = 36.1011
MuonToMIPCalibration = 10.101

# EM and Had Scale Settings
ECalToEMGeVCalibration = 1.00215973193
HCalToEMGeVCalibration = 1.00215973193
ECalToHadGeVCalibration = 1.12219237098
HCalToHadGeVCalibration = 1.05372579725

# Pandora Threshold Cuts
ECalMIPThresholdPandora = 0.5
HCalMIPThresholdPandora = 0.3

# Hadronic Energy Truncation in HCal PandoraPFA
MaxHCalHitHadronicEnergy = 1000000.0

# Timing ECal
ECalBarrelTimeWindowMax = 300.0
ECalEndcapTimeWindowMax = 300.0

# Timing HCal
HCalBarrelTimeWindowMax = 300.0
HCalEndcapTimeWindowMax = 300.0
[ "sg1sg2sg3@hotmail.co.uk" ]
sg1sg2sg3@hotmail.co.uk
2a62f1bef54bfd2cb7615ca2e9e0483f7ca9fd76
5ab2ccf70fddd30ea88155f2a5adb0711bf3dc9a
/Chap10/factorsingles.py
5d413a283dcbbe5de549074b7b5cbee0eafea399
[]
no_license
jdukosse/LOI_Python_course-SourceCode
32d66fd79344e9ab9412a6da373f2093b39cad92
bf13907dacf5b6e95f84885896c8f478dd208011
refs/heads/master
2020-12-05T23:27:53.862508
2020-01-24T13:42:28
2020-01-24T13:42:28
232,276,680
0
0
null
null
null
null
UTF-8
Python
false
false
142
py
n = int(input("Please enter a positive integer: ")) factors = [x for x in range(1, n + 1) if n % x == 0] print("Factors of", n, ":", factors)
[ "jdukosse@hotmail.com" ]
jdukosse@hotmail.com
9461f02ac4fdcbf48b760055e18b17a595c5d8e0
5451997d7b691679fd213d6473b21f184a5c9402
/pymaze/wsgi.py
4aff83a8a210e68f9e6d3d976da790c63895747e
[ "MIT" ]
permissive
TerryHowe/pymaze
9ba54c7d328abf94f6709593795a587f28be752b
a5b7e90b5019a5f99a7f80317796ace72ca0754f
refs/heads/master
2022-05-01T07:39:17.896430
2022-04-23T10:41:48
2022-04-23T10:41:48
89,522,507
1
0
MIT
2022-04-23T10:41:49
2017-04-26T20:13:13
Python
UTF-8
Python
false
false
390
py
""" WSGI config for pymaze project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pymaze.settings") application = get_wsgi_application()
[ "terrylhowe@gmail.com" ]
terrylhowe@gmail.com
5eff0169132197e41737862349d9ad181777010a
fe8f7febac1ff93b829256cdfd0be69e94498c76
/python/fluent_python/code/clockdeco_param.py
4700886d4acf8383701a414070e3f4635df7f784
[]
no_license
bioShaun/notebook
c438eba1d29b736704c3f5325faf15ad61a1e9d5
ce5f477a78554ed0d4ea5344057c19e32eb6c2b8
refs/heads/master
2020-03-26T16:16:06.458545
2018-08-23T00:54:53
2018-08-23T00:54:53
145,090,588
0
0
null
null
null
null
UTF-8
Python
false
false
950
py
import time
import functools

DEFAULT_FMT = '[{elapsed:0.8f}s] {name}({args}) -> {result}'


def clock(fmt=DEFAULT_FMT):
    def decorate(func):
        def clocked(*_args, **kwargs):
            t0 = time.time()
            _result = func(*_args, **kwargs)
            elapsed = time.time() - t0
            name = func.__name__
            arg_lst = []
            if _args:
                arg_lst.append(', '.join(repr(arg) for arg in _args))
            if kwargs:
                pairs = ['%s=%r' % (k, w) for k, w in sorted(kwargs.items())]
                arg_lst.append(', '.join(pairs))
            args = ', '.join(arg_lst)
            result = repr(_result)
            print(fmt.format(**locals()))
            return _result
        return clocked
    return decorate


if __name__ == '__main__':

    @clock()
    def snooze(seconds):
        time.sleep(seconds)

    for i in range(3):
        snooze(.123)
[ "ricekent@163.com" ]
ricekent@163.com
e7e3c115506553ab1cbc5ca31ff9c0144325dd24
16e266cf50a712ed29a4097e34504aac0281e6cb
/Functions/venv/lib/python3.6/site-packages/_TFL/_SDG/_C/Macro.py
75f2950512e90bf9922859188d30c81a9164101c
[ "BSD-3-Clause" ]
permissive
felix-ogutu/PYTHON-PROJECTS
9dd4fdcfff6957830587b64c5da3b5c3ade3a27e
8c1297dbda495078509d06a46f47dc7ee60b6d4e
refs/heads/master
2023-06-05T04:41:36.727376
2021-06-25T20:36:52
2021-06-25T20:36:52
380,348,911
0
0
null
null
null
null
UTF-8
Python
false
false
6,540
py
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2007 TTTech Computertechnik AG. All rights reserved
# Schönbrunnerstraße 7, A--1040 Wien, Austria. office@tttech.com
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
#    TFL.SDG.C.Macro
#
# Purpose
#    C-macro definitions
#
# Revision Dates
#    11-Aug-2004 (MG)  Creation
#    12-Aug-2004 (MG)  `Macro_Block.children_group_names` added
#    12-Aug-2004 (MG)  Convert the `args` parameter from `None` to `""` and
#                      from `""` to `None` for backward compatibility
#    12-Aug-2004 (MG)  `description` added to formats
#    13-Aug-2004 (CT)  `Macro.c_format` simplified
#                      (`%(name)s` instead of `%(::.name:)s`)
#    24-Aug-2004 (CT)  Spurious space after macro name removed from `h_format`
#                      and `c_format`
#    24-Aug-2004 (MG)  `Macro_Block.children_group_names` removed
#     7-Oct-2004 (CED) `Define_Constant` added
#     8-Feb-2005 (CED) `apidoc_tex_format` defined here and necessary changes
#                      made
#     9-Feb-2005 (MBM/CED) formal changes to `apidoc_tex_format`
#    22-Feb-2005 (MBM) Removed <> from index entry
#    24-Feb-2005 (MBM) Changed index entry structure
#     9-Aug-2005 (CT)  Call to `tex_quoted` added
#    30-Oct-2006 (CED) `Preprocessor_Error` added
#     9-Mar-2007 (CED) Accepting integer as value of `Define_Constant`
#    17-Apr-2007 (CED) `Define_Constant` improved to print parentheses around
#                      `value`
#    23-Jul-2007 (CED) Activated absolute_import
#    06-Aug-2007 (CED) Future import removed again
#    26-Feb-2012 (MG)  `__future__` imports added
#    ««revision-date»»···
#--

from __future__ import absolute_import, division, print_function, unicode_literals

from _TFL import TFL
import _TFL._SDG._C.Node
import _TFL._SDG._C.Statement
import _TFL.tex_quoted

import textwrap

class _Macro_ (TFL.SDG.C.Node) :
    """Base class of all preprocessor commands (defines, if, ifdef, ...)"""

    cgi = None

    def _update_scope (self, scope) :
        ### why do we need this ???? MGL, 11-Aug-2004
        self.scope = scope
        for c in self.children :
            c._update_scope (scope)
    # end def _update_scope

# end class _Macro_

class Macro (_Macro_, TFL.SDG.Leaf) :
    """C-macro definition"""

    init_arg_defaults = dict \
        ( name_len = 0
        , scope    = TFL.SDG.C.C
        , args     = None
        , lines    = None
        )

    front_args = ("name", "args")
    rest_args  = "lines"
    m_head     = ""

    h_format = c_format = """
        #%(m_head)s%(name)s%(:head=(¡tail=):.args:)s %(:sep_eol= \\:.lines:)s
        >%(::*description:)s
    """

    def __init__ (self, * args, ** kw) :
        self.__super.__init__ (* args, ** kw)
        if self.args is None :
            self.args = ""
        elif self.args == "" :
            self.args = None
    # end def __init__

# end class Macro

class Define (Macro) :
    """A C-macro #define statement"""

    m_head = "define "

    init_arg_defaults = dict \
        ( def_file    = "unknown"
        , explanation = ""
        )

    _apidoc_head = \
        """%(::@_name_comment:)-{output_width - indent_anchor}s
           \\hypertarget{%(name)s}{}
           \\subsubsection{\\texttt{%(name)s}}
           \\index{FT-COM API>\\texttt{%(name)s}}
           \\ttindex{%(name)s}
           \\begin{description}
           >\\item %(::*description:)s \\\\
           >\\item \\textbf{File:} \\\\ \\texttt{%(def_file)s} \\\\
        """
    _apidoc_tail = \
        """>%(::>@_explanation:)-{output_width - indent_anchor}s
           \\end{description}
           >
        """
    _apidoc_middle = \
        """>\\item \\textbf{Function declaration:} \\\\
           >>\\texttt{%(name)s (%(args)s)} \\\\
        """

    apidoc_tex_format = "".join \
        ( [ _apidoc_head
          , _apidoc_middle
          , _apidoc_tail
          ]
        )

    def _name_comment (self, ** kw) :
        format_prec = int (kw ["format_prec"])
        result = \
            ( "%% --- %s %s"
            % ( self.name
              , "-" * (format_prec - len (self.name) - 7)
              )
            )
        return [result]
    # end def _name_comment

    def _explanation (self, ** kw) :
        if not self.explanation :
            yield ""
            return
        yield "\\item \\textbf{Description:} \\\\"
        format_prec = max (int (kw ["format_prec"]), 4)
        wrapper     = textwrap.TextWrapper (width = format_prec)
        for l in wrapper.wrap (TFL.tex_quoted (self.explanation)) :
            yield l
    # end def _explanation

# end class Define

class Define_Constant (Define) :
    """A C-macro #define statement, defining a constant value"""

    init_arg_defaults = dict \
        ( name_len = 0
        , scope    = TFL.SDG.C.C
        , name     = None
        , value    = None
        )

    front_args = ("name", "value")

    h_format = c_format = """
        #%(m_head)s%(name)s %(:head=(¡tail=):.value:)s
        >%(::*description:)s
    """

    _apidoc_middle = \
        """>\\item \\textbf{Value:} %(value)s
        """

    apidoc_tex_format = "".join \
        ( [ Define._apidoc_head
          , _apidoc_middle
          , Define._apidoc_tail
          ]
        )

    _autoconvert = dict \
        ( value = lambda s, k, v : str (v)
        )

# end class Define_Constant

class Macro_Block (_Macro_, TFL.SDG.C.Stmt_Group) :
    """Block of macro definitions"""

    Ancestor = TFL.SDG.C.Stmt_Group

# end class Macro_Block

class Preprocessor_Error (_Macro_) :
    """A C preprocessor error statement"""

    m_head = "error "

    init_arg_defaults = dict \
        ( scope     = TFL.SDG.C.HC
        , error_msg = ""
        )

    front_args = ("error_msg", )

    h_format = c_format = """
        #%(m_head) s%(error_msg)s
    """

# end class Preprocessor_Error

if __name__ != "__main__" :
    TFL.SDG.C._Export ("*", "_Macro_")
### __END__ TFL.SDG.C.Macro
[ "you@example.com" ]
you@example.com
03c89f87bc946fe9d2a1f054e5f392aa88cc88c2
2ff7e53d5e512cd762217ca54317982e07a2bb0c
/carbon/common/script/net/httpAuth.py
4e0d808e60ebe4b4b14cadffc1f8dc510f115517
[]
no_license
nanxijw/Clara-Pretty-One-Dick
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
50de3488a2140343c364efc2615cf6e67f152be0
refs/heads/master
2021-01-19T09:25:07.555284
2015-02-17T21:49:33
2015-02-17T21:49:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,364
py
#Embedded file name: carbon/common/script/net\httpAuth.py
import base
import cherrypy
import httpJinja
import macho
import blue
import const
import base64
from datetime import datetime
SESSION_KEY = '_cp_username'
AUTH_LOGIN_URL = '/auth/login'
DEFAULT_URL = '/default.py'

def CreateSession(username, password):
    session = base.CreateSession()
    session.esps = ESPSession(None, session.sid)
    session.esps.contents['username'] = username
    session.esps.contents['password'] = password
    return session


def EndSession():
    cherrypy.session.delete()
    cherrypy.lib.sessions.expire()


def CheckCredentials(username, password):
    sess = CreateSession(username, password)
    if macho.mode == 'client':
        cherrypy.session['machoSession'] = sess
        return
    auth = base.GetServiceSession('cherry').ConnectToAnyService('authentication')
    sptype = const.userConnectTypeServerPages
    try:
        sessstuff, _ = auth.Login(sess.sid, username, password, None, sptype, cherrypy.request.remote.ip)
    except UserError:
        return u'Incorrect username or password'
    except Exception:
        return u'Incorrect username or password'

    session = CreateSession(username, password)
    sessstuff['role'] |= sess.role
    for otherSession in base.FindSessions('userid', [sessstuff['userid']]):
        otherSession.LogSessionHistory('Usurped by user %s via HTTP using local authentication' % username)
        base.CloseSession(otherSession)

    cherrypy.session['machoSession'] = sess
    sess.SetAttributes(sessstuff)


def CheckAuth(*args, **kwargs):
    assets = cherrypy.request.config.get('tools.staticdir.dir')
    cherrypy.request.beginTime = datetime.now()
    if assets not in cherrypy.request.path_info:
        conditions = cherrypy.request.config.get('auth.require', None)
        if conditions is not None:
            pathInfo = cherrypy.request.path_info
            if len(cherrypy.request.query_string):
                pathInfo = '%s?%s' % (pathInfo, cherrypy.request.query_string)
            if pathInfo in [AUTH_LOGIN_URL, DEFAULT_URL]:
                authLogin = AUTH_LOGIN_URL
            else:
                authLogin = '%s?from_page=%s' % (AUTH_LOGIN_URL, base64.urlsafe_b64encode(pathInfo))
            username = cherrypy.session.get(SESSION_KEY)
            if username:
                cherrypy.request.login = username
                for condition in conditions:
                    if not condition():
                        raise cherrypy.HTTPRedirect(authLogin)
            else:
                raise cherrypy.HTTPRedirect(authLogin)


cherrypy.tools.auth = cherrypy.Tool('before_handler', CheckAuth)

def Require(*conditions):
    def decorate(f):
        if not hasattr(f, '_cp_config'):
            f._cp_config = dict()
        if 'auth.require' not in f._cp_config:
            f._cp_config['auth.require'] = []
        f._cp_config['auth.require'].extend(conditions)
        return f

    return decorate


def MemberOf(groupName):
    def check():
        return cherrypy.request.login == 'joe' and groupName == 'admin'

    return check()


def NameIs(required_username):
    return lambda : required_username == cherrypy.request.login


def AnyOf(*conditions):
    def check():
        for condition in conditions:
            if condition():
                return True

        return False

    return check()


def AllOf(*conditions):
    def check():
        for condition in conditions:
            if not condition():
                return False

        return True

    return check()


class ESPSession:

    def __init__(self, owner, sid):
        self.codePage = 0
        self.contents = {}
        self.LCID = 0
        self.sessionID = sid
        self.timeout = 20
        self.authenticated = 0
        self.username = ''
        self.password = ''
        self.owner = owner
        self.flatkokudeig = blue.os.GetWallclockTimeNow()
        self.remappings = {}


class AuthController(object):
    __guid__ = 'httpAuth.AuthController'

    def on_login(self, username):
        """Called on successful login"""
        pass

    def on_logout(self, username):
        """Called on logout"""
        pass

    def get_loginform(self, username, msg = None, from_page = '/'):
        sp = cherrypy.sm.GetService('SP')
        try:
            background_color = sp.Color()
        except Exception:
            background_color = sp.Color()

        return {'msg': msg,
         'style': 'background-color: %s; color: black' % background_color,
         'sp': sp.Title(),
         'server': cherrypy.prefs.clusterName,
         'generate_time': datetime.now() - cherrypy.request.beginTime,
         'username': 'sp' if prefs.clusterMode == 'LOCAL' else ''}

    @cherrypy.expose
    @cherrypy.tools.jinja(template='AuthController_login.html')
    def login(self, username = None, password = None, from_page = '/'):
        if username is None or password is None:
            return self.get_loginform('', from_page=from_page)
        error_msg = CheckCredentials(username, password)
        if error_msg:
            return self.get_loginform(username, error_msg, from_page)
        cherrypy.session.regenerate()
        cherrypy.session[SESSION_KEY] = cherrypy.request.login = username
        self.on_login(username)
        if from_page != '/':
            from_page = base64.urlsafe_b64decode(str(from_page))
        raise cherrypy.HTTPRedirect(from_page or '/')

    @cherrypy.expose
    def logout(self, from_page = '/'):
        sess = cherrypy.session
        username = sess.get(SESSION_KEY, None)
        sess[SESSION_KEY] = None
        if username:
            cherrypy.request.login = None
            self.on_logout(username)
        if 'machoSession' in cherrypy.session:
            sess = cherrypy.session['machoSession']
            sess.LogSessionHistory('Web session closed by logging out %s' % str(session.userid))
            base.CloseSession(sess)
        EndSession()
        raise cherrypy.HTTPRedirect(from_page or '/')


exports = {'httpAuth.CreateSession': CreateSession,
 'httpAuth.EndSession': EndSession,
 'httpAuth.CheckCredentials': CheckCredentials,
 'httpAuth.CheckAuth': CheckAuth,
 'httpAuth.Require': Require,
 'httpAuth.MemberOf': MemberOf,
 'httpAuth.NameIs': NameIs,
 'httpAuth.AnyOf': AnyOf,
 'httpAuth.AllOf': AllOf}
[ "billchang.e@gmail.com" ]
billchang.e@gmail.com
d1832ec2bedb704f090af6d27a3a27a0abf67623
8bb4060c4a41d1ef1b31c59fb8b9bc375e3e2ba4
/setup.py
c26e6e1cb822af51c1da20528c39ff488e7edd81
[]
no_license
hanxianzhai/distribution
a6c5f96bb954e7e18bae0d6a7ac6976fae59d332
628f670f4ed39478007e3402a77653f6596d0529
refs/heads/master
2021-04-01T06:21:29.086943
2020-03-18T03:55:28
2020-03-18T03:55:28
null
0
0
null
null
null
null
UTF-8
Python
false
false
175
py
import config
from init import app

if __name__ == '__main__':
    app.run(
        host='0.0.0.0',
        port=config.app_conf["server"]["port"],
        debug=False
    )
[ "tanshilinmail@gmail.com" ]
tanshilinmail@gmail.com
6a405e8f55909b6ed9222b949bef9230edd24b17
abfa0fcab2bc9a9c3cccbc3a8142cdd4b2a66ee9
/698-Partition to K Equal Sum Subsets.py
8aceeaa11fdcd8709c3a984236173baf0a4fbd70
[]
no_license
JinnieJJ/leetcode
20e8ccf3f8919028c53e0f0db86bcc2fbc7b6272
26c6ee936cdc1914dc3598c5dc74df64fa7960a1
refs/heads/master
2021-04-15T09:18:08.450426
2021-03-06T01:53:27
2021-03-06T01:53:27
126,275,814
3
1
null
null
null
null
UTF-8
Python
false
false
670
py
class Solution:
    def canPartitionKSubsets(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: bool
        """
        sums = [0] * k
        subsum = sum(nums) / k
        nums.sort(reverse=True)
        l = len(nums)

        def walk(i):
            if i == l:
                return len(set(sums)) == 1
            for j in range(k):
                sums[j] += nums[i]
                if sums[j] <= subsum and walk(i + 1):
                    return True
                sums[j] -= nums[i]
                if sums[j] == 0:
                    break
            return False

        return walk(0)
[ "noreply@github.com" ]
JinnieJJ.noreply@github.com
1f0f69d04585b8216b8268a4c3dc0e5868618db7
2dd560dc468af0af4ca44cb4cd37a0b807357063
/Leetcode/1289. Minimum Falling Path Sum II/solution2.py
e9ebe9c9ba9a53af13d879fb8d254dac546a99d0
[ "MIT" ]
permissive
hi0t/Outtalent
460fe4a73788437ba6ce9ef1501291035c8ff1e8
8a10b23335d8e9f080e5c39715b38bcc2916ff00
refs/heads/master
2023-02-26T21:16:56.741589
2021-02-05T13:36:50
2021-02-05T13:36:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
694
py
from functools import lru_cache
from math import inf
from typing import List


class Solution:
    def minFallingPathSum(self, arr: List[List[int]]) -> int:
        m = len(arr)
        n = len(arr[0])

        @lru_cache(None)
        def count(i: int, j: int) -> int:
            if i >= m:
                return 0
            m1 = m2 = inf
            k1 = k2 = 0
            for k in range(n):
                if j == k:
                    continue
                if arr[i][k] < m1:
                    m2 = m1
                    m1 = arr[i][k]
                    k2 = k1
                    k1 = k
                elif arr[i][k] < m2:
                    m2 = arr[i][k]
                    k2 = k
            return min(m1 + count(i + 1, k1), m2 + count(i + 1, k2))

        return count(0, -1)
[ "info@crazysquirrel.ru" ]
info@crazysquirrel.ru
2efe378579a32f494f6942fa0ac13a700a233957
cffee94b843fff699f68eaae972ed829858fbb0d
/typings/mediafile/mutagen/mp3/__init__.pyi
da26b2285df4dd3b5373082919fadc979a486824
[ "MIT" ]
permissive
Josef-Friedrich/phrydy
3b5fae00d3d7210821dc9037d00f9432e1df3c2d
c6e17e8b9e24678ec7672bff031d0370bfa8b6f8
refs/heads/main
2023-08-25T12:11:47.333984
2023-08-08T14:50:08
2023-08-08T14:50:08
66,490,323
6
0
null
null
null
null
UTF-8
Python
false
false
3,255
pyi
""" This type stub file was generated by pyright. """ from __future__ import division from functools import partial from io import BytesIO from mutagen._util import BitReader, cdata, iterbytes """ http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header http://wiki.hydrogenaud.io/index.php?title=MP3 """ class LAMEError(Exception): ... class LAMEHeader: """http://gabriel.mp3-tech.org/mp3infotag.html""" vbr_method = ... lowpass_filter = ... quality = ... vbr_quality = ... track_peak = ... track_gain_origin = ... track_gain_adjustment = ... album_gain_origin = ... album_gain_adjustment = ... encoding_flags = ... ath_type = ... bitrate = ... encoder_delay_start = ... encoder_padding_end = ... source_sample_frequency_enum = ... unwise_setting_used = ... stereo_mode = ... noise_shaping = ... mp3_gain = ... surround_info = ... preset_used = ... music_length = ... music_crc = ... header_crc = ... def __init__(self, xing, fileobj) -> None: """Raises LAMEError if parsing fails""" ... def guess_settings(self, major, minor): """Gives a guess about the encoder settings used. Returns an empty string if unknown. The guess is mostly correct in case the file was encoded with the default options (-V --preset --alt-preset --abr -b etc) and no other fancy options. Args: major (int) minor (int) Returns: text """ ... @classmethod def parse_version(cls, fileobj): """Returns a version string and True if a LAMEHeader follows. The passed file object will be positioned right before the lame header if True. Raises LAMEError if there is no lame version info. """ ... class XingHeaderError(Exception): ... class XingHeaderFlags: FRAMES = ... BYTES = ... TOC = ... VBR_SCALE = ... class XingHeader: frames = ... bytes = ... toc = ... vbr_scale = ... lame_header = ... lame_version = ... lame_version_desc = ... is_info = ... def __init__(self, fileobj) -> None: """Parses the Xing header or raises XingHeaderError. The file position after this returns is undefined. """ ... def get_encoder_settings(self): # -> Literal['']: """Returns the guessed encoder settings""" ... @classmethod def get_offset(cls, info): # -> Literal[36, 21, 13]: """Calculate the offset to the Xing header from the start of the MPEG header including sync based on the MPEG header's content. """ ... class VBRIHeaderError(Exception): ... class VBRIHeader: version = ... quality = ... bytes = ... frames = ... toc_scale_factor = ... toc_frames = ... toc = ... def __init__(self, fileobj) -> None: """Reads the VBRI header or raises VBRIHeaderError. The file position is undefined after this returns """ ... @classmethod def get_offset(cls, info): # -> Literal[36]: """Offset in bytes from the start of the MPEG header including sync""" ...
[ "josef@friedrich.rocks" ]
josef@friedrich.rocks
14ecb79893f2a150fcc1e6200c9e85886e0f7225
e282226e8fda085f4c64c044327eceb3388e94ce
/mainapp/api/urls.py
1b3871642a15056f10650c9fb8bffcec8a5d906f
[]
no_license
Pavlenkovv/REST-API
2bf36f40104a51f2735ce3dd3eebcf274061a1a2
352d0bd24e88fdb793e658c5b6eaffa97b56062c
refs/heads/main
2023-03-15T22:45:50.121953
2021-03-07T07:56:31
2021-03-07T07:56:31
344,887,432
0
0
null
null
null
null
UTF-8
Python
false
false
413
py
from django.urls import path, include
from rest_framework.routers import DefaultRouter

from .api_views import AuthorViewSet, NewsPostViewSet, CommentViewSet

router = DefaultRouter()
router.register(r"newsposts", NewsPostViewSet, basename="user")
router.register(r"author", AuthorViewSet)
router.register(r"comment", CommentViewSet)

urlpatterns = [path("api/", include(router.urls))]
urlpatterns += router.urls
[ "pavlenko.vyacheslav@gmail.com" ]
pavlenko.vyacheslav@gmail.com
1b41395082d1617e92cb4539c977d7f616a594fc
ecd630f54fefa0a8a4937ac5c6724f9a3bb215c3
/projeto/avalista/migrations/0022_auto_20200910_1230.py
8922215b9bc4a928404f7c8043839ce3aebed4a8
[]
no_license
israelwerther/Esctop_Israel_Estoque
49968751464a38c473298ed876da7641efedf8de
d6ab3e502f2a97a0d3036351e59c2faa267c0efd
refs/heads/master
2023-01-07T20:21:38.381593
2020-11-12T17:35:14
2020-11-12T17:35:14
258,642,721
0
0
null
null
null
null
UTF-8
Python
false
false
667
py
# Generated by Django 3.0.7 on 2020-09-10 12:30

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('avalista', '0021_avalista_fiador_n_operacao'),
    ]

    operations = [
        migrations.AlterField(
            model_name='avalista',
            name='fiador_agencia',
            field=models.CharField(blank=True, max_length=15, null=True, verbose_name='Nº agência'),
        ),
        migrations.AlterField(
            model_name='avalista',
            name='fiador_conta',
            field=models.CharField(blank=True, max_length=15, null=True, verbose_name='Nº conta'),
        ),
    ]
[ "israelwerther48@outlook.com" ]
israelwerther48@outlook.com
6fcd77974cc305566c9496941a87ef64cb688e50
66fda6586a902f8043b1f5e9532699babc7b591a
/lib_openshift/models/v1_deployment_trigger_image_change_params.py
cdb5495ce392554744c8473da2b748a72362bdae
[ "Apache-2.0" ]
permissive
chouseknecht/lib_openshift
86eff74b4659f05dfbab1f07d2d7f42b21e2252d
02b0e4348631e088e72a982a55c214b30a4ab9d9
refs/heads/master
2020-12-11T05:23:17.081794
2016-07-28T20:15:39
2016-07-28T20:15:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,799
py
# coding: utf-8

"""
    OpenAPI spec version:

    Generated by: https://github.com/swagger-api/swagger-codegen.git

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

from pprint import pformat
from six import iteritems
import re


class V1DeploymentTriggerImageChangeParams(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    operations = [
    ]

    def __init__(self, automatic=None, container_names=None, _from=None, last_triggered_image=None):
        """
        V1DeploymentTriggerImageChangeParams - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'automatic': 'bool',
            'container_names': 'list[str]',
            '_from': 'V1ObjectReference',
            'last_triggered_image': 'str'
        }

        self.attribute_map = {
            'automatic': 'automatic',
            'container_names': 'containerNames',
            '_from': 'from',
            'last_triggered_image': 'lastTriggeredImage'
        }

        self._automatic = automatic
        self._container_names = container_names
        self.__from = _from
        self._last_triggered_image = last_triggered_image

    @property
    def automatic(self):
        """
        Gets the automatic of this V1DeploymentTriggerImageChangeParams.
        Automatic means that the detection of a new tag value should result in a new deployment.

        :return: The automatic of this V1DeploymentTriggerImageChangeParams.
        :rtype: bool
        """
        return self._automatic

    @automatic.setter
    def automatic(self, automatic):
        """
        Sets the automatic of this V1DeploymentTriggerImageChangeParams.
        Automatic means that the detection of a new tag value should result in a new deployment.

        :param automatic: The automatic of this V1DeploymentTriggerImageChangeParams.
        :type: bool
        """
        self._automatic = automatic

    @property
    def container_names(self):
        """
        Gets the container_names of this V1DeploymentTriggerImageChangeParams.
        ContainerNames is used to restrict tag updates to the specified set of container names in a pod.

        :return: The container_names of this V1DeploymentTriggerImageChangeParams.
        :rtype: list[str]
        """
        return self._container_names

    @container_names.setter
    def container_names(self, container_names):
        """
        Sets the container_names of this V1DeploymentTriggerImageChangeParams.
        ContainerNames is used to restrict tag updates to the specified set of container names in a pod.

        :param container_names: The container_names of this V1DeploymentTriggerImageChangeParams.
        :type: list[str]
        """
        self._container_names = container_names

    @property
    def _from(self):
        """
        Gets the _from of this V1DeploymentTriggerImageChangeParams.
        From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.

        :return: The _from of this V1DeploymentTriggerImageChangeParams.
        :rtype: V1ObjectReference
        """
        return self.__from

    @_from.setter
    def _from(self, _from):
        """
        Sets the _from of this V1DeploymentTriggerImageChangeParams.
        From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.

        :param _from: The _from of this V1DeploymentTriggerImageChangeParams.
        :type: V1ObjectReference
        """
        self.__from = _from

    @property
    def last_triggered_image(self):
        """
        Gets the last_triggered_image of this V1DeploymentTriggerImageChangeParams.
        LastTriggeredImage is the last image to be triggered.

        :return: The last_triggered_image of this V1DeploymentTriggerImageChangeParams.
        :rtype: str
        """
        return self._last_triggered_image

    @last_triggered_image.setter
    def last_triggered_image(self, last_triggered_image):
        """
        Sets the last_triggered_image of this V1DeploymentTriggerImageChangeParams.
        LastTriggeredImage is the last image to be triggered.

        :param last_triggered_image: The last_triggered_image of this V1DeploymentTriggerImageChangeParams.
        :type: str
        """
        self._last_triggered_image = last_triggered_image

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
[ "jdetiber@redhat.com" ]
jdetiber@redhat.com
f9242da26ab0e85261149acc3935789753a44160
0cafca9e27e70aa47b3774a13a537f45410f13f7
/idb/ipc/push.py
c7f6d1ab8f6e77317e6d081e0655d31ebf0c16a5
[ "MIT" ]
permissive
fakeNetflix/facebook-repo-idb
18b67ca6cfa0edd3fa7b9c4940fec6c3f0ccfa73
eb4ed5a7dc4a14b224a22e833294d7366fe4725e
refs/heads/master
2023-01-05T13:19:40.755318
2019-08-16T15:23:45
2019-08-16T15:25:00
203,098,477
1
0
MIT
2023-01-04T07:33:09
2019-08-19T04:31:16
Objective-C
UTF-8
Python
false
false
1,039
py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

from idb.common.stream import stream_map
from idb.common.tar import generate_tar
from idb.grpc.idb_pb2 import Payload, PushRequest, PushResponse
from idb.grpc.stream import Stream, drain_to_stream
from idb.grpc.types import CompanionClient


async def daemon(
    client: CompanionClient, stream: Stream[PushResponse, PushRequest]
) -> None:
    async with client.stub.push.open() as companion:
        await companion.send_message(await stream.recv_message())
        if client.is_local:
            generator = stream
        else:
            paths = [request.payload.file_path async for request in stream]
            generator = stream_map(
                generate_tar(paths=paths),
                lambda chunk: PushRequest(payload=Payload(data=chunk)),
            )
        response = await drain_to_stream(
            stream=companion, generator=generator, logger=client.logger
        )
        await stream.send_message(response)
[ "facebook-github-bot@users.noreply.github.com" ]
facebook-github-bot@users.noreply.github.com
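For context on the stream_map call above, a stdlib-only stand-in written under the assumption that it applies a function to every item of an async iterator; the real idb.common.stream helper may differ in detail:

import asyncio

async def stream_map_sketch(source, fn):
    # Apply fn to each item of an async iterator (assumed stream_map behaviour).
    async for item in source:
        yield fn(item)

async def demo():
    async def chunks():
        for data in (b"a", b"b"):
            yield data
    async for wrapped in stream_map_sketch(chunks(), lambda c: {"data": c}):
        print(wrapped)

asyncio.run(demo())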
181269644d8602fc2dcb673b30857f2da8b2b11f
6deafbf6257a5c30f084c3678712235c2c31a686
/Toolz/sqlmap/tamper/least.py
53a8a6aadefe283a268fd3ad7a0c5fd1f51f2a67
[ "Unlicense", "LicenseRef-scancode-generic-cla", "GPL-1.0-or-later", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-proprietary-license", "GPL-2.0-only", "LicenseRef-scancode-commercial-license", "LicenseRef-scancode-other-permissive" ]
permissive
thezakman/CTF-Heaven
53fcb4a72afa821ad05d8cc3b309fb388f958163
4b52a2178922f1502ab00fa8fc156d35e1dc653f
refs/heads/master
2023-04-05T18:20:54.680378
2023-03-21T13:47:45
2023-03-21T13:47:45
167,290,879
182
24
Unlicense
2022-11-29T21:41:30
2019-01-24T02:44:24
Python
UTF-8
Python
false
false
1,126
py
#!/usr/bin/env python """ Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/) See the file 'LICENSE' for copying permission """ import re from lib.core.enums import PRIORITY __priority__ = PRIORITY.HIGHEST def dependencies(): pass def tamper(payload, **kwargs): """ Replaces greater than operator ('>') with 'LEAST' counterpart Tested against: * MySQL 4, 5.0 and 5.5 * Oracle 10g * PostgreSQL 8.3, 8.4, 9.0 Notes: * Useful to bypass weak and bespoke web application firewalls that filter the greater than character * The LEAST clause is a widespread SQL command. Hence, this tamper script should work against majority of databases >>> tamper('1 AND A > B') '1 AND LEAST(A,B+1)=B+1' """ retVal = payload if payload: match = re.search(r"(?i)(\b(AND|OR)\b\s+)([^>]+?)\s*>\s*(\w+|'[^']+')", payload) if match: _ = "%sLEAST(%s,%s+1)=%s+1" % (match.group(1), match.group(3), match.group(4), match.group(4)) retVal = retVal.replace(match.group(0), _) return retVal
[ "thezakman@ctf-br.org" ]
thezakman@ctf-br.org
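The tamper function can be exercised on its own, assuming a sqlmap checkout is on sys.path so that lib.core.enums resolves (the path below is a placeholder):

import sys
sys.path.insert(0, "/path/to/sqlmap")  # placeholder checkout location
from tamper.least import tamper

print(tamper("1 AND A > B"))  # -> 1 AND LEAST(A,B+1)=B+1, per the doctest above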
788ecb8dfd993ef9d68c1c979145bef4be44c7a1
516dea668ccdc13397fd140f9474939fa2d7ac10
/enterprisebanking/middlewares.py
ad1d6a91a6ff2f6a7afebb8c4d5c122ae4ea0f71
[]
no_license
daniel-kanchev/enterprisebanking
08f1162647a0820aafa5a939e64c1cceb7844977
bdb7bc4676419d7dcfe47ca8e817774ad031b585
refs/heads/main
2023-04-09T19:29:30.892047
2021-04-07T08:10:15
2021-04-07T08:10:15
355,463,635
0
0
null
null
null
null
UTF-8
Python
false
false
3,670
py
# Define here the models for your spider middleware # # See documentation in: # https://docs.scrapy.org/en/latest/topics/spider-middleware.html from scrapy import signals # useful for handling different item types with a single interface from itemadapter import is_item, ItemAdapter class enterprisebankingSpiderMiddleware: # Not all methods need to be defined. If a method is not defined, # scrapy acts as if the spider middleware does not modify the # passed objects. @classmethod def from_crawler(cls, crawler): # This method is used by Scrapy to create your spiders. s = cls() crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) return s def process_spider_input(self, response, spider): # Called for each response that goes through the spider # middleware and into the spider. # Should return None or raise an exception. return None def process_spider_output(self, response, result, spider): # Called with the results returned from the Spider, after # it has processed the response. # Must return an iterable of Request, or item objects. for i in result: yield i def process_spider_exception(self, response, exception, spider): # Called when a spider or process_spider_input() method # (from other spider middleware) raises an exception. # Should return either None or an iterable of Request or item objects. pass def process_start_requests(self, start_requests, spider): # Called with the start requests of the spider, and works # similarly to the process_spider_output() method, except # that it doesn’t have a response associated. # Must return only requests (not items). for r in start_requests: yield r def spider_opened(self, spider): spider.logger.info('Spider opened: %s' % spider.name) class enterprisebankingDownloaderMiddleware: # Not all methods need to be defined. If a method is not defined, # scrapy acts as if the downloader middleware does not modify the # passed objects. @classmethod def from_crawler(cls, crawler): # This method is used by Scrapy to create your spiders. s = cls() crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) return s def process_request(self, request, spider): # Called for each request that goes through the downloader # middleware. # Must either: # - return None: continue processing this request # - or return a Response object # - or return a Request object # - or raise IgnoreRequest: process_exception() methods of # installed downloader middleware will be called return None def process_response(self, request, response, spider): # Called with the response returned from the downloader. # Must either; # - return a Response object # - return a Request object # - or raise IgnoreRequest return response def process_exception(self, request, exception, spider): # Called when a download handler or a process_request() # (from other downloader middleware) raises an exception. # Must either: # - return None: continue processing this exception # - return a Response object: stops process_exception() chain # - return a Request object: stops process_exception() chain pass def spider_opened(self, spider): spider.logger.info('Spider opened: %s' % spider.name)
[ "daniel.kanchev@adata.pro" ]
daniel.kanchev@adata.pro
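These middleware classes only take effect once enabled in the project settings; a sketch of the usual settings.py entries (543 is the order value from Scrapy's project template):

# settings.py
SPIDER_MIDDLEWARES = {
    "enterprisebanking.middlewares.enterprisebankingSpiderMiddleware": 543,
}
DOWNLOADER_MIDDLEWARES = {
    "enterprisebanking.middlewares.enterprisebankingDownloaderMiddleware": 543,
}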
a8691c22467753872cc6ea65d244c12c491dc815
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/nouns/_nationality.py
4e1dcbd9aa26fd3af3fbdc1264cb9f070b10fdb7
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
413
py
# class header

class _NATIONALITY():
    def __init__(self,):
        self.name = "NATIONALITY"
        self.definitions = [u'the official right to belong to a particular country: ', u'a group of people of the same race, religion, traditions, etc.: ']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}

        self.specie = 'nouns'

    def run(self, obj1=[], obj2=[]):
        return self.jsondata
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
8fb5e452de9da869a55ccca9cd00839bdadeeeab
3bfa43cd86d1fb3780f594c181debc65708af2b8
/algorithms/sort/heap_sort.py
0f1953ff4b5ac7e3fd902dd4f15744131c3cc8bf
[]
no_license
ninjaboynaru/my-python-demo
2fdb6e75c88e07519d91ee8b0e650fed4a2f9a1d
d679a06a72e6dc18aed95c7e79e25de87e9c18c2
refs/heads/master
2022-11-06T14:05:14.848259
2020-06-21T20:10:05
2020-06-21T20:10:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,610
py
""" <https://docs.python.org/3/library/heapq.html> <https://www.youtube.com/watch?v=AEAmgbls8TM&feature=youtu.be> Steps: 1. Put every item in the list into a heap 2. Each step get the smallest item from the heap, put the smallest into a new list 3. Repeat until the heap is empty ```python from heapq import heappush, heappop This is the simple version with python module def heap_sort(lst): h = [] for val in lst: heappush(h, val) return [heappop(h) for i in range(len(h))] ``` There is also inplace heap sort Steps: 1. Heapification (Bottom-up heapify the array) 1. Sink nodes in reverse order, sink(k) 2. After sinking, guaranteed that tree rooted at position k is a heap 2. Delete the head of the heap, delete the last item from the heap, swap the last item in the root, and sink(0) Time complexity: O(N log(N)) Space complexity: O(1) The definition of sink(k): Steps: 1. If k-th item is larger than one of its child, swap it with its child. the children of k-th item is the (2*k+1) and (2*k+2). (if the item is larger than both of the children, swap with the smaller one) 2. Repeat this until the end of the heap array. Example: 3, 0, 1, 7, 9, 2 Heapifiy: 9 7 2 3 0 1 Delete head of heap, and sink(0): 7 3 2 1 0 Delete head of heap, and sink(0): 3 1 2 0 Delete head of heap, and sink(0): 2 1 0 Delete head of heap, and sink(0): 1 0 Delete head of heap, and sink(0): 0 """ def heap_sort(lst): def sink(start, end): """ MaxHeap sink. If lst[start] is smaller than its children, sink down till the end. """ left = 2*start + 1 right = 2*start + 2 swap_pos = None if left > end: return if right > end or lst[left] > lst[right]: swap_pos = left else: swap_pos = right if swap_pos: temp = lst[start] lst[start] = lst[swap_pos] lst[swap_pos] = temp sink(swap_pos, end) # Bottom-up heapify the array for k in range(len(lst)-1, -1, -1): sink(k, len(lst)-1) # print(lst) # Delete the head of the heap, delete the last item from the heap, swap # the last item in the root, and sink(0) for end in range(len(lst) - 1, 0, -1): first = lst[0] lst[0] = lst[end] lst[end] = first sink(0, end-1) # print(lst) if __name__ == "__main__": lst = [3, 0, 1, 7, 9, 2] heap_sort(lst) print(lst)
[ "wangxin19930411@163.com" ]
wangxin19930411@163.com
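A quick sanity check of the in-place sort against the built-in sorted(), assuming heap_sort is imported from the module above:

import random

data = [random.randint(-50, 50) for _ in range(200)]
expected = sorted(data)
heap_sort(data)          # sorts in place
assert data == expected
print("heap_sort matches sorted()")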
265d01952ab7506e909f20767daaeac5d52864e4
4ce2cff60ddbb9a3b6fc2850187c86f866091b13
/tfrecords/src/wai/tfrecords/object_detection/dataset_tools/create_oid_tf_record.py
271fd0aac175d399dda9b528a9a311145f48cfc1
[ "MIT", "Apache-2.0" ]
permissive
8176135/tensorflow
18cb8a0432ab2a0ea5bacd03309e647f39cb9dd0
2c3b4b1d66a80537f3e277d75ec1d4b43e894bf1
refs/heads/master
2020-11-26T05:00:56.213093
2019-12-19T08:13:44
2019-12-19T08:13:44
228,970,478
0
0
null
2019-12-19T03:51:38
2019-12-19T03:51:37
null
UTF-8
Python
false
false
5,240
py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Creates TFRecords of Open Images dataset for object detection. Example usage: python object_detection/dataset_tools/create_oid_tf_record.py \ --input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \ --input_image_label_annotations_csv=/path/to/input/annotations-label.csv \ --input_images_directory=/path/to/input/image_pixels_directory \ --input_label_map=/path/to/input/labels_bbox_545.labelmap \ --output_tf_record_path_prefix=/path/to/output/prefix.tfrecord CSVs with bounding box annotations and image metadata (including the image URLs) can be downloaded from the Open Images GitHub repository: https://github.com/openimages/dataset This script will include every image found in the input_images_directory in the output TFRecord, even if the image has no corresponding bounding box annotations in the input_annotations_csv. If input_image_label_annotations_csv is specified, it will add image-level labels as well. Note that the information of whether a label is positivelly or negativelly verified is NOT added to tfrecord. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import contextlib2 import pandas as pd import tensorflow as tf from wai.tfrecords.object_detection.dataset_tools import oid_tfrecord_creation from wai.tfrecords.object_detection.dataset_tools import tf_record_creation_util from wai.tfrecords.object_detection.utils import label_map_util tf.flags.DEFINE_string('input_box_annotations_csv', None, 'Path to CSV containing image bounding box annotations') tf.flags.DEFINE_string('input_images_directory', None, 'Directory containing the image pixels ' 'downloaded from the OpenImages GitHub repository.') tf.flags.DEFINE_string('input_image_label_annotations_csv', None, 'Path to CSV containing image-level labels annotations') tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto') tf.flags.DEFINE_string( 'output_tf_record_path_prefix', None, 'Path to the output TFRecord. 
The shard index and the number of shards ' 'will be appended for each output shard.') tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards') FLAGS = tf.flags.FLAGS def main(_): tf.logging.set_verbosity(tf.logging.INFO) required_flags = [ 'input_box_annotations_csv', 'input_images_directory', 'input_label_map', 'output_tf_record_path_prefix' ] for flag_name in required_flags: if not getattr(FLAGS, flag_name): raise ValueError('Flag --{} is required'.format(flag_name)) label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map) all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv) if FLAGS.input_image_label_annotations_csv: all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv) all_label_annotations.rename( columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True) else: all_label_annotations = None all_images = tf.gfile.Glob( os.path.join(FLAGS.input_images_directory, '*.jpg')) all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images] all_image_ids = pd.DataFrame({'ImageID': all_image_ids}) all_annotations = pd.concat( [all_box_annotations, all_image_ids, all_label_annotations]) tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids)) with contextlib2.ExitStack() as tf_record_close_stack: output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( tf_record_close_stack, FLAGS.output_tf_record_path_prefix, FLAGS.num_shards) for counter, image_data in enumerate(all_annotations.groupby('ImageID')): tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000, counter) image_id, image_annotations = image_data # In OID image file names are formed by appending ".jpg" to the image ID. image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg') with tf.gfile.Open(image_path) as image_file: encoded_image = image_file.read() tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame( image_annotations, label_map, encoded_image) if tf_example: shard_idx = int(image_id, 16) % FLAGS.num_shards output_tfrecords[shard_idx].write(tf_example.SerializeToString()) if __name__ == '__main__': tf.app.run()
[ "coreytsterling@gmail.com" ]
coreytsterling@gmail.com
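The shard assignment near the end of main() maps each hexadecimal image ID to a bucket; a small illustration with a made-up ID:

num_shards = 100
image_id = "000002b66c9c498e"  # hypothetical OID-style hex image ID
shard_idx = int(image_id, 16) % num_shards
print(shard_idx)  # deterministic value in [0, num_shards)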
8da1f2b67b46206e3835fdfee41f7365ac844f46
577f03954ec69ed82eaea32c62c8eba9ba6a01c1
/py/testdir_ec2_only/test_parse_covtype20x_s3.py
d6207e11b1f8763b5cd9fdd1466e72b472d7c03f
[ "Apache-2.0" ]
permissive
ledell/h2o
21032d784a1a4bb3fe8b67c9299f49c25da8146e
34e271760b70fe6f384e106d84f18c7f0adb8210
refs/heads/master
2020-02-26T13:53:01.395087
2014-12-29T04:14:29
2014-12-29T04:14:29
24,823,632
1
2
null
null
null
null
UTF-8
Python
false
false
1,962
py
import unittest, sys, random, time sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): print "Will build clouds with incrementing heap sizes and import folder/parse" @classmethod def tearDownClass(cls): # the node state is gone when we tear down the cloud, so pass the ignore here also. h2o.tear_down_cloud(sandboxIgnoreErrors=True) def test_parse_covtype20x_loop_s3(self): bucket = 'home-0xdiag-datasets' importFolderPath = "standard" csvFilename = "covtype20x.data" csvPathname = importFolderPath + "/" + csvFilename timeoutSecs = 500 trialMax = 3 for tryHeap in [4,12]: print "\n", tryHeap,"GB heap, 1 jvm per host, import folder,", \ "then parse 'covtype20x.data'" h2o.init(java_heap_GB=tryHeap) # don't raise exception if we find something bad in h2o stdout/stderr? h2o.nodes[0].sandboxIgnoreErrors = True for trial in range(trialMax): hex_key = csvFilename + ".hex" start = time.time() parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='s3', hex_key=hex_key, timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=60) elapsed = time.time() - start print "parse result:", parseResult['destination_key'] print "Trial #", trial, "completed in", elapsed, "seconds.", \ "%d pct. of timeout" % ((elapsed*100)/timeoutSecs) removeKeyResult = h2o.nodes[0].remove_key(key=hex_key) h2o.tear_down_cloud() # sticky ports? wait a bit. time.sleep(5) if __name__ == '__main__': h2o.unit_main()
[ "kevin@0xdata.com" ]
kevin@0xdata.com
5d0a2f7e05ee7c3731f9b7550e0d5d9f8625cb88
78c08cd3ef66836b44373280a333c040ccb99605
/ostap/fitting/tests/test_fitting_convolution.py
3f980fbf093211f18849b15254d2f25697d8e7a7
[ "BSD-3-Clause" ]
permissive
Pro100Tema/ostap
11ccbc546068e65aacac5ddd646c7550086140a7
1765304fce43714e1f51dfe03be0daa5aa5d490f
refs/heads/master
2023-02-24T08:46:07.532663
2020-01-27T13:46:30
2020-01-27T13:46:30
200,378,716
0
0
BSD-3-Clause
2019-08-03T13:28:08
2019-08-03T13:28:07
null
UTF-8
Python
false
false
3,426
py
#!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================= # Copyright (c) Ostap developers. # ============================================================================= # @file test_fitting_convolution.py # Test module for ostap/fitting/convolution.py # ============================================================================= """ Test module for ostap/fitting/convolution.py """ # ============================================================================= __author__ = "Ostap developers" __all__ = () ## nothing to import # ============================================================================= import ROOT, random import ostap.fitting.roofit import ostap.fitting.models as Models from ostap.core.core import cpp, VE, dsID from ostap.logger.utils import rooSilent # ============================================================================= # logging # ============================================================================= from ostap.logger.logger import getLogger if '__main__' == __name__ or '__builtin__' == __name__ : logger = getLogger ( 'test_fitting_convolution' ) else : logger = getLogger ( __name__ ) # ============================================================================= ## make x = ROOT.RooRealVar ( 'x', 'test' , 1 , 10 ) models = set() # ============================================================================= ## Asymmetric Laplace # ============================================================================= def test_laplace(): logger.info ('Test Asymmetric Laplace shape' ) laplace = Models.AsymmetricLaplace_pdf ( name = 'AL', xvar = x , mean = 5 , slope = 1 ) from ostap.fitting.convolution import Convolution_pdf ## constant resolution laplace_1 = Convolution_pdf ( name = 'L1' , pdf = laplace, resolution = 0.75 ) ## resolution PDF from ostap.fitting.resolution import ResoApo2 rAp = ResoApo2 ( 'A' , x , 0.75 ) ## resolution as PDF laplace_2 = Convolution_pdf ( name = 'L2' , pdf = laplace, resolution = rAp ) laplace.draw( silent = True ) laplace_1.draw( silent = True ) laplace_2.draw() models.add ( laplace ) models.add ( laplace_1 ) models.add ( laplace_2 ) # ============================================================================= ## check that everything is serializable # ============================================================================= def test_db() : logger.info('Saving all objects into DBASE') import ostap.io.zipshelve as DBASE from ostap.utils.timing import timing with timing( name = 'Save everything to DBASE'), DBASE.tmpdb() as db : db['models' ] = models db.ls() # ============================================================================= if '__main__' == __name__ : test_laplace () ## Laplace-function + background ## check finally that everything is serializeable: test_db () # ============================================================================= # The END # =============================================================================
[ "Ivan.Belyaev@cern.ch" ]
Ivan.Belyaev@cern.ch
f66e5ca5bccba463ba1c7ea0e178e85c4982a93f
3e5ecad4d2f681f2f4f749109cc99deea1209ea4
/Dacon/solar1/test04_solar9.py
0f9e499e4f86263fff68de5a667aeda9b729cb92
[]
no_license
SunghoonSeok/Study
f41ede390079037b2090e6df20e5fb38f2e59b8f
50f02b9c9bac904cd4f6923b41efabe524ff3d8a
refs/heads/master
2023-06-18T06:47:55.545323
2021-07-05T00:47:55
2021-07-05T00:47:55
324,866,762
1
0
null
null
null
null
UTF-8
Python
false
false
4,798
py
# Predict 2 days of target values from 7 days of data
# Split the data by hour and train a separate model for each hour
import numpy as np
import pandas as pd
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Input, LSTM, Dropout, Conv1D, Flatten, MaxPooling1D, GRU, SimpleRNN
from tensorflow.keras.backend import mean, maximum

# Define the required helper functions
# Add the GHI column
def Add_features(data):
    data['cos'] = np.cos(np.pi/2 - np.abs(data['Hour']%12 - 6)/6*np.pi/2)
    data.insert(1,'GHI',data['DNI']*data['cos']+data['DHI'])
    data.drop(['cos'], axis= 1, inplace = True)
    return data

# Slice the data into sliding windows of `size` rows
def split_x(data, size):
    x = []
    for i in range(len(data)-size+1):
        subset = data[i : (i+size)]
        x.append([item for item in subset])
    print(type(x))
    return np.array(x)

# Quantile-loss helper
def quantile_loss(q, y_true, y_pred):
    err = (y_true - y_pred)
    return K.mean(K.maximum(q*err, (q-1)*err), axis=-1)

quantiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]

# Keep only 7 of the data columns
def preprocess_data(data):
    data = Add_features(data)
    temp = data.copy()
    temp = temp[['GHI', 'DHI', 'DNI', 'WS', 'RH', 'T','TARGET']]
    return temp.iloc[:, :]

# The model, using Conv1D
def DaconModel():
    model = Sequential()
    model.add(Conv1D(256,2, padding='same', input_shape=(7, 7),activation='relu'))
    model.add(Conv1D(128,2, padding='same',activation='relu'))
    model.add(Conv1D(64,2, padding='same',activation='relu'))
    model.add(Conv1D(32,2, padding='same',activation='relu'))
    model.add(Flatten())
    model.add(Dense(64,activation='relu'))
    model.add(Dense(32,activation='relu'))
    model.add(Dense(16,activation='relu'))
    model.add(Dense(8,activation='relu'))
    model.add(Dense(1))
    return model

# Import the optimizers
from tensorflow.keras.optimizers import Adam, Adadelta, Adamax, Adagrad
from tensorflow.keras.optimizers import RMSprop, SGD, Nadam

# Compile-and-train function; the optimizer is created inside the loop so the
# learning rate restarts from lr=0.002 and decays for every run
# Note: if the optimizer were created outside the for loop, it would not reset
# In total 48 (hours) * 9 (quantiles) * 2 (Day7, Day8) checkpoint models are created
def only_compile(a, x_train, y_train, x_val, y_val):
    for q in quantiles:
        print('Day' + str(i) + ' ' + str(q) + ' is running.')
        model = DaconModel()
        optimizer = Adam(lr=0.002)
        model.compile(loss = lambda y_true,y_pred: quantile_loss(q,y_true,y_pred), optimizer = optimizer, metrics = [lambda y,y_pred: quantile_loss(q,y,y_pred)])
        filepath = f'c:/data/test/solar/checkpoint/solar_checkpoint5_time{i}-{a}-{q}.hdf5'
        cp = ModelCheckpoint(filepath, save_best_only=True, monitor = 'val_loss')
        model.fit(x_train,y_train,epochs = epochs, batch_size = bs, validation_data = (x_val,y_val),callbacks = [es,lr,cp])
    return

# 1. Data
train = pd.read_csv('c:/data/test/solar/train/train.csv')
sub = pd.read_csv('c:/data/test/solar/sample_submission.csv')

# Save the data as npy
data = train.values
print(data.shape)
np.save('c:/data/test/solar/train.npy', arr=data)
data =np.load('c:/data/test/solar/train.npy')

# Group the data by hour using a transpose
data = data.reshape(1095, 48, 9)
data = np.transpose(data, axes=(1,0,2))
print(data.shape)
data = data.reshape(48*1095,9)
df = train.copy()
df.loc[:,:] = data
df.to_csv('c:/data/test/solar/train_trans.csv', index=False)

# Build a separate model for every hour
train_trans = pd.read_csv('c:/data/test/solar/train_trans.csv')
train_data = preprocess_data(train_trans) # (52560,7)

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
es = EarlyStopping(monitor = 'val_loss', patience = 15)
lr = ReduceLROnPlateau(monitor = 'val_loss', patience = 5, factor = 0.5, verbose = 1)

# The for loop creates checkpoints per hour, per quantile, and per day (7 and 8)
for i in range(48):
    train_sort = train_data[1095*(i):1095*(i+1)]
    train_sort = np.array(train_sort)
    y = train_sort[7:,-1] #(1088,)
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    scaler.fit(train_sort)
    train_sort = scaler.transform(train_sort)
    x = split_x(train_sort, 7)
    x = x[:-2,:] #(1087,7,7)
    y1 = y[:-1] #(1087,)
    y2 = y[1:] #(1087,)
    from sklearn.model_selection import train_test_split
    x_train, x_val, y1_train, y1_val, y2_train, y2_val = train_test_split(x, y1, y2, train_size=0.8, shuffle=True, random_state=32)
    epochs = 1000
    bs = 32

    only_compile(0, x_train, y1_train, x_val, y1_val)
    only_compile(1, x_train, y2_train, x_val, y2_val)
[ "76455292+SunghoonSeok@users.noreply.github.com" ]
76455292+SunghoonSeok@users.noreply.github.com
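The split_x helper above produces sliding windows; a toy shape check (len(data) - size + 1 = 4 windows of length 7 from 10 rows), assuming split_x is in scope:

import numpy as np

windows = split_x(np.arange(10), 7)
print(windows.shape)  # (4, 7)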
f1c1d1272813db29b692fe04bc813b6a679526fc
34599596e145555fde0d4264a1d222f951f49051
/pcat2py/class/20dbcc2a-5cc5-11e4-af55-00155d01fe08.py
b39c4aee05264d664cba5c47aa38bafddd842eb2
[ "MIT" ]
permissive
phnomcobra/PCAT2PY
dc2fcbee142ce442e53da08476bfe4e68619346d
937c3b365cdc5ac69b78f59070be0a21bdb53db0
refs/heads/master
2021-01-11T02:23:30.669168
2018-02-13T17:04:03
2018-02-13T17:04:03
70,970,520
0
0
null
null
null
null
UTF-8
Python
false
false
961
py
#!/usr/bin/python ################################################################################ # 20dbcc2a-5cc5-11e4-af55-00155d01fe08 # # Justin Dierking # justindierking@hardbitsolutions.com # phnomcobra@gmail.com # # 10/24/2014 Original Construction ################################################################################ class Finding: def __init__(self): self.output = [] self.is_compliant = False self.uuid = "20dbcc2a-5cc5-11e4-af55-00155d01fe08" def check(self, cli): # Initialize Compliance self.is_compliant = False # Get Auditpol Value enabled = cli.get_auditpol(r'Special Logon', 'Success') # Output Lines self.output = [r'Special Logon', ('Success=' + str(enabled))] if enabled: self.is_compliant = True return self.is_compliant def fix(self, cli): cli.set_auditpol(r'Special Logon', 'Success', True)
[ "phnomcobra@gmail.com" ]
phnomcobra@gmail.com
390ee336f83088e3f9b8609b7c854dfa3f4ea232
2e5e990955957cf04367ef6eedd62e6add7ccdc7
/oms_cms/backend/api/v2/social_networks/serializers.py
24a77bc22571a871c6dfb51890fd85f061a40858
[ "BSD-3-Clause" ]
permissive
RomanYarovoi/oms_cms
3dfcd19ff03b351dc754f73f4a0d8a9986cf28ec
49c6789242d7a35e81f4f208c04b18fb79249be7
refs/heads/master
2021-07-06T18:49:51.021820
2020-10-15T05:52:55
2020-10-15T05:52:55
196,556,814
0
0
BSD-3-Clause
2020-10-15T05:52:57
2019-07-12T10:07:29
JavaScript
UTF-8
Python
false
false
312
py
from rest_framework import serializers

from oms_cms.backend.social_networks.models import SocialNetworks


class SocialNetworksSerializer(serializers.ModelSerializer):
    """Serialization of social networks"""
    class Meta:
        model = SocialNetworks
        fields = '__all__'
[ "arsavit@gmail.com" ]
arsavit@gmail.com
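A short usage sketch; the actual field names in serializer.data depend on the SocialNetworks model, which is defined elsewhere:

# Hypothetical shell/view usage of the serializer defined above.
networks = SocialNetworks.objects.all()
serializer = SocialNetworksSerializer(networks, many=True)
print(serializer.data)  # list of dicts, one per SocialNetworks row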
2cac3d08334c146dd3333f471c8ee1fa6546c71d
bc9c1a4da0d5bbf8d4721ee7ca5163f488e88a57
/research/urls.py
fe0aeb667e57278015b49196ad14403f92bec46d
[]
no_license
mit-teaching-systems-lab/newelk
77f43666f3c70be4c31fdfc6d4a6e9c629c71656
a2e6665bfcf9e2ea12fde45319027ee4a848f93c
refs/heads/master
2022-12-13T20:50:17.632513
2019-10-03T19:02:01
2019-10-03T19:02:01
132,154,880
0
4
null
2022-12-08T01:26:56
2018-05-04T15:04:20
Python
UTF-8
Python
false
false
222
py
from django.urls import path from . import views urlpatterns = [ # path('chatlogs/', views.streaming_chat_csv), # path('answerlogs/', views.streaming_answers_view), path("feedback/", views.toggle_feedback) ]
[ "bhanks@mit.edu" ]
bhanks@mit.edu
64ad76f77783d4b8a4cb1b9d87b673ea62470bf1
f566dfc5ce189d30696b9bf8b7e8bf9b1ef45614
/Example/DQN_SimpleMaze/DoubleDQN_SimpleMazeTwoD.py
a8615b896bcd6023b12a714b7533a963e26b7691
[]
no_license
yangyutu/DeepReinforcementLearning-PyTorch
3dac4ad67fa3a6301d65ca5c63532f2a278e21d7
7af59cb883e24429d42a228584cfc96c42f6d35b
refs/heads/master
2022-08-16T13:46:30.748383
2022-07-30T05:47:47
2022-07-30T05:47:47
169,829,723
12
6
null
null
null
null
UTF-8
Python
false
false
2,382
py
from Agents.DQN.DQN import DQNAgent from Agents.Core.MLPNet import MultiLayerNetRegression import json from torch import optim from copy import deepcopy from Env.CustomEnv.SimpleMazeTwoD import SimpleMazeTwoD import numpy as np import matplotlib.pyplot as plt import torch torch.manual_seed(1) def plotPolicy(policy, nbActions): idx, idy = np.where(policy >=0) action = policy[idx,idy] plt.scatter(idx, idy, c = action, marker='s', s = 10) # for i in range(nbActions): # idx, idy = np.where(policy == i) # plt.plot(idx,idy, ) # first construct the neutral network config = dict() mapName = 'map.txt' config['trainStep'] = 1000 config['epsThreshold'] = 0.1 config['targetNetUpdateStep'] = 100 config['memoryCapacity'] = 2000 config['trainBatchSize'] = 32 config['gamma'] = 0.9 config['learningRate'] = 0.003 config['netGradClip'] = 1 config['logFlag'] = True config['logFileName'] = 'SimpleMazeLog/DoubleQtraj' + mapName config['logFrequency'] = 50 config['netUpdateOption'] = 'doubleQ' env = SimpleMazeTwoD(mapName) N_S = env.stateDim N_A = env.nbActions netParameter = dict() netParameter['n_feature'] = N_S netParameter['n_hidden'] = [100] netParameter['n_output'] = N_A policyNet = MultiLayerNetRegression(netParameter['n_feature'], netParameter['n_hidden'], netParameter['n_output']) print(policyNet.state_dict()) targetNet = deepcopy(policyNet) optimizer = optim.Adam(policyNet.parameters(), lr=config['learningRate']) agent = DQNAgent(policyNet, targetNet, env, optimizer, torch.nn.MSELoss() ,N_S, N_A, config=config) policy = deepcopy(env.map) for i in range(policy.shape[0]): for j in range(policy.shape[1]): if env.map[i, j] == 0: policy[i, j] = -1 else: policy[i, j] = agent.getPolicy(np.array([i, j])) np.savetxt('DoubleQSimpleMazePolicyBeforeTrain' + mapName + '.txt', policy, fmt='%d', delimiter='\t') plotPolicy(policy, N_A) agent.train() policy = deepcopy(env.map) for i in range(policy.shape[0]): for j in range(policy.shape[1]): if env.map[i, j] == 0: policy[i, j] = -1 else: policy[i, j] = agent.getPolicy(np.array([i, j])) np.savetxt('DoubleQSimpleMazePolicyAfterTrain' + mapName +'.txt', policy, fmt='%d', delimiter='\t') plotPolicy(policy, N_A)
[ "yangyutu123@gmail.com" ]
yangyutu123@gmail.com
f92d14e56e3f2106526540e9015138bc89fc3d77
c12008fee6b319ccc683956d0a171a00e12debb0
/everyday/e191020.py
53e6428caf621fada6c4bfabfffe7d54a1250dd8
[]
no_license
yrnana/algorithm
70c7b34c82b15598494103bdb49b4aefc7c53548
783e4f9a45baf8d6b5900e442d32c2b6f73487d0
refs/heads/master
2022-04-13T23:50:53.914225
2020-04-01T12:41:14
2020-04-01T12:41:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
295
py
def solution(arr):
    # Stable partition: move every non-zero element to the front while
    # pushing the zeros to the back; the relative order of the non-zero
    # elements is preserved.
    l = len(arr)
    n = 0  # index where the next non-zero element belongs
    for i in range(l):
        if arr[i] != 0:
            swap(arr, i, n)
            n += 1
    return arr


def swap(arr, i, j):
    tmp = arr[i]
    arr[i] = arr[j]
    arr[j] = tmp


print(solution([0, 5, 0, 3, -1]))
print(solution([3, 0, 3]))
[ "nyryn0945@gmail.com" ]
nyryn0945@gmail.com
bf880139591dc7c773d8e6bf7be78b1c793a73ef
364b36d699d0a6b5ddeb43ecc6f1123fde4eb051
/_downloads_1ed/fig_poisson_continuous.py
686b96403de5b92c73a2308049b03cfd324a149b
[]
no_license
astroML/astroml.github.com
eae3bfd93ee2f8bc8b5129e98dadf815310ee0ca
70f96d04dfabcd5528978b69c217d3a9a8bc370b
refs/heads/master
2022-02-27T15:31:29.560052
2022-02-08T21:00:35
2022-02-08T21:00:35
5,871,703
2
5
null
2022-02-08T21:00:36
2012-09-19T12:55:23
HTML
UTF-8
Python
false
false
3,102
py
""" Unbinned Poisson Data --------------------- Figure 5.14 Regression of unbinned data. The distribution of N = 500 data points is shown in the left panel; the true pdf is shown by the solid curve. Note that although the data are binned in the left panel for visualization purposes, the analysis is performed on the unbinned data. The right panel shows the likelihood for the slope a (eq. 5.88) for three different sample sizes. The input value is indicated by the vertical dotted line. """ # Author: Jake VanderPlas # License: BSD # The figure produced by this code is published in the textbook # "Statistics, Data Mining, and Machine Learning in Astronomy" (2013) # For more information, see http://astroML.github.com # To report a bug or issue, use the following forum: # https://groups.google.com/forum/#!forum/astroml-general import numpy as np from matplotlib import pyplot as plt from astroML.stats.random import linear #---------------------------------------------------------------------- # This function adjusts matplotlib settings for a uniform feel in the textbook. # Note that with usetex=True, fonts are rendered with LaTeX. This may # result in an error if LaTeX is not installed on your system. In that case, # you can set usetex to False. from astroML.plotting import setup_text_plots setup_text_plots(fontsize=8, usetex=True) def linprob_logL(x, a, xmin, xmax): x = x.ravel() a = a.reshape(a.shape + (1,)) mu = 0.5 * (xmin + xmax) W = (xmax - xmin) return np.sum(np.log(a * (x - mu) + 1. / W), -1) #---------------------------------------------------------------------- # Draw the data from the linear distribution np.random.seed(0) N = 500 a_true = 0.01 xmin = 0.0 xmax = 10.0 lin_dist = linear(xmin, xmax, a_true) data = lin_dist.rvs(N) x = np.linspace(xmin - 1, xmax + 1, 1000) px = lin_dist.pdf(x) #------------------------------------------------------------ # Plot the results fig = plt.figure(figsize=(5, 2.5)) fig.subplots_adjust(left=0.12, right=0.95, wspace=0.28, bottom=0.15, top=0.9) # left panel: plot the model and a histogram of the data ax1 = fig.add_subplot(121) ax1.hist(data, bins=np.linspace(0, 10, 11), normed=True, histtype='stepfilled', fc='gray', alpha=0.5) ax1.plot(x, px, '-k') ax1.set_xlim(-1, 11) ax1.set_ylim(0, 0.18) ax1.set_xlabel('$x$') ax1.set_ylabel('$p(x)$') # right panel: construct and plot the likelihood ax2 = fig.add_subplot(122) ax2.xaxis.set_major_locator(plt.MultipleLocator(0.01)) a = np.linspace(-0.01, 0.02, 1000) Npts = (500, 100, 20) styles = ('-k', '--b', '-.g') for n, s in zip(Npts, styles): logL = linprob_logL(data[:n], a, xmin, xmax) logL = np.exp(logL - logL.max()) logL /= logL.sum() * (a[1] - a[0]) ax2.plot(a, logL, s, label=r'$\rm %i\ pts$' % n) ax2.legend(loc=2, prop=dict(size=8)) ax2.set_xlim(-0.011, 0.02) ax2.set_xlabel('$a$') ax2.set_ylabel('$p(a)$') # vertical line: in newer matplotlib versions, use ax.vlines([a_true]) ylim = ax2.get_ylim() ax2.plot([a_true, a_true], ylim, ':k', lw=1) ax2.set_ylim(ylim) plt.show()
[ "vanderplas@astro.washington.edu" ]
vanderplas@astro.washington.edu
6c16e2c8f646a76de7c95d1bce0bd8207155521e
5d0dd50d7f7bf55126834292140ed66306e59f10
/MIGRATE/msgpack_to_sql.py
4ce966fdef93c6b79fcabe824ec1177b571c63de
[]
no_license
JellyWX/tracker-bot
32d2c8666a7c6ca0835aa94695be4ccd7fc37bb5
b0909c4883b0ee6e0300a163e94ea0d69dffa062
refs/heads/master
2021-05-02T16:14:11.638292
2018-04-26T19:47:50
2018-04-26T19:47:50
120,670,416
0
0
null
null
null
null
UTF-8
Python
false
false
592
py
import msgpack import sqlite3 with open('../DATA/USER_DATA', 'rb') as f: data = msgpack.unpack(f, encoding='utf8') connection = sqlite3.connect('../DATA/data.db') cursor = connection.cursor() for user, values in data.items(): command = '''CREATE TABLE u{user} ( game VARCHAR(50), time INT ) '''.format(user=user) cursor.execute(command) for game, time in values.items(): command = '''INSERT INTO u{user} (game, time) VALUES (?, ?);'''.format(user=user) cursor.execute(command, (game, time)) connection.commit() connection.close()
[ "judewrs@gmail.com" ]
judewrs@gmail.com
d7919c38e0ac4b378ccf1771060a7670a3744ca6
ece0d321e48f182832252b23db1df0c21b78f20c
/engine/2.80/scripts/freestyle/styles/apriori_density.py
1de2c4c033457e302c229c3c7014b55c0b8010d7
[ "GPL-3.0-only", "Font-exception-2.0", "GPL-3.0-or-later", "Apache-2.0", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-public-domain-disclaimer", "Bitstream-Vera", "LicenseRef-scancode-blender-2010", "LGPL-2.1-or-later", "GPL-2.0-or-later", "GPL-2.0-only", "LGPL-2.0-only", "PSF-2.0", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-proprietary-license", "GPL-1.0-or-later", "BSD-2-Clause", "Unlicense" ]
permissive
byteinc/Phasor
47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9
f7d23a489c2b4bcc3c1961ac955926484ff8b8d9
refs/heads/master
2022-10-25T17:05:01.585032
2019-03-16T19:24:22
2019-03-16T19:24:22
175,723,233
3
1
Unlicense
2022-10-21T07:02:37
2019-03-15T00:58:08
Python
UTF-8
Python
false
false
1,743
py
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # Filename : apriori_density.py # Author : Stephane Grabli # Date : 04/08/2005 # Purpose : Draws lines having a high a prior density from freestyle.chainingiterators import ChainPredicateIterator from freestyle.predicates import ( AndUP1D, NotUP1D, QuantitativeInvisibilityUP1D, TrueBP1D, TrueUP1D, pyHighViewMapDensityUP1D, ) from freestyle.shaders import ( ConstantColorShader, ConstantThicknessShader, ) from freestyle.types import Operators Operators.select(AndUP1D(QuantitativeInvisibilityUP1D(0), pyHighViewMapDensityUP1D(0.1,5))) bpred = TrueBP1D() upred = AndUP1D(QuantitativeInvisibilityUP1D(0), pyHighViewMapDensityUP1D(0.0007,5)) Operators.bidirectional_chain(ChainPredicateIterator(upred, bpred), NotUP1D(QuantitativeInvisibilityUP1D(0))) shaders_list = [ ConstantThicknessShader(2), ConstantColorShader(0.0, 0.0, 0.0, 1.0) ] Operators.create(TrueUP1D(), shaders_list)
[ "admin@irradiate.net" ]
admin@irradiate.net
5ecff5ad5fe3286e9a8e813f3c9de2d599229c34
781116645c0d60de13596aac81a76c791ed0c18a
/kivy_garden/flower/__init__.py
6793aaafcc1aa355b42b381f1800e9714707bb6e
[ "MIT" ]
permissive
matham/flower
503dae3446110da05ecd2a527b3459f7e1bcadb3
e7c71346563573197ae304ceb343bff14e54a5cd
refs/heads/master
2020-05-24T22:33:43.761720
2019-05-19T08:56:14
2019-05-19T08:56:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
360
py
""" Demo flower ============ Defines the Kivy garden :class:`FlowerLabel` class which is the widget provided by the demo flower. """ from kivy.uix.label import Label __all__ = ('FlowerLabel', ) __version__ = '0.1.0.dev0' class FlowerLabel(Label): def __init__(self, **kwargs): super(FlowerLabel, self).__init__(**kwargs, text='Demo flower')
[ "moiein2000@gmail.com" ]
moiein2000@gmail.com
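A minimal app exercising the widget above; assumes Kivy is installed and FlowerLabel is importable from this package:

from kivy.app import App
from kivy_garden.flower import FlowerLabel

class DemoApp(App):
    def build(self):
        # The label comes pre-populated with the text 'Demo flower'
        return FlowerLabel()

if __name__ == "__main__":
    DemoApp().run()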
9d95173045444ddceac7aaebc34b8f75adf12995
fff26da96c4b324cdbc0315c3fdf1fe2ccbf6bf0
/.history/test_celegans_corrected_weights_20210615130634.py
a875acee9236154c606750101651e4d37fd22fd9
[]
no_license
izzortsi/spreading-activation-networks
ebcd38477a4d4c6139a82b0dd7da3d79a0e3f741
f2cf0bf519af746f148fa7a4ea4d78d16ba6af87
refs/heads/dev
2023-06-28T03:49:34.265268
2021-06-15T18:07:51
2021-06-15T18:07:51
376,718,907
0
0
null
2021-06-15T18:07:51
2021-06-14T06:01:52
Python
UTF-8
Python
false
false
3,390
py
# %% import graph_tool.all as gt import numpy as np import numpy.random as npr # import matplotlib.colors as mplc from matplotlib import cm import matplotlib.colors as mplc import os, sys from gi.repository import Gtk, Gdk, GdkPixbuf, GObject, GLib from plot_functions import * # %% def init_elegans_net(): g = gt.collection.data["celegansneural"] g.ep.weight = g.new_ep("double") norm_eweights = minmax(g.ep.value.a) g.ep.weight.a = norm_eweights del g.ep["value"] del g.gp["description"] del g.gp["readme"] del g.vp["label"] g.vp.state = g.new_vertex_property("int") g.vp.activation = g.new_vertex_property("float") n_vertices = g.num_vertices() n_edges = g.num_edges() activations = npr.normal(size=n_vertices) activations = minmax(activations) g.vp.state.a = np.full(n_vertices, 0) g.vp.activation.a = activations return g # %% def init_graph(g): treemap = gt.min_spanning_tree(g) gmst = gt.GraphView(g, efilt=treemap) gtclos = gt.transitive_closure(gmst) return {"g": g, "gmst": gmst, "gtc": gtclos} def minmax(a): a = (a - np.min(a)) return a/np.max(a) # %% """ def set_graph(type="gtc") type being either the original graph "g", the MST of it "gmst" or the transitive closure of the MST "gtc". Defaults to "gtc". """ def set_graph(type="gtc"): g = init_elegans_net() graphs = init_graph(g) g = graphs["g"] gmst = graphs["gmst"] gtc = graphs["gtc"] return g, gmst, gtc # %% # %% ####DYNAMICS PARAMETERS SPIKE_THRESHOLD = 0.90 POTENTIAL_LOSS = 0.8 MAX_COUNT = 600 #OFFSCREEN = True OFFSCREEN = sys.argv[1] == "offscreen" if len(sys.argv) > 1 else False # %% g, gmst, gtc = set_graph() # %% g = gmst # %% set(list(map(tuple, gtc.get_all_edges(151)))) # %% count = 0 # %% def update_state(): global count, g spiker_activation = np.max(g.vp.activation.a) spiker = gt.find_vertex(g, g.vp.activation, spiker_activation)[0] nbs = g.get_out_neighbors(spiker) nbsize = len(nbs) if nbsize != 0: spread_val = spiker_activation/nbsize for nb in nbs: w = g.ep.weight[g.edge(spiker, nb)] g.vp.activation[nb] += spread_val*w g.vp.activation[spiker] -= spread_val*w else: if g.vp.activation[spiker] >= 1: pass #if g.vp.activation[nb] >= SPIKE_THRESHOLD: win.graph.regenerate_surface() win.graph.queue_draw() if OFFSCREEN: pixbuf = win.get_pixbuf() pixbuf.savev(r'./frames/san%06d.png' % count, 'png', [], []) count += 1 if count >= MAX_COUNT: sys.exit(0) return True # %% pos = gt.sfdp_layout(g) PLOT_PARAMS = plot_params(g, None) if OFFSCREEN and not os.path.exists("./frames"): os.mkdir("./frames") # This creates a GTK+ window with the initial graph layout if not OFFSCREEN: win = gt.GraphWindow(g, pos, geometry=(720, 720), vertex_shape="circle", **PLOT_PARAMS, ) else: win = Gtk.OffscreenWindow() win.set_default_size(720, 720) win.graph = gt.GraphWidget(g, pos, vertex_shape="circle", **PLOT_PARAMS, ) win.add(win.graph) # %% cid = GLib.idle_add(update_state) win.connect("delete_event", Gtk.main_quit) win.show_all() Gtk.main() # %% # %%
[ "istrozzi@matematica.ufrj.br" ]
istrozzi@matematica.ufrj.br
25b61e304b936c5e84ffe57f9d196cca268179ff
63b864deda44120067eff632bbb4969ef56dd573
/object_detection/ssd/Config.py
f444dc728514a6492170e0eaf1c5d65542716889
[]
no_license
lizhe960118/Deep-Learning
d134592c327decc1db12cbe19d9a1c85a5056086
7d2c4f3a0512ce4bd2f86c9f455da9866d16dc3b
refs/heads/master
2021-10-29T06:15:04.749917
2019-07-19T15:27:25
2019-07-19T15:27:25
152,355,392
5
2
null
2021-10-12T22:19:33
2018-10-10T03:06:44
Jupyter Notebook
UTF-8
Python
false
false
481
py
import os.path as osp sk = [ 15, 30, 60, 111, 162, 213, 264 ] feature_map = [ 38, 19, 10, 5, 3, 1 ] steps = [ 8, 16, 32, 64, 100, 300 ] image_size = 300 aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]] MEANS = (104, 117, 123) batch_size = 2 data_load_number_worker = 0 lr = 1e-3 momentum = 0.9 weight_decacy = 5e-4 gamma = 0.1 VOC_ROOT = osp.join('./', "VOCdevkit/") dataset_root = VOC_ROOT use_cuda = True lr_steps = (80000, 100000, 120000) max_iter = 120000 class_num = 21
[ "2957308424@qq.com" ]
2957308424@qq.com
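As a cross-check on the feature_map and aspect_ratios lists in Config.py above: under the usual SSD assumption of 2*len(ratios) + 2 default boxes per cell (the ratio boxes plus two square boxes), these settings yield the classic SSD300 box count:

total = sum(f * f * (2 * len(r) + 2)
            for f, r in zip(feature_map, aspect_ratios))
print(total)  # 8732 default boxes for SSD300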
2235add0ce48477a2a58d68f369f8cd3ba1fbf2b
5ec06dab1409d790496ce082dacb321392b32fe9
/clients/python/generated/swaggeraemosgi/model/com_adobe_granite_frags_impl_check_http_header_flag_properties.py
b32110895772ddda09288d935ee3f1e98dbd4215
[ "Apache-2.0" ]
permissive
shinesolutions/swagger-aem-osgi
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
c2f6e076971d2592c1cbd3f70695c679e807396b
refs/heads/master
2022-10-29T13:07:40.422092
2021-04-09T07:46:03
2021-04-09T07:46:03
190,217,155
3
3
Apache-2.0
2022-10-05T03:26:20
2019-06-04T14:23:28
null
UTF-8
Python
false
false
7,658
py
""" Adobe Experience Manager OSGI config (AEM) API Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501 The version of the OpenAPI document: 1.0.0-pre.0 Contact: opensource@shinesolutions.com Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 import nulltype # noqa: F401 from swaggeraemosgi.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) def lazy_import(): from swaggeraemosgi.model.config_node_property_string import ConfigNodePropertyString globals()['ConfigNodePropertyString'] = ConfigNodePropertyString class ComAdobeGraniteFragsImplCheckHttpHeaderFlagProperties(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } additional_properties_type = None _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'feature_name': (ConfigNodePropertyString,), # noqa: E501 'feature_description': (ConfigNodePropertyString,), # noqa: E501 'http_header_name': (ConfigNodePropertyString,), # noqa: E501 'http_header_valuepattern': (ConfigNodePropertyString,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'feature_name': 'feature.name', # noqa: E501 'feature_description': 'feature.description', # noqa: E501 'http_header_name': 'http.header.name', # noqa: E501 'http_header_valuepattern': 'http.header.valuepattern', # noqa: E501 } _composed_schemas = {} required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """ComAdobeGraniteFragsImplCheckHttpHeaderFlagProperties - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. 
Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) feature_name (ConfigNodePropertyString): [optional] # noqa: E501 feature_description (ConfigNodePropertyString): [optional] # noqa: E501 http_header_name (ConfigNodePropertyString): [optional] # noqa: E501 http_header_valuepattern (ConfigNodePropertyString): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value)
[ "cliffano@gmail.com" ]
cliffano@gmail.com
c714879ab292decf242cb272a4d05560414fb170
72d010d00355fc977a291c29eb18aeb385b8a9b0
/LV2_LX2_LC2_LD2/ParamMap.py
12d64819be32886c056b2489f3ffb2779ffe3981
[]
no_license
maratbakirov/AbletonLive10_MIDIRemoteScripts
bf0749c5c4cce8e83b23f14f671e52752702539d
ed1174d9959b20ed05fb099f0461bbc006bfbb79
refs/heads/master
2021-06-16T19:58:34.038163
2021-05-09T11:46:46
2021-05-09T11:46:46
203,174,328
0
0
null
2019-08-19T13:04:23
2019-08-19T13:04:22
null
UTF-8
Python
false
false
2,876
py
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/LV2_LX2_LC2_LD2/ParamMap.py # Compiled at: 2018-04-23 20:27:04 from __future__ import absolute_import, print_function, unicode_literals import Live class Callable: def __init__(self, anycallable): self.__call__ = anycallable class ParamMap: u"""Class to help with device mapping""" __module__ = __name__ def __init__(self, parent): ParamMap.realinit(self, parent) def realinit(self, parent): self.parent = parent self.params_with_listener = [] self.param_callbacks = [] def log(self, string): self.parent.log(string) def logfmt(self, fmt, *args): args2 = [] for i in range(0, len(args)): args2 += [args[i].__str__()] str = fmt % tuple(args2) return self.log(str) def param_add_callback(self, script_handle, midi_map_handle, param, min, max, cc, channel): callback = lambda : self.on_param_value_changed(param, min, max, cc, channel) param.add_value_listener(callback) self.params_with_listener += [param] self.param_callbacks += [callback] ParamMap.forward_cc(script_handle, midi_map_handle, channel, cc) def receive_midi_note(self, channel, status, note_no, note_vel): pass def receive_midi_cc(self, chan, cc_no, cc_value): pass def forward_cc(script_handle, midi_map_handle, chan, cc): Live.MidiMap.forward_midi_cc(script_handle, midi_map_handle, chan, cc) forward_cc = Callable(forward_cc) def forward_note(script_handle, midi_map_handle, chan, note): Live.MidiMap.forward_midi_note(script_handle, midi_map_handle, chan, note) forward_note = Callable(forward_note) def map_with_feedback(midi_map_handle, channel, cc, parameter, mode): feedback_rule = Live.MidiMap.CCFeedbackRule() feedback_rule.channel = channel feedback_rule.cc_value_map = tuple() feedback_rule.delay_in_ms = -1.0 feedback_rule.cc_no = cc Live.MidiMap.map_midi_cc_with_feedback_map(midi_map_handle, parameter, channel, cc, mode, feedback_rule, False) Live.MidiMap.send_feedback_for_parameter(midi_map_handle, parameter) map_with_feedback = Callable(map_with_feedback) def on_param_value_changed(self, param, min, max, cc, channel): pass def remove_mappings(self): for i in range(0, len(self.params_with_listener)): param = self.params_with_listener[i] callback = self.param_callbacks[i] try: if param.value_has_listener(callback): param.remove_value_listener(callback) except: continue self.params_with_listener = [] self.param_callbacks = []
[ "julien@julienbayle.net" ]
julien@julienbayle.net
bf61729fa718b439998532f367204e3cf8b93cf6
35fe9e62ab96038705c3bd09147f17ca1225a84e
/a10_ansible/library/a10_ipv6_neighbor_static.py
9c058e6fee3024c46ed849ab350ff96c39149478
[]
no_license
bmeidell/a10-ansible
6f55fb4bcc6ab683ebe1aabf5d0d1080bf848668
25fdde8d83946dadf1d5b9cebd28bc49b75be94d
refs/heads/master
2020-03-19T08:40:57.863038
2018-03-27T18:25:40
2018-03-27T18:25:40
136,226,910
0
0
null
2018-06-05T19:45:36
2018-06-05T19:45:36
null
UTF-8
Python
false
false
6,211
py
#!/usr/bin/python

REQUIRED_NOT_SET = (False, "One of ({}) must be set.")
REQUIRED_MUTEX = (False, "Only one of ({}) can be set.")
REQUIRED_VALID = (True, "")

DOCUMENTATION = """
module: a10_static
description:
    -
author: A10 Networks 2018
version_added: 1.8

options:
    ipv6-addr:
        description:
            - IPV6 address
    mac:
        description:
            - MAC Address
    ethernet:
        description:
            - Ethernet port (Port Value)
    trunk:
        description:
            - Trunk group
    tunnel:
        description:
            - Tunnel interface
    vlan:
        description:
            - VLAN ID
    uuid:
        description:
            - uuid of the object
"""

EXAMPLES = """
"""

ANSIBLE_METADATA = """
"""

# Hacky way of having access to object properties for evaluation
AVAILABLE_PROPERTIES = {"ethernet", "ipv6_addr", "mac", "trunk", "tunnel", "uuid", "vlan"}

# our imports go at the top so we fail fast.
from a10_ansible.axapi_http import client_factory
from a10_ansible import errors as a10_ex


def get_default_argspec():
    return dict(
        a10_host=dict(type='str', required=True),
        a10_username=dict(type='str', required=True),
        a10_password=dict(type='str', required=True, no_log=True),
        state=dict(type='str', default="present", choices=["present", "absent"])
    )


def get_argspec():
    rv = get_default_argspec()
    rv.update(dict(
        ethernet=dict(type='str'),
        ipv6_addr=dict(type='str', required=True),
        mac=dict(type='str'),
        trunk=dict(type='str'),
        tunnel=dict(type='str'),
        uuid=dict(type='str'),
        vlan=dict(type='str', required=True),
    ))
    return rv


def new_url(module):
    """Return the URL for creating a resource"""
    # To create the URL, we need to take the format string and return it with no params
    url_base = "/axapi/v3/ipv6/neighbor/static/{ipv6-addr}+{vlan}"
    f_dict = {}
    f_dict["ipv6-addr"] = ""
    f_dict["vlan"] = ""

    return url_base.format(**f_dict)


def existing_url(module):
    """Return the URL for an existing resource"""
    # Build the format dictionary
    url_base = "/axapi/v3/ipv6/neighbor/static/{ipv6-addr}+{vlan}"
    f_dict = {}
    # module.params is keyed by the underscore argspec names, not the AXAPI names
    f_dict["ipv6-addr"] = module.params["ipv6_addr"]
    f_dict["vlan"] = module.params["vlan"]

    return url_base.format(**f_dict)


def build_envelope(title, data):
    return {
        title: data
    }


def build_json(title, module):
    rv = {}
    for x in AVAILABLE_PROPERTIES:
        v = module.params.get(x)
        if v:
            rx = x.replace("_", "-")
            rv[rx] = module.params[x]

    return build_envelope(title, rv)


def validate(params):
    # Ensure that params contains all the keys.
    requires_one_of = sorted([])
    present_keys = sorted([x for x in requires_one_of if params.get(x)])

    errors = []
    marg = []

    if not len(requires_one_of):
        return REQUIRED_VALID

    if len(present_keys) == 0:
        rc, msg = REQUIRED_NOT_SET
        marg = requires_one_of
    elif requires_one_of == present_keys:
        rc, msg = REQUIRED_MUTEX
        marg = present_keys
    else:
        rc, msg = REQUIRED_VALID

    if not rc:
        errors.append(msg.format(", ".join(marg)))

    return rc, errors


def exists(module):
    try:
        module.client.get(existing_url(module))
        return True
    except a10_ex.NotFound:
        return False


def create(module, result):
    payload = build_json("static", module)
    try:
        post_result = module.client.post(new_url(module), payload)
        result.update(**post_result)
        result["changed"] = True
    except a10_ex.Exists:
        result["changed"] = False
    except a10_ex.ACOSException as ex:
        module.fail_json(msg=ex.msg, **result)
    except Exception as gex:
        raise gex
    return result


def delete(module, result):
    try:
        module.client.delete(existing_url(module))
        result["changed"] = True
    except a10_ex.NotFound:
        result["changed"] = False
    except a10_ex.ACOSException as ex:
        module.fail_json(msg=ex.msg, **result)
    except Exception as gex:
        raise gex
    return result


def update(module, result):
    payload = build_json("static", module)
    try:
        post_result = module.client.put(existing_url(module), payload)
        result.update(**post_result)
        result["changed"] = True
    except a10_ex.ACOSException as ex:
        module.fail_json(msg=ex.msg, **result)
    except Exception as gex:
        raise gex
    return result


def present(module, result):
    if not exists(module):
        return create(module, result)
    else:
        return update(module, result)


def absent(module, result):
    return delete(module, result)


def run_command(module):
    run_errors = []

    result = dict(
        changed=False,
        original_message="",
        message=""
    )

    state = module.params["state"]
    a10_host = module.params["a10_host"]
    a10_username = module.params["a10_username"]
    a10_password = module.params["a10_password"]
    # TODO(remove hardcoded port #)
    a10_port = 443
    a10_protocol = "https"

    valid, validation_errors = validate(module.params)
    # plain loop instead of map(): map() is lazy on Python 3 and the
    # appends would otherwise never run
    for ve in validation_errors:
        run_errors.append(ve)

    if not valid:
        result["messages"] = "Validation failure"
        err_msg = "\n".join(run_errors)
        module.fail_json(msg=err_msg, **result)

    module.client = client_factory(a10_host, a10_port, a10_protocol, a10_username, a10_password)

    if state == 'present':
        result = present(module, result)
    elif state == 'absent':
        result = absent(module, result)

    return result


def main():
    module = AnsibleModule(argument_spec=get_argspec())
    result = run_command(module)
    module.exit_json(**result)

# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

if __name__ == '__main__':
    main()
[ "mdurrant@a10networks.com" ]
mdurrant@a10networks.com
b5fdf682f928aef41c6625b6e5d1e70bb65baa49
cfc49e6e65ed37ddf297fc7dffacee8f905d6aa0
/exercicios_seccao4/35.py
f774259ca92b71fb8f2bb8f0eeece2cbe180ede4
[]
no_license
IfDougelseSa/cursoPython
c94cc1215643f272f935d5766e7a2b36025ddbe2
3f9ceb9701a514106d49b2144b7f2845416ed8ec
refs/heads/main
2023-06-12T16:51:29.413031
2021-07-07T00:20:53
2021-07-07T00:20:53
369,268,883
1
0
null
null
null
null
UTF-8
Python
false
false
195
py
# Hypotenuse
import math

a = int(input('Enter leg a: '))
b = int(input('Enter leg b: '))

hipotenusa = math.sqrt(a ** 2 + b ** 2)

print(f'The hypotenuse is {hipotenusa}.')
[ "doug_ccortez@outlook.com" ]
doug_ccortez@outlook.com
4c699101fa8582289ec996b5664bd8ab5b3ec4f5
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03032/s297706816.py
d7371f5e563b20937599d014765a4d6f1b0ebd4c
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
743
py
n,k=map(int,input().split())
v=list(map(int, input().split()))

if k<n*2:
    ans=0
    for i in range(k+1):
        for j in range(k-i+1):
            v_r=v[:i]
            v_l=v[(n-j):]
            sute_cnt=k-(i+j)
            v_new=v_r+v_l
            v_new.sort()
            # print(i, j, v_r, v_l, sute_cnt, v_new)
            s=sum(v_new)
            if not v_new:
                continue
            for indx in range(len(v_new)):
                if v_new[indx]<0 and sute_cnt>0:
                    s-=v_new[indx]
                    sute_cnt-=1
                else:
                    break
            ans=max(ans,s)
    print(ans)
else:
    ans=0
    for i in range(n):
        if v[i]>=0:
            ans+=v[i]
    print(ans)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
16241caf95d6f2f6a2c327e2309ad58990c11cd5
be549921446835ba6dff0cadaa0c7b83570ebc3e
/run_eval_sutter.py
a0ba2df9ac3c6f63655586a070cc69f7762854c8
[]
no_license
uctoronto/AutoPrescribe
895ee4375625408c663cee22610bb5425d7efc7f
a6188e9189df727320448a368f6e70036472ede4
refs/heads/master
2020-03-27T05:47:47.500486
2017-05-31T18:49:33
2017-05-31T18:49:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,227
py
from models.processor import Processor
from models.leap import LEAPModel
from exp.coverage import config_sutter as config
from utils.data import dump

config = config.get_config()

dir = 'build/'
config.saved_model_file = dir + 'sutter_%s_%s_seq2seq.model' % (config.level, config.order)
print(config.saved_model_file.split('/')[-1])

p = Processor(config)
model = LEAPModel(p, config)

# model.do_train()
model.load_params(config.saved_model_file)
# model.do_reinforce(scorer)
model.do_eval(training = False, filename = 'sutter_%s_%s_seq2seq.txt' % (config.level, config.order), max_batch = 5000000)

# model.load_params('../models/resume_seed13_100d_lr0.001_h256.model')
# ret = model.do_generate(data)
#
# from utils.eval import Evaluator
# eva = Evaluator()
# cnt = 0
# truth = []
# sum_jaccard = 0
# for line in open("seq2seq.h256.txt"):
#     if cnt % 3 == 1:
#         truth = set(line.strip().split("T: ")[1].split(" "))
#     if cnt % 3 == 2:
#         result = set(line.strip().split("Gen: ")[1].replace("END", "").strip().split(" "))
#         jaccard = eva.get_jaccard_k(truth, result)
#         sum_jaccard += jaccard
#     cnt += 1
#
# print(sum_jaccard * 3 / cnt)
#
# cnt = 0
# truth_list = []
# prediction_list = []
# for line in open("seq2seq.h256.txt"):
#     if cnt % 3 == 1:
#         truth = set(line.strip().split("T: ")[1].split(" "))
#         truth_list.append(truth)
#     if cnt % 3 == 2:
#         result = set(line.strip().split("Gen: ")[1].replace("END", "").strip().split(" "))
#         prediction_list.append(result)
#     cnt += 1
#
cnt = 0
results = []
input = []
truth = []
for line in open('sutter_%s_%s_seq2seq.txt' % (config.level, config.order)):
    if cnt % 3 == 0:
        input = set(line.strip().split("S: ")[1].split(" "))
    if cnt % 3 == 1:
        if len(line.strip().split("T: ")) <= 1:
            truth = []
            continue
        truth = set(line.strip().split("T: ")[1].split(" "))
    if cnt % 3 == 2:
        result = set(line.strip().split("Gen: ")[1].replace("END", "").strip().split(" "))
        if len(truth) > 0:
            results.append((input, truth, result))
    cnt += 1

dump(results, "sutter_%s_%s_result_seq2seq.pkl" % (config.level, config.order))
[ "stack@live.cn" ]
stack@live.cn
7ef5899fc65729bb3d4169066bc9065937633f77
8565e4d24b537d1fb0f71fef6215d193ceaed6cc
/tests/test_check_circular.py
4a91863962a4377cf6bad0ba6466463a0579f885
[ "MIT" ]
permissive
soasme/dogeon
5f55c84a6f93aaa7757372664dd60ed90cf200e8
496b9a5b099946d14434ed0cd7a94a270f607207
refs/heads/master
2020-05-17T19:01:42.780694
2018-11-04T05:01:23
2018-11-04T05:01:23
20,592,607
3
0
null
2014-06-28T01:34:35
2014-06-07T12:28:07
Python
UTF-8
Python
false
false
736
py
import dson
import pytest


def default_iterable(obj):
    return list(obj)


def test_circular_dict():
    dct = {}
    dct['a'] = dct
    pytest.raises(ValueError, dson.dumps, dct)


def test_circular_list():
    lst = []
    lst.append(lst)
    pytest.raises(ValueError, dson.dumps, lst)


def test_circular_composite():
    dct2 = {}
    dct2['a'] = []
    dct2['a'].append(dct2)
    pytest.raises(ValueError, dson.dumps, dct2)


def test_circular_default():
    dson.dumps([set()], default=default_iterable)
    pytest.raises(TypeError, dson.dumps, [set()])


def test_circular_off_default():
    dson.dumps([set()], default=default_iterable, check_circular=False)
    pytest.raises(TypeError, dson.dumps, [set()], check_circular=False)
[ "soasme@gmail.com" ]
soasme@gmail.com
08824881bc68f2ddf1fee1b25916cd115d4df279
aec59723a3dd0d3356a4ce426dc0fc381a4d3157
/catalog/model/pricing.py
020f6e8a724428673e0662dd1b10eba1af0e2087
[]
no_license
Guya-LTD/catalog
f44e31593637e22b3b2a2869a387e29875986f7c
632b3c3766e2600275c0a18db6378b2d38e3c463
refs/heads/master
2023-02-11T19:03:36.796812
2021-01-08T14:12:06
2021-01-08T14:12:06
275,332,646
0
0
null
null
null
null
UTF-8
Python
false
false
859
py
# -*- coding: utf-8 -*-
"""Copyright Header Details

Copyright
---------
    Copyright (C) Guya , PLC - All Rights Reserved (As Of Pending...)
    Unauthorized copying of this file, via any medium is strictly prohibited
    Proprietary and confidential

LICENSE
-------
    This file is subject to the terms and conditions defined in
    file 'LICENSE.txt', which is part of this source code package.

Authors
-------
    * [Simon Belete](https://github.com/Simonbelete)

Project
-------
    * Name:
        - Guya E-commerce & Guya Express
    * Sub Project Name:
        - Catalog Service
    * Description
        - Catlog Catalog Service
"""

"""Package details

Application features:
--------------------
    Python 3.7
    Flask
    PEP-8 for code style

Entity.
"""


class Pricing:
    """A Base Model Representation of Pricing Entity."""
    pass
[ "simonbelete@gmail.com" ]
simonbelete@gmail.com
14f648102f5ede6ed0cbfd6da4036fb02e0e97b3
8983b099a27d124b17fc20d4e9b5ec2f0bf8be25
/altair/schema/_interface/named_channels.py
d2d7c77e95eadb00163c13a153019fb543b03f86
[ "BSD-3-Clause" ]
permissive
princessd8251/altair
a7afa0745291f82215fbda6a477e369f59fcf294
387c575ee0410e7ac804273a0f2e5574f4cca26f
refs/heads/master
2021-01-16T21:41:40.935679
2017-08-10T16:36:05
2017-08-10T16:36:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
984
py
# -*- coding: utf-8 -*-
# Auto-generated file: do not modify directly
# - altair version info: v1.2.0-98-g8a98636
# - date: 2017-08-09 12:14:26

from . import channel_wrappers


class Color(channel_wrappers.ChannelWithLegend):
    pass


class Column(channel_wrappers.PositionChannel):
    pass


class Detail(channel_wrappers.Field):
    pass


class Label(channel_wrappers.Field):
    pass


class Opacity(channel_wrappers.ChannelWithLegend):
    pass


class Order(channel_wrappers.OrderChannel):
    pass


class Path(channel_wrappers.OrderChannel):
    pass


class Row(channel_wrappers.PositionChannel):
    pass


class Shape(channel_wrappers.ChannelWithLegend):
    pass


class Size(channel_wrappers.ChannelWithLegend):
    pass


class Text(channel_wrappers.Field):
    pass


class X(channel_wrappers.PositionChannel):
    pass


class X2(channel_wrappers.Field):
    pass


class Y(channel_wrappers.PositionChannel):
    pass


class Y2(channel_wrappers.Field):
    pass
[ "jakevdp@gmail.com" ]
jakevdp@gmail.com
e073a8419eda5bafad84588f1124d089f124d4cd
5864e86954a221d52d4fa83a607c71bacf201c5a
/carbon/common/lib/markdown/extensions/tables.py
f613f9a67f1f99e646124dad4f9a5fdff380870a
[]
no_license
connoryang/1v1dec
e9a2303a01e5a26bf14159112b112be81a6560fd
404f2cebf13b311e754d45206008918881496370
refs/heads/master
2021-05-04T02:34:59.627529
2016-10-19T08:56:26
2016-10-19T08:56:26
71,334,417
0
0
null
null
null
null
UTF-8
Python
false
false
2,302
py
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\markdown\extensions\tables.py
import markdown
from markdown.util import etree

class TableProcessor(markdown.blockprocessors.BlockProcessor):

    def test(self, parent, block):
        rows = block.split('\n')
        return len(rows) > 2 and '|' in rows[0] and '|' in rows[1] and '-' in rows[1] and rows[1].strip()[0] in ('|', ':', '-')

    def run(self, parent, blocks):
        block = blocks.pop(0).split('\n')
        header = block[0].strip()
        seperator = block[1].strip()
        rows = block[2:]
        border = False
        if header.startswith('|'):
            border = True
        align = []
        for c in self._split_row(seperator, border):
            if c.startswith(':') and c.endswith(':'):
                align.append('center')
            elif c.startswith(':'):
                align.append('left')
            elif c.endswith(':'):
                align.append('right')
            else:
                align.append(None)

        table = etree.SubElement(parent, 'table')
        thead = etree.SubElement(table, 'thead')
        self._build_row(header, thead, align, border)
        tbody = etree.SubElement(table, 'tbody')
        for row in rows:
            self._build_row(row.strip(), tbody, align, border)

    def _build_row(self, row, parent, align, border):
        tr = etree.SubElement(parent, 'tr')
        tag = 'td'
        if parent.tag == 'thead':
            tag = 'th'
        cells = self._split_row(row, border)
        for i, a in enumerate(align):
            c = etree.SubElement(tr, tag)
            try:
                c.text = cells[i].strip()
            except IndexError:
                c.text = ''

            if a:
                c.set('align', a)

    def _split_row(self, row, border):
        if border:
            if row.startswith('|'):
                row = row[1:]
            if row.endswith('|'):
                row = row[:-1]
        return row.split('|')


class TableExtension(markdown.Extension):

    def extendMarkdown(self, md, md_globals):
        md.parser.blockprocessors.add('table', TableProcessor(md.parser), '<hashheader')


def makeExtension(configs = {}):
    return TableExtension(configs=configs)
[ "le02005@163.com" ]
le02005@163.com
3eb6943aae1ad11db104ee00d54ed9bccbb642e4
855dc9fcd4170923e8723b6946c09c5cae68e079
/what_transcode/migrations/0001_initial.py
cb61199f9d66f0b1aee0d9c062f1096d498bbdcf
[ "MIT" ]
permissive
point-source/WhatManager2
3fc72976402ac40d132aef0deffd8bcfbd209703
ddbce0fa1ff4e1fc44bfa726c4f7eace4adbe8a9
refs/heads/master
2023-01-27T11:39:43.861041
2019-02-24T17:51:24
2019-02-24T17:51:24
210,232,561
1
0
MIT
2019-09-23T00:21:54
2019-09-23T00:21:53
null
UTF-8
Python
false
false
985
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='TranscodeRequest',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('requested_by_ip', models.TextField()),
                ('requested_by_what_user', models.TextField()),
                ('date_requested', models.DateTimeField(auto_now_add=True)),
                ('date_completed', models.DateTimeField(null=True)),
                ('celery_task_id', models.TextField(null=True)),
                ('what_torrent', models.ForeignKey(to='home.WhatTorrent')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
[ "ivailo@karamanolev.com" ]
ivailo@karamanolev.com
5e9cf5ae03e925ad4d818c9b0637c412bbc60146
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02709/s022509829.py
dd9fa602873f6ee74e43f9bacf44dd9a2eee3894
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
911
py
import sys
input = sys.stdin.readline
from collections import deque

N = int(input())
#A = list(map(int, input().split()))
A = [(a, i) for i, a in enumerate(map(int, input().split()))]
A = sorted(A, reverse=True)

values = []
num_indcies = {}
for i, a in enumerate(A):
    if not a in num_indcies:
        num_indcies[a] = [i]
        values.append(a)
    else:
        num_indcies[a].append(i)
values = sorted(values, reverse=True)

ans = 0
# array of indices
dp_indices = []
for v in values:
    dp_indices.extend(num_indcies[v])

dp = [[0] * (N+1) for _ in range(N+1)]
for no, (a, pos) in enumerate(A):
    for i in range(no+1):
        j = no - i
        #k = dp_indices[i+j-2]
        #a = A[k]
        dp[i+1][j] = max(dp[i+1][j], dp[i][j] + a * (pos -i))
        dp[i][j+1] = max(dp[i][j+1], dp[i][j] + a * abs(pos - (N-1-j)))

ans = 0
for i in range(1, N+1):
    ans = max(ans, dp[i][N-i])
print(ans)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
3d76924803db335c9cb94bb42f4444f162c2d2ae
936f72b46215b89b277ffd57256e54f727ce1ac5
/spark-comp04/token.py
3147a73cbc6b3be806e113977983bf177f1a4f32
[]
no_license
luizirber/dc-compilers
91dc99097d628339b53b20a0c0f2a6255a599b7a
4a47e786583c5f50cac2ac3a35de195f7be7a735
refs/heads/master
2016-09-06T11:27:51.815748
2012-07-03T01:28:26
2012-07-03T01:28:26
41,540
7
0
null
null
null
null
UTF-8
Python
false
false
278
py
class Token(object):
    def __init__(self, type, attr=None, lineno='???'):
        self.type = type
        self.attr = attr
        self.lineno = lineno

    def __cmp__(self, o):
        return cmp(self.type, o)

    def __repr__(self):
        return self.attr or self.type
[ "luiz.irber@gmail.com" ]
luiz.irber@gmail.com
f3822c56be1305e7b55915ab88f6b4e8ff7f9704
62587160029c7c79b5d11f16e8beae4afa1c4834
/webpages/island_scraper_kyero/island_scraper/middlewares.py
f34dd9c19c21b5524d2483086acae265764a8f49
[]
no_license
LukaszMalucha/Scrapy-Collection
b11dcf2c09f33d190e506559d978e4f3b77f9f5a
586f23b90aa984c22ea8f84eba664db9649ed780
refs/heads/master
2022-12-14T15:06:00.868322
2021-07-27T12:09:07
2021-07-27T12:09:07
144,448,351
3
0
null
2022-11-22T03:16:19
2018-08-12T07:55:05
Python
UTF-8
Python
false
false
3,611
py
# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class IslandScraperSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class IslandScraperDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
[ "lucasmalucha@gmail.com" ]
lucasmalucha@gmail.com
6544fcf260d6f8112c79a5e3a5ec70a10575a277
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
/sdBs/AllRun/pg_1425+219/sdB_PG_1425+219_lc.py
e8ee83c9f091b54b888664988d5fb0c6cd57aee1
[]
no_license
tboudreaux/SummerSTScICode
73b2e5839b10c0bf733808f4316d34be91c5a3bd
4dd1ffbb09e0a599257d21872f9d62b5420028b0
refs/heads/master
2021-01-20T18:07:44.723496
2016-08-08T16:49:53
2016-08-08T16:49:53
65,221,159
0
0
null
null
null
null
UTF-8
Python
false
false
346
py
from gPhoton.gAperture import gAperture

def main():
    gAperture(band="NUV", skypos=[216.986042,21.632814], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1425+219 /sdB_PG_1425+219_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
[ "thomas@boudreauxmail.com" ]
thomas@boudreauxmail.com
48b1cfe1f2c159159035fd8b8781a2df3fb2ffde
b11a5afd6682fe003445431ab60a9273a8680c23
/language/nqg/tasks/spider/write_dataset.py
b2ed9f1018cf872e2b4933c9712c698deaeb8e52
[ "LicenseRef-scancode-generic-cla", "Apache-2.0" ]
permissive
Srividya-me/language
a874b11783e94da7747fc9a1b0ae1661cd5c9d4a
61fa7260ac7d690d11ef72ca863e45a37c0bdc80
refs/heads/master
2023-08-28T10:30:59.688879
2021-11-12T22:31:56
2021-11-13T01:04:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,111
py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write Spider dataset in TSV format."""

import json

from absl import app
from absl import flags

from language.nqg.tasks import tsv_utils
from language.nqg.tasks.spider import database_constants

from tensorflow.io import gfile

FLAGS = flags.FLAGS

flags.DEFINE_string("examples", "", "Path to Spider json examples.")

flags.DEFINE_string("output", "", "Output tsv file.")

flags.DEFINE_bool(
    "filter_by_database", True,
    "Whether to only select examples for databases used for the Spider-SSP"
    "setting proposed in the paper. Should be False to follow the standard"
    "Spider-XSP setting.")


def normalize_whitespace(source):
  tokens = source.split()
  return " ".join(tokens)


def load_json(filepath):
  with gfile.GFile(filepath, "r") as reader:
    text = reader.read()
  return json.loads(text)


def main(unused_argv):
  examples_json = load_json(FLAGS.examples)

  examples = []
  for example_json in examples_json:
    database = example_json["db_id"]
    source = example_json["question"]
    target = example_json["query"]

    # Optionally skip if database not in set of databases with >= 50 examples.
    if (FLAGS.filter_by_database and
        database not in database_constants.DATABASES):
      continue

    # Prepend database.
    source = "%s: %s" % (database, source)
    target = normalize_whitespace(target)
    examples.append((source.lower(), target.lower()))

  tsv_utils.write_tsv(examples, FLAGS.output)


if __name__ == "__main__":
  app.run(main)
[ "kentonl@google.com" ]
kentonl@google.com
6ff66a5e7100cbdd1877f359622be88b41e19b2c
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
/flask/flaskenv/Lib/site-packages/keras_applications/inception_v3.py
1b825c0ce4aea562e468b337a5843f63810f57d5
[]
no_license
AhsonAslam/webapi
54cf7466aac4685da1105f9fb84c686e38f92121
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
refs/heads/master
2020-07-27T06:05:36.057953
2019-09-17T06:35:33
2019-09-17T06:35:33
208,895,450
0
0
null
null
null
null
UTF-8
Python
false
false
130
py
version https://git-lfs.github.com/spec/v1
oid sha256:6bdeecc0c5e0341451f5d87e17d12c89a210b6161e1b066aca6e02bc425b2abf
size 14598
[ "github@cuba12345" ]
github@cuba12345
568aa59ae896f8dcad1d6c4c19a117a22a0ff63c
c4d05bf624ce277b35d83ba8ba9636f26043280e
/project/urls.py
d6e90307036ceed43e1f6355ce2dc672ebb0e233
[ "Apache-2.0" ]
permissive
DrMartiner/kaptilo_back
2366b3a2b5c9bd9dc57c9091ff5fd0025963668d
df7f716030edbb1a70388fcbb808b0985dabefbf
refs/heads/main
2023-04-09T03:12:52.274388
2021-03-22T09:48:39
2021-03-22T09:48:39
349,943,620
0
0
null
null
null
null
UTF-8
Python
false
false
862
py
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path, include

from apps.link.views import OriginalLinkRedirectView

admin.site.site_header = "Kaptilo"
admin.site.site_title = "Kaptilo"
admin.site.index_title = "Welcome to Kaptilo admin-panel"

urlpatterns = [
    path("<str:uuid>/", OriginalLinkRedirectView.as_view(), name="original-link-redirect"),
    path("api/v1/", include(("apps.api.urls", "apps.api"), namespace="api_v1")),
    path("admin/super-sec/", admin.site.urls),
    path("admin/", include("admin_honeypot.urls", namespace="admin_honeypot")),
]

if settings.DEBUG:
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[ "DrMartiner@GMail.Com" ]
DrMartiner@GMail.Com
668963624d3086f1b1dd35cf080200af75bf8736
191a7f83d964f74a2b3c7faeb4fc47d9c63d521f
/.history/main_20210523152045.py
6d7861a88a7d86a28f1d8d675b4416ba674fb3c2
[]
no_license
AndreLiu1225/Kinder-Values-Survey
2a317feee8d5b17c27da2b2116742656e35d8ab9
090c27da0c822abb7dfc0ec6e13ae1b3dcb7bbf3
refs/heads/master
2023-05-03T00:26:00.481423
2021-06-04T03:24:19
2021-06-04T03:24:19
371,989,154
0
0
null
null
null
null
UTF-8
Python
false
false
1,795
py
from flask import Flask, render_template, redirect, url_for
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError

app = Flask(__name__)
app.config['SECRET_KEY'] = "0c8973c8a5e001bb0c816a7b56c84f3a"

class MCQ(FlaskForm):
    age = IntegerField("Please enter your age", validators=[DataRequired()])
    profession = StringField("What is your profession?", validators=[DataRequired(), Length(max=30)])
    power = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    tradition = RadioField("Do you care about preserving tradition?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    achievement = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    stimulation = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    hedonism = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    conformity = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    self_direction = RadioField("Do you desire a higher social status and dominance over others?", choices=[('Yes', 'It is my priority'), ('No', 'It is not my priority')])
    submit = SubmitField("Submit")

if __name__ == "__main__":
    app.run(debug=True)
[ "andreliu2004@gmail.com" ]
andreliu2004@gmail.com
8065d754386fc0b3762e05f4fc04a7f53121086e
9da6c375dbf1af87622a2ba0fb773e8f513d8021
/cli/bak.20200512-local/abcombo.py
a267f8c6d9d445c64cdd848a3d93c27eb4e147ce
[]
no_license
wri/tree_canopy_fcn
a80a9971403f6ca2548d44146ed08aa22d7d559e
78f742e4e26e34008417468f73413643edde801e
refs/heads/master
2022-10-11T03:25:41.503263
2020-06-16T12:39:21
2020-06-16T12:39:21
236,492,565
1
0
null
null
null
null
UTF-8
Python
false
false
5,895
py
import os,sys
PROJECT_DIR='/home/ericp/tree_canopy_fcn/repo'
sys.path.append(PROJECT_DIR)
from pprint import pprint
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch_kit.loss import MaskedLoss
import torch_kit.functional as F
from torch_kit.optimizers.radam import RAdam
import pytorch_models.deeplab.model as dm
import pytorch_models.unet.model as um
from utils.dataloader import HeightIndexDataset, CATEGORY_BOUNDS
from config import BUILTUP_CATEGORY_THRESHOLDS

#
# RUN CONFIG
#
BATCH_SIZE=8
DEFAULT_OPTIMIZER='adam'
LRS=[1e-3,1e-4]
NB_CATEGORIES=len(CATEGORY_BOUNDS)+1

# # AB STATS: ALL
# MEANS=[100.83741572079242, 100.4938850966076, 86.63500986931308, 118.72746674454453]
# STDEVS=[42.098045003124774, 39.07388735786421, 39.629813116928815, 34.72351480486876]
# DSETS_PATH='../datasets/los_angeles-plieades-lidar_USGS_LPC_CA_LosAngeles_2016_LAS_2018.STATS.csv'

# AB STATS: 2015,16 Train/valid
MEANS=[94.79936157686979, 92.8912348691044, 80.50194782393349, 108.14889758142212]
STDEVS=[36.37876660224377, 33.22686387734999, 33.30808192430284, 30.075380846943716]
DSETS_PATH='../datasets/los_angeles-plieades_naip-lidar_USGS_LPC_CA_LosAngeles_2016_LAS_2018.STATS.csv'
YEAR_MAX=2016

# # NAIP STATS: ALL (<2017)
# MEANS=[106.47083152919251, 104.25520495313522, 98.61836143687523, 119.95594400425841]
# STDEVS=[38.23711386806666, 34.410688920150264, 31.468324931640534, 31.831786730471276]
# DSET_PATH=f'{PROJECT_DIR}/datasets/los_angeles-naip-lidar_USGS_LPC_CA_LosAngeles_2016_LAS_2018.STATS.csv'

# # NAIP ONLY
# IBNDS={
#     '4': { 'min': 0 },    # ndvi
#     '5': { 'min': -0.35}  # ndwi
# }

#
# PLIEDES INPUT
IBNDS=None

#
# TORCH_KIT CLI
#
def model(**cfig):
    _header('model',cfig)
    model_type=cfig.pop('type','dlv3p')
    cfig['out_ch']=cfig.get('out_ch',NB_CATEGORIES)
    if model_type=='dlv3p':
        mod=dm.DeeplabV3plus(**cfig)
    elif model_type=='unet':
        mod=um.UNet(**cfig)
    else:
        raise ValueError(f'model_type ({model_type}) not implemented')
    if torch.cuda.is_available():
        mod=mod.cuda()
    return mod


def criterion(**cfig):
    ignore_index=cfig.get('ignore_index')
    weights=cfig.get('weights')
    print("criterion:",ignore_index,weights)
    if weights:
        weights=torch.Tensor(weights)
        if torch.cuda.is_available():
            weights=weights.cuda()
    if ignore_index is not None:
        criterion=nn.CrossEntropyLoss(weight=weights,ignore_index=ignore_index)
        # criterion=MaskedLoss(
        #     weight=weights,
        #     loss_type='ce',
        #     mask_value=ignore_index )
    else:
        criterion=nn.CrossEntropyLoss(weight=weights)
    return criterion


def optimizer(**cfig):
    _header('optimizer',cfig)
    opt_name=cfig.get('name',DEFAULT_OPTIMIZER)
    if opt_name=='adam':
        optimizer=torch.optim.Adam
    elif opt_name=='radam':
        optimizer=RAdam
    else:
        ValueError(f'optimizer "{opt_name}" not implemented')
    return optimizer


def loaders(**cfig):
    """
    """
    # INITAL DATASET HANDLING
    dsets_df=pd.read_csv(DSETS_PATH)
    train_df=dsets_df[dsets_df.dset_type=='train']
    valid_df=dsets_df[dsets_df.dset_type=='valid']
    train_df=train_df[train_df.input_year<=YEAR_MAX].iloc[1:6*8+1]
    valid_df=valid_df[valid_df.input_year<=YEAR_MAX]
    example_path=train_df.rgbn_path.iloc[0]
    #
    # on with the show
    #
    dev=cfig.get('dev')
    vmap=cfig.get('vmap')
    batch_size=cfig.get('batch_size',BATCH_SIZE)
    band_indices=['ndvi']
    augment=cfig.get('augment',True)
    shuffle=cfig.get('shuffle',True)
    no_data_value=cfig.get('no_data_value',False)
    cropping=cfig.get('cropping',None)
    float_cropping=cfig.get('float_cropping',None)
    update_version=cfig.get('update_version',False)
    print('AUGMENT:',augment)
    print('SHUFFLE:',shuffle)
    print('BATCH_SIZE:',batch_size)
    print('NO DATA VALUE:',no_data_value)
    print('CROPPING:',cropping)
    print('FLOAT CROPPING:',float_cropping)
    if (train_df.shape[0]>=batch_size*8) and (valid_df.shape[0]>=batch_size*2):
        if dev:
            train_df=train_df.sample(batch_size*8)
            valid_df=valid_df.sample(batch_size*2)
        dl_train=HeightIndexDataset.loader(
            batch_size=batch_size,
            # input_bands=[0,1,2],
            # input_band_count=3,
            band_indices=['ndvi'],
            category_bounds=HeightIndexDataset.NAIP_GREEN,
            input_bounds=IBNDS,
            dataframe=train_df,
            means=MEANS,
            stdevs=STDEVS,
            no_data_value=no_data_value,
            cropping=cropping,
            float_cropping=float_cropping,
            example_path=example_path,
            augment=augment,
            train_mode=True,
            target_dtype=np.int,
            shuffle_data=shuffle)
        return dl_train, None
        dl_valid=HeightIndexDataset.loader(
            batch_size=batch_size,
            # input_bands=[0,1,2],
            # input_band_count=3,
            band_indices=['ndvi'],
            category_bounds=HeightIndexDataset.NAIP_GREEN,
            input_bounds=IBNDS,
            dataframe=valid_df,
            means=MEANS,
            stdevs=STDEVS,
            no_data_value=no_data_value,
            cropping=cropping,
            float_cropping=float_cropping,
            example_path=example_path,
            augment=augment,
            train_mode=True,
            target_dtype=np.int,
            shuffle_data=shuffle)
        print("SIZE:",train_df.shape[0],valid_df.shape[0])
        return dl_train, dl_valid
    else:
        print('NOT ENOUGH DATA',train_df.shape[0],valid_df.shape[0],batch_size*8,batch_size*30)
        return False, False


#
# HELPERS
#
def _header(title,cfig=None):
    print('='*100)
    print(title)
    print('-'*100)
    if cfig:
        pprint(cfig)
[ "bguzder-williams@wri.org" ]
bguzder-williams@wri.org
f8184270f36e3f165d97bbb247f6f0b508fc5810
ba7d84b4b85be8c3221468527757e264e64616b9
/tests/hammytest.py
b5f03afc22f1e60ade3aca0eb505d0bf88fd3fe8
[]
no_license
gomesr/timetracker
c18eb4b6f33e08eadd72971216b16560ef085aa1
ce57a0791727a3b06e4b167fbeb3cb3e558ff2f1
refs/heads/master
2021-01-22T23:58:20.247393
2010-12-12T01:16:54
2010-12-12T01:16:54
1,130,286
0
0
null
null
null
null
UTF-8
Python
false
false
675
py
import unittest

from trackers.hammy import HamsterTracker
from hamster import client


class HammyTest(unittest.TestCase):

    def setUp(self):
        self.tracker = HamsterTracker()

    def test_create_100_activites(self):
        tags = []
        ids = []
        try:
            for i in range(1,100):
                ids.append(self.tracker.start("activity-%d" % i, "", "some elaborate desciption", tags))
        finally:
            # clean up!
            for id in ids:
                self.tracker.storage.remove_fact(id)
[ "rodneygomes@gmail.com" ]
rodneygomes@gmail.com
dd1953d6927d29066068ea81328364dee75a86e6
bbf1ae079309eca11270422d3f0d259d1515d430
/numerical-tours/python/todo/solutions/wavelet_2_haar2d.py
7ec8c89d23ba2108e274a13521844d6ad479f593
[ "BSD-2-Clause" ]
permissive
ZichaoDi/Di_MATLABTool
5e6a67b613c4bcf4d904ddc47c2744b4bcea4885
c071291c63685c236f507b2cb893c0316ab6415c
refs/heads/master
2021-08-11T07:28:34.286526
2021-08-04T18:26:46
2021-08-04T18:26:46
149,222,333
9
5
null
null
null
null
UTF-8
Python
false
false
2,522
py
def exo1():
    """
    Implement a full wavelet transform that extract iteratively wavelet
    coefficients, by repeating these steps. Take care of choosing the
    correct number of steps.
    """
    Jmin = 0
    fw = f
    for j in J: -1: Jmin:
        fw(1: 2^(j + 1), 1: 2^(j + 1)) = haar(fw(1: 2^(j + 1), 1: 2^(j + 1)))
        %
        j1 = J-j
        if j1 <4
            A = fw(1: 2^(j + 1), 1: 2^(j + 1))
            imageplot(A(1: 2^j, 2^j + 1: 2^(j + 1)), ['Horizontal, j = ' num2str(j)], 3, 4, j1 + 1)
            imageplot(A(2^j + 1: 2^(j + 1), 1: 2^j), ['Vertical, j = ' num2str(j)], 3, 4, j1 + 5)
            imageplot(A(2^j + 1: 2^(j + 1), 2^j + 1: 2^(j + 1)), ['Diagonal, j = ' num2str(j)], 3, 4, j1 + 9)


def exo2():
    """
    Write the inverse wavelet transform that computes $f_1$ from
    coefficients |fW|.
    """
    f1 = fw
    for j in Jmin: J:
        s = 1: 2^j; t = 2^j + 1: 2^(j + 1); u = 1: 2^(j + 1)
        f1(u, u) = ihaar(f1(s, s), f1(s, t), f1(t, s), f1(t, t))
        %
        j1 = J-j
        if j1 >0 & j1 <5
            A = f1(1: 2^(j + 1), 1: 2^(j + 1))
            subplot(2, 2, j1)
            imageplot(A, ['Partial reconstruction, j = ' num2str(j)])


def exo3():
    """
    Display the reconstructed signal obtained from |fw1|, for a decreasing
    cut-off scale $j$.
    """
    jlist = J-(1: 4)
    fw = perform_haar_transf(f, 1, + 1)
    for i in 1: length(jlist):
        j = jlist(i)
        fw1 = zeros(n); fw1(1: 2^j, 1: 2^j) = fw(1: 2^j, 1: 2^j)
        f1 = perform_haar_transf(fw1, 1, -1)
        % display
        subplot(2, 2, i)
        imageplot(f1)
        title(strcat(['j = ' num2str(j) ', SNR = ' num2str(snr(f, f1), 3) 'dB']))


def exo4():
    """
    Find the threshold $T$ so that the number of remaining coefficients in
    |fwT| is a fixed number $m$. Use this threshold to compute |fwT| and
    then display the corresponding approximation $f_1$ of $f$. Try for an
    increasing number $m$ of coeffiients.
    """
    m_list = round([.005 .01 .05 .1]*N); % number of kept coefficients
    fw = perform_haar_transf(f, 1, + 1)
    for i in 1: length(m_list):
        m = m_list(i)
        % select threshold
        v = sort(abs(fw(: )))
        if v(1) <v(N)
            v = reverse(v)
        T = v(m)
        fwT = fw .* (abs(fw) >= T)
        % inverse
        f1 = perform_haar_transf(fwT, 1, -1)
        % display
        subplot(2, 2, i)
        imageplot(f1)
        title(strcat(['m = ' num2str(m) ', SNR = ' num2str(snr(f, f1), 3) 'dB']))
[ "wendydi@compute001.mcs.anl.gov" ]
wendydi@compute001.mcs.anl.gov
286cc8c250f2c2b4030ffc5e75d7d1213b47a934
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/nouns/_yens.py
f7c90d82f8fc7ae9864e4492c2449f9c31d5b2f4
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
217
py
from xai.brain.wordbase.nouns._yen import _YEN

#calss header
class _YENS(_YEN, ):

    def __init__(self,):
        _YEN.__init__(self)
        self.name = "YENS"
        self.specie = 'nouns'
        self.basic = "yen"
        self.jsondata = {}
[ "xingwang1991@gmail.com" ]
xingwang1991@gmail.com
a31d0693760097d9ec0bfc62e4a5c4d7383c09ab
378b200007c5d3633572b61eb3dd2180748086b7
/chefsBackEnd/chefsBackEnd/asgi.py
d077d3550da2054b45a48c64401ec50a84113e40
[]
no_license
jgartsu12/chefs-table-backend
4163c2c9a2bb586d4432c332238682bf282ef967
71611cf17aa457f8bc9a7ec7d853c570062d22fb
refs/heads/master
2022-12-16T04:22:30.954831
2020-07-08T19:24:37
2020-07-08T19:24:37
251,097,796
1
0
null
2022-12-08T10:13:44
2020-03-29T17:59:15
Python
UTF-8
Python
false
false
401
py
""" ASGI config for chefsBackEnd project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chefsBackEnd.settings') application = get_asgi_application()
[ "jgartsu12@gmail.com" ]
jgartsu12@gmail.com
764c228e5a8b115f7ca60c1480fdff36b20ab047
8a3726abfc9cb72d8ccf7d32b18edabf8d16b630
/18/a.py
32847a4eb7fdc71ad694396872b27a628860cf2a
[]
no_license
alex-stephens/aoc2015
48a46efc1a888ea2d451a5938fc404d26e96e1a0
ccc1c85f8da7a0585003b2e4f99f3f1def35ec0b
refs/heads/master
2023-02-05T23:02:19.148138
2020-12-27T19:16:47
2020-12-27T19:16:47
324,579,165
0
0
null
null
null
null
UTF-8
Python
false
false
1,065
py
grid = [list(line.strip()) for line in open('input.txt').readlines()]
rows, cols = len(grid), len(grid[0])

def count_neighbours(i, j):
    rmin, rmax = max(i-1, 0), min(i+1, rows-1)
    cmin, cmax = max(j-1, 0), min(j+1, cols-1)

    ans = 0
    for r in range(rmin, rmax+1):
        for c in range(cmin, cmax+1):
            if (r,c) == (i,j):
                continue
            ans += 1 if grid[r][c] == '#' else 0
    return ans

it = 100
for i in range(it):
    new_grid = [['x' for _ in range(cols)] for _ in range(rows)]

    for r in range(rows):
        for c in range(cols):
            count = count_neighbours(r,c)
            if grid[r][c] == '#' and (count != 2 and count != 3):
                new_grid[r][c] = '.'
            elif grid[r][c] == '.' and count == 3:
                new_grid[r][c] = '#'
            else:
                new_grid[r][c] = grid[r][c]

    grid = [list(x) for x in new_grid]

    # print('--------------------------')
    # for g in grid:
    #     print(''.join(g))

print(sum([''.join(r).count('#') for r in grid]))
[ "alexstephens9@gmail.com" ]
alexstephens9@gmail.com
05c5693d3b24a5c3fd147316f1f2cfeaba19014b
5c39f5ac529e9f292ba0e4965fd684d4c6eefe8a
/migrations/0001_initial.py
8570a25dfd79013e6c9c3202871e7bdc877c28d4
[]
no_license
joshianshul2/csv_db
6d24dec8bdcd8f00115a8729d5036beb47994d0e
e7215002c0a2fb8cadd0d4087b8651b1ec9e30ea
refs/heads/master
2023-04-21T19:46:56.941399
2021-05-11T17:29:38
2021-05-11T17:29:38
356,846,462
0
0
null
null
null
null
UTF-8
Python
false
false
5,962
py
# Generated by Django 3.2 on 2021-04-07 05:30

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='AvgMaster',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('county', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('NetPrAr', models.FloatField(default=0.0)),
                ('Rate', models.FloatField()),
                ('UserPercentage', models.FloatField(default=0.0)),
                ('FinaleValue', models.FloatField(default=0.0)),
                ('accountId', models.BigIntegerField()),
                ('acres', models.FloatField()),
                ('adTargetingCountyId', models.BigIntegerField()),
                ('address', models.CharField(max_length=255)),
                ('baths', models.BigIntegerField()),
                ('beds', models.BigIntegerField()),
                ('brokerCompany', models.CharField(max_length=255)),
                ('brokerName', models.CharField(max_length=255)),
                ('Url', models.URLField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('cityID', models.BigIntegerField()),
                ('companyLogoDocumentId', models.BigIntegerField()),
                ('countyId', models.BigIntegerField()),
                ('description', models.TextField(max_length=255)),
                ('hasHouse', models.BooleanField()),
                ('hasVideo', models.BooleanField()),
                ('hasVirtualTour', models.BigIntegerField()),
                ('imageCount', models.BigIntegerField()),
                ('imageAltTextDisplay', models.CharField(max_length=255)),
                ('isHeadlineAd', models.BooleanField()),
                ('lwPropertyId', models.BigIntegerField()),
                ('isALC', models.BigIntegerField()),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('price', models.FloatField()),
                ('types', models.TextField(max_length=255)),
                ('status', models.CharField(max_length=20)),
                ('status1', models.CharField(max_length=255)),
                ('zip', models.BigIntegerField()),
                ('Descrpt', models.TextField(default='!', max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='PropertyMaster',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('accountId', models.BigIntegerField()),
                ('acres', models.FloatField()),
                ('adTargetingCountyId', models.BigIntegerField()),
                ('address', models.CharField(max_length=255)),
                ('baths', models.BigIntegerField()),
                ('beds', models.BigIntegerField()),
                ('brokerCompany', models.CharField(max_length=255)),
                ('brokerName', models.CharField(max_length=255)),
                ('Url', models.URLField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('cityID', models.BigIntegerField()),
                ('companyLogoDocumentId', models.BigIntegerField()),
                ('county', models.CharField(max_length=255)),
                ('countyId', models.BigIntegerField()),
                ('description', models.TextField(max_length=255)),
                ('hasHouse', models.BooleanField()),
                ('hasVideo', models.BooleanField()),
                ('hasVirtualTour', models.BigIntegerField()),
                ('imageCount', models.BigIntegerField()),
                ('imageAltTextDisplay', models.CharField(max_length=255)),
                ('isHeadlineAd', models.BooleanField()),
                ('lwPropertyId', models.BigIntegerField()),
                ('isALC', models.BigIntegerField()),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('price', models.FloatField()),
                ('types', models.TextField(max_length=255)),
                ('state', models.CharField(max_length=255)),
                ('status', models.CharField(max_length=20)),
                ('status1', models.CharField(max_length=255)),
                ('zip', models.BigIntegerField()),
                ('Rate', models.FloatField()),
                ('NetPrAr', models.FloatField(default=0.0)),
                ('Descrpt', models.TextField(default='!', max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='StatusMaster',
            fields=[
                ('status', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
[ "joshi.anshul2@gmail.com" ]
joshi.anshul2@gmail.com
159e62cf42f265a5d96156ae23363dbeced3b8c0
1e53216c58f3c7843031721305590b83dbaed3f2
/week_four/db_demo/db_app/migrations/0003_message_post_user_who_liked.py
59fc1606c04688bdf72a3cafe91a74cffc27e608
[]
no_license
MTaylorfullStack/python_july_20
991852ba12d6f06d6b93b8efc60b66ee311b5cb3
bdfb0d9a74300f2d6743ac2d108571692ca43ad9
refs/heads/master
2022-12-12T18:03:00.886048
2020-08-27T23:53:31
2020-08-27T23:53:31
277,956,745
2
2
null
2023-06-30T20:06:11
2020-07-08T01:09:34
Python
UTF-8
Python
false
false
425
py
# Generated by Django 2.2 on 2020-07-29 00:53

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('db_app', '0002_message_post'),
    ]

    operations = [
        migrations.AddField(
            model_name='message_post',
            name='user_who_liked',
            field=models.ManyToManyField(related_name='liked_post', to='db_app.User'),
        ),
    ]
[ "mtaylor@codingdojo.com" ]
mtaylor@codingdojo.com
2ba794c5fbdf6b165029c3b20b7d4ae08486b115
4fd77ce692e10e962483c7e3e6e76c44887e9f52
/geatpy/templates/soeas/GA/studGA/soea_psy_studGA_templet.py
7cb191a9338b905bc256f6ecb2c43a2de4b72a72
[ "MIT" ]
permissive
Passion-long/geatpy
d1aaf1622058473649840a9e2e26f9d0b0844bce
8e2ab8730babaae640272bd4c77106519bdd120c
refs/heads/master
2020-07-09T13:40:36.217907
2019-08-23T03:36:12
2019-08-23T03:36:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,750
py
# -*- coding: utf-8 -*-
import numpy as np
import geatpy as ea  # import the geatpy library
from sys import path as paths
from os import path
paths.append(path.split(path.split(path.realpath(__file__))[0])[0])


class soea_psy_studGA_templet(ea.SoeaAlgorithm):

    """
soea_psy_studGA_templet.py - Polysomy Stud GA templet (multi-chromosome stud GA template)

Template notes:
    This template is the multi-chromosome version of the built-in
    soea_studGA_templet, so the population object used here is an instance of
    PsyPopulation, the multi-chromosome population class that supports mixed
    encodings.

Algorithm description:
    This template implements the stud genetic algorithm. See reference [1]
    for the detailed algorithm flow.

Usage notes:
    The objective function called by this template has the form aimFunc(pop),
    where pop is a population object representing one population. The Phen
    attribute of pop (the phenotypes of the population's chromosomes) is
    equivalent to the matrix of decision variables of all individuals. The
    function computes the matrix of objective values of all individuals from
    Phen and assigns it to pop's ObjV attribute. If there are constraints,
    the constraint-violation matrix CV is computed and assigned to pop's CV
    attribute (see the Geatpy data structures). The function returns nothing:
    the objective values are stored in the population object's ObjV attribute
    and the constraint-violation matrix in its CV attribute.
    For example, if population is a population object, calling
    aimFunc(population) evaluates the objectives; population.ObjV then holds
    the objective values and population.CV the constraint-violation matrix.
    If your problem does not follow this convention, modify this template or
    define a new one.

References:
    [1] Khatib W, Fleming P J. The stud GA: A mini revolution?[C]//
    International Conference on Parallel Problem Solving from Nature.
    Springer, Berlin, Heidelberg, 1998.

    """

    def __init__(self, problem, population):
        ea.SoeaAlgorithm.__init__(self, problem, population)  # call the parent constructor first
        if str(type(population)) != "<class 'PsyPopulation.PsyPopulation'>":
            raise RuntimeError('The population object passed in must be of type PsyPopulation')
        self.name = 'psy-studGA'
        self.problem = problem
        self.population = population
        self.selFunc = 'tour'  # tournament selection operator
        # With multiple chromosomes we need one recombination and one mutation
        # operator per chromosome, and hence matching probabilities for each.
        self.recFuncs = []
        self.mutFuncs = []
        self.pcs = []
        self.pms = []
        for i in range(population.ChromNum):
            if population.Encodings[i] == 'P':
                self.recFuncs.append('xovpmx')  # partially matched crossover
                self.mutFuncs.append('mutinv')  # inversion mutation (chromosome segment reversal)
            else:
                self.recFuncs.append('xovdp')  # two-point crossover
                if population.Encodings[i] == 'BG':
                    self.mutFuncs.append('mutbin')  # binary mutation
                elif population.Encodings[i] == 'RI':
                    self.mutFuncs.append('mutbga')  # mutation operator from the breeder GA
                else:
                    raise RuntimeError('Encoding must be ''BG'', ''RI'' or ''P''.')
            self.pcs.append(1)  # recombination probability
            self.pms.append(1)  # mutation probability for the whole chromosome

    def run(self):
        # ========================== initialization ===========================
        population = self.population
        NIND = population.sizes
        self.initialization()  # initialize the template's dynamic parameters
        # ========================== prepare evolution =========================
        population.initChrom(NIND)  # initialize the chromosome matrices (includes decoding; see the PsyPopulation source)
        self.problem.aimFunc(population)  # evaluate the population's objective values
        population.FitnV = ea.scaling(self.problem.maxormins * population.ObjV, population.CV)  # compute fitness
        self.evalsNum = population.sizes  # record the number of evaluations
        # =========================== start evolution ==========================
        while self.terminated(population) == False:
            bestIdx = np.argmax(population.FitnV, axis=0)  # index of the best individual of the current generation; axis=0 makes a vector be returned
            studPop = population[np.tile(bestIdx, (NIND//2))]  # replicate the best individual NIND//2 times to form a "stud population"
            restPop = population[np.where(np.array(range(NIND)) != bestIdx)[0]]  # the population of all individuals except the elite
            # Select individuals to later mate with the stud population
            tempPop = restPop[ea.selecting(self.selFunc, restPop.FitnV, (NIND - studPop.sizes))]
            # Merge the stud population with the selected individuals
            population = studPop + tempPop
            # Evolutionary operators: recombine and mutate the chromosomes of each encoding separately
            for i in range(population.ChromNum):
                population.Chroms[i] = ea.recombin(self.recFuncs[i], population.Chroms[i], self.pcs[i])  # recombination
                population.Chroms[i] = ea.mutate(self.mutFuncs[i], population.Encodings[i], population.Chroms[i], population.Fields[i], self.pms[i])  # mutation
            # Evaluate the objective values of the evolved individuals
            population.Phen = population.decoding()  # decode the chromosomes
            self.problem.aimFunc(population)
            self.evalsNum += population.sizes  # update the evaluation count
            population.FitnV = ea.scaling(self.problem.maxormins * population.ObjV, population.CV)  # compute fitness

        return self.finishing(population)  # call finishing to complete follow-up work and return the result
[ "jazzbin@geatpy.com" ]
jazzbin@geatpy.com
47bf2f00c6730182259d81aeab1bf82ce408ef5d
c7115a0a1470310792b81cd097e0aa47ed095195
/django_thoughtapi/manage.py
5045eb05410e0449491ad1e7a92edec2a1f3c746
[ "MIT" ]
permissive
qwergram/thoughts_api
80818424b3755f671cfb65fcddff5c0769fa9e27
47e9a76cc15e30c36232b253eb0e44bb5f401482
refs/heads/master
2020-12-24T22:29:12.401158
2016-04-30T22:45:20
2016-04-30T22:45:20
57,338,528
0
0
null
2016-04-29T23:40:38
2016-04-28T22:46:59
null
UTF-8
Python
false
false
260
py
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_thoughtapi.settings")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
[ "npengra317@gmail.com" ]
npengra317@gmail.com
69c0bb652daa62eea8c9a6a5378fd562629cf26a
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03095/s108733747.py
8493da82e83df4f2a53a5e799e1313b9f63c0471
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
1,818
py
import sys
import math
from collections import Counter

N = int(input())
S = input()

MOD = 1000000007

# baba
# a(2), b(2), ab(1), ba(3)
# baab
# a(2), b(2), ab(2), ba(2)

# the one-character case is easy: just count occurrences
# what about two characters?
# for 'ab', which 'a' do we pick? → then what about the 'b's after it...
# trying everything is 2^100000, no way

# can DP work?
# let's design the DP
# dp[n]: the answer after looking at the first n characters
# dp[n] = dp[n-1]
# baba
# dp[0] = 1 (b)
# dp[1] = dp[0] (pick only (b)) + 1 (pick only a) + dp[0] * 1 (ab)

# what if it is a new character?
# dp[n] = dp[n-1] (don't pick it) + dp[n-1] (pick it) + 1
# what if we've already seen the character?
# no increase at the one-character level
# for longer subsequences, the number of pick choices grows
# ba (3)
# baa → 3 + 1?
# dp[n] = dp[n] (as is) + subsequences using the last character
# what happens if we use the last character? → we build from the other kinds of characters, but isn't that hard????

# baba
# bab gives 5: a, b(2), ab(1), ba(1)
# if we use the last character, choose one of the b's (or choose nothing)
# whether to pick a b? * which one to pick?
# case of not picking b (1) + case of picking b (which one to choose?)
# (1 + 2)

# abca
# abc gives 6: a, b, c, ab, ac, bc
# if we use the last character, combinations of the remaining b and c
# pick b or not * pick c or not, 4 ways?

ans = 1
counter = Counter()
counter[S[0]] += 1
for ch in S[1:]:
    if ch in counter:
        tmp = 1
        for k, cnt in counter.items():
            if k == ch:
                continue
            tmp = (tmp * (1 + cnt)) % MOD
        ans = (ans + tmp) % MOD
        counter[ch] += 1
    else:
        ans = (2 * ans) % MOD
        ans = (ans + 1) % MOD
        counter[ch] += 1

print(ans)
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
ec88adb74b40ae3b44f04b1e117c8c881872eb99
ba2d24fd6c5ce7d490ee57f224fd5435a1132093
/setup.py
7b0ac69b67ea99435a867d57e8b00a0787e5f3aa
[ "MIT" ]
permissive
FlowerOda/pytest-auto-parametrize
cb2aff37308bff571b980da88f222f8b88e4e36b
9db33bb06de13c26f753bfd18e254ce10ae1256c
refs/heads/master
2022-01-09T16:54:33.796383
2018-10-09T08:56:09
2018-10-09T08:56:09
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,591
py
from setuptools import setup
from setuptools.command.test import test as TestCommand
import sys

__version__ = 'unknown'

# "import" __version__
for line in open('pytest_auto_parametrize.py'):
    if line.startswith('__version__'):
        exec(line)
        break


class PyTest(TestCommand):
    """Enable "python setup.py test".

    Stripped down from:
    http://doc.pytest.org/en/latest/goodpractices.html#manual-integration

    """

    def run_tests(self):
        import pytest
        sys.exit(pytest.main([]))


setup(
    name='pytest-auto-parametrize',
    py_modules=['pytest_auto_parametrize'],
    version=__version__,
    author='Matthias Geier',
    author_email='Matthias.Geier@gmail.com',
    description='pytest plugin: avoid repeating arguments in parametrize',
    long_description=open('README.rst').read(),
    license='MIT',
    keywords='parametrized testing'.split(),
    url='https://github.com/mgeier/pytest-auto-parametrize',
    platforms='any',
    zip_safe=True,
    classifiers=[
        'Framework :: Pytest',
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
    ],
    entry_points={
        'pytest11': ['pytest_auto_parametrize = pytest_auto_parametrize'],
    },
    tests_require=['pytest'],
    cmdclass={'test': PyTest},
)
[ "Matthias.Geier@gmail.com" ]
Matthias.Geier@gmail.com