| blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3–281) | content_id (string, length 40) | detected_licenses (list, 0–57 items) | license_type (string, 2 classes) | repo_name (string, length 6–116) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k–668M, nullable) | star_events_count (int64, 0–102k) | fork_events_count (int64, 0–38.2k) | gha_license_id (string, 17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 107 classes) | src_encoding (string, 20 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 4–6.02M) | extension (string, 78 classes) | content (string, length 2–6.02M) | authors (list, 1 item) | author (string, length 0–175) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d8c04c8f1fcf52bfd14c5b250ab600c0a72958d7
|
e0f26df33e8482e12e48290d3fa9be0f79e04d4c
|
/src/para_tuning.py
|
ad41d6ce5da8d2e53aed080b96497670e7b78619
|
[] |
no_license
|
spencerxhani/fruad_detection
|
e4e90d0f351ad629b6694bbcdc259b13ec5ebce3
|
8d0c22ee748be315114061e486a4100ce6a8ce39
|
refs/heads/master
| 2022-12-03T18:39:04.231834
| 2020-03-29T09:50:34
| 2020-03-29T09:50:34
| 250,851,946
| 0
| 0
| null | 2022-11-21T21:05:17
| 2020-03-28T17:16:55
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 12,308
|
py
|
import sys
import time
import argparse
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
import numpy as np
from contextlib import contextmanager
import gc
import lightgbm as lgb                      # used by lgb.Dataset / lgb.cv below; was missing
from bayes_opt import BayesianOptimization  # used in bayes_parameter_opt_lgb below; was missing
from util import s_to_time_format, string_to_datetime, hour_to_range, kfold_lightgbm, kfold_xgb
from util import _time_elapsed_between_last_transactions,time_elapsed_between_last_transactions
#from util import add_auto_encoder_feature
from time import strftime, localtime
import logging
from config import Configs
from extraction import merge_and_split_dfs, get_conam_dict_by_day, last_x_day_conam
from sklearn.metrics import f1_score
# logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
#log_file = '{}-{}-{}.log'.format(opt.model_name, opt.dataset, strftime("%y%m%d-%H%M", localtime()))
log_file = '../result/{}_tuning.log'.format(strftime("%y%m%d-%H%M", localtime()))
logger.addHandler(logging.FileHandler(log_file))
def lgb_f1_score(y_pred, y_true):
"""evaluation metric"""
#print ("y_pred",y_pred)
#print ("y_true",y_true)
y_hat = np.round(y_pred)
return 'f1', f1_score(y_true.get_label(), y_hat), True
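# Tune LightGBM hyper-parameters with Bayesian optimisation: lgb_eval evaluates one candidate
# parameter set via lgb.cv and returns the mean cross-validated F1, which BayesianOptimization
# maximises over the given ranges; the best score and parameters are returned at the end.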
def bayes_parameter_opt_lgb(X, y,
init_round=15,
opt_round=25,
n_folds=5,
random_seed=1030,
n_estimators=10000,
learning_rate=0.05,
output_process=True):
# prepare data
train_data = lgb.Dataset(data=X, label=y, categorical_feature='auto', free_raw_data = False)
# parameters
def lgb_eval(num_leaves, feature_fraction, bagging_fraction,
max_depth, lambda_l1, lambda_l2, min_split_gain,
min_child_weight):
params = {'application':'binary',
'num_iterations': n_estimators,
'learning_rate':learning_rate,
'early_stopping_round':100,
}
params["num_leaves"] = int(round(num_leaves))
params['feature_fraction'] = max(min(feature_fraction, 1), 0)
params['bagging_fraction'] = max(min(bagging_fraction, 1), 0)
params['max_depth'] = int(round(max_depth))
params['lambda_l1'] = max(lambda_l1, 0)
params['lambda_l2'] = max(lambda_l2, 0)
params['min_split_gain'] = min_split_gain
params['min_child_weight'] = min_child_weight
cv_result = lgb.cv(params,
train_data,
nfold=n_folds,
seed=random_seed,
stratified=True,
categorical_feature = "auto",
feval=lgb_f1_score)
print (cv_result)
return max(cv_result['f1-mean'])
# range
lgbBO = BayesianOptimization(lgb_eval, {'num_leaves': (24, 45),
'feature_fraction': (0.1, 0.9),
'bagging_fraction': (0.8, 1),
'max_depth': (5, 8.99),
'lambda_l1': (0, 5),
'lambda_l2': (0, 3),
'min_split_gain': (0.001, 0.1),
'min_child_weight': (5, 50)}, random_state=0)
# optimize
lgbBO.maximize(init_points=init_round, n_iter=opt_round)
# output optimization process
if output_process==True:
        pd.DataFrame(lgbBO.res).sort_values(by = "target", ascending=False).to_csv("../result/bayes_opt_result.csv")  # lgbBO.res holds the optimisation history; `opt_params` is not defined inside this function
return lgbBO.max["target"], lgbBO.max["params"] # best score and best parameter
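# Recipe-driven aggregation features: each recipe entry pairs a list of group-by columns with
# (target column, aggregation method) tuples; the aggregate is computed over train and test
# combined and merged back onto both frames under a method_target_BY_cols column name.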
def group_target_by_cols(df_train, df_test, recipe):
df = pd.concat([df_train, df_test], axis = 0)
for m in range(len(recipe)):
cols = recipe[m][0]
for n in range(len(recipe[m][1])):
target = recipe[m][1][n][0]
method = recipe[m][1][n][1]
name_grouped_target = method+"_"+target+'_BY_'+'_'.join(cols)
tmp = df[cols + [target]].groupby(cols).agg(method)
tmp = tmp.reset_index().rename(index=str, columns={target: name_grouped_target})
df_train = df_train.merge(tmp, how='left', on=cols)
df_test = df_test.merge(tmp, how='left', on=cols)
del tmp
gc.collect()
return df_train, df_test
@contextmanager
def timer(title):
t0 = time.time()
yield
logger.info("{} - done in {:.0f}s".format(title, time.time() - t0))
def main(args):
with timer("Process train/test application"):
#-------------------------
# load dataset
#-------------------------
df_train = pd.read_csv(args.train_file)
df_test = pd.read_csv(args.test_file)
#-------------------------
# pre-processing
#-------------------------
for cat in Configs.CATEGORY:
df_train[cat] = df_train[cat].astype('category') #.cat.codes
df_test[cat] = df_test[cat].astype('category')
for df in [df_train, df_test]:
# pre-processing
df["loctm_"] = df.loctm.astype(int).astype(str)
df.loctm_ = df.loctm_.apply(s_to_time_format).apply(string_to_datetime)
# # time-related feature
df["loctm_hour_of_day"] = df.loctm_.apply(lambda x: x.hour).astype('category')
df["loctm_minute_of_hour"] = df.loctm_.apply(lambda x: x.minute)
df["loctm_second_of_min"] = df.loctm_.apply(lambda x: x.second)
# df["loctm_absolute_time"] = [h*60+m for h,m in zip(df.loctm_hour_of_day,df.loctm_minute_of_hour)]
df["hour_range"] = df.loctm_.apply(lambda x: hour_to_range(x.hour)).astype("category")
# removed the columns no need
df.drop(columns = ["loctm_"], axis = 1, inplace = True)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add bacno/cano feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.CONAM_AGG_RECIPE_1)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add iterm-related feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.ITERM_AGG_RECIPE)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add conam-related feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.CONAM_AGG_RECIPE_2)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add hour-related feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.HOUR_AGG_RECIPE)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add cano/conam feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.CANO_CONAM_COUNT_RECIPE)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add cano/bacno latent feature"):
df = pd.read_csv("../features/bacno_latent_features.csv")
df_train = df_train.merge(df, on = "bacno", how = "left")
df_test = df_test.merge(df, on = "bacno", how = "left")
df = pd.read_csv("../features/cano_latent_features.csv")
df_train = df_train.merge(df, on = "cano", how = "left")
df_test = df_test.merge(df, on = "cano", how = "left")
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add locdt-related feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.LOCDT_CONAM_RECIPE)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add mchno-related feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.MCHNO_CONAM_RECIPE)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add scity-related feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.SCITY_CONAM_RECIPE)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add stocn-related feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.STOCN_CONAM_RECIPE)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add mchno/bacno latent feature"):
df = pd.read_csv("../features/bacno_latent_features_w_mchno.csv")
df_train = df_train.merge(df, on = "bacno", how = "left")
df_test = df_test.merge(df, on = "bacno", how = "left")
df = pd.read_csv("../features/mchno_latent_features.csv")
df_train = df_train.merge(df, on = "mchno", how = "left")
df_test = df_test.merge(df, on = "mchno", how = "left")
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add elapsed time feature"):
df = pd.concat([df_train, df_test], axis = 0)
df.sort_values(by = ["bacno","locdt"], inplace = True)
df["time_elapsed_between_last_transactions"] = df[["bacno","locdt"]] \
.groupby("bacno").apply(_time_elapsed_between_last_transactions).values
df_train = df[~df.fraud_ind.isnull()]
df_test = df[df.fraud_ind.isnull()]
df_test.drop(columns = ["fraud_ind"], axis = 1, inplace = True)
del df
gc.collect()
df_train["time_elapsed_between_last_transactions"] = df_train[["bacno","locdt","time_elapsed_between_last_transactions"]] \
.groupby(["bacno","locdt"]).apply(time_elapsed_between_last_transactions).values
df_test["time_elapsed_between_last_transactions"] = df_test[["bacno","locdt","time_elapsed_between_last_transactions"]] \
.groupby(["bacno","locdt"]).apply(time_elapsed_between_last_transactions).values
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
with timer("Add elapsed time aggregate feature"):
df_train, df_test = group_target_by_cols(df_train, df_test, Configs.TIME_ELAPSED_AGG_RECIPE)
logger.info("Train application df shape: {}".format(df_train.shape))
logger.info("Test application df shape: {}".format(df_test.shape))
feats = [f for f in df_train.columns if f not in ["fraud_ind"]]
X,y = df_train[feats], df_train.fraud_ind
return X,y
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train_file', default='../../dataset/train.csv', type=str)
parser.add_argument('--test_file', default='../../dataset/test.csv', type=str)
X,y = main(parser.parse_args())
opt_score, opt_params = bayes_parameter_opt_lgb(X, y,
init_round=5,
opt_round=10,
n_folds=2,
random_seed=6,
n_estimators=10,
learning_rate=0.2)
|
[
"r06546041@ntu.edu.tw"
] |
r06546041@ntu.edu.tw
|
81353cc3889ed290f47df18bd617a984e547a18c
|
c3358a9a9914fdba9a4a276b68f7c3c921571a14
|
/bin/wheel
|
b2a57ed3a505f0c031772736af677e9172331846
|
[] |
no_license
|
mohithg/django_learn
|
ee411da9402ff8a73ce8120f25e1ce558d2936d7
|
cb917cbcf53be1ec9d8ac30b5d45d0e723995df9
|
refs/heads/master
| 2021-08-07T05:00:24.800568
| 2017-11-07T15:35:43
| 2017-11-07T15:35:43
| 109,852,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
#!/Users/mohithg/learning/django/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"mohithgm@gmail.com"
] |
mohithgm@gmail.com
|
|
e1dcd2a11d7423ba518efc1697c3a148293ffa2a
|
5456502f97627278cbd6e16d002d50f1de3da7bb
|
/components/google/core/browser/DEPS
|
26e9743a04d2db628f4a7357a7d73e4ad5cf843a
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/Chromium_7C66
|
72d108a413909eb3bd36c73a6c2f98de1573b6e5
|
c8649ab2a0f5a747369ed50351209a42f59672ee
|
refs/heads/master
| 2023-03-16T12:51:40.231959
| 2017-12-20T10:38:26
| 2017-12-20T10:38:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
include_rules = [
"+components/data_use_measurement/core",
"+components/keyed_service/core",
"+components/pref_registry",
]
|
[
"lixiaodonglove7@aliyun.com"
] |
lixiaodonglove7@aliyun.com
|
|
5c53a777153b9ad4cece20454b6b93bfa892ab0d
|
171baddb78f2f7bfdf32490112cff4bd2f32389f
|
/scripts/corridor_load_histograms.py
|
c266fe83c611e81fd87d34c25a2224b4ec8be1b9
|
[] |
no_license
|
sergimolina/stefmap_ros
|
6795f0c988ece1ef56e888b8a295b40a28cdce93
|
c55fa3497a162f4a8163b914df023a95379d7ef6
|
refs/heads/master
| 2022-12-06T23:42:46.335325
| 2022-11-30T15:40:20
| 2022-11-30T15:40:20
| 168,020,781
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
#!/usr/bin/python2
import os
if __name__ == '__main__':
#python ~/workspace/cpp_ws/src/mod_ros/stefmap_ros/scripts/tools/load_histograms.py ../histograms.txt
output_file_name = "./../data/corridor_2017_05_31_histograms.txt"
print("Loading histograms to FreMEn...")
os.system("./tools/load_histograms.py "+output_file_name)
print("Done")
|
[
"sergimolina91@gmail.com"
] |
sergimolina91@gmail.com
|
b903d4dafdaad69917379130429923b552115ff8
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-workbench-ide/aliyunsdkworkbench_ide/request/v20210121/AddEnvironmentRequest.py
|
0d20b6717b9971baad3c4aba3f7c1bdd0b316b36
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,700
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class AddEnvironmentRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Workbench-ide', '2021-01-21', 'AddEnvironment')
self.set_method('POST')
def get_ProductId(self): # Long
return self.get_query_params().get('ProductId')
def set_ProductId(self, ProductId): # Long
self.add_query_param('ProductId', ProductId)
def get_EnvName(self): # String
return self.get_query_params().get('EnvName')
def set_EnvName(self, EnvName): # String
self.add_query_param('EnvName', EnvName)
def get_CurrentOrgId(self): # String
return self.get_query_params().get('CurrentOrgId')
def set_CurrentOrgId(self, CurrentOrgId): # String
self.add_query_param('CurrentOrgId', CurrentOrgId)
def get_SecurityGroupId(self): # String
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self, SecurityGroupId): # String
self.add_query_param('SecurityGroupId', SecurityGroupId)
def get_EnvDescription(self): # String
return self.get_query_params().get('EnvDescription')
def set_EnvDescription(self, EnvDescription): # String
self.add_query_param('EnvDescription', EnvDescription)
def get_SupportComputeTypess(self): # RepeatList
return self.get_query_params().get('SupportComputeTypes')
def set_SupportComputeTypess(self, SupportComputeTypes): # RepeatList
for depth1 in range(len(SupportComputeTypes)):
self.add_query_param('SupportComputeTypes.' + str(depth1 + 1), SupportComputeTypes[depth1])
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_IsOpenNatEip(self): # Boolean
return self.get_query_params().get('IsOpenNatEip')
def set_IsOpenNatEip(self, IsOpenNatEip): # Boolean
self.add_query_param('IsOpenNatEip', IsOpenNatEip)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
f160ae11bb28516c727156fdb31f749a0e3e40e9
|
9b903480e4153e31f1065b59af670968ba55902f
|
/weather.py
|
5b9a35bbbdaf52d61b2be2ccc6e48e46e2d268f7
|
[] |
no_license
|
nickdebCompApps/snips-assistant
|
d7e1fac0fa5cce342550de599df01538feca2137
|
a4f22f5298729514b990db21b8daa4fb40411d45
|
refs/heads/master
| 2020-03-09T03:09:35.982017
| 2018-04-07T18:59:04
| 2018-04-07T18:59:04
| 128,558,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,330
|
py
|
import forecastio
import datetime
from time import sleep
import requests
import json
from json import dumps
from key import keys
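# Convert an "HH:MM" 24-hour time string into a 12-hour "HH:MM AM/PM" string.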
def timeConvert(miliTime):
hours, minutes = miliTime.split(":")
hours, minutes = int(hours), int(minutes)
setting = " AM"
if hours >= 12:
if hours == 12:
setting = " PM"
hours = hours
else:
setting = " PM"
hours -= 12
if hours == 0:
hours = 12
return(("%02d:%02d" + setting) % (hours, minutes))
def Weather(conn):
ip_url = 'https://freegeoip.net/json'
request_zip = requests.get(ip_url)
load_zip = json.loads(request_zip.text)
lat = str(load_zip['latitude'])
longs = str(load_zip['longitude'])
    API = keys['WEATHER_API']  # `keys` is what the import above provides; the original `key.api_keys` name was never imported (note: the request below still uses a hard-coded key, so API is unused)
forecast = forecastio.manual('https://api.darksky.net/forecast/6a92bd8d0626c735970600815a0323a7/' + lat + ',' + longs + '')
byHour = forecast.hourly()
high_low = []
for currentData in forecast.daily().data:
high_low_list = []
high_low_list.extend((currentData.temperatureLow, currentData.temperatureHigh))
high_low.append(high_low_list)
forecast_array = []
high = str(int(round(high_low[0][1])))
low = str(int(round(high_low[0][0])))
#LOOP THROUGH HOURLY DATA
for hourlyData in byHour.data:
#CREATE ARRAY TO APPEND TO MASTER ARRAY
forecast_array_list = []
#GET TEMPERATURE TIME DATE AND SUMMARY
temp = str(int(round(hourlyData.temperature)))
time = hourlyData.time
time = time - datetime.timedelta(hours=5)
time = str(time).split()
test_time = time[1]
time_date = time[0]
test_time = test_time[:-3]
#CONVERT TIME TO STANDARD 12 HR TIME
time = timeConvert(test_time)
summary = hourlyData.summary
#APPEND VARIABLES TO SINGLE ARRAY CREATED EARLIER AND THEN APPEND TO MASTER ARRAY FOR 2D ARRAY
forecast_array_list.extend((temp,time,summary,time_date))
forecast_array.append(forecast_array_list)
#DELETE 25 ROWS AS WE DONT NEED ALL OF THEM
#for i in range(25):
#del forecast_array[-i]
print(forecast_array)
conn.send((forecast_array, high, low))
conn.close()
return(forecast_array, high, low)
#weather = weather()
#print(weather)
|
[
"noreply@github.com"
] |
noreply@github.com
|
da48be8998dbc65a10f28b1a195faa144e03a79c
|
88c3f6dd1e62da124a9718f745ced22e28491d62
|
/FWHM.py
|
de3fdf5d7a4bae1eb711aefd34c5a67f10beeb45
|
[] |
no_license
|
zhazhajust/THzScript
|
df79edfb72665074ec79684be17d8f63fdabaa49
|
005c4206c870aca430ffa794bfe3a485fff2b9c6
|
refs/heads/main
| 2023-07-15T18:43:43.169484
| 2021-08-20T13:11:29
| 2021-08-20T13:11:29
| 398,280,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
import numpy as np
import constant as const
import scipy.signal
from scipy.signal import chirp, find_peaks, peak_widths
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
load_dir = const.txtdir+"xt.npy"
xt = np.load(load_dir)
xt = xt.real
xtProfile = [] #* (0+0*1j)
#xt = xt.astype('complex128')
FWHM=[]
print(xt.shape)
x = np.arange(xt.shape[1])
for i in range(0,xt.shape[0],1000):
#print(xt[i].shape)
index=np.argmax(xt[i])
Xmax=xt[i,index]
#print(scipy.signal.hilbert(xt[i]))
    xtProfile.append(np.abs(scipy.signal.hilbert(xt[i])))  # envelope of the analytic signal; find_peaks/peak_widths below need a real-valued array
#f = UnivariateSpline(x , xtProfile[-1],s = 100)
#xtProfile[-1] = f(x)
peaks, _ = find_peaks(xtProfile[-1])
results_half = peak_widths(xtProfile[-1], peaks, rel_height=0.5)
#print(results_half[0])
try:
FWHM.append(results_half[0].max())
except:
FWHM.append(np.nan)
#print(np.where(xt[i]>Xmax/2,0,1))
#FWHM[i] = np.where(xt[i],Xmax/2)
print(FWHM)
|
[
"251338258@qq.com"
] |
251338258@qq.com
|
df199d45bbed5b2eb57ed382acd03991cfdeaff4
|
1ce4c43907ec04f1e797c317871a23bcec46c3c8
|
/scripts/utilities/toggle_spout_leds.py
|
85bc4fc4c907348d12eef83f0aa5ecb56a414877
|
[] |
no_license
|
m-col/reach
|
d9709593e1f0ec25786a4c4e601b14b26419ce96
|
8fabb4ce30ddb39260039ebea2d46a919dfbba14
|
refs/heads/master
| 2022-05-31T08:51:31.270970
| 2022-05-21T19:46:12
| 2022-05-21T19:46:12
| 169,552,311
| 1
| 2
| null | 2021-06-25T14:18:21
| 2019-02-07T10:12:30
|
Python
|
UTF-8
|
Python
| false
| false
| 155
|
py
|
#!/usr/bin/env python3
"""
Toggle the LEDs.
"""
from reach.backends.raspberrypi import Utilities
rpi = Utilities()
rpi.toggle_spout_leds()
rpi.cleanup()
|
[
"mcol@posteo.net"
] |
mcol@posteo.net
|
46fb30be2965828aa50fce9cd0eb5a1588be3c08
|
6b556d8096c14e7ee3b408a066808baf6de138b3
|
/main.py
|
df363c2c8228f28add5cad03c0f1b7a4f81480ba
|
[] |
no_license
|
hieumdd/gavin_stripe
|
a717be8c3c92331533394ea8f7fda72abf1699dd
|
e0a9d8004dedcf82291fbed31e3edf720c137690
|
refs/heads/master
| 2023-06-06T00:05:26.062564
| 2021-06-22T13:34:33
| 2021-06-22T13:34:33
| 379,280,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
from models import BalanceTransactions
def main(request):
request_json = request.get_json()
job = BalanceTransactions(start=request_json.get('start'), end=request_json.get('end'))
responses = {
"pipelines": "Stripe",
"results": [job.run()]
}
print(responses)
return responses
|
[
"hieumdd@gmail.com"
] |
hieumdd@gmail.com
|
c9d87460c9daf44323f8c8e853dd25cd21cb8670
|
35b96d09ff3b74e7f05cc0085dde129456d70ad9
|
/tornado/Day5/tornado_sqlalchemy.py
|
10d41141ef829b75b22f49e22e4892636e6990f9
|
[] |
no_license
|
yanghongfei/Python
|
ef0e54f98bc390ffd908d27f2ed306952b3bba46
|
f1103754e2752d38bcfd4357aa4b1a2318b33e31
|
refs/heads/master
| 2020-07-01T20:06:52.870910
| 2018-11-01T09:15:34
| 2018-11-01T09:15:34
| 74,260,335
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/30 10:49
# @Author : Fred Yang
# @File : tornado_sqlalchemy.py
# @Role : SQLAlchemy CRUD (create/read/update/delete) example
# imports
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from settings import DB_INFO
HOST=DB_INFO['host']
USER=DB_INFO['user']
PORT=DB_INFO['port']
PASSWD=DB_INFO['password']
DB_NAME= DB_INFO['db_name']
# declarative base class for ORM models:
Base = declarative_base()
# define the User model:
class User(Base):
    # table name:
    __tablename__ = 'user'
    # table columns:
    id = Column(String(100), primary_key=True)
    name = Column(String(200))
class Weibo(Base):
    __tablename__ = 'weibo'
    id = Column(String(100), primary_key=True)
    username = Column(String(100))  # username
    content = Column(String(1000))  # content
# initialize the database connection:
engine = create_engine('mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(USER, PASSWD, HOST, PORT, DB_NAME))
#print(engine)
# create the DBSession factory:
DBSession = sessionmaker(bind=engine)
Base.metadata.create_all(engine)  # create the tables (only needed on first run)
|
[
"yanghongfei@shinezone.com"
] |
yanghongfei@shinezone.com
|
58877d6cc37c753c161fcde79522be98f45bc50f
|
ce5fdb787770505eff0938418d0385d559bef670
|
/app.py
|
e072b4b10e7d6b702a4b23c115f6f51a23decd88
|
[] |
no_license
|
ActuallyAcey/nombin-backend
|
dfdde0e79ea98d30e9bde0fd6ad647bb6d50277e
|
5ad68cfcb967c4372c39d88f83438addf3d07cff
|
refs/heads/master
| 2020-05-18T12:42:43.937429
| 2019-05-14T01:00:11
| 2019-05-14T01:00:11
| 184,416,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,543
|
py
|
from flask import Flask, jsonify, request, send_from_directory
from flask_cors import CORS
from flask_pymongo import PyMongo
from os import path
import server_secrets
from flask_ngrok import run_with_ngrok
app = Flask(__name__)
run_with_ngrok(app) #TODO: Remove when deploying; used for exposing localhost as a temporary URL
# CONFIGURATIONS
app.url_map.strict_slashes = False # Fixes "/new/" vs "/new" error
app.config['MONGO_DBNAME'] = server_secrets.mongo_db_name
app.config['MONGO_URI'] = server_secrets.mongo_uri
mongo = PyMongo(app)
CORS(app) # CORS hates APIs being used like they normally are I guess?
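# Fetch a stored entry by its integer key and return it as JSON, stripping MongoDB's
# non-serialisable _id field first.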
@app.route('/<id>', methods = ['GET'])
def get_data(id):
check_id = int(id)
noms = mongo.db.nom_list
fetched_nom = noms.find_one({'key' : check_id})
if "_id" in fetched_nom:
print ("Detected id, stripping off.")
del fetched_nom["_id"]
return jsonify(fetched_nom)
@app.route('/new', methods = ['POST'])
def push_data():
data = request.get_json()
new_nom = {
'title': data['title'],
'text': data['text'],
'tags': data['tags'],
'is_private': data['is_private']}
noms = mongo.db.nom_list
key = noms.insert_one(new_nom).inserted_id
    return jsonify({'key': str(key)})  # inserted_id is an ObjectId, which jsonify cannot serialise directly
@app.route('/')
def landing_page():
return "Hello, World!"
@app.route('/favicon.ico')
def favicon():
return send_from_directory(path.join(app.root_path, 'static'), 'favicon.ico', mimetype='image/vnd.microsoft.icon')
if __name__ == '__main__':
app.run()
|
[
"ActuallyAcey@gmail.com"
] |
ActuallyAcey@gmail.com
|
0b5d3655b298036c53309389fcfc2864b3c16b97
|
aa535ed791407504aa24eac32da2e7b15f1b19b6
|
/iconsBked/settings.py
|
81da8c1f077645ad7b51f182cddb9a01a4fd98f7
|
[
"MIT"
] |
permissive
|
Epath-Pro/icons-bked
|
404698e6401a7d16b947d7494ca627f777f8e260
|
edc8cf57d4c6ee31369ef8d3751f3d89cc6d375a
|
refs/heads/main
| 2023-07-14T17:30:07.932842
| 2021-08-29T08:20:38
| 2021-08-29T08:20:38
| 390,057,494
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,285
|
py
|
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#7(tc6!^b464v(w7v04%)wm-=&fop(iukt#ttmbvd$d7b#vmzt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*','.vercel.app']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'icons'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'corsheaders.middleware.CorsMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'iconsBked.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'iconsBked.wsgi.application'
CORS_ORIGIN_ALLOW_ALL = True
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'MediumSB@2020!',
'HOST': 'db.pdilyfnochamrjvvkenn.supabase.co',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"elearningpath0620@gmail.com"
] |
elearningpath0620@gmail.com
|
37494b82fc1bfeefecdc11791dc68f84c757fca1
|
d4184f2468852c5312e3e7a7e2033f1700534130
|
/workflow/scripts/write_qc_metadata.py
|
ff0bfbcb948de8f68f439f1276ce7b4d6f9d7863
|
[] |
no_license
|
austintwang/ENCODE_scATAC_bingren_raw
|
d85b529cecd55e596c1d96ea370ade57cf418e81
|
9c1a6b23615a9d0f6d28f7e9ea6937755016b086
|
refs/heads/master
| 2023-08-17T04:03:46.276522
| 2021-09-19T00:40:32
| 2021-09-19T00:40:32
| 407,326,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,760
|
py
|
"""
Adapted from https://github.com/ENCODE-DCC/atac-seq-pipeline/blob/master/src/encode_lib_log_parser.py
"""
from collections import OrderedDict
import json
import os
def to_int(var):
try:
return int(var)
except ValueError:
return None
def to_float(var):
try:
return float(var)
except ValueError:
return None
def to_bool(var):
return var.lower() in set(['true', 't', 'ok', 'yes', '1'])
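# Parse the mitochondrial-fraction QC file: one tab-separated key/value pair per line,
# frac_* values parsed as floats and the remaining counts as ints.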
def parse_frac_mito_qc(txt):
result = OrderedDict()
with open(txt, 'r') as fp:
for line in fp.read().strip('\n').split('\n'):
k, v = line.split('\t')
if k.startswith('frac_'):
result[k] = float(v)
else:
result[k] = int(v)
return result
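# Parse `samtools flagstat` output into read counts, QC-failed counts and percentages.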
def parse_flagstat_qc(txt):
result = OrderedDict()
if not txt:
return result
total = ''
total_qc_failed = ''
duplicates = ''
duplicates_qc_failed = ''
mapped = ''
mapped_qc_failed = ''
mapped_pct = ''
paired = ''
paired_qc_failed = ''
read1 = ''
read1_qc_failed = ''
read2 = ''
read2_qc_failed = ''
paired_properly = ''
paired_properly_qc_failed = ''
paired_properly_pct = ''
with_itself = ''
with_itself_qc_failed = ''
singletons = ''
singletons_qc_failed = ''
singletons_pct = ''
diff_chroms = ''
diff_chroms_qc_failed = ''
delimiter_pass_fail = ' + '
with open(txt, 'r') as f:
for line in f:
if ' total ' in line:
if ' in total ' in line:
tmp1 = line.split(' in total ')
else:
tmp1 = line.split(' total ')
line1 = tmp1[0]
tmp1 = line1.split(delimiter_pass_fail)
total = tmp1[0]
total_qc_failed = tmp1[1]
if ' duplicates' in line:
tmp2 = line.split(' duplicates')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
duplicates = tmp2[0]
duplicates_qc_failed = tmp2[1]
if ' mapped (' in line:
tmp3 = line.split(' mapped (')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
mapped = tmp3_1[0]
mapped_qc_failed = tmp3_1[1]
line3_2 = tmp3[1]
tmp3_2 = line3_2.split(':')
mapped_pct = tmp3_2[0] # .replace('%','')
if ' paired in sequencing' in line:
tmp2 = line.split(' paired in sequencing')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
paired = tmp2[0]
paired_qc_failed = tmp2[1]
if ' read1' in line:
tmp2 = line.split(' read1')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
read1 = tmp2[0]
read1_qc_failed = tmp2[1]
if ' read2' in line:
tmp2 = line.split(' read2')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
read2 = tmp2[0]
read2_qc_failed = tmp2[1]
if ' properly paired (' in line:
tmp3 = line.split(' properly paired (')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
paired_properly = tmp3_1[0]
paired_properly_qc_failed = tmp3_1[1]
line3_2 = tmp3[1]
tmp3_2 = line3_2.split(':')
paired_properly_pct = tmp3_2[0] # .replace('%','')
if ' with itself and mate mapped' in line:
tmp3 = line.split(' with itself and mate mapped')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
with_itself = tmp3_1[0]
with_itself_qc_failed = tmp3_1[1]
if ' singletons (' in line:
tmp3 = line.split(' singletons (')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
singletons = tmp3_1[0]
singletons_qc_failed = tmp3_1[1]
line3_2 = tmp3[1]
tmp3_2 = line3_2.split(':')
singletons_pct = tmp3_2[0] # .replace('%','')
if ' with mate mapped to a different chr' in line:
tmp3 = line.split(' with mate mapped to a different chr')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
diff_chroms = tmp3_1[0]
diff_chroms_qc_failed = tmp3_1[1]
if total:
result['total_reads'] = int(total)
if total_qc_failed:
result['total_reads_qc_failed'] = int(total_qc_failed)
if duplicates:
result['duplicate_reads'] = int(duplicates)
if duplicates_qc_failed:
result['duplicate_reads_qc_failed'] = int(duplicates_qc_failed)
if mapped:
result['mapped_reads'] = int(mapped)
if mapped_qc_failed:
result['mapped_reads_qc_failed'] = int(mapped_qc_failed)
if mapped_pct:
if 'nan' not in mapped_pct and 'N/A' not in mapped_pct \
and 'NA' not in mapped_pct:
if '%' in mapped_pct:
mapped_pct = mapped_pct.replace('%', '')
result['pct_mapped_reads'] = float(mapped_pct)
else:
result['pct_mapped_reads'] = 100.0 * float(mapped_pct)
else:
result['pct_mapped_reads'] = 0.0
if paired:
result['paired_reads'] = int(paired)
if paired_qc_failed:
result['paired_reads_qc_failed'] = int(paired_qc_failed)
if read1:
result['read1'] = int(read1)
if read1_qc_failed:
result['read1_qc_failed'] = int(read1_qc_failed)
if read2:
result['read2'] = int(read2)
if read2_qc_failed:
result['read2_qc_failed'] = int(read2_qc_failed)
if paired_properly:
result['properly_paired_reads'] = int(paired_properly)
if paired_properly_qc_failed:
result['properly_paired_reads_qc_failed'] = int(
paired_properly_qc_failed)
if paired_properly_pct:
if 'nan' not in paired_properly_pct and \
'N/A' not in paired_properly_pct \
and 'NA' not in paired_properly_pct:
if '%' in paired_properly_pct:
paired_properly_pct = paired_properly_pct.replace('%', '')
result['pct_properly_paired_reads'] = float(
paired_properly_pct)
else:
result['pct_properly_paired_reads'] = 100.0 * \
float(paired_properly_pct)
else:
result['pct_properly_paired_reads'] = 0.0
if with_itself:
result['with_itself'] = int(with_itself)
if with_itself_qc_failed:
result['with_itself_qc_failed'] = int(with_itself_qc_failed)
if singletons:
result['singletons'] = int(singletons)
if singletons_qc_failed:
result['singletons_qc_failed'] = int(singletons_qc_failed)
if singletons_pct:
if 'nan' not in singletons_pct and 'N/A' not in singletons_pct \
and 'NA' not in singletons_pct:
if '%' in singletons_pct:
singletons_pct = singletons_pct.replace('%', '')
result['pct_singletons'] = float(singletons_pct)
else:
result['pct_singletons'] = 100.0 * float(singletons_pct)
else:
result['pct_singletons'] = 0.0
if diff_chroms:
result['diff_chroms'] = int(diff_chroms)
if diff_chroms_qc_failed:
result['diff_chroms_qc_failed'] = int(diff_chroms_qc_failed)
return result
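# Parse duplicate-marking statistics: try the Picard MarkDuplicates metrics format first,
# then fall back to sambamba markdup log output.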
def parse_dup_qc(txt):
result = OrderedDict()
if not txt:
return result
paired_reads = ''
unpaired_reads = ''
unmapped_reads = ''
unpaired_dupes = ''
paired_dupes = ''
paired_opt_dupes = ''
dupes_pct = ''
picard_log_found = False
# picard markdup
with open(txt, 'r') as f:
header = '' # if 'UNPAIRED_READS_EXAMINED' in header
content = ''
for line in f:
if header:
content = line.replace(',', '.')
picard_log_found = True
break
if 'UNPAIRED_READS_EXAMINED' in line:
header = line
if picard_log_found:
header_items = header.split('\t')
content_items = content.split('\t')
m = dict(zip(header_items, content_items))
unpaired_reads = m['UNPAIRED_READS_EXAMINED']
paired_reads = m['READ_PAIRS_EXAMINED']
unmapped_reads = m['UNMAPPED_READS']
unpaired_dupes = m['UNPAIRED_READ_DUPLICATES']
paired_dupes = m['READ_PAIR_DUPLICATES']
paired_opt_dupes = m['READ_PAIR_OPTICAL_DUPLICATES']
if 'PERCENT_DUPLICATION' in m:
dupes_pct = m['PERCENT_DUPLICATION']
else:
dupes_pct = '0'
else:
# sambamba markdup
with open(txt, 'r') as f:
for line in f:
if ' end pairs' in line:
tmp1 = line.strip().split(' ')
paired_reads = tmp1[1]
if ' single ends ' in line:
tmp1 = line.strip().split(' ')
unpaired_reads = tmp1[1]
unmapped_reads = tmp1[6]
if 'found ' in line:
tmp1 = line.strip().split(' ')
if paired_reads == '0':
unpaired_dupes = tmp1[1] # SE
paired_dupes = 0
else:
unpaired_dupes = 0
paired_dupes = str(int(tmp1[1])/2) # PE
if paired_reads == '0': # SE
dupes_pct = '{0:.2f}'.format(
float(unpaired_dupes)/float(unpaired_reads))
elif paired_reads:
dupes_pct = '{0:.2f}'.format(
float(paired_dupes)/float(paired_reads))
if unpaired_reads:
result['unpaired_reads'] = int(unpaired_reads)
if paired_reads:
result['paired_reads'] = int(paired_reads)
if unmapped_reads:
result['unmapped_reads'] = int(unmapped_reads)
if unpaired_dupes:
result['unpaired_duplicate_reads'] = int(unpaired_dupes)
if paired_dupes:
result['paired_duplicate_reads'] = int(paired_dupes)
if paired_opt_dupes:
result['paired_optical_duplicate_reads'] = int(paired_opt_dupes)
if dupes_pct:
result['pct_duplicate_reads'] = float(dupes_pct)*100.0
return result
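# Parse the single-line, tab-separated library-complexity table into fragment counts
# and the NRF/PBC1/PBC2 complexity metrics.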
def parse_lib_complexity_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as f:
for line in f:
arr = line.strip().split('\t')
break
result['total_fragments'] = to_int(arr[0])
result['distinct_fragments'] = to_int(arr[1])
    result['positions_with_one_read'] = to_int(arr[2])
    result['positions_with_two_reads'] = to_int(arr[3])  # was assigned to 'positions_with_one_read' twice, overwriting arr[2]
result['NRF'] = to_float(arr[4])
result['PBC1'] = to_float(arr[5])
result['PBC2'] = to_float(arr[6])
return result
def parse_picard_est_lib_size_qc(txt):
result = OrderedDict()
if not txt:
return result
with open(txt, 'r') as f:
val = f.readlines()[0].strip()
result['picard_est_lib_size'] = float(val)
return result
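# Build the common header fields (lab, award, quality_metric_of, aliases) shared by every
# quality-metric JSON object written below.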
def build_quality_metric_header(sample_data, config, data_path, out_path):
lab = config["dcc_lab"]
experiment = sample_data["experiment"]
replicate = sample_data["replicate_num"]
data_alias = f"{lab}:{experiment}${replicate}${os.path.basename(data_path)}"
alias = f"{lab}:{experiment}${replicate}${os.path.basename(out_path)}"
h = OrderedDict({
"lab": lab,
"award": config["dcc_award"],
"quality_metric_of": data_alias,
"aliases": [alias],
})
return h
def write_json(data, out_path):
with open(out_path, "w") as f:
json.dump(data, f, indent=4)
try:
out_group = snakemake.params['out_group']
sample_data = snakemake.params['sample_data']
data_path = snakemake.input['data_file']
config = snakemake.config
if out_group == "fastqs":
pass
elif out_group == "mapping":
alignment_stats_out = snakemake.output['alignment_stats']
samstats_raw = snakemake.input['samstats_raw']
a = parse_flagstat_qc(samstats_raw)
        h = build_quality_metric_header(sample_data, config, data_path, alignment_stats_out)  # out_path argument was missing
alignment_stats = h | a
write_json(alignment_stats, alignment_stats_out)
elif out_group == "filtering":
alignment_stats_out = snakemake.output['alignment_stats']
lib_comp_stats_out = snakemake.output['lib_comp_stats']
samstats_filtered = snakemake.input['samstats_filtered']
picard_markdup = snakemake.input['picard_markdup']
pbc_stats = snakemake.input['pbc_stats']
frac_mito = snakemake.input['frac_mito']
s = parse_flagstat_qc(samstats_filtered)
p = parse_picard_est_lib_size_qc(picard_markdup)
l = parse_lib_complexity_qc(pbc_stats)
m = parse_frac_mito_qc(frac_mito)
        h_align = build_quality_metric_header(sample_data, config, data_path, alignment_stats_out)  # out_path argument was missing
        h_lib = build_quality_metric_header(sample_data, config, data_path, lib_comp_stats_out)
        alignment_stats = h_align | s | m
        lib_comp_stats = h_lib | p | l
write_json(alignment_stats, alignment_stats_out)
write_json(lib_comp_stats, lib_comp_stats_out)
elif out_group == "fragments":
pass
elif out_group == "archr":
pass
except NameError:
pass
|
[
"austin.wang1357@gmail.com"
] |
austin.wang1357@gmail.com
|
974a2ad112f38e2fe813b9a8d77b4b96920d50d6
|
c46cb6ffb259124d38c7babcf1cadb6b3be5d594
|
/test.py
|
1fe28ca510a1cfc26ee15c91d9fa822f0e030447
|
[] |
no_license
|
sundshinerj/WWW
|
f04ee6719d07ee07fd2a8017bf0333e4670154fe
|
b83980d38eb94ffedfa193be6e2d264aabce7c9f
|
refs/heads/master
| 2021-08-22T03:55:29.131487
| 2017-11-29T06:44:10
| 2017-11-29T06:44:10
| 112,165,146
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#@author: sundsinerj
#@date: 2017/10/25
import MySQLdb
import time
import json
db = MySQLdb.connect("127.0.0.1","root","root","xedaojia_ams")
cursor = db.cursor()
# last_time = int(time.time())
# begien_time = int(last_time - 21600)
last_time = 1506412800
begien_time = 1506398400
sql = 'select clock,value_avg from trends_uint where clock>='+str(begien_time)+' and clock<='+str(last_time)+';'
cursor.execute(sql)
results = cursor.fetchall()
cursor.close()
db.commit()
db.close()
formats = '%H:%M:%S'
time.strftime(formats,time.localtime(1508941095))
list_date = []
#print(format(float(a)/float(b),'.2f'))
for i in range(len(results)):
data_clock = time.strftime(formats,time.localtime(int(results[i][0])))
data_value = format(float(int(results[i][1]))/1024/1024,'.2f')
data_value = str(data_value)
list_date.append({"clock": data_clock,"value_avg": data_value})
#list_date.append({data_clock: data_value})
print json.dumps(list_date)
|
[
"sundshinerj@gmail.com"
] |
sundshinerj@gmail.com
|
461f9252ada4badc3896e5dda3754393969d3ce1
|
42e9810116a4c726f2fb60a0133fc3b81670c0e1
|
/setup.py
|
41ff4cdca9f78be739cc42a2b42a8886a90aca79
|
[
"BSD-3-Clause"
] |
permissive
|
pinjasec/binarypinja
|
247e6a13f3b4f58fb16aab00a3649f575b428db6
|
106bb2c68ea530cbf99079749f1a7184cf21d480
|
refs/heads/master
| 2020-07-24T19:57:50.921387
| 2019-09-12T11:15:21
| 2019-09-12T11:15:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
from setuptools import find_packages, setup
setup(
name='pinja',
packages=find_packages(),
version='0.1.0',
entry_points={
'console_scripts':
'pinja = pinja.main:main'
},
description='A short description of the project.',
author='*pinja_sec',
license='BSD-3',
)
|
[
"poo_eix@protonmail.com"
] |
poo_eix@protonmail.com
|
43d6dc559a18868fb2fe56aa0c08b57bada0fce3
|
f80b0891fbd9bbda3532327ed8129406d00947b7
|
/IP/Lista-6/Comando de Repetição (while) – Roteiro Laboratório/3.py
|
49086875a6382fccd9177714b3408668a594bca2
|
[] |
no_license
|
viniciuspolux/UFPB
|
d7e7cd7101e90b008391605832404ba2ae6d2001
|
445fc953d9499e41e753c1c3e5c57937d93b2d59
|
refs/heads/master
| 2021-01-19T20:44:51.869087
| 2017-08-30T17:16:07
| 2017-08-30T17:16:07
| 101,222,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
numeroa,numerob=map(int,input("Digite dois números com um espaço entre eles =").split())  # prompt: "Enter two numbers separated by a space ="
x=1
while x < 2 :
if(numeroa > 0 and numerob > 0):
soma= numeroa + numerob
prod= numeroa * numerob
print("{} {}" .format(soma,prod))
else:
print("Você digitou um número inválido")
x += 1
|
[
"30605184+viniciuspolux@users.noreply.github.com"
] |
30605184+viniciuspolux@users.noreply.github.com
|
ba543c69df6097031a5e3dfae710324c39db3560
|
43671eb2be91782ac2096b9ec64e3a02b5aa9559
|
/elComandante/lowVoltage_agente.py
|
cab98c5f206c5377ec93e1596070cdea46f3e0d2
|
[] |
no_license
|
psi46/elComandante
|
fba87eda9c404de0ceed0ab9b81f5258f172325b
|
052066eda34d1e335128af214e55e330f9b6e855
|
refs/heads/master
| 2020-04-06T12:37:01.024183
| 2016-11-28T14:23:31
| 2016-11-28T14:23:31
| 28,905,523
| 1
| 4
| null | 2015-08-08T11:06:20
| 2015-01-07T08:25:52
|
FORTRAN
|
UTF-8
|
Python
| false
| false
| 7,695
|
py
|
## @file
## Implements the agente class lowVoltage_agente
## @ingroup elComandante
## @ingroup elAgente
import os
import subprocess
from myutils import process
import el_agente
def preexec():
os.setpgrp()
## Agente class that communicates with the lowVoltageClient
##
## This is the agente whose job is to communicte with the lowVoltageClient. It
## has a very simple task: To turn on and off the low voltage for the test
## setup.
##
## The low voltage device normally has to operate only before the test
## (lowVoltage_agente.prepare_test) and nothing has to be done during the
## actual testing, except for monitoring the device state.
##
## The action performed is normally only a power cycle which serves as a hard
## reset for the test hardware.
##
## The lowVoltag agente sends very high level commands to the lowVoltageClient such
## as SET OUTPUT ON, SET OUTPUT OFF, or EXEC POWERCYCLE and it does not have to know about the
## details of these operations. It expects that the client handles these
## things and that when it if finished, it will answer the FINISHED command
## with giving back FINISHED. Therefore, the agente waits for the operations
## of the client to finish. Since the client is a separate process, elComandante
## (of which this agente is a part) may continue to start or monitor other
## processes through other agentes.
##
## The configuration of the lowVoltag agente is made in the elComandante.conf
## and the elComandante.ini files. The elComandante.conf file contains information
## about the setup such as low voltage device type and device file name:
## @code
## lowVoltageSubscription: /lowVoltage
##
## [lowVoltageClient]
## lowVoltageType: yoctorelay
## @endcode
##
## The initialization only holds the parameter
## @code
## LowVoltageUse: True
## @endcode
## which enables or disables the lowVoltageAgente.
## @ingroup elComandante
## @ingroup elAgente
class lowVoltage_agente(el_agente.el_agente):
## Initializes the agente
## @param timestamp Timestamp from elComandante
## @param log Log handler
## @param sclient Subsystem client handle
def __init__(self, timestamp, log, sclient):
el_agente.el_agente.__init__(self, timestamp, log, sclient)
self.agente_name = "lowVoltageAgente"
self.client_name = "lowVoltageClient"
## Sets up the permanent configuration of the agente
##
## Determines settings such as low voltage device type
## from elComandante's permanent configuration.
## @param conf Configuration handle
## @return Boolean for success
def setup_configuration(self, conf):
## Type of the low voltage device, to be passed to the client
self.device_type = conf.get("lowVoltageClient", "lowVoltageType")
self.subscription = conf.get("subsystem", "lowVoltageSubscription")
## Directory for the log files
self.logdir = conf.get("Directories", "dataDir") + "/logfiles/"
return True
## Sets up the initialization of the agente
##
## Determines settings such as whether the low voltage device is used
## for this run from elComandante's run time configuration
## (initialization)
## @param init Initialization handle
## @return Boolean for success
def setup_initialization(self, init):
self.active = init.getboolean("LowVoltage", "LowVoltageUse")
return True
## Checks whether the lowVoltageClient is running
##
## Checks whether the lowVoltageClient is running by finding
## the PID file and checking the process.
## @return Boolean, whether the client is running or not
def check_client_running(self):
if not self.active:
return False
if process.check_process_running(self.client_name + ".py"):
raise Exception("Another %s is already running. Please close this client first." % self.client_name)
return True
return False
## Starts the lowVoltageClient
##
## If enabled, starts the lowVoltageClient with the parameters read from the
## configuration.
## @param Timestamp
## @return Boolean for success
def start_client(self, timestamp):
if not self.active:
return True
command = "xterm +sb -geometry 120x20-0+300 -fs 10 -fa 'Mono' -e '"
command += "cd ../lowVoltageClient && python ../lowVoltageClient/lowVoltageClient.py "
command += "--timestamp {0:d} ".format(timestamp)
command += "--directory {0:s} ".format(self.logdir)
command += "--device-type {0:s}'".format(self.device_type)
self.log << "Starting " + self.client_name + " ..."
## Child process handle for the lowVoltageClient
self.child = subprocess.Popen(command, shell = True, preexec_fn = preexec)
return True
## Subscribes to the subsystem channel where the lowVoltageClient listening
##
## Enables listening to the subsystem channel that the lowVoltageClient is
## receiving commands on
## @return None
def subscribe(self):
if (self.active):
self.sclient.subscribe(self.subscription)
## Checks whether the subsystem channel is open and the server is responding
## @return Boolean, whether it is responding or not
def check_subscription(self):
if (self.active):
return self.sclient.checkSubscription(self.subscription)
return True
## Asks the lowVoltageClient to exit by sending it a command through the subsystem
## @return Boolean for success
def request_client_exit(self):
if not self.active:
return True
self.sclient.send(self.subscription, ":EXIT\n")
return False
## Tries to kill the lowVoltageClient by sending the SIGTERM signal
## @return Boolean for success
def kill_client(self):
if not self.active:
return True
try:
self.child.kill()
except:
pass
return True
## Prepares a test with a given environment
##
## Powercycles the low voltage of the test setup to hard reset
## all devices
## @param test The current test
## @param environment The environment the test should run in
## @return Boolean for success
def prepare_test(self, test, environment):
# Run before a test is executed
if not self.active:
return True
self.sclient.send(self.subscription, ":EXEC:POWERCYCLE\n")
self.set_pending()
return True
## Function to execute the test which is disregarded by this agente
## @return Always returns True
def execute_test(self):
# Runs a test
if not self.active:
return True
return True
## Function to clean up the test which is disregarded by this agente
##
## Turns of the beam. This may change in the future.
## @return Boolean for success
def cleanup_test(self):
# Run after a test has executed
if not self.active:
return True
return True
## Final test cleanup
## @return Boolean for success
def final_test_cleanup(self):
# Run after a test has executed
if not self.active:
return True
self.sclient.send(self.subscription, ":EXEC:POWERCYCLE\n")
self.set_pending()
return True
## Checks whether the client is finished or has an error
##
## Checks whether the client is finished or has an error. Even if
## no action is pending from the client it may happen that the state
## of the low voltage device changes. An error is received in this case
## and an exception is thrown.
## @return Boolean, whether the client has finished or not
def check_finished(self):
if not self.active:
return True
while True:
packet = self.sclient.getFirstPacket(self.subscription)
if packet.isEmpty():
break
if self.pending and "FINISHED" in packet.data.upper():
self.pending = False
elif "ERROR" in packet.data.upper():
self.pending = False
raise Exception("Error from %s!" % self.client_name)
return not self.pending
## Asks whether the client is finished and sets the agente state
## to pending
## @return None
def set_pending(self):
self.sclient.send(self.subscription, ":FINISHED\n")
self.pending = True
|
[
"mrossini@phys.ethz.ch"
] |
mrossini@phys.ethz.ch
|
57c22cd8876ae6bdf928f7d58919d905f86c43a5
|
d343b6f47b9241f3822845c6627b82c9f98b95c4
|
/core/apps.py
|
ab6efcf7bc66b12c7822555e95a5755b283f13ad
|
[] |
no_license
|
ivan371/kiber
|
11f23171bd51b29d210c44db0784b6caea31bdd6
|
39d7834c5e4e5497061748bd66232936300adda4
|
refs/heads/master
| 2021-05-05T06:30:55.836149
| 2020-06-16T20:37:59
| 2020-06-16T20:37:59
| 118,800,832
| 0
| 0
| null | 2020-06-16T20:38:00
| 2018-01-24T17:51:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 144
|
py
|
from django.apps import AppConfig
class CoreConfig(AppConfig):
name = 'core'
def ready(self):
from .views import UserViewSet
|
[
"ivan@DESKTOP-GM6Q430.localdomain"
] |
ivan@DESKTOP-GM6Q430.localdomain
|
616c58d38ba1341e31e3110eb28caf617d8d0c51
|
7f045311cf07d86c21c3e32649a0d3343351d7b5
|
/sky/c3.py
|
d661310b92f5e4c9a50397604a8e3141cb7587a1
|
[
"BSD-2-Clause"
] |
permissive
|
amititash/sky
|
2c757ec572902f12a6a550597044101d6e7cdbc1
|
ffaf33e46825522bb87654593a0ca77c095c98b0
|
refs/heads/master
| 2020-04-14T22:50:55.264011
| 2019-02-06T05:11:22
| 2019-02-06T05:11:22
| 164,180,289
| 0
| 0
|
NOASSERTION
| 2019-01-05T04:02:40
| 2019-01-05T04:02:40
| null |
UTF-8
|
Python
| false
| false
| 2,190
|
py
|
#!/usr/bin/env python3
# --------- 1. Setup ----------------------------------------------
PROJECT_NAME = 'sophonone'
import os, base64, re, logging
from elasticsearch import Elasticsearch
from sky.crawler_services import CrawlElasticSearchService
from sky.crawler_plugins import CrawlElasticSearchPluginNews
import json, sys
import json
import pika
'''
# Parse the auth and host from env:
bonsai = 'https://5bgygw52r4:637c8qay66@cj-test-9194042377.us-west-2.bonsaisearch.net' #os.environ['BONSAI_URL']
auth = re.search('https\:\/\/(.*)\@', bonsai).group(1).split(':')
host = bonsai.replace('https://%s:%s@' % (auth[0], auth[1]), '')
# Connect to cluster over SSL using auth for best security:
es_header = [{
'host': host,
'port': 443,
'use_ssl': True,
'http_auth': (auth[0],auth[1])
}]
es = Elasticsearch(es_header)
'''
es = Elasticsearch([{'host': '886f099c.ngrok.io', 'port': 80}])
# Instantiate the new Elasticsearch connection:
cs = CrawlElasticSearchService(PROJECT_NAME, es, CrawlElasticSearchPluginNews)
connection = pika.BlockingConnection(pika.URLParameters('amqp://titash:test123@54.175.53.47/paays_products_cj'))
channel = connection.channel()
channel.queue_declare(queue='crawl')
#code starts here
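# RabbitMQ consumer callback: each message body is a JSON crawl configuration, which is
# saved under the 'testcrawl' plugin and then crawled.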
def goCrawl(ch, method, properties, msg):
item = msg.decode('utf8')
item = json.loads(item)
print(item)
#es = Elasticsearch([{'host': '886f099c.ngrok.io', 'port': 80}])
# Instantiate the new Elasticsearch connection:
#cs = CrawlElasticSearchService(PROJECT_NAME, es, CrawlElasticSearchPluginNews)
# --------- 4. Start crawling --------------------------------------
#from sky.configs import PRODUCTION_CRAWL_CONFIG
#default = cs.get_crawl_plugin('default')
#default.save_config(PRODUCTION_CRAWL_CONFIG)
print("****crawling...",item["sku"])
#one_config = json.load(item)
#configname = item['sku']
    four = cs.get_crawl_plugin('testcrawl')  # the service exposes plugins via get_crawl_plugin (see the commented example above); indexing the service directly would not resolve a plugin
four.save_config(item)
four.run()
#Execution starts from here
channel.basic_consume(goCrawl,
queue='crawl',
no_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
|
[
"amititash@gmail.com"
] |
amititash@gmail.com
|
1f778b04e332c6fb1e5a8be955cd628bea529f50
|
36c546160a70228e28f216e841453a55a4b665bb
|
/cli_common.py
|
32a2ebcb79b2c99a1bb4fc6b64bbe49c2839a7ee
|
[] |
no_license
|
tpietruszka/ulmfit_experiments
|
b4718df389478a12d920f72cdca476797d4397fc
|
9385cd7d4285f93a2f220bc9fd5095051879a49a
|
refs/heads/master
| 2020-04-21T18:18:01.633887
| 2020-04-07T17:56:19
| 2020-04-07T17:56:19
| 169,764,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
import pathlib
import os
import sys
os.environ['QT_QPA_PLATFORM'] = 'offscreen' # prevents some fastai imports from causing a crash
try:
from ulmfit_experiments import experiments
except ModuleNotFoundError:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from ulmfit_experiments import experiments
results_dir = (pathlib.Path(__file__).parent / 'trained_models').resolve()
|
[
"tomek.pietruszka@gmail.com"
] |
tomek.pietruszka@gmail.com
|
cafe602ff007a80036f57b301bc84dd23e3e5581
|
eabe529cbf8a6ae6b0ae476961d69182a1827842
|
/parlai/tasks/tasks.py
|
421987f23ab2036c478bf84fb049b4e23a1fb35f
|
[] |
no_license
|
JiaQiSJTU/ResponseSelection
|
b3ce8a15129e23830ba3a7311d0b2eb831217163
|
660732f7cc9c0c419a3cf26c85430eb258e5f1f0
|
refs/heads/master
| 2023-07-04T18:59:47.498626
| 2021-09-01T08:04:54
| 2021-09-01T08:04:54
| 297,997,764
| 28
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
#!/usr/bin/env python3
"""
Helper functions for defining the set of tasks in ParlAI.
The actual task list and definitions are in the file task_list.py
"""
from .task_list import task_list
from collections import defaultdict
def _preprocess(name):
return name.lower().replace('-', '')
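# Build two lookup tables from the task list: `tasks` maps a normalised task id to its entry,
# and `tags` maps a normalised tag to every task that carries it.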
def _build(task_list):
tasks = {}
tags = defaultdict(list)
for t in task_list:
task = _preprocess(t['id'])
tasks[task] = [t]
for j in t['tags']:
tag = _preprocess(j)
if tag in tasks:
raise RuntimeError('tag ' + tag + ' is the same as a task name')
tags[tag].append(t)
return tasks, tags
def _id_to_task_data(t_id):
t_id = _preprocess(t_id)
if t_id in tasks:
# return the task associated with this task id
return tasks[t_id]
elif t_id in tags:
# return the list of tasks for this tag
return tags[t_id]
else:
# should already be in task form
raise RuntimeError('could not find tag/task id')
def _id_to_task(t_id):
if t_id[0] == '#':
# this is a tag, so return all the tasks for this tag
return ','.join((d['task'] for d in _id_to_task_data(t_id[1:])))
else:
# this should already be in task form
return t_id
def ids_to_tasks(ids):
if ids is None:
raise RuntimeError(
'No task specified. Please select a task with ' + '--task {task_name}.'
)
return ','.join((_id_to_task(i) for i in ids.split(',') if len(i) > 0))
# Build the task list from the json file.
tasks, tags = _build(task_list)
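# Hypothetical usage sketch (actual names come from task_list.py):
# ids_to_tasks('squad,#QA') -> 'squad,' followed by the comma-joined 'task' fields of all entries tagged QA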
|
[
"Jia_qi_0217@163.com"
] |
Jia_qi_0217@163.com
|
c52152bc18b44d48c909e1256ce9ae3b6d37647f
|
310a141e68d730f2e3a0dee21b14cca65883e521
|
/courses/migrations/0008_course_passed.py
|
1ec04deca6b315eb29f330e1aa93eb3abec9e6b9
|
[] |
no_license
|
sokogfb/edu_fcih
|
5c2eb883b88d70a34c7f21487527f18a8f6a26b2
|
c480b448350226a1727f1d155e99dbe1ca6d30e7
|
refs/heads/master
| 2021-09-12T14:53:38.484104
| 2018-04-17T23:13:10
| 2018-04-17T23:13:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# Generated by Django 2.0.3 on 2018-04-02 19:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0007_term_active'),
]
operations = [
migrations.AddField(
model_name='course',
name='passed',
field=models.BooleanField(default=False),
),
]
|
[
"abdelrahman.sico_931@fci.helwan.edu.eg"
] |
abdelrahman.sico_931@fci.helwan.edu.eg
|
c0d41c468fe46eae60304a6e4170b7feb432e5cd
|
973eed0d6173ab95b3cea9508bd20516ef84a56d
|
/services/gmaps.py
|
3a4b1b96e5fbcc5a7452e5331952955d404e7893
|
[
"Apache-2.0"
] |
permissive
|
FenrirUnbound/kessel-run
|
213a71d94b74a518a6a92b3fb5929e1ae0e71997
|
0b39ec4aead0ee1397f46a0893166c433fe4f85b
|
refs/heads/master
| 2020-12-07T15:24:13.924077
| 2017-07-02T02:56:58
| 2017-07-02T02:56:58
| 95,517,943
| 0
| 0
| null | 2017-07-02T02:56:59
| 2017-06-27T04:36:54
|
Python
|
UTF-8
|
Python
| false
| false
| 774
|
py
|
import googlemaps
import time
from map_formatter import MapFormatter
from models.route import Route
from models.secret import Secret
class Gmaps(object):
def __init__(self):
self.gmaps = googlemaps.Client(key=Secret.token())
self.route_data = Route()
self.formatter = MapFormatter()
def lookup_travel_time(self, route_id):
desired_route = self.route_data.get(route_id)
now = int(time.time())
map_data = self.gmaps.directions(
alternatives=True,
departure_time=now,
destination=desired_route['destination'],
mode='driving',
origin=desired_route['origin'],
units='imperial'
)
return self.formatter.format(content=map_data)
|
[
"aeneascorrupt@gmail.com"
] |
aeneascorrupt@gmail.com
|
d0fae8b7c4d33afb588c1fd017fe389b750b6135
|
547ac7b09add2e24146f59fa4377188cd59419fb
|
/reprozip/pack/vt_workflow/workflow_utils.py
|
316ba09a4b52958588d151e1ded15d6b8c4f1937
|
[
"BSD-3-Clause"
] |
permissive
|
fchirigati/reprozip
|
44b274fec6d9558a97c85e7eb0678730702ccfe0
|
fb7b4e18a6938fdb10b6fe8e0fcd042ce4547375
|
refs/heads/master
| 2020-05-18T05:47:17.156691
| 2018-06-19T22:39:27
| 2018-06-19T22:39:27
| 10,867,693
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,245
|
py
|
###############################################################################
##
## Copyright (C) 2012-2013, NYU-Poly.
## All rights reserved.
## Contact: fchirigati@nyu.edu
##
## This file is part of ReproZip.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of NYU-Poly nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
class WfObject:
"""
WfObject represents a VisTrails workflow object.
"""
def __init__(self):
"""
Init method for WfObject
"""
self.__sub_element = None
def get_sub_element(self):
return self.__sub_element
def set_sub_element(self, value):
self.__sub_element = value
sub_element = property(get_sub_element, set_sub_element, None, None)
class Module(WfObject):
"""
Module represents a module in the VisTrails workflow.
"""
def __init__(self, id, cache, name, namespace, package, version):
"""
Init method for Module.
-> id is the unique id of the object;
-> cache indicates whether the module is cacheable or not;
-> name is the name of the module;
-> namespace is the namespace of the module;
-> package is the package that contains the module;
-> version is the version of the package
"""
WfObject.__init__(self)
self.__id = id
self.__cache = cache
self.__name = name
self.__namespace = namespace
self.__package = package
self.__version = version
def get_id(self):
return self.__id
def get_cache(self):
return self.__cache
def get_name(self):
return self.__name
def get_namespace(self):
return self.__namespace
def get_package(self):
return self.__package
def get_version(self):
return self.__version
id = property(get_id, None, None, None)
cache = property(get_cache, None, None, None)
name = property(get_name, None, None, None)
namespace = property(get_namespace, None, None, None)
package = property(get_package, None, None, None)
version = property(get_version, None, None, None)
class Annotation(WfObject):
"""
Annotation represents an annotation in an object of the VisTrails workflow.
"""
def __init__(self, id, wf_object, key, value):
"""
Init method for Annotation.
-> id is the unique id of the annotation;
-> wf_object is the object from the workflow with which the annotation
is associated;
-> key is the key of the annotation;
-> value is the value of the annotation
"""
WfObject.__init__(self)
self.__id = id
self.__wf_object = wf_object
self.__key = key
self.__value = value
def get_id(self):
return self.__id
def get_wf_object(self):
return self.__wf_object
def get_key(self):
return self.__key
def get_value(self):
return self.__value
id = property(get_id, None, None, None)
wf_object = property(get_wf_object, None, None, None)
key = property(get_key, None, None, None)
value = property(get_value, None, None, None)
class Location(WfObject):
"""
Location represents the location of a VisTrails module.
"""
def __init__(self, id, module, x, y):
"""
Init method for Location.
-> id is the unique id of the object;
-> module is the module with which the location is associated;
-> x is the position in the x axis;
-> y is the position in the y axis
"""
WfObject.__init__(self)
self.__id = id
self.__module = module
self.__x = x
self.__y = y
def get_id(self):
return self.__id
def get_module(self):
return self.__module
def get_x(self):
return self.__x
def get_y(self):
return self.__y
id = property(get_id, None, None, None)
module = property(get_module, None, None, None)
x = property(get_x, None, None, None)
y = property(get_y, None, None, None)
class Function(WfObject):
"""
Function represents a function of a VisTrails module.
"""
def __init__(self, id, module, name, pos):
"""
Init method for Function.
-> id is the unique id of the object;
-> module is the module with which the function is associated;
-> name is the name of the function;
-> pos is the position (ordering index) of the function within the module
"""
WfObject.__init__(self)
self.__id = id
self.__module = module
self.__name = name
self.__pos = pos
def get_id(self):
return self.__id
def get_module(self):
return self.__module
def get_name(self):
return self.__name
def get_pos(self):
return self.__pos
id = property(get_id, None, None, None)
module = property(get_module, None, None, None)
name = property(get_name, None, None, None)
pos = property(get_pos, None, None, None)
class Parameter(WfObject):
"""
Parameter represents the parameter for a function in a VisTrails workflow.
"""
def __init__(self, id, function, alias, name, pos, type, value):
"""
Init method for Parameter.
-> id is the unique id of the object;
-> function is the function with which the parameter is associated;
-> alias is an alias for the parameter;
-> name is the name of the parameter;
-> pos is the position (ordering index) of the parameter within the function;
-> type represents the type of the parameter;
-> value is the value of the parameter, respecting the type
"""
WfObject.__init__(self)
self.__id = id
self.__function = function
self.__alias = alias
self.__name = name
self.__pos = pos
self.__type = type
self.__value = value
def get_id(self):
return self.__id
def get_function(self):
return self.__function
def get_alias(self):
return self.__alias
def get_name(self):
return self.__name
def get_pos(self):
return self.__pos
def get_type(self):
return self.__type
def get_value(self):
return self.__value
id = property(get_id, None, None, None)
function = property(get_function, None, None, None)
alias = property(get_alias, None, None, None)
name = property(get_name, None, None, None)
pos = property(get_pos, None, None, None)
type = property(get_type, None, None, None)
value = property(get_value, None, None, None)
class Connection(WfObject):
"""
Connection represents a connection in a VisTrails workflow.
"""
def __init__(self, id, source, dst):
"""
Init method for Connection.
-> id is the unique id of the object;
-> source is the source port of the connection;
-> dst is the destination port of the connection
"""
WfObject.__init__(self)
self.__id = id
self.__source = source
self.__dst = dst
def get_id(self):
return self.__id
def get_source(self):
return self.__source
def get_dst(self):
return self.__dst
id = property(get_id, None, None, None)
source = property(get_source, None, None, None)
dst = property(get_dst, None, None, None)
class Port(WfObject):
"""
Port represents a port in a VisTrails connection.
"""
def __init__(self, id, module, name, signature):
"""
Init method for Port.
-> id is the unique id of the object;
-> module is the module with which the port is associated;
-> name is the name of the port;
-> signature is the signature of the port
"""
WfObject.__init__(self)
self.__id = id
self.__module = module
self.__name = name
self.__signature = signature
def get_id(self):
return self.__id
def get_module(self):
return self.__module
def get_name(self):
return self.__name
def get_signature(self):
return self.__signature
id = property(get_id, None, None, None)
module = property(get_module, None, None, None)
name = property(get_name, None, None, None)
signature = property(get_signature, None, None, None)
|
[
"fernando.chirigati@gmail.com"
] |
fernando.chirigati@gmail.com
|
03cc688115e56b3caacc8b1bcb0a2acf97cca126
|
89eec81430daea547822c26cf637bcd9db5e57ad
|
/pols/migrations/0005_question_number.py
|
4d1e7c4c89dfaeeeb276ec140daf28cdb8c5dd7a
|
[] |
no_license
|
sanlem/teston
|
5bd2f01ef4dc4f3cfef8189d6ea259af78fe4388
|
89c21ea745b1b517c589caf5688c7a856548d904
|
refs/heads/master
| 2020-12-11T22:17:23.943699
| 2015-06-18T15:44:08
| 2015-06-18T15:44:08
| 36,315,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pols', '0004_auto_20150525_1549'),
]
operations = [
migrations.AddField(
model_name='question',
name='number',
field=models.IntegerField(default=1),
),
]
|
[
"vfranchook@gmail.com"
] |
vfranchook@gmail.com
|
0ae601e2d21d74e13bbdd78607d416c058eed97a
|
5d4def230bad7174e2a2352d277d391dfa118694
|
/vocab.py
|
3ae126b0cb1ca0de8128bb2c779c875720f8c902
|
[] |
no_license
|
hitercs/biLSTM-SlotFilling
|
a733b2df0e65834b6c6a91d609daa60c73c596ca
|
8a0c1baed51e668e7fc4119f69ca6491e7328e7c
|
refs/heads/master
| 2020-03-14T07:20:50.869860
| 2018-05-01T10:35:18
| 2018-05-01T10:35:18
| 131,502,870
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,673
|
py
|
#-*- encoding: utf-8 -*-
import codecs
import settings
from util import Util
class BiVocab(object):
def __init__(self, src_vocab, trg_vocab):
self.src_vocab = src_vocab
self.trg_vocab = trg_vocab
self.src_vocab_size = src_vocab.vocab_size
self.trg_vocab_size = trg_vocab.vocab_size
self.pad_id = self.trg_vocab.get_idx(settings.PAD)
self.unk_id = self.trg_vocab.get_idx(settings.UNK)
def get_src_word(self, idx):
return self.src_vocab.get_word(idx)
def get_trg_word(self, idx):
return self.trg_vocab.get_word(idx)
def get_src_idx(self, w):
return self.src_vocab.get_idx(w)
def get_trg_idx(self, w):
return self.trg_vocab.get_idx(w)
class Vocab(object):
def __init__(self, vocab_size, vocab_fn):
self.word2idx = dict()
self.idx2word = dict()
self.vocab_size = vocab_size
self.build_vocab(vocab_fn)
def build_vocab(self, vocab_fn):
with codecs.open(vocab_fn, encoding='utf-8', mode='r', buffering=settings.read_buffer_size) as fp:
for line in fp:
word, idx, _ = line.strip().split()
Util.add_vocab(self.word2idx, word, int(idx))
Util.add_vocab(self.idx2word, int(idx), word)
def get_idx(self, word):
if word not in self.word2idx:
return self.word2idx[settings.UNK]
if self.word2idx[word] > self.vocab_size - 1:
return self.word2idx[settings.UNK]
return self.word2idx[word]
def get_word(self, idx):
if idx > self.vocab_size - 1:
return settings.UNK
return self.idx2word[idx]
|
[
"schen@ir.hit.edu.cn"
] |
schen@ir.hit.edu.cn
|
58530dd0f15e00fa4623a19b9378cc34b6dd4111
|
e5937e1305b6f1a68c98bf85d479f2cc46271f6d
|
/First.py
|
8dccbe53121fefa238ea688a09fb13622b1be489
|
[] |
no_license
|
sishen123258/python
|
14b974cc078e9b2f6e0a15561a071da7acbccd91
|
3e1fde3289f018979f9b67799fa2daee8920beaa
|
refs/heads/master
| 2021-04-09T16:51:28.129461
| 2015-05-29T07:38:29
| 2015-05-29T07:38:29
| 35,621,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
__author__ = 'Yue'
#
# class MyFirstPyClass:
# "first py class"
#
# def _init_(self, pm, ph):
# self.name = pm
# self.phone = ph
# print("self created:", self.name);
#
# def updatePhone(self, ph):
# self.phone = ph
# print("self phone changed:", self.phone);
#
class Person:
def __init__(self, first_name):
self.first_name = first_name
# Getter function
@property
def first_name(self):
return self._first_name
# Setter function
@first_name.setter
def first_name(self, value):
if not isinstance(value, str):
raise TypeError('Expected a string')
self._first_name = value
# Deleter function (optional)
@first_name.deleter
def first_name(self):
raise AttributeError("Can't delete attribute")
p=Person("tong")
print(p.first_name)
|
[
"1144299328@qq.com"
] |
1144299328@qq.com
|
5d5b6258a717833464801f98683c23cb6435e4f2
|
25ec545186596ea20ade231e1fa2a83faac0aa33
|
/penncycle/app/models.py
|
f5690000a90aa27fd65ef1dfff8d9f99576c6dfa
|
[] |
no_license
|
rattrayalex/PennCycle
|
a0f43ef7a1390fea3016ed5ac96cca5ab431e8e1
|
dbcfa68c7bf9c928c559ba310e23be12e01ad998
|
refs/heads/master
| 2020-04-28T22:40:43.747751
| 2013-05-14T22:01:18
| 2013-05-14T22:01:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,559
|
py
|
import datetime
from django.core.mail import send_mail
from django_localflavor_us.models import PhoneNumberField
from django.template.defaultfilters import slugify
from django.db import models
from django.core.validators import RegexValidator
from south.modelsinspector import add_introspection_rules
# Necessary because South hasn't been updated since localflavors was broken up.
add_introspection_rules([], ['django_localflavor_us\.models\.PhoneNumberField'])
GENDER_CHOICES = (
('M', 'Male'),
('F', 'Female'),
)
GRAD_YEAR_CHOICES = (
('2016', '2016'),
('2015', '2015'),
('2014', '2014'),
('2013', '2013'),
('2012', '2012'),
('grad', 'grad student'),
('faculty', 'faculty'),
('staff', 'staff'),
('guest', 'guest'),
)
LIVING_LOCATIONS = (
('Hill', 'Hill'),
('KCECH', 'KCECH'),
('Riepe', 'Riepe'),
('Fisher', 'Fisher'),
('Ware', 'Ware'),
('Harrison', 'Harrison'),
('Harnwell', 'Harnwell'),
('Rodin', 'Rodin'),
('Stouffer', 'Stouffer'),
('Mayer', 'Mayer'),
('Du Bois', 'Du Bois'),
('Gregory', 'Gregory'),
('Sansom', 'Sansom'),
('Off Campus', 'Off Campus'),
)
SCHOOL_CHOICES = (
('C', 'College'),
('W', 'Wharton'),
('E', 'SEAS'),
('N', 'Nursing'),
('ANN', 'Annenberg'),
('DEN', 'Dental'),
('DES', 'Design'),
('GSE', 'Education'),
('LAW', 'Law'),
('MED', 'Medicine'),
('SPP', 'Social Policy & Practice'),
('VET', 'Veterinary'),
('O', 'Other or N/A'),
)
PAYMENT_CHOICES = (
('cash', 'cash'),
('penncash', 'penncash'),
('bursar', 'bursar'),
('credit', 'credit'),
('group', 'group'),
('stouffer', 'stouffer'),
('free', 'free'),
('other', 'other'),
('fisher', 'fisher')
)
class Plan(models.Model):
name = models.CharField(max_length=100)
cost = models.IntegerField()
start_date = models.DateField()
end_date = models.DateField()
description = models.TextField(max_length=150, default="Details coming soon!")
banner = models.CharField(max_length=50, default="")
def __unicode__(self):
return self.name + ': $' + str(self.cost)
class Payment(models.Model):
amount = models.DecimalField(decimal_places=2, max_digits=6)
plan = models.ForeignKey(
Plan, default=1, limit_choices_to={
'end_date__gte': datetime.date.today(),
}
)
student = models.ForeignKey('Student', related_name="payments")
date = models.DateField(auto_now_add=True)
satisfied = models.BooleanField(default=False)
payment_type = models.CharField(max_length=100, choices=PAYMENT_CHOICES, blank=True, null=True)
status = models.CharField(max_length=100, default='available')
def save(self):
super(Payment, self).save()
self.student.paid = self.student.paid_now
def __unicode__(self):
return str(self.student) + ' for ' + str(self.plan)
class Manufacturer(models.Model):
name = models.CharField(max_length=30)
address = models.CharField(max_length=50, blank=True)
city = models.CharField(max_length=60, blank=True)
country = models.CharField(max_length=50, blank=True)
website = models.URLField(blank=True)
email = models.EmailField(blank=True)
def __unicode__(self):
return self.name
class Student(models.Model):
name = models.CharField(max_length=100)
email = models.EmailField()
phone = PhoneNumberField()
penncard = models.CharField(max_length=8, validators=[RegexValidator('\d{8}')], unique=True)
last_two = models.CharField(max_length=2, validators=[RegexValidator('\d{2}')], blank=True, null=True)
gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
grad_year = models.CharField(max_length=50, choices=GRAD_YEAR_CHOICES)
join_date = models.DateField(default=datetime.date.today)  # pass the callable so the default is evaluated per save, not once at import time
school = models.CharField(max_length=100, choices=SCHOOL_CHOICES)
major = models.CharField(max_length=50, blank=True)
living_location = models.CharField(max_length=100, choices=LIVING_LOCATIONS)
waiver_signed = models.BooleanField(default=False)
paid = models.BooleanField(default=False)
payment_type = models.CharField(max_length=100, choices=PAYMENT_CHOICES, blank=True, null=True)
staff = models.NullBooleanField(default=False)
plan = models.ManyToManyField('Plan', blank=True, null=True)
@property
def paid_now(self):
return len(self.current_payments) > 0
@property
def current_payments(self):
today = datetime.date.today()
return self.payments.filter(
satisfied=True,
plan__start_date__lte=today,
plan__end_date__gte=today,
)
@property
def can_ride(self):
if len(self.current_payments.filter(status='available')) > 0 and self.waiver_signed:
return True
else:
return False
def __unicode__(self):
return u'%s %s' % (self.name, self.penncard)
class Bike(models.Model):
bike_name = models.CharField(max_length=100, unique=True)
manufacturer = models.ForeignKey(Manufacturer)
purchase_date = models.DateField()
color = models.CharField(max_length=30, blank=True)
status = models.CharField(max_length=100, default='available')
serial_number = models.CharField(max_length=100, blank=True)
tag_id = models.CharField(max_length=100, blank=True)
key_serial_number = models.CharField(max_length=100, blank=True)
combo = models.CharField(max_length=4, blank=True)
combo_update = models.DateField()
@property
def knows_combo(self):
rides = self.rides.filter(checkout_time__gt=self.combo_update)
return list(set([ride.rider for ride in rides]))
@property
def location(self):
last_ride = self.rides.filter(checkin_station__isnull=False).order_by('-checkin_time')
try:
last_ride = last_ride[0]
location = last_ride.checkin_station
except:
location = Station.objects.get(name__contains="PSA")
return location
def __unicode__(self):
return '#%s. Location: %s' % (self.bike_name, self.location.name)
days = {
"Monday": 0,
"Tuesday": 1,
"Wednesday": 2,
"Thursday": 3,
"Friday": 4,
"Saturday": 5,
"Sunday": 6,
}
strings = dict([v, k] for k, v in days.items())
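# (added note) the helpers below parse free-text station hours such as
# "Monday - Friday: 9 am - 5 pm" into {weekday: [(open_hour, close_hour), ...]}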
def decimal(time):
if len(time) <= 2:
return int(time)
else:
hours, minutes = time.split(":")
return int(hours) + float(minutes) / 60
def hour(time):
return decimal(time[0]) if (time[1] == "am" or time[0] == "12") else decimal(time[0])+12
def enter_hours(interval, info, day):
# print(info)
start_time = hour(info[0:2])
end_time = hour(info[3:5])
if day in interval:
interval[day].append((start_time, end_time))
else:
interval[day] = [(start_time, end_time)]
def get_hours(description):
intervals = {}
day = 0
if not description: # empty station
return {}
for line in description.split("\n"): # assumes to be in order
if line.split()[1] == "-": # there is a range of days
# print("range of days")
start = days[line.split()[0]]
end = days[line.split()[2][:-1]]
for i in range(end-start+1):
that_day = strings[day]
if "and" in line: # multiple ranges
enter_hours(intervals, line.split()[3:8], that_day)
enter_hours(intervals, line.split()[9:14], that_day)
else:
enter_hours(intervals, line.split()[3:8], that_day)
day += 1
elif line.split()[0][-1] == ":":
# print("matched :")
that_day = strings[day]
if "and" in line: # multiple ranges
enter_hours(intervals, line.split()[1:6], that_day)
enter_hours(intervals, line.split()[7:12], that_day)
else:
enter_hours(intervals, line.split()[1:6], that_day)
day += 1
else: # 7 days a week.
for day in range(7):
enter_hours(intervals, line.split()[2:7], strings[day])
return intervals
class Station(models.Model):
name = models.CharField(max_length=100)
latitude = models.FloatField(default=39.9529399)
longitude = models.FloatField(default=-75.1905607)
address = models.CharField(max_length=300, blank=True)
notes = models.TextField(max_length=100, blank=True)
hours = models.TextField(max_length=100, blank=True)
picture = models.ImageField(upload_to='img/stations', blank=True)
capacity = models.IntegerField(default=15)
full_name = models.CharField(max_length=100, default="")
def __unicode__(self):
return self.name
@property
def is_open(self):
ranges = get_hours(self.hours)
today = datetime.datetime.today().weekday()
this_hour = datetime.datetime.today().hour
if strings[today] in ranges:
hours = ranges[strings[today]]
for opening in hours:
if this_hour > opening[0] and this_hour < opening[1]:
return True
return False
@property
def comma_name(self):
return ", ".join(self.hours.split("\n"))
class Ride(models.Model):
rider = models.ForeignKey(
Student, limit_choices_to={
'payments__status': 'available',
'waiver_signed': True,
'payments__satisfied': True,
'payments__plan__end_date__gte': datetime.date.today(),
'payments__plan__start_date__lte': datetime.date.today(),
},
)
bike = models.ForeignKey('Bike', limit_choices_to={'status': 'available'}, related_name='rides')
checkout_time = models.DateTimeField(auto_now_add=True)
checkin_time = models.DateTimeField(null=True, blank=True)
checkout_station = models.ForeignKey(Station, default=1, related_name='checkouts')
checkin_station = models.ForeignKey(Station, blank=True, null=True, related_name='checkins')
num_users = models.IntegerField()
@property
def ride_duration_days(self):
if self.checkin_time is None:
end = datetime.datetime.now()
else:
end = self.checkin_time
duration = end - self.checkout_time
duration_days = duration.days
return duration_days
@property
def status(self):
if self.checkin_time is None:
return 'out'
else:
return 'in'
def save(self):
print 'in Ride save method'
if not self.num_users:
self.num_users = len(Student.objects.all())
super(Ride, self).save()
print 'super saved!'
if self.checkin_time is None:
self.bike.status = 'out'
payment = self.rider.current_payments.filter(status='available')[0]
payment.status = 'out'
else:
self.bike.status = 'available'
payment = self.rider.current_payments.filter(status='out')[0]
payment.status = 'available'
self.bike.save()
payment.save()
def __unicode__(self):
return u'%s on %s' % (self.rider, self.checkout_time)
class Page(models.Model):
content = models.TextField()
name = models.CharField(max_length=100)
slug = models.SlugField()
def save(self):
self.slug = slugify(self.name)
super(Page, self).save()
def __unicode__(self):
return self.name
class Comment(models.Model):
comment = models.TextField()
time = models.DateTimeField(auto_now_add=True)
student = models.ForeignKey(Student, blank=True, null=True)
ride = models.ForeignKey(Ride, blank=True, null=True)
is_problem = models.BooleanField(default=False)
def save(self):
super(Comment, self).save()
message = '''
Comment: \n %s \n \n
Time: \n %s \n \n
Student: \n %s \n \n
Ride: \n %s \n \n
Marked as problem? \n %s \n \n
''' % (self.comment, self.time, self.student, self.ride, self.is_problem)
send_mail('PennCycle: Comment Submitted', message, 'messenger@penncycle.org', ['messenger@penncycle.org'])
def __unicode__(self):
return self.comment[:30]
class Info(models.Model):
message = models.TextField()
date = models.DateField(auto_now_add=True)
def __unicode__(self):
return self.message + " on " + self.date.isoformat()
|
[
"razzi53@gmail.com"
] |
razzi53@gmail.com
|
61c91a5a98307bf6308fc87306a01cc429275024
|
83dc2a8d80a0614c66016efba9630cd60538d4b8
|
/spider_traffic/test.py
|
5a9cc750354cadebbffc1e99a03d3901ab54aea1
|
[] |
no_license
|
hanxianzhe1116/Python_Spider
|
5095297e6071842aef95d0264b2024d5a0e81ce5
|
ba3757acf2ed133ab76720a146d380eafe69a092
|
refs/heads/master
| 2021-01-04T09:29:13.616769
| 2020-10-19T02:16:17
| 2020-10-19T02:16:17
| 240,488,992
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
import requests
import re
import json
import csv
from urllib.parse import urlencode
import datetime
import time
'''
Function description: build the request url from its parameters
params:
baseUrl: the base url
cityCode: city code; here I chose Chongqing, whose code is 132
roadType: road type
callBack: callback (return) type
'''
def getPage(baseUrl,cityCode,roadType,callBack):
# query-string parameters
params = {
'cityCode' : cityCode,
'roadtype' : roadType,
'callback' : callBack
}
url = baseUrl + urlencode(params) # append the encoded query string to the base url
# print(requests.get(url).text)
try:
response = requests.get(url)
if response.status_code == 200: # request succeeded
return response
except requests.ConnectionError as e:
print('url error', e.args)
if __name__ == '__main__':
url = 'https://jiaotong.baidu.com/trafficindex/city/roadrank?'
#
with open('transformData.csv','w') as f:
f_csv = csv.writer(f)
# save fifty minutes' worth of data
for i in range(10):
response = getPage(url,132,0,'jsonp_1553486162746_179718')
# print(type(response.text))
transformData = json.loads(re.findall(r'^\w+\((.*)\)$',response.text)[0])
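# (added note) the response is JSONP, e.g. jsonp_xxx({...}); the regex strips the
# wrapper so the payload can be parsed as plain JSON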
transformData = transformData.get('data').get('list')
dateTime = datetime.datetime.now().strftime('%Y-%m-%d')
f_csv.writerow([dateTime]) # wrap in a list; writerow on a bare string would split the date into single characters
dataList = []
for item in transformData:
# print(item)
list = []
list.append(item.get('roadname'))
list.append(item.get('index'))
list.append(item.get('speed'))
dataList.append(list)
# print(datetime.datetime.now().strftime('%Y-%m-%d'))
f_csv.writerows(dataList)
print(dataList)
time.sleep(5)
# f_csv.close()
|
[
"876605943@qq.com"
] |
876605943@qq.com
|
62c20ca9fb15d381b187ac793e03b1b5242e6d37
|
495b0b8de3ecc341511cdb10f11368b35b585bea
|
/SoftLayer/CLI/modules/filters.py
|
1e4274ac04ae064468c5d1d0736b540b8f35416c
|
[] |
no_license
|
hugomatic/softlayer-api-python-client
|
cf6c1e6bfa32e559e72f8b0b069339ae8edd2ede
|
9c115f0912ee62763b805941593f6dd50de37068
|
refs/heads/master
| 2021-01-18T11:09:19.122162
| 2013-04-09T01:44:51
| 2013-04-09T01:44:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
"""
usage: sl help filters
Filters are used to limit the amount of results. Some commands will accept a
filter operation for certain fields. Filters can be applied across multiple
fields in most cases.
Available Operations:
Case Insensitive
'value' Exact value match
'value*' Begins with value
'*value' Ends with value
'*value*' Contains value
Case Sensitive
'~ value' Exact value match
'> value' Greater than value
'< value' Less than value
'>= value' Greater than or equal to value
'<= value' Less than or equal to value
Examples:
sl cci list --datacenter=dal05
sl cci list --hostname='prod*'
sl cci list --network=100 --cpu=2
sl cci list --network='< 100' --cpu=2
sl cci list --memory='>= 2048'
Note: Comparison operators (>, <, >=, <=) can be used with integers, floats,
and strings.
"""
# :copyright: (c) 2013, SoftLayer Technologies, Inc. All rights reserved.
# :license: BSD, see LICENSE for more details.
|
[
"k3vinmcdonald@gmail.com"
] |
k3vinmcdonald@gmail.com
|
cd04729dafc1306355807963c87d375bbfa6c2a7
|
6b096e1074479b13dc9d28cec7e5220d2ecc5c13
|
/Python/q34.py
|
af3b5a71997a6c98c126bd1f89d3957a291886a6
|
[] |
no_license
|
wzb1005/leetcode
|
ed3684e580b4dae37dce0af8314da10c89b557f7
|
4ba73ac913993ba5bb7deab5971aaeaaa16ed4d7
|
refs/heads/master
| 2023-03-19T06:37:30.274467
| 2021-03-09T03:02:57
| 2021-03-09T03:02:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
import bisect
from typing import List
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
lo, hi = bisect.bisect_left(nums, target), bisect.bisect_right(nums, target)  # the body was missing; a bisect-based completion
return [lo, hi - 1] if lo < hi else [-1, -1]
|
[
"chiyexiao@icloud.com"
] |
chiyexiao@icloud.com
|
c770c4a0ef473e599ea32a354884f2360f88218a
|
365051fefddc9d549201225915122cb413168919
|
/final CNN data aug/data_aug_v03.py
|
1360161c08ef375089f3a624dd6d9ccfb8841482
|
[] |
no_license
|
PauloAxcel/SERS-EHD-pillars
|
1623f5141a3d6fcd6b6f13e83afe1dac08cb893a
|
89c029be9f3cb435103f497644d30e75ce3ae3ad
|
refs/heads/main
| 2023-07-19T10:04:44.237797
| 2021-09-04T21:05:14
| 2021-09-04T21:05:14
| 375,076,169
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,140
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 15 13:05:02 2019
@author: paulo
"""
#DATA AUGMENTATION
import os
import cv2
import random
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
#get the folders
SERS_train_dir = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Original Data/SERS/'
NOENH_train_dir = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Original Data/nonSERS/'
gen_dir_tra = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Training/'
gen_dir_val = r'/home/newuser/Desktop/emily try/Data2(part1+part3+part4+part6)/Validation/'
#num is the total number of samples that we want to generate.
num = 5000
#get the files inside the folders
SERS_train = os.listdir(SERS_train_dir)
NOENH_train = os.listdir(NOENH_train_dir)
all_dir = [SERS_train_dir, NOENH_train_dir]
all_data = [SERS_train, NOENH_train]
for dire,file in zip(all_dir, all_data):
#for i in range(len(all_data)):
for j in range(num):
#generate a rand to select a random file in the folder
# rand = random.randint(0,len(all_data[i])-1)
rand = random.randint(0,len(file)-1)
if len(file[rand].split('_'))>1:
continue
else:
# im = cv2.imread(all_dir[i]+all_data[i][rand])
im = cv2.imread(dire+file[rand])
# plt.imshow(im)
#datagen.flow needs a rank 4 matrix, hence we use np.expand_dims to increase the dimension of the image
image = np.expand_dims(im,0)
# word_label = all_data[i][rand].split('.')[0]
word_label = file[rand].split('.')[0]
#Generate new image process
datagen = ImageDataGenerator(featurewise_center=0,
samplewise_center=0,
rotation_range=180,
horizontal_flip=True,
vertical_flip=True,
fill_mode='nearest')
#randomly assign roughly 80% of the generated images to training and the rest to validation
lab = dire.split('/')[-2]
if random.random() < 0.8:
aug_iter = datagen.flow(image,save_to_dir = gen_dir_tra , save_prefix = lab+'_train_' + word_label +'_gen_' + str(random.randint(0,num)))
else:
aug_iter = datagen.flow(image,save_to_dir = gen_dir_val ,save_prefix = lab+'_val_' + word_label +'_gen_' + str(random.randint(0,num)))
#next() pulls the generated image batch out of the datagen flow iterator
# plt.imshow(next(aug_iter)[0].astype(np.uint8))
aug_images = [next(aug_iter)[0].astype(np.uint8) for m in range(1)]
|
[
"noreply@github.com"
] |
noreply@github.com
|
ec3ee36ac1ce3cea82d7bfe1563d5a76ade5968f
|
7a583c534559ad08950e6e1564d4a59095ce9669
|
/autoclient/src/plugins/memory.py
|
bc4df39f71e332e172e24144a790ef9c8973a5a3
|
[] |
no_license
|
wyyalt/cmdb
|
67fbeabda2035e11c1933ab84b75c9c3feac7d92
|
c43c17db7c6fb9f63b2387b7054a89a54bee199a
|
refs/heads/master
| 2021-05-05T12:05:38.026134
| 2017-09-25T14:58:24
| 2017-09-25T14:58:24
| 104,718,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
import os
from lib import convert
from lib.conf.config import settings
class Memory(object):
def __init__(self):
pass
@classmethod
def initial(cls):
return cls()
def process(self, command_func, debug):
if debug:
output = open(os.path.join(settings.BASEDIR, 'files/memory.out'), 'r', encoding='utf-8').read()
else:
output = command_func("sudo dmidecode -q -t 17 2>/dev/null")
return self.parse(output)
def parse(self, content):
"""
Parse the output returned by the shell command
:param content: shell command output
:return: the parsed result
"""
ram_dict = {}
key_map = {
'Size': 'capacity',
'Locator': 'slot',
'Type': 'model',
'Speed': 'speed',
'Manufacturer': 'manufacturer',
'Serial Number': 'sn',
}
devices = content.split('Memory Device')
for item in devices:
item = item.strip()
if not item:
continue
if item.startswith('#'):
continue
segment = {}
lines = item.split('\n\t')
for line in lines:
if not line.strip():
continue
if ':' in line:  # the original check len(line.split(':')) was always truthy, so the else branch never ran
key, value = line.split(':', 1)
else:
key = line
value = ""
if key in key_map:
if key == 'Size':
segment[key_map['Size']] = convert.convert_mb_to_gb(value, 0)
else:
segment[key_map[key.strip()]] = value.strip()
ram_dict[segment['slot']] = segment
return ram_dict
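# (added illustrative note, assuming typical dmidecode output) a "Memory Device" block
# containing "Size: 8192 MB" and "Locator: DIMM_A1" would become
# ram_dict['DIMM_A1'] = {'capacity': <8192 MB converted to GB>, 'slot': 'DIMM_A1', ...}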
|
[
"wyyalt@live.com"
] |
wyyalt@live.com
|
e79dca9531ee613ea930b7be4c7871b1eac88c18
|
d608c2b9fbfcd142fa82875f01f70e1db95cecef
|
/FlaskAppVenv/Lib/site-packages/pymysql/tests/test_connection.py
|
c626a0d39468fc0249dbdd719881a28872564b48
|
[
"MIT"
] |
permissive
|
nidheekamble/SponsCentral
|
9b30918006b98f242de86920a550f8e072ba093f
|
b8189993cb87cc2d83e36c9d72df7a3b7d620bd7
|
refs/heads/master
| 2022-12-21T11:14:36.565494
| 2021-01-31T16:15:33
| 2021-01-31T16:15:33
| 135,418,522
| 1
| 2
|
MIT
| 2022-12-08T07:57:59
| 2018-05-30T09:16:30
|
Python
|
UTF-8
|
Python
| false
| false
| 24,709
|
py
|
import datetime
import sys
import time
import unittest2
import pymysql
from pymysql.tests import base
from pymysql._compat import text_type
from pymysql.constants import CLIENT
class TempUser:
def __init__(self, c, user, db, auth=None, authdata=None, password=None):
self._c = c
self._user = user
self._db = db
create = "CREATE USER " + user
if password is not None:
create += " IDENTIFIED BY '%s'" % password
elif auth is not None:
create += " IDENTIFIED WITH %s" % auth
if authdata is not None:
create += " AS '%s'" % authdata
try:
c.execute(create)
self._created = True
except pymysql.err.InternalError:
# already exists - TODO need to check the same plugin applies
self._created = False
try:
c.execute("GRANT SELECT ON %s.* TO %s" % (db, user))
self._grant = True
except pymysql.err.InternalError:
self._grant = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._grant:
self._c.execute("REVOKE SELECT ON %s.* FROM %s" % (self._db, self._user))
if self._created:
self._c.execute("DROP USER %s" % self._user)
class TestAuthentication(base.PyMySQLTestCase):
socket_auth = False
socket_found = False
two_questions_found = False
three_attempts_found = False
pam_found = False
mysql_old_password_found = False
sha256_password_found = False
import os
osuser = os.environ.get('USER')
# socket auth requires the current user and for the connection to be a socket
# rest do grants @localhost due to incomplete logic - TODO change to @% then
db = base.PyMySQLTestCase.databases[0].copy()
socket_auth = db.get('unix_socket') is not None \
and db.get('host') in ('localhost', '127.0.0.1')
cur = pymysql.connect(**db).cursor()
del db['user']
cur.execute("SHOW PLUGINS")
for r in cur:
if (r[1], r[2]) != (u'ACTIVE', u'AUTHENTICATION'):
continue
if r[3] == u'auth_socket.so':
socket_plugin_name = r[0]
socket_found = True
elif r[3] == u'dialog_examples.so':
if r[0] == 'two_questions':
two_questions_found = True
elif r[0] == 'three_attempts':
three_attempts_found = True
elif r[0] == u'pam':
pam_found = True
pam_plugin_name = r[3].split('.')[0]
if pam_plugin_name == 'auth_pam':
pam_plugin_name = 'pam'
# MySQL: authentication_pam
# https://dev.mysql.com/doc/refman/5.5/en/pam-authentication-plugin.html
# MariaDB: pam
# https://mariadb.com/kb/en/mariadb/pam-authentication-plugin/
# Names differ but functionality is close
elif r[0] == u'mysql_old_password':
mysql_old_password_found = True
elif r[0] == u'sha256_password':
sha256_password_found = True
#else:
# print("plugin: %r" % r[0])
def test_plugin(self):
if not self.mysql_server_is(self.connections[0], (5, 5, 0)):
raise unittest2.SkipTest("MySQL-5.5 required for plugins")
cur = self.connections[0].cursor()
cur.execute("select plugin from mysql.user where concat(user, '@', host)=current_user()")
for r in cur:
self.assertIn(self.connections[0]._auth_plugin_name, (r[0], 'mysql_native_password'))
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipIf(socket_found, "socket plugin already installed")
def testSocketAuthInstallPlugin(self):
# needs plugin. lets install it.
cur = self.connections[0].cursor()
try:
cur.execute("install plugin auth_socket soname 'auth_socket.so'")
TestAuthentication.socket_found = True
self.socket_plugin_name = 'auth_socket'
self.realtestSocketAuth()
except pymysql.err.InternalError:
try:
cur.execute("install soname 'auth_socket'")
TestAuthentication.socket_found = True
self.socket_plugin_name = 'unix_socket'
self.realtestSocketAuth()
except pymysql.err.InternalError:
TestAuthentication.socket_found = False
raise unittest2.SkipTest('we couldn\'t install the socket plugin')
finally:
if TestAuthentication.socket_found:
cur.execute("uninstall plugin %s" % self.socket_plugin_name)
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(socket_found, "no socket plugin")
def testSocketAuth(self):
self.realtestSocketAuth()
def realtestSocketAuth(self):
with TempUser(self.connections[0].cursor(), TestAuthentication.osuser + '@localhost',
self.databases[0]['db'], self.socket_plugin_name) as u:
c = pymysql.connect(user=TestAuthentication.osuser, **self.db)
class Dialog(object):
fail=False
def __init__(self, con):
self.fail=TestAuthentication.Dialog.fail
pass
def prompt(self, echo, prompt):
if self.fail:
self.fail=False
return b'bad guess at a password'
return self.m.get(prompt)
class DialogHandler(object):
def __init__(self, con):
self.con=con
def authenticate(self, pkt):
while True:
flag = pkt.read_uint8()
echo = (flag & 0x06) == 0x02
last = (flag & 0x01) == 0x01
prompt = pkt.read_all()
if prompt == b'Password, please:':
self.con.write_packet(b'stillnotverysecret\0')
else:
self.con.write_packet(b'no idea what to do with this prompt\0')
pkt = self.con._read_packet()
pkt.check_error()
if pkt.is_ok_packet() or last:
break
return pkt
class DefectiveHandler(object):
def __init__(self, con):
self.con=con
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipIf(two_questions_found, "two_questions plugin already installed")
def testDialogAuthTwoQuestionsInstallPlugin(self):
# needs plugin. lets install it.
cur = self.connections[0].cursor()
try:
cur.execute("install plugin two_questions soname 'dialog_examples.so'")
TestAuthentication.two_questions_found = True
self.realTestDialogAuthTwoQuestions()
except pymysql.err.InternalError:
raise unittest2.SkipTest('we couldn\'t install the two_questions plugin')
finally:
if TestAuthentication.two_questions_found:
cur.execute("uninstall plugin two_questions")
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(two_questions_found, "no two questions auth plugin")
def testDialogAuthTwoQuestions(self):
self.realTestDialogAuthTwoQuestions()
def realTestDialogAuthTwoQuestions(self):
TestAuthentication.Dialog.fail=False
TestAuthentication.Dialog.m = {b'Password, please:': b'notverysecret',
b'Are you sure ?': b'yes, of course'}
with TempUser(self.connections[0].cursor(), 'pymysql_2q@localhost',
self.databases[0]['db'], 'two_questions', 'notverysecret') as u:
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_2q', **self.db)
pymysql.connect(user='pymysql_2q', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipIf(three_attempts_found, "three_attempts plugin already installed")
def testDialogAuthThreeAttemptsQuestionsInstallPlugin(self):
# needs plugin. lets install it.
cur = self.connections[0].cursor()
try:
cur.execute("install plugin three_attempts soname 'dialog_examples.so'")
TestAuthentication.three_attempts_found = True
self.realTestDialogAuthThreeAttempts()
except pymysql.err.InternalError:
raise unittest2.SkipTest('we couldn\'t install the three_attempts plugin')
finally:
if TestAuthentication.three_attempts_found:
cur.execute("uninstall plugin three_attempts")
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(three_attempts_found, "no three attempts plugin")
def testDialogAuthThreeAttempts(self):
self.realTestDialogAuthThreeAttempts()
def realTestDialogAuthThreeAttempts(self):
TestAuthentication.Dialog.m = {b'Password, please:': b'stillnotverysecret'}
TestAuthentication.Dialog.fail=True # fail just once. We've got three attempts after all
with TempUser(self.connections[0].cursor(), 'pymysql_3a@localhost',
self.databases[0]['db'], 'three_attempts', 'stillnotverysecret') as u:
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.DialogHandler}, **self.db)
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': object}, **self.db)
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.DefectiveHandler}, **self.db)
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'notdialogplugin': TestAuthentication.Dialog}, **self.db)
TestAuthentication.Dialog.m = {b'Password, please:': b'I do not know'}
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
TestAuthentication.Dialog.m = {b'Password, please:': None}
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_3a', auth_plugin_map={b'dialog': TestAuthentication.Dialog}, **self.db)
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipIf(pam_found, "pam plugin already installed")
@unittest2.skipIf(os.environ.get('PASSWORD') is None, "PASSWORD env var required")
@unittest2.skipIf(os.environ.get('PAMSERVICE') is None, "PAMSERVICE env var required")
def testPamAuthInstallPlugin(self):
# needs plugin. lets install it.
cur = self.connections[0].cursor()
try:
cur.execute("install plugin pam soname 'auth_pam.so'")
TestAuthentication.pam_found = True
self.realTestPamAuth()
except pymysql.err.InternalError:
raise unittest2.SkipTest('we couldn\'t install the auth_pam plugin')
finally:
if TestAuthentication.pam_found:
cur.execute("uninstall plugin pam")
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(pam_found, "no pam plugin")
@unittest2.skipIf(os.environ.get('PASSWORD') is None, "PASSWORD env var required")
@unittest2.skipIf(os.environ.get('PAMSERVICE') is None, "PAMSERVICE env var required")
def testPamAuth(self):
self.realTestPamAuth()
def realTestPamAuth(self):
db = self.db.copy()
import os
db['password'] = os.environ.get('PASSWORD')
cur = self.connections[0].cursor()
try:
cur.execute('show grants for ' + TestAuthentication.osuser + '@localhost')
grants = cur.fetchone()[0]
cur.execute('drop user ' + TestAuthentication.osuser + '@localhost')
except pymysql.OperationalError as e:
# assuming the user doesn't exist which is ok too
self.assertEqual(1045, e.args[0])
grants = None
with TempUser(cur, TestAuthentication.osuser + '@localhost',
self.databases[0]['db'], 'pam', os.environ.get('PAMSERVICE')) as u:
try:
c = pymysql.connect(user=TestAuthentication.osuser, **db)
db['password'] = 'very bad guess at password'
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user=TestAuthentication.osuser,
auth_plugin_map={b'mysql_cleartext_password': TestAuthentication.DefectiveHandler},
**self.db)
except pymysql.OperationalError as e:
self.assertEqual(1045, e.args[0])
# we had 'bad guess at password' work with pam. Well at least we get a permission denied here
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user=TestAuthentication.osuser,
auth_plugin_map={b'mysql_cleartext_password': TestAuthentication.DefectiveHandler},
**self.db)
if grants:
# recreate the user
cur.execute(grants)
# select old_password("crummy p\tassword");
#| old_password("crummy p\tassword") |
#| 2a01785203b08770 |
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(mysql_old_password_found, "no mysql_old_password plugin")
def testMySQLOldPasswordAuth(self):
if self.mysql_server_is(self.connections[0], (5, 7, 0)):
raise unittest2.SkipTest('Old passwords aren\'t supported in 5.7')
# pymysql.err.OperationalError: (1045, "Access denied for user 'old_pass_user'@'localhost' (using password: YES)")
# from login in MySQL-5.6
if self.mysql_server_is(self.connections[0], (5, 6, 0)):
raise unittest2.SkipTest('Old passwords don\'t authenticate in 5.6')
db = self.db.copy()
db['password'] = "crummy p\tassword"
with self.connections[0] as c:
# deprecated in 5.6
if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(self.connections[0], (5, 6, 0)):
with self.assertWarns(pymysql.err.Warning) as cm:
c.execute("SELECT OLD_PASSWORD('%s')" % db['password'])
else:
c.execute("SELECT OLD_PASSWORD('%s')" % db['password'])
v = c.fetchone()[0]
self.assertEqual(v, '2a01785203b08770')
# only works in MariaDB and MySQL-5.6 - can't separate out by version
#if self.mysql_server_is(self.connections[0], (5, 5, 0)):
# with TempUser(c, 'old_pass_user@localhost',
# self.databases[0]['db'], 'mysql_old_password', '2a01785203b08770') as u:
# cur = pymysql.connect(user='old_pass_user', **db).cursor()
# cur.execute("SELECT VERSION()")
c.execute("SELECT @@secure_auth")
secure_auth_setting = c.fetchone()[0]
c.execute('set old_passwords=1')
# pymysql.err.Warning: 'pre-4.1 password hash' is deprecated and will be removed in a future release. Please use post-4.1 password hash instead
if sys.version_info[0:2] >= (3,2) and self.mysql_server_is(self.connections[0], (5, 6, 0)):
with self.assertWarns(pymysql.err.Warning) as cm:
c.execute('set global secure_auth=0')
else:
c.execute('set global secure_auth=0')
with TempUser(c, 'old_pass_user@localhost',
self.databases[0]['db'], password=db['password']) as u:
cur = pymysql.connect(user='old_pass_user', **db).cursor()
cur.execute("SELECT VERSION()")
c.execute('set global secure_auth=%r' % secure_auth_setting)
@unittest2.skipUnless(socket_auth, "connection to unix_socket required")
@unittest2.skipUnless(sha256_password_found, "no sha256 password authentication plugin found")
def testAuthSHA256(self):
c = self.connections[0].cursor()
with TempUser(c, 'pymysql_sha256@localhost',
self.databases[0]['db'], 'sha256_password') as u:
if self.mysql_server_is(self.connections[0], (5, 7, 0)):
c.execute("SET PASSWORD FOR 'pymysql_sha256'@'localhost' ='Sh@256Pa33'")
else:
c.execute('SET old_passwords = 2')
c.execute("SET PASSWORD FOR 'pymysql_sha256'@'localhost' = PASSWORD('Sh@256Pa33')")
db = self.db.copy()
db['password'] = "Sh@256Pa33"
# not implemented yet so it throws an error
with self.assertRaises(pymysql.err.OperationalError):
pymysql.connect(user='pymysql_256', **db)
class TestConnection(base.PyMySQLTestCase):
def test_utf8mb4(self):
"""This test requires MySQL >= 5.5"""
arg = self.databases[0].copy()
arg['charset'] = 'utf8mb4'
conn = pymysql.connect(**arg)
def test_largedata(self):
"""Large query and response (>=16MB)"""
cur = self.connections[0].cursor()
cur.execute("SELECT @@max_allowed_packet")
if cur.fetchone()[0] < 16*1024*1024 + 10:
print("Set max_allowed_packet to bigger than 17MB")
return
t = 'a' * (16*1024*1024)
cur.execute("SELECT '" + t + "'")
assert cur.fetchone()[0] == t
def test_autocommit(self):
con = self.connections[0]
self.assertFalse(con.get_autocommit())
cur = con.cursor()
cur.execute("SET AUTOCOMMIT=1")
self.assertTrue(con.get_autocommit())
con.autocommit(False)
self.assertFalse(con.get_autocommit())
cur.execute("SELECT @@AUTOCOMMIT")
self.assertEqual(cur.fetchone()[0], 0)
def test_select_db(self):
con = self.connections[0]
current_db = self.databases[0]['db']
other_db = self.databases[1]['db']
cur = con.cursor()
cur.execute('SELECT database()')
self.assertEqual(cur.fetchone()[0], current_db)
con.select_db(other_db)
cur.execute('SELECT database()')
self.assertEqual(cur.fetchone()[0], other_db)
def test_connection_gone_away(self):
"""
http://dev.mysql.com/doc/refman/5.0/en/gone-away.html
http://dev.mysql.com/doc/refman/5.0/en/error-messages-client.html#error_cr_server_gone_error
"""
con = self.connect()
cur = con.cursor()
cur.execute("SET wait_timeout=1")
time.sleep(2)
with self.assertRaises(pymysql.OperationalError) as cm:
cur.execute("SELECT 1+1")
# error occurs while reading, not writing, because of the socket buffer.
#self.assertEqual(cm.exception.args[0], 2006)
self.assertIn(cm.exception.args[0], (2006, 2013))
def test_init_command(self):
conn = self.connect(
init_command='SELECT "bar"; SELECT "baz"',
client_flag=CLIENT.MULTI_STATEMENTS)
c = conn.cursor()
c.execute('select "foobar";')
self.assertEqual(('foobar',), c.fetchone())
conn.close()
with self.assertRaises(pymysql.err.Error):
conn.ping(reconnect=False)
def test_read_default_group(self):
conn = self.connect(
read_default_group='client',
)
self.assertTrue(conn.open)
def test_context(self):
with self.assertRaises(ValueError):
c = self.connect()
with c as cur:
cur.execute('create table test ( a int ) ENGINE=InnoDB')
c.begin()
cur.execute('insert into test values ((1))')
raise ValueError('pseudo abort')
c.commit()
c = self.connect()
with c as cur:
cur.execute('select count(*) from test')
self.assertEqual(0, cur.fetchone()[0])
cur.execute('insert into test values ((1))')
with c as cur:
cur.execute('select count(*) from test')
self.assertEqual(1,cur.fetchone()[0])
cur.execute('drop table test')
def test_set_charset(self):
c = self.connect()
c.set_charset('utf8mb4')
# TODO validate setting here
def test_defer_connect(self):
import socket
d = self.databases[0].copy()
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(d['unix_socket'])
except KeyError:
sock.close()
sock = socket.create_connection(
(d.get('host', 'localhost'), d.get('port', 3306)))
for k in ['unix_socket', 'host', 'port']:
try:
del d[k]
except KeyError:
pass
c = pymysql.connect(defer_connect=True, **d)
self.assertFalse(c.open)
c.connect(sock)
c.close()
sock.close()
@unittest2.skipUnless(sys.version_info[0:2] >= (3,2), "required py-3.2")
def test_no_delay_warning(self):
current_db = self.databases[0].copy()
current_db['no_delay'] = True
with self.assertWarns(DeprecationWarning) as cm:
conn = pymysql.connect(**current_db)
# A custom type and function to escape it
class Foo(object):
value = "bar"
def escape_foo(x, d):
return x.value
class TestEscape(base.PyMySQLTestCase):
def test_escape_string(self):
con = self.connections[0]
cur = con.cursor()
self.assertEqual(con.escape("foo'bar"), "'foo\\'bar'")
# added NO_AUTO_CREATE_USER as not including it in 5.7 generates warnings
# mysql-8.0 removes the option however
if self.mysql_server_is(con, (8, 0, 0)):
cur.execute("SET sql_mode='NO_BACKSLASH_ESCAPES'")
else:
cur.execute("SET sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'")
self.assertEqual(con.escape("foo'bar"), "'foo''bar'")
def test_escape_builtin_encoders(self):
con = self.connections[0]
cur = con.cursor()
val = datetime.datetime(2012, 3, 4, 5, 6)
self.assertEqual(con.escape(val, con.encoders), "'2012-03-04 05:06:00'")
def test_escape_custom_object(self):
con = self.connections[0]
cur = con.cursor()
mapping = {Foo: escape_foo}
self.assertEqual(con.escape(Foo(), mapping), "bar")
def test_escape_fallback_encoder(self):
con = self.connections[0]
cur = con.cursor()
class Custom(str):
pass
mapping = {text_type: pymysql.escape_string}
self.assertEqual(con.escape(Custom('foobar'), mapping), "'foobar'")
def test_escape_no_default(self):
con = self.connections[0]
cur = con.cursor()
self.assertRaises(TypeError, con.escape, 42, {})
def test_escape_dict_value(self):
con = self.connections[0]
cur = con.cursor()
mapping = con.encoders.copy()
mapping[Foo] = escape_foo
self.assertEqual(con.escape({'foo': Foo()}, mapping), {'foo': "bar"})
def test_escape_list_item(self):
con = self.connections[0]
cur = con.cursor()
mapping = con.encoders.copy()
mapping[Foo] = escape_foo
self.assertEqual(con.escape([Foo()], mapping), "(bar)")
def test_previous_cursor_not_closed(self):
con = self.connect(
init_command='SELECT "bar"; SELECT "baz"',
client_flag=CLIENT.MULTI_STATEMENTS)
cur1 = con.cursor()
cur1.execute("SELECT 1; SELECT 2")
cur2 = con.cursor()
cur2.execute("SELECT 3")
self.assertEqual(cur2.fetchone()[0], 3)
def test_commit_during_multi_result(self):
con = self.connect(client_flag=CLIENT.MULTI_STATEMENTS)
cur = con.cursor()
cur.execute("SELECT 1; SELECT 2")
con.commit()
cur.execute("SELECT 3")
self.assertEqual(cur.fetchone()[0], 3)
|
[
"shreyansh.chheda@gmail.com"
] |
shreyansh.chheda@gmail.com
|
af585888517df64c46a62653fa6ff3912e6b9f0d
|
508c5e01aa7dce530093d5796250eff8d74ba06c
|
/code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tests/test_schema_get.py
|
d39692be0008269bf1791e585f1e0e92b09181fa
|
[
"MIT",
"PostgreSQL"
] |
permissive
|
jhkuang11/UniTrade
|
f220b0d84db06ff17626b3daa18d4cb8b72a5d3f
|
5f68b853926e167936b58c8543b8f95ebd6f5211
|
refs/heads/master
| 2022-12-12T15:58:30.013516
| 2019-02-01T21:07:15
| 2019-02-01T21:07:15
| 166,479,655
| 0
| 0
|
MIT
| 2022-12-07T03:59:47
| 2019-01-18T22:19:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,132
|
py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.browser.server_groups.servers.tests import utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
class SchemaGetTestCase(BaseTestGenerator):
""" This class will add new schema under database node. """
scenarios = [
# Fetching default URL for extension node.
('Check Schema Node URL', dict(url='/browser/schema/obj/'))
]
def runTest(self):
""" This function will delete schema under database node. """
schema = parent_node_dict["schema"][-1]
db_id = schema["db_id"]
server_id = schema["server_id"]
server_response = server_utils.connect_server(self, server_id)
if not server_response["data"]["connected"]:
raise Exception("Could not connect to server to connect the"
" database.")
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
server_id,
db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database to get the schema.")
schema_id = schema["schema_id"]
schema_response = self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(server_id) + '/' + str(db_id) +
'/' + str(schema_id),
content_type='html/json')
self.assertEquals(schema_response.status_code, 200)
# Disconnect the database
database_utils.disconnect_database(self, server_id, db_id)
|
[
"jhkuang11@gmail.com"
] |
jhkuang11@gmail.com
|
2eb829c87ae5849e5b0d7bf0a4c9e93efc347ecc
|
32d934cabb1eac917bb583a1428b87f78b335a4e
|
/code_per_day/day_47_to_48.py
|
a8d229a9b40eae3b056c893a2848e2fd3d553e8c
|
[] |
no_license
|
zotroneneis/magical_universe
|
7339fefcfdf47e21e5ebcc6f56e3f1949230932a
|
c5da3367b7854c4cf9625c45e03742dba3a6d63c
|
refs/heads/master
| 2022-12-07T20:21:25.427333
| 2022-11-13T14:33:01
| 2022-11-13T14:33:01
| 141,951,821
| 414
| 58
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
from collections import defaultdict
class CastleKilmereMember:
""" Creates a member of the Castle Kilmere School of Magic """
def __init__(self, name: str, birthyear: int, sex: str):
self.name = name
self.birthyear = birthyear
self.sex = sex
self._traits = defaultdict(lambda: False)
def add_trait(self, trait, value=True):
self._traits[trait] = value
def exhibits_trait(self, trait: str) -> bool:
value = self._traits[trait]
return value
def print_traits(self):
true_traits = [trait for trait, value in self._traits.items() if value]
false_traits = [trait for trait, value in self._traits.items() if not value]
if true_traits:
print(f"{self.name} is {', '.join(true_traits)}")
if false_traits:
print(f"{self.name} is not {', '.join(false_traits)}")
if (not true_traits and not false_traits):
print(f"{self.name} does not have traits yet")
if __name__ == "__main__":
bromley = CastleKilmereMember('Bromley Huckabee', 1959, 'male')
bromley.add_trait('tidy-minded')
bromley.add_trait('kind')
bromley.exhibits_trait('kind')
bromley.exhibits_trait('mean')
bromley.print_traits()
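    # Illustrative sketch (not in the original demo): the two exhibits_trait calls above
    # discard their return values; given the defaultdict(lambda: False) used in the class,
    # the expected results are True for the added trait and False for an unknown one.
    assert bromley.exhibits_trait('kind')        # 'kind' was added above -> True
    assert not bromley.exhibits_trait('mean')    # unknown traits default to False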
|
[
"popkes@gmx.net"
] |
popkes@gmx.net
|
853e8cd7d44015eb9bfbe2e8d913ffb2d35fe27c
|
35c75ed0ca9850a6dd62d0e19b7e2ab472c5f292
|
/shop/migrations/0002_auto_20171109_0354.py
|
f417d1eaa71b14f9d43a38cb507c9d803779dad6
|
[] |
no_license
|
HyeriChang/tuanh
|
1314b270d7b8d44424c5b6b82361b20397d30f4b
|
38546afde0a4fa6a54727b4595b7cfa7c8baec1e
|
refs/heads/master
| 2021-05-07T06:32:31.060921
| 2017-12-06T09:36:48
| 2017-12-06T09:36:48
| 111,763,554
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,656
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-09 03:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product',
name='brand',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='product',
name='color',
field=models.CharField(default='x', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='product',
name='condition',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='product',
name='detail',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='product',
name='material',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='product',
name='rating',
field=models.FloatField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='product',
name='size',
field=models.CharField(default='', max_length=50),
preserve_default=False,
),
]
|
[
"anh.nguyentu3110@gmail.com"
] |
anh.nguyentu3110@gmail.com
|
6763b1340462fabc349debc7f52e0774a21e430f
|
7c59004e0165c9b32dc5b786b96fc4d81f565daf
|
/predictor_ker.py
|
db39a01a8c2f10a576acb2f85d032224fae85302
|
[] |
no_license
|
LeonHardt427/mayi
|
f04d7d7bca68e0a3a57ca2ef2de14af7db28d2e7
|
679f688a971075794dd3d4ed0a7cbc50931a422f
|
refs/heads/master
| 2020-03-20T05:34:58.677201
| 2018-07-08T03:23:23
| 2018-07-08T03:23:23
| 137,219,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2018/5/29 10:11
# @Author : LeonHardt
# @File : predictor_ker.py
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import Imputer, OneHotEncoder
import tensorflow as tf
from keras import Sequential
from keras.layers import Dense, Activation
# data_path = os.getcwd()+"/data_error/"
# x_train = np.loadtxt(data_path+"x_train_error93.txt", delimiter=',')
# y_train = np.loadtxt(data_path+"y_train_error93.txt", delimiter=',')
# x_test = np.loadtxt(data_path+"x_test_error93.txt", delimiter=',')
# # print("ready")
# im = Imputer(strategy="most_frequent")
# x_train = im.fit_transform(x_train)
# x_test = im.transform(x_test)
data_path = os.getcwd()+"/data/"
x_train = np.loadtxt(data_path+"x_train_most.txt", delimiter=',')
y_train = np.loadtxt(data_path+"y_train_filter.txt", delimiter=',')
x_test = np.loadtxt(data_path+"x_test_a_most.txt", delimiter=',')
enc = OneHotEncoder()
# OneHotEncoder returns a scipy sparse matrix; convert it to a dense array so Keras
# can consume it directly as the label matrix.
y_train = enc.fit_transform(y_train.reshape(-1, 1)).toarray()
print(y_train)
print(y_train.shape)
model = Sequential()
model.add(Dense(input_dim=297, units=297, activation='relu'))
# model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(2, activation='sigmoid'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# cw = {0: 1, 1: 100}
model.fit(x_train, y_train, epochs=20, batch_size=10000)
prob = model.predict_proba(x_test)
np.savetxt(os.getcwd()+"/prediction/ker200_1_error93_1.txt", prob, delimiter=',')
# model.save("merge_model")
|
[
"leonhardt427@126.com"
] |
leonhardt427@126.com
|
8fcae2a12359d68896b1d9e5d7db84dacd86f151
|
3f453e74ae03c777d4ca803623cf9f69b70ace87
|
/mappanel.py
|
3d92e74b1764f1483756c88d1f68dbe828478608
|
[] |
no_license
|
acidtobi/weewar_clone
|
5a348ece62ff22f3a0812867a93ac5f5a370f782
|
5b0575ee7534278d49df446a852e33d3f232d6e7
|
refs/heads/master
| 2021-05-04T10:41:44.741431
| 2016-02-11T20:47:23
| 2016-02-11T20:47:23
| 50,377,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,916
|
py
|
from __future__ import division
import wx
import wx.lib.scrolledpanel
class MapPanel(wx.lib.scrolledpanel.ScrolledPanel):
def __init__(self, parent, background_tile, size, innerbitmap=None):
self.background_tile = background_tile
self.InnerSize = size
self.innerbitmap = innerbitmap
self._Buffer = None
self.virtual_x = 0
self.virtual_y = 0
screen_width, screen_height = wx.DisplaySize()
self.background = wx.EmptyBitmap(screen_width, screen_height)
dc = wx.MemoryDC()
dc.SelectObject(self.background)
tile_width, tile_height = self.background_tile.Size
for rownum in range(int(screen_height / tile_height)):
for colnum in range(int(screen_width / tile_width)):
dc.DrawBitmap(self.background_tile, colnum * tile_width, rownum * tile_height, True)
width_px, height_px = size
wx.lib.scrolledpanel.ScrolledPanel.__init__(self, parent, size=(width_px, height_px))
self.SetupScrolling()
self.SetScrollRate(1, 1)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnResize)
#self.OnResize(None)
def setInnerBitmap(self, bitmap):
self.innerbitmap = bitmap
def GetVirtualPosition(self, (x, y)):
scrolled_x, scrolled_y = self.CalcScrolledPosition((self.virtual_x, self.virtual_y))
return x - scrolled_x, y - scrolled_y
def UpdateDrawing(self):
dc = wx.MemoryDC()
dc.SelectObject(self._Buffer)
self_width, self_height = self.InnerSize
sizer_width, sizer_height = self.GetSize()
self.virtual_x = max(0, (sizer_width - self_width) / 2)
self.virtual_y = max(0, (sizer_height - self_height) / 2)
tile_width, tile_height = self.background_tile.Size
offset_x, offset_y = self.virtual_x % tile_width, self.virtual_y % tile_height
dc.DrawBitmap(self.background, offset_x - tile_width, offset_y - tile_height)
if self.innerbitmap:
dc.DrawBitmap(self.innerbitmap, self.virtual_x, self.virtual_y, True)
del dc
self.Refresh(eraseBackground=False)
self.Update()
def OnPaint(self, e):
dc = wx.PaintDC(self)
x, y = self.CalcScrolledPosition((0, 0))
dc.DrawBitmap(self._Buffer, x, y)
def OnResize(self, e):
width, height = e.GetSize()
inner_width, inner_height = self.InnerSize
self.SetSize((width, height))
self.SetVirtualSize((inner_width, inner_height))
self._Buffer = wx.EmptyBitmap(max(width, inner_width), max(height, inner_height))
self.UpdateDrawing()
# ==============================================================================================
# tests
# ==============================================================================================
if __name__ == "__main__":
class MainFrame(wx.Frame):
def __init__(self, parent, title):
background = wx.Bitmap("tiles_background.jpg")
background_tile = wx.Bitmap("logo_background_repeating.png")
self.foreground = wx.Bitmap("rubberducky.png")
wx.Frame.__init__(self, parent, title=title, size=background.Size)
self.mappanel = MapPanel(self, background_tile, size=self.foreground.Size, innerbitmap=self.foreground)
leftpanel = wx.Panel(self, -1, size=(100, -1))
self.box = wx.BoxSizer(wx.HORIZONTAL)
self.box.Add(leftpanel, 0, wx.EXPAND)
self.box.Add(self.mappanel, 2, wx.EXPAND)
self.SetAutoLayout(True)
self.SetSizer(self.box)
self.Layout()
#self.Bind(wx.EVT_PAINT, self.OnPaint)
self.mappanel.setInnerBitmap(self.foreground)
app = wx.App()
mainframe = MainFrame(None, "Map Panel")
mainframe.Show()
app.MainLoop()
|
[
"acidtobi@gmail.com"
] |
acidtobi@gmail.com
|
76f0bfb3491090f86e4d11cf509c6a61dde62e2f
|
1f9d8381f111ee34be61a82cdf2038afc1a44079
|
/sequenceToLine.py
|
848d936908c046a2428b7f84395126b3190bd404
|
[
"BSD-3-Clause"
] |
permissive
|
el-mat/ectools
|
f35d305c8fd558436cd7534c5fe4db66fffead24
|
031eb0300c82392915d8393a5fedb4d3452b15bf
|
refs/heads/master
| 2021-01-23T21:10:42.869782
| 2014-12-01T14:07:00
| 2014-12-01T14:07:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
#!/usr/bin/env python
import sys
from seqio import iteratorFromExtension
from nucio import fileIterator
if not len(sys.argv) == 2:
    sys.exit("sequenceToLine.py in.{fa.fq}\n")
it = iteratorFromExtension(sys.argv[1])
for record in fileIterator(sys.argv[1], it):
if hasattr(record, "desc"):
print "\t".join([record.name, record.seq, record.desc, record.qual])
else:
print "\t".join([record.name, record.seq])
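# Hypothetical usage sketch (not part of the original script; the seqio/nucio modules and
# the record attributes are assumed from the imports above):
#   python sequenceToLine.py reads.fq > reads.tsv
# emits one tab-separated line per record: name and seq, plus desc and qual for FASTQ input.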
|
[
"gurtowsk@mshadoop1.cshl.edu"
] |
gurtowsk@mshadoop1.cshl.edu
|
10fe64c7113e565bb25b1d2565fa28e8ea3cfdcd
|
40711c0546644d1bb8709ee348211d294c0a48d2
|
/Rapport/Benchmarking/state_plot.py
|
48111f01a271055f269de89702c8ec49040ddac4
|
[] |
no_license
|
smaibom/bach_2015
|
aaefa2a33cc0d5bb06761d72a3820ee0f2cfe290
|
4177977b81bb6f6c945e5e8a1956dbd4ca4b43f2
|
refs/heads/master
| 2021-01-20T21:53:03.374855
| 2015-06-07T21:54:47
| 2015-06-07T21:54:47
| 31,496,008
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,459
|
py
|
"""
Demo of a simple plot with a custom dashed line.
A Line object's ``set_dashes`` method allows you to specify dashes with
a series of on/off lengths (in points).
"""
import numpy as np
import matplotlib.pyplot as plt
#x = np.linspace(0, 22, 22)
grosses = [62242364,123246883,122562228,122562228,183556657,184105105,184105105,243618278,245304681,245304681,305119181,305119181,305967853,977490276]
labels = ['0 0 0',
'0 0 1', '0 1 0', '1 0 0',
'0 0 2', '0 2 0', '2 0 0 ',
'0 0 3', '0 3 0', '3 0 0 ',
'1 0 1', '0 1 1', '1 1 0',
'1 1 1']
fig = plt.figure()
fig.subplots_adjust(bottom=0.2) # Remark 1
ax = fig.add_subplot(111)
ax.ticklabel_format(style='plain') # Remark 2
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels, rotation=80)
ax.bar(range(len(grosses)), grosses)
plt.xlabel('alterations, deletions and insertions\n(In order)')
plt.ylabel('states processed')
plt.show()
#0 = 62242364 0 hits
#1ins = 123246883 0 hits
#1del = 122562228 0 hits
#1mut = 122562228 0 hits
#2ins = 183556657 3 hits
#2del = 184105105 117 hits
#2mut = 184105105 117 hits
#3ins = 243618278 28 hits
#3del = 245304681 2066 hits
#3mut = 245304681 2066 hits
#1ins 1mut = 305119181 41 hits
#1ins 1del = 305119181 41 hits
#1del 1mut = 305967853 234 hits
#1 1 1 = 977490276 3275 hits
|
[
"kullax@feral.dk"
] |
kullax@feral.dk
|
849b16fbf6169f6d56be1d9b19ad76d20f75fe68
|
92dbb16f383754fd9fd8d35c87b68977ec42a586
|
/Geogria/20200514-graph/map_world.py
|
847c835b947d72027a50300d73ea3ed4aef2713e
|
[] |
no_license
|
YWJL/pchong
|
c0c1bfa4695ac3b143430fd2291b197b4fdab884
|
eaa98c5ed3daad60e8ac0560634ba631e665f00e
|
refs/heads/master
| 2022-11-11T00:01:55.550199
| 2020-07-01T06:11:56
| 2020-07-01T06:11:56
| 276,290,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,565
|
py
|
import asyncio
import json
import math

import pandas
import pandas as pd
from aiohttp import TCPConnector, ClientSession
from pyecharts import options as opts
from pyecharts.charts import Bar, Geo, Grid, Line, Map, Scatter, Timeline
from pyecharts.commons.utils import JsCode
from pyecharts.datasets import register_url
from pyecharts.faker import Collector, Faker
US_name='daily.csv'
death_name='202005014-world-death-data.json.csv'
positive_name='202005014-world-confirm-data.json.csv'
recovered_name='202005014-world-cover-data.json.csv'
RECOVERED=pd.read_csv(recovered_name)
DEATH=pd.read_csv(death_name)
POSITIVE=pd.read_csv(positive_name)
US=pd.read_csv(US_name)
US_pos=[]
US_Dea=[]
US_Rec=[]
for i in range(1,56):
if math.isnan(US.iloc[i,2]):
US.iloc[i, 2]=0
if math.isnan(US.iloc[i, 16]):
US.iloc[i, 16]=0
if math.isnan(US.iloc[i, 11]):
US.iloc[i,11]=0
US_pos.append(US.iloc[i, 2])
US_Dea.append(US.iloc[i, 16])
US_Rec.append(US.iloc[i, 11])
print('US_pos:',US_pos)
print('US_Dea:',US_Dea)
print('US_Rec:',US_Rec)
country_number_pos=int((POSITIVE.shape[1])/2-1)
country_number_dea=int((DEATH.shape[1])/2-1)
country_number_rec=int((RECOVERED.shape[1])/2-2)
print(country_number_dea)
print(RECOVERED.iloc[1,country_number_rec])
day=len(POSITIVE)-1
print(day)
country_pos=[]
country_dea=[]
country_rec=[]
positive=[]
death=[]
recovered=[]
print('sum(US_dea):',sum(US_Dea))
print('sum(US_pos):',sum(US_pos))
print('sum(US_rec):',sum(US_Rec))
time = "Global epidemic data as of {}".format(POSITIVE.iloc[-1, 0])  # originally: "截止至{}全球疫情数据"
for i in range(1,country_number_dea):
country_dea.append(DEATH.iloc[1,i])
death.append(DEATH.iloc[day-1,i])
country_dea.append('United States')
death.append(sum(US_Dea))
MAP_data_dea=[list(z) for z in zip(country_dea, death)]
print('MAP_data_dea:',MAP_data_dea)
for i in range(1,country_number_pos):
country_pos.append(POSITIVE.iloc[1,i])
positive.append(POSITIVE.iloc[day,i])
country_pos.append('United States')
positive.append(sum(US_pos))
MAP_data_pos=[list(z) for z in zip(country_pos, positive)]
print(len(positive))
MAP_data_rec=[]
# print(type(RECOVERED.iloc[2,0]))
for i in range(1,day-6):
for j in range(1,day-6):
if type(RECOVERED.iloc[i,j])!=str and math.isnan(RECOVERED.iloc[i,j]):
RECOVERED.iloc[i,j]=0
for i in range(1,day-6):
    country_rec.append(RECOVERED.iloc[1,i])
    recovered.append(RECOVERED.iloc[day-6,i])
country_rec.append('United States')
recovered.append(sum(US_Rec))
# build the pair list after the US entry is appended, mirroring the positive/death data above
MAP_data_rec=[list(z) for z in zip(country_rec, recovered)]
print('MAP_data_pos:',MAP_data_pos)
# for i in range(1,country_number_dea-1):
# for j in range(1,country_number_dea-1):
# if country_pos[i]==country_dea[j]:
# map1=[list(z) for z in zip(country_dea, country_pos)]
# print(map1)
# print(country)
# print(data.iloc[day,1])
# print(day)
NAME_MAP_DATA = {
# "key": "value"
# "name on the hong kong map": "name in the MAP DATA",
}
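# A purely illustrative (hypothetical) entry for the mapping above would look like:
# NAME_MAP_DATA = {"US": "United States"}  # maps a name used in the data to the name on the map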
c = (
Map(init_opts=opts.InitOpts(width="1400px", height="800px"))
.add(
series_name="Positive_number",
data_pair=MAP_data_pos,
maptype="world",
name_map=NAME_MAP_DATA,
is_map_symbol_show=False)
.add(
series_name="Death_number",
data_pair=MAP_data_dea,
maptype="world",
name_map=NAME_MAP_DATA,
is_map_symbol_show=False)
.add(
series_name="Recovered_number",
data_pair=MAP_data_rec,
maptype="world",
name_map=NAME_MAP_DATA,
is_map_symbol_show=False)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(
            title="Map - World Map",  # originally: "Map-世界地图"
subtitle=time),
# subtitle=time,
visualmap_opts=opts.VisualMapOpts(max_=sum(US_pos)),
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{b0}<br/>(number:{c}) "
),
)
.render("map_world.html")
)
# print([list(z) for z in zip(Faker.country, Faker.values())])
print(max(US_pos))
|
[
"201256153@qq.com"
] |
201256153@qq.com
|
ac19567d2c9ed4b4c9852a5ca2ad7fbd7ab5185d
|
7fdf9c9b4f9601f1f24414da887acb03018e99f1
|
/gym/gym/settings.py
|
80b1a8a45b3e88065e60982b4b7505f389a7c7e7
|
[] |
no_license
|
sid-ncet/fitnesss
|
d96653022664ec2b1c9a5811fc3f7048a122e3ed
|
fc62c77abe30659131f0befc93424f8bb9333a0b
|
refs/heads/master
| 2023-04-25T19:02:12.719223
| 2021-06-05T06:53:17
| 2021-06-05T06:53:17
| 374,042,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,318
|
py
|
"""
Django settings for gym project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure--v@m2t(6erzobw08yshw&tbm9s47$n)99#t8+o87ib22l7$-dj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'fitness'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gym.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gym.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR
MEDIA_URL = '/images/download/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"siddharthsinghcs11@gmail.com"
] |
siddharthsinghcs11@gmail.com
|
11aa915574de5fc4f11f5c7671205cfbaa964fe2
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/copp/lacpallowhist5min.py
|
2d5afaedb106d24fcc43463d8548e0ce36b681e4
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,598
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LacpAllowHist5min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.copp.LacpAllowHist5min", "Per Interface Allow Counters for Lacp")
counter = CounterMeta("bytesRate", CounterCategory.GAUGE, "bytes-per-second", "LacpAllowed Bytes rate")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesRateSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesRateTr"
meta._counters.append(counter)
counter = CounterMeta("bytes", CounterCategory.COUNTER, "bytes", "LacpAllowed Bytes")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "bytesCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "bytesPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "bytesRate"
meta._counters.append(counter)
counter = CounterMeta("pktsRate", CounterCategory.GAUGE, "packets-per-second", "LacpAllowed Packets rate")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsRateSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsRateTr"
meta._counters.append(counter)
counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "LacpAllowed Packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate"
meta._counters.append(counter)
meta.moClassName = "coppLacpAllowHist5min"
meta.rnFormat = "HDcoppLacpAllow5min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Per Interface Allow Counters for Lacp stats in 5 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.svi.If")
meta.parentClasses.add("cobra.model.pc.AggrIf")
meta.parentClasses.add("cobra.model.l1.PhysIf")
meta.parentClasses.add("cobra.model.l3.RtdIf")
meta.parentClasses.add("cobra.model.l3.EncRtdIf")
meta.superClasses.add("cobra.model.copp.LacpAllowHist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDcoppLacpAllow5min-', True),
]
prop = PropMeta("str", "bytesAvg", "bytesAvg", 32068, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Bytes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesAvg", prop)
prop = PropMeta("str", "bytesCum", "bytesCum", 32064, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LacpAllowed Bytes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesCum", prop)
prop = PropMeta("str", "bytesMax", "bytesMax", 32067, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Bytes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMax", prop)
prop = PropMeta("str", "bytesMin", "bytesMin", 32066, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Bytes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMin", prop)
prop = PropMeta("str", "bytesPer", "bytesPer", 32065, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LacpAllowed Bytes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesPer", prop)
prop = PropMeta("str", "bytesRate", "bytesRate", 32072, PropCategory.IMPLICIT_RATE)
prop.label = "LacpAllowed Bytes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRate", prop)
prop = PropMeta("str", "bytesRateAvg", "bytesRateAvg", 32084, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Bytes rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateAvg", prop)
prop = PropMeta("str", "bytesRateMax", "bytesRateMax", 32083, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Bytes rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMax", prop)
prop = PropMeta("str", "bytesRateMin", "bytesRateMin", 32082, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Bytes rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMin", prop)
prop = PropMeta("str", "bytesRateSpct", "bytesRateSpct", 32085, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Bytes rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateSpct", prop)
prop = PropMeta("str", "bytesRateThr", "bytesRateThr", 32086, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Bytes rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesRateThr", prop)
prop = PropMeta("str", "bytesRateTr", "bytesRateTr", 32087, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Bytes rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateTr", prop)
prop = PropMeta("str", "bytesSpct", "bytesSpct", 32069, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Bytes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesSpct", prop)
prop = PropMeta("str", "bytesThr", "bytesThr", 32070, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Bytes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesThr", prop)
prop = PropMeta("str", "bytesTr", "bytesTr", 32071, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Bytes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesTr", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 31203, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pktsAvg", "pktsAvg", 32104, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsAvg", prop)
prop = PropMeta("str", "pktsCum", "pktsCum", 32100, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LacpAllowed Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsCum", prop)
prop = PropMeta("str", "pktsMax", "pktsMax", 32103, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMax", prop)
prop = PropMeta("str", "pktsMin", "pktsMin", 32102, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMin", prop)
prop = PropMeta("str", "pktsPer", "pktsPer", 32101, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LacpAllowed Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsPer", prop)
prop = PropMeta("str", "pktsRate", "pktsRate", 32108, PropCategory.IMPLICIT_RATE)
prop.label = "LacpAllowed Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRate", prop)
prop = PropMeta("str", "pktsRateAvg", "pktsRateAvg", 32120, PropCategory.IMPLICIT_AVG)
prop.label = "LacpAllowed Packets rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateAvg", prop)
prop = PropMeta("str", "pktsRateMax", "pktsRateMax", 32119, PropCategory.IMPLICIT_MAX)
prop.label = "LacpAllowed Packets rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMax", prop)
prop = PropMeta("str", "pktsRateMin", "pktsRateMin", 32118, PropCategory.IMPLICIT_MIN)
prop.label = "LacpAllowed Packets rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMin", prop)
prop = PropMeta("str", "pktsRateSpct", "pktsRateSpct", 32121, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Packets rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateSpct", prop)
prop = PropMeta("str", "pktsRateThr", "pktsRateThr", 32122, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Packets rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsRateThr", prop)
prop = PropMeta("str", "pktsRateTr", "pktsRateTr", 32123, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Packets rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateTr", prop)
prop = PropMeta("str", "pktsSpct", "pktsSpct", 32105, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LacpAllowed Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsSpct", prop)
prop = PropMeta("str", "pktsThr", "pktsThr", 32106, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LacpAllowed Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsThr", prop)
prop = PropMeta("str", "pktsTr", "pktsTr", 32107, PropCategory.IMPLICIT_TREND)
prop.label = "LacpAllowed Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l1EthIfToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
5f620ca66ea5f22f98da060905725de7b1622114
|
e8c0513bce6ba781d6d55c48330c54edbd20cc23
|
/manage.py
|
d66549db189c8fe3cddba1a8a34913eaa50627a3
|
[] |
no_license
|
BohnSix/myblog
|
aad06969026e5e0059e83d3c8bedab66eab3a5d2
|
3961bd813c8d706b15e66cd55dff2edeb992ca3c
|
refs/heads/master
| 2022-09-25T00:47:35.509766
| 2019-11-21T08:33:20
| 2019-11-21T08:33:20
| 185,914,940
| 2
| 1
| null | 2022-09-16T18:13:43
| 2019-05-10T03:49:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 721
|
py
|
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell
from app import create_app, db
from app.models import *
app = create_app(config_name="develop")
@app.template_filter()
def countTime(content):
return int(content.__len__() / 200) + 1
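# Rough worked example of the estimate above (assuming ~200 characters per minute):
# a 450-character article gives int(450 / 200) + 1 = 3, i.e. about a 3-minute read.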
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
def make_shell_context():
return dict(db=db, Article=Article, User=User, Category=Category)
manager.add_command("shell", Shell(make_context=make_shell_context))
if __name__ == "__main__":
# db.drop_all(app=app)
# db.create_all(app=app)
app.run(host="0.0.0.0", port=8080, debug=True)
|
[
"bohn_six@163.com"
] |
bohn_six@163.com
|
0dca2f890e85ab82a477f193ca5d7b13bb4452f4
|
5310aad336ad7cdc304a7204d4bd91b4fa754f1e
|
/Lab3/homework/serious_ex9.py
|
1c3bf0845b2d742f8ca01016425fdb34f9ee6da7
|
[] |
no_license
|
dattran1997/trandat-fundamental-c4e17
|
329e294f68bde1fc04d53c0acd0f9a7e87d7d444
|
fd2f0648f28e78769f7fbf3e40e9973bf211f1de
|
refs/heads/master
| 2020-03-09T22:49:56.228853
| 2018-05-29T04:22:39
| 2018-05-29T04:22:39
| 129,044,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 244
|
py
|
def get_even_list(numbers):
    new_list = []
    for i in numbers:
        if i % 2 == 0:
            new_list.append(i)
    return new_list
if __name__ == "__main__":
    numbers = [1, 4, 5, -1, 10]
    newlist = get_even_list(numbers)
print(newlist)
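    # Equivalent one-liner (a sketch with the same behaviour as get_even_list above):
    # [i for i in numbers if i % 2 == 0]  -> [4, 10] for the input used here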
|
[
"dattran1997@gmail.com"
] |
dattran1997@gmail.com
|
fc0d28850895dd119c8a2b4afc9f5481bb7779fe
|
b9eef16211d4a5f2e5b51c0ddfb7dc0a9608db86
|
/Chap2InprovingDNN/week2/optimization/optimization.py
|
e2e8cee8f4713b5cd9743e543af41771e71a40f0
|
[] |
no_license
|
vinares/DeepLearning
|
905f44655c0b72c9ba6d52bf1c15146b0d07fc92
|
c307c3c1063a101dcfa192bc3b8671c2781e31f3
|
refs/heads/main
| 2023-06-22T05:45:46.716091
| 2021-07-21T09:03:02
| 2021-07-21T09:03:02
| 369,407,380
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,039
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
"""
Update parameters using one step of gradient descent
Arguments:
parameters -- python dictionary containing your parameters to be updated:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients to update each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
learning_rate -- the learning rate, scalar.
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l + 1)] = parameters['W' + str(l + 1)] - learning_rate * grads['dW' + str(l + 1)]
parameters["b" + str(l + 1)] = parameters['b' + str(l + 1)] - learning_rate * grads['db' + str(l + 1)]
### END CODE HERE ###
return parameters
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 =\n" + str(parameters["W1"]))
print("b1 =\n" + str(parameters["b1"]))
print("W2 =\n" + str(parameters["W2"]))
print("b2 =\n" + str(parameters["b2"]))
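# Tiny numeric sketch of the update rule above (values are made up for illustration):
# with W = 1.0, dW = 0.5 and learning_rate = 0.01, one step gives
# W := W - 0.01 * 0.5 = 0.995; the same rule is applied element-wise to every W and b.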
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1, m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(
m / mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, k * mini_batch_size: (k + 1) * mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size: (k + 1) * mini_batch_size]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, (num_complete_minibatches ) * mini_batch_size:m]
mini_batch_Y = shuffled_Y[:, (num_complete_minibatches ) * mini_batch_size:m]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
"""
Initializes the velocity as a python dictionary with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
Returns:
v -- python dictionary containing the current velocity.
v['dW' + str(l)] = velocity of dWl
v['db' + str(l)] = velocity of dbl
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
# Initialize velocity
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
v["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape)
### END CODE HERE ###
return v
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] =\n" + str(v["dW1"]))
print("v[\"db1\"] =\n" + str(v["db1"]))
print("v[\"dW2\"] =\n" + str(v["dW2"]))
print("v[\"db2\"] =\n" + str(v["db2"]))
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
"""
Update parameters using Momentum
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- python dictionary containing the current velocity:
v['dW' + str(l)] = ...
v['db' + str(l)] = ...
beta -- the momentum hyperparameter, scalar
learning_rate -- the learning rate, scalar
Returns:
parameters -- python dictionary containing your updated parameters
v -- python dictionary containing your updated velocities
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Momentum update for each parameter
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
# compute velocities
v["dW" + str(l + 1)] = beta * v["dW" + str(l + 1)] + (1 - beta) * grads["dW" + str(l + 1)]
v["db" + str(l + 1)] = beta * v["db" + str(l + 1)] + (1 - beta) * grads["db" + str(l + 1)]
# update parameters
parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v["dW" + str(l + 1)]
parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v["db" + str(l + 1)]
### END CODE HERE ###
return parameters, v
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = v" + str(v["db2"]))
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters):
"""
Initializes v and s as two python dictionaries with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
Returns:
v -- python dictionary that will contain the exponentially weighted average of the gradient.
v["dW" + str(l)] = ...
v["db" + str(l)] = ...
s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
s["dW" + str(l)] = ...
s["db" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
s = {}
# Initialize v, s. Input: "parameters". Outputs: "v, s".
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
v["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
v["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape)
s["dW" + str(l + 1)] = np.zeros(parameters["W" + str(l + 1)].shape)
s["db" + str(l + 1)] = np.zeros(parameters["b" + str(l + 1)].shape)
### END CODE HERE ###
return v, s
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate=0.01,
beta1=0.9, beta2=0.999, epsilon=1e-8):
"""
Update parameters using Adam
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam counter, the number of update steps taken so far (used for bias correction)
learning_rate -- the learning rate, scalar.
beta1 -- Exponential decay hyperparameter for the first moment estimates
beta2 -- Exponential decay hyperparameter for the second moment estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
Returns:
parameters -- python dictionary containing your updated parameters
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
"""
L = len(parameters) // 2 # number of layers in the neural networks
v_corrected = {} # Initializing first moment estimate, python dictionary
s_corrected = {} # Initializing second moment estimate, python dictionary
# Perform Adam update on all parameters
for l in range(L):
# Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l + 1)] = beta1 * v["dW" + str(l + 1)] + (1 - beta1) * grads["dW" + str(l + 1)]
v["db" + str(l + 1)] = beta1 * v["db" + str(l + 1)] + (1 - beta1) * grads["db" + str(l + 1)]
### END CODE HERE ###
# Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
### START CODE HERE ### (approx. 2 lines)
v_corrected["dW" + str(l + 1)] = v["dW" + str(l + 1)] / (1 - beta1 ** t)
v_corrected["db" + str(l + 1)] = v["db" + str(l + 1)] / (1 - beta1 ** t)
### END CODE HERE ###
# Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
### START CODE HERE ### (approx. 2 lines)
s["dW" + str(l + 1)] = beta2 * s["dW" + str(l + 1)] + (1 - beta2) * (grads["dW" + str(l + 1)] ** 2)
s["db" + str(l + 1)] = beta2 * s["db" + str(l + 1)] + (1 - beta2) * (grads["db" + str(l + 1)] ** 2)
### END CODE HERE ###
# Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
### START CODE HERE ### (approx. 2 lines)
s_corrected["dW" + str(l + 1)] = s["dW" + str(l + 1)] / (1 - beta2 ** t)
s_corrected["db" + str(l + 1)] = s["db" + str(l + 1)] / (1 - beta2 ** t)
### END CODE HERE ###
# Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters".
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v_corrected["dW" + str(l + 1)] / (np.sqrt(s_corrected["dW" + str(l + 1)]) + epsilon)
parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v_corrected["db" + str(l + 1)] / (np.sqrt(s_corrected["db" + str(l + 1)]) + epsilon)
### END CODE HERE ###
return parameters, v, s
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
train_X, train_Y = load_dataset()
plt.show()
def model(X, Y, layers_dims, optimizer, learning_rate=0.0007, mini_batch_size=64, beta=0.9,
beta1=0.9, beta2=0.999, epsilon=1e-8, num_epochs=10000, print_cost=True):
"""
3-layer neural network model which can be run in different optimizer modes.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
layers_dims -- python list, containing the size of each layer
learning_rate -- the learning rate, scalar.
mini_batch_size -- the size of a mini batch
beta -- Momentum hyperparameter
beta1 -- Exponential decay hyperparameter for the past gradients estimates
beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
num_epochs -- number of epochs
print_cost -- True to print the cost every 1000 epochs
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(layers_dims) # number of layers in the neural networks
costs = [] # to keep track of the cost
t = 0 # initializing the counter required for Adam update
seed = 10 # For grading purposes, so that your "random" minibatches are the same as ours
m = X.shape[1] # number of training examples
# Initialize parameters
parameters = initialize_parameters(layers_dims)
# Initialize the optimizer
if optimizer == "gd":
pass # no initialization required for gradient descent
elif optimizer == "momentum":
v = initialize_velocity(parameters)
elif optimizer == "adam":
v, s = initialize_adam(parameters)
# Optimization loop
for i in range(num_epochs):
# Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
seed = seed + 1
minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
cost_total = 0
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# Forward propagation
a3, caches = forward_propagation(minibatch_X, parameters)
# Compute cost and add to the cost total
cost_total += compute_cost(a3, minibatch_Y)
# Backward propagation
grads = backward_propagation(minibatch_X, minibatch_Y, caches)
# Update parameters
if optimizer == "gd":
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
elif optimizer == "momentum":
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
elif optimizer == "adam":
t = t + 1 # Adam counter
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
t, learning_rate, beta1, beta2, epsilon)
cost_avg = cost_total / m
# Print the cost every 1000 epoch
if print_cost and i % 1000 == 0:
print("Cost after epoch %i: %f" % (i, cost_avg))
if print_cost and i % 100 == 0:
costs.append(cost_avg)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('epochs (per 100)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
|
[
"877649518@qq.com"
] |
877649518@qq.com
|
08d2b69ed9e737d8ee5c1f6d6389ece08b8737c4
|
74dd16cb3d4181d4b7b0d1bcfa3aa0c3a617548b
|
/src/utilities/video_metadata.py
|
1545ef9a99282ca5b7d66e4802fd2bebed2c4ba0
|
[] |
no_license
|
eliasnieminen/vgs-data-annotation
|
4b87a6ece64eb83b6d5d43d34825539a1ff27fc6
|
a6569cb52017b88beffa8c1a1332acd9b340646f
|
refs/heads/main
| 2023-07-20T05:13:57.492399
| 2021-08-31T12:16:19
| 2021-08-31T12:16:19
| 398,779,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
import math
from typing import Optional, Union, Dict
class VideoMetadata:
"""Metadata object for videos.
"""
def __init__(self,
dur: Optional[Union[None, float]] = None,
fps: Optional[Union[None, float]] = None,
metadata: Optional[Union[None, Dict]] = None):
self.dur = dur
self.framerate = fps
self.video_metadata = metadata
def set_duration(self, duration):
self.dur = duration
def set_fps(self, fps):
self.framerate = fps
def set_video_metadata(self, metadata):
self.video_metadata = metadata
@property
def duration(self):
return self.dur
@property
def fps(self):
return self.framerate
@property
def metadata(self):
return self.video_metadata
@property
def frame_count(self):
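        # Assumes framerate and duration have already been set; floor() drops any
        # trailing partial frame.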
return math.floor(self.framerate * self.duration)
|
[
"elias.nieminen@tuni.fi"
] |
elias.nieminen@tuni.fi
|
661b9aa3fceb522de6be34632dd648f5060f74c3
|
a69b96621abef181606fd3d68eebaa5b655ed529
|
/Lesson3problem2.py
|
baf178f6055c80d571cdd52706d09817b15fd3d5
|
[] |
no_license
|
agonzalez33/Lesson3
|
3724be5e602a9befafe72be8570a5e3cc6ab2ec0
|
cc7dfe7a43b3691141a1d9f81d5939d5134a23e3
|
refs/heads/master
| 2020-05-03T04:00:58.346507
| 2019-03-29T13:35:45
| 2019-03-29T13:35:45
| 178,411,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
for integer in range(1000,0,-1):
print(integer)
print('Blast off!')
|
[
"noreply@github.com"
] |
noreply@github.com
|
be3571f979caec5427a8a79884e69c6e57bd6ecf
|
27c1ba6ed6c1586a348cdcfe26d17be13ae38b72
|
/scripts/hello_world_pallavisavant.py
|
c4a681787dd9cfab9a8e69eddfd8f12183f68509
|
[] |
no_license
|
codewithgauri/HacktoberfestPR2020
|
4299f2ae8f44b31c6ecbeaefa058fde26327a253
|
335310f3d81029938d119e15d3f1a131d745d3f2
|
refs/heads/master
| 2022-12-30T20:28:41.937632
| 2020-10-26T06:47:24
| 2020-10-26T06:47:24
| 307,281,958
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 60
|
py
|
#python code to print 'Hello World" :)
print("Hello World")
|
[
"pallavisavant02@gmail.com"
] |
pallavisavant02@gmail.com
|
eeaa16fb67a5f6fb3382537928469d161d2ee20e
|
985be2d2d979c1d5ffbd6cd73d9da711951e4f1c
|
/chat/consumers.py
|
a2ffba5a9b3687a82d2473cd12f884a487b2d806
|
[] |
no_license
|
sreesh-mallya/django-channels-demo
|
6a1492c2ffe3a8f37782ced19562c629fa65ee8f
|
8a3ac7d3e04ecd8c5053009f760d84e3b9415882
|
refs/heads/master
| 2021-01-23T22:16:04.353634
| 2017-09-19T03:44:00
| 2017-09-19T03:44:00
| 102,924,295
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
import re
import json
import logging
from channels import Group
from channels.sessions import channel_session
from .models import Room
@channel_session
def ws_connect(message):
# Could use more error handling here
prefix, label = message['path'].strip('/').split('/')
try:
room = Room.objects.get(label=label)
message.reply_channel.send({"accept": True}) # Accept connection
print('Room : %s' % room.label)
except Room.DoesNotExist:
print('Room with label %s does not exist!' % label)
return
Group('chat-' + label).add(message.reply_channel)
message.channel_session['room'] = room.label
print(message.keys())
@channel_session
def ws_receive(message):
# Could use more error handling here
label = message.channel_session['room']
try:
room = Room.objects.get(label=label)
    except Room.DoesNotExist:
print('Room with label %s does not exist!' % label)
return
# Get text message, and parse to json; throw any errors if any
try:
data = json.loads(message['text'])
except ValueError:
print('Oops! Your message isn\'t in json!')
return
# Make sure data is in proper format, i.e, { 'handle': ... , 'message': ... }
if set(data.keys()) != {'handle', 'message'}:
print('Improper message format : %s ', data)
return
msg = room.messages.create(handle=data['handle'], message=data['message'])
response = json.dumps(msg.as_dict())
Group('chat-' + label).send({'text': response})
@channel_session
def ws_disconnect(message):
print('disconnecting')
label = message.channel_session['room']
Group('chat-' + label).discard(message.reply_channel)
|
[
"sreeshsmallya@gmail.com"
] |
sreeshsmallya@gmail.com
|
9c49c4755281a3c8a9b671df5099d752953dc5ec
|
b4ef8fcaf8e8818215add4402efadfef9bda45ee
|
/sample_code/python/vessels_v2_graphql/run.py
|
e393f1cdfdf590796dd7068877f0fcb1a4f2fb1a
|
[] |
no_license
|
ykparkwixon/maritime
|
aebbbfe8e2f1ebb1bf2dbb01a94127977251285c
|
378834c9b521ff538395c36e377117c87760fe22
|
refs/heads/main
| 2023-07-17T07:00:17.651206
| 2021-08-30T22:18:14
| 2021-08-30T22:18:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,062
|
py
|
import yaml
import json
import csv
from loguru import logger
from utilities import paging, helpers
from gql import gql
logger.add('demo_client.log', rotation="500 MB", retention="10 days", level='DEBUG')
rows_written_to_raw_log: int = 0
rows_written_to_csv: int = 0
pages_processed: int = 0
wrote_csv_header = False
def get_settings():
"""Reads the settings.yaml file and returns the variables and values
:returns data: setting variables and values
:rtype data: dict
"""
with open('settings.yaml') as f:
data: dict = yaml.load(f, Loader=yaml.FullLoader)
return data
def read_query_file():
settings = get_settings()
file_name = settings['name_of_gql_query_file']
with open(file_name, 'r') as f:
return f.read()
def write_raw(data: dict):
    global rows_written_to_raw_log
    settings = get_settings()
    name_of_raw_output_file = settings['name_of_raw_output_file']
    if not name_of_raw_output_file:
        return
    with open(name_of_raw_output_file, 'a+') as f:
        f.write(json.dumps(data, indent=4))
    rows_written_to_raw_log += 1  # keep the counter reported by get_info() in sync
def write_csv(data: dict):
global rows_written_to_csv, wrote_csv_header
settings = get_settings()
name_of_csv_file = settings['name_of_csv_file']
if not name_of_csv_file:
return
members = helpers.get_vessels_v2_members()
# get just the keys
csv_columns: list = [i[0] for i in members]
try:
with open(name_of_csv_file, 'a+') as f:
writer = csv.DictWriter(f, fieldnames=csv_columns)
logger.debug(f"WROTE HEADER: {wrote_csv_header}")
if not wrote_csv_header:
writer.writeheader()
wrote_csv_header = True
item: dict
for item in data:
writer.writerow(item)
rows_written_to_csv += 1
except Exception:
raise
def get_info():
info = f"""
TOTAL PAGES WRITTEN TO RAW LOG: {rows_written_to_raw_log}
TOTAL ROWS WRITTEN TO CSV: {rows_written_to_csv}
TOTAL PAGES PROCESSED: {pages_processed}"""
return info
def run():
global pages_processed
settings = get_settings()
test_name = settings['test_name']
pages_to_process = settings['pages_to_process']
# make a client connection
client = helpers.get_gql_client()
# read file
query = read_query_file()
if not "pageInfo" or not "endCursor" or not "hasNextPage" in query:
logger.error("Please include pageInfo in the query, it is required for paging. See the README.md")
return
response: dict = dict()
try:
response = client.execute(gql(query))
except BaseException as e:
logger.error(e)
raise
# initialize paging
pg = paging.Paging(response=response)
schema_members = helpers.get_vessels_v2_members()
# page, write, util complete
logger.info("Paging started")
while True:
response, hasNextPage = pg.page_and_get_response(client, query)
logger.debug(f"hasNextPage: {hasNextPage}")
if response:
write_raw(response)
csv_data = helpers.transform_response_for_loading(response=response, schema=schema_members, test_name=test_name)
if csv_data:
write_csv(csv_data)
pages_processed += 1
logger.info(f"Page: {pages_processed}")
if pages_to_process == 1:
break
elif pages_to_process:
if not hasNextPage or not response:
break
if pages_processed >= pages_to_process:
break
elif not hasNextPage or not response:
break
else:
logger.info("Did not get data for csv, either because there are no more pages, or did not get a response")
break
else:
logger.info("No response or no more responses")
break
logger.info(get_info())
if __name__ == '__main__':
run()
logger.info("Done")
|
[
"78374623+brucebookman@users.noreply.github.com"
] |
78374623+brucebookman@users.noreply.github.com
|
e0aa72eb56790380371681952975423a0c147795
|
1a856152b3ab65a8a0cc5cbedf0492d1c3716d27
|
/dropout_acnd_pe_noprior_nochans.py
|
2eda907f0b1b6f9a40b45eb2cb3e98dbdb1b24e5
|
[] |
no_license
|
stablum/thesis
|
272f7f23ad1ad454c9310775b969bb54c84c9ea0
|
5c06d78322ddd6e1b8c214261ea6e4464a094bad
|
refs/heads/master
| 2021-07-23T04:29:28.438657
| 2018-08-18T18:59:11
| 2018-08-18T18:59:11
| 60,299,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,460
|
py
|
#!/usr/bin/env python3
import scipy
import ipdb
import theano
from theano import tensor as T
import lasagne
import numpy as np
import random
import sys
from tqdm import tqdm
tqdm.monitor_interval = 0
import ipdb
# local imports
import movielens
import cftools
import config
import numutils as nu
import augmented_types as at
import activation_functions
import update_algorithms
import model_build
update =update_algorithms.get_func()
adam_shared = lasagne.updates.adam # FIXME: generalize like the 'update' placeholder
#g = lambda x:x
g_in = activation_functions.get(config.g_in)
g_rij = activation_functions.get(config.g_rij)
sigma = 1.
sigma_u = 100.
sigma_v = 1000.
chan_out_dim = config.chan_out_dim
hid_dim = config.hid_dim
#log = print
log = lambda *args: print(*args)#None
def main():
dataset = movielens.load(config.movielens_which)
U,V = cftools.UV_vectors_np(dataset)
U_t, U_m, U_v = update_algorithms.adam_for(U)
V_t, V_m, V_v = update_algorithms.adam_for(V)
def make_predict_to_1(ui,vj):
#o_ui,net_ui_params = make_net(ui,config.K,hid_dim,chan_out_dim,"net_u",g_in,g_in)
#o_ui.name = "o_ui"
#o_vj,net_vj_params = make_net(vj,config.K,hid_dim,chan_out_dim,"net_v",g_in,g_in)
#o_vj.name = "o_vj"
comb = T.concatenate([ui,vj],axis=1)
comb.name = "comb"
prediction_det, prediction_lea, net_comb_params, regularizer_term = model_build.make_net(comb,2*chan_out_dim,hid_dim,1,"net_comb",g_in,g_rij)
prediction_det.name = "prediction_det"
prediction_lea.name = "prediction_lea"
return prediction_det, prediction_lea, net_comb_params, regularizer_term
def make_predict_to_5(predict_to_1_sym):
ret = (predict_to_1_sym * (config.max_rating - 1. )) + 1.
return ret
def make_objective_term(ui_mb,vj_mb,Rij_mb,predict_to_1_sym,regularizer_term):
eij = ( Rij_mb - predict_to_1_sym ) ** 2
ret = 0.5 * 1./(sigma**2) * eij # error term (gaussian centered in the prediction)
# 0-mean gaussian prior on the latent feature vector.
# since this term refers to a specific <ui,vj> tuple, then
# the update following the prior quantity has to be divided
# by how many terms (error term) contain that vector
#coef_u = T.constant(0.5/(dataset.N_compressed * sigma_u),"coef_u")
#sqsum_u = T.sum(ui_mb**2,axis=1,keepdims=True)
#sqsum_u.name = "sqsum_u"
#term_u = coef_u * sqsum_u
#term_u.name = "term_u"
#ret = ret + term_u
#coef_v = T.constant(0.5/(dataset.M_compressed * sigma_v),"coef_v")
#sqsum_v = T.sum(vj_mb**2,axis=1,keepdims=True)
#sqsum_v.name = "sqsum_v"
#term_v = coef_v * sqsum_v
#term_v.name = "term_v"
#ret = ret + term_v
#ret.name = "obj_before_sum"
ret = T.sum(ret) # on all axes: cost needs to be a scalar
ret.name = "obj_after_sum"
if config.regularization_lambda > 0:
ret = ret + config.regularization_lambda * regularizer_term
ret.name = "obj_with_regularizer"
return ret
print("creating update functions..")
ui_mb_sym = T.fmatrix('ui_mb')
vj_mb_sym = T.fmatrix('vj_mb')
Rij_mb_sym = T.fmatrix('Rij_mb')
t_mb_prev_sym = T.fmatrix('t_mb_prev')
t_mb_prev_sym = T.addbroadcast(t_mb_prev_sym,1)
m_mb_prev_sym = T.fmatrix('m_mb_prev')
v_mb_prev_sym = T.fmatrix('v_mb_prev')
predict_to_1_sym_det, predict_to_1_sym_lea, params, regularizer_term = make_predict_to_1(ui_mb_sym,vj_mb_sym)
# instead of calculating a different count of latent vectors of each
# (other side) latent vector, a global estimate (average) is performed
obj_term = make_objective_term(ui_mb_sym,vj_mb_sym,Rij_mb_sym,predict_to_1_sym_lea, regularizer_term)
grads_ui = T.grad(obj_term, ui_mb_sym)
grads_vj = T.grad(obj_term, vj_mb_sym)
grads_params = [
T.grad(obj_term,curr)
for curr
in params
]
updates_kwargs = dict(t_prev=t_mb_prev_sym,m_prev=m_mb_prev_sym,v_prev=v_mb_prev_sym)
new_for_ui = list(update(ui_mb_sym,grads_ui,**updates_kwargs))
new_for_vj = list(update(vj_mb_sym,grads_vj,**updates_kwargs))
params_updates = adam_shared(grads_params,params,learning_rate=config.lr_begin)
common = [ t_mb_prev_sym,m_mb_prev_sym,v_mb_prev_sym,Rij_mb_sym,ui_mb_sym,vj_mb_sym ]
ui_update_fn = theano.function(common,new_for_ui)
ui_update_fn.name="ui_update_fn"
vj_update_fn = theano.function(common,new_for_vj)
vj_update_fn.name="vj_update_fn"
params_update_fn = theano.function([Rij_mb_sym,ui_mb_sym,vj_mb_sym],[], updates=params_updates)
params_update_fn.name = "params_update_fn"
predict_to_5_fn = theano.function([ui_mb_sym,vj_mb_sym], [make_predict_to_5(predict_to_1_sym_det)])
predict_to_5_fn.name="predict_to_5_fn"
predict_to_1_fn = theano.function([ui_mb_sym,vj_mb_sym], [predict_to_1_sym_det])
predict_to_1_fn.name="predict_to_1_fn"
ui_mb_l = []
vj_mb_l = []
Rij_mb_l = []
U_t_mb_l = []
U_m_mb_l = []
U_v_mb_l = []
V_t_mb_l = []
V_m_mb_l = []
V_v_mb_l = []
indices_mb_l = []
def train_with_datapoint(i,j,Rij,lr):
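        # Buffer each (i, j, Rij) datapoint together with the Adam state of the
        # corresponding U and V rows; once config.minibatch_size rows are buffered,
        # stack them into float32 matrices, run one update step for the U rows, the
        # V rows and the shared network parameters, write the results back and
        # clear the buffers.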
nonlocal indices_mb_l
nonlocal ui_mb_l
nonlocal vj_mb_l
nonlocal Rij_mb_l
nonlocal U_t_mb_l
nonlocal U_m_mb_l
nonlocal U_v_mb_l
nonlocal V_t_mb_l
nonlocal V_m_mb_l
nonlocal V_v_mb_l
indices_mb_l.append((i,j))
ui_mb_l.append(U[i])
vj_mb_l.append(V[j])
Rij_mb_l.append(Rij)
U_t_mb_l.append(U_t[i])
U_m_mb_l.append(U_m[i])
U_v_mb_l.append(U_v[i])
V_t_mb_l.append(V_t[j])
V_m_mb_l.append(V_m[j])
V_v_mb_l.append(V_v[j])
if len(ui_mb_l) >= config.minibatch_size:
ui_mb = np.vstack(ui_mb_l).astype('float32')
#print('ui_mb.shape',ui_mb.shape)
vj_mb = np.vstack(vj_mb_l).astype('float32')
#print('vj_mb.shape',vj_mb.shape)
Rij_mb = np.vstack(Rij_mb_l).astype('float32')
#print('Rij_mb.shape',Rij_mb.shape)
U_t_mb = np.vstack(U_t_mb_l ).astype('float32')
#print('U_t_mb.shape',U_t_mb.shape)
U_m_mb = np.vstack(U_m_mb_l ).astype('float32')
#print('U_m_mb.shape',U_m_mb.shape)
U_v_mb = np.vstack(U_v_mb_l ).astype('float32')
#print('U_v_mb.shape',U_v_mb.shape)
V_t_mb = np.vstack(V_t_mb_l ).astype('float32')
V_m_mb = np.vstack(V_m_mb_l ).astype('float32')
V_v_mb = np.vstack(V_v_mb_l ).astype('float32')
Rij_mb = (Rij_mb - 1.) / (config.max_rating - 1.)
#log("Rij_mb",Rij_mb)
#log("predict_to_1_fn",predict_to_1_fn(ui_mb,vj_mb))
#log("predict_to_5_fn",predict_to_5_fn(ui_mb,vj_mb))
#print("before ui_update_fn, vj_mb.shape=",vj_mb.shape)
#print("before ui_update_fn, ui_mb.shape=",ui_mb.shape)
new_ui_mb, new_U_t_mb, new_U_m_mb, new_U_v_mb = ui_update_fn(
U_t_mb,U_m_mb,U_v_mb,Rij_mb,ui_mb,vj_mb
)
#log("ui_mb",ui_mb,"new_ui_mb",new_ui_mb,"diff",ui_mb-new_ui_mb)
#print("before vj_update_fn, vj_mb.shape=",vj_mb.shape)
#print("before vj_update_fn, ui_mb.shape=",ui_mb.shape)
new_vj_mb, new_V_t_mb, new_V_m_mb, new_V_v_mb = vj_update_fn(
V_t_mb,V_m_mb,V_v_mb,Rij_mb,ui_mb,vj_mb
)
#log("vj_mb",vj_mb,"new_vj_mb",new_vj_mb,"diff",vj_mb-new_vj_mb)
for pos,(i,j) in enumerate(indices_mb_l):
U[i] = new_ui_mb[pos,:]
V[j] = new_vj_mb[pos,:]
U_t[i] = new_U_t_mb[pos,:]
U_m[i] = new_U_m_mb[pos,:]
U_v[i] = new_U_v_mb[pos,:]
V_t[j] = new_V_t_mb[pos,:]
V_m[j] = new_V_m_mb[pos,:]
V_v[j] = new_V_v_mb[pos,:]
params_update_fn(Rij_mb,ui_mb,vj_mb)
ui_mb_l = []
vj_mb_l = []
Rij_mb_l = []
U_t_mb_l = []
U_m_mb_l = []
U_v_mb_l = []
V_t_mb_l = []
V_m_mb_l = []
V_v_mb_l = []
indices_mb_l = []
print("training pmf...")
cftools.mainloop(train_with_datapoint,dataset,U,V,predict_to_5_fn)
if __name__=="__main__":
main()
|
[
"stablum@gmail.com"
] |
stablum@gmail.com
|
1f276f5a21289f070e9ebfcc655a747a3d1cd3b1
|
0104f7736632084592cd6ced20de0be9fb9e24ac
|
/剑指offer/构建乘积数组.py
|
09d398118fb634a3669c2c9da9a75f98947ad262
|
[] |
no_license
|
longkun-uestc/examination
|
9eb63b6e8ffdb503a90a6be3d049ad2fdb85e46c
|
ef1d29a769f2fd6d517497f8b42121c02f8307cc
|
refs/heads/master
| 2021-06-25T23:11:24.460680
| 2021-06-23T03:28:55
| 2021-06-23T03:28:55
| 228,847,479
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
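# Build B where B[i] is the product of every element of A except A[i], without using
# division: one left-to-right prefix-product pass, one right-to-left suffix-product
# pass, then an elementwise multiply -- O(n) time with O(n) extra space.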
class Solution:
def multiply(self, A):
left_to_right = [-1]*len(A)
right_to_left = [-1]*len(A)
left_to_right[0] = 1
right_to_left[-1] = 1
for i in range(1, len(left_to_right)):
left_to_right[i] = left_to_right[i-1] * A[i-1]
for i in range(len(right_to_left)-2, -1, -1):
right_to_left[i] = right_to_left[i+1] * A[i+1]
# B = [1]*len(A)
# for i in range(len(B)):
# B[i] = left_to_right[i] * right_to_left[i]
B = [a*b for a, b in zip(left_to_right, right_to_left)]
# print(left_to_right)
# print(right_to_left)
# print(B)
return B
if __name__ == '__main__':
s = Solution()
s.multiply([2,3,4,5, 6])
|
[
"1256904448@qq.com"
] |
1256904448@qq.com
|
3774ffc4fdcb9c86ca755421da7b371e9f1e7d2c
|
6f61a105f85f9e4b6b98494b45e96d3099402449
|
/kapool/settings.py
|
7993700b0e3d7208e963c459ba82c5a60ebc6863
|
[
"MIT"
] |
permissive
|
Marah-uwase/carpool
|
00ca3b230fbe2bfabb4660cbf8974a902dadc85b
|
6ee69e1ad48352a4d1f59f372b41a2891fc58ec7
|
refs/heads/models
| 2023-02-25T19:52:44.180676
| 2021-02-02T08:22:05
| 2021-02-02T08:22:05
| 334,284,598
| 0
| 0
| null | 2021-02-01T13:21:49
| 2021-01-29T23:14:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,767
|
py
|
"""
Django settings for kapool project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import cloudinary
import cloudinary.uploader
import cloudinary.api
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(c94#zgkxgtwila5*$=yss0nngan+b9l9&r1+#nrd=cd849p76'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'carpool',
'app',
'tinymce',
'bootstrap4',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kapool.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kapool.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'karpool',
'USER': 'maranatha',
'PASSWORD':'maman',
}
}
cloudinary.config(
api_key = 'AIzaSyCv9Yc1eQAYKqm3qXBpUBfEa-CYW9CVoTQ',
)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL='/'
LOGOUT_REDIRECT_URL = '/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
)
}
|
[
"maranahuwase12@gmail.com"
] |
maranahuwase12@gmail.com
|
bd21bc1c8fa779e7d91a63e97ee2f3b07852e152
|
756504caae02535f359baa1bd232038979f5b3b5
|
/AIA/scanm/apps.py
|
f211b8310e63fa9572dfb5eceffc97ba91744cd3
|
[] |
no_license
|
dante993/scantit
|
b75aac717e68cea25e17a40c44e719c95d0f0376
|
9a447f02af9f23b433bafdd02de852bd1c4e4d9e
|
refs/heads/master
| 2021-01-13T15:04:49.853328
| 2017-05-29T12:52:54
| 2017-05-29T12:52:54
| 79,124,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ScanmConfig(AppConfig):
name = 'scanm'
|
[
"danteejmg@gmail.com"
] |
danteejmg@gmail.com
|
0a185392606cd314acb4f13f45994b76855c9a6c
|
500e5426adf70162cc75ae99be0743129639e4c7
|
/gathering_server/gathering/apps.py
|
56de823fb91dd525090a6cd2e82d8a62295c84ae
|
[] |
no_license
|
L3oNav/gathering_server
|
8b06ff4f176c6dfe3bc7f5c27bce0c9b4dfae8cb
|
b708fa831b6b5b227bafebd3ea302bcfa35adc46
|
refs/heads/main
| 2023-04-01T02:09:11.083248
| 2021-02-18T01:54:23
| 2021-02-18T01:54:23
| 339,837,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class GatheringConfig(AppConfig):
name = "gathering_server.gathering"
verbose_name = _("Gathering")
def ready(self):
try:
import gathering_server.gathering.signals # noqa F401
except ImportError:
pass
|
[
"L3oNav@outlook.com"
] |
L3oNav@outlook.com
|
b101ab8c3181c1392886b3ff9ddf5ba9a39dd257
|
7ff1ebbaaccd65665bb0fae19746569c043a8f40
|
/readcsv.py
|
28ea6bc771aee30752a72b42cf17a39c26ce3f94
|
[] |
no_license
|
Mutugiii/bot
|
1cbc8e8493de5a31b831722c698fd8c7b1f60bf5
|
3d4ccbf8dbe981e2c363ad6f2774deb2d34ac110
|
refs/heads/master
| 2022-12-11T22:04:53.040939
| 2020-05-20T11:09:54
| 2020-05-20T11:09:54
| 247,986,918
| 0
| 0
| null | 2022-12-08T09:33:14
| 2020-03-17T14:11:54
|
Python
|
UTF-8
|
Python
| false
| false
| 77
|
py
|
import pandas
dataformat = pandas.read_csv('csv/data.csv')
print(dataformat)
|
[
"mutugimutuma@gmail.com"
] |
mutugimutuma@gmail.com
|
b4430e26ab1dde9f74b12f200a1896351cd2722b
|
4d65f85fb8fba5a3d6582ccbf9d38042ec1ec422
|
/代码1/hotdog_war.py
|
f177dd62bdbf8984d14a17f21671d5aee76aa8df
|
[] |
no_license
|
qsten/game
|
f50756d001116f41cfdf7715ee061a3dfa3f9400
|
86164c3dcec869b85aaa777105c7faf738dd8e1f
|
refs/heads/master
| 2020-04-28T10:56:36.871862
| 2019-05-19T13:18:46
| 2019-05-19T13:18:46
| 175,218,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,001
|
py
|
import pygame
from player import Player
from settings import Settings
import game_functions as gf
from pygame.sprite import Group
from game_stats import GameStats
from button import Button
from scoreboard import Scoreboard
from lifeboard import Lifeboard
from music_button import Music_button
from stop_button import Stop_button
from statistics_board import Statistics_board
from Restart_button import Restart_button
from return_button import Return_button
from rank_button import Rank_button
def run_game():
    # Initialize the game and create a screen object
ai_settings=Settings()
screen=pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height))
pygame.display.set_caption('hotdog_invasion')
play_button=Button(screen)
player=Player(ai_settings,screen)
hotdogs=Group()
bombs=Group()
stats=GameStats(ai_settings)
sb=Scoreboard(screen,stats)
life=Lifeboard(screen,stats)
music_button = Music_button(screen)
stop_button=Stop_button(screen,stats)
restart_button=Restart_button(screen,stats)
statistics_board=Statistics_board(screen, stats)
return_button=Return_button(screen,stats)
rank_button=Rank_button(screen)
    # Start the game's main loop
while True:
gf.check_events(ai_settings,screen,stats,play_button,player,hotdogs,bombs,music_button,stop_button,restart_button,return_button,rank_button)
if stats.game_active:
gf.create_hotdogs(ai_settings, screen, hotdogs)
gf.create_bombs(ai_settings, screen, bombs)
player.update(stats)
gf.update_hotdog(ai_settings,stats,sb,player,hotdogs)
gf.update_bomb(screen,stats, player, bombs,statistics_board,rank_button)
music_button.music_play()
gf.update_screen(screen,stats,sb,life,player,hotdogs,bombs,play_button,music_button,stop_button,restart_button,return_button,rank_button)
if __name__=='__main__':
run_game()
|
[
"noreply@github.com"
] |
noreply@github.com
|
367ec183a847084b29dd59bd79ca5db7e7418f61
|
fb46511d2fa968e6a2e74a20a67ace59819e15dd
|
/ProcessedData/trial.py
|
781f802ca1ea3ca3e8a244cc236a45615f979a0c
|
[] |
no_license
|
PushA308/QPQCT
|
6505f18907f8af3d9be24ebf21a01cc6603d657a
|
6bd99690df213860a4af83f142423e64fa57c34a
|
refs/heads/master
| 2020-04-27T05:03:55.061701
| 2019-03-06T05:02:51
| 2019-03-06T05:02:51
| 174,071,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,423
|
py
|
import csv
import nltk
import shutil
import os
import sys
import traceback
import win32com.client #pip install pywin32
import nltk #pip install nltk
#nltk.download('punkt')
#nltk.download('averaged_perceptron_tagger')
#include other ntlk packages, if asked for.
##########################################
#initialized variables
##########################################
question_no_column = 7
start_index = 16
def process_question_paper(ques_paper_path) :
fd = open(os.path.join(os.getcwd(),"processed_data.csv"),'w')
headers = "Marks, CO_Type, Module No, Question Type, Question No, SUb Que No., Question"
fd.write(headers + '\n')
for root, dirs,files in os.walk(ques_paper_path) :
for file in files:
if file.endswith('.xls') :
file_path = os.path.join(root,file)
try:
excel = win32com.client.Dispatch('Excel.Application')
workbook = excel.Workbooks.open(file_path)
sheet = workbook.WorkSheets('QuestionPaper')
for start_row in range(start_index, 50):
try:
row, col = start_row, question_no_column
question = sheet.Cells(row, col).value
if question is not None:
row, col = start_row, question_no_column + 1
marks = str(sheet.Cells(row, col).value)
row, col = start_row, question_no_column + 2
co_type = str(sheet.Cells(row, col).value)
row, col = start_row, question_no_column + 4
module_no = str(sheet.Cells(row, col).value)
row, col = start_row, question_no_column - 5
question_type = sheet.Cells(row, col).value
row, col = start_row, question_no_column - 2
question_no = sheet.Cells(row, col).value
row, col = start_row, question_no_column - 1
sub_question_no = sheet.Cells(row, col).value
row, col = start_row, question_no_column - 2
question_no = sheet.Cells(row, col).value
row, col = start_row, question_no_column
question = sheet.Cells(row, col).value
print (question+'\n')
fd.write(marks + ','+co_type + ',' + module_no + ',' +question_type + ','+ question_no + ',' + sub_question_no + ',' +question + '\n')
else:
pass
except Exception as e:
print ("hhj")
pass
workbook.Close(True)
except Exception as e:
print ("ERROR")
pass
fd.close()
def extract_verb(sentence):
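    # Tokenise and POS-tag the question, keep every verb (POS tag starting with 'V')
    # that is not a helping/auxiliary verb, and fall back to "N.A" when none is found;
    # the extracted verbs are also appended to File/question_verb.csv.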
helping_verbs = ['am','are','is','was','were','be','being','been','have','has','had','shall','will','do','does','did','may','might','must','can','could','would','should']
#sentence = "what is meant by data structure. it has been long time. i didn't do that"
tokens = nltk.word_tokenize(sentence)
tagged = nltk.pos_tag(tokens)
#print("tagged tokens:-")
#print(tagged)
length = len(tagged) - 1
#print(length)
a = list()
i=0
flg=0
for item in tagged:
if item[1][0] == 'V':
            if item[0] not in helping_verbs:
a.append(item[0])
#print(item[0])
flg=1
if flg==0:
a.append("N.A")
#print(a)
with open("File/question_verb.csv","a",newline='') as csvfile:
spamwriter = csv.writer(csvfile)
spamwriter.writerow(a)
return a;
#analysis of question paper using verbs
def final(k):
fs = open("File/level.csv","r")
reader = csv.reader(fs)
i=0
a=list()
b=0
d=0
for row in reader:
with open("File/Value_level.csv","a",newline='') as csvfile:
spamwriter = csv.writer(csvfile)
spamwriter.writerow(max(row))
a.append(max(row))
i=i+1
fs.close()
if k==1:
res=list(map(int,a))
print(res)
p=sum(res)
print(i)
print(p)
l=int(input("input the level from 1-6: "))
k=(p/(i*l))*100
print("Average quality per question is {}".format(k))
fs.close()
#calculating verb's level using bloom's taxonomy
def calculate_level(line):
fs = open("File/bloom verbs.csv","r")
    reader = csv.reader(fs)
    rows = list(reader)  # materialise once: a csv reader can only be iterated a single time
#included_cols = [1]
a=list()
b=list()
#row = next(reader)
#print(line)
flg=0
for word in line:
i=1
        for row in rows:
if word.lower() in row:
a.append(i)
flg=1
i=i+1
if flg==0:
a.append(0)
#print(line,a,max(a))
with open("File/level.csv","a",newline='') as csvfile:
spamwriter = csv.writer(csvfile)
spamwriter.writerow(a)
fs.close()
def view_table():
f=open("File/your_csv1.csv","r")
#reader=csv.reader(f)
f1=open("File/question_verb.csv","r")
reader1=csv.reader(f1)
rows1 = list(reader1)
print("-------------------------------------------/n")
print(rows1)
#for row in reader1:
# print(row)
included_cols=[0]
included_cols1=[1]
included_cols2=[2]
i=1
def compare_Type():
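    # Compare the declared Bloom level of each question (File/bloom_type.csv) with the
    # level inferred from its verbs (File/Value_level.csv); the absolute difference is
    # reported as a letter grade, 'A' meaning an exact match.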
f1=open("File/bloom_type.csv","r")
f2=open("File/Value_level.csv","r")
r1 = list(f1)
length = len(r1)
r2 = list(f2)
sum=0
    for i in range(length):
        # one letter grade per question: 'A' for an exact level match, one letter
        # further down the alphabet per level of difference
        k=abs(int(r1[i])-int(r2[i]))
        sum=sum+k
        print(chr(ord('A') + k))
print("Avg quality per question: "+chr(ord('A')+int(sum/length)))
#Start:
if __name__ == "__main__" :
arg_cnt = len(sys.argv)
if arg_cnt > 1:
ques_paper_path = sys.argv[1]
process_question_paper(ques_paper_path)
else:
print ("Please provide question paper directory path !")
f = open("processed_data.csv","r")
reader = csv.reader(f)
#out_file = open("File\solution1.csv", "w")
#writer = csv.writer(out_file)
add=0
included_cols = [2]
included_cols1=[0]
row = next(reader)
for row in reader:
content = list(row[i] for i in included_cols) #selecting question
content1 = list(row[i] for i in included_cols1) #selecting question type
with open("File/bloom_type.csv","a",newline='') as csvfile:
spamwriter = csv.writer(csvfile)
if content1[0]=="remembering":
spamwriter.writerow("1")
elif content1[0]=="understanding":
spamwriter.writerow("2")
elif content1[0]=="applying":
spamwriter.writerow("3")
elif content1[0]=="analyzing":
spamwriter.writerow("4")
elif content1[0]=="evaluating":
spamwriter.writerow("5")
elif content1[0]=="creating":
spamwriter.writerow("6")
a=extract_verb(content[0])
print(a)
calculate_level(a)
k=int(input("Select the option for Analysis of Question Paper:\n1.Verbs\n2.Question Type\n3.Course Outcome"))
if k==1:
final(k)
elif k==2:
final(k)
compare_Type()
v=int(input("View Information:\n1.Question Paper\n2.Verbs\n3.Bloom's Level\n"))
if v==1:
f = open("File/UOS_paper.csv","r")
reader = csv.reader(f)
for row in reader:
print(row)
f.close()
elif v==2:
f = open("File/question_verb.csv","r")
reader = csv.reader(f)
for row in reader:
print(row)
f.close()
elif v==3:
f = open("File/Value_level.csv","r")
reader = csv.reader(f)
for row in reader:
print(row)
f.close()
#print(a)
f.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
111d16939c63cebf88383cf5a24501665490bbc1
|
0f7b8d2ae2c0e81941d5ca5fa4c8313cec8d1544
|
/endApi/migrations/0008_auto_20200904_0734.py
|
89d799dc142fa35893d751810280051c1fd1bddf
|
[] |
no_license
|
rajielijah/endpoint
|
5c061972cb8ab9fc089046dd9e71f194ee6e5aca
|
6db1d6c92d57fc143446d2c4df13664ffa5b1f2d
|
refs/heads/master
| 2022-12-27T23:58:22.232298
| 2020-10-01T11:46:45
| 2020-10-01T11:46:45
| 298,791,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
# Generated by Django 3.0.7 on 2020-09-04 07:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('endApi', '0007_post_image'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='feedupload'),
),
]
|
[
"rajielijah@gmail.com"
] |
rajielijah@gmail.com
|
b588764a31f012d092aa5fbb402b4f34eead4552
|
1071b46a6ea054a186ab5c270dfdba48362adf70
|
/Python/Examples/juego_adivina_el_numero.py
|
5231d40690c74099e97d77b93aef36975ff2fe0d
|
[] |
no_license
|
diegoldsv/technotes
|
5aaed2d6ef5037217a0c071b6f7b48b04d89d4fd
|
6cb0b90001c52438b74da72c02c664164938d7e9
|
refs/heads/main
| 2023-05-10T22:08:21.189916
| 2021-05-31T14:14:13
| 2021-05-31T14:14:13
| 351,212,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
import random
def run():
numero_aleatorio = random.randint(1,100)
numero_ingresado = input("Elige un número del 1 al 100: ")
numero_ingresado = int(numero_ingresado)
while numero_ingresado != numero_aleatorio:
if numero_ingresado < numero_aleatorio:
print("Busca un número más grande")
else:
print("Busca un número más pequeño")
numero_ingresado = input("Elige otro número: ")
numero_ingresado = int(numero_ingresado)
print("Ganaste!")
if __name__ == "__main__":
run()
|
[
"disalvatorediego@gmail.com"
] |
disalvatorediego@gmail.com
|
6c16d977d5da188d8203250fd478cfac76c891cc
|
85c9d6fdff58b9cb40f5fdb9f01ff1a0dd386113
|
/bot_tests/reminder.py
|
ef7aa772e1bbf39b40113c0d3d7e94d3036748d1
|
[] |
no_license
|
jmccormac01/karmafleet
|
5874644c496b0bbcb2037404ad7ed43a1e4caaae
|
57ebefbbc6ec3aae634cd9196950f103d48eae95
|
refs/heads/master
| 2020-03-25T17:24:39.187176
| 2019-04-20T18:17:05
| 2019-04-20T18:17:05
| 143,976,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,922
|
py
|
"""
Bot for converting EVE times to local timezones
"""
from datetime import datetime
from pytz import timezone
import discord
from discord.ext import commands
import asyncio
# pylint: disable=invalid-name
Client = discord.Client()
client = commands.Bot(command_prefix="!")
reminders = {}
async def reminder_handler(reminders):
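    # Poll roughly every 10 seconds; when a reminder's time has passed, message the
    # author and drop the entry. Deleting while iterating invalidates the dict
    # iterators, hence the break-out-and-rescan pattern below.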
await client.wait_until_ready()
while not client.is_closed:
broke = False
print('Checking reminders...')
print(reminders)
now = datetime.utcnow()
for a in reminders:
print('Checking for author {}'.format(a))
for t in reminders[a]:
if now > t:
print(a, reminders[a][t])
await client.send_message(a, reminders[a][t])
# remove the reminder from the list
del reminders[a][t]
broke = True
break
if broke:
break
await asyncio.sleep(10)
@client.event
async def on_ready():
"""
Simple print to say we're ready
"""
print('Ready for remembering stuff...')
@client.event
async def on_message(message):
"""
Handle incoming messages and convert time requests
"""
sp = message.content.split()
return_message = ""
error_count = 0
# check we want time conversion from eve time
if len(sp) >= 3 and sp[0].lower() == '!reminder':
author = message.author
await client.delete_message(message)
# split the command up
reminder_time = datetime.strptime(sp[1], '%Y-%m-%dT%H:%M')
note = ' '.join(sp[2:])
if author not in reminders.keys():
reminders[author] = {}
reminders[author][reminder_time] = note
print(reminders)
client.loop.create_task(reminder_handler(reminders))
client.run('NDk0OTQ2Mzg3ODM5MDI1MTYz.Do66Yw.nsleHS3S8UvbWdBugiDtPWHrIKY')
|
[
"jmccormac001@gmail.com"
] |
jmccormac001@gmail.com
|
a07905b07cfcf4e19974315b9839310a2d8f725c
|
d4a88b3b102e20e727cae8fbd4167dcb4b57d1ec
|
/additional_examples/py2exe_setup__basic_test.py
|
746c5610f04ff4682414ded9ad60376e1f9e3b2d
|
[
"MIT"
] |
permissive
|
viblo/pymunk
|
ca64888e45706db431788368ff8464edf2912d5f
|
20ac14f665fb38b4ef1bef5acea36a3d612dd0d5
|
refs/heads/master
| 2023-08-27T16:37:14.740653
| 2023-08-16T19:26:16
| 2023-08-16T19:26:16
| 13,273,472
| 855
| 255
|
MIT
| 2023-01-13T10:13:47
| 2013-10-02T14:36:46
|
Python
|
UTF-8
|
Python
| false
| false
| 218
|
py
|
"""Simple example of py2exe to create a exe of the no_dependencies example.
Tested on py2exe 0.13.0.0 on python 3.11
"""
import py2exe
py2exe.freeze(console=["no_dependencies.py"], options={"includes": ["pymunk"]})
|
[
"vb@viblo.se"
] |
vb@viblo.se
|
f84e7e892f22dcef23a66020fb69487611bee303
|
b37769515f7e078e2215be27a76a0ba199f7676e
|
/home/migrations/0003_remove_blog_slug.py
|
c37321351241594e4fb6b823fb4bc8ea1c54e86c
|
[] |
no_license
|
roxna/eproc
|
15e532a401291505adec086d2c60c78843c9afc6
|
f22506e2afd005538c21d7bb678649a3736b6feb
|
refs/heads/master
| 2022-12-02T20:38:49.674344
| 2017-03-28T09:44:03
| 2017-03-28T09:44:03
| 72,560,527
| 0
| 0
| null | 2022-11-22T01:20:51
| 2016-11-01T17:38:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 377
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-23 17:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0002_blog_slug'),
]
operations = [
migrations.RemoveField(
model_name='blog',
name='slug',
),
]
|
[
"roxna.irani@gmail.com"
] |
roxna.irani@gmail.com
|
ce3bfb840d3411bd2a1255ab453499c357ba459b
|
f407b21811c8eebbf1c32d6aadc502403d83d048
|
/problem19.py
|
edb8fdb3cd28c83eb54faa6fca3eb45a9fee0301
|
[] |
no_license
|
gnikesh/project-euler
|
37e95cbc0c82ff54ddb23b89f4f38067ec69d5c8
|
0d39c7b78fc2e11d2f863e7ae40fb27f93a18fbc
|
refs/heads/master
| 2021-08-20T04:53:54.266867
| 2021-01-20T23:21:00
| 2021-01-20T23:21:00
| 87,681,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
# 1 Jan 1900 was a Monday.
# Thirty days has September,
# April, June and November.
# All the rest have thirty-one,
# Saving February alone,
# Which has twenty-eight, rain or shine.
# And on leap years, twenty-nine.
# A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
# How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
def get_days():
week_days = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
week_days_num = [i for i in range(1, 8)]
months = [i for i in range(1, 13)]
month_30_days = [4, 6, 9, 11]
month_31_days = [1, 3, 5, 7, 8, 10, 12]
month_28_days = [2]
cur_day = 1 # 1 Jan 1900 was Monday
sundays = 0
for year in range(1900, 2001):
for month in range(1, 13):
if month in month_30_days:
days = 30
elif month in month_31_days:
days = 31
elif month in month_28_days:
if year % 4 == 0 and not year == 1900:
days = 29
else:
days = 28
for day in range(1, days + 1):
today = week_days[cur_day]
if today == "Sun" and day == 1 and year != 1900:
sundays += 1
print("Year: ", year, "Month: ", month, "Day: ", day, today)
cur_day += 1
cur_day = cur_day % 7
print(sundays)
if __name__ == "__main__":
get_days()
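    # Optional cross-check, a minimal sketch using only the standard library:
    # date.weekday() returns 6 for Sunday, so this independently counts the
    # first-of-month Sundays between 1901 and 2000.
    import datetime
    print("stdlib cross-check:",
          sum(datetime.date(y, m, 1).weekday() == 6
              for y in range(1901, 2001) for m in range(1, 13)))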
|
[
"gnikesh03@gmail.com"
] |
gnikesh03@gmail.com
|
5a1a67ef9e36c7013d262a0ec9e876fcec96d9c0
|
75a0e169a7b45a95b5d0de639b12ae2b601af236
|
/worker.py
|
94d496b3e2f01dd00dc3ff71faa400d71db56822
|
[] |
no_license
|
john-peterson/goodreads
|
65722ef88f66c1ff00a22f308b2497c03cf44a5e
|
0cf6d294cef6d7d4e1e4526ae02777d206f19ca3
|
refs/heads/master
| 2021-01-20T21:31:49.690764
| 2012-12-22T06:50:56
| 2012-12-22T06:50:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,895
|
py
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Grant Drake <grant.drake@gmail.com>'
__docformat__ = 'restructuredtext en'
import socket, re, datetime
from collections import OrderedDict
from threading import Thread
from lxml.html import fromstring, tostring
from calibre.ebooks.metadata.book.base import Metadata
from calibre.library.comments import sanitize_comments_html
from calibre.utils.cleantext import clean_ascii_chars
import calibre_plugins.goodreads.config as cfg
class Worker(Thread): # Get details
'''
Get book details from Goodreads book page in a separate thread
'''
def __init__(self, url, result_queue, browser, log, relevance, plugin, timeout=20):
Thread.__init__(self)
self.daemon = True
self.url, self.result_queue = url, result_queue
self.log, self.timeout = log, timeout
self.relevance, self.plugin = relevance, plugin
self.browser = browser.clone_browser()
self.cover_url = self.goodreads_id = self.isbn = None
def run(self):
try:
self.get_details()
except:
self.log.exception('get_details failed for url: %r'%self.url)
def get_details(self):
try:
self.log.info('Goodreads book url: %r'%self.url)
raw = self.browser.open_novisit(self.url, timeout=self.timeout).read().strip()
except Exception as e:
if callable(getattr(e, 'getcode', None)) and \
e.getcode() == 404:
self.log.error('URL malformed: %r'%self.url)
return
attr = getattr(e, 'args', [None])
attr = attr if attr else [None]
if isinstance(attr[0], socket.timeout):
msg = 'Goodreads timed out. Try again later.'
self.log.error(msg)
else:
msg = 'Failed to make details query: %r'%self.url
self.log.exception(msg)
return
raw = raw.decode('utf-8', errors='replace')
#open('c:\\goodreads.html', 'wb').write(raw)
if '<title>404 - ' in raw:
self.log.error('URL malformed: %r'%self.url)
return
try:
root = fromstring(clean_ascii_chars(raw))
except:
msg = 'Failed to parse goodreads details page: %r'%self.url
self.log.exception(msg)
return
try:
# Look at the <title> attribute for page to make sure that we were actually returned
# a details page for a book. If the user had specified an invalid ISBN, then the results
# page will just do a textual search.
title_node = root.xpath('//title')
if title_node:
page_title = title_node[0].text_content().strip()
if page_title is None or page_title.find('search results for') != -1:
self.log.error('Failed to see search results in page title: %r'%self.url)
return
except:
msg = 'Failed to read goodreads page title: %r'%self.url
self.log.exception(msg)
return
errmsg = root.xpath('//*[@id="errorMessage"]')
if errmsg:
msg = 'Failed to parse goodreads details page: %r'%self.url
            msg += tostring(errmsg[0], method='text', encoding=unicode).strip()
self.log.error(msg)
return
self.parse_details(root)
def parse_details(self, root):
try:
goodreads_id = self.parse_goodreads_id(self.url)
except:
self.log.exception('Error parsing goodreads id for url: %r'%self.url)
goodreads_id = None
try:
(title, series, series_index) = self.parse_title_series(root)
except:
self.log.exception('Error parsing title and series for url: %r'%self.url)
title = series = series_index = None
try:
authors = self.parse_authors(root)
except:
self.log.exception('Error parsing authors for url: %r'%self.url)
authors = []
if not title or not authors or not goodreads_id:
self.log.error('Could not find title/authors/goodreads id for %r'%self.url)
self.log.error('Goodreads: %r Title: %r Authors: %r'%(goodreads_id, title,
authors))
return
mi = Metadata(title, authors)
if series:
mi.series = series
mi.series_index = series_index
mi.set_identifier('goodreads', goodreads_id)
self.goodreads_id = goodreads_id
try:
isbn = self.parse_isbn(root)
if isbn:
self.isbn = mi.isbn = isbn
except:
self.log.exception('Error parsing ISBN for url: %r'%self.url)
try:
mi.rating = self.parse_rating(root)
except:
self.log.exception('Error parsing ratings for url: %r'%self.url)
try:
mi.comments = self.parse_comments(root)
except:
self.log.exception('Error parsing comments for url: %r'%self.url)
try:
self.cover_url = self.parse_cover(root)
except:
self.log.exception('Error parsing cover for url: %r'%self.url)
mi.has_cover = bool(self.cover_url)
try:
tags = self.parse_tags(root)
if tags:
mi.tags = tags
except:
self.log.exception('Error parsing tags for url: %r'%self.url)
try:
mi.publisher, mi.pubdate = self.parse_publisher_and_date(root)
except:
self.log.exception('Error parsing publisher and date for url: %r'%self.url)
mi.source_relevance = self.relevance
if self.goodreads_id:
if self.isbn:
self.plugin.cache_isbn_to_identifier(self.isbn, self.goodreads_id)
if self.cover_url:
self.plugin.cache_identifier_to_cover_url(self.goodreads_id,
self.cover_url)
self.plugin.clean_downloaded_metadata(mi)
self.result_queue.put(mi)
def parse_goodreads_id(self, url):
return re.search('/show/(\d+)', url).groups(0)[0]
def parse_title_series(self, root):
title_node = root.xpath('//div[@id="metacol"]/h1[@id="bookTitle"]')
if not title_node:
return (None, None, None)
title_text = title_node[0].text_content().strip()
if title_text.find('(') == -1:
return (title_text, None, None)
# Contains a Title and possibly a series. Possible values currently handled:
# "Some title (Omnibus)"
# "Some title (#1-3)"
# "Some title (Series #1)"
# "Some title (Series (digital) #1)"
# "Some title (Series #1-5)"
# "Some title (NotSeries #2008 Jan)"
# "Some title (Omnibus) (Series #1)"
# "Some title (Omnibus) (Series (digital) #1)"
# "Some title (Omnibus) (Series (digital) #1-5)"
text_split = title_text.rpartition('(')
title = text_split[0]
series_info = text_split[2]
hash_pos = series_info.find('#')
if hash_pos <= 0:
# Cannot find the series # in expression or at start like (#1-7)
# so consider whole thing just as title
title = title_text
series_info = ''
else:
# Check to make sure we have got all of the series information
series_info = series_info[:len(series_info)-1] #Strip off trailing ')'
while series_info.count(')') != series_info.count('('):
title_split = title.rpartition('(')
title = title_split[0].strip()
series_info = title_split[2] + '(' + series_info
if series_info:
series_partition = series_info.rpartition('#')
series_name = series_partition[0].strip()
if series_name.endswith(','):
series_name = series_name[:-1]
series_index = series_partition[2].strip()
            if series_index.find('-') != -1:
# The series is specified as 1-3, 1-7 etc.
# In future we may offer config options to decide what to do,
# such as "Use start number", "Use value xxx" like 0 etc.
# For now will just take the start number and use that
series_index = series_index.partition('-')[0].strip()
try:
return (title.strip(), series_name, float(series_index))
except ValueError:
# We have a series index which isn't really a series index
title = title_text
return (title.strip(), None, None)
def parse_authors(self, root):
get_all_authors = cfg.plugin_prefs[cfg.STORE_NAME][cfg.KEY_GET_ALL_AUTHORS]
if get_all_authors:
author_node = root.xpath('//div[@id="metacol"]/div[@id="bookAuthors"]/a[@class="authorName"]/span[@itemprop="name"]')
if author_node:
authors = []
for author_value in author_node:
author = tostring(author_value, method='text', encoding=unicode).strip()
# If multiple authors with some as editors can result in a trailing , to remove
if author[-1:] == ',':
author = author[:len(author)-1]
authors.append(author)
return authors
else:
# We need to more carefully look at the authors to only bring them in if:
# 1. They have no author type specified
# 2. They have an author type of 'Goodreads Author'
# 3. There are no authors from 1&2 and they have an author type of 'Editor'
div_authors = root.xpath('//div[@id="metacol"]/div[@id="bookAuthors"]')
if not div_authors:
return
authors_html = tostring(div_authors[0], method='text', encoding=unicode).replace('\n','').strip()
if authors_html.startswith('by'):
authors_html = authors_html[2:]
authors_type_map = OrderedDict()
for a in authors_html.split(','):
author = a.strip()
if author.startswith('more...'):
author = author[7:]
elif author.endswith('...less'):
author = author[:-7]
author_parts = author.strip().split('(')
if len(author_parts) == 1:
authors_type_map[author_parts[0]] = ''
else:
authors_type_map[author_parts[0]] = author_parts[1][:-1]
# At this point we have a dict of authors with their contribution if any in values
authors = []
valid_contrib = None
for a, contrib in authors_type_map.iteritems():
if not contrib or contrib == 'Goodreads Author':
authors.append(a)
elif len(authors) == 0:
authors.append(a)
valid_contrib = contrib
elif contrib == valid_contrib:
authors.append(a)
else:
break
return authors
def parse_rating(self, root):
rating_node = root.xpath('//div[@id="metacol"]/div[@id="bookMeta"]/span[@class="value rating"]/span')
if rating_node:
rating_text = tostring(rating_node[0], method='text', encoding=unicode)
rating_text = re.sub('[^0-9]', '', rating_text)
rating_value = float(rating_text)
if rating_value >= 100:
return rating_value / 100
return rating_value
def parse_comments(self, root):
# Look for description in a second span that gets expanded when interactively displayed [@id="display:none"]
description_node = root.xpath('//div[@id="metacol"]/div[@id="description"]/span')
if description_node:
desc = description_node[0] if len(description_node) == 1 else description_node[1]
less_link = desc.xpath('a[@class="actionLinkLite"]')
if less_link is not None and len(less_link):
desc.remove(less_link[0])
comments = tostring(desc, method='html', encoding=unicode).strip()
            while comments.find('  ') >= 0:
                comments = comments.replace('  ', ' ')
comments = sanitize_comments_html(comments)
return comments
def parse_cover(self, root):
imgcol_node = root.xpath('//div[@id="imagecol"]/a/img/@src')
if imgcol_node:
img_url = imgcol_node[0]
# Unfortunately Goodreads sometimes have broken links so we need to do
# an additional request to see if the URL actually exists
info = self.browser.open_novisit(img_url, timeout=self.timeout).info()
if int(info.getheader('Content-Length')) > 1000:
return img_url
else:
self.log.warning('Broken image for url: %s'%img_url)
def parse_isbn(self, root):
isbn_node = root.xpath('//div[@id="metacol"]/div[@id="details"]/div[@class="buttons"]/div[@id="bookDataBox"]/div/div')
if isbn_node:
id_type = tostring(isbn_node[0], method='text', encoding=unicode).strip()
if id_type == 'ISBN':
isbn10_data = tostring(isbn_node[1], method='text', encoding=unicode).strip()
isbn13_pos = isbn10_data.find('ISBN13:')
if isbn13_pos == -1:
return isbn10_data[:10]
else:
return isbn10_data[isbn13_pos+8:isbn13_pos+21]
elif id_type == 'ISBN13':
# We have just an ISBN13, without an ISBN10
return tostring(isbn_node[1], method='text', encoding=unicode).strip()
def parse_publisher_and_date(self, root):
publisher = None
pub_date = None
publisher_node = root.xpath('//div[@id="metacol"]/div[@id="details"]/div[2]')
if publisher_node:
# Publisher is specified within the div above with variations of:
# Published December 2003 by Books On Tape <nobr class="greyText">(first published 1982)</nobr>
# Published June 30th 2010
# Note that the date could be "2003", "December 2003" or "December 10th 2003"
publisher_node_text = tostring(publisher_node[0], method='text', encoding=unicode)
# See if we can find the publisher name
pub_text_parts = publisher_node_text.partition(' by ')
if pub_text_parts[2]:
publisher = pub_text_parts[2].strip()
if '(first' in publisher:
# The publisher name is followed by (first published xxx) so strip that off
publisher = publisher.rpartition('(first')[0].strip()
# Now look for the pubdate. There should always be one at start of the string
pubdate_text_match = re.search('Published[\n\s]*([\w\s]+)', pub_text_parts[0].strip())
pubdate_text = None
if pubdate_text_match is not None:
pubdate_text = pubdate_text_match.groups(0)[0]
# If we have a first published section of text use that for the date.
if '(first' in publisher_node_text:
# For the publication date we will use first published date
# Note this date could be just a year, or it could be monthname year
pubdate_text_match = re.search('.*\(first published ([\w\s]+)', publisher_node_text)
if pubdate_text_match is not None:
first_pubdate_text = pubdate_text_match.groups(0)[0]
if pubdate_text and first_pubdate_text[-4:] == pubdate_text[-4:]:
# We have same years, use the first date as it could be more accurate
pass
else:
pubdate_text = first_pubdate_text
if pubdate_text:
pub_date = self._convert_date_text(pubdate_text)
return (publisher, pub_date)
def parse_tags(self, root):
# Goodreads does not have "tags", but it does have Genres (wrapper around popular shelves)
# We will use those as tags (with a bit of massaging)
genres_node = root.xpath('//div[@class="stacked"]/div/div/div[contains(@class, "bigBoxContent")]/div/div')
if genres_node:
genre_tags = list()
for genre_node in genres_node:
sub_genre_nodes = genre_node.xpath('a')
genre_tags_list = [sgn.text_content().strip() for sgn in sub_genre_nodes]
if genre_tags_list:
genre_tags.append(' > '.join(genre_tags_list))
calibre_tags = self._convert_genres_to_calibre_tags(genre_tags)
if len(calibre_tags) > 0:
return calibre_tags
def _convert_genres_to_calibre_tags(self, genre_tags):
# for each tag, add if we have a dictionary lookup
calibre_tag_lookup = cfg.plugin_prefs[cfg.STORE_NAME][cfg.KEY_GENRE_MAPPINGS]
calibre_tag_map = dict((k.lower(),v) for (k,v) in calibre_tag_lookup.iteritems())
tags_to_add = list()
for genre_tag in genre_tags:
tags = calibre_tag_map.get(genre_tag.lower(), None)
if tags:
for tag in tags:
if tag not in tags_to_add:
tags_to_add.append(tag)
return list(tags_to_add)
def _convert_date_text(self, date_text):
# Note that the date text could be "2003", "December 2003" or "December 10th 2003"
year = int(date_text[-4:])
month = 1
day = 1
if len(date_text) > 4:
text_parts = date_text[:len(date_text)-5].partition(' ')
month_name = text_parts[0]
# Need to convert the month name into a numeric value
# For now I am "assuming" the Goodreads website only displays in English
# If it doesn't will just fallback to assuming January
month_dict = {"January":1, "February":2, "March":3, "April":4, "May":5, "June":6,
"July":7, "August":8, "September":9, "October":10, "November":11, "December":12}
month = month_dict.get(month_name, 1)
if len(text_parts[2]) > 0:
day = int(re.match('([0-9]+)', text_parts[2]).groups(0)[0])
from calibre.utils.date import utc_tz
return datetime.datetime(year, month, day, tzinfo=utc_tz)
|
[
"john.peterson3@hotmail.com"
] |
john.peterson3@hotmail.com
|
cc4189ead66a7efb115d15670bd7e27b82860536
|
3874a909e3152fda6a87dbb0ef05b18d6908807c
|
/la/parse_tabs.py
|
0de026ab72ba5ab67f99b19a08939f52599c51dc
|
[
"MIT"
] |
permissive
|
FranchuFranchu/la
|
f5ef3f8d43aec67d84030018278640d91a77dd05
|
7afa25d3d102f5a0316f5084a46a04e62976991b
|
refs/heads/master
| 2020-07-24T07:50:01.756324
| 2020-04-18T15:49:03
| 2020-04-18T15:49:03
| 207,853,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
# Converts python-style code into JS-style code
def tabs_to_codeblocks(d):
list_index = 0
code = list(d)
at_newline = True
current_indentation = 0
this_line_indentation = 0
while list_index < len(code):
if at_newline:
if code[list_index] in (" ", "\t"):
this_line_indentation += 1
else:
at_newline = False
difference = this_line_indentation - current_indentation
if difference > 0:
for i in range(difference):
code.insert(list_index,"{")
list_index += 1
elif difference < 0:
for i in range(-difference):
code.insert(list_index,"}")
list_index += 1
code.insert(list_index, ";")
current_indentation = this_line_indentation
if not at_newline:
if code[list_index] == "\n":
at_newline = True
this_line_indentation = 0
code.insert(list_index,";")
list_index += 1
list_index += 1
# Close indentation again
for i in range(current_indentation):
code.insert(list_index,"}")
list_index += 1
return "".join(code)
|
[
"fff999abc999@gmail.com"
] |
fff999abc999@gmail.com
|
4c2ce9f4572cc0369d582cfe65ef86a9f3d7106a
|
e9530da3f17f990a3fade9c8c442ad3fbb4befc4
|
/test.py
|
3b60dca7cebd68376caafb431dc6465cd28133bf
|
[] |
no_license
|
mattyhempstead/syncs-hack-2020
|
9796565c03560c76f0a4402ded1a536f0f3f7fc8
|
6e3d69070dad3228ed8bed3eb805dc090d52b56f
|
refs/heads/master
| 2022-12-08T10:23:49.395788
| 2020-08-30T01:00:35
| 2020-08-30T01:00:35
| 290,966,904
| 5
| 0
| null | 2020-08-29T11:27:44
| 2020-08-28T06:17:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 490
|
py
|
import binascii
text = "https://www.google.com/"
time = 0.5  # intended duration of each tone in seconds (not used further in this snippet)
# Convert the text to a binary string and drop the leading '0b' prefix.
# Note: bin() drops leading zero bits, so the first byte may come out shorter than 8 bits.
binary_conversion = bin(int.from_bytes(text.encode(), 'big'))
binary_conversion = binary_conversion[2:]
# Accumulate one tone per bit; the list is created once, outside the loop.
sound_array = []
for count, i in enumerate(binary_conversion):
    if count % 8 == 0:
        # Mark the start of each byte with a 0 and use the lower tone pair.
        sound_array.append(0)
        base_one = 220
        base_two = 440
    else:
        base_one = 320
        base_two = 550
    # The loop variable is a character ('0' or '1'), not an integer.
    if i == "0":
        sound_array.append(base_one)
    else:
        sound_array.append(base_two)
|
[
"pranav.alavandi"
] |
pranav.alavandi
|
24c2e84b37749a34542141af25758a0b77c195ba
|
e5ee01bde67fed16b890023cdc33b3294e7acb6d
|
/python/path_search_stripped/a_star.py
|
dc833d9ba96cf55f58739839921c943c153c83a2
|
[] |
no_license
|
dragonfi/a_star_examples
|
f8ca1494d49abf5170d52408e9efa6179b36b002
|
a6c43ca4b5f135bbaa848fcc45e74922dc174286
|
refs/heads/master
| 2020-06-19T15:07:09.298508
| 2019-08-06T15:02:56
| 2019-08-06T15:03:06
| 196,756,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
from collections import OrderedDict, namedtuple
from .graph import Graph
class Path():
def __init__(self, weight, nodes):
self.weight = weight
self.nodes = nodes
@property
def dest(self):
return self.nodes[-1]
@property
def source(self):
return self.nodes[0]
AStarResult = namedtuple("AStarResult", "path explored candidates")
class AStar():
    def __init__(self, graph, heuristic):
self._graph = graph
self._heuristic = heuristic
    def shortest_path(self, source, dest):
return self.shortest_path_with_metadata(source, dest).path
    def shortest_path_with_metadata(self, source, dest):
        def candidate_sorting_key(candidate):
node, path = candidate
node_data = self._graph.get_node_data(node)
dest_data = self._graph.get_node_data(dest)
return path.weight + self._heuristic(node_data, dest_data)
explored = {}
candidates = OrderedDict({source: Path(0, [source])})
while candidates:
candidates = OrderedDict(sorted(candidates.items(), key=candidate_sorting_key))
node, path = candidates.popitem(last=False)
if node == dest:
return AStarResult(path, explored, candidates)
if node not in explored.keys() or explored[node].weight > path.weight:
explored[node] = path
new_candidates = {
edge.dest: Path(path.weight + edge.weight, path.nodes + [edge.dest])
for edge in self._graph.edges_from(node)
if edge.dest not in explored.keys()}
for key, value in new_candidates.items():
if key not in candidates.keys() or candidates[key].weight > value.weight:
candidates[key] = value
return AStarResult(None, explored, candidates)
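# Minimal usage sketch (illustrative, not part of the original module): the real Graph
# class lives in .graph and is not shown here, so StubGraph only implements the two
# methods AStar actually calls, get_node_data() and edges_from().
StubEdge = namedtuple("StubEdge", "source dest weight")
class StubGraph:
    def __init__(self, edges, node_data):
        self._edges = edges
        self._node_data = node_data
    def get_node_data(self, node):
        return self._node_data[node]
    def edges_from(self, node):
        return [e for e in self._edges if e.source == node]
if __name__ == "__main__":
    # Nodes carry (x, y) coordinates and the heuristic is straight-line distance.
    node_data = {"a": (0, 0), "b": (1, 0), "c": (2, 0)}
    edges = [StubEdge("a", "b", 1), StubEdge("b", "c", 1), StubEdge("a", "c", 3)]
    euclid = lambda p, q: ((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2) ** 0.5
    finder = AStar(StubGraph(edges, node_data), euclid)
    print(finder.shortest_path("a", "c").nodes)  # expected ['a', 'b', 'c']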
|
[
"david.gabor.bodr@gmail.com"
] |
david.gabor.bodr@gmail.com
|
41dfb043debbb31d564d9bdcdda0dd997a4a98a5
|
dca5705c291da76cbfaf3897680eb0ae2eb56e2b
|
/aayushg_assgn/myauth/views.py
|
face35c4566395dead6248d30c8430cf8b2fedf8
|
[] |
no_license
|
gadia-aayush/Django-API-1
|
41a40598653009def8ca5bda9a578a26b8bf9115
|
307202ad0aa4357408e756cd74f3723e74fca253
|
refs/heads/master
| 2022-12-13T23:09:45.960562
| 2020-08-30T19:36:16
| 2020-08-30T19:36:16
| 273,763,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,723
|
py
|
from django.shortcuts import render
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.contrib.auth import authenticate, login, logout
from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.decorators import api_view
from rest_framework import views
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
import re
def user_login(request):
if request.method == 'POST':
username = request.POST.get('phone')
password = request.POST.get('password')
user = authenticate(username = username, password = password)
if user :
if user.is_active:
login(request,user)
data = {"code" : 200, "status" : "OK", "message" : "LogIn Successfull"}
return JsonResponse(data)
else:
data = {"code" : 403, "status" : "Forbidden", "message" : "User Disabled"}
return JsonResponse(data)
else:
data = {"code" : 401, "status" : "Unauthorized", "message" : "Invalid Login Credentials"}
return JsonResponse(data)
else:
return render(request,'login.html')
# Django Rest Framework used
class logout(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request):
user = request.user
token = Token.objects.get(user=user)
if token:
token.delete()
data = {"code" : 200, "status" : "OK", "message" : "Log Out Successfull"}
return Response(data)
def user_signup(request):
if request.method == 'POST':
username = request.POST.get('phone')
password = request.POST.get('password')
name = request.POST.get('name')
email = request.POST.get('email')
#validate whether the phone number is registered or not
try:
if User.objects.get(username = username):
data = {"code" : 403, "status" : "Forbidden", "message" : "Entered Mobile Number is already registered. Try loggin-in"}
return JsonResponse(data)
except:
pass
#validate mobile number [must be 10 digits. assumed that all are of India, so ignored prefixed country codes]
phoneregex = re.compile(r'^[1-9]\d{9}$')
if phoneregex.search(str(username)):
pass
else:
data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Mobile Number should be of 10 digits- ^[1-9]\d{9}$"}
return JsonResponse(data)
#validate name, making sure it is not empty
firstregex = re.compile(r"^[A-Za-z][A-Za-z,.'].*$")
if firstregex.search(str(name)):
pass
else:
data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Name should start with an alphabet- ^[A-Za-z][A-Za-z,.']*$"}
return JsonResponse(data)
#validate email address
emailregex = re.compile(r"^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$")
if str(email) != "":
if emailregex.search(str(email)):
pass
else:
data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Enter a valid email address- ^([\w\.\-]+)@([\w\-]+)((\.(\w){2,3})+)$"}
return JsonResponse(data)
#validate password
passregex = re.compile(r"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$")
if passregex.search(str(password)):
pass
else:
data = {"code" : 422, "status" : "Unprocessable Entity", "message" : "Password should be between 8 to 15 characters which contain at least one lowercase letter, one uppercase letter, one numeric digit, and one special character- ^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[^a-zA-Z0-9])(?!.*\s).{8,15}$"}
return JsonResponse(data)
authobj = User.objects.create_user(username = username, password = password, first_name = name, email = email)
authobj.save()
data = {"code" : 201, "status" : "Created", "message" : "Sign-Up Successfull"}
return JsonResponse(data)
else:
return render(request,'user_signup.html')
# Django Rest Framework used
@api_view(['POST', ])
def get_token(request):
if request.method == 'POST':
username = request.data.get('phone')
password = request.data.get('password')
user = authenticate(username = username, password = password)
if user :
if user.is_active:
tokened = Token.objects.filter(user=user)
data = {}
if tokened.count()>0:
data["code"] = 200
data["status"] = "OK"
data["message"] = "Token already Exists"
data["phone"] = username
data["Token"] = tokened[0].key
return Response(data)
else:
token = Token.objects.create(user=user)
data["code"] = 201
data["status"] = "Created"
data["message"] = "Token Created"
data["Token"] = token.key
data["phone"] = username
return Response(data)
else:
data = {"code" : 403, "status" : "Forbidden", "message" : "User Disabled"}
return Response(data)
else:
data = {"code" : 401, "status" : "Unauthorized", "message" : "Invalid Login Credentials"}
return Response(data)
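# Illustrative invocation with DRF's test utilities (not part of the original file; the
# '/get_token/' path is a placeholder because urls.py is not shown here, and the output
# assumes the phone/password pair matches an active user):
#   from rest_framework.test import APIRequestFactory
#   factory = APIRequestFactory()
#   request = factory.post('/get_token/', {'phone': '9876543210', 'password': 'Secr3t@Pwd'})
#   response = get_token(request)
#   print(response.data)  # e.g. {"code": 201, "status": "Created", "message": "Token Created", ...}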
|
[
"gadia.aayush@gmail.com"
] |
gadia.aayush@gmail.com
|
0dca7a66a1da77d96fed23a3f91e8168a80f5e26
|
0ee64034518898893d495639cb01aa9523789f77
|
/2018 Materials/Resources/Week 4/RaspberryPi.py
|
63b9c98778493a0ca93bb145474cbfe01bd4c169
|
[
"MIT"
] |
permissive
|
Phangster/digital-world-for-normal-humans
|
31187b47e16d4359fce2ecac2ce7b5c1aa88d909
|
29a479af2e380bdf691f6487167d0d8edf0ba5ed
|
refs/heads/master
| 2020-05-07T19:52:37.655375
| 2018-12-29T14:32:35
| 2018-12-29T14:32:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,039
|
py
|
import RPi.GPIO as GPIO
from time import sleep
from firebase import firebase
url = "https://internet-of-things-c572e.firebaseio.com/"
token = 'qcHr20bWwg1ziqik58l39JD8UlcLLIGa8HJ0DaSC'
firebase = firebase.FirebaseApplication(url, token)
# Use the BCM GPIO numbers as the numbering scheme.
GPIO.setmode(GPIO.BCM)
# Use GPIO12, 16, 20 and 21 for the buttons.
s1 = 12
s2 = 16
s3 = 20
s4 = 21
switch_list = [12, 16, 20, 21]
# Set GPIO numbers in the list: [12, 16, 20, 21] as input with pull-down resistor.
movement_list = []
GPIO.setup(switch_list, GPIO.IN, GPIO.PUD_DOWN)
done = False
while done == False:
if GPIO.input(12) == GPIO.HIGH:
movement_list.append('left')
print('Left added.')
sleep(0.1)
elif GPIO.input(16) == GPIO.HIGH:
movement_list.append('right')
print('Right added.')
sleep(0.1)
elif GPIO.input(20) == GPIO.HIGH:
movement_list.append('up')
print('Up added.')
sleep(0.1)
elif GPIO.input(21) == GPIO.HIGH:
movement_list.append('done')
print('Terminating control, uploading sequence to Firebase.')
firebase.put('/','movement_list', movement_list)
done = True
break
while done == True:
    a = firebase.get('/movement_list')  # read back the uploaded movement_list node
    if a is None:
        # the node has been cleared, so we can stop waiting
        done = False
    sleep(0.5)
# Write your code here
'''
We loop through the key (button name), value (gpio number) pair of the buttons
dictionary and check whether the button at the corresponding GPIO is being
pressed. When the OK button is pressed, we will exit the while loop and
write the list of movements (movement_list) to the database. Any other button
press would be stored in the movement_list.
Since there may be debouncing issue due to the mechanical nature of the buttons,
we can address it by putting a short delay between each iteration after a key
press has been detected.
'''
# Write to database once the OK button is pressed
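# Illustrative sketch of the dictionary-based variant described in the notes above (not
# part of the original script; 'ok' is an assumed name for the confirm button):
#   buttons = {'left': 12, 'right': 16, 'up': 20, 'ok': 21}
#   movement_list = []
#   done = False
#   while not done:
#       for name, pin in buttons.items():
#           if GPIO.input(pin) != GPIO.HIGH:
#               continue
#           if name == 'ok':
#               firebase.put('/', 'movement_list', movement_list)
#               done = True
#               break
#           movement_list.append(name)
#           sleep(0.1)  # short delay as a crude debounce after each detected press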
|
[
"thaddeus.phua@gmail.com"
] |
thaddeus.phua@gmail.com
|
ebb0ee33e3d8bde61a40935c59eb8b4e2c250d40
|
9e8a90e8c9bc90d9ea34b79e7553a7ba2fd4e6bf
|
/models/networkSwitch.py
|
bded10111bfacc3b5285280e628f5a076988367a
|
[] |
no_license
|
lwyanne/CPAE
|
ddae51affcca8db0266bf66f091f165d95bd7837
|
e155dfecf3f38ed7121a8a446dc4eeb4067b7e46
|
refs/heads/master
| 2023-07-28T13:12:22.372796
| 2021-08-27T15:09:58
| 2021-08-27T15:09:58
| 353,564,787
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185,346
|
py
|
from __future__ import print_function
from torch.nn.utils.rnn import pack_padded_sequence
import inspect
import os, sys
import logging
# add the top-level directory of this project to sys.path so that we can import modules without error
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from models.loss import Chimera_loss, record_loss, mask_where, mapping_where, mask_mapping_M
logger = logging.getLogger("cpc")
import numpy as np
import torch
import torch.nn as nn
import math
from models.utils import *
from models.datareader import *
from sklearn.metrics import roc_auc_score
from fastai.callbacks import *
from fastai.tabular import *
from fastai import tabular
from models.optimizer import ScheduledOptim
from sklearn.metrics import cohen_kappa_score as kappa, mean_absolute_error as mad, roc_auc_score as auroc
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def auroc_score(input, target):
input, target = input.cpu().numpy()[:, 1], target.cpu().numpy()
return roc_auc_score(target, input)
class AUROC(tabular.Callback):
"""
This is for output AUROC as a metric in fastai training process.
This has a small but acceptable issue. #TODO
"""
_order = -20 # Needs to run before the recorder
def __init__(self, learn, **kwargs):
self.learn = learn
def on_train_begin(self, **kwargs):
self.learn.recorder.add_metric_names(['AUROC'])
def on_epoch_begin(self, **kwargs):
self.output, self.target = [], []
def on_batch_end(self, last_target, last_output, train, **kwargs):
if not train:
try:
self.output.append(last_output)
except AttributeError:
self.output = []
try:
self.target.append(last_target)
except AttributeError:
self.target = []
def on_epoch_end(self, last_metrics, **kwargs):
if len(self.output) > 0:
output = torch.cat(self.output).cpu()
target = torch.cat(self.target).cpu()
preds = F.softmax(output, dim=1)
metric = roc_auc_score(target, preds, multi_class='ovo')
return add_metrics(last_metrics, [metric])
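# Illustrative hook-up with a fastai v1 Learner (not part of the original file; `data`
# and `model` stand in for whatever DataBunch and network the project actually builds):
#   learn = Learner(data, model, metrics=[accuracy], callback_fns=[AUROC])
#   learn.fit_one_cycle(3)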
class biAUROC(tabular.Callback):
"""
This is for output AUROC as a metric in fastai training process.
This has a small but acceptable issue. #TODO
"""
_order = -20 # Needs to run before the recorder
def __init__(self, learn, **kwargs):
self.learn = learn
def on_train_begin(self, **kwargs):
self.learn.recorder.add_metric_names(['AUROC'])
def on_epoch_begin(self, **kwargs):
self.output, self.target = [], []
def on_batch_end(self, last_target, last_output, train, **kwargs):
if not train:
try:
self.output.append(last_output)
except AttributeError:
self.output = []
try:
self.target.append(last_target)
except AttributeError:
self.target = []
def on_epoch_end(self, last_metrics, **kwargs):
if len(self.output) > 0:
output = torch.cat(self.output).cpu()
target = torch.cat(self.target).cpu()
preds = F.softmax(output, dim=1)
metric = auroc_score(preds, target)
return add_metrics(last_metrics, [metric])
class MAD(tabular.Callback):
_order = -20
def __init__(self, learn, **kwargs):
self.learn = learn
def on_train_begin(self, **kwargs):
self.learn.recorder.add_metric_names(['MAD'])
def on_epoch_begin(self, **kwargs):
self.output, self.target = [], []
def on_batch_end(self, last_target, last_output, train, **kwargs):
if not train:
try:
self.output.append(last_output)
except AttributeError:
self.output = []
try:
self.target.append(last_target)
except AttributeError:
self.target = []
def on_epoch_end(self, last_metrics, **kwargs):
if len(self.output) > 0:
output = torch.cat(self.output)
target = torch.cat(self.target)
preds = torch.argmax(F.softmax(output, dim=1), dim=1, keepdim=False)
metric = mean_absolute_error(preds, target)
return add_metrics(last_metrics, [metric])
class CPclassifier(nn.Module):
"""
Combine the CPC and MLP, to make it possible to fine-tune on the downstream task
Note: Fine-tune is implemented via fastai learner.
"""
def __init__(self, CPmodel, MLP, freeze=False):
super(CPclassifier, self).__init__()
self.CPmodel = CPmodel
self.MLP = MLP
if freeze:
for param in self.CPmodel.parameters():
param.requires_grad = False
def forward(self, x):
if 'CP' in self.CPmodel.__class__.__name__ or 'AE_LSTM' in self.CPmodel.__class__.__name__:
x = self.CPmodel.get_reg_out(x)
else:
x = self.CPmodel.get_encode(x)
x = self.MLP(x)
return x
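# Illustrative fine-tuning setup (not part of the original file; the MLP head sizes and
# the `data` DataBunch are placeholders for whatever the project actually uses):
#   cpc = CPAE1_S(embedded_features=16, gru_out=128)
#   head = nn.Sequential(nn.Linear(128, 64), nn.ReLU(inplace=True), nn.Linear(64, 2))
#   clf = CPclassifier(cpc, head, freeze=True)  # freeze the pretrained CPC encoder
#   learn = Learner(data, clf, metrics=[accuracy])
#   learn.fit_one_cycle(5)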
class CPAE1_S(nn.Module):
def __init__(
self,
embedded_features,
gru_out,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
time_step=30,
n_points=192,
n_features=76,
):
self.embedded_features = embedded_features
self.gru_out = gru_out
self.conv_sizes = conv_sizes
self.time_step = time_step
# kernel_sizes=get_kernel_sizes() #TODO
super(CPAE1_S, self).__init__()
self.n_features = n_features
# . If is int, uses the same padding in all boundaries.
# If a 4-tuple, uses (left ,right ,top ,bottom )
self.channels = [n_features] + conv_sizes
# the core part of model list
self.sequential = lambda inChannel, outChannel: nn.Sequential(
nn.ReflectionPad1d((0, 1)),
nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
# ** minded the length should be 1 element shorter than # of channels
self.encoder = nn.ModuleList(
[self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.decode_channels = self.channels[::-1]
self.decoder = nn.ModuleList(
[self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# dim = 1 !!!
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.gru = nn.GRU(
self.embedded_features,
gru_out,
num_layers=1,
bidirectional=False,
batch_first=True).to(device)
self.beforeNCE = None
# input shape: (N,C=1,n_points=192,n_features=76)
# output shape: (N, C=sizes[-1], )
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
# def relevant_points(n):
def add_fcs(self, hidden=None):
"""
This function will add FC layers to the embedded features and then compare the features after FC transformations.
See NOTION for illustration.
:param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
as [n_embedded_features,n_embedded_features]
:return: None
"""
n = self.embedded_features
if hidden is None:
self.fcs = nn.Sequential(
nn.Linear(n, n),
nn.ReLU(inplace=True),
nn.Linear(n, n)
)
else:
if type(hidden) != list:
hidden = list(hidden)
layers = []
for i, j in zip([n] + hidden, hidden + [n]):
layers.append(nn.Linear(i, j))
layers.append(nn.ReLU(inplace=True))
layers.pop() # We do not want Relu at the last layer
self.fcs = nn.Sequential(*layers).to(device)
self.beforeNCE = True
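    # Example (illustrative): model.add_fcs([100, 100]) with embedded_features == 16 builds
    # Linear(16, 100) -> ReLU -> Linear(100, 100) -> ReLU -> Linear(100, 16), and the same
    # stack is then applied to both `pred` and `encode_samples` before the NCE comparison
    # in forward().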
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_hidden(self, batch_size, use_gpu=True):
return torch.zeros(1, batch_size, self.gru_out).to(device)
def encode(self, x):
for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192)
x = self.encoder[i](x)
return x # output shape: (N,n_features=8,n_points=192)
def decode(self, x):
for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192)
x = self.decoder[i](x)
return x # output shape: (N,n_points=192,n_features=76)
def recurrent(self, zt):
'''
GRU RNN
'''
batch_size = self.batch_size
# output shape: (N, n_frames, features,1)
hidden = self.init_hidden(batch_size)
output, hidden = self.gru(zt, hidden)
return output, hidden
def gru_to_ct(self, zt):
'''
return the last time_step of GRU result
'''
output, hidden = self.recurrent(zt)
c_t = output[:, -1, :].view(self.batch_size, self.gru_out)
return c_t, hidden
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
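    # In formula form (illustrative): with B = batch_size and K = time_step,
    #   nce = -(1 / (B * K)) * sum_k sum_i log softmax(z_{t+k} @ (W_k c_t)^T)[i, i]
    # i.e. each sample's own future embedding has to score highest against the other
    # batch members, which act as the negatives.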
def get_reg_out(self, x):
self.batch_size = x.shape[0]
x = x.squeeze(1).transpose(1, 2)
self.n_frames = x.shape[2]
z = self.encode(x).transpose(1, 2)
z = self.linear(z)
forward_seq = z[:, :, :]
c_t, hidden = self.gru_to_ct(forward_seq)
return c_t
def forward(self, x):
x = x.transpose(1, 2)
z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1])
d = self.decode(z.transpose(1, 2))
self.batch_size = x.shape[0]
self.n_frames = x.shape[2]
# make change to here
# t_samples should at least start from 30
t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
device)
#
encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(
device) # e.g.
# size
z = self.linear(z)
for i in np.arange(1, self.time_step + 1):
encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
forward_seq = z[:, :int(t_samples) + 1, :]
c_t, hidden = self.gru_to_ct(forward_seq)
pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
for i in np.arange(0, self.time_step):
linear = self.Wk[i]
pred[i] = linear(c_t)
if self.beforeNCE: # ADD FC layers
pred = self.fcs(pred)
encode_samples = self.fcs(encode_samples)
# d = self.decode(pred.transpose(1,2).transpose(0,2))
nce, accuracy = self.compute_nce(encode_samples, pred)
return d, nce, accuracy
class CPAE1_NO_BN(nn.Module):
def __init__(
self,
embedded_features,
gru_out,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
time_step=30,
n_points=192,
n_features=76,
):
self.embedded_features = embedded_features
self.gru_out = gru_out
self.conv_sizes = conv_sizes
self.time_step = time_step
# kernel_sizes=get_kernel_sizes() #TODO
super(CPAE1_NO_BN, self).__init__()
self.n_features = n_features
# . If is int, uses the same padding in all boundaries.
# If a 4-tuple, uses (left ,right ,top ,bottom )
self.channels = [n_features] + conv_sizes
# the core part of model list
self.sequential = lambda inChannel, outChannel: nn.Sequential(
nn.ReflectionPad1d((0, 1)),
nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
# nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
# ** minded the length should be 1 element shorter than # of channels
self.encoder = nn.ModuleList(
[self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.decode_channels = self.channels[::-1]
self.decoder = nn.ModuleList(
[self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# dim = 1 !!!
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.gru = nn.GRU(
self.embedded_features,
gru_out,
num_layers=1,
bidirectional=False,
batch_first=True).to(device)
self.beforeNCE = None
# input shape: (N,C=1,n_points=192,n_features=76)
# output shape: (N, C=sizes[-1], )
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
# def relevant_points(n):
def add_fcs(self, hidden=None):
"""
This function will add FC layers to the embedded features and then compare the features after FC transformations.
See NOTION for illustration.
:param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
as [n_embedded_features,n_embedded_features]
:return: None
"""
n = self.embedded_features
if hidden is None:
self.fcs = nn.Sequential(
nn.Linear(n, n),
nn.ReLU(inplace=True),
nn.Linear(n, n)
)
else:
if type(hidden) != list:
hidden = list(hidden)
layers = []
for i, j in zip([n] + hidden, hidden + [n]):
layers.append(nn.Linear(i, j))
layers.append(nn.ReLU(inplace=True))
layers.pop() # We do not want Relu at the last layer
self.fcs = nn.Sequential(*layers).to(device)
self.beforeNCE = True
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_hidden(self, batch_size, use_gpu=True):
return torch.zeros(1, batch_size, self.gru_out).to(device)
def encode(self, x):
for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192)
x = self.encoder[i](x)
return x # output shape: (N,n_features=8,n_points=192)
def decode(self, x):
for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192)
x = self.decoder[i](x)
return x # output shape: (N,n_points=192,n_features=76)
def recurrent(self, zt):
'''
GRU RNN
'''
batch_size = self.batch_size
# output shape: (N, n_frames, features,1)
hidden = self.init_hidden(batch_size)
output, hidden = self.gru(zt, hidden)
return output, hidden
def gru_to_ct(self, zt):
'''
return the last time_step of GRU result
'''
output, hidden = self.recurrent(zt)
c_t = output[:, -1, :].view(self.batch_size, self.gru_out)
return c_t, hidden
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
def get_reg_out(self, x):
self.batch_size = x.shape[0]
x = x.squeeze(1).transpose(1, 2)
self.n_frames = x.shape[2]
z = self.encode(x).transpose(1, 2)
z = self.linear(z)
forward_seq = z[:, :, :]
c_t, hidden = self.gru_to_ct(forward_seq)
return c_t
def forward(self, x):
x = x.transpose(1, 2)
z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1])
d = self.decode(z.transpose(1, 2))
self.batch_size = x.shape[0]
self.n_frames = x.shape[2]
# make change to here
# t_samples should at least start from 30
t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
device)
#
encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(
device) # e.g.
# size
z = self.linear(z)
for i in np.arange(1, self.time_step + 1):
encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
forward_seq = z[:, :int(t_samples) + 1, :]
c_t, hidden = self.gru_to_ct(forward_seq)
pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
for i in np.arange(0, self.time_step):
linear = self.Wk[i]
pred[i] = linear(c_t)
# d = self.decode(pred.transpose(1,2).transpose(0,2))
nce, accuracy = self.compute_nce(encode_samples, pred)
return d, nce, accuracy
class CPAE1_LSTM(nn.Module):
def __init__(
self,
embedded_features,
gru_out,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
time_step=30,
n_points=192,
n_features=76,
):
self.embedded_features = embedded_features
self.gru_out = gru_out
self.conv_sizes = conv_sizes
self.time_step = time_step
# kernel_sizes=get_kernel_sizes() #TODO
super(CPAE1_LSTM, self).__init__()
self.n_features = n_features
# . If is int, uses the same padding in all boundaries.
# If a 4-tuple, uses (left ,right ,top ,bottom )
self.channels = [n_features] + conv_sizes
# the core part of model list
self.sequential = lambda inChannel, outChannel: nn.Sequential(
nn.ReflectionPad1d((0, 1)),
nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
# ** minded the length should be 1 element shorter than # of channels
self.encoder = nn.ModuleList(
[self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.decode_channels = self.channels[::-1]
self.decoder = nn.ModuleList(
[self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# dim = 1 !!!
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.gru = nn.LSTM(
self.embedded_features,
hidden_size=gru_out,
num_layers=2,
bidirectional=False,
batch_first=True).to(device)
self.beforeNCE = None
# input shape: (N,C=1,n_points=192,n_features=76)
# output shape: (N, C=sizes[-1], )
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
# def relevant_points(n):
def add_fcs(self, hidden=None):
"""
This function will add FC layers to the embedded features and then compare the features after FC transformations.
See NOTION for illustration.
:param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
as [n_embedded_features,n_embedded_features]
:return: None
"""
n = self.embedded_features
if hidden is None:
self.fcs = nn.Sequential(
nn.Linear(n, n),
nn.ReLU(inplace=True),
nn.Linear(n, n)
)
else:
if type(hidden) != list:
hidden = list(hidden)
layers = []
for i, j in zip([n] + hidden, hidden + [n]):
layers.append(nn.Linear(i, j))
layers.append(nn.ReLU(inplace=True))
layers.pop() # We do not want Relu at the last layer
self.fcs = nn.Sequential(*layers).to(device)
self.beforeNCE = True
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_hidden(self, batch_size, use_gpu=True):
return torch.zeros(1, batch_size, self.gru_out).to(device)
def encode(self, x):
for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192)
x = self.encoder[i](x)
return x # output shape: (N,n_features=8,n_points=192)
def decode(self, x):
for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192)
x = self.decoder[i](x)
return x # output shape: (N,n_points=192,n_features=76)
def recurrent(self, zt):
'''
GRU RNN
'''
batch_size = self.batch_size
# output shape: (N, n_frames, features,1)
hidden = self.init_hidden(batch_size)
hidden = torch.cat((hidden, hidden), dim=0)
hidden = (hidden, hidden)
output, hidden = self.gru(zt, hidden)
return output, hidden
def gru_to_ct(self, zt):
'''
return the last time_step of GRU result
'''
output, hidden = self.recurrent(zt)
c_t = output[:, -1, :].view(self.batch_size, self.gru_out)
return c_t, hidden
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
def get_reg_out(self, x):
self.batch_size = x.shape[0]
x = x.squeeze(1).transpose(1, 2)
self.n_frames = x.shape[2]
z = self.encode(x).transpose(1, 2)
z = self.linear(z)
forward_seq = z[:, :, :]
c_t, hidden = self.gru_to_ct(forward_seq)
return c_t
def forward(self, x):
x = x.transpose(1, 2)
z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1])
d = self.decode(z.transpose(1, 2))
self.batch_size = x.shape[0]
self.n_frames = x.shape[2]
# make change to here
# t_samples should at least start from 30
t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
device)
#
encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(
device) # e.g.
# size
z = self.linear(z)
for i in np.arange(1, self.time_step + 1):
encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
forward_seq = z[:, :int(t_samples) + 1, :]
c_t, hidden = self.gru_to_ct(forward_seq)
pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
for i in np.arange(0, self.time_step):
linear = self.Wk[i]
pred[i] = linear(c_t)
# d = self.decode(pred.transpose(1,2).transpose(0,2))
nce, accuracy = self.compute_nce(encode_samples, pred)
return d, nce, accuracy
class CPAE1_LSTM_NO_BN(nn.Module):
def __init__(
self,
embedded_features,
gru_out,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
time_step=30,
n_points=192,
n_features=76,
):
self.embedded_features = embedded_features
self.gru_out = gru_out
self.conv_sizes = conv_sizes
self.time_step = time_step
# kernel_sizes=get_kernel_sizes() #TODO
super(CPAE1_LSTM_NO_BN, self).__init__()
self.n_features = n_features
# . If is int, uses the same padding in all boundaries.
# If a 4-tuple, uses (left ,right ,top ,bottom )
self.channels = [n_features] + conv_sizes
# the core part of model list
self.sequential = lambda inChannel, outChannel: nn.Sequential(
nn.ReflectionPad1d((0, 1)),
nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
# nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
# ** minded the length should be 1 element shorter than # of channels
self.encoder = nn.ModuleList(
[self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.decode_channels = self.channels[::-1]
self.decoder = nn.ModuleList(
[self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# dim = 1 !!!
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.gru = nn.LSTM(
self.embedded_features,
hidden_size=gru_out,
num_layers=2,
bidirectional=False,
batch_first=True).to(device)
self.beforeNCE = None
# input shape: (N,C=1,n_points=192,n_features=76)
# output shape: (N, C=sizes[-1], )
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
# def relevant_points(n):
def add_fcs(self, hidden=None):
"""
This function will add FC layers to the embedded features and then compare the features after FC transformations.
See NOTION for illustration.
:param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
as [n_embedded_features,n_embedded_features]
:return: None
"""
n = self.embedded_features
if hidden is None:
self.fcs = nn.Sequential(
nn.Linear(n, n),
nn.ReLU(inplace=True),
nn.Linear(n, n)
)
else:
if type(hidden) != list:
hidden = list(hidden)
layers = []
for i, j in zip([n] + hidden, hidden + [n]):
layers.append(nn.Linear(i, j))
layers.append(nn.ReLU(inplace=True))
layers.pop() # We do not want Relu at the last layer
self.fcs = nn.Sequential(*layers).to(device)
self.beforeNCE = True
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_hidden(self, batch_size, use_gpu=True):
return torch.zeros(1, batch_size, self.gru_out).to(device)
def encode(self, x):
for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192)
x = self.encoder[i](x)
return x # output shape: (N,n_features=8,n_points=192)
def decode(self, x):
for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192)
x = self.decoder[i](x)
return x # output shape: (N,n_points=192,n_features=76)
def recurrent(self, zt):
'''
GRU RNN
'''
batch_size = self.batch_size
# output shape: (N, n_frames, features,1)
hidden = self.init_hidden(batch_size)
hidden = torch.cat((hidden, hidden), dim=0)
hidden = (hidden, hidden)
output, hidden = self.gru(zt, hidden)
return output, hidden
def gru_to_ct(self, zt):
'''
return the last time_step of GRU result
'''
output, hidden = self.recurrent(zt)
c_t = output[:, -1, :].view(self.batch_size, self.gru_out)
return c_t, hidden
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
def get_reg_out(self, x):
self.batch_size = x.shape[0]
x = x.squeeze(1).transpose(1, 2)
self.n_frames = x.shape[2]
z = self.encode(x).transpose(1, 2)
z = self.linear(z)
forward_seq = z[:, :, :]
c_t, hidden = self.gru_to_ct(forward_seq)
return c_t
def forward(self, x):
x = x.transpose(1, 2)
z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1])
d = self.decode(z.transpose(1, 2))
self.batch_size = x.shape[0]
self.n_frames = x.shape[2]
# make change to here
# t_samples should at least start from 30
t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
device)
#
encode_samples = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(
device) # e.g.
# size
z = self.linear(z)
for i in np.arange(1, self.time_step + 1):
encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
forward_seq = z[:, :int(t_samples) + 1, :]
c_t, hidden = self.gru_to_ct(forward_seq)
pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
for i in np.arange(0, self.time_step):
linear = self.Wk[i]
pred[i] = linear(c_t)
# d = self.decode(pred.transpose(1,2).transpose(0,2))
nce, accuracy = self.compute_nce(encode_samples, pred)
return d, nce, accuracy
class CPAE2_S(CPAE1_S):
"""
Use conv1dtranspose in CPAE1
"""
def __init__(
self,
embedded_features,
gru_out,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
time_step=30,
n_points=192,
n_features=76,
):
self.embedded_features = embedded_features
self.gru_out = gru_out
self.conv_sizes = conv_sizes
self.time_step = time_step
# kernel_sizes=get_kernel_sizes() #TODO
        # pass the constructor arguments through; the CPAE1_S base class needs them to build its modules
        super(CPAE2_S, self).__init__(embedded_features, gru_out, conv_sizes, time_step, n_points, n_features)
# . If is int, uses the same padding in all boundaries.
# If a 4-tuple, uses (left ,right ,top ,bottom )
self.channels = [n_features] + conv_sizes
# the core part of model list
self.enSequential = lambda inChannel, outChannel: nn.Sequential(
nn.ReflectionPad1d((0, 1)),
nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
self.deSequential = lambda inChannel, outChannel: nn.Sequential(
nn.ConvTranspose1d(inChannel, outChannel, kernel_size=3, padding=1),
nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
# ** minded the length should be 1 element shorter than # of channels
self.encoder = nn.ModuleList(
[self.enSequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
)
self.decode_channels = self.channels[::-1]
self.decoder = nn.ModuleList(
[self.deSequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
)
self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# dim = 1 !!!
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.gru = nn.GRU(
self.embedded_features,
gru_out,
num_layers=1,
bidirectional=False,
batch_first=True).to(device)
self.beforeNCE = None
# input shape: (N,C=1,n_points=192,n_features=76)
# output shape: (N, C=sizes[-1], )
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
# def relevant_points(n):
# deconvolution nn. unMaxPool
class CPAE3_S(CPAE2_S):
"""
Use conv1dtranspose in CPAE1 & Maxpooling & unpooing
"""
def __init__(
self,
embedded_features,
gru_out,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
time_step=30,
n_points=192,
n_features=76,
):
self.embedded_features = embedded_features
self.gru_out = gru_out
self.conv_sizes = conv_sizes
self.time_step = time_step
# kernel_sizes=get_kernel_sizes() #TODO
        # pass the constructor arguments through; the CPAE2_S base class needs them to build its modules
        super(CPAE3_S, self).__init__(embedded_features, gru_out, conv_sizes, time_step, n_points, n_features)
self.n_features = n_features
# . If is int, uses the same padding in all boundaries.
# If a 4-tuple, uses (left ,right ,top ,bottom )
self.channels = [n_features] + conv_sizes
self.decode_channels = self.channels[::-1]
encodelist = []
count = 0
for i, j in zip(self.channels[:-1], self.channels[1:]):
encodelist.append(nn.ReflectionPad1d((0, 1)))
encodelist.append(nn.Conv1d(i, j, kernel_size=2, padding=0))
encodelist.append(nn.BatchNorm1d(j))
encodelist.append(nn.ReLU(inplace=True))
if count < 2:
encodelist.append(nn.ReflectionPad1d((0, 1)))
encodelist.append(nn.MaxPool1d(2, stride=1))
count += 1
self.encoder = nn.Sequential(*encodelist)
decodelist = []
count = 0
for i, j in zip(self.decode_channels[:-1], self.decode_channels[1:]):
decodelist.append(nn.ConvTranspose1d(i, j, kernel_size=3, padding=1))
decodelist.append(nn.BatchNorm1d(j))
decodelist.append(nn.ReLU(inplace=True))
self.decoder = nn.Sequential(*decodelist)
self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# dim = 1 !!!
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.gru = nn.GRU(
self.embedded_features,
gru_out,
num_layers=1,
bidirectional=False,
batch_first=True).to(device)
self.beforeNCE = None
# input shape: (N,C=1,n_points=192,n_features=76)
# output shape: (N, C=sizes[-1], )
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
# def relevant_points(n):
# deconvolution nn. unMaxPool
class CPAE4_S(CPAE1_S):
    def __init__(self, *args, **kwargs):
        # "__int__" was a typo for "__init__"; forward all arguments to the parent constructor
        super(CPAE4_S, self).__init__(*args, **kwargs)
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
self.batch_size = x.shape[0]
self.n_frames = x.shape[2]
x = x.transpose(1, 2)
z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1])
z = self.linear(z)
x = x.transpose(1, 2)
# make change to here
# t_samples should at least start from 30
t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
device)
forward_seq = z[:, :int(t_samples) + 1, :]
c_t, hidden = self.gru_to_ct(forward_seq)
pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
for i in np.arange(0, self.time_step):
linear = self.Wk[i]
pred[i] = linear(c_t)
#
x_samples = torch.empty((self.time_step, self.batch_size, self.n_features)).float().to(
device) # e.g.
# size
for i in np.arange(1, self.time_step + 1):
x_samples[i - 1, :, :] = x[:, int(t_samples) + i, :]
reconstruct_samples = self.decode(pred.transpose(1, 2)).transpose(1, 2)
# d = self.decode(pred.transpose(1,2).transpose(0,2))
nce, accuracy = self.compute_nce(x_samples, reconstruct_samples)
return accuracy, nce, x
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
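# --- Illustrative sketch (not referenced by the classes in this file): the
# single-step InfoNCE computation that the compute_nce() methods above apply at
# each predicted time step. Uses the module-level `torch` import; toy CPU
# shapes, and the helper name `_info_nce_step_sketch` is ours, not part of the
# original model code.
def _info_nce_step_sketch(batch_size=4, dim=8):
    z_true = torch.randn(batch_size, dim)   # encoded samples at step t + k
    z_pred = torch.randn(batch_size, dim)   # predictions derived from the context c_t
    scores = torch.mm(z_true, z_pred.t())   # (batch, batch) similarity matrix
    # positives sit on the diagonal: sample i is paired with prediction i
    log_probs = torch.nn.functional.log_softmax(scores, dim=0)
    nce = -torch.diag(log_probs).mean()     # same normalisation as compute_nce()
    accuracy = (scores.softmax(dim=0).argmax(dim=0)
                == torch.arange(batch_size)).float().mean().item()
    return nce, accuracy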
class CPAE4_NO_BN(CPAE1_NO_BN):
    def __init__(self, *args, **kwargs):
        super(CPAE4_NO_BN, self).__init__(*args, **kwargs)
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
self.batch_size = x.shape[0]
self.n_frames = x.shape[2]
x = x.transpose(1, 2)
z = self.encode(x).transpose(1, 2) # z: (batch, n_time, conv[-1])
z = self.linear(z)
x = x.transpose(1, 2)
# make change to here
# t_samples should at least start from 30
t_samples = torch.randint(low=self.time_step, high=self.n_frames - self.time_step - 1, size=(1,)).long().to(
device)
forward_seq = z[:, :int(t_samples) + 1, :]
c_t, hidden = self.gru_to_ct(forward_seq)
pred = torch.empty((self.time_step, self.batch_size, self.embedded_features)).float().to(device)
for i in np.arange(0, self.time_step):
linear = self.Wk[i]
pred[i] = linear(c_t)
#
x_samples = torch.empty((self.time_step, self.batch_size, self.n_features)).float().to(
device) # e.g.
# size
for i in np.arange(1, self.time_step + 1):
x_samples[i - 1, :, :] = x[:, int(t_samples) + i, :]
reconstruct_samples = self.decode(pred.transpose(1, 2)).transpose(1, 2)
# d = self.decode(pred.transpose(1,2).transpose(0,2))
nce, accuracy = self.compute_nce(x_samples, reconstruct_samples)
return accuracy, nce, x
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
class CPAE7_S(CPAE4_S):
"""
    This CPAE simply makes `f_i(x_i, x_j)` the chimera_loss function.
"""
    def __init__(self, embedded_features=8, gru_out=8, Lambda=[1, 1, 3]):
        super(CPAE7_S, self).__init__(embedded_features,
                                      gru_out)  # initialise the CPAE4 backbone with embedded_features = 8, gru_out = 8
self.Lambda = torch.tensor(Lambda).float().cuda()
self.Lambda = self.Lambda / sum(self.Lambda) * 10
def weighted_mask(self, x):
"""
similar to chimera loss
"""
# x = x.transpose(0,1)
# d = d.transpose(0,1)
assert (x.shape[1] == 76)
mse_m = torch.ones(x.shape).to(device)
mask_m, mapping_m = mask_mapping_M(x)
return self.Lambda[0] * mse_m + self.Lambda[1] * mask_m + self.Lambda[2] * mapping_m
def compute_nce(self, x, d):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......x : x_samples , ( time_step, batch_size, conv_sizes[-1] )
......d : reconstruct_samples , the same shape as x, self.decode(z_hat)
'''
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
x_w = self.weighted_mask(x[i]) * x[i]
total = torch.mm(x_w, torch.transpose(d[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
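# --- Illustrative sketch of the Lambda-weighted score used by CPAE7_S above.
# mask_mapping_M() lives elsewhere in this project, so all-ones placeholders
# stand in for its two masks here; only the Lambda normalisation and the
# dot-product score are shown. Helper name and toy shapes are ours.
def _weighted_score_sketch(batch_size=4, n_features=76):
    lam = torch.tensor([1., 1., 3.])
    lam = lam / lam.sum() * 10                     # same normalisation as CPAE7_S
    x = torch.randn(batch_size, n_features)        # true samples
    d = torch.randn(batch_size, n_features)        # reconstructed samples
    mse_m = torch.ones_like(x)
    mask_m, mapping_m = torch.ones_like(x), torch.ones_like(x)  # placeholders for mask_mapping_M(x)
    weight = lam[0] * mse_m + lam[1] * mask_m + lam[2] * mapping_m
    return torch.mm(weight * x, d.t())             # (batch, batch) score matrix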
class CPLSTM(nn.Module):
"""
Bi-directional LSTM
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5):
# Smart way to filter the args
self.dim = dim
self.bn = bn
self.drop = dropout
self.task = task
self.depth = depth
self.time_step = time_step
self.num_classes = num_classes
self.input_dim = input_dim
super(CPLSTM, self).__init__()
self.lstm1 = nn.LSTM(
input_size=self.input_dim,
hidden_size=dim // 2,
dropout=self.drop,
bidirectional=True,
batch_first=True
)
self.lstm2 = nn.LSTM(
            input_size=dim,  # lstm2 consumes the output of the bidirectional lstm1, whose feature size is dim
hidden_size=dim,
dropout=self.drop,
bidirectional=False,
batch_first=True
)
self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
def encodeRegress(self, x):
x, _ = self.lstm1(x)
x, state = self.lstm2(x)
ht, ct = state
return x, ht, ct
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_preds = [0] * self.time_step
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t, :])
h, c = ht, ct
for i in range(1, self.time_step + 1):
c_preds[i - 1] = self.Wk[i - 1](ht)
_, h, c = self.encodeRegress(x[:, t + i, :])
c_latent.append(c)
nce = 0
for i in np.arange(0, self.time_step):
total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.bs).to(device)))
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.bs * self.time_step
accuracy = 1. * correct.item() / self.bs
return accuracy, nce, c
def get_reg_out(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
# print('reshape x to ',x.shape)
xt, ht, ct = self.encodeRegress(x[:, :, :])
# print(ht.shape)
return xt.reshape((x.shape[0], -1))
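# --- Illustrative, CPU-only sketch of the CPC pattern the (CP)LSTM classes
# follow: an encoder/aggregator summarises the past into a context vector and
# one linear head per future step predicts the latent at t + k. Toy dimensions
# and the helper name are ours; they do not match the real configs.
def _cpc_lstm_sketch(batch_size=4, seq_len=30, input_dim=76, dim=16, time_step=5):
    lstm = nn.LSTM(input_dim, dim, batch_first=True)
    heads = nn.ModuleList([nn.Linear(dim, dim) for _ in range(time_step)])
    x = torch.randn(batch_size, seq_len, input_dim)
    t = 20                                            # split point between past and future
    _, (h_t, _) = lstm(x[:, :t + 1, :])               # context from the past only
    preds = [head(h_t.squeeze(0)) for head in heads]  # one prediction per future step
    return [p.shape for p in preds]                   # each (batch_size, dim)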
class CPLSTM2(nn.Module):
"""
LSTM
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5):
# Smart way to filter the args
self.dim = dim
self.bn = bn
self.drop = dropout
self.task = task
self.depth = depth
self.time_step = time_step
self.num_classes = num_classes
self.input_dim = input_dim
super(CPLSTM2, self).__init__()
self.lstm1 = nn.LSTM(
input_size=self.input_dim,
hidden_size=dim,
dropout=self.drop,
bidirectional=False,
batch_first=True
)
self.lstm2 = nn.LSTM(
            input_size=dim,  # lstm2 consumes the output of lstm1, whose feature size is dim
hidden_size=dim,
dropout=self.drop,
bidirectional=False,
batch_first=True
)
self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
def encodeRegress(self, x):
x, _ = self.lstm1(x)
x, state = self.lstm2(x)
ht, ct = state
return x, ht, ct
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_preds = [0] * self.time_step
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
h, c = ht, ct
for i in range(1, self.time_step + 1):
c_preds[i - 1] = self.Wk[i - 1](ht)
_, h, c = self.encodeRegress(x[:, t + i, :])
c_latent.append(c)
nce = 0
for i in np.arange(0, self.time_step):
total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.bs).to(device)))
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.bs * self.time_step
accuracy = 1. * correct.item() / self.bs
return accuracy, nce, c
def get_reg_out(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
# print('reshape x to ',x.shape)
xt, ht, ct = self.encodeRegress(x[:, :, :])
print(xt.shape)
return xt
# return xt.reshape((x.shape[0],-1))
class CPLSTM3(nn.Module):
"""
CPLSTM2 with dropout in non-recurrent layers and FC added.
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5):
# Smart way to filter the args
self.dim = dim
self.bn = bn
self.drop = dropout
self.task = task
self.depth = depth
self.time_step = time_step
self.num_classes = num_classes
self.input_dim = input_dim
super(CPLSTM3, self).__init__()
self.lstm1 = nn.LSTM(
input_size=self.input_dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
self.lstm2 = nn.LSTM(
input_size=dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
self.dropout = nn.Dropout(self.drop)
self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.fcs = nn.Sequential(
nn.Linear(self.dim, self.dim),
nn.ReLU(inplace=True),
nn.Linear(self.dim, self.dim)
)
for model in [self.lstm1, self.lstm2, self.fcs]:
self.initialize_weights(model)
for model in self.Wk:
self.initialize_weights(model)
def init_hidden(self, bs, dim):
cell_states = torch.zeros(1, bs, dim).to(device)
hidden_states = torch.zeros(1, bs, dim).to(device)
return (hidden_states, cell_states)
def initialize_weights(self, model):
if type(model) in [nn.Linear]:
nn.init.xavier_uniform_(model.weight)
nn.init.zeros_(model.bias)
elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
nn.init.orthogonal_(model.weight_hh_l0)
nn.init.xavier_uniform_(model.weight_ih_l0)
nn.init.zeros_(model.bias_hh_l0)
nn.init.zeros_(model.bias_ih_l0)
def encodeRegress(self, x, warm=False):
bs = x.shape[0]
x = self.dropout(x)
(h0, c0) = self.init_hidden(bs, self.dim)
if warm:
x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0))
_, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0))
# print([i.shape for i in state1],h0.shape,c0.shape)
x, state1 = self.lstm1(x[:, :, :], state1)
x, state2 = self.lstm2(x[:, :, :], state2)
ht, ct = state2
else:
x, state1 = self.lstm1(x[:, :, :], (h0, c0))
x, state2 = self.lstm2(x[:, :, :], (h0, c0))
ht, ct = state2
return x, ht, ct
#
#
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_preds = [0] * self.time_step
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t, :])
h, c = ht, ct
for i in range(1, self.time_step + 1):
c_preds[i - 1] = self.fcs(self.Wk[i - 1](ht))
_, h, c = self.encodeRegress(x[:, t + i, :])
c_latent.append(self.fcs(c))
nce = 0
for i in np.arange(0, self.time_step):
total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.bs).to(device)))
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.bs * self.time_step
accuracy = 1. * correct.item() / self.bs
return accuracy, nce, c
def get_reg_out(self, x, stack=False, warm=False, conti=False):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
# print('reshape x to ',x.shape)
xt, ht, ct = self.encodeRegress(x[:, :, :], warm)
# print(ht.shape)
# return xt.reshape((x.shape[0],-1))
if stack: return torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1)
return xt[:, -1, :].squeeze(1)
class CPLSTM4(nn.Module):
"""
    CPLSTM4: uses an LSTM as the prediction head Wk.
    mode=1 uses hidden states for the predictions; otherwise cell states are used.
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5, mode=1, noct=False, switch=True):
self.dim = dim
self.bn = bn
self.drop = dropout
self.task = task
self.depth = depth
self.time_step = time_step
self.num_classes = num_classes
self.input_dim = input_dim
self.mode = mode
self.noct = noct
super(CPLSTM4, self).__init__()
self.lstm1 = nn.LSTM(
input_size=self.input_dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
self.lstm2 = nn.LSTM(
input_size=dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
self.lstm3 = nn.LSTM(
input_size=dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
if self.noct:
self.stack_dim = self.dim * 192
else:
self.stack_dim = self.dim * 193
self.dropout = nn.Dropout(self.drop)
# self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
self.switch = switch
if self.switch == False:
self.softmax = nn.Softmax(dim=1)
self.lsoftmax = nn.LogSoftmax(dim=1)
else:
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.fcs = nn.Sequential(
nn.Linear(self.dim, self.dim),
nn.ReLU(inplace=True),
nn.Linear(self.dim, self.dim)
)
for model in [self.lstm1, self.lstm2, self.lstm3, self.fcs]:
self.initialize_weights(model)
def init_hidden(self, bs, dim):
cell_states = torch.zeros(1, bs, dim).to(device)
hidden_states = torch.zeros(1, bs, dim).to(device)
return (hidden_states, cell_states)
def freeze_encode(self):
for param in self.lstm1.parameters():
param.requires_grad = False
def initialize_weights(self, model):
if type(model) in [nn.Linear]:
nn.init.xavier_uniform_(model.weight)
nn.init.zeros_(model.bias)
elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
nn.init.orthogonal_(model.weight_hh_l0)
nn.init.xavier_uniform_(model.weight_ih_l0)
nn.init.zeros_(model.bias_hh_l0)
nn.init.zeros_(model.bias_ih_l0)
def encodeRegress(self, x, warm=False, conti=False):
bs = x.shape[0]
x = self.dropout(x)
if conti:
x, state1 = self.lstm1(x)
x, state2 = self.lstm2(x)
ht, ct = state2
return x, ht, ct
(h0, c0) = self.init_hidden(bs, self.dim)
if warm:
x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0))
_, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0))
# print([i.shape for i in state1],h0.shape,c0.shape)
x, state1 = self.lstm1(x[:, :, :], state1)
x, state2 = self.lstm2(x[:, :, :], state2)
ht, ct = state2
else:
x, state1 = self.lstm1(x[:, :, :], (h0, c0))
x, state2 = self.lstm2(x[:, :, :], (h0, c0))
ht, ct = state2
return x, ht, ct
#
def predict(self, z, hz, cz, ts, mode=1):
""""
if mode==1: return hidden states; else return cell states"""
h, c = hz, cz
x_previous = z
c_preds = torch.empty((self.time_step, self.bs, self.dim)).to(device)
for i in range(ts):
x_pred, (h, c) = self.lstm3(x_previous, (h, c))
if mode:
c_preds[i, :, :] = h
else:
c_preds[i, :, :] = c # mode = 0
x_previous = x_pred
return c_preds
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
for i in range(1, self.time_step + 1):
_, h, c = self.encodeRegress(x[:, t + i, :]) # init with zeros
c_latent.append(self.fcs(c))
nce = 0
for i in np.arange(0, self.time_step):
total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.bs).to(device)))
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.bs * self.time_step
accuracy = 1. * correct.item() / self.bs
return accuracy, nce, c
def get_reg_out(self, x, stack=False, warm=False, conti=False):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
# print('reshape x to ',x.shape)
xt, ht, ct = self.encodeRegress(x[:, :, :], warm, conti)
# print(ht.shape)
# return xt.reshape((x.shape[0],-1))
if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1)))
if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1))
return xt[:, -1, :].squeeze(1)
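# --- Illustrative sketch of CPLSTM4.predict(): instead of one linear head per
# step, a third LSTM is unrolled autoregressively, feeding its own output back
# in as the next input; `mode` selects hidden vs. cell state as the prediction.
# Toy CPU dimensions and the helper name are ours.
def _autoregressive_predict_sketch(batch_size=4, dim=16, time_step=5, mode=1):
    lstm3 = nn.LSTM(dim, dim, batch_first=True)
    h = torch.zeros(1, batch_size, dim)
    c = torch.zeros(1, batch_size, dim)
    x_prev = torch.randn(batch_size, 1, dim)          # stands in for h_t.transpose(0, 1)
    preds = torch.empty(time_step, batch_size, dim)
    for i in range(time_step):
        x_pred, (h, c) = lstm3(x_prev, (h, c))
        preds[i] = (h if mode else c).squeeze(0)      # hidden vs. cell state
        x_prev = x_pred
    return preds.shape                                # (time_step, batch_size, dim)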
class CPLSTM4C(nn.Module):
"""
    Re-initialises the hidden state at time point t.
    mode=1 uses hidden states for the predictions; otherwise cell states are used.
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5, mode=1, noct=False):
self.dim = dim
self.bn = bn
self.drop = dropout
self.task = task
self.depth = depth
self.time_step = time_step
self.num_classes = num_classes
self.input_dim = input_dim
self.mode = mode
self.noct = noct
super(CPLSTM4C, self).__init__()
self.lstm1 = nn.LSTM(
input_size=self.input_dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
self.lstm2 = nn.LSTM(
input_size=dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
self.lstm3 = nn.LSTM(
input_size=dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
if self.noct:
self.stack_dim = self.dim * 192
else:
self.stack_dim = self.dim * 193
self.dropout = nn.Dropout(self.drop)
# self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.fcs = nn.Sequential(
nn.Linear(self.dim, self.dim),
nn.ReLU(inplace=True),
nn.Linear(self.dim, self.dim)
)
for model in [self.lstm1, self.lstm2, self.lstm3, self.fcs]:
self.initialize_weights(model)
def init_hidden(self, bs, dim):
cell_states = torch.zeros(1, bs, dim).to(device)
hidden_states = torch.zeros(1, bs, dim).to(device)
return (hidden_states, cell_states)
def initialize_weights(self, model):
if type(model) in [nn.Linear]:
nn.init.xavier_uniform_(model.weight)
nn.init.zeros_(model.bias)
elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
nn.init.orthogonal_(model.weight_hh_l0)
nn.init.xavier_uniform_(model.weight_ih_l0)
nn.init.zeros_(model.bias_hh_l0)
nn.init.zeros_(model.bias_ih_l0)
def encodeRegress(self, x, warm=False, conti=False):
bs = x.shape[0]
x = self.dropout(x)
if conti:
x, state1 = self.lstm1(x)
x, state2 = self.lstm2(x)
ht, ct = state2
return x, ht, ct
(h0, c0) = self.init_hidden(bs, self.dim)
if warm:
x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0))
_, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0))
# print([i.shape for i in state1],h0.shape,c0.shape)
x, state1 = self.lstm1(x[:, :, :], state1)
x, state2 = self.lstm2(x[:, :, :], state2)
ht, ct = state2
else:
x, state1 = self.lstm1(x[:, :, :], (h0, c0))
x, state2 = self.lstm2(x[:, :, :], (h0, c0))
ht, ct = state2
return x, ht, ct
#
def predict(self, z, hz, cz, ts, mode=1):
""""
if mode==1: return hidden states; else return cell states"""
h, c = hz, cz
x_previous = z
c_preds = torch.empty((self.time_step, self.bs, self.dim)).to(device)
for i in range(ts):
x_pred, (h, c) = self.lstm3(x_previous, (h, c))
if mode:
c_preds[i, :, :] = h
else:
c_preds[i, :, :] = c
x_previous = x_pred
return c_preds
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
(h0, c0) = self.init_hidden(self.bs, self.dim)
h1, c1 = h0, c0
h2, c2 = h0, c0
for i in range(1, self.time_step + 1):
# BUG : self.time_step ? i
tmp, (h1, c1) = self.lstm1(x[:, t + i, :], (h1, c1))
_, (h2, c2) = self.lstm2(tmp, (h2, c2))
c_latent.append(self.fcs(c2))
nce = 0
for i in np.arange(0, self.time_step):
total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.bs).to(device)))
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.bs * self.time_step
accuracy = 1. * correct.item() / self.bs
return accuracy, nce, None
def get_reg_out(self, x, stack=False, warm=False, conti=False):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
# print('reshape x to ',x.shape)
xt, ht, ct = self.encodeRegress(x[:, :, :], warm, conti)
# print(ht.shape)
# return xt.reshape((x.shape[0],-1))
if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1)))
if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1))
return xt[:, -1, :].squeeze(1)
class CPLSTM3H(CPLSTM3):
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5):
super(CPLSTM3H, self).__init__(dim, bn, dropout, task,
depth, num_classes,
input_dim, time_step)
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_preds = [0] * self.time_step
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t, :])
h, c = ht, ct
for i in range(1, self.time_step + 1):
c_preds[i - 1] = self.fcs(self.Wk[i - 1](ht))
_, h, c = self.encodeRegress(x[:, t + i, :])
c_latent.append(self.fcs(h))
nce = 0
for i in np.arange(0, self.time_step):
total = torch.mm(c_latent[i].squeeze(0), torch.transpose(c_preds[i].squeeze(0), 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.bs).to(device)))
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.bs * self.time_step
accuracy = 1. * correct.item() / self.bs
return accuracy, nce, c
class CPLSTM4H(CPLSTM4):
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5, mode=1):
super(CPLSTM4H, self).__init__(dim, bn, dropout, task,
depth, num_classes,
input_dim, time_step, mode)
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
# xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
# c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
#
# # for i in range(1, self.time_step + 1):
# x, (h, c) = self.lstm1(x[:, t + 1:t+self.time_step+1, :])
# c_latent=self.fcs(x)
z_embeds, _ = self.lstm1(x)
_, (hidden_ct, cell_ct) = self.lstm2(z_embeds[:, :t + 1, :])
z_preds_time_step = self.fcs(
self.predict(hidden_ct.transpose(0, 1), hidden_ct, cell_ct, self.time_step, self.mode))
z_embeds_time_step = z_embeds[:, t + 1:t + self.time_step + 1, :]
nce = 0
for i in np.arange(0, self.time_step):
total = torch.mm(z_embeds_time_step[:, i, :].squeeze(0),
torch.transpose(z_preds_time_step[i].squeeze(0), 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.bs).to(device)))
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.bs * self.time_step
accuracy = 1. * correct.item() / self.bs
return accuracy, nce, None
class CPAELSTM41(nn.Module):
"""
    CPLSTM4: uses an LSTM as the prediction head Wk.
    mode=1 uses hidden states for the predictions; otherwise cell states are used.
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5, mode=1, noct=False):
self.dim = dim
self.bn = bn
self.drop = dropout
self.task = task
self.depth = depth
self.time_step = time_step
self.num_classes = num_classes
self.input_dim = input_dim
self.mode = mode
self.noct = noct
super(CPAELSTM41, self).__init__()
self.lstm1 = nn.LSTM(
input_size=self.input_dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
self.lstm2 = nn.LSTM(
input_size=dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
self.lstm3 = nn.LSTM(
input_size=dim,
hidden_size=dim,
bidirectional=False,
batch_first=True)
self.dropout = nn.Dropout(self.drop)
# self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.de_fc = nn.Sequential(nn.Linear(self.dim, self.input_dim),
nn.ReLU(inplace=True),
nn.Linear(self.input_dim, self.input_dim),
nn.ReLU(inplace=True),
)
self.fcs = nn.Sequential(
nn.Linear(self.input_dim, self.input_dim),
nn.ReLU(inplace=True),
nn.Linear(self.input_dim, self.input_dim)
)
for model in [self.lstm1, self.lstm2, self.lstm3, self.fcs]:
self.initialize_weights(model)
def init_hidden(self, bs, dim):
cell_states = torch.zeros(1, bs, dim).to(device)
hidden_states = torch.zeros(1, bs, dim).to(device)
return (hidden_states, cell_states)
def initialize_weights(self, model):
if type(model) in [nn.Linear]:
nn.init.xavier_uniform_(model.weight)
nn.init.zeros_(model.bias)
elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
nn.init.orthogonal_(model.weight_hh_l0)
nn.init.xavier_uniform_(model.weight_ih_l0)
nn.init.zeros_(model.bias_hh_l0)
nn.init.zeros_(model.bias_ih_l0)
def encodeRegress(self, x, warm=False, conti=False):
bs = x.shape[0]
x = self.dropout(x)
if conti:
x, state1 = self.lstm1(x)
x, state2 = self.lstm2(x)
ht, ct = state2
return x, ht, ct
(h0, c0) = self.init_hidden(bs, self.dim)
if warm:
x_temp, state1 = self.lstm1(x[:, :5, :], (h0, c0))
_, state2 = self.lstm2(x_temp[:, :5, :], (h0, c0))
# print([i.shape for i in state1],h0.shape,c0.shape)
x, state1 = self.lstm1(x[:, :, :], state1)
x, state2 = self.lstm2(x[:, :, :], state2)
ht, ct = state2
else:
x, state1 = self.lstm1(x[:, :, :], (h0, c0))
x, state2 = self.lstm2(x[:, :, :], (h0, c0))
ht, ct = state2
return x, ht, ct
#
#
def predict(self, z, hz, cz, ts, mode=1):
""""
if mode==1: return hidden states; else return cell states"""
h, c = hz, cz
x_previous = z
c_preds = torch.empty((self.time_step, self.bs, self.dim)).to(device)
for i in range(ts):
x_pred, (h, c) = self.lstm3(x_previous, (h, c))
if mode:
c_preds[i, :, :] = h
else:
c_preds[i, :, :] = c
x_previous = x_pred
return c_preds
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
x_preds = self.fcs(self.de_fc(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode)))
nce = 0
for i in np.arange(0, self.time_step):
total = torch.mm(self.fcs(x[:, t + i + 1, :]).squeeze(1), torch.transpose(x_preds[i].squeeze(0), 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.bs).to(device)))
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.bs * self.time_step
accuracy = 1. * correct.item() / self.bs
return accuracy, nce, None
def get_reg_out(self, x, stack=False, warm=False, conti=False):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
# print('reshape x to ',x.shape)
xt, ht, ct = self.encodeRegress(x[:, :, :], warm, conti)
# print(ht.shape)
# return xt.reshape((x.shape[0],-1))
if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1)))
if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1))
return xt[:, -1, :].squeeze(1)
class CPAELSTM42(CPAELSTM41):
"""
    Two-layer LSTM as decoder to reconstruct x.
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5, mode=1):
super(CPAELSTM42, self).__init__(dim, bn, dropout, task,
depth, num_classes,
input_dim, time_step, mode)
self.lstm3 = nn.LSTM(
input_size=self.input_dim,
num_layers=1,
hidden_size=self.input_dim,
bidirectional=False,
batch_first=True)
#
# self.dropout=nn.Dropout(self.drop)
# # self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
# self.softmax = nn.Softmax(dim=0)
# self.lsoftmax = nn.LogSoftmax(dim=0)
self.de_fc = nn.Sequential(
nn.Linear(self.dim, self.input_dim),
nn.ReLU(inplace=True)
)
# self.fcs=nn.Sequential(
# nn.Linear(self.input_dim,self.input_dim),
# nn.ReLU(inplace=True),
#
# nn.Linear(self.input_dim,self.input_dim)
# )
# for model in [self.lstm1,self.lstm2,self.lstm3,self.fcs]:
# self.initialize_weights(model)
def init_hidden(self, bs, dim):
cell_states = torch.zeros(1, bs, dim).to(device)
hidden_states = torch.zeros(1, bs, dim).to(device)
return (hidden_states, cell_states)
# BUG
def initialize_weights(self, model):
if type(model) in [nn.Linear]:
nn.init.xavier_uniform_(model.weight)
nn.init.zeros_(model.bias)
elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
nn.init.orthogonal_(model.weight_hh_l0)
nn.init.xavier_uniform_(model.weight_ih_l0)
nn.init.zeros_(model.bias_hh_l0)
nn.init.zeros_(model.bias_ih_l0)
def encodeRegress(self, x):
bs = x.shape[0]
x = self.dropout(x)
(h0, c0) = self.init_hidden(bs, self.dim)
x, _ = self.lstm1(x, (h0, c0))
x, state = self.lstm2(x, (h0, c0))
ht, ct = state
return x, ht, ct
#
#
def predict(self, z, hz, cz, ts, mode=1):
""""
if mode==1: return hidden states; else return cell states"""
h, c = self.de_fc(hz), self.de_fc(cz)
x_previous = self.de_fc(z)
x_preds = torch.empty((self.time_step, self.bs, self.input_dim)).to(device)
for i in range(ts):
x_pred, (h, c) = self.lstm3(x_previous, (h, c))
if mode:
x_preds[i, :, :] = h
else:
x_preds[i, :, :] = c
x_previous = x_pred
return x_preds
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
x_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
# for i in range(1,self.time_step+1):
# _, h,c=self.encodeRegress(x[:,t+i,:])
# c_latent.append(self.fcs(c))
nce = 0
for i in np.arange(0, self.time_step):
total = torch.mm(self.fcs(x[:, t + i + 1, :]).squeeze(1), torch.transpose(x_preds[i].squeeze(0), 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.bs).to(device)))
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.bs * self.time_step
accuracy = 1. * correct.item() / self.bs
return accuracy, nce, None
def get_reg_out(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
# print('reshape x to ',x.shape)
xt, ht, ct = self.encodeRegress(x[:, :, :])
# print(ht.shape)
# return xt.reshape((x.shape[0],-1))
return xt[:, -1, :].squeeze(1)
class CPAELSTM43(CPLSTM4H):
"""
add decoder constraint in loss function
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5, mode=1):
super(CPAELSTM43, self).__init__(dim, bn, dropout, task,
depth, num_classes,
input_dim, time_step, mode)
self.lstm4 = nn.LSTM(
input_size=self.dim,
hidden_size=self.input_dim,
bidirectional=False,
batch_first=True)
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
self.batch_size = self.bs
for i in np.arange(0, self.time_step):
try:
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
except IndexError:
print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
raise AssertionError
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
def encode(self, x):
bs = x.shape[0]
x = self.dropout(x)
(h0, c0) = self.init_hidden(bs, self.dim)
x, _ = self.lstm1(x, (h0, c0))
return x
def decode(self, x):
bs = x.shape[0]
x = self.dropout(x)
(h0, c0) = self.init_hidden(bs, self.input_dim)
x, _ = self.lstm4(x, (h0, c0))
return x
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
x_ori = x
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
# for i in range(1, self.time_step + 1):
# x, h, c = self.encodeRegress(x[:, t + 1:t+self.time_step+1, :])
z, (h, c) = self.lstm1(x)
c_latent = self.fcs(z[:, t + 1:t + self.time_step + 1, :]) # with memory
x_hat = self.decode(z)
nce, acc = self.compute_nce(c_latent.transpose(0, 1), c_preds)
return x_hat, nce, acc
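# --- Illustrative sketch of how the two terms returned by CPAELSTM43.forward
# (a reconstruction x_hat and an NCE term) can be combined into one training
# objective. The weighting `alpha` is an assumed hyper-parameter: the actual
# combination is done in the training script, not in this file.
def _joint_loss_sketch(x, x_hat, nce, alpha=1.0):
    recon = nn.functional.mse_loss(x_hat, x)   # reconstruction constraint
    return recon + alpha * nce                 # contrastive + reconstruction loss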
class CPAELSTM44(CPLSTM4):
"""
    Adds a decoder (reconstruction) constraint to the loss function.
    sim: similarity function; 'dot' for dot product, 'cosine' for cosine similarity.
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
                 input_dim=76, time_step=5, t_range=None, mode=1, sym=False, sim='dot', temperature=1, pred_mode='step'):
super(CPAELSTM44, self).__init__(dim, bn, dropout, task,
depth, num_classes,
input_dim, time_step, mode)
self.lstm4 = nn.LSTM(
input_size=self.dim,
hidden_size=self.input_dim,
bidirectional=False,
batch_first=True)
        self.sym = sym
        self.sim = sim
        self.temperature = temperature
        self.t_range = t_range
        self.pred_mode = pred_mode
        if self.pred_mode == 'future':
            self.W_pred = nn.Linear(self.dim, self.dim)
    def sim_func(self, a, b):
        if self.sim == 'cosine':
            print('use cosine')
            a = a / a.norm(dim=-1, keepdim=True)
            b = b / b.norm(dim=-1, keepdim=True)
            a = self.temperature * a
            b = self.temperature * b
            return torch.mm(a, b.T)
        elif self.sim == 'dot':
            print('use dot')
            return torch.mm(a, b.T)
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
self.batch_size = self.bs
for i in np.arange(0, self.time_step):
try:
total = self.sim_func(encode_samples[i], pred[i]) # e.g. size 8*8
except IndexError:
print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
raise AssertionError
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
if self.sym:
nce += 1/2*(torch.sum(torch.diag((nn.LogSoftmax(dim=0)(total)))) + torch.sum(torch.diag((nn.LogSoftmax(dim=1)(total)))))# nce is a tensor
else:
nce += torch.sum(torch.diag(self.lsoftmax(total)))
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
return nce, accuracy
def encode(self, x):
bs = x.shape[0]
x = self.dropout(x)
(h0, c0) = self.init_hidden(bs, self.dim)
x, _ = self.lstm1(x, (h0, c0))
return x
def decode(self, x):
bs = x.shape[0]
x = self.dropout(x)
(h0, c0) = self.init_hidden(bs, self.input_dim)
x, _ = self.lstm4(x, (h0, c0))
return x
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
x_ori = x
t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
self.bs = x.shape[0]
c_latent = []
xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
c_preds = self.fcs(self.predict(ht.transpose(0, 1), ht, ct, self.time_step, self.mode))
# for i in range(1, self.time_step + 1):
# x, h, c = self.encodeRegress(x[:, t + 1:t+self.time_step+1, :])
z_after_t = []
for i in range(1, self.time_step + 1):
_, h, c = self.encodeRegress(x[:, t + i, :])
z_after_t.append(self.fcs(c))
z_after_t = torch.cat(z_after_t, 0)
c_embeds = self.fcs(z_after_t)
z_all = torch.cat((xt, z_after_t.transpose(0, 1)), 1)
x_hat = self.decode(z_all)
nce, acc = self.compute_nce(c_embeds, c_preds)
return x_hat, nce, acc
    def pred_future(self, x):
        # NOTE: this mirrors CPAELSTM44_AT.pred_future and relies on check_input()
        # and cal_att2(), which are defined on CPAELSTM44_AT but not on this class.
x=self.check_input(x)
# print(self.t_range)
# print(self.max_len)
t_range=(self.max_len*self.t_range[0],self.max_len*self.t_range[1])
# print(t_range)
# t_range = (self.max_len *2// 3, 4 * self.max_len // 5)
# print(x.shape)
x_ori = x
if self.max_len>192: t=192
else:
t = torch.randint(low=int(t_range[0]), high=int(t_range[1]), size=(1,)).long() # choose a point to split the time series
# print('t is %s'%t)
# self.bs = x.shape[0]
latent_past, _, hidden_reg_out_past, _ = self.encodeRegress(x[:, :t + 1, :])
latent_future, _, hidden_reg_out_future, _ = self.encodeRegress(x[:, t + 1:self.max_len, :])
del x
hidden_reg_out_pred = self.fcs(self.W_pred(hidden_reg_out_past))
latent_all = torch.cat((latent_past, latent_future), 1)
del latent_future,latent_past
latent_all_attention = torch.mul(latent_all, self.cal_att2(latent_all))
del latent_all
x_hat = self.decode(latent_all_attention)
nce, acc = self.compute_nce(self.fcs(hidden_reg_out_future), hidden_reg_out_pred)
return x_hat, nce, acc
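# --- Illustrative sketch of the two similarity choices exposed by `sim` in
# CPAELSTM44 above: a plain dot product vs. the temperature-scaled cosine
# similarity (note that, as in sim_func, the temperature is applied to both
# sides). Toy CPU shapes; the helper name is ours.
def _similarity_sketch(batch_size=4, dim=16, temperature=1.0):
    a = torch.randn(batch_size, dim)
    b = torch.randn(batch_size, dim)
    dot = torch.mm(a, b.t())
    a_n = temperature * a / a.norm(dim=-1, keepdim=True)
    b_n = temperature * b / b.norm(dim=-1, keepdim=True)
    cosine = torch.mm(a_n, b_n.t())
    return dot.shape, cosine.shape             # both (batch_size, batch_size)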
class CPAELSTM44_AT(CPLSTM4):
"""
    Adds a decoder (reconstruction) constraint to the loss function.
pred_mode: 'step' for timestep prediction
'future' for using past to predict future
"""
    def __init__(self, dim, bn, dropout, task, t_range=None,
                 depth=2, num_classes=1,
                 input_dim=76, flat_attention=False, time_step=5, sim='dot', temperature=1, mode=1, switch=True, pred_mode='step', sym=False):
        super(CPAELSTM44_AT, self).__init__(dim, bn, dropout, task,
                                            depth, num_classes,
                                            input_dim, time_step,
                                            mode=mode, switch=switch)  # keyword args: CPLSTM4 also takes `noct` before `switch`
self.lstm4 = nn.LSTM(
input_size=self.dim,
hidden_size=self.input_dim,
bidirectional=False,
batch_first=True)
self.att1 = nn.Linear(self.dim, self.dim)
self.att2 = nn.Linear(self.dim, self.dim)
        self.flat_attention = flat_attention
        self.sim = sim
        self.temperature = temperature
        self.t_range = t_range
        self.pred_mode = pred_mode
        if self.pred_mode == 'future':
            self.W_pred = nn.Linear(self.dim, self.dim)
        self.sym = sym  # whether to use the symmetric loss
def cal_att1(self,x):
if self.flat_attention:
x=self.att1(x)
assert x.shape[-1]==self.dim
# x=torch.transpose(x,1,2)
# torch.nn.BatchNorm1d(self.dim)
# x=torch.transpose(x,1,2)
            nn.Softmax(dim=-1)  # NOTE: constructed and immediately discarded, so this line has no effect on x
else:
x=self.att1(x)
return x
def cal_att2(self,x):
if self.flat_attention:
x=self.att2(x)
# x=torch.transpose(x,1,2)
# torch.nn.BatchNorm1d(self.dim)
# x=torch.transpose(x,1,2)
            nn.Softmax(dim=-1)  # NOTE: constructed and immediately discarded, so this line has no effect on x
else:
x=self.att2(x)
return x
    def sim_func(self, a, b):
        if self.sim == 'cosine':
            a = a / a.norm(dim=-1, keepdim=True)
            b = b / b.norm(dim=-1, keepdim=True)
            a = self.temperature * a
            b = self.temperature * b
            print('using cosine')
            return torch.mm(a, b.T)
        elif self.sim == 'dot':
            print('using dot')
            return torch.mm(a, b.T)
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
self.batch_size = self.bs
if self.pred_mode=='step':
for i in np.arange(0, self.time_step):
try:
                    print('self.sim is ', self.sim)
                    total = self.sim_func(encode_samples[i], pred[i])  # e.g. size 8*8
                except IndexError:
                    print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
                    raise AssertionError
                # print(total)
                correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
                                             torch.arange(0, self.batch_size).cuda()))  # correct is a tensor
                if self.sym:
                    nce += 1 / 2 * (torch.sum(torch.diag(nn.LogSoftmax(dim=0)(total))) + torch.sum(torch.diag(nn.LogSoftmax(dim=1)(total))))  # nce is a tensor
                else:
                    nce += torch.sum(torch.diag(self.lsoftmax(total)))  # nce is a tensor
            nce /= -1. * self.batch_size * self.time_step
            accuracy = 1. * correct.item() / self.batch_size
elif self.pred_mode=='future':
total=self.sim_func(encode_samples[0],pred[0])
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
# correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
# torch.arange(0, self.batch_size).cuda())) # correct is a tensor
# correct_2=torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=1),
# torch.arange(0, self.batch_size).cuda()))
# print(correct,correct_2)
# print(total)
if self.sym:
nce += 1/2*(torch.sum(torch.diag((nn.LogSoftmax(dim=0)(total)))) + torch.sum(torch.diag((nn.LogSoftmax(dim=1)(total)))))# nce is a tensor
else:
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size
accuracy =1. * correct.item() / self.batch_size
return nce, accuracy
def encodeRegress(self, x, warm=False, conti=False):
bs = x.shape[0]
x = self.dropout(x)
# print(x.shape)
latents, state1 = self.lstm1(x)
del x
latents_to_pred = torch.mul(latents, self.cal_att1(latents))
regs, state2 = self.lstm2(latents_to_pred)
del latents_to_pred
ht, ct = state2
return latents, regs, ht, ct
def get_reg_out(self, x, stack=False, warm=False, conti=False, ifbn=False):
bs = x.shape[0]
x = self.dropout(x)
latents, state1 = self.lstm1(x)
# latents_to_pred = torch.mul(latents, self.att1(latents))
regs, state2 = self.lstm2(latents)
ht, ct = state2
return regs[:, -1, :].squeeze(1)
def encode(self, x):
bs = x.shape[0]
x = self.dropout(x)
x, (h, c) = self.lstm1(x)
return x, h, c
def decode(self, x):
bs = x.shape[0]
x = self.dropout(x)
(h0, c0) = self.init_hidden(bs, self.input_dim)
x, _ = self.lstm4(x, (h0, c0))
return x
def check_input(self, x):
if type(x) == dict:
dic = x
x = dic['data'].squeeze(0)
self.max_len = min(dic['length'])
self.bs = x.shape[0]
elif len(x.shape) == 4:
x = x.squeeze(1)
self.bs = x.shape[0]
self.max_len = x.shape[1]
elif x.shape[1] == 76:
x = x.transpose(1, 2)
self.bs = x.shape[0]
self.max_len = x.shape[1]
else:
self.max_len=x.shape[1]
self.bs=x.shape[0]
return x
def pred_future(self, x):
x=self.check_input(x)
# print(self.t_range)
# print(self.max_len)
t_range=(self.max_len*self.t_range[0],self.max_len*self.t_range[1])
# print(t_range)
# t_range = (self.max_len *2// 3, 4 * self.max_len // 5)
# print(x.shape)
x_ori = x
if self.max_len>192: t=192
else:
t = torch.randint(low=int(t_range[0]), high=int(t_range[1]), size=(1,)).long() # choose a point to split the time series
# print('t is %s'%t)
# self.bs = x.shape[0]
latent_past, _, hidden_reg_out_past, _ = self.encodeRegress(x[:, :t + 1, :])
latent_future, _, hidden_reg_out_future, _ = self.encodeRegress(x[:, t + 1:self.max_len, :])
del x
hidden_reg_out_pred = self.fcs(self.W_pred(hidden_reg_out_past))
latent_all = torch.cat((latent_past, latent_future), 1)
del latent_future,latent_past
latent_all_attention = torch.mul(latent_all, self.cal_att2(latent_all))
del latent_all
x_hat = self.decode(latent_all_attention)
nce, acc = self.compute_nce(self.fcs(hidden_reg_out_future), hidden_reg_out_pred)
return x_hat, nce, acc
def pred_timestep(self, x):
x=self.check_input(x)
t = torch.randint(low=20, high=self.max_len - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
# self.bs = x.shape[0]
latent_past, _, hidden_reg_out, cell_reg_out = self.encodeRegress(x[:, :t + 1, :])
latent_preds = self.fcs(
self.predict(hidden_reg_out.transpose(0, 1), hidden_reg_out, cell_reg_out, self.time_step, self.mode))
latent_future = []
for i in range(1, self.time_step + 1):
_, h, c = self.encode(x[:, t + i, :])
latent_future.append(self.fcs(c[-1]))
latent_future = torch.stack(latent_future, 0)
latent_all = torch.cat((latent_past, latent_future.transpose(0, 1)), 1)
latent_all_attention = torch.mul(latent_all, self.cal_att2(latent_all))
x_hat = self.decode(latent_all_attention)
nce, acc = self.compute_nce(latent_future, latent_preds)
return x_hat, nce, acc
def forward(self, x):
if self.pred_mode == 'future':
x_hat, nce, acc = self.pred_future(x)
else:
x_hat, nce, acc = self.pred_timestep(x)
return x_hat, nce, acc
class SelfAttention(nn.Module):
def __init__(self, in_dim):
super(SelfAttention,self).__init__()
        self.channel_in = in_dim
self.Wq = nn.Linear(in_dim , in_dim)
self.Wk = nn.Linear(in_dim , in_dim)
self.Wv = nn.Linear(in_dim , in_dim)
self.gamma = in_dim
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""
            inputs :
                x : input features of shape (batch_size, seq_len, in_dim)
            returns :
                out : scaled dot-product self-attention output, same shape as x
"""
# x: (48, 144, 256)
m_batchsize, width, height = x.size()
proj_query = self.Wq(x)
proj_key = self.Wk(x)
energy = torch.matmul(proj_key.transpose(1,2),proj_query) / (self.gamma**0.5)
attention = self.softmax(energy)
proj_value = self.Wv(x)
out = torch.matmul(proj_value, attention)
return out
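# --- Illustrative usage sketch for SelfAttention above: the input is a
# (batch, time, feature) tensor and the output keeps the same shape. Runs on
# CPU with toy sizes; the helper name is ours.
def _self_attention_sketch(batch_size=2, seq_len=10, dim=16):
    att = SelfAttention(dim)
    x = torch.randn(batch_size, seq_len, dim)
    out = att(x)                               # scaled dot-product self-attention
    return out.shape                           # (batch_size, seq_len, dim)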
class CPAELSTM44_selfAT(CPLSTM4):
"""
    Adds a decoder (reconstruction) constraint to the loss function.
pred_mode: 'step' for timestep prediction
'future' for using past to predict future
"""
    def __init__(self, dim, bn, dropout, task, t_range=None,
                 depth=2, num_classes=1,
                 input_dim=76, time_step=5, mode=1, switch=True, pred_mode='step'):
        super(CPAELSTM44_selfAT, self).__init__(dim, bn, dropout, task,
                                                depth, num_classes,
                                                input_dim, time_step,
                                                mode=mode, switch=switch)  # keyword args: CPLSTM4 also takes `noct` before `switch`
self.lstm4 = nn.LSTM(
input_size=self.dim,
hidden_size=self.input_dim,
bidirectional=False,
batch_first=True)
self.att1 = SelfAttention(self.dim)
self.att2 = SelfAttention(self.dim)
self.t_range=t_range
self.pred_mode = pred_mode
def compute_nce(self, encode_samples, pred):
'''
-----------------------------------------------------------------------------------
--------------Calculate NCE loss--------------
-----------------------------------------------------------------------------------
...argument:
......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
......pred : Wk[i]( C_t )
'''
nce = 0 # average over time_step and batch
self.batch_size = self.bs
if self.pred_mode=='step':
for i in np.arange(0, self.time_step):
try:
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
except IndexError:
print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
raise AssertionError
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size * self.time_step
accuracy = 1. * correct.item() / self.batch_size
elif self.pred_mode=='future':
total=torch.mm(encode_samples[0],torch.transpose(pred[0], 0, 1))
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, self.batch_size).cuda())) # correct is a tensor
nce = torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * self.batch_size
accuracy =1. * correct.item() / self.batch_size
return nce, accuracy
def encodeRegress(self, x, warm=False, conti=False):
bs = x.shape[0]
x = self.dropout(x)
latents, state1 = self.lstm1(x)
del x
# latents (48,144,256)
latents_to_pred = self.att1(latents)
regs, state2 = self.lstm2(latents_to_pred)
del latents_to_pred
ht, ct = state2
return latents, regs, ht, ct
def get_reg_out(self, x, stack=False, warm=False, conti=False, ifbn=False):
bs = x.shape[0]
x = self.dropout(x)
latents, state1 = self.lstm1(x)
# latents_to_pred = torch.mul(latents, self.att1(latents))
regs, state2 = self.lstm2(latents)
ht, ct = state2
return regs[:, -1, :].squeeze(1)
def encode(self, x):
bs = x.shape[0]
x = self.dropout(x)
x, (h, c) = self.lstm1(x)
return x, h, c
def decode(self, x):
bs = x.shape[0]
x = self.dropout(x)
(h0, c0) = self.init_hidden(bs, self.input_dim)
x, _ = self.lstm4(x, (h0, c0))
return x
def check_input(self, x):
if type(x) == dict:
dic = x
x = dic['data'].squeeze(0)
self.max_len = min(dic['length'])
self.bs = x.shape[0]
elif len(x.shape) == 4:
x = x.squeeze(1)
self.bs = x.shape[0]
self.max_len = x.shape[1]
elif x.shape[1] == 76:
x = x.transpose(1, 2)
self.bs = x.shape[0]
self.max_len = x.shape[1]
else:
self.max_len=x.shape[1]
self.bs=x.shape[0]
return x
def pred_future(self, x):
x=self.check_input(x)
# print(self.t_range)
# print(self.max_len)
t_range=(self.max_len*self.t_range[0],self.max_len*self.t_range[1])
# print(t_range)
# t_range = (self.max_len *2// 3, 4 * self.max_len // 5)
# print(x.shape)
x_ori = x
if self.max_len>192: t=192
else:
t = torch.randint(low=int(t_range[0]), high=int(t_range[1]), size=(1,)).long() # choose a point to split the time series
# print('t is %s'%t)
# self.bs = x.shape[0]
latent_past, _, hidden_reg_out_past, _ = self.encodeRegress(x[:, :t + 1, :])
latent_future, _, hidden_reg_out_future, _ = self.encodeRegress(x[:, t + 1:self.max_len, :])
del x
hidden_reg_out_pred = self.fcs(self.W_pred(hidden_reg_out_past))
latent_all = torch.cat((latent_past, latent_future), 1)
del latent_future,latent_past
latent_all_attention = self.att2(latent_all)
del latent_all
x_hat = self.decode(latent_all_attention)
nce, acc = self.compute_nce(self.fcs(hidden_reg_out_future), hidden_reg_out_pred)
return x_hat, nce, acc
def pred_timestep(self, x):
# x (48,192,76)
x=self.check_input(x)
# x (48,192,76)
t = torch.randint(low=20, high=self.max_len - self.time_step - 1, size=(1,)).long()
# print('reshape x to ',x.shape)
# self.bs = x.shape[0]
latent_past, _, hidden_reg_out, cell_reg_out = self.encodeRegress(x[:, :t + 1, :])
latent_preds = self.fcs(
self.predict(hidden_reg_out.transpose(0, 1), hidden_reg_out, cell_reg_out, self.time_step, self.mode))
latent_future = []
for i in range(1, self.time_step + 1):
_, h, c = self.encode(x[:, t + i, :])
latent_future.append(self.fcs(c[-1]))
latent_future = torch.stack(latent_future, 0)
latent_all = torch.cat((latent_past, latent_future.transpose(0, 1)), 1)
latent_all_attention = self.att2(latent_all)
x_hat = self.decode(latent_all_attention)
nce, acc = self.compute_nce(latent_future, latent_preds)
return x_hat, nce, acc
def forward(self, x):
if self.pred_mode == 'future':
x_hat, nce, acc = self.pred_future(x)
else:
x_hat, nce, acc = self.pred_timestep(x)
return x_hat, nce, acc
# class CPAELSTM45(CPLSTM4):
# """
# CPLSTM4+ CPAE4
# """
#
# def __init__(self, dim, bn, dropout, task,
# depth=2, num_classes=1,
# input_dim=76, time_step=5, mode=1):
# super(CPAELSTM45, self).__init__(dim, bn, dropout, task,
# depth, num_classes,
# input_dim, time_step, mode)
#
# self.fcs3 = nn.Sequential(
# nn.Linear(self.input_dim, self.input_dim),
# nn.ReLU(inplace=True),
# nn.Linear(self.input_dim, self.input_dim)
# )
# self.lstm4 = nn.LSTM(
# input_size=self.dim,
# hidden_size=self.input_dim,
# bidirectional=False,
# batch_first=True)
#
# def encode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.dim)
# x, _ = self.lstm1(x, (h0, c0))
# return x
#
# def decode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.input_dim)
# x, _ = self.lstm4(x, (h0, c0))
# return x
#
# def compute_nce(self, encode_samples, pred):
# '''
# -----------------------------------------------------------------------------------
# --------------Calculate NCE loss--------------
# -----------------------------------------------------------------------------------
# ...argument:
# ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
# ......pred : Wk[i]( C_t )
# '''
# nce = 0 # average over time_step and batch
# self.batch_size = self.bs
# for i in np.arange(0, self.time_step):
# try:
# total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# except IndexError:
# print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
# raise AssertionError
# # print(total)
# correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
# torch.arange(0, self.batch_size).cuda())) # correct is a tensor
# nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
# nce /= -1. * self.batch_size * self.time_step
# accuracy = 1. * correct.item() / self.batch_size
#
# return nce, accuracy
#
# def forward(self, x):
# if len(x.shape) == 4: x = x.squeeze(1)
# if x.shape[1] == 76: x = x.transpose(1, 2)
# x_ori = x
# t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# self.bs = x.shape[0]
# xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
#
# z_after_t = []
#
# for i in range(1, self.time_step + 1):
# _, h, c = self.encodeRegress(x[:, t + i, :])
# z_after_t.append(c)
# z_after_t = torch.cat(z_after_t, 0)
#
# x_hat = self.decode(z_after_t)
# nce, acc = self.compute_nce(self.fcs3(x_ori[:, t + 1:t + 1 + self.time_step, :]).transpose(0, 1),
# self.fcs3(x_hat))
# nce2, acc2 = self.compute_nce((x_ori[:, t + 1:t + 1 + self.time_step, :]).transpose(0, 1), x_hat)
# print('acc after fc', acc)
# print('acc before fc', acc2)
# return acc, nce, None
#
#
# class CPAELSTM46(CPLSTM4):
# """
# CPLSTM4+ CPAE4
# """
#
# def __init__(self, dim, bn, dropout, task,
# depth=2, num_classes=1,
# input_dim=76, time_step=5, mode=1):
# super(CPAELSTM46, self).__init__(dim, bn, dropout, task,
# depth, num_classes,
# input_dim, time_step, mode)
#
# self.lstm4 = nn.LSTM(
# input_size=self.dim,
# hidden_size=self.input_dim,
# bidirectional=False,
# batch_first=True)
#
# def encode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.dim)
# x, _ = self.lstm1(x, (h0, c0))
# return x
#
# def decode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.input_dim)
# x, _ = self.lstm4(x, (h0, c0))
# return x
#
# def compute_nce(self, encode_samples, pred):
# '''
# -----------------------------------------------------------------------------------
# --------------Calculate NCE loss--------------
# -----------------------------------------------------------------------------------
# ...argument:
# ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
# ......pred : Wk[i]( C_t )
# '''
# nce = 0 # average over time_step and batch
# self.batch_size = self.bs
# for i in np.arange(0, self.time_step):
# try:
# total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# except IndexError:
# print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
# raise AssertionError
# # print(total)
# correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
# torch.arange(0, self.batch_size).cuda())) # correct is a tensor
# nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
# nce /= -1. * self.batch_size * self.time_step
# accuracy = 1. * correct.item() / self.batch_size
#
# return nce, accuracy
#
# def forward(self, x):
# if len(x.shape) == 4: x = x.squeeze(1)
# if x.shape[1] == 76: x = x.transpose(1, 2)
# x_ori = x
# t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
# self.bs = x.shape[0]
# xt, ht, ct = self.encodeRegress(x[:, :t + 1, :])
#
# z_after_t = []
#
# for i in range(1, self.time_step + 1):
# _, h, c = self.encodeRegress(x[:, t + i, :])
# z_after_t.append(c)
# z_after_t = torch.cat(z_after_t, 0)
#
# x_hat = self.decode(z_after_t)
# nce, acc = self.compute_nce((x_ori[:, t + 1:t + 1 + self.time_step, :]).transpose(0, 1), x_hat)
# return acc, nce, None
#
#
# class CPAELSTM4_AT(CPLSTM4):
# def __init__(self, dim, bn, dropout, task,
# depth=2, num_classes=1,
# input_dim=76, time_step=5, mode=1, switch=True):
# super(CPAELSTM4_AT, self).__init__(dim, bn, dropout, task,
# depth, num_classes,
# input_dim, time_step, mode)
#
# self.lstm1 = nn.LSTM(
# input_size=self.input_dim,
# hidden_size=self.dim,
# num_layers=3,
# bidirectional=False,
# batch_first=True
# )
# self.lstm4 = nn.LSTM(
# input_size=self.dim,
# hidden_size=self.input_dim,
# bidirectional=False,
# batch_first=True)
# self.switch = switch
# if self.switch == False:
# self.softmax = nn.Softmax(dim=1)
# self.lsoftmax = nn.LogSoftmax(dim=1)
# self.att1 = nn.Linear(self.dim, self.dim) # attend to decoder
# self.att2 = nn.Linear(self.dim, self.dim) # attend to predictor
#
# def encodeRegress(self, x, warm=False, conti=False):
# bs = x.shape[0]
# x = self.dropout(x)
# latents, state1 = self.lstm1(x)
# latents_to_pred = torch.mul(latents, self.att1(latents))
# regs, state2 = self.lstm2(latents_to_pred)
# ht, ct = state2
# return latents, regs, ht, ct
#
# def get_reg_out(self, x, stack=False, warm=False, conti=False, ifbn=False):
# # TODO:
# bs = x.shape[0]
# x = self.dropout(x)
# latents, state1 = self.lstm1(x)
#
# # latents_to_pred = torch.mul(latents, self.att1(latents))
# regs, state2 = self.lstm2(latents)
# ht, ct = state2
# return regs[:, -1, :].squeeze(1)
#
# def compute_nce(self, encode_samples, pred):
# '''
# -----------------------------------------------------------------------------------
# --------------Calculate NCE loss--------------
# -----------------------------------------------------------------------------------
# ...argument:
# ......encode_samples : ( time_step, batch_size, conv_sizes[-1] )
# ......pred : Wk[i]( C_t )
# '''
# nce = 0 # average over time_step and batch
# self.batch_size = self.bs
# for i in np.arange(0, self.time_step):
# try:
# total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# except IndexError:
# print('i is : %s,latent shape: %s, pred shape: %s ' % (i, encode_samples.shape, pred.shape))
# raise AssertionError
# # print(total)
# correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
# torch.arange(0, self.batch_size).cuda())) # correct is a tensor
# nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
# nce /= -1. * self.batch_size * self.time_step
# accuracy = 1. * correct.item() / self.batch_size
#
# return nce, accuracy
#
# def encode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# x, (h, c) = self.lstm1(x)
# return x, h, c
#
# def decode(self, x):
# bs = x.shape[0]
# x = self.dropout(x)
# (h0, c0) = self.init_hidden(bs, self.input_dim)
# x, _ = self.lstm4(x, (h0, c0))
# return x
#
# def forward(self, x):
# # check shape
# if len(x.shape) == 4: x = x.squeeze(1)
# if x.shape[1] == 76: x = x.transpose(1, 2)
# self.bs = x.shape[0]
#
# # randomly choose a time point
# t = torch.randint(low=20, high=x.shape[1] - self.time_step - 1, size=(1,)).long()
#
# # encode the past and put into regressor
# latent_past, _, hidden_reg_out, cell_reg_out = self.encodeRegress(x[:, :t + 1, :])
# latent_preds = self.fcs(
# self.predict(hidden_reg_out.transpose(0, 1), hidden_reg_out, cell_reg_out, self.time_step, self.mode))
#
# latent_future = []
# for i in range(1, self.time_step + 1):
# _, h, c = self.encode(x[:, t + i, :])
# latent_future.append(self.fcs(c[-1]))
#
# latent_future = torch.stack(latent_future, 0)
#
# latent_all = torch.cat((latent_past, latent_future.transpose(0, 1)), 1)
# latent_all_attention = torch.mul(latent_all, self.att2(latent_all))
# x_hat = self.decode(latent_all_attention)
# nce, acc = self.compute_nce(latent_future, latent_preds)
#
# return x_hat, nce, acc
class CDCK3_S(nn.Module):
def __init__(
self,
embedded_features,
gru_out,
n_points=192,
n_features=76,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
kernel_sizes=[(2, i) for i in [76, 32, 64, 64, 128, 256, 512, 1024, 512, 128, 64]],
time_step=30):
self.embedded_features = embedded_features
self.gru_out = gru_out
self.conv_sizes = conv_sizes
self.time_step = time_step
# kernel_sizes=get_kernel_sizes() #TODO
super(CDCK3_S, self).__init__()
self.n_features = n_features
        # padding note: if an int is given, the same padding is applied on all boundaries;
        # if a 4-tuple is given, it is interpreted as (left, right, top, bottom)
self.channels = [n_features] + conv_sizes
# the core part of model list
self.sequential = lambda inChannel, outChannel: nn.Sequential(
nn.ReflectionPad1d((0, 1)),
nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
        # ** note: this list is one element shorter than the number of channels
self.encoder = nn.ModuleList(
[self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
).to(device)
# self.decode_channels = self.channels[::-1]
# self.decoder = nn.ModuleList(
# [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
# ).to(device)
self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features).to(device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# dim = 1 !!!
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
self.gru = nn.GRU(
self.embedded_features,
gru_out,
num_layers=1,
bidirectional=False,
batch_first=True).to(device)
self.beforeNCE = None
# input shape: (N,C=1,n_points=192,n_features=76)
# output shape: (N, C=sizes[-1], )
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
def add_fcs(self, hidden=None):
"""
This function will add FC layers to the embedded features and then compare the features after FC transformations.
See NOTION for illustration.
:param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
as [n_embedded_features,n_embedded_features]
:return: None
"""
n = self.embedded_features
if hidden is None:
self.fcs = nn.Sequential(
nn.Linear(n, n),
nn.ReLU(inplace=True),
nn.Linear(n, n)
)
else:
if type(hidden) != list:
hidden = list(hidden)
layers = []
for i, j in zip([n] + hidden, hidden + [n]):
layers.append(nn.Linear(i, j))
layers.append(nn.ReLU(inplace=True))
layers.pop() # We do not want Relu at the last layer
self.fcs = nn.Sequential(*layers).to(device)
self.beforeNCE = True
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_hidden(self, batch_size, use_gpu=True):
return torch.zeros(1, batch_size, self.gru_out).to(device)
def forward(self, x):
batch_size = x.shape[0]
# input shape: (N,C=1,n_points=192,n_features=76)
if len(x.shape) == 4: x = x.squeeze(1)
if x.shape[1] == 192: x = x.transpose(1, 2)
for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192)
x = self.encoder[i](x)
        # output shape: (N, C=conv_sizes[-1], n_frames)
self.n_frames = x.shape[2]
t_samples = torch.randint(self.n_frames - self.time_step - 1, size=(1,)).long()
encode_samples = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(
device) # e.g. size
c_t = torch.zeros(size=(batch_size, self.gru_out)).float().to(device)
hidden = self.init_hidden(batch_size, use_gpu=True)
init_hidden = hidden
# reshape for gru
x = x.view(batch_size, self.n_frames, self.conv_sizes[-1])
# output shape: (N, n_frames, conv_sizes[-1])
x = self.linear(x)
# output shape: (N, n_frames, embedded_features)
for i in np.arange(1, self.time_step + 1):
hidden = init_hidden
encode_samples[i - 1, :, :] = x[:, int(t_samples) + i, :]
forward_seq = x[:, :int(t_samples) + 1, :]
# ----->SHAPE: (N,t_samples+1,embedded_features)
output, hidden = self.gru(forward_seq, hidden)
c_t = output[:, -1, :].view(batch_size, self.gru_out)
pred = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(device)
for i in np.arange(0, self.time_step):
linear = self.Wk[i]
pred[i] = linear(c_t)
if self.beforeNCE: # ADD FC layers
pred = self.fcs(pred)
encode_samples = self.fcs(encode_samples)
# -----------------------------------------------------------------------------------
# --------------Calculate NCE loss------------------------------------------------
# -----------------------------------------------------------------------------------
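        # total[j, k] below is the dot product between the true future embedding of sample j and
        # the prediction made for sample k, so the other samples in the batch act as negatives.
        # Summing the log-softmax diagonal over all predicted steps gives the InfoNCE estimate
        #     nce = -1 / (batch_size * time_step) * sum_i sum_j log softmax(total_i[:, j])[j]
        # while `correct` counts how often the true pair receives the highest score.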
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, batch_size).to(device))) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * batch_size * self.time_step
accuracy = 1. * correct.item() / batch_size
return accuracy, nce, hidden
    def sub_forward(self, x):
        # run only the convolutional encoder defined above
        # input shape: (N, C=1, n_points=192, n_features=76)
        if len(x.shape) == 4: x = x.squeeze(1)
        if x.shape[1] == 192: x = x.transpose(1, 2)
        for i in range(len(self.encoder)):
            x = self.encoder[i](x)
        return x
def get_reg_out(self, x, every=False):
batch_size = x.shape[0]
# input shape: (N,C=1,n_points=192,n_features=76)
if len(x.shape) == 4: x = x.squeeze(1)
if x.shape[1] == 192: x = x.transpose(1, 2)
for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192)
x = self.encoder[i](x)
# zt
# output shape: (N, C=conv_sizes[-1], n_frames,1)
self.n_frames = x.shape[2]
t_samples = torch.randint(self.n_frames - self.time_step - 1, size=(1,)).long()
encode_samples = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(
device) # e.g. size
c_t = torch.zeros(size=(batch_size, self.gru_out)).float().to(device)
hidden = self.init_hidden(batch_size)
init_hidden = hidden
# reshape for gru
x = x.view(batch_size, self.n_frames, self.conv_sizes[-1])
# output shape: (N, n_frames, conv_sizes[-1])
x = self.linear(x)
# output shape: (N, n_frames, embedded_features)
hidden = init_hidden
output, hidden = self.gru(x, hidden)
c_t = output[:, -1, :].view(batch_size, self.gru_out)
return c_t
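# Minimal usage sketch for CDCK3_S (illustrative only, not part of the original pipeline; the batch
# size and the use of `torch`/`device` from the top of this module are assumptions):
def _demo_cdck3_s():
    model = CDCK3_S(embedded_features=22, gru_out=32, time_step=30).to(device)
    x = torch.randn(4, 1, 192, 76).to(device)   # (N, 1, n_points, n_features)
    acc, nce, _ = model(x)                       # contrastive accuracy and InfoNCE loss
    context = model.get_reg_out(x)               # (N, gru_out) summary vector per sample
    return acc, nce, context.shape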
class CDCK2(nn.Module):
def __init__(self,
time_step,
batch_size,
frame_size,
fix_frame=True,
n_frames=None,
conv_sizes=[64, 128, 512, 128, 64, 32, 16],
n_flat_features_per_frame=None,
embedded_features=22,
gru_out=32
):
"""data should be formatted as
Input: (batch size, n_frames, frame_size, features)
*****If the frame_size and n_frames are identical for every batch,
*****Please set fix_frame=True, and please provide n_frames
:type conv_sizes: list
"""
super(CDCK2, self).__init__()
self.beforeNCE = False
self.frame_size = frame_size
self.batch_size = batch_size
self.time_step = time_step
self.fix_frame = fix_frame
self.n_frames = n_frames
self.n_flat_features_per_frame = n_flat_features_per_frame
self.embedded_features = embedded_features
self.gru_out = gru_out
if not self.fix_frame:
self.encoder = nn.Sequential(
nn.MaxPool2d(4, stride=1),
nn.Conv2d(1, 4, kernel_size=2, stride=1, padding=1, bias=False),
nn.BatchNorm2d(4),
nn.ReLU(inplace=True),
nn.MaxPool2d(3, stride=1),
nn.Conv2d(4, 8, kernel_size=2, stride=4, padding=2, bias=False),
nn.BatchNorm2d(8),
nn.ReLU(inplace=True),
nn.Conv2d(8, self.embedded_features, kernel_size=2, stride=2, padding=1, bias=False),
nn.BatchNorm2d(self.embedded_features),
nn.ReLU(inplace=True),
nn.Flatten()
)
if self.fix_frame:
self.convs = nn.ModuleList([nn.Conv2d(self.n_frames, conv_sizes[0], kernel_size=2, stride=1, padding=2,
bias=False, groups=self.n_frames)]
+ [
nn.Conv2d(i, j, kernel_size=2, stride=1, padding=2, bias=False,
groups=self.n_frames)
for i, j in zip(conv_sizes[:-1], conv_sizes[1:])
]
)
self.bns = nn.ModuleList(
[nn.BatchNorm2d(i) for i in conv_sizes]
)
self.maxpooling = nn.MaxPool2d(2, stride=1)
self.ReLU = nn.ReLU(inplace=True)
self.softmax = nn.Softmax()
self.lsoftmax = nn.LogSoftmax()
if n_flat_features_per_frame:
self.linear = nn.Linear(self.n_flat_features_per_frame, self.embedded_features)
self.gru = nn.GRU(self.embedded_features, self.gru_out, num_layers=1, bidirectional=False,
batch_first=True).to(device)
self.Wk = nn.ModuleList(
[nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# initialize gru
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
def add_fcs(self, hidden=None):
"""
This function will add FC layers to the embedded features and then compare the features after FC transformations.
See NOTION for illustration.
:param hidden: a list of hidden sizes per layer. For example:[100,100]. If no value is passed, it will be set
as [n_embedded_features,n_embedded_features]
:return: None
"""
n = self.embedded_features
if hidden is None:
self.fcs = nn.Sequential(
nn.Linear(n, n),
nn.ReLU(inplace=True),
nn.Linear(n, n)
)
else:
if type(hidden) != list:
hidden = list(hidden)
layers = []
for i, j in zip([n] + hidden, hidden + [n]):
layers.append(nn.Linear(i, j))
layers.append(nn.ReLU(inplace=True))
layers.pop() # We do not want Relu at the last layer
self.fcs = nn.Sequential(*layers)
self.beforeNCE = True
def update_flat_features(self, n_flat_features_per_frame):
self.n_flat_features_per_frame = n_flat_features_per_frame
self.linear = nn.Linear(self.n_flat_features_per_frame, self.embedded_features).to(device)
self.gru = nn.GRU(self.embedded_features, self.gru_out, num_layers=1, bidirectional=False, batch_first=True).to(
device)
self.Wk = nn.ModuleList([nn.Linear(self.gru_out, self.embedded_features) for i in range(self.time_step)]).to(
device)
# initialize gru
for layer_p in self.gru._all_weights:
for p in layer_p:
if 'weight' in p:
nn.init.kaiming_normal_(self.gru.__getattr__(p), mode='fan_out', nonlinearity='relu')
self.apply(self._weights_init)
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def init_hidden(self, batch_size, use_gpu=True):
if self.fix_frame:
if use_gpu:
return torch.zeros(1, batch_size, self.gru_out).to(device)
else:
return torch.zeros(1, batch_size, self.gru_out)
if not self.fix_frame:
if use_gpu:
return torch.zeros(1, 1, self.gru_out).to(device)
else:
return torch.zeros(1, 1, self.gru_out)
def forward(self, x):
# Convert into frames
# shape of x:(N,1,n_points,features)
x, frame_ends = makeFrameDimension(x, self.frame_size,
self.n_frames) # shape of x:(batch_size,n_frames,frame_size, n_features)
# shape of x:(N,n_frames,points_per_frame,features)
batch_size = x.shape[0]
        # Warning: the last batch in the dataset may have batch_size < self.batch_size,
        # so self.batch_size cannot be used here.
self.n_frames = x.shape[1]
# -----------------------------------------------------------------------------------
# --------------Pick a random time point------------------------------------------------
# -----------------------------------------------------------------------------------
if not self.fix_frame:
t_samples = torch.empty((batch_size, 1))
for i in range(batch_size):
try:
t_samples[i] = torch.randint(int((frame_ends[i] - self.time_step - 1).item()),
size=(1,)).long() # randomly pick time stamps
except RuntimeError: # some patients have very few frames so we have to choose the first frame to start
frame_ends[i] = self.time_step + 3
t_samples[i] = 1
if self.fix_frame:
t_samples = torch.randint(self.n_frames - self.time_step - 1, size=(1,)).long()
# -----------------------------------------------------------------------------------
# --------------DO THE EMBEDDING------------------------------------------------
# ------------------------------------------------------------------------------------
if not self.fix_frame:
z = torch.empty((batch_size, self.n_frames, self.embedded_features)).float().to(device)
for i in range(self.n_frames):
y = (x[:, i, :, :].unsqueeze(1)).clone().to(device)
y = self.encoder(y) # ------>SHAPE: (N,n_flat_features_per_frame)
                # calculate n_flat_features_per_frame if it is unknown
if self.n_flat_features_per_frame == None:
self.n_flat_features_per_frame = y.shape[1]
logger.info('-----n_flat_features_per_frame=%d' % self.n_flat_features_per_frame)
return self.n_flat_features_per_frame
y = self.linear(y) # ----->SHAPE: (N,embedded_features)
z[:, i, :] = y.squeeze(1) # --->SHAPE: (N, 1, embedded_features)
del x, y
if self.fix_frame:
# x:(8,24,8,76) (N,n_frames,points_per_frame,features)
f = iter(self.convs)
g = iter(self.bns)
for i in range(len(self.convs)):
x = next(f)(x)
try:
x = nn.MaxPool2d(2, stride=2)(x)
except RuntimeError:
pass
x = next(g)(x)
x = self.ReLU(x)
x = nn.Flatten(start_dim=2, end_dim=-1)(x)
z = x
del x
# z: (8,144) (N,flat_features)
            # calculate n_flat_features_per_frame if it is unknown
if self.n_flat_features_per_frame == None:
self.n_flat_features_per_frame = int(z.shape[2] * z.shape[1] / self.n_frames)
logger.info('-----n_flat_features_per_frame=%d' % self.n_flat_features_per_frame)
return self.n_flat_features_per_frame
z = z.view(batch_size, self.n_frames, self.n_flat_features_per_frame)
# ---->SHAPE: (N,n_frames,n_flat_features_per_frame)
z = self.linear(z) # ----->SHAPE: (N,n_frames,embedded_features)
encode_samples = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(
device) # e.g. size
# ----->SHAPE: (T,N,embedded_features)
c_t = torch.zeros(size=(batch_size, self.gru_out)).float().to(device)
# output of GRU,------>SHAPE:(N, n_gru_out)
# -----------------------------------------------------------------------------------
# --------------GET GRU OUTPUT------------------------------------------------
# -----------------------------------------------------------------------------------
forward_seq = []
hidden = self.init_hidden(len(z), use_gpu=True)
init_hidden = hidden
if not self.fix_frame:
for j in range(batch_size):
hidden = init_hidden
t = t_samples[j]
for i in np.arange(1, self.time_step + 1):
encode_samples[i - 1][j] = z[j, int(t_samples[j].item()) + i, :]
forward_seq.append(z[j, :int(t_samples[j].item()) + 1, :])
output, hidden = self.gru(forward_seq[j].unsqueeze(0), hidden)
c_t[j] = output[:, -1, :].view(1, self.gru_out)
if self.fix_frame:
for i in np.arange(1, self.time_step + 1):
hidden = init_hidden
encode_samples[i - 1, :, :] = z[:, int(t_samples) + i, :]
forward_seq = z[:, :int(t_samples) + 1, :]
# ----->SHAPE: (N,t_samples+1,embedded_features)
output, hidden = self.gru(forward_seq, hidden)
c_t = output[:, -1, :].view(batch_size, self.gru_out)
pred = torch.empty((self.time_step, batch_size, self.embedded_features)).float().to(device)
for i in np.arange(0, self.time_step):
linear = self.Wk[i]
pred[i] = linear(c_t)
if self.beforeNCE: # ADD FC layers
pred = self.fcs(pred)
encode_samples = self.fcs(encode_samples)
# -----------------------------------------------------------------------------------
# --------------Calculate NCE loss------------------------------------------------
# -----------------------------------------------------------------------------------
nce = 0 # average over time_step and batch
for i in np.arange(0, self.time_step):
total = torch.mm(encode_samples[i], torch.transpose(pred[i], 0, 1)) # e.g. size 8*8
# print(total)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, batch_size).to(device))) # correct is a tensor
nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
nce /= -1. * batch_size * self.time_step
accuracy = 1. * correct.item() / batch_size
return accuracy, nce, hidden
def get_reg_out(self, x, every=False):
"""
Get the output of the regression model (GRU).
batch_size could be different from the batch_size used in training process
This function is only applicable for the case in which the samples share the same length,
which means that the self.fix_frame=True
"""
x, _ = makeFrameDimension(x, self.frame_size, x.shape[1])
self.n_frames = x.shape[1]
batch_size = x.size()[0]
if self.fix_frame:
f = iter(self.convs)
g = iter(self.bns)
for i in range(len(self.convs)):
x = next(f)(x)
try:
x = nn.MaxPool2d(2, stride=2)(x)
except RuntimeError:
pass
x = next(g)(x)
x = self.ReLU(x)
x = nn.Flatten(start_dim=2, end_dim=-1)(x)
z = x
# self.n_flat_features_per_frame=z.shape[1]/self.n_frames
z = z.view(batch_size, self.n_frames, self.n_flat_features_per_frame)
# ---->SHAPE: (N,n_frames,embedded_features)
z = self.linear(z) # ----->SHAPE: (N,n_frames,embedded_features)
hidden = self.init_hidden(batch_size)
output, hidden = self.gru(z, hidden) # output size e.g. 8*128*256
# ---->SHAPE: (N,n_frames,n_gru_out)
else:
z = torch.empty((batch_size, self.n_frames, self.embedded_features)).float().to(device)
for i in range(self.n_frames):
y = (x[:, i, :, :].unsqueeze(1)).clone().to(device)
y = self.encoder(y) # ------>SHAPE: (N,n_flat_features_per_frame)
                # calculate n_flat_features_per_frame if it is unknown
if self.n_flat_features_per_frame == None:
self.n_flat_features_per_frame = y.shape[1]
logger.info('-----n_flat_features_per_frame=%d' % self.n_flat_features_per_frame)
return self.n_flat_features_per_frame
y = self.linear(y) # ----->SHAPE: (N,embedded_features)
z[:, i, :] = y.squeeze(1) # --->SHAPE: (N, 1, embedded_features)
del x, y
c = torch.zeros(size=(batch_size, self.n_frames, self.gru_out)).float().to(device)
for j in range(batch_size):
hidden = self.init_hidden(batch_size)
output, hidden = self.gru(z[j, :, :].unsqueeze(0), hidden)
c[j, :, :] = output[:, :, :].view(1, self.n_frames, self.gru_out)
output = c
if every:
return output # return output from gru of every frame
# ---->SHAPE: (N,n_frames,n_gru_out)
else:
return output[:, -1, :] # only return the last output
# ---->SHAPE: (N,n_gru_out)
def get_latent(self, x, every=True):
"""
Get the latent vectors of each frame
"""
batch_size = x.size()[0]
x, _ = makeFrameDimension(x, self.frame_size, x.shape[1])
z = self.encoder(x)
        self.n_flat_features_per_frame = z.shape[1] // self.n_frames  # integer division so view() receives an int
z = z.view(batch_size, self.n_frames, self.n_flat_features_per_frame)
return z
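# Note on CDCK2's two-pass construction: n_flat_features_per_frame is only known after the conv
# stack has been run once, so forward() returns that number (instead of the usual
# (acc, nce, hidden) triple) while the attribute is still None. define_model() below relies on
# this: it runs one training call to discover the value, rebuilds the model and then calls
# update_flat_features() before actual training starts.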
class AE1(nn.Module):
"""
trivial autoencoder
"""
def __init__(
self,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
):
super(AE1, self).__init__()
self.conv_sizes = conv_sizes
encodelist = []
enChannels = [1] + conv_sizes
count = 0
for i in range(len(enChannels) - 1):
encodelist.append(nn.Conv2d(enChannels[i], enChannels[i + 1], kernel_size=2))
encodelist.append(nn.BatchNorm2d(enChannels[i + 1]))
encodelist.append(nn.ReLU(inplace=True))
# if count < 2:
# encodelist.append(nn.MaxPool2d(2,stride=1))
count += 1
deChannels = enChannels[::-1]
decodelist = []
for i in range(len(enChannels) - 1):
# if count >= len(enChannels) - 3:
# decodelist.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=3))
# else:
decodelist.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=2))
decodelist.append(nn.BatchNorm2d(deChannels[i + 1]))
decodelist.append(nn.ReLU(inplace=True))
count += 1
self.encoder = nn.Sequential(*encodelist)
self.decoder = nn.Sequential(*decodelist)
    def forward(self, x):
        if len(x.shape) == 3: x = x.unsqueeze(1)  # add the channel dimension expected by Conv2d
        y = x
        x = self.encoder(x)
        # print(x.shape)
        torch.cuda.empty_cache()
        x = self.decoder(x)
        torch.cuda.empty_cache()
        # print(x.shape)
        loss = nn.MSELoss(reduction='mean')(x, y)
        torch.cuda.empty_cache()
        return -1, loss, x  # make sure it is consistent with other models' training function
class AE2_S(nn.Module):
"""
    Auto-encoder that convolves only along the time direction; same encoder design as in CPAE1.
"""
def __init__(
self,
embedded_features,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
n_points=192,
n_features=76,
):
self.conv_sizes = conv_sizes
super(AE2_S, self).__init__()
self.embedded_features = embedded_features
        # padding note: if an int is given, the same padding is applied on all boundaries;
        # if a 4-tuple is given, it is interpreted as (left, right, top, bottom)
self.channels = [n_features] + conv_sizes
# the core part of model list
self.sequential = lambda inChannel, outChannel: nn.Sequential(
nn.ReflectionPad1d((0, 1)),
nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
        # ** note: this list is one element shorter than the number of channels
self.encoder = nn.ModuleList(
[self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
)
self.decode_channels = self.channels[::-1]
self.decoder = nn.ModuleList(
[self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
)
self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features)
self.delinear = nn.Linear(self.embedded_features, self.conv_sizes[-1])
def forward(self, x):
# input (batch,192,76)
if len(x.shape) == 4: x = x.squeeze(1)
y = x
x = x.transpose(1, 2) # (b,76,192)
x = self.encode(x).transpose(1, 2) # x: (batch, n_time, conv[-1])
x = self.linear(x) # (batch, time,embedded_features)
x = nn.BatchNorm1d(self.embedded_features).to(device)(x.transpose(1, 2)).transpose(1, 2)
x = nn.ReLU(inplace=True).to(device)(x)
x = self.delinear(x) # (batch, time, conv[-1])
x = nn.BatchNorm1d(self.conv_sizes[-1]).to(device)(x.transpose(1, 2)).transpose(1, 2)
x = nn.ReLU(inplace=True).to(device)(x)
x = self.decode(x.transpose(1, 2)) # (batch,76,192)
x = x.transpose(1, 2)
loss = nn.MSELoss(reduction='mean')(x, y)
return -1, loss, x
def encode(self, x):
for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192)
x = self.encoder[i](x)
return x # output shape: (N,n_features=8,n_points=192)
def decode(self, x):
for i in range(len(self.decoder)): # input shape: (N,n_features=8,n_points=192)
x = self.decoder[i](x)
return x
def get_encode(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
x = x.transpose(1, 2)
x = self.encode(x).transpose(1, 2)
x = nn.Flatten()(x)
return x # output shape: (N,192*12)
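# A minimal, hypothetical sketch of AE2_S in use (sizes are made up; `torch` and `device` are
# assumed to be defined at the top of this module). forward() returns (-1, mse_loss, reconstruction)
# so that it matches the (acc, loss, hidden) convention expected by train().
def _demo_ae2_s():
    model = AE2_S(embedded_features=22).to(device)
    x = torch.randn(4, 1, 192, 76).to(device)    # (N, 1, n_points, n_features)
    _, mse, x_hat = model(x)                      # reconstruction loss and reconstruction
    z = model.get_encode(x)                       # flattened code, (N, 192 * conv_sizes[-1])
    return mse, x_hat.shape, z.shape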
class CAE1(AE1):
"""
Contrastive Auto-encoder based on AE1
"""
def __init__(self):
super(CAE1, self).__init__()
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
def forward(self, x):
# get batch size
bs = x.shape[0]
y = x
_, _, x = super().forward(x)
loss, acc = self.compute_nce(x, y)
del y
return acc, loss, x
def compute_nce(self, x_hat, x):
bs = x.shape[0]
assert x.shape == x_hat.shape
nce = 0
x = x.view(bs, -1)
x_hat = x_hat.view(bs, -1)
total = torch.mm(x_hat, x.T)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, bs).cuda()))
nce = torch.sum(torch.diag(self.lsoftmax(total)))
nce /= -1. * bs
acc = 1. * correct.item() / bs
torch.cuda.empty_cache()
del x, x_hat
return nce, acc
class CAE11(nn.Module):
def __init__(
self,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
):
super(CAE11, self).__init__()
self.conv_sizes = conv_sizes
encodelist = []
enChannels = [1] + conv_sizes
count = 0
for i in range(len(enChannels) - 1):
encodelist.append(nn.Conv2d(enChannels[i], enChannels[i + 1], kernel_size=2))
encodelist.append(nn.BatchNorm2d(enChannels[i + 1]))
encodelist.append(nn.ReLU(inplace=True))
# if count < 2:
# encodelist.append(nn.MaxPool2d(2,stride=1))
count += 1
deChannels = enChannels[::-1]
decodelist = []
for i in range(len(enChannels) - 1):
# if count >= len(enChannels) - 3:
# decodelist.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=3))
# else:
decodelist.append(nn.ConvTranspose2d(deChannels[i], deChannels[i + 1], kernel_size=2))
decodelist.append(nn.BatchNorm2d(deChannels[i + 1]))
decodelist.append(nn.ReLU(inplace=True))
count += 1
self.encoder = nn.Sequential(*encodelist)
self.decoder = nn.Sequential(*decodelist)
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
    def forward(self, x):
        if len(x.shape) == 3: x = x.unsqueeze(1)  # add the channel dimension expected by Conv2d
        y = x
        x = self.encoder(x)
        # print(x.shape)
        torch.cuda.empty_cache()
        x = self.decoder(x)
        torch.cuda.empty_cache()
        # print(x.shape)
        torch.cuda.empty_cache()
        loss, acc = self.compute_nce(x, y)
        del y
        return acc, loss, x
def compute_nce(self, x_hat, x):
bs = x.shape[0]
assert x.shape == x_hat.shape
nce = 0
x = x.view(bs, -1)
x_hat = x_hat.view(bs, -1)
total = torch.mm(x_hat, x.T)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, bs).cuda()))
nce = torch.sum(torch.diag(self.lsoftmax(total)))
nce /= -1. * bs
acc = 1. * correct.item() / bs
torch.cuda.empty_cache()
del x, x_hat
return nce, acc
class CAE2_S(AE2_S):
"""
Contrastive auto-encoder based on AE2
"""
def __init__(
self,
embedded_features,
conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8],
n_points=192,
n_features=76,
):
self.conv_sizes = conv_sizes
self.embedded_features = embedded_features
super(CAE2_S, self).__init__(self.embedded_features, self.conv_sizes)
# # . If is int, uses the same padding in all boundaries.
# # If a 4-tuple, uses (left ,right ,top ,bottom )
# self.channels = [n_features] + conv_sizes
#
# # the core part of model list
# self.sequential = lambda inChannel, outChannel: nn.Sequential(
# nn.ReflectionPad1d((0, 1)),
# nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
# nn.BatchNorm1d(outChannel),
# nn.ReLU(inplace=True)
# )
#
# # ** minded the length should be 1 element shorter than # of channels
# self.encoder = nn.ModuleList(
# [self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
# )
#
# self.decode_channels = self.channels[::-1]
# self.decoder = nn.ModuleList(
# [self.sequential(self.decode_channels[i], self.decode_channels[i + 1]) for i in range(len(conv_sizes))]
# )
# self.linear = nn.Linear(self.conv_sizes[-1], self.embedded_features)
# self.delinear = nn.Linear(self.embedded_features, self.conv_sizes[-1])
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
y = x
x = x.transpose(1, 2) # (b,76,192)
x = self.encode(x).transpose(1, 2) # x: (batch, n_time, conv[-1])
x = self.linear(x) # (batch, time,embedded_features)
x = nn.BatchNorm1d(self.embedded_features).to(device)(x.transpose(1, 2)).transpose(1, 2)
x = nn.ReLU(inplace=True).to(device)(x)
x = self.delinear(x) # (batch, time, conv[-1])
x = nn.BatchNorm1d(self.conv_sizes[-1]).to(device)(x.transpose(1, 2)).transpose(1, 2)
x = nn.ReLU(inplace=True).to(device)(x)
x = self.decode(x.transpose(1, 2)) # (batch,76,192)
x = x.transpose(1, 2)
loss, acc = self.compute_nce(x, y) # TODO:
return acc, loss, x
def compute_nce(self, x_hat, x):
bs = x.shape[0]
assert x.shape == x_hat.shape
nce = 0
x = x.view(bs, -1)
x_hat = x_hat.reshape(bs, -1)
total = torch.mm(x_hat, x.T)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, bs).cuda()))
nce = torch.sum(torch.diag(self.lsoftmax(total)))
nce /= -1. * bs
acc = 1. * correct.item() / bs
return nce, acc
class Basic_Cnn(nn.Module):
def __init__(self, seed, conv_sizes=[32, 64, 64, 128, 256, 512, 1024, 512, 128, 64, 8], n_features=76, out=2):
random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed)
super(Basic_Cnn, self).__init__()
torch.manual_seed(seed)
        # padding note: if an int is given, the same padding is applied on all boundaries;
        # if a 4-tuple is given, it is interpreted as (left, right, top, bottom)
self.out = out
self.channels = [n_features] + conv_sizes
# the core part of model list
self.sequential = lambda inChannel, outChannel: nn.Sequential(
nn.ReflectionPad1d((0, 1)),
nn.Conv1d(inChannel, outChannel, kernel_size=2, padding=0),
nn.BatchNorm1d(outChannel),
nn.ReLU(inplace=True)
)
        # ** note: this list is one element shorter than the number of channels
self.encoder = nn.ModuleList(
[self.sequential(self.channels[i], self.channels[i + 1]) for i in range(len(conv_sizes))]
)
self.fc = nn.Sequential(
nn.Linear(self.channels[-1], 1024),
nn.ReLU(inplace=True),
nn.Linear(1024, self.out),
nn.LogSoftmax(dim=1)
)
# dim = 1 !!!
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
# input shape: (N,C=1,n_points=192,n_features=76)
# output shape: (N, C=sizes[-1], )
self.apply(self._weights_init)
# def relevant_points(n):
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
for i in range(len(self.encoder)): # input shape: (N,n_features=76,n_points=192)
            x = self.encoder[i](x)  # output shape: (N,8,192)
y = self.fc(x[:, :, -1])
return y
def train(args, model, device, train_loader, optimizer, epoch, batch_size, lr=None):
# turn on the training mode
model.train()
logger = logging.getLogger("cpc")
if 'CPAE' not in args['model_type'] or 'CPAE4' in args['model_type'] or (
'CPAE7' in args['model_type']) or 'CPAELSTM41' in args['model_type'] or 'CPAELSTM42' in args['model_type']:
for batch_idx, sample in enumerate(train_loader):
if sample == 1: continue
sigs, labels = zip(*sample)
sigs = torch.stack(sigs)
labels = torch.stack(labels)
data = sigs.float().unsqueeze(1).to(device) # add channel dimension
data.requires_grad = True
optimizer.zero_grad()
            # If n_flat_features_per_frame has not been provided, model(data) below returns that
            # number instead of the usual (acc, loss, hidden) triple; the TypeError branch catches
            # this case and returns the value so the caller can rebuild the model with it.
result = model(data)
try:
acc, loss, hidden = result
except TypeError:
n_flat_features_per_frame = result
return result
loss.backward()
optimizer.step()
if lr is None:
lr = optimizer.update_learning_rate() # See optimizer.py
# print(lr)
if batch_idx % args['log_interval'] == 0:
logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tlr:{:.5f}\tAccuracy: {:.4f}\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), lr, acc, loss.item()))
del sigs, labels, sample, data, hidden, acc, loss
torch.cuda.empty_cache()
elif 'CPAE' in args['model_type']:
model.train()
logger.info('\n --------------------------- epoch {} ------------------------- \n'.format(epoch))
if args.get('lambda'): logger.info('weights are %s' % args['lambda'])
local_loss = []
for ii, batch in enumerate(train_loader):
if batch == 1:
continue
X, y = zip(*batch)
X = torch.stack(X).to(device)
X.requires_grad = True
# y = torch.tensor(y).long().to('cuda') # y is not used here in autoencoder
optimizer.zero_grad()
D, nce, accuracy = model(X) # decoded
l = args.get('Lambda')
if l:
loss = Chimera_loss(D, X, nce, l)
else:
loss = Chimera_loss(D, X, nce)
loss.backward()
optimizer.step()
local_loss.append(loss.item())
if ii % 100 == 0: # verbose
new_lr = optimizer.update_learning_rate()
logger.info('\t {:.5f} {:.5f}'.format(loss.item(), new_lr))
del X, y, batch, D, nce, accuracy, loss, ii
torch.cuda.empty_cache()
logger.info('\n ---------------------- mean loss : {:.5f} ---------------------- \n'.format(
np.mean(local_loss)))
torch.cuda.empty_cache()
torch.cuda.empty_cache()
def validation(model, args, device, validation_loader):
logger = logging.getLogger("cpc")
logger.info("Starting Validation")
if 'CPAE' not in args['model_type'] or 'CPAELSTM42' in args['model_type'] or ('CPAE4' in args['model_type']) or (
'CPAE7' in args['model_type']) or 'CPAELSTM41' in args['model_type']:
model.eval()
total_loss = 0
total_acc = 0
with torch.no_grad():
for _, sample in enumerate(validation_loader):
if sample == 1: continue
sigs, _ = zip(*sample)
sigs = torch.stack(sigs)
data = sigs.float().unsqueeze(1).to(device)
acc, loss, hidden = model(data)
total_loss += len(data) * loss
total_acc += len(data) * acc
torch.cuda.empty_cache()
del sigs, sample
return total_acc, total_loss
else:
model.eval()
loss_ls = []
total_loss = 0
total_acc = 0
for ii, batch in enumerate(validation_loader):
if batch == 1: continue
X, y = zip(*batch)
X = torch.stack(X).to('cuda')
D, nce, accuracy = model(X) # decoded
if args.get('lambda'):
total_loss += Chimera_loss(D, X, nce, args['lambda']).detach().cpu().numpy()
else:
total_loss += Chimera_loss(D, X, nce).detach().cpu().numpy()
loss_ls.append(record_loss(D, X, nce))
total_acc += len(X) * accuracy
torch.cuda.empty_cache()
del X, y, batch, D, nce, accuracy
loss_ls = np.stack(loss_ls)
        logger.info('\n ------- validation ------- \n')
logger.info('\t NCE \t MSE \t MASK MSE \t MAPPING MSE')
logger.info('\t {:.4f} \t {:.4f} \t {:.4f} \t {:.4f}'.format(*np.mean(loss_ls, axis=0)))
return total_acc, total_loss
def define_model(args_json, Model, train_loader):
model_args = filter_args(args_json, Model)
model = Model(**model_args)
optimizer = eval(args_json['optimizer'])
if args_json.get('n_flat_features_per_frame') is None and Model == CDCK2:
args_json['n_flat_features_per_frame'] = train(args_json, model, device, train_loader, optimizer, 2,
args_json['batch_size'])
del model
model_args = filter_args(args_json, Model)
model = Model(**model_args)
model.update_flat_features(args_json['n_flat_features_per_frame'])
if args_json.get('fcs') is not None:
model.add_fcs(args_json['fcs']) # add fc layers if required
return model.to(device), optimizer
def save_intermediate(Model, args_json, device):
setting_name = get_setting_name(args_json['model_best'])
logging_dir = args_json['logging_dir']
checkpoint_path = os.path.join(
args_json['top_path'],
'logs/cpc/',
args_json['model_type'],
args_json['model_best']
)
checkpoint = torch.load(checkpoint_path, map_location='cpu')
print('Starting to generate intermediate data\n')
train_loader, validation_loader, test_loader = split_Structure_Inhospital(
args_json, percentage=1) # BUG every data sample is the same!!!
model, optimizer = define_model(args_json, Model, train_loader)
model.load_state_dict(checkpoint['state_dict'])
model = model.to(device)
context_train = []
context_val = []
context_test = []
y_train = []
y_test = []
y_val = []
model.eval()
with torch.no_grad():
for _, sample in enumerate(train_loader):
if sample == 1: break
x, y = zip(*sample)
out = model.get_reg_out(
(
torch.stack(x).float().unsqueeze(1).to(device)
)
).cpu()
context_train.append(out)
torch.cuda.empty_cache()
y_train.append((torch.stack(y)))
del sample, x, y, out
context_train = torch.cat(context_train).cpu().numpy()
y_train = torch.cat(y_train).cpu().numpy()
np.save(os.path.join(logging_dir, setting_name + '-x_train'), context_train)
np.save(os.path.join(logging_dir, setting_name + '-y_train'), y_train)
print('Getting training intermediate vectors done. saved in %s' % logging_dir)
torch.cuda.empty_cache()
del context_train, y_train
for _, sample in enumerate(validation_loader):
if sample == 1: break
x, y = zip(*sample)
context_val.append(model.get_reg_out(
(
torch.stack(
x
).float().unsqueeze(1).to(device)
)
)
)
y_val.append((torch.stack(y)))
del sample, x, y
context_val = torch.cat(context_val).cpu().numpy()
y_val = torch.cat(y_val).cpu().numpy()
np.save(os.path.join(logging_dir, setting_name + '-x_val'), context_val)
np.save(os.path.join(logging_dir, setting_name + '-y_val'), y_val)
print('Getting validation intermediate vectors done. saved in %s' % logging_dir)
torch.cuda.empty_cache()
del context_val, y_val
for _, sample in enumerate(test_loader):
if sample == 1: break
x, y = zip(*sample)
context_test.append(model.get_reg_out(
(
torch.stack(
x
).float().unsqueeze(1).to(device)
)
)
)
y_test.append((torch.stack(y)))
del sample, x, y
context_test = torch.cat(context_test).cpu().numpy()
y_test = torch.cat(y_test).cpu().numpy()
np.save(os.path.join(logging_dir, setting_name + '-x_test'), context_test)
np.save(os.path.join(logging_dir, setting_name + '-y_test'), y_test)
print('Getting test intermediate vectors done. saved in %s' % logging_dir)
torch.cuda.empty_cache()
del context_test, y_test
def snapshot(dir_path, run_name, state):
snapshot_file = os.path.join(dir_path,
run_name + '-model_best.pth')
# torch.save can save any object
# dict type object in our cases
torch.save(state, snapshot_file)
logger.info("Snapshot saved to {}\n".format(snapshot_file))
def my_collate(batch):
"""Add paddings to samples in one batch to make sure that they have the same length.
Args:
Input:
Output:
data(tensor): a batch of data of patients with the same length
labels(tensor): the labels of the data in this batch
durations(tensor): the original lengths of the patients in the batch
Shape:
Input:
Output:
data: (batch_size,length,num_features)
labels: (batch_size,)
durations:(batch_size,)
"""
if len(batch) == 1:
return 1 # if batch size=1, it should be the last batch. we cannot compute the nce loss, so ignore this batch.
if len(batch) > 1:
data = []
labels = []
durations = []
batch = sorted(batch, key=lambda x: x['duration'], reverse=True)
for sample in batch:
data.append(sample['patient'])
labels.append(sample['death'])
durations.append(sample['duration'])
max_len, n_feats = data[0].shape
data = [np.array(s, dtype=float) for s in data]
data = [torch.from_numpy(s).float() for s in data]
labels = [label for label in labels]
durations = [duration for duration in durations]
data = [torch.cat((s, torch.zeros(max_len - s.shape[0], n_feats)), 0) if s.shape[0] != max_len else s for s in
data]
data = torch.stack(data, 0) # shape:[24,2844,462]
labels = torch.stack(labels, 0)
durations = torch.stack(durations, 0) # max:2844
return data, labels, durations
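# Hypothetical sketch of my_collate in use. The dataset below is made up for illustration: the real
# project feeds a Dataset whose samples are dicts with 'patient', 'death' and 'duration' entries,
# where 'duration' equals the number of time steps in 'patient'.
def _demo_my_collate():
    from torch.utils.data import DataLoader
    def _fake_sample():
        length = int(np.random.randint(50, 100))
        return {'patient': np.random.rand(length, 76),
                'death': torch.tensor(0),
                'duration': torch.tensor(length)}
    fake = [_fake_sample() for _ in range(16)]
    loader = DataLoader(fake, batch_size=4, shuffle=True, collate_fn=my_collate)
    data, labels, durations = next(iter(loader))  # data is zero-padded to the longest sample
    return data.shape, labels.shape, durations.shape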
class MLP(nn.Module):
def __init__(self, hidden_sizes, seed, in_features=8, out=2, dropout=True):
torch.manual_seed(seed)
super(MLP, self).__init__()
hidden_sizes = [in_features] + hidden_sizes + [out]
l = []
torch.manual_seed(seed)
fcs = [nn.Linear(i, j, bias=True) for i, j in zip(hidden_sizes[:-1], hidden_sizes[1:])]
relu = nn.ReLU(inplace=True)
drop = nn.Dropout(p=0.2)
torch.manual_seed(seed)
bns = [nn.BatchNorm1d(i) for i in hidden_sizes[1:]]
# apply(_weights_init)
for i in range(len(hidden_sizes) - 1):
l.append(fcs[i])
if i != len(hidden_sizes) - 2:
l.append(relu)
l.append(bns[i])
if dropout: l.append(drop)
self.mymodules = nn.Sequential(*l)
for model in self.mymodules:
self.initialize_weights(model)
def initialize_weights(self, model):
if type(model) in [nn.Linear]:
nn.init.xavier_uniform_(model.weight)
nn.init.zeros_(model.bias)
def forward(self, x):
# print(x.shape)
if len(x.shape) == 4:
x = x.squeeze(1) # fastai has a strange issue here.
x = self.mymodules(x)
# print (x)
# print(x.shape)
return x
def _weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def valid(self, data_loader, iterations='all', metrics=None):
if metrics == None: metrics = self.metrics
loss = [None] * len(metrics)
overall_loss = []
self.model.eval()
with torch.no_grad():
for i, batch in enumerate(data_loader):
if iterations != 'all':
if i >= iterations: return overall_loss
ct, y = zip(*batch)
ct = torch.stack(ct).squeeze(1).to(device)
y = torch.stack(y).cpu()
pred = self.model(ct).cpu() # forward
for i, metric in enumerate(metrics):
loss[i] = metric(pred, y) # loss
overall_loss.append((loss))
del loss, ct, y, pred
return overall_loss
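# Hypothetical sketch of the MLP head: it is meant to be trained on the saved intermediate context
# vectors (8 features by default, see load_intermediate / train_mlp below); the hidden sizes and
# batch size here are illustrative only.
def _demo_mlp_head():
    head = MLP(hidden_sizes=[64, 32], seed=0, in_features=8, out=2).to(device)
    ct = torch.randn(16, 8).to(device)   # a fake batch of context vectors
    logits = head(ct)                     # (16, 2) class scores for CrossEntropyLoss
    return logits.shape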
class LR(nn.Module):
def __init__(self, seed, in_features=8, out=2):
random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed)
super(LR, self).__init__()
torch.manual_seed(seed)
self.linear = nn.Linear(in_features, out)
def forward(self, x):
return F.log_softmax(self.linear(x), dim=1)
def load_intermediate(top_path, setting_name, model_type):
middata_dir = os.path.join(top_path, 'logs', 'imp', model_type)
x_train = np.load(os.path.join(middata_dir, setting_name + '-x_train.npy'))
y_train = np.load(os.path.join(middata_dir, setting_name + '-y_train.npy'))
x_val = np.load(os.path.join(middata_dir, setting_name + '-x_val.npy'))
y_val = np.load(os.path.join(middata_dir, setting_name + '-y_val.npy'))
x_test = np.load(os.path.join(middata_dir, setting_name + '-x_test.npy'))
y_test = np.load(os.path.join(middata_dir, setting_name + '-y_test.npy'))
return {
'x_train': x_train,
'y_train': y_train,
'x_val': x_val,
'y_val': y_val,
'x_test': x_test,
'y_test': y_test
}
def tabular_frame(args_json):
data_intermediate = load_intermediate(args_json['top_path'], args_json['setting_name'], args_json['model_type'])
x_train, y_train, x_val, y_val, x_test, y_test = data_intermediate['x_train'], data_intermediate['y_train'], \
data_intermediate['x_val'], data_intermediate['y_val'], \
data_intermediate['x_test'], data_intermediate['y_test']
train_df = pd.DataFrame(np.hstack((x_train, y_train)), columns=list(range(8)) + ['y'])
val_df = pd.DataFrame(np.hstack((x_val, y_val)), columns=list(range(8)) + ['y'])
test_df = pd.DataFrame(np.hstack((x_test, y_test)), columns=list(range(8)) + ['y'])
return train_df, val_df, test_df
def dataset_intermediate(args_json):
data_intermediate = load_intermediate(args_json['top_path'], args_json['setting_name'], args_json['model_type'])
x_train, y_train, x_val, y_val, x_test, y_test = data_intermediate['x_train'], data_intermediate['y_train'], \
data_intermediate['x_val'], data_intermediate['y_val'], \
data_intermediate['x_test'], data_intermediate['y_test']
train_set, val_set, test_set = TrivialDataset(x_train, y_train), \
TrivialDataset(x_val, y_val), \
TrivialDataset(x_test, y_test)
return train_set, val_set, test_set
def data_loader_intermediate(args_json):
data_intermediate = load_intermediate(args_json['top_path'], args_json['setting_name'], args_json['model_type'])
x_train, y_train, x_val, y_val, x_test, y_test = data_intermediate['x_train'], data_intermediate['y_train'], \
data_intermediate['x_val'], data_intermediate['y_val'], \
data_intermediate['x_test'], data_intermediate['y_test']
train_set, val_set, test_set = TrivialDataset(x_train, y_train), \
TrivialDataset(x_val, y_val), \
TrivialDataset(x_test, y_test)
train_loader, val_loader, test_loader = DataLoader(train_set, shuffle=True, batch_size=args_json['batch_size'],
collate_fn=my_collate_fix,
num_workers=args_json['num_workers']), \
DataLoader(val_set, batch_size=args_json['batch_size'], shuffle=True,
collate_fn=my_collate_fix,
num_workers=args_json['num_workers']), \
DataLoader(test_set, shuffle=False, batch_size=args_json['batch_size'],
collate_fn=my_collate_fix,
num_workers=args_json['num_workers'])
return train_loader, val_loader, test_loader
def binary_acc(y_pred, y_test):
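    # e.g. binary_acc(torch.tensor([2.0, -1.0]), torch.tensor([1., 0.])) -> tensor(100.)
    # (sigmoid + round turns raw scores into 0/1 predictions before comparing with the targets)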
y_pred_tag = torch.round(torch.sigmoid(y_pred))
correct_results_sum = (y_pred_tag == y_test).sum().float()
acc = correct_results_sum / y_test.shape[0]
acc = torch.round(acc * 100)
return acc
def fastai_dl(train_set, val_set, test_set, device, batch_size=64, num_workers=24):
# fastai dataloader
return tabular.DataBunch.create(train_ds=train_set, valid_ds=val_set, test_ds=test_set,
bs=batch_size, num_workers=num_workers, device=device,
)
def train_mlp(model, train_loader, val_loader, epoch, lr, optimizer):
lossfn = nn.CrossEntropyLoss()
for epoch in range(epoch):
train_loss = []
train_acc = []
val_loss = []
val_acc = []
model.train()
for i, batch in enumerate(train_loader):
ct, y = zip(*batch)
ct = torch.stack(ct).squeeze(1).to(device)
y = torch.stack(y).to(device)
# ---------- train mlp ---------
optimizer.zero_grad()
pred = model(ct) # forward
loss = lossfn(pred, y) # loss
acc = sum(torch.eq(torch.argmax(pred, axis=1), y)).item() / len(y) * 100
train_acc.append(acc)
loss.backward() # compute loss
optimizer.step() # update
torch.cuda.empty_cache()
train_loss.append(loss.item())
del pred, loss, acc, ct, y
model.eval()
with torch.no_grad():
for i, batch in enumerate(val_loader):
ct, y = zip(*batch)
ct = torch.stack(ct).squeeze(1).to(device)
y = torch.stack(y).to(device)
# ---------- validation predicted by mlp ---------
pred = model(ct) # forward
loss = lossfn(pred, y) # loss
acc = sum(torch.eq(torch.argmax(pred, axis=1), y)).item() / len(y) * 100
val_acc.append(acc)
val_loss.append(loss.item())
torch.cuda.empty_cache()
del pred, loss, acc, ct, y
# print out statistics
verbose(epoch, train_loss, train_acc, val_loss, val_acc)
class Basic_LSTM(nn.Module):
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5, mode=1, noct=False):
self.out = 2 if task in ['ihm', 'dd'] else 10
super(Basic_LSTM, self).__init__()
self.lstm1 = nn.LSTM(
input_size=input_dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
self.fc = nn.Sequential(
nn.Linear(dim, 1024),
nn.ReLU(inplace=True),
nn.Linear(1024, self.out),
nn.LogSoftmax(dim=1)
)
for model in [self.lstm1, self.fc]:
self.initialize_weights(model)
def initialize_weights(self, model):
if type(model) in [nn.Linear]:
nn.init.xavier_uniform_(model.weight)
nn.init.zeros_(model.bias)
elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
nn.init.orthogonal_(model.weight_hh_l0)
nn.init.xavier_uniform_(model.weight_ih_l0)
nn.init.zeros_(model.bias_hh_l0)
nn.init.zeros_(model.bias_ih_l0)
def forward(self, x):
xt, state1 = self.lstm1(x)
y = self.fc(xt[:, -1, :])
return y
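# Hypothetical sketch (sizes made up): Basic_LSTM consumes a (N, time, features) sequence and
# returns per-class log-probabilities, with 2 classes for the 'ihm'/'dd' tasks and 10 otherwise.
def _demo_basic_lstm():
    clf = Basic_LSTM(dim=256, bn=True, dropout=0.3, task='ihm').to(device)
    x = torch.randn(4, 48, 76).to(device)   # e.g. 48 time steps of 76 features
    log_probs = clf(x)                       # (4, 2) log-probabilities from LogSoftmax
    return log_probs.shape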
class AE_LSTM(nn.Module):
"""
CPLSTM4------use lstm as Wk
mode=1 use hidden states when predict. else use cell states
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5, mode=1, noct=False):
self.dim = dim # hidden dimension
self.bn = bn
self.drop = dropout
self.task = task
self.depth = depth
self.time_step = time_step
self.num_classes = num_classes
self.input_dim = input_dim
self.mode = mode
self.noct = noct
super(AE_LSTM, self).__init__()
# encoder
self.lstm1 = nn.LSTM(
input_size=self.input_dim,
hidden_size=dim,
bidirectional=False,
batch_first=True
)
# decoder
        # note that the hidden_size here differs from the encoder's
self.lstm2 = nn.LSTM(
input_size=dim,
hidden_size=self.input_dim,
bidirectional=False,
batch_first=True
)
# not used
if self.noct:
self.stack_dim = self.dim * 192
else:
self.stack_dim = self.dim * 193
self.dropout = nn.Dropout(self.drop)
# self.Wk = nn.ModuleList([nn.Linear(self.dim, self.dim) for i in range(self.time_step)])
self.softmax = nn.Softmax(dim=0)
self.lsoftmax = nn.LogSoftmax(dim=0)
for model in [self.lstm1, self.lstm2]:
self.initialize_weights(model)
def init_hidden(self, bs, dim):
cell_states = torch.zeros(1, bs, dim).to(device)
hidden_states = torch.zeros(1, bs, dim).to(device)
return (hidden_states, cell_states)
def initialize_weights(self, model):
if type(model) in [nn.Linear]:
nn.init.xavier_uniform_(model.weight)
nn.init.zeros_(model.bias)
elif type(model) in [nn.LSTM, nn.RNN, nn.GRU]:
nn.init.orthogonal_(model.weight_hh_l0)
nn.init.xavier_uniform_(model.weight_ih_l0)
nn.init.zeros_(model.bias_hh_l0)
nn.init.zeros_(model.bias_ih_l0)
def get_reg_out(self, x, stack=False, warm=False, conti=False):
# check input shape
if len(x.shape) == 4: x = x.squeeze(1)
if x.shape[1] == 76: x = x.transpose(1, 2)
xt, (ht, ct) = self.lstm1(x)
if stack and self.noct: return self.dropout(xt.reshape((x.shape[0], -1)))
if stack: return self.dropout(torch.cat((xt.reshape((x.shape[0], -1)), ct.squeeze(0)), 1))
return xt[:, -1, :].squeeze(1)
def get_encode(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
if x.shape[1] == 76: x = x.transpose(1, 2)
        x, _ = self.lstm1(x)  # nn.LSTM returns (output, (h_n, c_n)), a 2-tuple
x = nn.Flatten()(x)
return x
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
self.bs = x.shape[0]
x_t, state1 = self.lstm1(x) # encoder part : zeros init
x_hat, state2 = self.lstm2(x_t) # decoder part : zeros init
loss = nn.MSELoss(reduction='mean')(x, x_hat)
return -1, loss, x # make sure it is consistent with other models training function
class CAE_LSTM(AE_LSTM):
"""
    contrastive auto-encoder with LSTM backbone
"""
def __init__(self, dim, bn, dropout, task,
depth=2, num_classes=1,
input_dim=76, time_step=5, mode=1, noct=False):
super(CAE_LSTM, self).__init__(dim, bn, dropout, task, depth, num_classes, input_dim, time_step, mode, noct)
    # get_reg_out is inherited unchanged from AE_LSTM
def forward(self, x):
if len(x.shape) == 4: x = x.squeeze(1)
# print('shape of x is ' ,x.shape)
if x.shape[1] == 76: x = x.transpose(1, 2)
self.bs = x.shape[0]
x_t, state1 = self.lstm1(x) # encoder part : zeros init
x_hat, state2 = self.lstm2(x_t) # decoder part : zeros init
loss, acc = self.compute_nce(x_hat, x)
return acc, loss, x
def compute_nce(self, x_hat, x):
bs = x.shape[0]
assert x.shape == x_hat.shape
nce = 0
x = x.view(bs, -1)
x_hat = x_hat.reshape(bs, -1)
total = torch.mm(x_hat, x.T)
correct = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=0),
torch.arange(0, bs).cuda()))
nce = torch.sum(torch.diag(self.lsoftmax(total)))
nce /= -1. * bs
acc = 1. * correct.item() / bs
return nce, acc
# --- zerynth/core-zerynth-toolchain: zdevicemanager/base/tools.py ---
from .base import *
from .fs import *
from .cfg import *
from .pygtrie import *
__all__ = ["tools"]
class Tools():
def __init__(self):
self.tools = {}
self.installed = {}
def init(self):
#register platform tools
if env.is_windows():
self.tools["stty"]="mode"
elif env.is_linux():
self.tools["stty"]="/bin/stty -F"
else:
self.tools["stty"]="/bin/stty -f"
for tooldir in fs.dirs(env.sys):
self.add_tool(tooldir)
for tooldir in fs.dirs(fs.path(env.dist,"sys")):
self.add_tool(tooldir)
ifile = fs.path(env.dist,"installed.json")
self.installed = fs.get_json(ifile)
def get_package(self,fullname):
return env.repo["packs"][env.repo["byname"][fullname]]
def get_packages_by_tag(self,tag):
idx = env.repo["bytag"][tag]
res = set()
for i in idx:
pack = env.repo["packs"][i]
if pack.get("sys") and pack.get("sys")!=env.platform:
# skip other platforms
continue
res.add(pack["fullname"])
return sorted(list(res))
def get_package_deps(self,fullname):
try:
pack = self.get_package(fullname)
except:
pack = {}
res = []
for dep in pack.get("deps",[]):
res.extend(self.get_packages_by_tag(dep))
res = sorted(list(set(res)))
return res
def has_all_deps(self,fullname):
deps = self.get_package_deps(fullname)
for fname in deps:
if fname not in self.installed:
return False
return True
def get_pack_info(self,packdir):
pfiles = [fs.path(packdir,"z.yml"), fs.path(packdir,"package.json")]
for pfile in pfiles:
if fs.exists(pfile):
pkg = fs.get_yaml_or_json(pfile)
return pkg
return None
def add_tool(self,tooldir):
if fs.basename(tooldir) in ["browser","newbrowser","newpython"]:
# ignore some sys packages
return
try:
pkg = self.get_pack_info(tooldir)
if pkg is None:
warning("Can't load tool package",tooldir)
return
else:
fullname = pkg["fullname"]
toolname = pkg.get("tool")
pkg = pkg["sys"]
except Exception as e:
warning("Can't load tool",tooldir,e)
return
if toolname:
self.tools[toolname]={}
addto = self.tools[toolname]
else:
addto = self.tools
if isinstance(pkg,dict):
for k,v in pkg.items():
addto[k]=fs.path(env.sys,tooldir,v)
elif isinstance(pkg,list) or isinstance(pkg,tuple):
for k,v in pkg:
addto[k]=fs.path(env.sys,tooldir,v)
else:
warning("Can't load tool info",tooldir,err=True)
#print(self.tools)
def get_tool_dir(self,toolname):
for tooldir in fs.dirs(env.sys):
if fs.basename(tooldir)==toolname:
return tooldir
for tooldir in fs.dirs(fs.path(env.dist,"sys")):
if fs.basename(tooldir)==toolname:
return tooldir
return None
def __getattr__(self,attr):
if attr in self.tools:
return self.tools[attr]
raise AttributeError
def __getitem__(self,attr):
if attr in self.tools:
return self.tools[attr]
raise KeyError
def get_vm(self,vmuid,version,chipid,target):
vmpath = fs.path(env.vms,target,chipid)
vmfs = fs.glob(vmpath,"*.vm")
vm = None
for vmf in vmfs:
vmm = fs.basename(vmf)
if vmm.startswith(vmuid+"_"+version+"_"):
vm=vmf
return vm
def get_vm_by_uid(self,vmuid):
#for root,dirnames,files in os.walk(fs.path(env.vms)):
for target in fs.dirs(env.vms):
for chid in fs.dirs(fs.path(env.vms,target)):
for ff in fs.files(fs.path(env.vms,target,chid)):
path_splitted = ff.split('/')
ff_ = fs.basename(ff)
if ff_.startswith(vmuid+"_"):
return fs.path(ff)
return None
def get_vms(self,target,chipid=None,full_info=False):
vms = {}
targetpath = fs.path(env.vms,target)
if not fs.exists(targetpath):
return vms
for chid in fs.dirs(targetpath):
chid=fs.basename(chid)
if chipid and chipid!=chid:
continue
vmfs = fs.glob(fs.path(targetpath,chid),"*.vm")
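            # VM firmware files are named <uid>_<version>_<hash>_<rtos>.vm (see
            # the commented-out set_json call at the bottom of this file); the
            # rfind() calls below split those fields off from the right.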
for vmf in vmfs:
vmbf = fs.basename(vmf)
rpos = vmbf.rfind("_") #rtos
hpos = vmbf.rfind("_",0,rpos-1) #hash
vpos = vmbf.rfind("_",0,hpos-1) #version
vmrtos = vmbf[rpos+1:-3]
vmhash = vmbf[hpos+1:rpos]
vmversion = vmbf[vpos+1:hpos]
vmuid = vmbf[0:vpos] #TODO: add check
if full_info:
vms[vmuid]=(vmf,vmversion,vmrtos,vmhash)
else:
vms[vmuid]=vmf
return vms
def get_vm_by_prefix(self,vmuid):
#for root,dirnames,files in os.walk(fs.path(env.vms)):
res = []
for target in fs.dirs(env.vms):
for chid in fs.dirs(fs.path(env.vms,target)):
for ff in fs.files(fs.path(env.vms,target,chid)):
path_splitted = ff.split('/')
ff_ = fs.basename(ff)
if ff_.startswith(vmuid):
res.append(fs.path(ff))
return res
def _parse_order(self,path):
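        # order.txt format (inferred from the parser below): a line of one or
        # more leading '#' characters sets a tag label at that nesting depth,
        # lines starting with ';' are comments, and any other non-empty line
        # names an example directory containing project.md and main.py.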
try:
order = fs.readfile(fs.path(path,"order.txt"))
debug("Can't open order.txt at",path)
except:
return []
lines = order.split("\n")
stack = []
rs = []
for line in lines:
line = line.strip()
if not line or len(line)<4 or line.startswith(";"):
continue
pos = line.count("#")
if pos>0:
label = line[pos:]
while (len(stack)>=(pos)): stack.pop()
stack.append(label)
else:
try:
ex = {
"tag":list(stack),
"name":line.replace("_"," "),
"path":fs.path(path,line),
"desc":fs.readfile(fs.path(path,line,"project.md")),
"code":fs.readfile(fs.path(path,line,"main.py")),
}
rs.append(ex)
except:
pass
return rs
def _get_examples(self,path):
return self._parse_order(path)
def get_examples(self):
exs = {}
exr = []
srcs = [(fs.path(env.stdlib,"examples"),"core.zerynth.stdlib")]
repos = fs.dirs(env.libs)
if "official" in repos: #put official on top
repos.remove("official")
repos = ["official"]+repos
for repo in repos:
nms = fs.dirs(repo)
for nm in nms:
libs = fs.dirs(nm)
for lib in libs:
srcs.append((fs.path(lib,"examples"),"lib."+fs.basename(nm)+"."+fs.basename(lib)))
for exlib,lib in srcs:
if fs.exists(exlib):
ee = self._get_examples(exlib)
for eee in ee:
eee["lib"]=lib
exr.extend(ee)
return exr
def get_devices(self):
bdirs = fs.dirs(env.devices)
for bdir in bdirs:
try:
pkg = self.get_pack_info(bdir)
if pkg is None:
continue
bj = fs.get_json(fs.path(bdir,"device.json"))
bj["path"] = bdir
bj["deps"] = self.get_package_deps(pkg["fullname"])
bj["has_all_deps"] = self.has_all_deps(pkg["fullname"])
bj["fullname"] = pkg["fullname"]
yield bj
except Exception as e:
warning(e)
#load custom devices
cdirs = fs.dirs(env.cvm)
for cdir in cdirs:
if not fs.exists(fs.path(cdir,"active")):
#not compiled yet, skip
continue
try:
                pkg = self.get_pack_info(cdir)
if pkg is None:
continue
bj = fs.get_json(fs.path(cdir,"device.json"))
bj["path"] = cdir
bj["deps"] = self.get_package_deps(pkg["fullname"])
bj["has_all_deps"] = self.has_all_deps(pkg["fullname"])
bj["fullname"] = pkg["fullname"]
yield bj
except Exception as e:
warning(e)
def get_specs(self,specs):
options = {}
for spec in specs:
pc = spec.find(":")
if pc<0:
fatal("invalid spec format. Give key:value")
thespec = spec[pc+1:]
if thespec=="null":
thespec=None
options[spec[:pc]]=thespec
return options
def get_target(self,target,options={}):
import devices
_dsc = devices.Discover()
return _dsc.get_target(target,options)
def get_modules(self):
res = {}
# libraries
rdirs = fs.dirs(env.libs)
for r in rdirs:
repo = fs.basename(r)
nsdirs = fs.dirs(r)
for ns in nsdirs:
namespace = fs.basename(ns)
lbdirs = fs.dirs(ns)
for l in lbdirs:
lib = fs.basename(l)
if repo=="official":
if namespace=="zerynth":
module = lib
else:
module = namespace+"."+lib
else:
module = repo+"."+namespace+"."+lib
imports = []
for f in fs.files(l):
fl = fs.basename(f)
if fl.endswith(".py") and fl!="main.py":
imports.append(fl[0:-3])
res[module]=imports
return res
def get_vhal(self):
vhal = {}
arch_dirs = fs.dirs(env.vhal)
for ad in arch_dirs:
fmdirs = fs.dirs(ad)
for fm in fmdirs:
vhal_file = fs.path(fm,"vhal.json")
if fs.exists(vhal_file):
vj = fs.get_json(vhal_file)
vhal.update(vj)
return vhal
def disk_usage(self):
bytes = fs.dir_size(env.home)
return bytes
#fs.set_json(rj["data"], fs.path(vmpath,uid+"_"+version+"_"+rj["data"]["hash_features"]+"_"+rj["data"]["rtos"]+".vm"))
tools = Tools()
# add_init(tools.init)
|
[
"dev@zerynth.com"
] |
dev@zerynth.com
|
359f9c86575cbc6401fa831c42183d3cd110679b
|
9d278285f2bc899ac93ec887b1c31880ed39bf56
|
/ondoc/account/migrations/0103_merge_20190905_1609.py
|
16fb7e5557b47e3db32cd6549c06ffb2218de131
|
[] |
no_license
|
ronit29/docprime
|
945c21f8787387b99e4916cb3ba1618bc2a85034
|
60d4caf6c52a8b70174a1f654bc792d825ba1054
|
refs/heads/master
| 2023-04-01T14:54:10.811765
| 2020-04-07T18:57:34
| 2020-04-07T18:57:34
| 353,953,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# Generated by Django 2.0.5 on 2019-09-05 10:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('account', '0100_auto_20190902_1653'),
('account', '0102_auto_20190903_1950'),
]
operations = [
]
|
[
"ankit.s@policybazaar.com"
] |
ankit.s@policybazaar.com
|
0c57b23ce2e57693a0fa07b8ddd2d25521f90145
|
c6a101547c2b7f36fe83a725974a8a7f02cf176d
|
/data_structures/binary_trees/flip_tree.py
|
20c8cbf5f563689f2b9a252bd664a6b22b2a1b23
|
[
"MIT"
] |
permissive
|
prabhupant/python-ds
|
737cc35574de5c2ece0f0813cf00775324a8dbe7
|
f7d6d78fedaf84b7527965bb1798b7a8da989474
|
refs/heads/master
| 2023-08-22T05:04:22.937675
| 2022-10-04T01:29:39
| 2022-10-04T01:29:39
| 199,366,418
| 2,325
| 704
|
MIT
| 2022-10-10T13:01:10
| 2019-07-29T02:48:57
|
Python
|
UTF-8
|
Python
| false
| false
| 643
|
py
|
# Flip a tree such like here
# https://www.geeksforgeeks.org/flip-binary-tree/
# Flipping subtree algorithm
# 1. root->left->left = root->right
# 2. root->left->right = root
# 3. root->left = NULL
# 4. root->right = NULL
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def flip_tree(root):
if root is None:
return root
if root.left is None and root.right is None:
return root
flipped_root = flip_tree(root.left)
root.left.left = root.right
root.left.right = root
root.left = None
root.right = None
return flipped_root
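# A minimal usage sketch (not part of the original file) for the flip
# described in the comments at the top:
#     1                2
#    / \      -->     / \
#   2   3            3   1
if __name__ == '__main__':
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    new_root = flip_tree(root)
    print(new_root.val)                           # 2
    print(new_root.left.val, new_root.right.val)  # 3 1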
|
[
"noreply@github.com"
] |
noreply@github.com
|
658da1160eb4755901ebedf82b585ce6ddcd99da
|
1b83b79fcd58878cad8c683f7c2fb048abdc9b6c
|
/magnum/conf/kubernetes.py
|
2de9370e2bc59fb73dcbfd6a2ef6f75e558c8313
|
[
"Apache-2.0"
] |
permissive
|
ititandev/magnum
|
88f7ab8d93e6913fa085d34577827d11aead1790
|
16ea8b6397f2bafc01e6d4ec474c1ae97f15a484
|
refs/heads/master
| 2020-12-28T19:07:02.905485
| 2020-02-03T17:53:15
| 2020-02-03T17:53:15
| 238,458,066
| 1
| 0
|
Apache-2.0
| 2020-02-05T13:35:13
| 2020-02-05T13:35:12
| null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
kubernetes_group = cfg.OptGroup(name='kubernetes',
title='Options for the Kubernetes addons')
kubernetes_opts = [
cfg.StrOpt('keystone_auth_default_policy',
default="/etc/magnum/keystone_auth_default_policy.json",
               help='Explicitly specify the path to the file that defines '
                    'the default Keystone auth policy for a Kubernetes '
                    'cluster when Keystone auth is enabled. Vendors can put '
                    'their specific default policy here'),
]
def register_opts(conf):
conf.register_group(kubernetes_group)
conf.register_opts(kubernetes_opts, group=kubernetes_group)
def list_opts():
return {
kubernetes_group: kubernetes_opts
}
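# Usage sketch (not part of the original module), assuming only the public
# oslo.config API already used above: register the options on a private
# ConfigOpts instance and read back the default policy path.
if __name__ == '__main__':
    CONF = cfg.ConfigOpts()
    register_opts(CONF)
    CONF([])  # parse an empty command line so option values can be read
    print(CONF.kubernetes.keystone_auth_default_policy)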
|
[
"flwang@catalyst.net.nz"
] |
flwang@catalyst.net.nz
|
ac9c2a9ef0b1cf9f39976b219335f1e2257893fc
|
d4c2846af2194e8463bff02a9ad49eedc97539eb
|
/src/RPConfig1.py
|
77c0a56cf43effb39d46c064b268de9169bf6a08
|
[] |
no_license
|
rbulha/pytimeclock
|
8eda6a41ecbe0e5f94238885a4d70e6d5f7e385f
|
a1cda1edce3d69fa504f55c40e78db9ecb2d837b
|
refs/heads/master
| 2021-01-15T22:28:57.382733
| 2012-08-10T17:58:52
| 2012-08-10T17:58:52
| 40,454,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,680
|
py
|
import sys
import os
import time
import shelve
import dbhash  # included only so that the installer finds this requirement
import anydbm  # included only so that the installer finds this requirement
CONFIGURATION_FILE = 'configuration1.dat'
class CRPConfig:
global CONFIGURATION_FILE
print '[CRPConfig] LOAD CONFIGURATION'
sys_path = sys.path[0]
if os.path.splitext(sys_path)[1] == '':
base = sys.path[0]
else:
base = os.path.dirname(sys.path[0])
DB_BASE_PATH = os.path.dirname(base) + '\\data\\'
caminho = DB_BASE_PATH + CONFIGURATION_FILE
DB = shelve.open(caminho)
print '[CRPConfig] DB=',len(DB)
if (len(DB) != 0) and DB.has_key('C_H_NORMAL') and DB.has_key('H_E_ALMOCO'):
C_H_NORMAL = DB['C_H_NORMAL']
C_H_SEXTA = DB['C_H_SEXTA']
T_ALMOCO = DB['T_ALMOCO']
H_E_OFICIAL = DB['H_E_OFICIAL']
H_S_OFICIAL = DB['H_S_OFICIAL']
H_S_OFICIAL_SEXTA = DB['H_S_OFICIAL_SEXTA']
H_S_ALMOCO = DB['H_S_ALMOCO']
H_E_ALMOCO = DB['H_E_ALMOCO']
START_REPORT_DAY = DB['START_REPORT_DAY']
else:
H_E_OFICIAL = 7.0
DB['H_E_OFICIAL']=H_E_OFICIAL
H_S_OFICIAL = 17.0
DB['H_S_OFICIAL']=H_S_OFICIAL
T_ALMOCO = 1.0
DB['T_ALMOCO']=T_ALMOCO
H_S_OFICIAL_SEXTA = 16.0
DB['H_S_OFICIAL_SEXTA']=H_S_OFICIAL_SEXTA
H_S_ALMOCO = 12.0
DB['H_S_ALMOCO']=H_S_ALMOCO
H_E_ALMOCO = 13.0
DB['H_E_ALMOCO']=H_E_ALMOCO
#total working day hours
C_H_NORMAL = (H_S_OFICIAL - H_E_OFICIAL) - T_ALMOCO#9.1
DB['C_H_NORMAL']=C_H_NORMAL
C_H_SEXTA = (H_S_OFICIAL_SEXTA - H_E_OFICIAL) - T_ALMOCO#7.6
DB['C_H_SEXTA']=C_H_SEXTA
START_REPORT_DAY = 21
DB['START_REPORT_DAY']=START_REPORT_DAY
DB.sync()
@staticmethod
def GetJorneyInSeconds():
nowtime = time.localtime()
        if nowtime.tm_wday == 4: # Friday
return CRPConfig.C_H_SEXTA*3600
else:
return CRPConfig.C_H_NORMAL*3600
@staticmethod
def GetLanchTimeInSeconds():
return CRPConfig.T_ALMOCO*3600
@staticmethod
def Get_H_S_OFICIAL():
nowtime = time.localtime()
        if nowtime.tm_wday == 4: # Friday
return CRPConfig.H_S_OFICIAL_SEXTA
else:
return CRPConfig.H_S_OFICIAL
def main():
config = CRPConfig()
if __name__ == '__main__':
main()
|
[
"rbulha@3db46129-f7cc-561c-f858-d950435ae609"
] |
rbulha@3db46129-f7cc-561c-f858-d950435ae609
|
8d8b46573115c470483434c30bc2fd15efceb159
|
73785aea08895d0fc15e914ce329716712f057ec
|
/recipes/errorAnal.py
|
9208c6a48ac906004212b9520360e38dbc9b8806
|
[] |
no_license
|
Peder2911/ModelComp
|
5e93e6db7fbc809e7444448729a91ff7a762b0cc
|
91ee3835ddc560adeb4af457953905aaeca79cd6
|
refs/heads/master
| 2020-05-20T05:09:01.877547
| 2019-05-18T13:37:34
| 2019-05-18T13:37:34
| 185,397,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
def ppSentences(errorSents, prediction, actual, err):
    for i, s in enumerate(errorSents):
        print('#'*38)
        print(f'{s} - pred: {prediction[err][i]} | actual: {actual[err][i]}')
        print('\n')
|
[
"pglandsverk@gmail.com"
] |
pglandsverk@gmail.com
|
9881b96519fce86f61a5ee3cb7a611005b646983
|
0d2af397b900fddad3d532a9f772f70473886cf5
|
/tickets/urls.py
|
0e2d8c65e60ed82fb02ab25f58af4e4c1d190634
|
[] |
no_license
|
RobertUJ/Omaha
|
cc779b06e42c08ebadae0b8df4e006ad67d504d1
|
650d5e1e5550bf772f1817e16505c574f361bae0
|
refs/heads/master
| 2016-08-12T13:51:32.262876
| 2016-02-12T00:51:52
| 2016-02-12T00:51:52
| 49,794,851
| 0
| 0
| null | 2016-01-22T00:04:29
| 2016-01-16T23:12:39
|
Python
|
UTF-8
|
Python
| false
| false
| 264
|
py
|
from django.conf.urls import patterns, url
from tickets.views import TicketsIndexView, AddTicketView
urlpatterns = [
url(r'^tickets/$', TicketsIndexView.as_view(), name='TicketsView'),
url(r'^addticket/$', AddTicketView.as_view(), name='AddTicketView'),
]
|
[
"erickhp12@gmail.com"
] |
erickhp12@gmail.com
|
fe588b211aefbc83d08eca506d88db9be266716c
|
0d7247b52044d5bfc498610fe33725c4ca0a2076
|
/MDD-SG-SD.py
|
235989f1e5607b3a6d8c9407160ab862c37b7b9d
|
[] |
no_license
|
SivaArwin/Scraping---Uswitch.com
|
1ebde73978ce7912d164e8965a47fd79106b5026
|
f33e3d9b05b9ba23065c5b2ac9073e16174a0585
|
refs/heads/main
| 2023-03-03T14:06:52.455351
| 2021-02-13T18:11:34
| 2021-02-13T18:11:34
| 338,635,431
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,392
|
py
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import Select
import pandas as pd
import xlsxwriter
import time
import _Custom_Exception as CE
import _Config as config
import _EHl_urls
overlapXpath = "/html/body/div[@id='loaderDiv']"
web_driv = config._WebDriv()
_Mdd_links = config._read_MDD_Urls()
_savePath = config.savePath()
#_regionList = ["Eastern", "East Midlands","London", "MANWEB", "Midlands", "Northern", "NORWEB", "Scottish Hydro", "Scottish Power", "Seeboard", "Southern", "Swalec", "SWEB", "Yorkshire"]
"""
#MainPage #Postcode
postcode = web_driv.find_element_by_xpath("/html/body/main[@class='main']/div/div/div/div[@id='postCodeEntry']/form/fieldset[@class='homepage-cta-container']/div[@class='form-group homepage-cta-input-container']/input[@id='PostCode']")
postcode.send_keys("SS26LU")
CE._Time_to_delay(1)
#Mainpage #Submit button
submit = web_driv.find_element_by_xpath("/html/body/main[@class='main']/div/div/div/div[@id='postCodeEntry']/form/fieldset[@class='homepage-cta-container']/button")
submit.click()
CE._Time_to_delay(10)
#Select Both Gas & Elec
gas_elec_elementXpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='questions-intro']/div[@id='field-compare-type']/div[@class='field-input stacked-radio-buttons']/div/input[@id='compare-type-gas-elec']"
CE._Pass_Through_Me(web_driv,overlapXpath,gas_elec_elementXpath)
CE._Time_to_delay(1)
#Select both same supplier
sameSupplier_elementxpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='questions-intro']/div[@id='field-same-supplier']/div[@class='field-input stacked-radio-buttons']/div/input[@id='comparison-type-same-supplier']"
CE._Pass_Through_Me(web_driv,overlapXpath,sameSupplier_elementxpath)
CE._Time_to_delay(1)
#select tariff
tariffname = web_driv.find_element_by_xpath("/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='section-supply']/span[@id='section-supply-dual']/div[@class='funnel-section question-group-container ng-isolate-scope ng-valid']/div[3]/div[@class='field-input single-radio-button']/select[@id='elecSupplierTariff']")
Select(tariffname).select_by_value("string:44")
CE._Time_to_delay(1)
#select payment method
payment_Method_Xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@class='ng-scope']/div[@id='section-supply']/span[@id='section-supply-dual']/div[@class='funnel-section question-group-container ng-isolate-scope ng-valid ng-dirty ng-valid-parse']/div[@id='field-energy-payment-type']/div[@class='field-input stacked-radio-buttons']/div[@class='ng-scope']/input[@id='elec-payment-type-1']"
CE._Pass_Through_Me(web_driv,overlapXpath,payment_Method_Xpath)
CE._Time_to_delay(1)
#Select gas usage radio button
gas_button_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='gas-usage']/div[@class='field-input expand']/div[@class='radio-gas-usage']/input[@id='gasKWhUsage']"
CE._Pass_Through_Me(web_driv,overlapXpath,gas_button_xpath)
CE._Time_to_delay(3)
#Passing Gas usage
gas_usage_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='gas-usage']/div[@class='field-input expand']/div[@class='radio-gas-usage']/div[@class='input-error-container-inline']/input[@id='gasKWhUsage-usageAsKWh']"
gs_usage_res = web_driv.find_element_by_xpath(gas_usage_xpath)
gs_usage_res.send_keys("12000")
CE._Time_to_delay(1)
#select Elec usage radio button
elec_button_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='electricity-usage']/div[@class='field-input expand']/div[@class='radio-elec-usage']/input[@id='elecKWhUsage']"
CE._Pass_Through_Me(web_driv,overlapXpath,elec_button_xpath)
CE._Pass_Through_Me(web_driv,overlapXpath,elec_button_xpath) #running this code twice because the elec button is not clicked
CE._Time_to_delay(3)
#Passing Elec usage
elec_usage_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[2]/div[@id='section-usage']/div[@id='electricity-usage']/div[@class='field-input expand']/div[@class='radio-elec-usage']/div[@class='input-error-container-inline']/input[@id='elecKWhUsage-usageAsKWh']"
elec_usage_res = web_driv.find_element_by_xpath(elec_usage_xpath)
elec_usage_res.send_keys("3100")
CE._Time_to_delay(1)
#Click Submit button #Page2
show_results_button_xpath = "/html/body/div[@class='ng-scope']/div[@class='container-fluid page_funnel-questions ofy-visible current ng-scope ng-isolate-scope']/div/form[@id='current-optilead']/div[@id='section-spending']/div[2]/div[@id='usageSummary']/div[@class='spending-text ng-scope']/button[@id='show-results']"
CE._Pass_Through_Me(web_driv,overlapXpath,show_results_button_xpath)
CE._Pass_Through_Me(web_driv,overlapXpath,show_results_button_xpath) #running this code twice because the elec button is not clicked
CE._Time_to_delay(10)
#Page 3 #Select Show all results .. #Whole of market
#show_all_tariffs_xpath = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-filter-sidebar-container']/div[@class='funnel-section ng-isolate-scope']/div[@class='funnel-sidebar-wrapper']/div[@id='section-filters']/form/div[@class='field side-bar-form field-stacked']/ul[2]/li[@class='left-column']/input[@id='Show me all generally available plans']"
#CE._Pass_Through_Me(web_driv,overlapXpath,show_all_tariffs_xpath)
#CE._Time_to_delay(3)
"""
writer = pd.ExcelWriter(_savePath+'MDD-SG-SD.xlsx', engine='xlsxwriter')
try:
if(_Mdd_links):
for driver in range(len(_Mdd_links)):
web_driv.delete_all_cookies()
web_driv.get(_Mdd_links[driver])
CE._Time_to_delay(15)
Tariff_Name = {}
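            # Tariff_Name accumulates one list per output column; each
            # setdefault(...).append(...) below grows a column, and the dict is
            # turned into a DataFrame at the end of this loop iteration.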
#Result Table output
who = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-results-container']/div[@class='funnel-section ng-isolate-scope']/div[@id='section-compare-table']/div[@class='compare-table']/div[@class='compare-table-body']"
who_res_final_res = web_driv.find_element_by_xpath(who)
gas = '//*[@id="Gas only"]'
CE._Pass_Through_Me(web_driv,overlapXpath,gas)
CE._Time_to_delay(3)
'''
Ele = '//*[@id="Electricity only"]'
CE._Pass_Through_Me(web_driv,overlapXpath,Ele)
CE._Time_to_delay(3)
'''
## ENQUIRY TARIFFS
#Supplier Name on Enquiry
for _supplierName_enquiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[1]/div[@class='supplier']/p[contains(@class, 'ng-binding') and contains(@class, 'ng-scope')]"):
Tariff_Name.setdefault('SupplierName', []).append(_supplierName_enquiry.text)
#print("Supplier Name ->", _supplierName_enquiry.text )
print("Fetched Supplier Name Enquire..")
#Tariff Name on Enquiry
for _tarifName_enquiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[1]/div[@class='supplier']/p[@class='ng-binding']"):
#print("tariff name ->", _tarifName_enquiry.text)
Tariff_Name.setdefault('TariffName',[]).append(_tarifName_enquiry.text)
print("Fetched Tariff Name....")
#Cancellation fees yes or no on apply
for cancellation_fees in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[2]/p/span[1]/span"):
Tariff_Name.setdefault('Cancellationstatus',[]).append(cancellation_fees.text)
#print("Cancellation >", cancellation_fees.text)
print("Fetched Cancellation status...!!!")
#Tariff expiry
for tariff_expiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[2]/p/span[2]/span"):
Tariff_Name.setdefault('Tariffexpiry',[]).append(tariff_expiry.text)
#print("Expiry >", tariff_expiry.text)
print("Fetched Tariff expiry...!!!")
#annual bill value on apply
for annual_bill in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[3]/p/span[@class='ng-binding']"):
Tariff_Name.setdefault('annual_bill',[]).append(annual_bill.text)
#print("Annual Bills >",annual_bill.text)
print("Fetched Annual values ...!!!")
#On Enquiry
for on_enquiry in who_res_final_res.find_elements_by_xpath("div[contains(@class, 'compare-table-item') and contains(@class, 'compare-cannot-switch')]/fri-result-tariff/div/div[6]/p[@class='ng-binding']"):
if (on_enquiry.text == "This supplier has not made this plan available through us" ):
Tariff_Name.setdefault('Status',[]).append("Enquiry")
#print("#", on_enquiry.text)
print("Fetched on Enquiry ...!!!")
#show Apply only
show_apply_tariffs_xpath = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-filter-sidebar-container']/div[@class='funnel-section ng-isolate-scope']/div[@class='funnel-sidebar-wrapper']/div[@id='section-filters']/form/div[@class='field side-bar-form field-stacked']/ul[1]/li[@class='left-column']/input[@id='Show plans you can switch me to']"
CE._Pass_Through_Me(web_driv,overlapXpath,show_apply_tariffs_xpath)
CE._Time_to_delay(3)
### APPLY TARIFFS
print("Fetching on apply tariffs now.......")
#Supplier Name On Apply #img[@class='supplier-logo ng-scope']
for SA in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[1]/div[@class='supplier']/img[@class='supplier-logo ng-scope']"):
Tariff_Name.setdefault('SupplierName',[]).append(SA.get_attribute('alt'))
#print("Supplier Name >", SA.get_attribute('alt'))
print("Fetched Supplier Name....!!!")
#Tariff Name on Apply
for TA in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[1]/div[@class='supplier']/p[@class='ng-binding']"):
Tariff_Name.setdefault('TariffName',[]).append(TA.text)
#print("Tariff Name >",TA.text)
print("Fetched Tariff Name....!!!")
#Cancellation fees yes or no on apply
for cancellation_fees in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[2]/p/span[1]/span"):
Tariff_Name.setdefault('Cancellationstatus',[]).append(cancellation_fees.text)
#print("Cancellation fees >", cancellation_fees.text)
print("Fetched Cancellation status...!!!")
#Tariff expiry
for tariff_expiry in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[2]/p/span[2]/span"):
Tariff_Name.setdefault('Tariffexpiry',[]).append(tariff_expiry.text)
#print("Expiry >", tariff_expiry.text)
print("Fetched Tariff expiry...!!!")
#annual bill value on apply
for annual_bill in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[3]/p/span[@class='ng-binding']"):
Tariff_Name.setdefault('annual_bill',[]).append(annual_bill.text)
#print("Annual Bills >",annual_bill.text)
print("Fetched Annual values ...!!!")
#On Apply
for on_apply in who_res_final_res.find_elements_by_xpath("div/fri-result-tariff/div/div[6]/button"):
if (on_apply.text == "I WANT THIS PLAN"):
Tariff_Name.setdefault('Status',[]).append("Apply")
#print("#", on_apply.text)
print("Fetched on Apply ...!!!")
'''
#Page 3 #Select Show all results .. #Whole of market
show_all_tariffs_xpath = "/html/body/div[@class='ng-scope']/div[@class='page_funnel-questions container-fluid ofy-visible ng-scope ng-isolate-scope']/div[@class='row wider-margin funnel-columns']/section/div[@class='funnel-filter-sidebar-container']/div[@class='funnel-section ng-isolate-scope']/div[@class='funnel-sidebar-wrapper']/div[@id='section-filters']/form/div[@class='field side-bar-form field-stacked']/ul[2]/li[@class='left-column']/input[@id='Show me all generally available plans']"
CE._Pass_Through_Me(web_driv,overlapXpath,show_all_tariffs_xpath)
CE._Time_to_delay(3)
'''
_df = pd.DataFrame.from_dict(Tariff_Name)
#for _region in driver:
_df.to_excel(writer, sheet_name=str(driver+1), index=False)
print("Region %d complete" %(driver+1))
#tn.to_csv('EHL.csv', index=False, sep=',', encoding='utf-8')
#print(tn)
writer.save()
print("File is ready to use!!!")
web_driv.close()
except TimeoutException:
print("Link is broken... Replace new url")
web_driv.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
abb40cfd7886a6089a10fff801f6ff4840838feb
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/RecoBTag/ONNXRuntime/python/pfParticleNetFromMiniAODAK4DiscriminatorsJetTags_cfi.py
|
b09fabc5e9632fe7d6cba6adb353d5a7f3afbfa9
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 18,375
|
py
|
import FWCore.ParameterSet.Config as cms
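# Each producer below combines the raw ParticleNet output probabilities into
# named discriminants; judging from its inputs, every PSet defines a score of
# the form sum(numerator probabilities) / sum(denominator probabilities), e.g.
#   BvsAll = prob(b) / [prob(b) + prob(c) + prob(uds) + prob(g)].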
pfParticleNetFromMiniAODAK4PuppiCentralDiscriminatorsJetTags = cms.EDProducer(
'BTagProbabilityToDiscriminator',
discriminators = cms.VPSet(
cms.PSet(
name = cms.string('BvsAll'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'),
),
denominator=cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'),
),
),
cms.PSet(
name = cms.string('CvsL'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'),
),
),
cms.PSet(
name = cms.string('CvsB'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'),
),
),
cms.PSet(
name = cms.string('QvsG'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'),
),
),
cms.PSet(
name = cms.string('TauVsJet'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probb'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probc'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probuds'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probg'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
),
),
cms.PSet(
name = cms.string('TauVsEle'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probele'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
),
),
cms.PSet(
name = cms.string('TauVsMu'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probmu'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiCentralJetTags', 'probtaum3h1p'),
),
),
)
)
pfParticleNetFromMiniAODAK4PuppiForwardDiscriminatorsJetTags = cms.EDProducer(
'BTagProbabilityToDiscriminator',
discriminators = cms.VPSet(
cms.PSet(
name = cms.string('QvsG'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiForwardJetTags', 'probq'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiForwardJetTags', 'probq'),
cms.InputTag('pfParticleNetFromMiniAODAK4PuppiForwardJetTags', 'probg'),
),
),
)
)
pfParticleNetFromMiniAODAK4CHSCentralDiscriminatorsJetTags = cms.EDProducer(
'BTagProbabilityToDiscriminator',
discriminators = cms.VPSet(
cms.PSet(
name = cms.string('BvsAll'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'),
),
denominator=cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'),
),
),
cms.PSet(
name = cms.string('CvsL'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'),
),
),
cms.PSet(
name = cms.string('CvsB'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'),
),
),
cms.PSet(
name = cms.string('QvsG'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'),
),
),
cms.PSet(
name = cms.string('TauVsJet'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probb'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probc'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probuds'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probg'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
),
),
cms.PSet(
name = cms.string('TauVsEle'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probele'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
),
),
cms.PSet(
name = cms.string('TauVsMu'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probmu'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaup3h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h1p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum1h2p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h0p'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSCentralJetTags', 'probtaum3h1p'),
),
),
)
)
pfParticleNetFromMiniAODAK4CHSForwardDiscriminatorsJetTags = cms.EDProducer(
'BTagProbabilityToDiscriminator',
discriminators = cms.VPSet(
cms.PSet(
name = cms.string('QvsG'),
numerator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSForwardJetTags', 'probq'),
),
denominator = cms.VInputTag(
cms.InputTag('pfParticleNetFromMiniAODAK4CHSForwardJetTags', 'probq'),
cms.InputTag('pfParticleNetFromMiniAODAK4CHSForwardJetTags', 'probg'),
),
),
)
)
|
[
"stephane.b.cooperstein@cern.ch"
] |
stephane.b.cooperstein@cern.ch
|
c75ea51b954cef8081502d553948e07b0487abe9
|
bf813d2b877fb8ba62feb4263484db3d0f26d5cd
|
/early-phd/map_to_flux.py
|
1c2d0eab20e2c6fa5e1fe3228a8f9507a9b7ba48
|
[] |
no_license
|
9217392354A/astro-scripts
|
1e8e8c827097a877518d1f3e10870a5c2609417c
|
cd7a175bd504b4e291020b551db3077b067bc632
|
refs/heads/master
| 2021-01-13T00:40:57.481755
| 2016-03-25T17:04:28
| 2016-03-25T17:04:28
| 54,730,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
#Program created by Chris Fuller to test a function for extracting fluxes from a FITS file using aperture photometry
#import stuff
from numpy import *
import numpy
import scipy
import math
import sys
import os
from os.path import join as pj
#File stuff
cat = "bigcoma.csv"
catfolder = "/Users/chrisfuller/Dropbox/coma/Catalogues"
catout ="comaTEST.csv"
folder = "/Users/chrisfuller/Dropbox/coma/flux2/"
|
[
"chrisfuller@Chriss-MBP.lan"
] |
chrisfuller@Chriss-MBP.lan
|
ab04985a81690a29fc99f93e08d4a4ec4e364ad5
|
847273de4b1d814fab8b19dc651c651c2d342ede
|
/.history/Sudoku_II_004_20180618143456.py
|
c999da2e6ae97112548cc81b5e4e3de4c117dc62
|
[] |
no_license
|
Los4U/sudoku_in_python
|
0ba55850afcffeac4170321651620f3c89448b45
|
7d470604962a43da3fc3e5edce6f718076197d32
|
refs/heads/master
| 2020-03-22T08:10:13.939424
| 2018-07-04T17:21:13
| 2018-07-04T17:21:13
| 139,749,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,415
|
py
|
from random import randint
# Sudoku1 almost solved
sudoku1 = [
[5, 9, 8, 6, 1, 2, 3, 4, 7],
[2, 1, 7, 9, 3, 4, 8, 6, 5],
[6, 4, 3, 5, 8, 7, 1, 2, 9],
[1, 6, 5, 4, 9, 8, 2, 7, 3],
[3, 2, 9, 7, 6, 5, 4, 1, 8],
[7, 8, 4, 3, 2, 1, 5, 9, 6],
[8, 3, 1, 2, 7, 6, 9, 5, 4],
[4, 7, 2, 8, 5, 9, 6, 3, 1],
[9, 5, ' ', ' ', ' ', ' ', ' ', ' ', 2]
]
# Sudoku 2 almost solved
# row1 = [9,8,7,4,3,2,5,6,1]
# row2 = [2,4,3,5,1,6,8,7,9]
# row3 = [5,6,1,7,9,8,4,3,2]
# row4 = [3,9,5,6,4,7,2,1,8]
# row5 = [8,2,4,3,5,1,6,9,7]
# row6 = [1,7,6,2,8,9,3,4,5]
# row7 = [7,1,2,8,6,3,9,5,4]
# row8 = [4,3,8,9,7,5,1,2,6]
# row9 = [' ',5,' ',' ',2,' ',7,' ',' ']
def printSudoku():
i = 0
while i < 10:
if i == 0:
print(" 1 2 3 4 5 6 7 8 9")
print(" -------------------------")
elif i == 3 or i == 6 or i == 9:
print(" -------------------------")
spaceBar = "|"
if i < 9:
print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku1[i], spaceBar,i+1))
i = i + 1
while True:  # prints the Sudoku until it is solved
print("Your sudoku to solve:")
printSudoku()
print("Input 3 numbers in format a b c, np. 4 5 8")
print(" a - row number")
print(" b - column number ")
print(" c - value")
# vprint(" r - reset chart to start\n ")
x = input("Input a b c: ")
print("")
numbers = " 0123456789" # conditions of entering the numbers !
if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
if x == "r": # reset
print(" Function reset() will be ready in Next Week")
else:
print("Error - wrong number format \n ")
continue
    sudoku1[int(x[0])-1][int(x[2])-1] = int(x[4])  # store as int so the row-sum check below works
try:
i = 0
for item in sudoku1:
if sum(item) == 45:
i = i + 1
if i == 9:
print("YOU WIN")
break
except TypeError:
print()
'''
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
'''
|
[
"inz.kamil.wos@gmail.com"
] |
inz.kamil.wos@gmail.com
|
e495a6da64e3b39072332ee3934ad2f8318bb290
|
b7bb0a3ea2078dbdaa17947fd841fe1c9b5e356b
|
/oschown/workflows.py
|
a5b712d06344ce7e3a3a514e1831195fb2f2557f
|
[
"Apache-2.0"
] |
permissive
|
epim/oschown
|
48d4a7528ed38fb12cae408baad7a6e370ba86f1
|
a50d3ad3769dad8d1f56dfe171d5345b3bee517f
|
refs/heads/master
| 2021-02-13T21:21:01.223901
| 2018-09-17T15:28:41
| 2018-09-17T15:28:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,690
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import mock
import oslo_config.cfg
nova_conf = oslo_config.cfg.ConfigOpts()
cinder_conf = oslo_config.cfg.ConfigOpts()
# NOTE(danms): This is a crazy hack to import these project modules
# but with separated global oslo.config objects. Hopefully I can
# replace this with something that isn't quite as crazy (and at least
# doesn't use mock), but this works for testing.
with mock.patch('oslo_config.cfg.CONF', new=cinder_conf):
from oschown import chown_cinder
with mock.patch('oslo_config.cfg.CONF', new=nova_conf):
from oschown import chown_nova
from oschown import chown_neutron
from oschown import exception
LOG = logging.getLogger(__name__)
def parse_resource_id(resource_id):
return resource_id.split(':', 1)
class ResourceCollection(object):
"""A collection of resources across projects.
Collects resources that must be resolved and chown'ed together.
"""
RESOURCE_TYPES = {
'cinder': chown_cinder.CinderProject(),
'nova': chown_nova.NovaProject(),
'neutron': chown_neutron.NeutronProject(),
}
def __init__(self, context):
self._collected_resources = {}
self._context = context
def need_resource(self, resource_id):
"""Mark a resource id like project:id as needed for resolution.
Needed resources must be chown'ed with the other resources in
the collection.
"""
if resource_id not in self._collected_resources:
self._collected_resources[resource_id] = None
@property
def resolved_resources(self):
"""A list of ChownableResource objects that have been resolved."""
return [res for res in self._collected_resources.values()
if res is not None]
@property
def unresolved_resources(self):
"""A list of resource identifiers that are yet unresolved."""
return [r_id for r_id, r_res in self._collected_resources.items()
if r_res is None]
@property
def have_all_resources(self):
"""Return whether or not all known resources have been resolved."""
return len(self.unresolved_resources) == 0
def resolve_missing_resources_one(self):
"""One pass of resource resolution.
Make one pass through the list of unresolved resources and try
        to resolve them (collecting any additional dependencies).
"""
for resource_id in self.unresolved_resources:
project_id, local_id = parse_resource_id(resource_id)
if project_id not in self.RESOURCE_TYPES:
raise exception.UnknownResourceType()
project = self.RESOURCE_TYPES[project_id]
resource = project.collect_resource_by_id(self._context,
local_id)
self._collected_resources[resource_id] = resource
for dep in resource.dependencies:
self.need_resource(dep)
def resolve_missing_resources(self):
"""Resolve all resources.
Attempt to repeatedly resolve all resources in the list of
needed ones. This runs until we have resolved all resources or
we stop making progress.
:raises: exception.UnableToResolveResources if some resources are not
resolvable
"""
last_unresolved = None
while not self.have_all_resources:
self.resolve_missing_resources_one()
now_unresolved = self.unresolved_resources
if now_unresolved == last_unresolved:
raise exception.UnableToResolveResources()
last_unresolved = now_unresolved
def chown_resources(self):
"""Actually change ownership of all resources in the collection.
Does not actually change ownership if the context indicates a dry run
should be performed.
"""
for resource in self.resolved_resources:
if self._context.dry_run:
LOG.info('Would chown resource %s' % resource.identifier)
else:
LOG.info('Chowning resource %s' % resource.identifier)
resource.chown(self._context)
def _workflow_main(context, collection):
try:
collection.resolve_missing_resources()
except exception.ChownException as e:
LOG.error('Unable to resolve resources: %s' % e)
return
LOG.info('Resolved %i resources to be chowned: %s' % (
len(collection.resolved_resources),
','.join([r.identifier for r in collection.resolved_resources])))
collection.chown_resources()
def workflow_nova(context, instance_id):
"""Resolve and change ownership of an instance and dependent resources."""
collection = ResourceCollection(context)
collection.need_resource('nova:%s' % instance_id)
_workflow_main(context, collection)
def workflow_cinder(context, volume_id):
"""Resolve and change ownership of a volume and dependent resources."""
collection = ResourceCollection(context)
collection.need_resource('cinder:%s' % volume_id)
_workflow_main(context, collection)
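# Hypothetical usage sketch (not part of the original module): the only context
# attribute this file reads directly is dry_run; the per-project plugins
# (chown_nova, chown_cinder, ...) will likely need more, so a real run would
# pass the project's full context object instead of this stub.
if __name__ == '__main__':
    class _DemoContext(object):
        dry_run = True
    workflow_nova(_DemoContext(), 'example-instance-uuid')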
|
[
"dansmith@redhat.com"
] |
dansmith@redhat.com
|
9da01c5fe4850d89d6df0c28383d6624f962e764
|
83179abbad0032fd3c8c38a54260ac4239ba9df3
|
/2021/python/day15/day15.py
|
24a8f0e5bd6154fc5d2140e760a2d5b58031e146
|
[] |
no_license
|
yulrizka/adventofcode
|
448ac89ae543c8a7ee46bb5f86abc62887e3a9ee
|
74b89528e07ae6282763968d5bb3d8eea38e07ba
|
refs/heads/master
| 2023-01-13T03:57:20.688851
| 2022-12-22T11:11:59
| 2022-12-22T11:11:59
| 225,181,497
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,966
|
py
|
import queue
import unittest
# with open("../../input/day15-sample") as f:
with open("../../input/day15") as f:
raw = [[int(x) for x in y] for y in f.read().strip().split("\n")]
def wrap(x):
while x > 9:
x = x - 9
return x
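# Part 2 tiles the original grid 5x5: each tile adds its (down + right) offset
# to every risk value, and wrap() folds anything above 9 back into 1..9.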
data2 = raw.copy()
for i in range(4):
row = list(map(lambda x: list(map(lambda y: wrap(y + (i + 1)), x)), raw))
data2 += row
for i, current_row in enumerate(data2):
rr = current_row.copy()
for j in range(4):
row = list(map(lambda y: wrap(y + (j + 1)), current_row))
rr += row
data2[i] = rr
nr = [-1, 0, 1, 0]
nc = [0, 1, 0, -1]
def solve(raw):
R = len(raw)
C = len(raw[0])
# build vertices
D = {}
G = {}
for r in range(R):
for c in range(C):
D[(r, c)] = float('inf')
for dd in range(4):
rr = r + nr[dd]
cc = c + nc[dd]
if 0 <= rr < R and 0 <= cc < C:
G[((r, c), (rr, cc))] = int(raw[rr][cc])
D[(0, 0)] = 0
# dijkstra
pq = queue.PriorityQueue()
pq.put((0, (0, 0)))
while not pq.empty():
(dist, current_vertex) = pq.get()
for dd in range(4):
rr = current_vertex[0] + nr[dd]
cc = current_vertex[1] + nc[dd]
if 0 <= rr < R and 0 <= cc < C:
neighbor = (rr, cc)
distance = G[(current_vertex, neighbor)]
old_cost = D[neighbor]
new_cost = D[current_vertex] + distance
if new_cost < old_cost:
D[neighbor] = new_cost
pq.put((new_cost, neighbor))
return D[(R - 1, C - 1)]
def part1():
return solve(raw)
def part2():
return solve(data2)
class TestSum(unittest.TestCase):
def test1(self):
ans = part1()
print(ans)
assert ans == 498
def test2(self):
ans = part2()
print(ans)
assert ans == 2901
|
[
"yulrizka@users.noreply.github.com"
] |
yulrizka@users.noreply.github.com
|
cc8c69ab62120ec4784513c836d1a7756d9b1a0d
|
2814757215ea599c47817315902a1642459970df
|
/object-dev/student-info/two_version/step5.py
|
1cdf392ac433075bcca876a89264a944d9d516a2
|
[] |
no_license
|
legolas999/Python-learning
|
caadf31e60b973864f365c4f27eb9589bc1cdcd2
|
1a828595bc9596e737cc997bfad1f245b3314e8b
|
refs/heads/master
| 2020-05-15T04:11:13.328995
| 2019-06-08T16:17:04
| 2019-06-08T16:17:04
| 182,081,867
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,281
|
py
|
#!/usr/bin/python3.6
# Global list used to store every student record
student_info = []
def print_menu():
    '''Print the menu of available operations'''
    # 1. Print the menu
print('=' * 40)
print('\t{:<40}'.format('学生信息管理系统V1.0'))
print('\t{:<40}'.format('1.查询学员信息'))
print('\t{:<40}'.format('2.增加学员信息'))
print('\t{:<40}'.format('3.修改学员信息'))
print('\t{:<40}'.format('4.删除学员信息'))
print('\t{:<40}'.format('5.显示学员信息'))
print('\t{:<40}'.format('6.保存学员信息'))
print('\t{:<40}'.format('7.退出系统'))
print('=' * 40)
def add_stu_info():
    '''Add a new student record'''
    global student_info
    # Read the new student's details from the user
new_number = input('请输入你的学号:')
new_name = input('请输入你的姓名:')
new_id = input('请输入你的身份证号码:')
new_phone = input('请输入你的电话号码:')
new_dormitory = input('请输入你的宿舍号码:')
new_addr = input('请输入你的籍贯地址:')
    # Build a new dict to hold this student's information
new_info = {}
new_info['number'] = new_number
new_info['name'] = new_name
new_info['id'] = new_id
new_info['phone'] = new_phone
new_info['dormitory'] = new_dormitory
new_info['address'] = new_addr
    # Append the new record to the overall student list
student_info.append(new_info)
#print(student_info) # for test
def find_stu_info():
    '''Look up a student record by name'''
global student_info
    # Ask for the name to search for
find_name = input('请输入要查找的学员姓名:')
    find_flag = 0  # 0 means "not found yet"
for item in student_info:
if find_name == item['name']:
print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format('学号','姓名','身份证','电话','宿舍','籍贯'))
print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format(item['number'],item['name'],item['id'],item['phone'],\
item['dormitory'],item['address']))
            find_flag = 1  # the record has been found
            break  # stop once the match has been printed
    # Report when no matching student was found
if find_flag == 0:
print("查无此人")
def show_stu_info():
    '''Display every student record'''
global student_info
print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format('学号','姓名','身份证','电话','宿舍','籍贯'))
for item in student_info:
print('{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}\t{:<20}'.format(item['number'],item['name'],item['id'],item['phone'],\
item['dormitory'],item['address']))
def save_stu_info():
    '''Save all student records to a file'''
global student_info
f = open('stu_info.data','w')
f.write(str(student_info))
f.close()
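# The data file stores the textual (str/repr) form of the student list;
# load_stu_info() below restores it with eval().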
def load_stu_info():
    '''Load previously saved student records'''
global student_info
try:
f = open('stu_info.data')
student_info = eval(f.read())
f.close()
except Exception:
pass
def modify_stu_info():
    '''Modify an existing student record'''
global student_info
find_flag = 0
modify_name = input('请输入需要修改的学生名字:')
for item in student_info:
if modify_name == item['name']:
modify_number = input('请输入你的新的学号:')
modify_id = input('请输入你的新的身份证号码:')
modify_phone = input('请输入你的新的电话号码:')
modify_dormitory = input('请输入你的新的宿舍号码:')
modify_addr = input('请输入你的新的籍贯地址:')
item['number'] = modify_number
item['id'] = modify_id
item['phone'] = modify_phone
item['dormitory'] = modify_dormitory
item['address'] = modify_addr
find_flag = 1
break
if find_flag == 0:
print('输入的名字不正确,重新输入')
def delete_stu_info():
'''实现删除学生信息功能'''
global student_info
find_flag = 0
del_name = input('请输入要删除的学生名字:')
for item in student_info:
if del_name == item['name']:
del student_info[student_info.index(item)]
find_flag = 1
break
if find_flag == 0:
print('此学生不存在,请重新输入')
def main():
#恢复以前数据到程序中
load_stu_info()
#打印功能提示菜单
print_menu()
while True:
#2. 获取用户的输入
num = int(input('请输入操作序号:'))
#3. 根据用户的数据执行相应的功能
if num==1:
find_stu_info()
elif num==2:
add_stu_info()
elif num==3:
modify_stu_info()
elif num==4:
delete_stu_info()
elif num==5:
show_stu_info()
elif num==6:
save_stu_info()
elif num==7:
break
else:
print('输入有误,请重新输入')
print('-'*50)
print('')
if __name__ == '__main__':
main()
|
[
"lqr888888@aliyun.com"
] |
lqr888888@aliyun.com
|
99d5656ae432b56eb9438da7a8014adeca443e39
|
ee2c15d82ff596f4ca9eda408f8e096b787f0d48
|
/Python/4 Dictionaries_Sets/4 dictionary/sets_challenge.py
|
7a56065963a00863f02685fa85a6c29210e88624
|
[] |
no_license
|
sainimohit23/algorithms
|
1bbfee3bd4d1049b18425bf0d86ecaacd4c43ea0
|
911986abe015f7518ef169a5866b1058c7d41d4f
|
refs/heads/master
| 2022-11-13T17:40:06.128838
| 2020-06-30T17:35:35
| 2020-06-30T17:35:35
| 268,071,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
my_str = input("enter some text please ")
my_set = set(my_str)
vowels = set("aeiou")
finalset = my_set.difference(vowels)
finallist = sorted(finalset)
for num in finallist:
print(num)
|
[
"sainimohit23@gmail.com"
] |
sainimohit23@gmail.com
|
99cd43a8c940db281d4db4d33d06b1cee795bc61
|
c5291e50a3c72c885922378573a0ad423fcedf05
|
/analysis/data/urls.py
|
e7638f31b2b04491d30e6f29d5a4d9826f2a05c3
|
[] |
no_license
|
raghurammanyam/django-projects
|
bcc3ed6285882af437a2995514cef33760fb063e
|
dd20ae354f7f111a0176a1cc047c099bd23e9f05
|
refs/heads/master
| 2022-12-12T19:22:31.698114
| 2018-12-09T09:41:45
| 2018-12-09T09:41:45
| 137,443,359
| 0
| 0
| null | 2022-11-22T03:01:07
| 2018-06-15T05:08:15
|
Python
|
UTF-8
|
Python
| false
| false
| 196
|
py
|
from django.conf.urls import url
from django.urls import path
from .views import test,get
from django.http import HttpResponse
urlpatterns = [
url(r'^date/',test),
url(r'^get/',get)
]
|
[
"manyamraghuram@gmail.com"
] |
manyamraghuram@gmail.com
|
784e7a40abe66b769c8b6ffca8fcf4ff447532c1
|
88ff86b95b377a4fd10474d2b215b0cf0b32143c
|
/src/ralph/scan/plugins/ssh_proxmox.py
|
5627cf11be6d296a44bcf87c00dae5afd8551d1c
|
[
"Apache-2.0"
] |
permissive
|
fossabot/ralph
|
f00fbfd9e64ae779633e0ea1faeb7fbe8f35353f
|
9eb82955adf6b662bc460112b3d9b2d574ef0d70
|
refs/heads/master
| 2020-07-04T15:27:38.758147
| 2014-04-28T15:08:59
| 2014-04-28T15:08:59
| 202,324,100
| 0
| 0
|
NOASSERTION
| 2019-08-14T09:59:42
| 2019-08-14T09:59:41
| null |
UTF-8
|
Python
| false
| false
| 9,507
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import json
from django.conf import settings
from ralph.discovery.hardware import get_disk_shares
from ralph.discovery.models import DeviceType
from ralph.scan.errors import ConnectionError, NoMatchError, NoLanError
from ralph.scan.plugins import get_base_result_template
from ralph.util import network
SETTINGS = settings.SCAN_PLUGINS.get(__name__, {})
logger = logging.getLogger("SCAN")
def _connect_ssh(ip_address, user, password):
if not network.check_tcp_port(ip_address, 22):
raise ConnectionError('Port 22 closed on a Proxmox server.')
return network.connect_ssh(ip_address, user, password)
def _get_master_ip_address(ssh, ip_address, cluster_cfg=None):
if not cluster_cfg:
stdin, stdout, stderr = ssh.exec_command("cat /etc/pve/cluster.cfg")
data = stdout.read()
else:
data = cluster_cfg
if not data:
stdin, stdout, stderr = ssh.exec_command("pvesh get /nodes")
data = stdout.read()
if data:
for node in json.loads(data):
stdin, stdout, stderr = ssh.exec_command(
'pvesh get "/nodes/%s/dns"' % node['node'],
)
dns_data = stdout.read()
if not dns_data:
return ip_address
ip_address = json.loads(dns_data)['dns1']
break
else:
return ip_address
nodes = {}
current_node = None
for line in data.splitlines():
line = line.strip()
if line.endswith('{'):
current_node = line.replace('{', '').strip()
nodes[current_node] = {}
elif line.endswith('}'):
current_node = None
elif ':' in line and current_node:
key, value = (v.strip() for v in line.split(':', 1))
nodes[current_node][key] = value
for node, pairs in nodes.iteritems():
is_master = node.startswith('master')
try:
ip_address = pairs['IP']
except KeyError:
continue
if is_master:
return ip_address
return ip_address
def _get_cluster_member(ssh, ip_address):
stdin, stdout, stderr = ssh.exec_command("ifconfig eth0 | head -n 1")
mac = stdout.readline().split()[-1]
return {
'model_name': 'Proxmox',
'mac_addresses': [mac],
'installed_software': [{
'model_name': 'Proxmox',
'path': 'proxmox',
}],
'system_ip_addresses': [ip_address],
}
def _get_local_disk_size(ssh, disk):
"""Return the size of a disk image file, in bytes"""
path = os.path.join('/var/lib/vz/images', disk)
stdin, stdout, stderr = ssh.exec_command("du -m '%s'" % path)
line = stdout.read().strip()
if not line:
return 0
size = int(line.split(None, 1)[0])
return size
def _get_virtual_machine_info(
ssh,
vmid,
master_ip_address,
storages,
hypervisor_ip_address,
):
stdin, stdout, stderr = ssh.exec_command(
"cat /etc/qemu-server/%d.conf" % vmid,
)
lines = stdout.readlines()
if not lines:
# Proxmox 2 uses a different directory structure
stdin, stdout, stderr = ssh.exec_command(
"cat /etc/pve/nodes/*/qemu-server/%d.conf" % vmid,
)
lines = stdout.readlines()
disks = {}
lan_model = None
name = 'unknown'
for line in lines:
line = line.strip()
if line.startswith('#') or ':' not in line:
continue
key, value = line.split(':', 1)
if key.startswith('vlan'):
lan_model, lan_mac = value.split('=', 1)
elif key.startswith('net'):
lan_model, lan_mac = value.split('=', 1)
if ',' in lan_mac:
lan_mac = lan_mac.split(',', 1)[0]
elif key == 'name':
name = value.strip()
elif key == 'sockets':
cpu_count = int(value.strip())
elif key.startswith('ide') or key.startswith('virtio'):
disks[key] = value.strip()
if lan_model is None:
raise NoLanError(
"No LAN for virtual server %s. Hypervisor IP: %s" % (
vmid,
hypervisor_ip_address,
),
)
device_info = {
'model_name': 'Proxmox qemu kvm',
'type': DeviceType.virtual_server.raw,
'mac_addresses': [lan_mac],
'management': master_ip_address, # ?
'hostname': name,
}
detected_disks = []
detected_shares = []
for slot, disk in disks.iteritems():
params = {}
if ',' in disk:
disk, rawparams = disk.split(',', 1)
for kv in rawparams.split(','):
if not kv.strip():
continue
k, v = kv.split('=', 1)
params[k] = v.strip()
if ':' in disk:
vg, lv = disk.split(':', 1)
else:
vg = ''
lv = disk
if vg == 'local':
size = _get_local_disk_size(ssh, lv)
if not size > 0:
continue
detected_disks.append({
'family': 'QEMU disk image',
'size': size,
'label': slot,
'mount_point': lv,
})
continue
if vg in ('', 'local', 'pve-local'):
continue
vol = '%s:%s' % (vg, lv)
try:
wwn, size = storages[lv]
except KeyError:
logger.warning(
'Volume %s does not exist. Hypervisor IP: %s' % (
lv,
hypervisor_ip_address,
),
)
continue
detected_shares.append({
'serial_number': wwn,
'is_virtual': True,
'size': size,
'volume': vol,
})
if detected_disks:
device_info['disks'] = detected_disks
if detected_shares:
device_info['disk_shares'] = detected_shares
detected_cpus = [
{
'family': 'QEMU Virtual',
'model_name': 'QEMU Virtual CPU',
'label': 'CPU {}'.format(i + 1),
'index': i + 1,
'cores': 1,
} for i in range(cpu_count)
]
if detected_cpus:
device_info['processors'] = detected_cpus
return device_info
def _get_virtual_machines(ssh, master_ip_address, hypervisor_ip_address):
detected_machines = []
storages = get_disk_shares(ssh)
stdin, stdout, stderr = ssh.exec_command("qm list")
for line in stdout:
line = line.strip()
if line.startswith('VMID'):
continue
vmid, name, status, mem, bootdisk, pid = (
v.strip() for v in line.split()
)
if status != 'running':
continue
vmid = int(vmid)
try:
device_info = _get_virtual_machine_info(
ssh,
vmid,
master_ip_address,
storages,
hypervisor_ip_address,
)
except NoLanError as e:
logger.warning(unicode(e))
else:
detected_machines.append(device_info)
return detected_machines
def _ssh_proxmox(ip_address, user, password):
ssh = _connect_ssh(ip_address, user, password)
try:
cluster_cfg = None
for command in (
'cat /etc/pve/cluster.cfg',
'cat /etc/pve/cluster.conf',
'cat /etc/pve/storage.cfg',
'pvecm help',
):
stdin, stdout, stderr = ssh.exec_command(command)
data = stdout.read()
if data != '':
if command == 'cat /etc/pve/cluster.cfg':
cluster_cfg = data
break
else:
raise NoMatchError('This is not a PROXMOX server.')
master_ip_address = _get_master_ip_address(
ssh,
ip_address,
cluster_cfg,
)
cluster_member = _get_cluster_member(ssh, ip_address)
subdevices = _get_virtual_machines(
ssh,
master_ip_address,
ip_address,
)
if subdevices:
cluster_member['subdevices'] = subdevices
finally:
ssh.close()
return cluster_member
def scan_address(ip_address, **kwargs):
if 'nx-os' in (kwargs.get('snmp_name') or '').lower():
raise NoMatchError('Incompatible Nexus found.')
if kwargs.get('http_family') not in ('Proxmox',):
raise NoMatchError('It is not Proxmox.')
user = SETTINGS.get('user')
password = SETTINGS.get('password')
messages = []
result = get_base_result_template('ssh_proxmox', messages)
if not user or not password:
result['status'] = 'error'
messages.append(
'Not configured. Set SSH_USER and SSH_PASSWORD in your '
'configuration file.',
)
else:
try:
device_info = _ssh_proxmox(ip_address, user, password)
except (ConnectionError, NoMatchError) as e:
result['status'] = 'error'
messages.append(unicode(e))
else:
result.update({
'status': 'success',
'device': device_info,
})
return result
|
[
"andrew.jankowski@gmail.com"
] |
andrew.jankowski@gmail.com
|
7aade3ac2d090d75cb7eb785668927ac61e0d212
|
297b6b2a030a0d665fd12780da80bc64a9016f59
|
/Assignment2/Assignment/makeChange.py
|
5d9e807a700003f2aa560de428e99a25f0a3393e
|
[] |
no_license
|
z0t0b/COMP5703
|
133ed9a90ba2024616a7ad5480937b89a9f70072
|
bd89faa66f726c9675d4e58855577e2fda1075c4
|
refs/heads/master
| 2022-04-21T15:50:39.272916
| 2020-04-15T02:40:13
| 2020-04-15T02:40:13
| 255,782,341
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,310
|
py
|
import decimal
changeList = [0, 0, 0, 0, 0, 0, 0, 0]
def chop_to_n_decimals(x, n):
# rounds x to n decimals (works better for inputs like 0.005 than standard round func)
d = decimal.Decimal(repr(x))
targetdigit = decimal.Decimal("1e%d" % -n)
chopped = d.quantize(targetdigit, decimal.ROUND_HALF_UP)
return float(chopped)
def makingChange(inputVal, index, amount):
num = int(inputVal / amount)
changeList[index] = num
inputVal -= (num * amount)
if(amount < 1):
inputVal = chop_to_n_decimals(inputVal, 2)
return inputVal
def makeChange(amount = []):
if((isinstance(amount, int) or isinstance(amount, float)) and (amount < 99.995 and amount >= 0.0)):
roundedAmount = chop_to_n_decimals(amount, 2)
roundedAmount = makingChange(roundedAmount, 0, 20)
roundedAmount = makingChange(roundedAmount, 1, 10)
roundedAmount = makingChange(roundedAmount, 2, 5)
roundedAmount = makingChange(roundedAmount, 3, 1)
roundedAmount = makingChange(roundedAmount, 4, 0.25)
roundedAmount = makingChange(roundedAmount, 5, 0.10)
roundedAmount = makingChange(roundedAmount, 6, 0.05)
roundedAmount = makingChange(roundedAmount, 7, 0.01)
return changeList
return None
|
[
"noreply@github.com"
] |
noreply@github.com
|
5a18ee6526a8d5b5735523e7efe503c9224f57c1
|
35631053e6c1e7d01d31c27e10388204ab59b8f2
|
/Streaming Media Player/pop_up_message.py
|
aaa1ee198e1f91f7dbfde98f2f21ab3e38f033da
|
[] |
no_license
|
vanduan/DichVuMang
|
7cf442498820c6c39362cc69e1fd10b503fca704
|
c569cf52265356ed67eb703f50ddc65e6ce9e846
|
refs/heads/master
| 2021-01-21T13:03:15.699281
| 2016-04-22T10:41:27
| 2016-04-22T10:41:27
| 55,877,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 900
|
py
|
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
def window():
app = QApplication(sys.argv)
w = QWidget()
b = QPushButton(w)
b.setText("Show message!")
b.move(50,50)
b.clicked.connect(showdialog)
w.setWindowTitle("PyQt Dialog demo")
w.show()
sys.exit(app.exec_())
def showdialog():
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("This is a message box")
msg.setInformativeText("This is additional information")
msg.setWindowTitle("MessageBox demo")
msg.setDetailedText("The details are as follows:")
msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
msg.buttonClicked.connect(msgbtn)
retval = msg.exec_()
print "value of pressed message box button:", retval
def msgbtn(i):
print "Button pressed is:",i.text()
if __name__ == '__main__':
window()
|
[
"vanduan95.dvp@gmail.com"
] |
vanduan95.dvp@gmail.com
|
5827494e28c8324f3fe91b182ec76744a95c029b
|
aef02ad0a2b36e763af4b6de84399fcbfb788faf
|
/LPHW/ex6.py
|
4d116c7b7f3dba9e3e1cb77c6d4b06c35e1b0fbb
|
[] |
no_license
|
kanishkd4/Python_Learning_code
|
98cf74cbbeef34f594804b515438f24775feddbf
|
62a6b1745f4c8624ed4207ab38c83f0a7ead99c9
|
refs/heads/master
| 2020-04-15T12:44:52.828258
| 2018-04-05T09:56:35
| 2018-04-05T09:56:35
| 61,795,436
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
x = "there are %d types of people." %10
binary = "binary"
do_not = "don't"
y = "those who know %s and those who %s." % (binary, do_not)
print x
print y
print "I said %r" % x
print "I also said: '%s'" % y
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
print joke_evaluation % hilarious
w = "this is the left side of.."
e = "a string with a right side."
print w + e
|
[
"noreply@github.com"
] |
noreply@github.com
|
98ae73f5af580dce3fc708af8516af5e1c67bbf3
|
50e03dae243af6bfab19f8cf42494284ff70fbd3
|
/BIG-BIRD/RelGAN.py
|
05e0634536e46c4d7140e7c904e0f5d7773baeb5
|
[] |
no_license
|
BritneyMuller/Summarization-Lab
|
bf2d79abe724e999e4017d4ffe6220863fe7f162
|
4b40f5ac7a629f509c323bf426d3058268628186
|
refs/heads/master
| 2021-01-25T23:13:13.669487
| 2019-09-30T14:38:13
| 2019-09-30T14:38:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,010
|
py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import matplotlib.pyplot as plt
import os
import torch.autograd as autograd
from RelationalMemory import *
from Transformer import *
class BigBird():
#generator is translator here
def __init__(self, generator, discriminator, reconstructor, dictionary, gamma = 0.99, clip_value = 0.1, lr_G = 5e-5, lr_D = 5e-5, lr_R = 1e-4, LAMBDA = 10, TEMP_END = 0.5, vq_coef =0.8, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
super(BigBird, self).__init__()
self.device = device
self.dictionary = dictionary
self.generator = generator.to(self.device)
self.reconstructor = reconstructor.to(self.device)
self.discriminator = discriminator.to(self.device)
self.gamma = gamma
self.eps = np.finfo(np.float32).eps.item()
self.optimizer_R = torch.optim.Adam(list(self.generator.parameters()) + list(self.reconstructor.parameters()), lr=lr_R)
#normal WGAN
self.optimizer_G = torch.optim.RMSprop(self.generator.parameters(), lr=lr_G)
self.optimizer_D = torch.optim.RMSprop(self.discriminator.parameters(), lr=lr_D)
#WGAN GP
#self.LAMBDA = LAMBDA # Gradient penalty lambda hyperparameter
#self.optimizer_G = torch.optim.Adam(self.generator.parameters(), lr=lr_G, betas=(0.0, 0.9))
#self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(), lr=lr_D, betas=(0.0, 0.9))
self.clip_value = clip_value
self.TEMP_END = TEMP_END
self.lr_G = lr_G
self.lr_D = lr_D
self.lr_R = lr_R
self.total_steps = 0
self.vq_coef = 0.8
self.epoch = 0
def calc_gradient_penalty(self, netD, real_data, fake_data):
#print real_data.size()
BATCH_SIZE = real_data.shape[0]
dim_1 = real_data.shape[1]
dim_2 = real_data.shape[2]
alpha = torch.rand(BATCH_SIZE, dim_1)
alpha = alpha.view(-1,1).expand(dim_1 * BATCH_SIZE, dim_2).view(BATCH_SIZE, dim_1, dim_2)
alpha = alpha.to(self.device)
#print(real_data.shape) #[BATCH_SIZE, 19, vocab_sz]
#print(fake_data.shape) #[BATCH_SIZE, 19, vocab_sz]
interpolates_data = ( alpha * real_data.float() + ((1 - alpha) * fake_data.float()) )
interpolates = interpolates_data.to(self.device)
#interpolates = netD.disguised_embed(interpolates_data)
interpolates = autograd.Variable(interpolates, requires_grad=True)
src_mask = (interpolates_data.argmax(-1) != netD.padding_index).type_as(interpolates_data).unsqueeze(-2)
disc_interpolates = netD.transformer_encoder( interpolates, src_mask )
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(self.device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.LAMBDA
return gradient_penalty
def _to_one_hot(self, y, n_dims):
scatter_dim = len(y.size())
y_tensor = y.to(self.device).long().view(*y.size(), -1)
zeros = torch.zeros(*y.size(), n_dims).to(self.device)
return zeros.scatter(scatter_dim, y_tensor, 1)
def train_D(self, fake_datas, real_datas):
## train discriminator
# print("real")
# print(real_datas[:10])
real_score = torch.mean(self.discriminator(real_datas))
# print("fake")
# print(fake_datas[:10])
fake_score = torch.mean(self.discriminator(fake_datas))
batch_d_loss = -real_score + fake_score #+ self.calc_gradient_penalty(self.discriminator, real_datas, fake_datas)
return batch_d_loss, real_score.item(), fake_score.item()
def train_G(self, fake_datas):
self.optimizer_G.zero_grad()
batch_g_loss = -torch.mean(self.discriminator(fake_datas))
batch_g_loss.backward(retain_graph=True)
self.optimizer_G.step()
return batch_g_loss.item()
def indicies2string(self, indices):
inv_map = {v: k for k, v in self.dictionary.items()}
return ' '.join([inv_map[i.item()] for i in indices])
def train(self):
self.generator.train()
self.reconstructor.train()
self.discriminator.train()
def eval(self):
self.generator.eval()
self.reconstructor.eval()
self.discriminator.eval()
def load(self, load_path):
print('load Bird from', load_path)
loader = torch.load(load_path)
self.generator.load_state_dict(loader['generator'])
self.discriminator.load_state_dict(loader['discriminator'])
self.reconstructor.load_state_dict(loader['reconstructor'])
self.total_steps = loader['total_steps']
self.epoch = loader['epoch']
self.gumbel_temperature = loader['gumbel_temperature']
def save(self, save_path):
print('lay egg to ./Nest ... save as', save_path)
torch.save({'generator':self.generator.state_dict(),
'reconstructor':self.reconstructor.state_dict(),
'discriminator':self.discriminator.state_dict(),
'total_steps':self.total_steps,
'epoch':self.epoch,
'gumbel_temperature':self.gumbel_temperature
},save_path)
def eval_iter(self, src, src_mask, max_len, real_data, ct, verbose = 1):
with torch.no_grad():
batch_size = src.shape[0]
memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature = self.gumbel_temperature)
memory = self.reconstructor.initial_state(batch_size, trainable=True).to(self.device)
CE_loss, acc, out = self.reconstructor.reconstruct_forward(gumbel_one_hot, src, memory, self.dictionary['[CLS]'])
if verbose == 1 and ct % 1 == 0:
print("origin:")
print(self.indicies2string(src[0]))
print("summary:")
print(self.indicies2string(summary_sample[0]))
print("real summary:")
print(self.indicies2string(real_data[0]))
print("reconsturct out:")
print(self.indicies2string(out[0]))
print("")
return acc, CE_loss.item()
def pretrainGAN_run_iter(self, src, src_mask, max_len, real_data, D_iters = 5, D_toggle = 'On', verbose = 1):
batch_size = src.shape[0]
memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4*self.total_steps))
summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature = self.gumbel_temperature)
batch_G_loss = 0
NNcriterion = nn.NLLLoss().to(self.device)
batch_G_loss = NNcriterion(summary_probs.log().contiguous().view(batch_size * max_len, -1), real_data.contiguous().view(-1))
self.optimizer_G.zero_grad()
batch_G_loss.backward()
self.optimizer_G.step()
self.total_steps += 1
if self.total_steps % 500 == 0:
if not os.path.exists("./Nest"):
os.makedirs("./Nest")
self.save("./Nest/Pretrain_RelGAN")
if verbose == 1 and self.total_steps % 1000 == 0:
print("origin:")
print(self.indicies2string(src[0]))
print("summary:")
print(self.indicies2string(summary_sample[0]))
print("real summary:")
print(self.indicies2string(real_data[0]))
print("")
distrib = summary_probs[0,0, :100].cpu().detach().numpy()
one_hot_out = gumbel_one_hot[0,0, :100].cpu().detach().numpy()
return [batch_G_loss, 0], [0], [0, 0, 0], [self.indicies2string(src[0]), self.indicies2string(summary_sample[0]), 0], distrib, one_hot_out
def run_iter(self, src, src_mask, max_len, real_data, D_iters = 5, D_toggle = 'On', verbose = 1, writer = None):
#summary_logits have some problem
#summary = self.generator(src, src_mask, max_len, self.dictionary['[CLS]'])
batch_size = src.shape[0]
memory = self.generator.initial_state(batch_size, trainable=True).to(self.device)
self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4*self.total_steps))
summary_sample, summary_log_values, summary_probs, gumbel_one_hot = self.generator(src, max_len, memory, self.dictionary['[CLS]'], temperature = self.gumbel_temperature)
batch_D_loss = 0
if(D_toggle == 'On'):
for i in range(D_iters):
self.optimizer_D.zero_grad()
batch_d_loss, real_score, fake_score = self.train_D(gumbel_one_hot, self._to_one_hot(real_data, len(self.dictionary)))
batch_D_loss += batch_d_loss
batch_d_loss.backward(retain_graph=True);
#Clip critic weights
for p in self.discriminator.parameters():
p.data.clamp_(-self.clip_value, self.clip_value)
self.optimizer_D.step();
batch_D_loss = batch_D_loss.item()/D_iters
batch_G_loss = 0
if(D_toggle == 'On'):
#print(gumbel_one_hot.shape)
batch_G_loss = self.train_G(gumbel_one_hot)
self.gumbel_temperature = max(self.TEMP_END, math.exp(-1e-4*self.total_steps))
memory = self.reconstructor.initial_state(batch_size, trainable=True).to(self.device)
CE_loss, acc, out = self.reconstructor.reconstruct_forward(gumbel_one_hot, src, memory, self.dictionary['[CLS]'])
rec_loss = CE_loss #+ self.vq_coef * vq_loss + 0.25 * self.vq_coef * commit_loss
self.optimizer_R.zero_grad()
rec_loss.backward()
nn.utils.clip_grad_norm_(list(self.generator.parameters()) + list(self.reconstructor.parameters()), 0.1)
self.optimizer_R.step()
self.total_steps += 1
if self.total_steps % 500 == 0:
if not os.path.exists("./Nest"):
os.makedirs("./Nest")
self.save("./Nest/DoubleRelationMEM_GAN")
#for i in range(5):
#plt.plot(range(1000),summary_probs.cpu().detach().numpy()[0,i,:1000] )
# wandb.log({"prob {}".format(i): wandb.Histogram(summary_probs.cpu().detach().numpy()[0,i,:1000])},step=step)
if verbose == 1 and self.total_steps % 100 == 0:
print("origin:")
print(self.indicies2string(src[0]))
print("summary:")
print(self.indicies2string(summary_sample[0]))
print("real summary:")
print(self.indicies2string(real_data[0]))
print("reconsturct out:")
print(self.indicies2string(out[0]))
# print("sentiment:",label[0].item())
# print("y:",sentiment_label[0].item())
# print("reward:",rewards[0].item())
print("")
# for name, param in self.generator.named_parameters():
# writer.add_histogram(name, param.clone().cpu().data.numpy(), self.total_steps)
# for name, param in self.reconstructor.named_parameters():
# writer.add_histogram(name, param.clone().cpu().data.numpy(), self.total_steps)
distrib = summary_probs.cpu().detach().numpy()[0,0, :100]
one_hot_out = gumbel_one_hot.cpu().detach().numpy()[0,0, :100]
return [batch_G_loss, batch_D_loss], [CE_loss.item()], [real_score, fake_score, acc], [self.indicies2string(src[0]), self.indicies2string(summary_sample[0]), self.indicies2string(out[0])], distrib, one_hot_out
class LSTMEncoder(nn.Module):
def __init__(self, vocab_sz, hidden_dim, padding_index):
super().__init__()
self.src_embed = nn.Embedding(vocab_sz, hidden_dim)
self.rnn_cell = nn.LSTM(hidden_dim, hidden_dim, num_layers=1, batch_first=True, bidirectional=True)
self.padding_index = padding_index
self.outsize = hidden_dim*2
def forward(self, x):
#src_mask = (x != self.padding_index).type_as(x).unsqueeze(-2)
out, (h,c) = self.rnn_cell( self.src_embed(x))
return out
# class LSTM_Gumbel_Encoder_Decoder(nn.Module):
# def __init__(self, hidden_dim, emb_dim, input_len, output_len, voc_size, device, eps=1e-8, num_layers = 2):
# super().__init__()
# self.hidden_dim = hidden_dim
# self.emb_dim = emb_dim
# #self.input_len = input_len
# #self.output_len = output_len
# #self.voc_size = voc_size
# #self.teacher_prob = 1.
# #self.epsilon = eps
# self.emb_layer = nn.Embedding(voc_size, emb_dim)
# self.num_layers = num_layers
# self.encoder = nn.LSTM(emb_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=True)
# self.decoder = nn.LSTM(emb_dim, hidden_dim*2, num_layers=num_layers, batch_first=True)
# self.device = device
# self.attention_softmax = nn.Softmax(dim=1)
# # self.pro_layer = nn.Sequential(
# # nn.Linear(hidden_dim*4, voc_size, bias=True)
# # )
# self.adaptive_softmax = torch.nn.AdaptiveLogSoftmaxWithLoss(hidden_dim*4, voc_size, [100, 1000, 10000], div_value=4.0, head_bias=False)
# def forward(self, x, src_mask, max_len, start_symbol, mode = 'argmax', temp = 2.0):
# batch_size = x.shape[0]
# input_len = x.shape[1]
# device = x.device
# # encoder
# x_emb = self.emb_layer(x)
# memory, (h, c) = self.encoder(x_emb)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# h = h.view(batch_size, self.num_layers, h.shape[-1]*2)
# c = c.view(batch_size, self.num_layers, c.shape[-1]*2)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# ## decoder
# out_h, out_c = (h, c)
# ys = torch.ones(batch_size, 1).fill_(start_symbol).type_as(x.data)
# values = []
# all_probs = []
# gumbel_one_hots = []
# for i in range(max_len-1):
# ans_emb = self.emb_layer(ys[:,-1]).view(batch_size, 1, self.emb_dim)
# out, (out_h, out_c) = self.decoder(ans_emb, (out_h, out_c))
# attention = torch.bmm(memory, out.transpose(1, 2)).view(batch_size, input_len)
# attention = self.attention_softmax(attention)
# context_vector = torch.bmm(attention.view(batch_size, 1, input_len), memory)
# logits = torch.cat((out, context_vector), -1).view(batch_size, -1)
# one_hot, next_words, value, prob = self.gumbel_softmax(logits, temp)
# # print(feature.shape)
# # print(one_hot.shape)
# # print(next_words.shape)
# # print(values.shape)
# # print(log_probs.shape)
# # input("")
# ys = torch.cat((ys, next_words.view(batch_size, 1)), dim=1)
# values.append(value)
# all_probs.append(prob)
# gumbel_one_hots.append(one_hot)
# values = torch.stack(values,1)
# all_probs = torch.stack(all_probs,1)
# gumbel_one_hots = torch.stack(gumbel_one_hots, 1)
# return ys, values, all_probs, gumbel_one_hots
# def sample_gumbel(self, shape, eps=1e-20):
# U = torch.rand(shape).to(self.device)
# return -Variable(torch.log(-torch.log(U + eps) + eps))
# def gumbel_softmax_sample(self, logits, temperature):
# y = logits + self.sample_gumbel(logits.size())
# #the formula should be prob not logprob, I guess it still works
# return self.adaptive_softmax.log_prob(logits).exp()
# #return F.softmax(y / temperature, dim=-1)
# def gumbel_softmax(self, logits, temperature):
# """
# ST-gumple-softmax
# input: [*, n_class]
# return: flatten --> [*, n_class] an one-hot vector
# """
# y = self.gumbel_softmax_sample(logits, temperature)
# shape = y.size()
# values, ind = y.max(dim=-1)
# y_hard = torch.zeros_like(y).view(-1, shape[-1])
# y_hard.scatter_(1, ind.view(-1, 1), 1)
# y_hard = y_hard.view(*shape)
# y_hard = (y_hard - y).detach() + y
# return y_hard.view(logits.shape[0], -1), ind, values, y
# class LSTM_Normal_Encoder_Decoder(nn.Module):
# def __init__(self, hidden_dim, emb_dim, input_len, output_len, voc_size, pad_index, device, eps=1e-8, num_layers = 2):
# super().__init__()
# self.hidden_dim = hidden_dim
# self.emb_dim = emb_dim
# self.device = device
# #self.input_len = input_len
# #self.output_len = output_len
# #self.voc_size = voc_size
# #self.teacher_prob = 1.
# #self.epsilon = eps
# self.num_layers = num_layers
# #self.emb_layer = nn.Embedding(voc_size, emb_dim)
# self.disguise_embed = nn.Linear(voc_size, emb_dim)
# self.encoder = nn.LSTM(emb_dim, hidden_dim, num_layers=num_layers, batch_first=True, bidirectional=True)
# self.decoder = nn.LSTM(emb_dim, hidden_dim*2, num_layers=num_layers, batch_first=True)
# self.attention_softmax = nn.Softmax(dim=1)
# self.vocab_sz = voc_size
# self.criterion = torch.nn.AdaptiveLogSoftmaxWithLoss(hidden_dim*4, voc_size, [1000, 5000, 20000], div_value=4.0, head_bias=False)
# def forward(self, x, src_mask, max_len, start_symbol, y, mode = 'argmax', temp = 2.0):
# batch_size = x.shape[0]
# input_len = x.shape[1]
# device = x.device
# # encoder
# x_emb = self.disguise_embed(x)
# memory, (h, c) = self.encoder(x_emb)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# h = h.view(batch_size, self.num_layers, h.shape[-1]*2)
# c = c.view(batch_size, self.num_layers, c.shape[-1]*2)
# h = h.transpose(0, 1).contiguous()
# c = c.transpose(0, 1).contiguous()
# ## decoder
# out_h, out_c = (h, c)
# logits = []
# for i in range(max_len):
# ans_emb = self.disguise_embed(self._to_one_hot(y[:,i], self.vocab_sz)).view(batch_size, 1, self.emb_dim)
# out, (out_h, out_c) = self.decoder(ans_emb, (out_h, out_c))
# attention = torch.bmm(memory, out.transpose(1, 2)).view(batch_size, input_len)
# attention = self.attention_softmax(attention)
# context_vector = torch.bmm(attention.view(batch_size, 1, input_len), memory)
# logit = torch.cat((out, context_vector), -1).view(batch_size, -1)
# # if mode == 'argmax':
# # values, next_words = torch.max(log_probs, dim=-1, keepdim=True)
# # if mode == 'sample':
# # m = torch.distributions.Categorical(logits=log_probs)
# # next_words = m.sample()
# # values = m.log_prob(next_words)
# logits.append(logit)
# logits = torch.stack(logits, 1)
# _ ,loss = self.criterion(logits[:,:-1].contiguous().view(batch_size * (max_len - 1), -1), y[:,1:].contiguous().view(batch_size * (max_len-1)))
# #y from one to get rid of [CLS]
# log_argmaxs = self.criterion.predict(logits[:,:-1].contiguous().view(batch_size * (max_len - 1), -1)).view(batch_size, max_len-1)
# acc = ( log_argmaxs== y[:,1:]).float().mean()
# return loss, acc, log_argmaxs
# def _to_one_hot(self, y, n_dims):
# scatter_dim = len(y.size())
# y_tensor = y.to(self.device).long().view(*y.size(), -1)
# zeros = torch.zeros(*y.size(), n_dims).to(self.device)
# return zeros.scatter(scatter_dim, y_tensor, 1)
class Discriminator(nn.Module):
def __init__(self, transformer_encoder, hidden_dim, vocab_sz, padding_index):
super(Discriminator, self).__init__()
self.padding_index = padding_index
self.disguise_embed = nn.Linear(vocab_sz, hidden_dim)
self.transformer_encoder = transformer_encoder
self.linear = nn.Linear(self.transformer_encoder.layers[-1].size, 1)
#self.sigmoid = nn.Sigmoid()
def forward(self, x):
src_mask = (x.argmax(-1) != self.padding_index).type_as(x).unsqueeze(-2)
x = self.transformer_encoder(self.disguise_embed(x), src_mask)
score = self.linear(x)
return score
|
[
"you@example.com"
] |
you@example.com
|
342f10e5e1c17b196563987f7720df7d1de0ef8e
|
1361f56a3dc2205455054d144fa30d9cebb9704f
|
/week-07/project/get_data.py
|
b6f5c0ca6d65a95f039ea83f0e9e44f705ff9f35
|
[] |
no_license
|
green-fox-academy/TemExile
|
31b240f58a0d56364e3b888cd9610b176f244d5e
|
040882ebb07d10c65b98cd3dc12814f10fa52dc0
|
refs/heads/master
| 2020-05-19T18:17:58.468807
| 2019-06-21T06:22:51
| 2019-06-21T06:22:51
| 185,149,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from function import get_info
city_list = {
'Bath':'116', 'Bridgwater':'212', 'Burnham-On-Sea':'251', 'Chard':'301',
'Cheddar':'306', 'Clevedon':'337', 'Crewkerne':'381',
'Frome':'536', 'Glastonbury':'551', 'Ilminster':'678', 'Minehead':'942',
'Radstock':'1109', 'Shepton+Mallet':'1198',
'Street':'1287', 'Taunton':'1317', 'Wellington':'1414', 'Wells':'1415',
'Weston-Super-Mare':'1437', 'Wincanton':'1458', 'Yeovil':'1497'
}
# 'https://www.rightmove.co.uk/house-prices/detail.html?'
# 'country=england&locationIdentifier=REGION%5E1198&'
# 'searchLocation=Shepton+Mallet&referrer=listChangeCriteria&index=0'
page_list = [x*25 for x in range(40)]
base_url = r'https://www.rightmove.co.uk/house-prices/detail.html?country=england&locationIdentifier=REGION%5E'
raw_data_list = []
for key, value in city_list.items():
for n in page_list:
url = base_url + value + r'&searchLocation=' + key + '&&referrer=listChangeCriteria&index=' + str(n)
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
data = soup.find_all('li', 'soldUnit')
for item in data:
dic = {}
result = get_info(item)
dic['Price'] = result[0]
dic['HomeType'] = result[1]
dic['HoldType'] = result[2]
dic['Type'] = result[3]
dic['SoldDate'] = result[4]
dic['Bedroom'] = result[5]
dic['areaCode'] = result[6]
dic['City'] = key
raw_data_list.append(dic)
df = pd.DataFrame(raw_data_list)
df.to_csv('Raw_data.csv')
|
[
"hxwengl@163.com"
] |
hxwengl@163.com
|
f231f73dec833a474cefcee2707d8742f92f9d51
|
125bc51efb95f383257e7bdb50ae74e5dc05b7f7
|
/src/belajarIntegerString.py
|
f28765c84ddfefc5911c0710cd851199053fcd21
|
[] |
no_license
|
frestea09/learn_ch1_python
|
f9688fffda5f0fa312b82bd25081b986fa0779e9
|
510ea59bf85ec024ebc473db2533e92becaefbf3
|
refs/heads/master
| 2020-05-26T18:22:31.171688
| 2019-05-26T05:42:08
| 2019-05-26T05:42:08
| 188,334,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
from __future__ import print_function
def main():
variabelNama = input('nama : ')
variabelInteger = int(input('Umur'))
print('Nama anda %s dan umur anda %d'%(variabelNama,variabelInteger))
if __name__ == "__main__":
main()
|
[
"ilmanfrasetya@gmail.com"
] |
ilmanfrasetya@gmail.com
|
a921a15b368f2785bb530b40113b34630061be52
|
0974dd03a2c169c9186d74bb9c4f80ea68802331
|
/bin/f2py2
|
3f101a41698295cedc62c9ebc308c351e8e37718
|
[] |
no_license
|
devashah7/instameme
|
edb4a0cf8e80560eef54e0aa40a19fd4deb0c99c
|
8ba27800dc5624f80672fae3f727ece5fcd779a2
|
refs/heads/master
| 2020-08-09T06:25:03.011987
| 2019-10-09T20:42:57
| 2019-10-09T20:42:57
| 214,018,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
#!/home/dshah/Desktop/insta/insta/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"dshah@hcn-inc.com"
] |
dshah@hcn-inc.com
|
|
715c52973d9758a579026ef80e34afbd30905a12
|
32134ac2fa760ba7285d9bc844fa4db0be76352a
|
/perceptron.py
|
dcc4b74eaf87eec1c00054f55b18a839728e6999
|
[] |
no_license
|
NJCinnamond/NLPAssignment-1
|
8143bf8bce8de1044b757de28e2b0afce4169ce1
|
4204dcf64cf0864e6be2c5ce645f3e1ea810762f
|
refs/heads/master
| 2020-12-31T10:45:22.029690
| 2020-02-25T01:14:43
| 2020-02-25T01:14:43
| 239,006,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,636
|
py
|
""" Maximum entropy model for Assignment 1: Starter code.
You can change this code however you like. This is just for inspiration.
"""
import os
import sys
import numpy as np
from util import evaluate, load_data
from sklearn.metrics import confusion_matrix
class PerceptronModel():
""" Maximum entropy model for classification.
Attributes:
(float) weights
(float) bias
(int) num_dim
(bool) add_bias
"""
def __init__(self, label_to_index, lr=0.02):
self.W = None
self.bias = None
self.lr = lr
self.num_dim = 0
self.num_class = len(label_to_index)
self.label_to_index = label_to_index
self.index_to_label = {v: k for k, v in label_to_index.items()}
def train(self, training_data):
""" Trains the maximum entropy model.
Inputs:
training_data: Suggested type is (list of pair), where each item is
a training example represented as an (input, label) pair.
"""
self.num_dim = len(training_data[0][0])
self.num_epochs = 5
self.W = {c: np.array([0.0 for _ in range(self.num_dim)]) for c in self.label_to_index.keys()}
epoch = 0
change_over_epoch = True
while change_over_epoch and epoch < self.num_epochs:
print("Epoch: ", epoch)
epoch += 1
correct = 0
change_over_epoch = False
for sample in training_data:
#Get numerical value of label
label = sample[1]
if sample[1] not in self.label_to_index.keys():
label = self.index_to_label[0]
# Initialize arg_max value, predicted class.
arg_max, predicted_label = 0, self.index_to_label[0]
# Multi-Class Decision Rule:
for c in self.label_to_index.keys():
current_activation = np.dot(sample[0], self.W[c])
if current_activation >= arg_max:
arg_max, predicted_label = current_activation, c
# Update Rule:
if not (label == predicted_label):
change_over_epoch = True
self.W[label] += np.dot(self.lr, sample[0])
self.W[predicted_label] -= np.dot(self.lr, sample[0])
else:
correct += 1
acc = correct / len(training_data)
print("Accuracy: ", str(acc))
def predict(self, model_input):
""" Predicts a label for an input.
Inputs:
model_input (features): Input data for an example, represented as a
feature vector.
Returns:
The predicted class.
"""
# Initialize predicted label to UNK token
arg_max, predicted_label = 0, self.index_to_label[0]
# Multi-Class Decision Rule:
for c in self.label_to_index.keys():
current_activation = np.dot(model_input, self.W[c])
if current_activation >= arg_max:
arg_max, predicted_label = current_activation, c
return predicted_label
def create_dummy_bias(data):
for sample in data:
sample[0].append(1)
return data
if __name__ == "__main__":
print("Getting data")
train_data, dev_data, test_data, data_type, label_dict = load_data(sys.argv)
print("Got data")
train_data = create_dummy_bias(train_data)
dev_data = create_dummy_bias(dev_data)
test_data = create_dummy_bias(test_data)
print(len(train_data))
print(len(dev_data))
print(len(test_data))
# Train the model using the training data.
model = PerceptronModel(label_to_index=label_dict)
model.train(train_data)
# Predict on the development set.
'''
dev_accuracy = evaluate(model,
dev_data,
os.path.join("results", "perceptron_" + data_type + "_dev_predictions.csv"))
print("Dev accuracy: ", dev_accuracy)
'''
pred_label = [model.predict(example[0]) for example in dev_data]
true_label = [example[1] for example in dev_data]
conf_mat = confusion_matrix(true_label, pred_label,
labels=np.sort(np.unique(true_label)))
print(conf_mat)
print(np.sort(np.unique(true_label)))
# Predict on the test set.
# Note: We don't provide labels for test, so the returned value from this
# call shouldn't make sense.
#evaluate(model,
# test_data,
# os.path.join("results", "perceptron_" + data_type + "_test_predictions.csv"))
|
[
"54274991+NJCinnamond@users.noreply.github.com"
] |
54274991+NJCinnamond@users.noreply.github.com
|
09e93c96494e49a2d86292c5fba4b0ca5dd942cb
|
bda539dedce550a3242cd9a2a4bb61782a924c12
|
/git/training/runTraining.py
|
26cb6e270bf3928f7bc110871628979131099a59
|
[] |
no_license
|
joohwankim/deepgazekickoff
|
37222c937faca070a848bf97b1d85df518659d48
|
b30cc51de247be75cee510240868e6a31e08a815
|
refs/heads/master
| 2020-03-18T10:17:21.847000
| 2018-05-29T14:31:21
| 2018-05-29T14:31:21
| 134,605,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
"""
Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-ND 4.0 license (https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode).
"""
import argparse, logging, os, dlcore.train, sys
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-j', '--job', required=True, help='Which network to train. Specify a folder containing configuration file')
parser.add_argument('-v', '--var', nargs='*', action='append', help='A varaible and value pair')
parser.add_argument('-r', '--resume', default=None, help='Address to a checkpoint file. If given, resume training from the checkpoint file.')
args = parser.parse_args()
#config = dlcore.train.loadModule(os.path.join(args.job,'config.py'))
config = dlcore.train.loadModule(args.job)
if args.var:
for var in args.var:
dtype = type(getattr(config, var[0]))
if len(var) == 2:
setattr(config, var[0], dtype(var[1]))
if os.path.abspath(config.result_dir) == os.path.abspath('./'):
config.result_dir = os.path.normpath(args.job)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
#logging.basicConfig(stream=sys.stdout, level=logging.INFO)
#logging.basicConfig(filename=os.path.join(config.result_dir,config.log), level=config.log_level)
dlcore.train.main(config, args.resume)
|
[
"sckim@nvidia.com"
] |
sckim@nvidia.com
|
8951afe2b51d654fd469ed7fd936879e3610aa30
|
35894bca47cf0c9a51a05caf7b56a0d69c05b033
|
/04_subrotinas_numpy/25_fibonacci.py
|
1067f8b8abc1c15bc44a985e9b4f892471d34f46
|
[] |
no_license
|
alcebytes/Phyton-Estudo
|
0a2d33f5f3e668e6ab2f99e5e4499545a3bc1273
|
a3f9a0b3e0a91d71a9359480d6ec17e692572694
|
refs/heads/master
| 2023-01-14T17:24:16.486956
| 2020-10-08T02:02:02
| 2020-10-08T02:02:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 942
|
py
|
import time as time
num_iter = int(input("Digitar o valor do número máximo para a sequência de Fibonacci = "))
tempo_inicio = time.time()
#tempo_inicio_CPU = time.clock() #ABSOLETO
tempo_inicio_CPU = time.process_time()
tempo_inicio_CPU_2 = time.perf_counter()
# f(0)
f = []
f.append(0)
print(f)
# f(1)
f.append(1)
print(f)
"""
f(n + 2) = f(n) + f(n + 1)
for n in range(0, num_iter - 2, 1)
f.append(f[n] + f[n + 1] )
"""
n = 0
while n <= num_iter - 3:
f.append(f[n] + f[n + 1])
n = n + 1
print(f)
# Imprimir último termo de f
print(f[-1])
# Outra forma:
print(f[len(f) - 1])
tempo_fim = time.time() - tempo_inicio
print("O tempo de execução da aplicação é", tempo_fim, "s")
tempo_fim_CPU_2 = time.perf_counter() - tempo_inicio_CPU_2
print("O tempo de execução da CPU é", tempo_fim_CPU_2)
tempo_fim_CPU = time.process_time() - tempo_inicio_CPU
print("O tempo de execução da CPU é", tempo_fim_CPU)
|
[
"x_kata@hotmail.com"
] |
x_kata@hotmail.com
|